Commit d9f0585f authored by Sabine's avatar Sabine
Browse files

Merge branch 'dev' of git.nilu.no:flexpart/flexpart into dev

parents d404d981 c8fc7249
......@@ -66,9 +66,10 @@ program flexpart
end do
call gasdev1(idummy,rannumb(maxrand),rannumb(maxrand-1))
! FLEXPART version string
flexversion_major = '10' ! Major version number, also used for species file names
flexversion='Version '//trim(flexversion_major)//'.0beta (2015-05-01)'
flexversion='Version '//trim(flexversion_major)//'.1beta (2016-11-02)'
verbosity=0
! Read the pathnames where input/output files are stored
......@@ -383,6 +384,17 @@ program flexpart
end do
end do
! Inform whether output kernel is used or not
!*********************************************
if (lroot) then
if (lnokernel) then
write(*,*) "Concentrations are calculated without using kernel"
else
write(*,*) "Concentrations are calculated using kernel"
end if
end if
! Calculate particle trajectories
!********************************
......@@ -401,11 +413,16 @@ program flexpart
call timemanager
! NIK 16.02.2005
write(*,*) '**********************************************'
write(*,*) 'Total number of occurences of below-cloud scavenging', tot_blc_count
write(*,*) 'Total number of occurences of in-cloud scavenging', tot_inc_count
write(*,*) '**********************************************'
do i=1,nspec
write(*,*) '**********************************************'
write(*,*) 'Scavenging statistics for species ', species(i), ':'
write(*,*) 'Total number of occurences of below-cloud scavenging', &
& tot_blc_count(i)
write(*,*) 'Total number of occurences of in-cloud scavenging', &
& tot_inc_count(i)
write(*,*) '**********************************************'
end do
write(*,*) 'CONGRATULATIONS: YOU HAVE SUCCESSFULLY COMPLETED A FLE&
&XPART MODEL RUN!'
......
......@@ -54,15 +54,17 @@ program flexpart
character(len=256) :: inline_options !pathfile, flexversion, arg2
! Initialize mpi
!*********************
! Initialize mpi
!*********************
call mpif_init
if (mp_measure_time) call mpif_mtime('flexpart',0)
! Initialize arrays in com_mod
!*****************************
call com_mod_allocate_part(maxpart_mpi)
! Initialize arrays in com_mod
!*****************************
if(.not.(lmpreader.and.lmp_use_reader)) call com_mod_allocate_part(maxpart_mpi)
! Generate a large number of random numbers
!******************************************
......@@ -78,7 +80,7 @@ program flexpart
! FLEXPART version string
flexversion_major = '10' ! Major version number, also used for species file names
! flexversion='Ver. 10 Beta MPI (2015-05-01)'
flexversion='Ver. '//trim(flexversion_major)//' Beta MPI (2015-05-01)'
flexversion='Ver. '//trim(flexversion_major)//'.1beta MPI (2016-11-02)'
verbosity=0
! Read the pathnames where input/output files are stored
......@@ -305,9 +307,11 @@ program flexpart
print*,'Initialize all particles to non-existent'
endif
do j=1, size(itra1) ! maxpart_mpi
itra1(j)=-999999999
end do
if (.not.(lmpreader.and.lmp_use_reader)) then
do j=1, size(itra1) ! maxpart_mpi
itra1(j)=-999999999
end do
end if
! For continuation of previous run, read in particle positions
!*************************************************************
......@@ -317,7 +321,7 @@ program flexpart
print*,'call readpartpositions'
endif
! readwind process skips this step
if (lmp_use_reader.and..not.lmpreader) call readpartpositions
if (.not.(lmpreader.and.lmp_use_reader)) call readpartpositions
else
if (verbosity.gt.0 .and. lroot) then
print*,'numpart=0, numparticlecount=0'
......@@ -424,6 +428,16 @@ program flexpart
end do
end do
! Inform whether output kernel is used or not
!*********************************************
if (lroot) then
if (lnokernel) then
write(*,*) "Concentrations are calculated without using kernel"
else
write(*,*) "Concentrations are calculated using kernel"
end if
end if
! Calculate particle trajectories
!********************************
......@@ -447,24 +461,29 @@ program flexpart
! NIK 16.02.2005
if (lroot) then
call MPI_Reduce(MPI_IN_PLACE, tot_blc_count, 1, MPI_INTEGER8, MPI_SUM, id_root, &
call MPI_Reduce(MPI_IN_PLACE, tot_blc_count, nspec, MPI_INTEGER8, MPI_SUM, id_root, &
& mp_comm_used, mp_ierr)
call MPI_Reduce(MPI_IN_PLACE, tot_inc_count, 1, MPI_INTEGER8, MPI_SUM, id_root, &
call MPI_Reduce(MPI_IN_PLACE, tot_inc_count, nspec, MPI_INTEGER8, MPI_SUM, id_root, &
& mp_comm_used, mp_ierr)
else
if (mp_partgroup_pid.ge.0) then ! Skip for readwind process
call MPI_Reduce(tot_blc_count, 0, 1, MPI_INTEGER8, MPI_SUM, id_root, &
call MPI_Reduce(tot_blc_count, 0, nspec, MPI_INTEGER8, MPI_SUM, id_root, &
& mp_comm_used, mp_ierr)
call MPI_Reduce(tot_inc_count, 0, 1, MPI_INTEGER8, MPI_SUM, id_root, &
call MPI_Reduce(tot_inc_count, 0, nspec, MPI_INTEGER8, MPI_SUM, id_root, &
& mp_comm_used, mp_ierr)
end if
end if
if (lroot) then
write(*,*) '**********************************************'
write(*,*) 'Total number of occurences of below-cloud scavenging', tot_blc_count
write(*,*) 'Total number of occurences of in-cloud scavenging', tot_inc_count
write(*,*) '**********************************************'
do i=1,nspec
write(*,*) '**********************************************'
write(*,*) 'Scavenging statistics for species ', species(i), ':'
write(*,*) 'Total number of occurences of below-cloud scavenging', &
& tot_blc_count(i)
write(*,*) 'Total number of occurences of in-cloud scavenging', &
& tot_inc_count(i)
write(*,*) '**********************************************'
end do
write(*,*) 'CONGRATULATIONS: YOU HAVE SUCCESSFULLY COMPLETED A FLE&
&XPART MODEL RUN!'
......
This diff is collapsed.
......@@ -131,15 +131,16 @@ module com_mod
! gdomainfill .T., if domain-filling is global, .F. if not
!ZHG SEP 2015 whether or not to read clouds from GRIB
logical :: readclouds
logical :: readclouds=.false.
!ESO DEC 2015 whether or not both clwc and ciwc are present (if so they are summed)
logical :: sumclouds
logical :: sumclouds=.false.
logical,dimension(maxnests) :: readclouds_nest, sumclouds_nest
!NIK 16.02.2015
integer(selected_int_kind(16)) :: tot_blc_count=0, tot_inc_count=0
integer(selected_int_kind(16)), dimension(maxspec) :: tot_blc_count=0, &
&tot_inc_count=0
!*********************************************************************
......@@ -575,7 +576,8 @@ module com_mod
integer :: numxgridn,numygridn
real :: dxoutn,dyoutn,outlon0n,outlat0n,xoutshiftn,youtshiftn
!real outheight(maxzgrid),outheighthalf(maxzgrid)
logical :: DEP,DRYDEP,DRYDEPSPEC(maxspec),WETDEP,OHREA,ASSSPEC
logical :: DEP,DRYDEP,DRYDEPSPEC(maxspec),WETDEP,WETDEPSPEC(maxspec),&
& OHREA,ASSSPEC
! numxgrid,numygrid number of grid points in x,y-direction
! numxgridn,numygridn number of grid points in x,y-direction for nested output grid
......@@ -592,6 +594,7 @@ module com_mod
! DRYDEP .true., if dry deposition is switched on
! DRYDEPSPEC .true., if dry deposition is switched on for that species
! WETDEP .true., if wet deposition is switched on
! WETDEPSPEC .true., if wet deposition is switched on for that species
! OHREA .true., if OH reaction is switched on
! ASSSPEC .true., if there are two species associated
! (i.e. transfer of mass between these two occurs
......@@ -742,7 +745,7 @@ module com_mod
logical, parameter :: nmlout=.true.
! These variables are used to avoid having separate versions of
! files in cases where differences with MPI version is minor (eso)
! files in cases where differences with MPI version are minor (eso)
!*****************************************************************
integer :: mpi_mode=0 ! .gt. 0 if running MPI version
logical :: lroot=.true. ! true if serial version, or if MPI .and. root process
......
......@@ -125,7 +125,7 @@ subroutine conccalc(itime,weight)
! Take density from 2nd wind field in memory (accurate enough, no time interpolation needed)
!*****************************************************************************
do ind=indz,indzp
rhoprof(ind-indz+1)=p1*rho(ix ,jy ,ind,2) &
rhoprof(ind-indz+1)=p1*rho(ix ,jy ,ind,memind(2)) &
+p2*rho(ixp,jy ,ind,2) &
+p3*rho(ix ,jyp,ind,2) &
+p4*rho(ixp,jyp,ind,2)
......@@ -180,7 +180,7 @@ subroutine conccalc(itime,weight)
! If a particle is close to the domain boundary, do not use the kernel either.
!*****************************************************************************
if ((itage.lt.10800).or.(xl.lt.0.5).or.(yl.lt.0.5).or. &
if (lnokernel.or.(itage.lt.10800).or.(xl.lt.0.5).or.(yl.lt.0.5).or. &
(xl.gt.real(numxgrid-1)-0.5).or. &
(yl.gt.real(numygrid-1)-0.5)) then ! no kernel, direct attribution to grid cell
if ((ix.ge.0).and.(jy.ge.0).and.(ix.le.numxgrid-1).and. &
......
......@@ -193,7 +193,7 @@ subroutine conccalc(itime,weight)
! If a particle is close to the domain boundary, do not use the kernel either.
!*****************************************************************************
if ((itage.lt.10800).or.(xl.lt.0.5).or.(yl.lt.0.5).or. &
if (lnokernel.or.(itage.lt.10800).or.(xl.lt.0.5).or.(yl.lt.0.5).or. &
(xl.gt.real(numxgrid-1)-0.5).or. &
(yl.gt.real(numygrid-1)-0.5)) then ! no kernel, direct attribution to grid cell
if ((ix.ge.0).and.(jy.ge.0).and.(ix.le.numxgrid-1).and. &
......
......@@ -625,24 +625,24 @@ subroutine concoutput(itime,outnum,gridtotalunc,wetgridtotalunc, &
! Reinitialization of grid
!*************************
! do ks=1,nspec
! do kp=1,maxpointspec_act
! do i=1,numreceptor
! creceptor(i,ks)=0.
! end do
! do jy=0,numygrid-1
! do ix=0,numxgrid-1
! do l=1,nclassunc
! do nage=1,nageclass
! do kz=1,numzgrid
! gridunc(ix,jy,kz,ks,kp,l,nage)=0.
! end do
! end do
! end do
! end do
! end do
! end do
! end do
! do ks=1,nspec
! do kp=1,maxpointspec_act
! do i=1,numreceptor
! creceptor(i,ks)=0.
! end do
! do jy=0,numygrid-1
! do ix=0,numxgrid-1
! do l=1,nclassunc
! do nage=1,nageclass
! do kz=1,numzgrid
! gridunc(ix,jy,kz,ks,kp,l,nage)=0.
! end do
! end do
! end do
! end do
! end do
! end do
! end do
creceptor(:,:)=0.
gridunc(:,:,:,:,:,:,:)=0.
......
......@@ -104,9 +104,6 @@ subroutine concoutput_nest(itime,outnum)
! Measure execution time
if (mp_measure_time) call mpif_mtime('iotime',0)
! call cpu_time(mp_root_time_beg)
! mp_root_wtime_beg = mpi_wtime()
! end if
if (verbosity.eq.1) then
print*,'inside concoutput_surf '
......
This diff is collapsed.
......@@ -39,6 +39,11 @@ subroutine drydepokernel(nunc,deposit,x,y,nage,kp)
! deposit amount (kg) to be deposited *
! *
!*****************************************************************************
! Changes:
! eso 10/2016: Added option to disregard kernel
!
!*****************************************************************************
use unc_mod
use par_mod
......@@ -46,7 +51,8 @@ subroutine drydepokernel(nunc,deposit,x,y,nage,kp)
implicit none
real :: x,y,deposit(maxspec),ddx,ddy,xl,yl,wx,wy,w
real(dep_prec), dimension(maxspec) :: deposit
real :: x,y,ddx,ddy,xl,yl,wx,wy,w
integer :: ix,jy,ixp,jyp,ks,nunc,nage,kp
......@@ -73,20 +79,35 @@ subroutine drydepokernel(nunc,deposit,x,y,nage,kp)
wy=0.5+ddy
endif
! If no kernel is used, direct attribution to grid cell
!******************************************************
if (lnokernel) then
do ks=1,nspec
if ((abs(deposit(ks)).gt.0).and.DRYDEPSPEC(ks)) then
if ((ix.ge.0).and.(jy.ge.0).and.(ix.le.numxgrid-1).and. &
(jy.le.numygrid-1)) then
drygridunc(ix,jy,ks,kp,nunc,nage)= &
drygridunc(ix,jy,ks,kp,nunc,nage)+deposit(ks)
end if
end if
end do
else ! use kernel
! Determine mass fractions for four grid points
!**********************************************
do ks=1,nspec
do ks=1,nspec
if ((abs(deposit(ks)).gt.0).and.DRYDEPSPEC(ks)) then
if ((abs(deposit(ks)).gt.0).and.DRYDEPSPEC(ks)) then
if ((ix.ge.0).and.(jy.ge.0).and.(ix.le.numxgrid-1).and. &
(jy.le.numygrid-1)) then
w=wx*wy
drygridunc(ix,jy,ks,kp,nunc,nage)= &
drygridunc(ix,jy,ks,kp,nunc,nage)+deposit(ks)*w
continue
endif
w=wx*wy
drygridunc(ix,jy,ks,kp,nunc,nage)= &
drygridunc(ix,jy,ks,kp,nunc,nage)+deposit(ks)*w
continue
endif
if ((ixp.ge.0).and.(jyp.ge.0).and.(ixp.le.numxgrid-1).and. &
(jyp.le.numygrid-1)) then
......@@ -111,6 +132,7 @@ subroutine drydepokernel(nunc,deposit,x,y,nage,kp)
endif
end do
end do
end if
end subroutine drydepokernel
......@@ -49,7 +49,8 @@ subroutine drydepokernel_nest(nunc,deposit,x,y,nage,kp)
implicit none
real :: x,y,deposit(maxspec),ddx,ddy,xl,yl,wx,wy,w
real(dep_prec), dimension(maxspec) :: deposit
real :: x,y,ddx,ddy,xl,yl,wx,wy,w
integer :: ix,jy,ixp,jyp,ks,kp,nunc,nage
......
......@@ -41,7 +41,8 @@ module wind_mod
! Maximum dimensions of the input mother grids
!*********************************************
integer,parameter :: nxmax=721,nymax=361,nuvzmax=64,nwzmax=64,nzmax=64
! integer,parameter :: nxmax=721,nymax=361,nuvzmax=64,nwzmax=64,nzmax=64
integer,parameter :: nxmax=361,nymax=181,nuvzmax=64,nwzmax=64,nzmax=64
integer,parameter :: nxshift=0 ! for GFS or FNL
!*********************************************
......
......@@ -420,18 +420,19 @@ subroutine gridcheck
! Output of grid info
!********************
write(*,*)
write(*,*)
write(*,'(a,2i7)') 'Vertical levels in NCEP data: ', &
nuvz,nwz
write(*,*)
write(*,'(a)') 'Mother domain:'
write(*,'(a,f10.2,a1,f10.2,a,f10.2)') ' Longitude range: ', &
xlon0,' to ',xlon0+(nx-1)*dx,' Grid distance: ',dx
write(*,'(a,f10.2,a1,f10.2,a,f10.2)') ' Latitude range : ', &
ylat0,' to ',ylat0+(ny-1)*dy,' Grid distance: ',dy
write(*,*)
if (lroot) then
write(*,*)
write(*,*)
write(*,'(a,2i7)') 'Vertical levels in NCEP data: ', &
nuvz,nwz
write(*,*)
write(*,'(a)') 'Mother domain:'
write(*,'(a,f10.2,a1,f10.2,a,f10.2)') ' Longitude range: ', &
xlon0,' to ',xlon0+(nx-1)*dx,' Grid distance: ',dx
write(*,'(a,f10.2,a1,f10.2,a,f10.2)') ' Latitude range : ', &
ylat0,' to ',ylat0+(ny-1)*dy,' Grid distance: ',dy
write(*,*)
end if
! CALCULATE VERTICAL DISCRETIZATION OF ECMWF MODEL
! PARAMETER akm,bkm DESCRIBE THE HYBRID "ETA" COORDINATE SYSTEM
......
This diff is collapsed.
......@@ -418,7 +418,7 @@ shift_field_0.o: par_mod.o
timemanager.o: com_mod.o flux_mod.o netcdf_output_mod.o oh_mod.o outg_mod.o \
par_mod.o point_mod.o unc_mod.o xmass_mod.o
timemanager_mpi.o: com_mod.o flux_mod.o mpi_mod.o oh_mod.o outg_mod.o \
par_mod.o point_mod.o unc_mod.o xmass_mod.o
par_mod.o point_mod.o unc_mod.o xmass_mod.o netcdf_output_mod.o
unc_mod.o: par_mod.o
verttransform.o: cmapf_mod.o com_mod.o par_mod.o
verttransform_gfs.o: cmapf_mod.o com_mod.o par_mod.o
......
......@@ -28,7 +28,9 @@ module mean_mod
interface mean
module procedure mean_sp
module procedure mean_dp
module procedure mean_mixed_prec
module procedure mean_mixed_dss
module procedure mean_mixed_dsd
end interface mean
contains
......@@ -63,9 +65,9 @@ contains
! real(sp) :: x_sp(number),xm,xs,xl,xq,xaux
! real(sp),parameter :: eps=1.0e-30
integer,intent(in) :: number
real(sp), intent(in) :: x_sp(number)
real(sp), intent(out) ::xm,xs
integer,intent(in) :: number
real(sp) :: xl,xq,xaux
real(sp),parameter :: eps=1.0e-30
integer :: i
......@@ -115,9 +117,9 @@ contains
implicit none
integer,intent(in) :: number
real(dp), intent(in) :: x_dp(number)
real(dp), intent(out) ::xm,xs
integer,intent(in) :: number
real(dp) :: xl,xq,xaux
real(dp),parameter :: eps=1.0e-30
integer :: i
......@@ -141,7 +143,7 @@ contains
end subroutine mean_dp
subroutine mean_mixed_prec(x_dp,xm,xs,number)
subroutine mean_mixed_dss(x_dp,xm,xs,number)
!*****************************************************************************
! *
......@@ -149,7 +151,7 @@ contains
! *
! AUTHOR: Andreas Stohl, 25 January 1994 *
! *
! Mixed precision version ESO 2016 (dp input, sp output) *
! Mixed precision version ESO 2016 (dp in, sp out, sp out) *
!*****************************************************************************
! *
! Variables: *
......@@ -167,9 +169,9 @@ contains
implicit none
integer,intent(in) :: number
real(dp), intent(in) :: x_dp(number)
real(sp), intent(out) ::xm,xs
integer,intent(in) :: number
real(sp) :: xl,xq,xaux
real(sp),parameter :: eps=1.0e-30
integer :: i
......@@ -191,5 +193,59 @@ contains
xs=sqrt(xaux/real(number-1,kind=sp))
endif
end subroutine mean_mixed_prec
end subroutine mean_mixed_dss
subroutine mean_mixed_dsd(x_dp,xm,xs_dp,number)
!*****************************************************************************
! *
! This subroutine calculates mean and standard deviation of a given element.*
! *
! AUTHOR: Andreas Stohl, 25 January 1994 *
! *
! Mixed precision version ESO 2016 (dp in, sp out, dp out) *
!*****************************************************************************
! *
! Variables: *
! x_dp(number) field of input data *
! xm mean *
! xs_dp standard deviation *
! number number of elements of field x_dp *
! *
! Constants: *
! eps tiny number *
! *
!*****************************************************************************
use par_mod, only: sp,dp
implicit none
integer,intent(in) :: number
real(dp), intent(in) :: x_dp(number)
! Mean is returned in single precision, standard deviation in double
! precision (hence the "dsd" suffix: dp in, sp out, dp out).
real(sp), intent(out) ::xm
real(dp), intent(out) ::xs_dp
! Accumulators: xl = running sum, xq = running sum of squares,
! xaux = (n-1)*variance before the final division.
real(dp) :: xl,xq,xaux
! Threshold below which the variance term is treated as zero; this also
! guards the number==1 case (xaux is exactly 0 then) and small negative
! xaux caused by floating-point cancellation in the single-pass formula.
real(dp),parameter :: eps=1.0e-30_dp
integer :: i
xl=0._dp
xq=0._dp
! Single pass over the data: accumulate sum and sum of squares.
! NOTE(review): the textbook single-pass formula xq - xl*xl/n used below
! can lose precision when mean**2 >> variance — presumably acceptable for
! the timing statistics this is used for; confirm if reused elsewhere.
do i=1,number
xl=xl+x_dp(i)
xq=xq+x_dp(i)*x_dp(i)
end do
! Mean: dp sum divided by sp count (result narrows to sp on assignment).
xm=xl/real(number,kind=sp)
! xaux = sum((x-mean)**2) expanded algebraically; sample (n-1) normalization.
xaux=xq-xl*xl/real(number,kind=dp)
if (xaux.lt.eps) then
xs_dp=0._dp
else
xs_dp=sqrt(xaux/real(number-1,kind=dp))
endif
end subroutine mean_mixed_dsd
end module mean_mod
......@@ -121,7 +121,7 @@ module mpi_mod
logical, parameter :: mp_dbg_mode = .false.
logical, parameter :: mp_dev_mode = .false.
logical, parameter :: mp_dbg_out = .false.
logical, parameter :: mp_time_barrier=.false.
logical, parameter :: mp_time_barrier=.true.
logical, parameter :: mp_measure_time=.false.
logical, parameter :: mp_exact_numpart=.true.
......@@ -250,7 +250,7 @@ contains
& 'all procs call getfields. Setting lmp_sync=.true.'
write(*,FMT='(80("#"))')
end if
lmp_sync=.true. ! :DBG: eso fix this...
lmp_sync=.true.
end if
! TODO: Add more warnings for unimplemented flexpart features
......@@ -260,6 +260,7 @@ contains
! as running with one process less but not using separate read process
!**********************************************************************
! id_read = min(mp_np-1, 1)
id_read = mp_np-1
if (mp_pid.eq.id_read) lmpreader=.true.
......@@ -329,7 +330,7 @@ contains
! Set maxpart per process
! eso 08/2016: Increase maxpart per process, in case of unbalanced distribution
maxpart_mpi=int(mp_maxpart_factor*maxpart/mp_partgroup_np)
maxpart_mpi=int(mp_maxpart_factor*real(maxpart)/real(mp_partgroup_np))
! Do not allocate particle data arrays for readwind process
if (lmpreader.and.lmp_use_reader) then
......@@ -485,6 +486,19 @@ contains
integer,intent(in) :: num_part
integer :: i,jj, addone
! Exit if too many particles
if (num_part.gt.maxpart_mpi) then
write(*,*) '#####################################################'
write(*,*) '#### ERROR - TOTAL NUMBER OF PARTICLES REQUIRED ####'
write(*,*) '#### EXCEEDS THE MAXIMUM ALLOWED NUMBER. REDUCE ####'
write(*,*) '#### EITHER NUMBER OF PARTICLES PER RELEASE POINT####'
write(*,*) '#### OR INCREASE MAXPART. ####'
write(*,*) '#####################################################'
! call MPI_FINALIZE(mp_ierr)
stop
end if
! Time for MPI communications
!****************************
if (mp_measure_time) call mpif_mtime('commtime',0)
......@@ -526,7 +540,6 @@ contains
end do