flexpart / flexpart · Commits

Commit d005a67c, authored May 21, 2019 by Sabine

Merge remote-tracking branch 'refs/remotes/origin/dev' into dev

Parents: 5d74ed91, 0c8c7f2f
Changes: 19 files
options/COMMAND
@@ -18,7 +18,7 @@
 CTL= -5.0000000, ! CTL>1, ABL time step = (Lagrangian timescale (TL))/CTL, uses LSYNCTIME if CTL<0
 IFINE= 4, ! Reduction for time step in vertical transport, used only if CTL>1
 IOUT= 1, ! Output type: [1]mass 2]pptv 3]1&2 4]plume 5]1&4, +8 for NetCDF output
-IPOUT= 0, ! Particle position output: 0]no 1]every output 2]only at end
+IPOUT= 0, ! Particle position output: 0]no 1]every output 2]only at end 3]time averaged
 LSUBGRID= 0, ! Increase of ABL heights due to sub-grid scale orographic variations;[0]off 1]on
 LCONVECTION= 1, ! Switch for convection parameterization;0]off [1]on
 LAGESPECTRA= 0, ! Switch for calculation of age spectra (needs AGECLASSES);[0]off 1]on
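For reference, a COMMAND fragment selecting the new time-averaged particle dump might look like the sketch below; apart from IPOUT=3 the values simply repeat the defaults shown in the excerpt above, and the exact set of namelist entries depends on the FLEXPART version in use.

 &COMMAND
 IOUT=        1, ! gridded mass concentration output
 IPOUT=       3, ! particle positions written as time averages (the new option)
 LSUBGRID=    0,
 LCONVECTION= 1,
 /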
src/FLEXPART.f90
@@ -67,13 +67,7 @@ program flexpart
   integer :: metdata_format = GRIBFILE_CENTRE_UNKNOWN
   integer :: detectformat
-
-  ! Initialize arrays in com_mod
-  !*****************************
-  call com_mod_allocate_part(maxpart)
-
   ! Generate a large number of random numbers
   !******************************************
@@ -171,6 +165,11 @@ program flexpart
     endif
   endif
+
+  ! Initialize arrays in com_mod
+  !*****************************
+  call com_mod_allocate_part(maxpart)
+
   ! Read the age classes to be used
   !********************************
   if (verbosity.gt.0) then
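The serial FLEXPART.f90 change moves the com_mod_allocate_part call from the declaration area down to just before the age-class reading, i.e. after the option files have been parsed. A plausible reason, consistent with the com_mod.f90 hunks further down, is that the allocation routine now branches on ipout, which is only known once the COMMAND file has been read. A minimal stand-alone sketch of that ordering dependency (illustrative names and sizes only):

program alloc_after_options_sketch
  implicit none
  integer :: ipout
  real, allocatable, dimension(:) :: part_av_z
  ipout = 3                                  ! in FLEXPART this value comes from reading COMMAND
  if (ipout == 3) allocate(part_av_z(1000))  ! conditional allocation, as in com_mod_allocate_part
  print *, 'averaging arrays allocated? ', allocated(part_av_z)
end program alloc_after_options_sketch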
src/FLEXPART_MPI.f90
@@ -76,12 +76,7 @@ program flexpart
   if (mp_measure_time) call mpif_mtime('flexpart',0)
-
-  ! Initialize arrays in com_mod
-  !*****************************
-  if (.not.(lmpreader.and.lmp_use_reader)) call com_mod_allocate_part(maxpart_mpi)
-
   ! Generate a large number of random numbers
   !******************************************
@@ -179,6 +174,11 @@ program flexpart
     endif
   endif
+
+  ! Initialize arrays in com_mod
+  !*****************************
+  if (.not.(lmpreader.and.lmp_use_reader)) call com_mod_allocate_part(maxpart_mpi)
+
   ! Read the age classes to be used
   !********************************
@@ -412,7 +412,7 @@ program flexpart
     if (nested_output.ne.1.and.surf_only.eq.1) call writeheader_surf
   end if ! (mpif_pid == 0)
-  if (mp_measure_time) call mpif_mtime('iotime',0)
+  if (mp_measure_time) call mpif_mtime('iotime',1)
   if (verbosity.gt.0 .and. lroot) then
     print*,'call openreceptors'
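The 'iotime' change swaps the second argument of mpif_mtime from 0 to 1 after the header files have been written; assuming the usual convention in mpi_mod.f90 that 0 starts and 1 stops a named timer, the old call restarted the I/O timer instead of stopping it. The stand-alone sketch below mirrors that start/stop pairing with the system_clock intrinsic rather than the actual mpi_mod timer:

program timer_pair_sketch
  implicit none
  integer :: c0, c1, rate
  call system_clock(c0, rate)   ! "start", analogous to mpif_mtime('iotime',0)
  ! ... the work being timed, e.g. writing header files ...
  call system_clock(c1)         ! "stop", analogous to the corrected mpif_mtime('iotime',1)
  print *, 'elapsed seconds:', real(c1-c0)/real(rate)
end program timer_pair_sketch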
src/com_mod.f90
@@ -18,6 +18,8 @@ module com_mod
   implicit none
 !****************************************************************
 ! Variables defining where FLEXPART input/output files are stored
 !****************************************************************
@@ -68,7 +70,7 @@ module com_mod
   ! outstep = real(abs(loutstep))
   real :: ctl,fine
-  integer :: ifine,iout,ipout,ipin,iflux,mdomainfill
+  integer :: ifine,iout,ipout,ipin,iflux,mdomainfill,ipoutfac
   integer :: mquasilag,nested_output,ind_source,ind_receptor
   integer :: ind_rel,ind_samp,ioutputforeachrelease,linit_cond,surf_only
   logical :: turbswitch
@@ -81,6 +83,7 @@ module com_mod
   ! iflux    flux calculation options: 1 calculation of fluxes, 2 no fluxes
   ! iout     output options: 1 conc. output (ng/m3), 2 mixing ratio (pptv), 3 both
   ! ipout    particle dump options: 0 no, 1 every output interval, 2 only at end
+  ! ipoutfac increase particle dump interval by factor (default 1)
   ! ipin     read in particle positions from dumped file from a previous run
   ! fine     real(ifine)
   ! mdomainfill 0: normal run
@@ -127,7 +130,6 @@ module com_mod
   logical :: gdomainfill
   ! gdomainfill .T., if domain-filling is global, .F. if not
   !ZHG SEP 2015 wheather or not to read clouds from GRIB
@@ -650,6 +652,7 @@ module com_mod
   real :: xreceptor(maxreceptor),yreceptor(maxreceptor)
   real :: receptorarea(maxreceptor)
   real :: creceptor(maxreceptor,maxspec)
+  real,allocatable,dimension(:,:) :: creceptor0
   character(len=16) :: receptorname(maxreceptor)
   integer :: numreceptor
@@ -673,6 +676,14 @@ module com_mod
   real,allocatable,dimension(:,:) :: xmass1
   real,allocatable,dimension(:,:) :: xscav_frac1
+
+  ! Variables used for writing out interval averages for partoutput
+  !****************************************************************
+  integer,allocatable,dimension(:) :: npart_av
+  real,allocatable,dimension(:) :: part_av_cartx,part_av_carty,part_av_cartz,part_av_z,part_av_topo
+  real,allocatable,dimension(:) :: part_av_pv,part_av_qv,part_av_tt,part_av_rho,part_av_tro,part_av_hmix
+  real,allocatable,dimension(:) :: part_av_uu,part_av_vv,part_av_energy
+
   ! eso: Moved from timemanager
   real,allocatable,dimension(:) :: uap,ucp,uzp,us,vs,ws
   integer(kind=2),allocatable,dimension(:) :: cbt
@@ -779,13 +790,21 @@ contains
     allocate(itra1(nmpart),npoint(nmpart),nclass(nmpart),&
          & idt(nmpart),itramem(nmpart),itrasplit(nmpart),&
          & xtra1(nmpart),ytra1(nmpart),ztra1(nmpart),&
-         & xmass1(nmpart,maxspec),&
-         & checklifetime(nmpart,maxspec), species_lifetime(maxspec,2))!CGZ-lifetime
+         & xmass1(nmpart,maxspec))!,&
+    ! & checklifetime(nmpart,maxspec), species_lifetime(maxspec,2))!CGZ-lifetime
+
+    if (ipout.eq.3) then
+      allocate(npart_av(nmpart),part_av_cartx(nmpart),part_av_carty(nmpart),&
+           & part_av_cartz(nmpart),part_av_z(nmpart),part_av_topo(nmpart))
+      allocate(part_av_pv(nmpart),part_av_qv(nmpart),part_av_tt(nmpart),&
+           & part_av_rho(nmpart),part_av_tro(nmpart),part_av_hmix(nmpart))
+      allocate(part_av_uu(nmpart),part_av_vv(nmpart),part_av_energy(nmpart))
+    end if
+
     allocate(uap(nmpart),ucp(nmpart),uzp(nmpart),us(nmpart),&
          & vs(nmpart),ws(nmpart),cbt(nmpart))
   end subroutine com_mod_allocate_part
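The new part_av_* arrays give each particle a running sum that can be turned into an interval average when IPOUT=3; the accumulation itself presumably happens in the new partpos_average/partoutput_average routines added to the makefile below, which are not shown in this diff. A toy sketch of the accumulate-then-average idea, with made-up sizes and values:

program interval_average_sketch
  implicit none
  integer, parameter :: nsteps_per_output = 4
  real    :: part_av_z(3)   ! running sum of heights per particle
  integer :: npart_av(3)    ! number of samples per particle
  real    :: z(3)
  integer :: istep
  part_av_z = 0.0
  npart_av  = 0
  do istep = 1, nsteps_per_output
    z = real(istep) * (/1.0, 2.0, 3.0/)   ! stand-in for the current particle heights
    part_av_z = part_av_z + z
    npart_av  = npart_av + 1
  end do
  print *, 'interval-averaged heights:', part_av_z / real(npart_av)
end program interval_average_sketch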
src/init_domainfill.f90
@@ -86,6 +86,10 @@ subroutine init_domainfill
     endif
   endif
+
+! Exit here if resuming a run from particle dump
+!***********************************************
+  if (gdomainfill.and.ipin.ne.0) return
+
 ! Do not release particles twice (i.e., not at both in the leftmost and rightmost
 ! grid cell) for a global domain
 !*****************************************************************************
@@ -413,7 +417,7 @@ subroutine init_domainfill
 ! This overrides any previous calculations.
 !***************************************************************************
-  if (ipin.eq.1) then
+  if ((ipin.eq.1).and.(.not.gdomainfill)) then
     open(unitboundcond,file=path(2)(1:length(2))//'boundcond.bin', &
          form='unformatted')
     read(unitboundcond) numcolumn_we,numcolumn_sn, &
src/init_domainfill_mpi.f90
@@ -109,6 +109,10 @@ subroutine init_domainfill
     endif
   endif
+
+! Exit here if resuming a run from particle dump
+!***********************************************
+  if (gdomainfill.and.ipin.ne.0) return
+
 ! Do not release particles twice (i.e., not at both in the leftmost and rightmost
 ! grid cell) for a global domain
 !*****************************************************************************
@@ -212,7 +216,6 @@ subroutine init_domainfill
       pp(nz)=rho(ix,jy,nz,1)*r_air*tt(ix,jy,nz,1)
       colmass(ix,jy)=(pp(1)-pp(nz))/ga*gridarea(jy)
       colmasstotal=colmasstotal+colmass(ix,jy)
     end do
   end do
@@ -465,7 +468,7 @@ subroutine init_domainfill
 !***************************************************************************
 ! eso TODO: only needed for root process
-  if (ipin.eq.1) then
+  if ((ipin.eq.1).and.(.not.gdomainfill)) then
     open(unitboundcond,file=path(2)(1:length(2))//'boundcond.bin', &
          form='unformatted')
     read(unitboundcond) numcolumn_we,numcolumn_sn, &
@@ -473,27 +476,33 @@ subroutine init_domainfill
     close(unitboundcond)
   endif
-    numpart = numpart/mp_partgroup_np
-    if (mod(numpart,mp_partgroup_np).ne.0) numpart=numpart+1
-
-  else ! Allocate dummy arrays for receiving processes
-    allocate(itra1_tmp(nullsize),npoint_tmp(nullsize),nclass_tmp(nullsize),&
-         & idt_tmp(nullsize),itramem_tmp(nullsize),itrasplit_tmp(nullsize),&
-         & xtra1_tmp(nullsize),ytra1_tmp(nullsize),ztra1_tmp(nullsize),&
-         & xmass1_tmp(nullsize,nullsize))
+    if (ipin.eq.0) then
+      numpart = numpart/mp_partgroup_np
+      if (mod(numpart,mp_partgroup_np).ne.0) numpart=numpart+1
+    end if
+
+  else ! Allocate dummy arrays for receiving processes
+    if (ipin.eq.0) then
+      allocate(itra1_tmp(nullsize),npoint_tmp(nullsize),nclass_tmp(nullsize),&
+           & idt_tmp(nullsize),itramem_tmp(nullsize),itrasplit_tmp(nullsize),&
+           & xtra1_tmp(nullsize),ytra1_tmp(nullsize),ztra1_tmp(nullsize),&
+           & xmass1_tmp(nullsize,nullsize))
+    end if
   end if ! end if(lroot)

 ! Distribute particles to other processes (numpart is 'per-process', not total)
-  call MPI_Bcast(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
-! eso TODO: xmassperparticle: not necessary to send
-  call MPI_Bcast(xmassperparticle, 1, mp_sp, id_root, mp_comm_used, mp_ierr)
-  call mpif_send_part_properties(numpart)
+! Only if not restarting from previous run
+  if (ipin.eq.0) then
+    call MPI_Bcast(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
+    call mpif_send_part_properties(npart(1)/mp_partgroup_np)

 ! Deallocate the temporary arrays used for all particles
     deallocate(itra1_tmp,npoint_tmp,nclass_tmp,idt_tmp,itramem_tmp,&
          & itrasplit_tmp,xtra1_tmp,ytra1_tmp,ztra1_tmp,xmass1_tmp)
+  end if

 end subroutine init_domainfill
src/makefile
@@ -117,6 +117,7 @@ mpi_mod.o
 ## Serial versions (MPI version with same functionality and name '_mpi.f90' exists)
 OBJECTS_SERIAL = \
     releaseparticles.o  partoutput.o \
+    partoutput_average.o \
     conccalc.o \
     init_domainfill.o   concoutput.o \
     timemanager.o FLEXPART.o \
@@ -131,7 +132,7 @@ OBJECTS_SERIAL = \
 ## For MPI version
 OBJECTS_MPI = releaseparticles_mpi.o partoutput_mpi.o \
-    conccalc_mpi.o \
+    partoutput_average_mpi.o conccalc_mpi.o \
     init_domainfill_mpi.o concoutput_mpi.o \
     timemanager_mpi.o FLEXPART_MPI.o \
     readpartpositions_mpi.o \
@@ -148,7 +149,7 @@ OBJECTS_NCF = netcdf_output_mod.o
 OBJECTS = \
     advance.o initialize.o \
     writeheader.o writeheader_txt.o \
-    writeprecip.o \
+    partpos_average.o writeprecip.o \
     writeheader_surf.o assignland.o \
     part0.o gethourlyOH.o \
     caldate.o partdep.o \
@@ -347,7 +348,10 @@ outgrid_init.o: com_mod.o flux_mod.o oh_mod.o outg_mod.o par_mod.o unc_mod.o
 outgrid_init_nest.o: com_mod.o outg_mod.o par_mod.o unc_mod.o
 part0.o: par_mod.o
 partdep.o: par_mod.o
+partpos_average.o: com_mod.o par_mod.o
 partoutput.o: com_mod.o par_mod.o
+partoutput_average.o: com_mod.o par_mod.o
+partoutput_average_mpi.o: com_mod.o par_mod.o mpi_mod.o
 partoutput_mpi.o: com_mod.o mpi_mod.o par_mod.o
 partoutput_short.o: com_mod.o par_mod.o
 partoutput_short_mpi.o: com_mod.o mpi_mod.o par_mod.o
src/mpi_mod.f90
@@ -87,6 +87,7 @@ module mpi_mod
 ! Variables for MPI processes in the 'particle' group
   integer, allocatable, dimension(:) :: mp_partgroup_rank
+  integer, allocatable, dimension(:) :: npart_per_process
   integer :: mp_partgroup_comm, mp_partgroup_pid, mp_partgroup_np
   integer :: mp_seed=0
@@ -124,7 +125,7 @@ module mpi_mod
 ! mp_measure_time   Measure cpu/wall time, write out at end of run
 ! mp_time_barrier   Measure MPI barrier time
 ! mp_exact_numpart  Use an extra MPI communication to give the exact number of particles
-!                   to standard output (this does *not* otherwise affect the simulation)
+!                   to standard output (this does not otherwise affect the simulation)
   logical, parameter :: mp_dbg_mode = .false.
   logical, parameter :: mp_dev_mode = .false.
   logical, parameter :: mp_dbg_out = .false.
@@ -189,8 +190,8 @@ contains
 ! mpi_mode    default 0, set to 2/3 if running MPI version
 ! mp_np       number of running processes, decided at run-time
 !***********************************************************************
-    use par_mod, only: maxpart, numwfmem, dep_prec
-    use com_mod, only: mpi_mode, verbosity
+    use par_mod, only: maxpart, numwfmem, dep_prec, maxreceptor, maxspec
+    use com_mod, only: mpi_mode, verbosity, creceptor0
     implicit none
@@ -336,7 +337,7 @@ contains
     end if
 ! Set maxpart per process
-! eso 08/2016: Increase maxpart per process, in case of unbalanced distribution
+! ESO 08/2016: Increase maxpart per process, in case of unbalanced distribution
     maxpart_mpi=int(mp_maxpart_factor*real(maxpart)/real(mp_partgroup_np))
     if (mp_np == 1) maxpart_mpi = maxpart
@@ -364,6 +365,16 @@ contains
       reqs(:)=MPI_REQUEST_NULL
     end if
+
+    ! Allocate array for number of particles per process
+    allocate(npart_per_process(0:mp_partgroup_np-1))
+
+    ! Write whether MPI_IN_PLACE is used or not
+#ifdef USE_MPIINPLACE
+    if (lroot) write(*,*) 'Using MPI_IN_PLACE operations'
+#else
+    if (lroot) allocate(creceptor0(maxreceptor,maxspec))
+    if (lroot) write(*,*) 'Not using MPI_IN_PLACE operations'
+#endif
     goto 101
 100 write(*,*) '#### mpi_mod::mpif_init> ERROR ####', mp_ierr
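When USE_MPIINPLACE is not defined, the root process now allocates creceptor0, presumably as a separate receive buffer for reducing the per-process receptor values (with MPI_IN_PLACE the root can reuse its own send buffer instead). The stand-alone sketch below shows the separate-buffer variant of such a reduction; it is an illustration, not the FLEXPART reduction code itself:

program reduce_buffer_sketch
  use mpi
  implicit none
  integer :: ierr, rank
  real :: creceptor(4)    ! per-process partial sums (stand-in values)
  real :: creceptor0(4)   ! extra receive buffer on the root, as allocated in this commit
  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
  creceptor = real(rank + 1)
  call MPI_Reduce(creceptor, creceptor0, 4, MPI_REAL, MPI_SUM, 0, MPI_COMM_WORLD, ierr)
  if (rank == 0) print *, 'summed receptor values:', creceptor0
  call MPI_Finalize(ierr)
end program reduce_buffer_sketch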
@@ -558,7 +569,7 @@ contains
 ! "numpart" is larger than the actual used, so we reduce it if there are
 ! invalid particles at the end of the arrays
-601 do i=num_part, 1, -1
+601 do i=numpart, 1, -1
     if (itra1(i).eq.-999999999) then
       numpart=numpart-1
     else
@@ -597,7 +608,7 @@ contains
     real :: pmin, z
     integer :: i, jj, nn, num_part=1, m, imin, num_trans
     logical :: first_iter
-    integer, allocatable, dimension(:) :: numparticles_mpi, idx_arr
+    integer, allocatable, dimension(:) :: idx_arr
     real, allocatable, dimension(:) :: sorted ! TODO: we don't really need this
 ! Exit if running with only 1 process
@@ -606,20 +617,22 @@ contains
 ! All processes exchange information on number of particles
 !****************************************************************************
-    allocate(numparticles_mpi(0:mp_partgroup_np-1), &
-         & idx_arr(0:mp_partgroup_np-1), sorted(0:mp_partgroup_np-1))
+    allocate(idx_arr(0:mp_partgroup_np-1), sorted(0:mp_partgroup_np-1))
-    call MPI_Allgather(numpart, 1, MPI_INTEGER, numparticles_mpi, &
+    call MPI_Allgather(numpart, 1, MPI_INTEGER, npart_per_process, &
          &1, MPI_INTEGER, mp_comm_used, mp_ierr)
 ! Sort from lowest to highest
 ! Initial guess: correct order
-    sorted(:) = numparticles_mpi(:)
+    sorted(:) = npart_per_process(:)
     do i=0, mp_partgroup_np-1
       idx_arr(i) = i
     end do
+
+! Do not rebalance particles for ipout=3
+    if (ipout.eq.3) return
 ! For each successive element in index array, see if a lower value exists
     do i=0, mp_partgroup_np-2
       pmin=sorted(i)
@@ -644,13 +657,13 @@ contains
     m=mp_partgroup_np-1 ! index for last sorted process (most particles)
     do i=0,mp_partgroup_np/2-1
-      num_trans=numparticles_mpi(idx_arr(m)) - numparticles_mpi(idx_arr(i))
+      num_trans=npart_per_process(idx_arr(m)) - npart_per_process(idx_arr(i))
       if (mp_partid.eq.idx_arr(m).or.mp_partid.eq.idx_arr(i)) then
-        if ( numparticles_mpi(idx_arr(m)).gt.mp_min_redist.and.&
-             & real(num_trans)/real(numparticles_mpi(idx_arr(m))).gt.mp_redist_fract) then
+        if ( npart_per_process(idx_arr(m)).gt.mp_min_redist.and.&
+             & real(num_trans)/real(npart_per_process(idx_arr(m))).gt.mp_redist_fract) then
 ! DBG
-          ! write(*,*) 'mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, numparticles_mpi', &
-          !      &mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, numparticles_mpi
+          ! write(*,*) 'mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, npart_per_process', &
+          !      &mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, npart_per_process
 ! DBG
           call mpif_redist_part(itime, idx_arr(m), idx_arr(i), num_trans/2)
         end if
@@ -658,7 +671,7 @@ contains
       m=m-1
     end do
-    deallocate(numparticles_mpi, idx_arr, sorted)
+    deallocate(idx_arr, sorted)
   end subroutine mpif_calculate_part_redist
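The load-balancing hunks replace the locally allocated numparticles_mpi with the module-level npart_per_process filled by MPI_Allgather: every process contributes its own numpart and receives everyone else's, and mpif_calculate_part_redist then compares the counts to decide whether particles should be shifted between processes (skipped entirely for ipout=3). A stand-alone sketch of that exchange, using MPI_COMM_WORLD instead of FLEXPART's mp_comm_used and a made-up particle count:

program allgather_counts_sketch
  use mpi
  implicit none
  integer :: ierr, rank, nprocs, numpart
  integer, allocatable, dimension(:) :: npart_per_process
  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
  call MPI_Comm_size(MPI_COMM_WORLD, nprocs, ierr)
  allocate(npart_per_process(0:nprocs-1))
  numpart = 1000 + 10*rank   ! stand-in for the local particle count
  call MPI_Allgather(numpart, 1, MPI_INTEGER, npart_per_process, 1, MPI_INTEGER, &
       & MPI_COMM_WORLD, ierr)
  if (rank == 0) print *, 'particles per process:', npart_per_process
  call MPI_Finalize(ierr)
end program allgather_counts_sketch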
@@ -1960,7 +1973,7 @@ contains
...
@@ -1960,7 +1973,7 @@ contains
! For now assume that data at all steps either have or do not have water
! For now assume that data at all steps either have or do not have water
if
(
readclouds
)
then
if
(
readclouds
)
then
j
=
j
+1
j
=
j
+1
call
MPI_Irecv
(
ctwc
(:,:,
mind
),
d2s1
,
mp_sp
,
id_read
,
MPI_ANY_TAG
,&
call
MPI_Irecv
(
ctwc
(:,:,
mind
),
d2s1
*
5
,
mp_sp
,
id_read
,
MPI_ANY_TAG
,&
&
MPI_COMM_WORLD
,
reqs
(
j
),
mp_ierr
)
&
MPI_COMM_WORLD
,
reqs
(
j
),
mp_ierr
)
if
(
mp_ierr
/
=
0
)
goto
600
if
(
mp_ierr
/
=
0
)
goto
600
end
if
end
if
...
@@ -2325,7 +2338,7 @@ contains
...
@@ -2325,7 +2338,7 @@ contains
! For now assume that data at all steps either have or do not have water
! For now assume that data at all steps either have or do not have water
if
(
readclouds
)
then
if
(
readclouds
)
then
j
=
j
+1
j
=
j
+1
call
MPI_Irecv
(
ctwcn
(:,:,
mind
,
k
),
d2s1
,
mp_sp
,
id_read
,
MPI_ANY_TAG
,&
call
MPI_Irecv
(
ctwcn
(:,:,
mind
,
k
),
d2s1
*
5
,
mp_sp
,
id_read
,
MPI_ANY_TAG
,&
&
MPI_COMM_WORLD
,
reqs
(
j
),
mp_ierr
)
&
MPI_COMM_WORLD
,
reqs
(
j
),
mp_ierr
)
if
(
mp_ierr
/
=
0
)
goto
600
if
(
mp_ierr
/
=
0
)
goto
600
end
if
end
if
...
@@ -2461,12 +2474,28 @@ contains
...
@@ -2461,12 +2474,28 @@ contains
&
mp_comm_used
,
mp_ierr
)