flexpart / flexpart · Commits
Commit 0a94e13d, authored May 06, 2019 by Espen Sollum

Added ipout=3 option for time averaged particle output

parent 328fdf90
Changes: 15 files
options/COMMAND

@@ -18,7 +18,7 @@
 CTL=         -5.0000000, ! CTL>1, ABL time step = (Lagrangian timescale (TL))/CTL, uses LSYNCTIME if CTL<0
 IFINE=                4, ! Reduction for time step in vertical transport, used only if CTL>1
 IOUT=                 1, ! Output type: [1]mass 2]pptv 3]1&2 4]plume 5]1&4, +8 for NetCDF output
-IPOUT=                0, ! Particle position output: 0]no 1]every output 2]only at end
+IPOUT=                0, ! Particle position output: 0]no 1]every output 2]only at end 3]time averaged
 LSUBGRID=             0, ! Increase of ABL heights due to sub-grid scale orographic variations;[0]off 1]on
 LCONVECTION=          1, ! Switch for convection parameterization;0]off [1]on
 LAGESPECTRA=          0, ! Switch for calculation of age spectra (needs AGECLASSES);[0]off 1]on
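For users, enabling the new mode is a one-line change in the COMMAND namelist. A minimal sketch, with illustrative values only; IPOUTFAC is the companion dump-interval factor that readcommand.f90 (below) adds to the &COMMAND namelist, default 1:

    &COMMAND
     IPOUT=                3, ! time-averaged particle position output
     IPOUTFAC=             2, ! assumption: dump positions only every 2nd output interval
    /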
src/FLEXPART.f90

@@ -67,13 +67,7 @@ program flexpart
   integer :: metdata_format = GRIBFILE_CENTRE_UNKNOWN
   integer :: detectformat

-  ! Initialize arrays in com_mod
-  !*****************************
-  call com_mod_allocate_part(maxpart)
-
   ! Generate a large number of random numbers
   !******************************************

@@ -171,6 +165,11 @@ program flexpart
     endif
   endif

+  ! Initialize arrays in com_mod
+  !*****************************
+  call com_mod_allocate_part(maxpart)
+
   ! Read the age classes to be used
   !********************************
   if (verbosity.gt.0) then
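The allocation call is removed here and re-added about a hundred lines further down (second hunk; FLEXPART_MPI.f90 below gets the same treatment). Presumably the move is required because com_mod_allocate_part now branches on ipout (see src/com_mod.f90), which is only set once the COMMAND file has been read. A sketch of the assumed ordering after this commit:

    call readcommand                     ! sets ipout, ipoutfac, ...
    call com_mod_allocate_part(maxpart)  ! allocates npart_av, part_av_* only when ipout==3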
src/FLEXPART_MPI.f90

@@ -76,12 +76,7 @@ program flexpart
   if (mp_measure_time) call mpif_mtime('flexpart',0)

-  ! Initialize arrays in com_mod
-  !*****************************
-  if (.not.(lmpreader.and.lmp_use_reader)) call com_mod_allocate_part(maxpart_mpi)
-
   ! Generate a large number of random numbers
   !******************************************

@@ -179,6 +174,11 @@ program flexpart
     endif
   endif

+  ! Initialize arrays in com_mod
+  !*****************************
+  if (.not.(lmpreader.and.lmp_use_reader)) call com_mod_allocate_part(maxpart_mpi)
+
   ! Read the age classes to be used
   !********************************
src/com_mod.f90

@@ -18,6 +18,8 @@ module com_mod
   implicit none

  !****************************************************************
  ! Variables defining where FLEXPART input/output files are stored
  !****************************************************************

@@ -68,7 +70,7 @@ module com_mod
  ! outstep = real(abs(loutstep))

   real :: ctl,fine
-  integer :: ifine,iout,ipout,ipin,iflux,mdomainfill
+  integer :: ifine,iout,ipout,ipin,iflux,mdomainfill,ipoutfac
   integer :: mquasilag,nested_output,ind_source,ind_receptor
   integer :: ind_rel,ind_samp,ioutputforeachrelease,linit_cond,surf_only
   logical :: turbswitch

@@ -81,6 +83,7 @@ module com_mod
  ! iflux    flux calculation options: 1 calculation of fluxes, 2 no fluxes
  ! iout     output options: 1 conc. output (ng/m3), 2 mixing ratio (pptv), 3 both
  ! ipout    particle dump options: 0 no, 1 every output interval, 2 only at end
+ ! ipoutfac increase particle dump interval by factor (default 1)
  ! ipin     read in particle positions from dumped file from a previous run
  ! fine     real(ifine)
  ! mdomainfill 0: normal run

@@ -127,7 +130,6 @@ module com_mod
   logical :: gdomainfill
-
  ! gdomainfill             .T., if domain-filling is global, .F. if not

  !ZHG SEP 2015 wheather or not to read clouds from GRIB

@@ -674,6 +676,14 @@ module com_mod
   real, allocatable, dimension(:,:) :: xmass1
   real, allocatable, dimension(:,:) :: xscav_frac1

+  ! Variables used for writing out interval averages for partoutput
+  !****************************************************************
+  integer, allocatable, dimension(:) :: npart_av
+  real, allocatable, dimension(:) :: part_av_cartx, part_av_carty, part_av_cartz, part_av_z, part_av_topo
+  real, allocatable, dimension(:) :: part_av_pv, part_av_qv, part_av_tt, part_av_rho, part_av_tro, part_av_hmix
+  real, allocatable, dimension(:) :: part_av_uu, part_av_vv, part_av_energy
+
  ! eso: Moved from timemanager
   real, allocatable, dimension(:) :: uap,ucp,uzp,us,vs,ws
   integer(kind=2), allocatable, dimension(:) :: cbt
@@ -780,13 +790,21 @@ contains
    allocate(itra1(nmpart),npoint(nmpart),nclass(nmpart),&
         & idt(nmpart),itramem(nmpart),itrasplit(nmpart),&
         & xtra1(nmpart),ytra1(nmpart),ztra1(nmpart),&
-        & xmass1(nmpart,maxspec),&
-        & checklifetime(nmpart,maxspec), species_lifetime(maxspec,2))!CGZ-lifetime
+        & xmass1(nmpart,maxspec))
+    ! ,&
+    ! & checklifetime(nmpart,maxspec), species_lifetime(maxspec,2))!CGZ-lifetime
+
+    if (ipout.eq.3) then
+      allocate(npart_av(nmpart),part_av_cartx(nmpart),part_av_carty(nmpart),&
+           & part_av_cartz(nmpart),part_av_z(nmpart),part_av_topo(nmpart))
+      allocate(part_av_pv(nmpart),part_av_qv(nmpart),part_av_tt(nmpart),&
+           & part_av_rho(nmpart),part_av_tro(nmpart),part_av_hmix(nmpart))
+      allocate(part_av_uu(nmpart),part_av_vv(nmpart),part_av_energy(nmpart))
+    end if

    allocate(uap(nmpart),ucp(nmpart),uzp(nmpart),us(nmpart),&
         & vs(nmpart),ws(nmpart),cbt(nmpart))

  end subroutine com_mod_allocate_part
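The part_av_* arrays hold running per-particle sums and npart_av counts the accumulated samples, so the output routine can normalize before writing. The new routines themselves (partpos_average.f90, partoutput_average.f90) are not expanded in this view; a hypothetical sketch of the accumulate-and-normalize pattern the declarations imply, for one particle j:

    ! each sync step (sketch only, not the actual partpos_average code):
    part_av_z(j) = part_av_z(j) + ztra1(j)
    npart_av(j)  = npart_av(j) + 1
    ! at dump time, write the mean and reset:
    write(unitpartout_average) part_av_z(j)/real(npart_av(j))
    part_av_z(j) = 0.
    npart_av(j)  = 0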
src/init_domainfill.f90

@@ -86,6 +86,10 @@ subroutine init_domainfill
     endif
   endif

+  ! Exit here if resuming a run from particle dump
+  !***********************************************
+  if (gdomainfill.and.ipin.ne.0) return
+
  ! Do not release particles twice (i.e., not at both in the leftmost and rightmost
  ! grid cell) for a global domain
  !*****************************************************************************
src/makefile

@@ -117,6 +117,7 @@ mpi_mod.o
 ## Serial versions (MPI version with same functionality and name '_mpi.f90' exists)
 OBJECTS_SERIAL = \
        releaseparticles.o partoutput.o \
+       partoutput_average.o \
        conccalc.o \
        init_domainfill.o concoutput.o \
        timemanager.o FLEXPART.o \

@@ -131,7 +132,7 @@ OBJECTS_SERIAL = \
 ## For MPI version
 OBJECTS_MPI = releaseparticles_mpi.o partoutput_mpi.o \
-       conccalc_mpi.o \
+       partoutput_average_mpi.o conccalc_mpi.o \
        init_domainfill_mpi.o concoutput_mpi.o \
        timemanager_mpi.o FLEXPART_MPI.o \
        readpartpositions_mpi.o \

@@ -148,7 +149,7 @@ OBJECTS_NCF = netcdf_output_mod.o
 OBJECTS = \
        advance.o initialize.o \
        writeheader.o writeheader_txt.o \
-       writeprecip.o \
+       partpos_average.o writeprecip.o \
        writeheader_surf.o assignland.o \
        part0.o gethourlyOH.o \
        caldate.o partdep.o \

@@ -347,7 +348,10 @@ outgrid_init.o: com_mod.o flux_mod.o oh_mod.o outg_mod.o par_mod.o unc_mod.o
 outgrid_init_nest.o: com_mod.o outg_mod.o par_mod.o unc_mod.o
 part0.o: par_mod.o
 partdep.o: par_mod.o
+partpos_average.o: com_mod.o par_mod.o
 partoutput.o: com_mod.o par_mod.o
+partoutput_average.o: com_mod.o par_mod.o
+partoutput_average_mpi.o: com_mod.o par_mod.o mpi_mod.o
 partoutput_mpi.o: com_mod.o mpi_mod.o par_mod.o
 partoutput_short.o: com_mod.o par_mod.o
 partoutput_short_mpi.o: com_mod.o mpi_mod.o par_mod.o
src/mpi_mod.f90
old mode 100644, new mode 100755

@@ -87,6 +87,7 @@ module mpi_mod
 ! Variables for MPI processes in the 'particle' group
   integer, allocatable, dimension(:) :: mp_partgroup_rank
+  integer, allocatable, dimension(:) :: npart_per_process
   integer :: mp_partgroup_comm, mp_partgroup_pid, mp_partgroup_np

   integer :: mp_seed = 0

@@ -124,15 +125,13 @@ module mpi_mod
 ! mp_measure_time    Measure cpu/wall time, write out at end of run
 ! mp_time_barrier    Measure MPI barrier time
 ! mp_exact_numpart   Use an extra MPI communication to give the exact number of particles
-!                    to standard output (this does *not* otherwise affect the simulation)
+!                    to standard output (this does not otherwise affect the simulation)
+! mp_rebalance       Attempt to rebalance particle between processes

   logical, parameter :: mp_dbg_mode = .false.
   logical, parameter :: mp_dev_mode = .false.
   logical, parameter :: mp_dbg_out = .false.
   logical, parameter :: mp_time_barrier=.true.
   logical, parameter :: mp_measure_time=.false.
   logical, parameter :: mp_exact_numpart=.true.
+  logical, parameter :: mp_rebalance=.true.

 ! for measuring CPU/Wall time
   real(sp), private :: mp_comm_time_beg, mp_comm_time_end, mp_comm_time_total=0.

@@ -145,8 +144,8 @@ module mpi_mod
   real(sp), private :: tm_tot_beg, tm_tot_end, tm_tot_total=0.
   real(dp), private :: mp_getfields_wtime_beg, mp_getfields_wtime_end, mp_getfields_wtime_total=0.
   real(sp), private :: mp_getfields_time_beg, mp_getfields_time_end, mp_getfields_time_total=0.
-  ! real(dp),private :: mp_readwind_wtime_beg, mp_readwind_wtime_end, mp_readwind_wtime_total=0.
-  ! real(sp),private :: mp_readwind_time_beg, mp_readwind_time_end, mp_readwind_time_total=0.
+  real(dp), private :: mp_readwind_wtime_beg, mp_readwind_wtime_end, mp_readwind_wtime_total=0.
+  real(sp), private :: mp_readwind_time_beg, mp_readwind_time_end, mp_readwind_time_total=0.
   real(dp), private :: mp_io_wtime_beg, mp_io_wtime_end, mp_io_wtime_total=0.
   real(sp), private :: mp_io_time_beg, mp_io_time_end, mp_io_time_total=0.
   real(dp), private :: mp_wetdepo_wtime_beg, mp_wetdepo_wtime_end, mp_wetdepo_wtime_total=0.

@@ -366,6 +365,9 @@ contains
       reqs(:)=MPI_REQUEST_NULL
     end if

+    ! Allocate array for number of particles per process
+    allocate(npart_per_process(0:mp_partgroup_np-1))
+
 ! Write whether MPI_IN_PLACE is used or not
 #ifdef USE_MPIINPLACE
     if (lroot) write(*,*) 'Using MPI_IN_PLACE operations'

@@ -606,7 +608,7 @@ contains
     real :: pmin, z
     integer :: i, jj, nn, num_part=1, m, imin, num_trans
     logical :: first_iter
-    integer, allocatable, dimension(:) :: numparticles_mpi, idx_arr
+    integer, allocatable, dimension(:) :: idx_arr
     real, allocatable, dimension(:) :: sorted ! TODO: we don't really need this

 ! Exit if running with only 1 process

@@ -615,20 +617,22 @@ contains
 ! All processes exchange information on number of particles
 !****************************************************************************
-    allocate(numparticles_mpi(0:mp_partgroup_np-1), &
-         &idx_arr(0:mp_partgroup_np-1), sorted(0:mp_partgroup_np-1))
+    allocate(idx_arr(0:mp_partgroup_np-1), sorted(0:mp_partgroup_np-1))

-    call MPI_Allgather(numpart, 1, MPI_INTEGER, numparticles_mpi, &
+    call MPI_Allgather(numpart, 1, MPI_INTEGER, npart_per_process, &
          &1, MPI_INTEGER, mp_comm_used, mp_ierr)

 ! Sort from lowest to highest
 ! Initial guess: correct order
-    sorted(:) = numparticles_mpi(:)
+    sorted(:) = npart_per_process(:)
     do i=0, mp_partgroup_np-1
       idx_arr(i) = i
     end do

+    ! Do not rebalance particles for ipout=3
+    if (ipout.eq.3) return
+
 ! For each successive element in index array, see if a lower value exists
     do i=0, mp_partgroup_np-2
       pmin=sorted(i)
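Note the placement of the early return: it comes after the MPI_Allgather, so npart_per_process stays current on every rank even when rebalancing is skipped (timemanager_mpi.f90 below relies on it for record offsets into the averaged-output file). Presumably rebalancing must be disabled for ipout=3 because the part_av_* sums are indexed by local particle number on each process, and migrating particles between ranks would detach them from their accumulated averages.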
@@ -653,13 +657,13 @@ contains
     m=mp_partgroup_np-1 ! index for last sorted process (most particles)
     do i=0, mp_partgroup_np/2-1
-      num_trans = numparticles_mpi(idx_arr(m)) - numparticles_mpi(idx_arr(i))
+      num_trans = npart_per_process(idx_arr(m)) - npart_per_process(idx_arr(i))
       if (mp_partid.eq.idx_arr(m).or.mp_partid.eq.idx_arr(i)) then
-        if ( numparticles_mpi(idx_arr(m)).gt.mp_min_redist.and.&
-             & real(num_trans)/real(numparticles_mpi(idx_arr(m))).gt.mp_redist_fract) then
+        if ( npart_per_process(idx_arr(m)).gt.mp_min_redist.and.&
+             & real(num_trans)/real(npart_per_process(idx_arr(m))).gt.mp_redist_fract) then
 ! DBG
-          ! write(*,*) 'mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, numparticles_mpi', &
-          !      &mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, numparticles_mpi
+          ! write(*,*) 'mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, npart_per_process', &
+          !      &mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, npart_per_process
 ! DBG
           call mpif_redist_part(itime, idx_arr(m), idx_arr(i), num_trans/2)
         end if

@@ -667,7 +671,7 @@ contains
       m=m-1
     end do

-    deallocate(numparticles_mpi, idx_arr, sorted)
+    deallocate(idx_arr, sorted)

  end subroutine mpif_calculate_part_redist
@@ -2715,19 +2719,19 @@ contains
         &mp_vt_time_beg)
       end if

-    ! case ('readwind')
-    !   if (imode.eq.0) then
-    !     call cpu_time(mp_readwind_time_beg)
-    !     mp_readwind_wtime_beg = mpi_wtime()
-    !   else
-    !     call cpu_time(mp_readwind_time_end)
-    !     mp_readwind_wtime_end = mpi_wtime()
-    !
-    !     mp_readwind_time_total = mp_readwind_time_total + &
-    !          &(mp_readwind_time_end - mp_readwind_time_beg)
-    !     mp_readwind_wtime_total = mp_readwind_wtime_total + &
-    !          &(mp_readwind_wtime_end - mp_readwind_wtime_beg)
-    !   end if
+    case ('readwind')
+      if (imode.eq.0) then
+        call cpu_time(mp_readwind_time_beg)
+        mp_readwind_wtime_beg = mpi_wtime()
+      else
+        call cpu_time(mp_readwind_time_end)
+        mp_readwind_wtime_end = mpi_wtime()
+
+        mp_readwind_time_total = mp_readwind_time_total + &
+             &(mp_readwind_time_end - mp_readwind_time_beg)
+        mp_readwind_wtime_total = mp_readwind_wtime_total + &
+             &(mp_readwind_wtime_end - mp_readwind_wtime_beg)
+      end if

     case ('commtime')
       if (imode.eq.0) then
src/netcdf_output_mod.f90

@@ -92,7 +92,7 @@ module netcdf_output_mod
   logical, parameter :: min_size = .false. ! if set true, redundant fields (topography) are not written to minimize file size
   character(len=255), parameter :: institution = 'NILU'

-  integer            :: tpointer
+  integer            :: tpointer = 0
   character(len=255) :: ncfname, ncfnamen

  ! netcdf dimension and variable IDs for main and nested output grid
src/par_mod.f90

@@ -279,7 +279,7 @@ module par_mod
  !************************************

   integer, parameter :: unitpath=1, unitcommand=1, unitageclasses=1, unitgrid=1
-  integer, parameter :: unitavailab=1, unitreleases=88, unitpartout=93
+  integer, parameter :: unitavailab=1, unitreleases=88, unitpartout=93, unitpartout_average=105
   integer, parameter :: unitpartin=93, unitflux=98, unitouttraj=96
   integer, parameter :: unitvert=1, unitoro=1, unitpoin=1, unitreceptor=1
   integer, parameter :: unitoutgrid=97, unitoutgridppt=99, unitoutinfo=1
src/partoutput.f90

@@ -70,7 +70,7 @@ subroutine partoutput(itime)
 ! Open output file and write the output
 !**************************************

-  if (ipout.eq.1) then
+  if (ipout.eq.1.or.ipout.eq.3) then
     open(unitpartout,file=path(2)(1:length(2))//'partposit_'//adate// &
          atime,form='unformatted')
   else
src/partoutput_mpi.f90

@@ -77,7 +77,7 @@ subroutine partoutput(itime)
 ! Open output file and write the output
 !**************************************

-  if (ipout.eq.1) then
+  if (ipout.eq.1.or.ipout.eq.3) then
     open(unitpartout,file=path(2)(1:length(2))//'partposit_'//adate// &
          atime,form='unformatted',status=file_stat,position='append')
   else
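In both the serial and MPI versions, ipout=3 reuses the existing partposit_ dump file, so instantaneous positions are still written on the (possibly stretched) dump interval alongside the new time-averaged output; the MPI variant keeps its append-mode open so that the ranks can write to the file in turn.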
src/readcommand.f90

@@ -49,6 +49,7 @@ subroutine readcommand
  !                      trajectory output, 5 = options 1 and 4                 *
  ! ipin                 1 continue simulation with dumped particle data, 0 no  *
  ! ipout                0 no particle dump, 1 every output time, 3 only at end *
+ ! ipoutfac             increase particle dump interval by factor (default 1)  *
  ! itsplit [s]          time constant for particle splitting                   *
  ! loutaver [s]         concentration output is an average over loutaver       *
  !                      seconds                                                *

@@ -96,6 +97,7 @@ subroutine readcommand
   ifine, &
   iout, &
   ipout, &
+  ipoutfac, &
   lsubgrid, &
   lconvection, &
   lagespectra, &

@@ -128,6 +130,7 @@ subroutine readcommand
   ifine=4
   iout=3
   ipout=0
+  ipoutfac=1
   lsubgrid=1
   lconvection=1
   lagespectra=0

@@ -506,9 +509,9 @@ subroutine readcommand
 ! Check whether a valid options for particle dump has been chosen
 !****************************************************************

-  if ((ipout.ne.0).and.(ipout.ne.1).and.(ipout.ne.2)) then
+  if ((ipout.ne.0).and.(ipout.ne.1).and.(ipout.ne.2).and.(ipout.ne.3)) then
     write(*,*) ' #### FLEXPART MODEL ERROR! FILE COMMAND:     #### '
-    write(*,*) ' #### IPOUT MUST BE 1, 2 OR 3!                #### '
+    write(*,*) ' #### IPOUT MUST BE 0, 1, 2 OR 3!             #### '
     stop
   endif
src/timemanager.f90

@@ -450,7 +450,10 @@ subroutine timemanager(metdata_format)
 !write(*,46) float(itime)/3600,itime,numpart
 45      format(i13,' Seconds simulated: ',i13, ' Particles:    Uncertainty: ',3f7.3)
 46      format(' Simulated ',f7.1,' hours (',i13,' s), ',i13, ' particles')
-        if (ipout.ge.1) call partoutput(itime)    ! dump particle positions
+        if (ipout.ge.1) then
+          if (mod(itime,ipoutfac*loutstep).eq.0) call partoutput(itime) ! dump particle positions
+          if (ipout.eq.3) call partoutput_average(itime) ! dump particle positions
+        endif
         loutnext=loutnext+loutstep
         loutstart=loutnext-loutaver/2
         loutend=loutnext+loutaver/2
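The condition mod(itime,ipoutfac*loutstep).eq.0 is what gives ipoutfac its meaning: positions are dumped only on every ipoutfac-th concentration output. A worked example with assumed values:

    ! loutstep = 3600 s, ipoutfac = 4:
    ! concentration output at 3600, 7200, 10800, 14400, ... s
    ! particle dump only where mod(itime, 4*3600) == 0, i.e. at 14400, 28800, ... s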
@@ -608,6 +611,12 @@ subroutine timemanager(metdata_format)
           cbt(j))
 ! write (*,*) 'advance: ',prob(1),xmass1(j,1),ztra1(j)

+! Calculate average position for particle dump output
+!****************************************************
+          if (ipout.eq.3) call partpos_average(itime,j)
+
 ! Calculate the gross fluxes across layer interfaces
 !***************************************************
src/timemanager_mpi.f90

@@ -112,7 +112,7 @@ subroutine timemanager(metdata_format)
   logical :: reqv_state=.false. ! .true. if waiting for a MPI_Irecv to complete
   integer :: j,ks,kp,l,n,itime=0,nstop,nstop1,memstat=0
 ! integer :: ksp
-  integer :: ip
+  integer :: ip,irec
   integer :: loutnext,loutstart,loutend
   integer :: ix,jy,ldeltat,itage,nage,idummy
   integer :: i_nan=0,ii_nan,total_nan_intl=0 !added by mc to check instability in CBL scheme

@@ -129,6 +129,7 @@ subroutine timemanager(metdata_format)
 ! Measure time spent in timemanager
   if (mp_measure_time) call mpif_mtime('timemanager',0)

 ! First output for time 0
 !************************

@@ -343,7 +344,7 @@ subroutine timemanager(metdata_format)
 ! Check if particles should be redistributed among processes
 !***********************************************************
-    call mpif_calculate_part_redist(itime)
+    if (mp_rebalance) call mpif_calculate_part_redist(itime)

 ! Compute convective mixing for forward runs

@@ -592,8 +593,13 @@ subroutine timemanager(metdata_format)
 45      format(i13,' SECONDS SIMULATED: ',i13, ' PARTICLES:    Uncertainty: ',3f7.3)
 46      format(' Simulated ',f7.1,' hours (',i13,' s), ',i13, ' particles')
         if (ipout.ge.1) then
+          irec=0
           do ip=0, mp_partgroup_np-1
-            if (ip.eq.mp_partid) call partoutput(itime) ! dump particle positions
+            if (ip.eq.mp_partid) then
+              if (mod(itime,ipoutfac*loutstep).eq.0) call partoutput(itime) ! dump particle positions
+              if (ipout.eq.3) call partoutput_average(itime,irec) ! dump particle positions
+            endif
+            if (ipout.eq.3) irec=irec+npart_per_process(ip)
             call mpif_mpi_barrier
           end do
         end if
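The new irec counter gives each rank its starting record in the shared averaged-output file: the ranks write in turn (enforced by mpif_mpi_barrier), and before rank ip+1 writes, irec has been advanced by npart_per_process(ip) for every earlier rank, so partoutput_average(itime,irec) can place its records after those already written. This is also why the Allgather in mpif_calculate_part_redist must still execute even though the rebalancing itself is skipped for ipout=3.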
@@ -756,6 +762,11 @@ subroutine timemanager(metdata_format)
       if (mp_measure_time) call mpif_mtime('advance',1)

+! Calculate average position for particle dump output
+!****************************************************
+          if (ipout.eq.3) call partpos_average(itime,j)
+
 ! Calculate the gross fluxes across layer interfaces
 !***************************************************

@@ -894,7 +905,7 @@ subroutine timemanager(metdata_format)
 ! MPI process 0 creates the file, the other processes append to it
         do ip=0, mp_partgroup_np-1
           if (ip.eq.mp_partid) then
-            ! if (mp_dbg_mode) write(*,*) 'call partoutput(itime), proc, mp_partid',ip,mp_partid
+            if (mp_dbg_mode) write(*,*) 'call partoutput(itime), proc, mp_partid',ip,mp_partid
             call partoutput(itime) ! dump particle positions
           end if
           call mpif_mpi_barrier
src/verttransform_ecmwf.f90

@@ -72,7 +72,6 @@ subroutine verttransform_ecmwf(n,uuh,vvh,wwh,pvh)
   use par_mod
   use com_mod
   use cmapf_mod, only: cc2gll
-! use mpi_mod

   implicit none