Commit d9f0585f authored May 08, 2017 by Sabine
Merge branch 'dev' of git.nilu.no:flexpart/flexpart into dev

Parents: d404d981, c8fc7249
Changes: 29
src/FLEXPART.f90
@@ -66,9 +66,10 @@ program flexpart

  end do
  call gasdev1(idummy,rannumb(maxrand),rannumb(maxrand-1))

! FLEXPART version string
  flexversion_major='10' ! Major version number, also used for species file names
-  flexversion='Version '//trim(flexversion_major)//'.0beta (2015-05-01)'
+  flexversion='Version '//trim(flexversion_major)//'.1beta (2016-11-02)'
  verbosity=0

! Read the pathnames where input/output files are stored
@@ -383,6 +384,17 @@ program flexpart

  end do
  end do

+! Inform whether output kernel is used or not
+!*********************************************
+  if (lroot) then
+    if (lnokernel) then
+      write(*,*) "Concentrations are calculated without using kernel"
+    else
+      write(*,*) "Concentrations are calculated using kernel"
+    end if
+  end if

! Calculate particle trajectories
!********************************
@@ -401,11 +413,16 @@ program flexpart

  call timemanager

! NIK 16.02.2005
-  write(*,*) '**********************************************'
-  write(*,*) 'Total number of occurences of below-cloud scavenging', tot_blc_count
-  write(*,*) 'Total number of occurences of in-cloud scavenging', tot_inc_count
-  write(*,*) '**********************************************'
+  do i=1,nspec
+    write(*,*) '**********************************************'
+    write(*,*) 'Scavenging statistics for species ', species(i), ':'
+    write(*,*) 'Total number of occurences of below-cloud scavenging', &
+         & tot_blc_count(i)
+    write(*,*) 'Total number of occurences of in-cloud scavenging', &
+         & tot_inc_count(i)
+    write(*,*) '**********************************************'
+  end do

  write(*,*) 'CONGRATULATIONS: YOU HAVE SUCCESSFULLY COMPLETED A FLE&
       &XPART MODEL RUN!'
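The hunk above replaces the scalar scavenging counters with per-species arrays and wraps the report in a loop over nspec; the counters themselves are declared elsewhere in the model with dimension(nspec). A minimal standalone sketch of that per-species reporting pattern (not FLEXPART code; the species names and counter values below are invented, and plain default integers stand in for FLEXPART's 8-byte counters):

program scav_stats_sketch
  implicit none
  integer, parameter :: nspec = 2
  character(len=10), dimension(nspec) :: species = (/ 'AEROSOL   ', 'SO2       ' /)
  integer, dimension(nspec) :: tot_blc_count = (/ 12345, 678 /)   ! invented counts
  integer, dimension(nspec) :: tot_inc_count = (/ 2345, 89 /)
  integer :: i

  do i=1,nspec
    write(*,*) 'Scavenging statistics for species ', species(i), ':'
    write(*,*) 'Total number of occurences of below-cloud scavenging', tot_blc_count(i)
    write(*,*) 'Total number of occurences of in-cloud scavenging', tot_inc_count(i)
  end do
end program scav_stats_sketch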
src/FLEXPART_MPI.f90
@@ -54,15 +54,17 @@ program flexpart

  character(len=256) :: inline_options  !pathfile, flexversion, arg2

! Initialize mpi
!*********************
  call mpif_init
  if (mp_measure_time) call mpif_mtime('flexpart',0)

! Initialize arrays in com_mod
!*****************************
-  call com_mod_allocate_part(maxpart_mpi)
+  if (.not.(lmpreader.and.lmp_use_reader)) call com_mod_allocate_part(maxpart_mpi)

! Generate a large number of random numbers
!******************************************
@@ -78,7 +80,7 @@ program flexpart

! FLEXPART version string
  flexversion_major='10' ! Major version number, also used for species file names
! flexversion='Ver. 10 Beta MPI (2015-05-01)'
-  flexversion='Ver. '//trim(flexversion_major)//' Beta MPI (2015-05-01)'
+  flexversion='Ver. '//trim(flexversion_major)//'.1beta MPI (2016-11-02)'
  verbosity=0

! Read the pathnames where input/output files are stored
@@ -305,9 +307,11 @@ program flexpart
...
@@ -305,9 +307,11 @@ program flexpart
print
*
,
'Initialize all particles to non-existent'
print
*
,
'Initialize all particles to non-existent'
endif
endif
do
j
=
1
,
size
(
itra1
)
! maxpart_mpi
if
(
.not.
(
lmpreader
.and.
lmp_use_reader
))
then
itra1
(
j
)
=
-999999999
do
j
=
1
,
size
(
itra1
)
! maxpart_mpi
end
do
itra1
(
j
)
=
-999999999
end
do
end
if
! For continuation of previous run, read in particle positions
! For continuation of previous run, read in particle positions
!*************************************************************
!*************************************************************
@@ -317,7 +321,7 @@ program flexpart

      print*,'call readpartpositions'
    endif
    ! readwind process skips this step
-    if (lmp_use_reader.and..not.lmpreader) call readpartpositions
+    if (.not.(lmpreader.and.lmp_use_reader)) call readpartpositions
  else
    if (verbosity.gt.0 .and. lroot) then
      print*,'numpart=0, numparticlecount=0'
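The one-line change above flips when readpartpositions is skipped: with the old guard the call was made only on worker ranks while a dedicated reader rank was in use (and on no rank at all when lmp_use_reader was false), whereas the new guard makes every rank except a dedicated reader call it. A tiny standalone sketch (invented program; the FLEXPART flag names are reused only as local logicals) that prints both guards for all four flag combinations:

program reader_guard_sketch
  implicit none
  logical :: lmpreader, lmp_use_reader, old_guard, new_guard
  integer :: a, b

  do a=0,1
    do b=0,1
      lmpreader      = (a.eq.1)   ! this rank is the dedicated wind-reader rank
      lmp_use_reader = (b.eq.1)   ! a dedicated reader rank is in use at all
      old_guard = lmp_use_reader .and. .not.lmpreader
      new_guard = .not.(lmpreader .and. lmp_use_reader)
      write(*,'(4(a,l2))') ' lmpreader=',lmpreader,' lmp_use_reader=',lmp_use_reader, &
           ' old=',old_guard,' new=',new_guard
    end do
  end do
end program reader_guard_sketch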
@@ -424,6 +428,16 @@ program flexpart

  end do
  end do

+! Inform whether output kernel is used or not
+!*********************************************
+  if (lroot) then
+    if (lnokernel) then
+      write(*,*) "Concentrations are calculated without using kernel"
+    else
+      write(*,*) "Concentrations are calculated using kernel"
+    end if
+  end if

! Calculate particle trajectories
!********************************
@@ -447,24 +461,29 @@ program flexpart

! NIK 16.02.2005
  if (lroot) then
-    call MPI_Reduce(MPI_IN_PLACE, tot_blc_count, 1, MPI_INTEGER8, MPI_SUM, id_root, &
+    call MPI_Reduce(MPI_IN_PLACE, tot_blc_count, nspec, MPI_INTEGER8, MPI_SUM, id_root, &
         & mp_comm_used, mp_ierr)
-    call MPI_Reduce(MPI_IN_PLACE, tot_inc_count, 1, MPI_INTEGER8, MPI_SUM, id_root, &
+    call MPI_Reduce(MPI_IN_PLACE, tot_inc_count, nspec, MPI_INTEGER8, MPI_SUM, id_root, &
         & mp_comm_used, mp_ierr)
  else
    if (mp_partgroup_pid.ge.0) then ! Skip for readwind process
-      call MPI_Reduce(tot_blc_count, 0, 1, MPI_INTEGER8, MPI_SUM, id_root, &
+      call MPI_Reduce(tot_blc_count, 0, nspec, MPI_INTEGER8, MPI_SUM, id_root, &
           & mp_comm_used, mp_ierr)
-      call MPI_Reduce(tot_inc_count, 0, 1, MPI_INTEGER8, MPI_SUM, id_root, &
+      call MPI_Reduce(tot_inc_count, 0, nspec, MPI_INTEGER8, MPI_SUM, id_root, &
           & mp_comm_used, mp_ierr)
    end if
  end if

  if (lroot) then
-    write(*,*) '**********************************************'
-    write(*,*) 'Total number of occurences of below-cloud scavenging', tot_blc_count
-    write(*,*) 'Total number of occurences of in-cloud scavenging', tot_inc_count
-    write(*,*) '**********************************************'
+    do i=1,nspec
+      write(*,*) '**********************************************'
+      write(*,*) 'Scavenging statistics for species ', species(i), ':'
+      write(*,*) 'Total number of occurences of below-cloud scavenging', &
+           & tot_blc_count(i)
+      write(*,*) 'Total number of occurences of in-cloud scavenging', &
+           & tot_inc_count(i)
+      write(*,*) '**********************************************'
+    end do

    write(*,*) 'CONGRATULATIONS: YOU HAVE SUCCESSFULLY COMPLETED A FLE&
         &XPART MODEL RUN!'
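The MPI_Reduce calls above now reduce nspec counters per call instead of one, using the usual in-place pattern: the root rank passes MPI_IN_PLACE as the send buffer so its own values enter the sum, while the other ranks pass their local array (their receive-buffer argument is ignored). A standalone sketch of that pattern, assuming an MPI library providing the Fortran mpi module and the common 8-byte integer kind (ranks, names and values invented):

program reduce_in_place_sketch
  use mpi
  implicit none
  integer, parameter :: nspec = 3
  integer(kind=8)    :: counts(nspec), dummy(nspec)
  integer :: ierr, rank

  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
  counts(:) = int(rank+1, 8)                 ! dummy per-rank contribution

  if (rank.eq.0) then
    ! root: in place, the summed result overwrites counts
    call MPI_Reduce(MPI_IN_PLACE, counts, nspec, MPI_INTEGER8, MPI_SUM, 0, &
         MPI_COMM_WORLD, ierr)
    write(*,*) 'summed counters on root:', counts
  else
    ! non-root: counts is the send buffer; the receive buffer is ignored
    call MPI_Reduce(counts, dummy, nspec, MPI_INTEGER8, MPI_SUM, 0, &
         MPI_COMM_WORLD, ierr)
  end if

  call MPI_Finalize(ierr)
end program reduce_in_place_sketch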
src/boundcond_domainfill_mpi.f90
@@ -20,28 +20,39 @@

!**********************************************************************
subroutine boundcond_domainfill(itime,loutend)
!                                  i      i
!*****************************************************************************
!                                                                             *
! Particles are created by this subroutine continuously throughout the       *
! simulation at the boundaries of the domain-filling box.                    *
! All particles carry the same amount of mass which alltogether comprises the*
! mass of air within the box, which remains (more or less) constant.         *
!                                                                             *
!     Author: A. Stohl                                                        *
!                                                                             *
!     16 October 2002                                                         *
!                                                                             *
!*****************************************************************************
!                                                                             *
! Variables:                                                                  *
!                                                                             *
! nx_we(2)       grid indices for western and eastern boundary of domain-    *
!                filling trajectory calculations                              *
! ny_sn(2)       grid indices for southern and northern boundary of domain-  *
!                filling trajectory calculations                              *
!                                                                             *
!*****************************************************************************
+! CHANGES
+!   08/2016 eso: MPI version:
+!
+!   -Root process release particles and distributes to other processes.
+!    Temporary arrays are used, also for the non-root (receiving) processes.
+!   -The scheme can be improved by having all processes report numpart
+!    (keeping track of how many particles have left the domain), so that
+!    a proportional amount of new particles can be distributed (however
+!    we have a separate function called from timemanager that will
+!    redistribute particles among processes if there are imbalances)
+!*****************************************************************************

  use point_mod
  use par_mod
@@ -54,7 +65,8 @@ subroutine boundcond_domainfill(itime,loutend)
...
@@ -54,7 +65,8 @@ subroutine boundcond_domainfill(itime,loutend)
real
::
dz
,
dz1
,
dz2
,
dt1
,
dt2
,
dtt
,
ylat
,
xm
,
cosfact
,
accmasst
real
::
dz
,
dz1
,
dz2
,
dt1
,
dt2
,
dtt
,
ylat
,
xm
,
cosfact
,
accmasst
integer
::
itime
,
in
,
indz
,
indzp
,
i
,
loutend
integer
::
itime
,
in
,
indz
,
indzp
,
i
,
loutend
integer
::
j
,
k
,
ix
,
jy
,
m
,
indzh
,
indexh
,
minpart
,
ipart
,
mmass
integer
::
j
,
k
,
ix
,
jy
,
m
,
indzh
,
indexh
,
minpart
,
ipart
,
mmass
integer
::
numactiveparticles
integer
::
numactiveparticles
,
numpart_total
,
rel_counter
integer
,
allocatable
,
dimension
(:)
::
numrel_mpi
!, numactiveparticles_mpi
real
::
windl
(
2
),
rhol
(
2
)
real
::
windl
(
2
),
rhol
(
2
)
real
::
windhl
(
2
),
rhohl
(
2
)
real
::
windhl
(
2
),
rhohl
(
2
)
@@ -65,26 +77,37 @@ subroutine boundcond_domainfill(itime,loutend)

  real :: pvpart,ddx,ddy,rddx,rddy,p1,p2,p3,p4,y1(2),yh1(2)
  integer :: idummy = -11
+  integer :: mtag
  logical :: first_call=.true.
+! Sizes of temporary arrays are maxpartfract*maxpart. Increase maxpartfract if
+! needed.
+  real,parameter :: maxpartfract=0.1
+  integer :: tmp_size = int(maxpartfract*maxpart)

! Use different seed for each process
  if (first_call) then
    idummy=idummy+mp_seed
    first_call=.false.
  end if

! If domain-filling is global, no boundary conditions are needed
!***************************************************************

  if (gdomainfill) return

  accmasst=0.
  numactiveparticles=0
+! Keep track of active particles on each process
+  allocate(numrel_mpi(0:mp_partgroup_np-1)) ! numactiveparticles_mpi(0:mp_partgroup_np-1)
+
+! New particles to be released on each process
+  numrel_mpi(:) = 0

-! Terminate trajectories that have left the domain, if domain-filling
-! trajectory calculation domain is not global
-!********************************************************************
+! Terminate trajectories that have left the domain, if domain-filling
+! trajectory calculation domain is not global. Done for all processes
+!********************************************************************

  do i=1,numpart
    if (itra1(i).eq.itime) then
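The "Use different seed for each process" block above shifts the seed by mp_seed so that each MPI rank draws its own random sequence. A toy standalone illustration of the same idea (invented names; the rank-based offset below merely stands in for FLEXPART's mp_seed):

program seed_offset_sketch
  use mpi
  implicit none
  integer :: ierr, rank, idummy
  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
  idummy = -11
  idummy = idummy + 100*rank     ! rank-dependent offset decorrelates the streams
  write(*,*) 'rank', rank, 'seeds its random stream with', idummy
  call MPI_Finalize(ierr)
end program seed_offset_sketch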
@@ -97,504 +120,681 @@ subroutine boundcond_domainfill(itime,loutend)

    if (itra1(i).ne.-999999999) numactiveparticles = &
         numactiveparticles+1
  end do
+! numactiveparticles_mpi(mp_partid) = numactiveparticles
+
+! Collect number of active particles from all processes
+!  call MPI_Allgather(numactiveparticles, 1, MPI_INTEGER, &
+!       &numactiveparticles_mpi, 1, MPI_INTEGER, mp_comm_used, mp_ierr)
+
+! Total number of new releases
+  numpart_total = 0
+
+!***************************************
+! This section only done by root process
+!***************************************
+  if (lroot) then
+
+! Use separate arrays for newly released particles
+!*************************************************
+    allocate(itra1_tmp(tmp_size),npoint_tmp(tmp_size),nclass_tmp(tmp_size),&
+         & idt_tmp(tmp_size),itramem_tmp(tmp_size),itrasplit_tmp(tmp_size),&
+         & xtra1_tmp(tmp_size),ytra1_tmp(tmp_size),ztra1_tmp(tmp_size),&
+         & xmass1_tmp(tmp_size, maxspec))
+
+! Initialize all particles as non-existent
+    itra1_tmp(:)=-999999999

! Determine auxiliary variables for time interpolation
!*****************************************************

  dt1=real(itime-memtime(1))
  dt2=real(memtime(2)-itime)
  dtt=1./(dt1+dt2)

! Initialize auxiliary variable used to search for vacant storage space
!**********************************************************************

  minpart=1

!***************************************
! Western and eastern boundary condition
!***************************************

! Loop from south to north
!*************************

  do jy=ny_sn(1),ny_sn(2)

! Loop over western (index 1) and eastern (index 2) boundary
!***********************************************************

    do k=1,2

! Loop over all release locations in a column
!********************************************

      do j=1,numcolumn_we(k,jy)

! Determine, for each release location, the area of the corresponding boundary
!******************************************************************************

        if (j.eq.1) then
          deltaz=(zcolumn_we(k,jy,2)+zcolumn_we(k,jy,1))/2.
        else if (j.eq.numcolumn_we(k,jy)) then
!        deltaz=height(nz)-(zcolumn_we(k,jy,j-1)+
!    +    zcolumn_we(k,jy,j))/2.
! In order to avoid taking a very high column for very many particles,
! use the deltaz from one particle below instead
          deltaz=(zcolumn_we(k,jy,j)-zcolumn_we(k,jy,j-2))/2.
        else
          deltaz=(zcolumn_we(k,jy,j+1)-zcolumn_we(k,jy,j-1))/2.
        endif
        if ((jy.eq.ny_sn(1)).or.(jy.eq.ny_sn(2))) then
          boundarea=deltaz*111198.5/2.*dy
        else
          boundarea=deltaz*111198.5*dy
        endif

! Interpolate the wind velocity and density to the release location
!******************************************************************

! Determine the model level below the release position
!*****************************************************

        do i=2,nz
          if (height(i).gt.zcolumn_we(k,jy,j)) then
            indz=i-1
            indzp=i
            goto 6
          endif
        end do
6       continue

! Vertical distance to the level below and above current position
!****************************************************************

        dz1=zcolumn_we(k,jy,j)-height(indz)
        dz2=height(indzp)-zcolumn_we(k,jy,j)
        dz=1./(dz1+dz2)

! Vertical and temporal interpolation
!************************************

        do m=1,2
          indexh=memind(m)
          do in=1,2
            indzh=indz+in-1
            windl(in)=uu(nx_we(k),jy,indzh,indexh)
            rhol(in)=rho(nx_we(k),jy,indzh,indexh)
          end do
          windhl(m)=(dz2*windl(1)+dz1*windl(2))*dz
          rhohl(m)=(dz2*rhol(1)+dz1*rhol(2))*dz
        end do
        windx=(windhl(1)*dt2+windhl(2)*dt1)*dtt
        rhox=(rhohl(1)*dt2+rhohl(2)*dt1)*dtt

-! Calculate mass flux
-!********************
-        fluxofmass=windx*rhox*boundarea*real(lsynctime)
+! Calculate mass flux, divided by number of processes
+!****************************************************
+        fluxofmass=windx*rhox*boundarea*real(lsynctime)/mp_partgroup_np

! If the mass flux is directed into the domain, add it to previous mass fluxes;
! if it is out of the domain, set accumulated mass flux to zero
!******************************************************************************

        if (k.eq.1) then
          if (fluxofmass.ge.0.) then
            acc_mass_we(k,jy,j)=acc_mass_we(k,jy,j)+fluxofmass
          else
            acc_mass_we(k,jy,j)=0.
          endif
        else
          if (fluxofmass.le.0.) then
            acc_mass_we(k,jy,j)=acc_mass_we(k,jy,j)+abs(fluxofmass)
          else
            acc_mass_we(k,jy,j)=0.
          endif
        endif
        accmasst=accmasst+acc_mass_we(k,jy,j)

! If the accumulated mass exceeds half the mass that each particle shall carry,
! one (or more) particle(s) is (are) released and the accumulated mass is
! reduced by the mass of this (these) particle(s)
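The release calculation in this hunk is plain linear interpolation, first between the two model levels bracketing the release height (weights dz1, dz2) and then between the two wind-field times (weights dt1, dt2), before the mass flux windx*rhox*boundarea*lsynctime is formed (divided by the number of processes in the MPI version). A standalone numerical sketch of that two-step weighting (all heights, times and field values below are invented):

program interp_sketch
  implicit none
  real :: z, zl, zu, dz1, dz2, dz, dt1, dt2, dtt
  real :: f(2,2), fhl(2), fx
  integer :: m

  z  = 150. ;  zl = 100. ;  zu = 300.        ! release height between two model levels
  dt1 = 120. ; dt2 = 480.                    ! seconds since / until the two wind fields
  f = reshape((/ 5.0, 8.0, 6.0, 9.0 /), (/2,2/))   ! sample values f(level,time)

  dz1 = z - zl
  dz2 = zu - z
  dz  = 1./(dz1+dz2)
  dtt = 1./(dt1+dt2)

  do m=1,2                                   ! vertical interpolation at both times
    fhl(m) = (dz2*f(1,m) + dz1*f(2,m))*dz
  end do
  fx = (fhl(1)*dt2 + fhl(2)*dt1)*dtt         ! temporal interpolation

  write(*,*) 'interpolated value:', fx
end program interp_sketch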