flexpart / flexpart / Commits / d9f0585f

Commit d9f0585f, authored May 08, 2017 by Sabine

    Merge branch 'dev' of git.nilu.no:flexpart/flexpart into dev

Parents: d404d981, c8fc7249
Changes: 29 files
src/FLEXPART.f90
...
@@ -66,9 +66,10 @@ program flexpart
   end do
   call gasdev1(idummy,rannumb(maxrand),rannumb(maxrand-1))
   ! FLEXPART version string
   flexversion_major='10' ! Major version number, also used for species file names
-  flexversion='Version '//trim(flexversion_major)//'.0beta (2015-05-01)'
+  flexversion='Version '//trim(flexversion_major)//'.1beta (2016-11-02)'
   verbosity=0
   ! Read the pathnames where input/output files are stored
...
@@ -383,6 +384,17 @@ program flexpart
   end do
   end do
+  ! Inform whether output kernel is used or not
+  !*********************************************
+  if (lroot) then
+    if (lnokernel) then
+      write(*,*) "Concentrations are calculated without using kernel"
+    else
+      write(*,*) "Concentrations are calculated using kernel"
+    end if
+  end if
   ! Calculate particle trajectories
   !********************************
...
@@ -401,10 +413,15 @@ program flexpart
   call timemanager
   ! NIK 16.02.2005
-  write(*,*) '**********************************************'
-  write(*,*) 'Total number of occurences of below-cloud scavenging', tot_blc_count
-  write(*,*) 'Total number of occurences of in-cloud scavenging', tot_inc_count
-  write(*,*) '**********************************************'
+  do i=1,nspec
+    write(*,*) '**********************************************'
+    write(*,*) 'Scavenging statistics for species ', species(i), ':'
+    write(*,*) 'Total number of occurences of below-cloud scavenging', &
+         &tot_blc_count(i)
+    write(*,*) 'Total number of occurences of in-cloud scavenging', &
+         &tot_inc_count(i)
+    write(*,*) '**********************************************'
+  end do
   write(*,*) 'CONGRATULATIONS: YOU HAVE SUCCESSFULLY COMPLETED A FLE&
   &XPART MODEL RUN!'
...
src/FLEXPART_MPI.f90
...
@@ -54,15 +54,17 @@ program flexpart
   character(len=256) :: inline_options  !pathfile, flexversion, arg2
   ! Initialize mpi
   !*********************
   call mpif_init
   if (mp_measure_time) call mpif_mtime('flexpart',0)
   ! Initialize arrays in com_mod
   !*****************************
-  call com_mod_allocate_part(maxpart_mpi)
+  if (.not.(lmpreader.and.lmp_use_reader)) call com_mod_allocate_part(maxpart_mpi)
   ! Generate a large number of random numbers
   !******************************************
...
@@ -78,7 +80,7 @@ program flexpart
   ! FLEXPART version string
   flexversion_major='10' ! Major version number, also used for species file names
   ! flexversion='Ver. 10 Beta MPI (2015-05-01)'
-  flexversion='Ver. '//trim(flexversion_major)//' Beta MPI (2015-05-01)'
+  flexversion='Ver. '//trim(flexversion_major)//'.1beta MPI (2016-11-02)'
   verbosity=0
   ! Read the pathnames where input/output files are stored
...
@@ -305,9 +307,11 @@ program flexpart
     print *, 'Initialize all particles to non-existent'
   endif
+  if (.not.(lmpreader.and.lmp_use_reader)) then
     do j=1, size(itra1) ! maxpart_mpi
       itra1(j)=-999999999
     end do
+  end if
   ! For continuation of previous run, read in particle positions
   !*************************************************************
...
@@ -317,7 +321,7 @@ program flexpart
       print *, 'call readpartpositions'
     endif
     ! readwind process skips this step
-    if (lmp_use_reader.and..not.lmpreader) call readpartpositions
+    if (.not.(lmpreader.and.lmp_use_reader)) call readpartpositions
   else
     if (verbosity.gt.0.and.lroot) then
       print *, 'numpart=0, numparticlecount=0'
...
@@ -424,6 +428,16 @@ program flexpart
   end do
   end do
+  ! Inform whether output kernel is used or not
+  !*********************************************
+  if (lroot) then
+    if (lnokernel) then
+      write(*,*) "Concentrations are calculated without using kernel"
+    else
+      write(*,*) "Concentrations are calculated using kernel"
+    end if
+  end if
   ! Calculate particle trajectories
   !********************************
...
@@ -447,24 +461,29 @@ program flexpart
   ! NIK 16.02.2005
   if (lroot) then
-    call MPI_Reduce(MPI_IN_PLACE, tot_blc_count, 1, MPI_INTEGER8, MPI_SUM, id_root, &
+    call MPI_Reduce(MPI_IN_PLACE, tot_blc_count, nspec, MPI_INTEGER8, MPI_SUM, id_root, &
          & mp_comm_used, mp_ierr)
-    call MPI_Reduce(MPI_IN_PLACE, tot_inc_count, 1, MPI_INTEGER8, MPI_SUM, id_root, &
+    call MPI_Reduce(MPI_IN_PLACE, tot_inc_count, nspec, MPI_INTEGER8, MPI_SUM, id_root, &
         & mp_comm_used, mp_ierr)
   else
     if (mp_partgroup_pid.ge.0) then ! Skip for readwind process
-      call MPI_Reduce(tot_blc_count, 0, 1, MPI_INTEGER8, MPI_SUM, id_root, &
+      call MPI_Reduce(tot_blc_count, 0, nspec, MPI_INTEGER8, MPI_SUM, id_root, &
          & mp_comm_used, mp_ierr)
-      call MPI_Reduce(tot_inc_count, 0, 1, MPI_INTEGER8, MPI_SUM, id_root, &
+      call MPI_Reduce(tot_inc_count, 0, nspec, MPI_INTEGER8, MPI_SUM, id_root, &
        & mp_comm_used, mp_ierr)
     end if
   end if
   if (lroot) then
-    write(*,*) '**********************************************'
-    write(*,*) 'Total number of occurences of below-cloud scavenging', tot_blc_count
-    write(*,*) 'Total number of occurences of in-cloud scavenging', tot_inc_count
-    write(*,*) '**********************************************'
+    do i=1,nspec
+      write(*,*) '**********************************************'
+      write(*,*) 'Scavenging statistics for species ', species(i), ':'
+      write(*,*) 'Total number of occurences of below-cloud scavenging', &
+           &tot_blc_count(i)
+      write(*,*) 'Total number of occurences of in-cloud scavenging', &
+           &tot_inc_count(i)
+      write(*,*) '**********************************************'
+    end do
     write(*,*) 'CONGRATULATIONS: YOU HAVE SUCCESSFULLY COMPLETED A FLE&
     &XPART MODEL RUN!'
...
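The hunk above changes the scavenging-counter reductions from a scalar (count 1) to per-species arrays of length nspec, with MPI_IN_PLACE on the root rank so the root's own contribution is kept. The following is a minimal, hedged sketch of that reduction pattern, not FLEXPART code: the names counts and n_species are invented, the integer kind is assumed to match MPI_INTEGER8, and a dummy array stands in for the literal 0 the diff passes as the unused receive buffer.

    program reduce_sketch
      use mpi
      implicit none
      integer, parameter :: i8 = selected_int_kind(15)   ! assumed to match MPI_INTEGER8
      integer, parameter :: n_species = 3                ! illustrative species count
      integer(i8) :: counts(n_species), dummy(n_species)
      integer :: rank, ierr

      call MPI_Init(ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
      counts(:) = int(rank+1, i8)          ! stand-in per-rank statistics

      if (rank == 0) then
        ! Root: receive the global per-species sums in place
        call MPI_Reduce(MPI_IN_PLACE, counts, n_species, MPI_INTEGER8, &
             MPI_SUM, 0, MPI_COMM_WORLD, ierr)
        write(*,*) 'summed counters:', counts
      else
        ! Non-root ranks only send; the receive buffer is ignored here
        call MPI_Reduce(counts, dummy, n_species, MPI_INTEGER8, &
             MPI_SUM, 0, MPI_COMM_WORLD, ierr)
      end if

      call MPI_Finalize(ierr)
    end program reduce_sketch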
src/boundcond_domainfill_mpi.f90
...
@@ -20,28 +20,39 @@
 !**********************************************************************
 subroutine boundcond_domainfill(itime,loutend)
 !                                  i      i
 !*****************************************************************************
 !                                                                             *
 ! Particles are created by this subroutine continuously throughout the       *
 ! simulation at the boundaries of the domain-filling box.                     *
 ! All particles carry the same amount of mass which alltogether comprises the*
 ! mass of air within the box, which remains (more or less) constant.         *
 !                                                                             *
 ! Author: A. Stohl                                                            *
 !                                                                             *
 ! 16 October 2002                                                             *
 !                                                                             *
 !*****************************************************************************
 !                                                                             *
 ! Variables:                                                                  *
 !                                                                             *
 ! nx_we(2)  grid indices for western and eastern boundary of domain-         *
 !           filling trajectory calculations                                   *
 ! ny_sn(2)  grid indices for southern and northern boundary of domain-       *
 !           filling trajectory calculations                                   *
 !                                                                             *
 !*****************************************************************************
+! CHANGES
+! 08/2016 eso: MPI version:
+!
+!  -Root process release particles and distributes to other processes.
+!   Temporary arrays are used, also for the non-root (receiving) processes.
+!  -The scheme can be improved by having all processes report numpart
+!   (keeping track of how many particles have left the domain), so that
+!   a proportional amount of new particles can be distributed (however
+!   we have a separate function called from timemanager that will
+!   redistribute particles among processes if there are imbalances)
+!*****************************************************************************
 use point_mod
 use par_mod
...
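The added CHANGES note describes the new release scheme: the root rank generates the boundary releases into temporary arrays and then hands each particle process its share. The commit implements this with the _tmp arrays shown further down; the sketch below is only an illustration of such a root-to-workers distribution using MPI_Scatterv, under assumptions that are not from the source (an even split, the invented names x_tmp and x_local, and a made-up "10 releases per rank" figure).

    program scatter_sketch
      use mpi
      implicit none
      integer :: nprocs, rank, ierr, i, ntotal, nlocal
      integer, allocatable :: counts(:), displs(:)
      real, allocatable :: x_tmp(:), x_local(:)

      call MPI_Init(ierr)
      call MPI_Comm_size(MPI_COMM_WORLD, nprocs, ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)

      ntotal = 10*nprocs                     ! pretend number of new releases
      allocate(counts(nprocs), displs(nprocs))
      counts(:) = ntotal/nprocs              ! even split (assumption)
      displs(1) = 0
      do i = 2, nprocs
        displs(i) = displs(i-1) + counts(i-1)
      end do
      nlocal = counts(rank+1)
      allocate(x_local(nlocal))

      if (rank == 0) then
        allocate(x_tmp(ntotal))
        x_tmp(:) = 0.5                       ! stand-in release positions
      else
        allocate(x_tmp(1))                   ! send buffer unused off-root
      end if

      ! Root distributes; every rank receives its slice into x_local
      call MPI_Scatterv(x_tmp, counts, displs, MPI_REAL, &
           x_local, nlocal, MPI_REAL, 0, MPI_COMM_WORLD, ierr)

      write(*,*) 'rank', rank, 'received', nlocal, 'new particles'
      call MPI_Finalize(ierr)
    end program scatter_sketch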
@@ -54,7 +65,8 @@ subroutine boundcond_domainfill(itime,loutend)
...
@@ -54,7 +65,8 @@ subroutine boundcond_domainfill(itime,loutend)
real
::
dz
,
dz1
,
dz2
,
dt1
,
dt2
,
dtt
,
ylat
,
xm
,
cosfact
,
accmasst
real
::
dz
,
dz1
,
dz2
,
dt1
,
dt2
,
dtt
,
ylat
,
xm
,
cosfact
,
accmasst
integer
::
itime
,
in
,
indz
,
indzp
,
i
,
loutend
integer
::
itime
,
in
,
indz
,
indzp
,
i
,
loutend
integer
::
j
,
k
,
ix
,
jy
,
m
,
indzh
,
indexh
,
minpart
,
ipart
,
mmass
integer
::
j
,
k
,
ix
,
jy
,
m
,
indzh
,
indexh
,
minpart
,
ipart
,
mmass
integer
::
numactiveparticles
integer
::
numactiveparticles
,
numpart_total
,
rel_counter
integer
,
allocatable
,
dimension
(:)
::
numrel_mpi
!, numactiveparticles_mpi
real
::
windl
(
2
),
rhol
(
2
)
real
::
windl
(
2
),
rhol
(
2
)
real
::
windhl
(
2
),
rhohl
(
2
)
real
::
windhl
(
2
),
rhohl
(
2
)
...
@@ -65,26 +77,37 @@ subroutine boundcond_domainfill(itime,loutend)
...
@@ -65,26 +77,37 @@ subroutine boundcond_domainfill(itime,loutend)
real
::
pvpart
,
ddx
,
ddy
,
rddx
,
rddy
,
p1
,
p2
,
p3
,
p4
,
y1
(
2
),
yh1
(
2
)
real
::
pvpart
,
ddx
,
ddy
,
rddx
,
rddy
,
p1
,
p2
,
p3
,
p4
,
y1
(
2
),
yh1
(
2
)
integer
::
idummy
=
-11
integer
::
idummy
=
-11
integer
::
mtag
logical
::
first_call
=
.true.
logical
::
first_call
=
.true.
! Sizes of temporary arrays are maxpartfract*maxpart. Increase maxpartfract if
! needed.
real
,
parameter
::
maxpartfract
=
0.1
integer
::
tmp_size
=
int
(
maxpartfract
*
maxpart
)
! Use different seed for each process
! Use different seed for each process
if
(
first_call
)
then
if
(
first_call
)
then
idummy
=
idummy
+
mp_seed
idummy
=
idummy
+
mp_seed
first_call
=
.false.
first_call
=
.false.
end
if
end
if
! If domain-filling is global, no boundary conditions are needed
! If domain-filling is global, no boundary conditions are needed
!***************************************************************
!***************************************************************
if
(
gdomainfill
)
return
if
(
gdomainfill
)
return
accmasst
=
0.
accmasst
=
0.
numactiveparticles
=
0
numactiveparticles
=
0
! Keep track of active particles on each process
allocate
(
numrel_mpi
(
0
:
mp_partgroup_np
-1
))
! numactiveparticles_mpi(0:mp_partgroup_np-1)
! Terminate trajectories that have left the domain, if domain-filling
! New particles to be released on each process
! trajectory calculation domain is not global
numrel_mpi
(:)
=
0
!********************************************************************
! Terminate trajectories that have left the domain, if domain-filling
! trajectory calculation domain is not global. Done for all processes
!********************************************************************
do
i
=
1
,
numpart
do
i
=
1
,
numpart
if
(
itra1
(
i
)
.eq.
itime
)
then
if
(
itra1
(
i
)
.eq.
itime
)
then
...
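The "Use different seed for each process" block above offsets the local random-number seed by mp_seed so that every MPI rank draws an independent random stream. A minimal, hedged sketch of that pattern follows; here mp_seed is simply derived from the rank for illustration, whereas FLEXPART sets it elsewhere in mpi_mod.

    program seed_sketch
      use mpi
      implicit none
      integer :: rank, ierr, idummy, mp_seed

      call MPI_Init(ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
      idummy  = -11                 ! same starting value as in the routine
      mp_seed = rank*1000           ! assumed per-process offset (illustrative)
      idummy  = idummy + mp_seed    ! the offsetting pattern used in the diff above
      write(*,*) 'rank', rank, 'uses seed', idummy
      call MPI_Finalize(ierr)
    end program seed_sketch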
@@ -97,49 +120,75 @@ subroutine boundcond_domainfill(itime,loutend)
...
@@ -97,49 +120,75 @@ subroutine boundcond_domainfill(itime,loutend)
if
(
itra1
(
i
)
.ne.
-999999999
)
numactiveparticles
=
&
if
(
itra1
(
i
)
.ne.
-999999999
)
numactiveparticles
=
&
numactiveparticles
+1
numactiveparticles
+1
end
do
end
do
! numactiveparticles_mpi(mp_partid) = numactiveparticles
! Collect number of active particles from all processes
! call MPI_Allgather(numactiveparticles, 1, MPI_INTEGER, &
! &numactiveparticles_mpi, 1, MPI_INTEGER, mp_comm_used, mp_ierr)
! Total number of new releases
numpart_total
=
0
! This section only done by root process
!***************************************
if
(
lroot
)
then
! Use separate arrays for newly released particles
!*************************************************
allocate
(
itra1_tmp
(
tmp_size
),
npoint_tmp
(
tmp_size
),
nclass_tmp
(
tmp_size
),&
&
idt_tmp
(
tmp_size
),
itramem_tmp
(
tmp_size
),
itrasplit_tmp
(
tmp_size
),&
&
xtra1_tmp
(
tmp_size
),
ytra1_tmp
(
tmp_size
),
ztra1_tmp
(
tmp_size
),&
&
xmass1_tmp
(
tmp_size
,
maxspec
))
! Initialize all particles as non-existent
itra1_tmp
(:)
=
-999999999
! Determine auxiliary variables for time interpolation
! Determine auxiliary variables for time interpolation
!*****************************************************
!*****************************************************
dt1
=
real
(
itime
-
memtime
(
1
))
dt1
=
real
(
itime
-
memtime
(
1
))
dt2
=
real
(
memtime
(
2
)
-
itime
)
dt2
=
real
(
memtime
(
2
)
-
itime
)
dtt
=
1.
/(
dt1
+
dt2
)
dtt
=
1.
/(
dt1
+
dt2
)
! Initialize auxiliary variable used to search for vacant storage space
! Initialize auxiliary variable used to search for vacant storage space
!**********************************************************************
!**********************************************************************
minpart
=
1
minpart
=
1
!***************************************
!***************************************
! Western and eastern boundary condition
! Western and eastern boundary condition
!***************************************
!***************************************
! Loop from south to north
! Loop from south to north
!*************************
!*************************
do
jy
=
ny_sn
(
1
),
ny_sn
(
2
)
do
jy
=
ny_sn
(
1
),
ny_sn
(
2
)
! Loop over western (index 1) and eastern (index 2) boundary
! Loop over western (index 1) and eastern (index 2) boundary
!***********************************************************
!***********************************************************
do
k
=
1
,
2
do
k
=
1
,
2
! Loop over all release locations in a column
! Loop over all release locations in a column
!********************************************
!********************************************
do
j
=
1
,
numcolumn_we
(
k
,
jy
)
do
j
=
1
,
numcolumn_we
(
k
,
jy
)
! Determine, for each release location, the area of the corresponding boundary
! Determine, for each release location, the area of the corresponding boundary
!*****************************************************************************
!*****************************************************************************
if
(
j
.eq.
1
)
then
if
(
j
.eq.
1
)
then
deltaz
=
(
zcolumn_we
(
k
,
jy
,
2
)
+
zcolumn_we
(
k
,
jy
,
1
))/
2.
deltaz
=
(
zcolumn_we
(
k
,
jy
,
2
)
+
zcolumn_we
(
k
,
jy
,
1
))/
2.
else
if
(
j
.eq.
numcolumn_we
(
k
,
jy
))
then
else
if
(
j
.eq.
numcolumn_we
(
k
,
jy
))
then
! deltaz=height(nz)-(zcolumn_we(k,jy,j-1)+
! deltaz=height(nz)-(zcolumn_we(k,jy,j-1)+
! + zcolumn_we(k,jy,j))/2.
! + zcolumn_we(k,jy,j))/2.
! In order to avoid taking a very high column for very many particles,
! In order to avoid taking a very high column for very many particles,
! use the deltaz from one particle below instead
! use the deltaz from one particle below instead
deltaz
=
(
zcolumn_we
(
k
,
jy
,
j
)
-
zcolumn_we
(
k
,
jy
,
j
-2
))/
2.
deltaz
=
(
zcolumn_we
(
k
,
jy
,
j
)
-
zcolumn_we
(
k
,
jy
,
j
-2
))/
2.
else
else
deltaz
=
(
zcolumn_we
(
k
,
jy
,
j
+1
)
-
zcolumn_we
(
k
,
jy
,
j
-1
))/
2.
deltaz
=
(
zcolumn_we
(
k
,
jy
,
j
+1
)
-
zcolumn_we
(
k
,
jy
,
j
-1
))/
2.
...
@@ -151,11 +200,11 @@ subroutine boundcond_domainfill(itime,loutend)
...
@@ -151,11 +200,11 @@ subroutine boundcond_domainfill(itime,loutend)
endif
endif
! Interpolate the wind velocity and density to the release location
! Interpolate the wind velocity and density to the release location
!******************************************************************
!******************************************************************
! Determine the model level below the release position
! Determine the model level below the release position
!*****************************************************
!*****************************************************
do
i
=
2
,
nz
do
i
=
2
,
nz
if
(
height
(
i
)
.gt.
zcolumn_we
(
k
,
jy
,
j
))
then
if
(
height
(
i
)
.gt.
zcolumn_we
(
k
,
jy
,
j
))
then
...
@@ -166,15 +215,15 @@ subroutine boundcond_domainfill(itime,loutend)
         end do
6        continue
         ! Vertical distance to the level below and above current position
         !****************************************************************
         dz1=zcolumn_we(k,jy,j)-height(indz)
         dz2=height(indzp)-zcolumn_we(k,jy,j)
         dz=1./(dz1+dz2)
         ! Vertical and temporal interpolation
         !************************************
         do m=1,2
           indexh=memind(m)
...
@@ -191,15 +240,15 @@ subroutine boundcond_domainfill(itime,loutend)
...
@@ -191,15 +240,15 @@ subroutine boundcond_domainfill(itime,loutend)
windx
=
(
windhl
(
1
)
*
dt2
+
windhl
(
2
)
*
dt1
)
*
dtt
windx
=
(
windhl
(
1
)
*
dt2
+
windhl
(
2
)
*
dt1
)
*
dtt
rhox
=
(
rhohl
(
1
)
*
dt2
+
rhohl
(
2
)
*
dt1
)
*
dtt
rhox
=
(
rhohl
(
1
)
*
dt2
+
rhohl
(
2
)
*
dt1
)
*
dtt
! Calculate mass flux
, divided by number of processes
! Calculate mass flux
!********************
********************************
!********************
fluxofmass
=
windx
*
rhox
*
boundarea
*
real
(
lsynctime
)
/
mp_partgroup_np
fluxofmass
=
windx
*
rhox
*
boundarea
*
real
(
lsynctime
)
! If the mass flux is directed into the domain, add it to previous mass fluxes;
! If the mass flux is directed into the domain, add it to previous mass fluxes;
! if it is out of the domain, set accumulated mass flux to zero
! if it is out of the domain, set accumulated mass flux to zero
!******************************************************************************
!******************************************************************************
if
(
k
.eq.
1
)
then
if
(
k
.eq.
1
)
then
if
(
fluxofmass
.ge.
0.
)
then
if
(
fluxofmass
.ge.
0.
)
then
...
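The changed line divides the boundary mass flux by the number of particle processes, so each rank accounts for only its proportional share of the inflowing air mass. A self-contained example with purely illustrative numbers (not taken from the model) follows.

    ! With a boundary-normal wind of 5 m/s, air density 1.2 kg/m3, a boundary
    ! face of 1.0e9 m2 and lsynctime = 900 s, the air mass crossing that face
    ! in one sync interval is 5.0*1.2*1.0e9*900 = 5.4e12 kg; split over 4
    ! particle processes, each rank accounts for 1.35e12 kg.
    program massflux_example
      implicit none
      real    :: windx = 5.0, rhox = 1.2, boundarea = 1.0e9
      integer :: lsynctime = 900, mp_partgroup_np = 4
      real    :: fluxofmass

      fluxofmass = windx*rhox*boundarea*real(lsynctime)/mp_partgroup_np
      write(*,*) 'per-process mass through face [kg]:', fluxofmass
    end program massflux_example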
@@ -216,10 +265,10 @@ subroutine boundcond_domainfill(itime,loutend)
...
@@ -216,10 +265,10 @@ subroutine boundcond_domainfill(itime,loutend)
endif
endif
accmasst
=
accmasst
+
acc_mass_we
(
k
,
jy
,
j
)
accmasst
=
accmasst
+
acc_mass_we
(
k
,
jy
,
j
)
! If the accumulated mass exceeds half the mass that each particle shall carry,
! If the accumulated mass exceeds half the mass that each particle shall carry,
! one (or more) particle(s) is (are) released and the accumulated mass is
! one (or more) particle(s) is (are) released and the accumulated mass is
! reduced by the mass of this (these) particle(s)
! reduced by the mass of this (these) particle(s)
!******************************************************************************
!******************************************************************************
if
(
acc_mass_we
(
k
,
jy
,
j
)
.ge.
xmassperparticle
/
2.
)
then
if
(
acc_mass_we
(
k
,
jy
,
j
)
.ge.
xmassperparticle
/
2.
)
then
mmass
=
int
((
acc_mass_we
(
k
,
jy
,
j
)
+
xmassperparticle
/
2.
)/
&
mmass
=
int
((
acc_mass_we
(
k
,
jy
,
j
)
+
xmassperparticle
/
2.
)/
&
...
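The comment above describes the release threshold: once the accumulated mass at a boundary release point exceeds half the per-particle mass, the number of particles released is int((acc_mass + xmassperparticle/2)/xmassperparticle) and that much mass is subtracted again. A short worked example with made-up numbers, following the same formula, is sketched below.

    ! Illustrative numbers only: if each particle carries 2.0e9 kg of air and
    ! the accumulated inflow at a release point has reached 6.5e9 kg, then
    ! int((6.5e9 + 1.0e9)/2.0e9) = 3 particles are released and 3*2.0e9 kg is
    ! subtracted, leaving about 0.5e9 kg for the next interval.
    program release_threshold_example
      implicit none
      real    :: xmassperparticle = 2.0e9, acc_mass = 6.5e9
      integer :: mmass = 0

      if (acc_mass .ge. xmassperparticle/2.) then
        mmass    = int((acc_mass + xmassperparticle/2.)/xmassperparticle)
        acc_mass = acc_mass - real(mmass)*xmassperparticle
      end if
      write(*,*) 'particles released:', mmass, ' mass carried over [kg]:', acc_mass
    end program release_threshold_example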
@@ -231,43 +280,45 @@ subroutine boundcond_domainfill(itime,loutend)
...
@@ -231,43 +280,45 @@ subroutine boundcond_domainfill(itime,loutend)
endif
endif
do
m
=
1
,
mmass
do
m
=
1
,
mmass
do
ipart
=
minpart
,
maxpart
_mpi
do
ipart
=
minpart
,
maxpart
! If a vacant storage space is found, attribute everything to this array element
! If a vacant storage space is found, attribute everything to this array element
!*****************************************************************************
! TODO: for the MPI version this test can be removed, as all
! elements in _tmp arrays are initialized to zero
!*****************************************************************************
if
(
itra1
(
ipart
)
.ne.
itime
)
then
if
(
itra1
_tmp
(
ipart
)
.ne.
itime
)
then
! Assign particle positions
! Assign particle positions
!**************************
!**************************
xtra1
(
ipart
)
=
real
(
nx_we
(
k
))
xtra1
_tmp
(
ipart
)
=
real
(
nx_we
(
k
))
if
(
jy
.eq.
ny_sn
(
1
))
then
if
(
jy
.eq.
ny_sn
(
1
))
then
ytra1
(
ipart
)
=
real
(
jy
)
+0.5
*
ran1
(
idummy
)
ytra1
_tmp
(
ipart
)
=
real
(
jy
)
+0.5
*
ran1
(
idummy
)
else
if
(
jy
.eq.
ny_sn
(
2
))
then
else
if
(
jy
.eq.
ny_sn
(
2
))
then
ytra1
(
ipart
)
=
real
(
jy
)
-0.5
*
ran1
(
idummy
)
ytra1
_tmp
(
ipart
)
=
real
(
jy
)
-0.5
*
ran1
(
idummy
)
else
else
ytra1
(
ipart
)
=
real
(
jy
)
+
(
ran1
(
idummy
)
-.5
)
ytra1
_tmp
(
ipart
)
=
real
(
jy
)
+
(
ran1
(
idummy
)
-.5
)
endif
endif
if
(
j
.eq.
1
)
then
if
(
j
.eq.
1
)
then
ztra1
(
ipart
)
=
zcolumn_we
(
k
,
jy
,
1
)
+
(
zcolumn_we
(
k
,
jy
,
2
)
-
&
ztra1
_tmp
(
ipart
)
=
zcolumn_we
(
k
,
jy
,
1
)
+
(
zcolumn_we
(
k
,
jy
,
2
)
-
&
zcolumn_we
(
k
,
jy
,
1
))/
4.
zcolumn_we
(
k
,
jy
,
1
))/
4.