flexpart / flexpart

Commit d9f0585f, authored May 08, 2017 by Sabine

Merge branch 'dev' of git.nilu.no:flexpart/flexpart into dev

parents d404d981 c8fc7249

Changes: 29
src/FLEXPART.f90
...
...
@@ -66,9 +66,10 @@ program flexpart
  end do
  call gasdev1(idummy,rannumb(maxrand),rannumb(maxrand-1))

  ! FLEXPART version string
  flexversion_major='10' ! Major version number, also used for species file names
  flexversion='Version '//trim(flexversion_major)//'.0beta (2015-05-01)'
  flexversion='Version '//trim(flexversion_major)//'.1beta (2016-11-02)'
  verbosity=0

  ! Read the pathnames where input/output files are stored
...
...
@@ -383,6 +384,17 @@ program flexpart
    end do
  end do

  ! Inform whether output kernel is used or not
  !*********************************************
  if (lroot) then
    if (lnokernel) then
      write(*,*) "Concentrations are calculated without using kernel"
    else
      write(*,*) "Concentrations are calculated using kernel"
    end if
  end if

  ! Calculate particle trajectories
  !********************************
...
...
@@ -401,11 +413,16 @@ program flexpart
  call timemanager

! NIK 16.02.2005
  write(*,*) '**********************************************'
  write(*,*) 'Total number of occurences of below-cloud scavenging', tot_blc_count
  write(*,*) 'Total number of occurences of in-cloud scavenging', tot_inc_count
  write(*,*) '**********************************************'
  do i=1,nspec
    write(*,*) '**********************************************'
    write(*,*) 'Scavenging statistics for species ', species(i), ':'
    write(*,*) 'Total number of occurences of below-cloud scavenging', &
         &tot_blc_count(i)
    write(*,*) 'Total number of occurences of in-cloud scavenging', &
         &tot_inc_count(i)
    write(*,*) '**********************************************'
  end do

  write(*,*) 'CONGRATULATIONS: YOU HAVE SUCCESSFULLY COMPLETED A FLE&
       &XPART MODEL RUN!'
...
...
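The per-species loop above treats tot_blc_count and tot_inc_count as arrays indexed by species, where the replaced lines printed single scalars. The counters themselves are declared elsewhere and are not shown in this excerpt; the following is only a hedged sketch of the shape those declarations imply. The module name, the subroutine, and the placeholder maxspec value are illustrative; the 64-bit kind is chosen to be consistent with the MPI_INTEGER8 reductions in FLEXPART_MPI.f90 below.

! Sketch only, not the actual FLEXPART declarations.
module scav_counters_sketch
  implicit none
  integer, parameter :: maxspec = 10                       ! placeholder value
  integer, parameter :: i8 = selected_int_kind(18)         ! 64-bit integers
  integer(i8), dimension(maxspec) :: tot_blc_count = 0_i8  ! below-cloud events per species
  integer(i8), dimension(maxspec) :: tot_inc_count = 0_i8  ! in-cloud events per species
contains
  ! Wet deposition would increment one counter per scavenging event:
  subroutine count_scavenging_event(ks, below_cloud)
    integer, intent(in) :: ks            ! species index, 1..nspec
    logical, intent(in) :: below_cloud   ! .true. for below-cloud scavenging
    if (below_cloud) then
      tot_blc_count(ks) = tot_blc_count(ks) + 1_i8
    else
      tot_inc_count(ks) = tot_inc_count(ks) + 1_i8
    end if
  end subroutine count_scavenging_event
end module scav_counters_sketch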
src/FLEXPART_MPI.f90
...
...
@@ -54,15 +54,17 @@ program flexpart
  character(len=256) :: inline_options  !pathfile, flexversion, arg2

  ! Initialize mpi
  !*********************
  call mpif_init
  if (mp_measure_time) call mpif_mtime('flexpart',0)

  ! Initialize arrays in com_mod
  !*****************************
  call com_mod_allocate_part(maxpart_mpi)

  ! Initialize arrays in com_mod
  !*****************************
  if (.not.(lmpreader.and.lmp_use_reader)) call com_mod_allocate_part(maxpart_mpi)

  ! Generate a large number of random numbers
  !******************************************
...
...
@@ -78,7 +80,7 @@ program flexpart
  ! FLEXPART version string
  flexversion_major='10' ! Major version number, also used for species file names
  ! flexversion='Ver. 10 Beta MPI (2015-05-01)'
  flexversion='Ver. '//trim(flexversion_major)//' Beta MPI (2015-05-01)'
  flexversion='Ver. '//trim(flexversion_major)//'.1beta MPI (2016-11-02)'
  verbosity=0

  ! Read the pathnames where input/output files are stored
...
...
@@ -305,9 +307,11 @@ program flexpart
    print*,'Initialize all particles to non-existent'
  endif

  do j=1, size(itra1) ! maxpart_mpi
    itra1(j)=-999999999
  end do
  if (.not.(lmpreader.and.lmp_use_reader)) then
    do j=1, size(itra1) ! maxpart_mpi
      itra1(j)=-999999999
    end do
  end if

  ! For continuation of previous run, read in particle positions
  !*************************************************************
...
...
@@ -317,7 +321,7 @@ program flexpart
      print*,'call readpartpositions'
    endif

    ! readwind process skips this step
    if (lmp_use_reader.and..not.lmpreader) call readpartpositions
    if (.not.(lmpreader.and.lmp_use_reader)) call readpartpositions
  else
    if (verbosity.gt.0 .and. lroot) then
      print*,'numpart=0, numparticlecount=0'
...
...
@@ -424,6 +428,16 @@ program flexpart
    end do
  end do

  ! Inform whether output kernel is used or not
  !*********************************************
  if (lroot) then
    if (lnokernel) then
      write(*,*) "Concentrations are calculated without using kernel"
    else
      write(*,*) "Concentrations are calculated using kernel"
    end if
  end if

  ! Calculate particle trajectories
  !********************************
...
...
@@ -447,24 +461,29 @@ program flexpart
! NIK 16.02.2005
  if (lroot) then
    call MPI_Reduce(MPI_IN_PLACE, tot_blc_count, 1, MPI_INTEGER8, MPI_SUM, id_root, &
    call MPI_Reduce(MPI_IN_PLACE, tot_blc_count, nspec, MPI_INTEGER8, MPI_SUM, id_root, &
         &mp_comm_used, mp_ierr)
    call MPI_Reduce(MPI_IN_PLACE, tot_inc_count, 1, MPI_INTEGER8, MPI_SUM, id_root, &
    call MPI_Reduce(MPI_IN_PLACE, tot_inc_count, nspec, MPI_INTEGER8, MPI_SUM, id_root, &
         &mp_comm_used, mp_ierr)
  else
    if (mp_partgroup_pid.ge.0) then ! Skip for readwind process
      call MPI_Reduce(tot_blc_count, 0, 1, MPI_INTEGER8, MPI_SUM, id_root, &
      call MPI_Reduce(tot_blc_count, 0, nspec, MPI_INTEGER8, MPI_SUM, id_root, &
           &mp_comm_used, mp_ierr)
      call MPI_Reduce(tot_inc_count, 0, 1, MPI_INTEGER8, MPI_SUM, id_root, &
      call MPI_Reduce(tot_inc_count, 0, nspec, MPI_INTEGER8, MPI_SUM, id_root, &
           &mp_comm_used, mp_ierr)
    end if
  end if

  if (lroot) then
    write(*,*) '**********************************************'
    write(*,*) 'Total number of occurences of below-cloud scavenging', tot_blc_count
    write(*,*) 'Total number of occurences of in-cloud scavenging', tot_inc_count
    write(*,*) '**********************************************'
    do i=1,nspec
      write(*,*) '**********************************************'
      write(*,*) 'Scavenging statistics for species ', species(i), ':'
      write(*,*) 'Total number of occurences of below-cloud scavenging', &
           &tot_blc_count(i)
      write(*,*) 'Total number of occurences of in-cloud scavenging', &
           &tot_inc_count(i)
      write(*,*) '**********************************************'
    end do

    write(*,*) 'CONGRATULATIONS: YOU HAVE SUCCESSFULLY COMPLETED A FLE&
         &XPART MODEL RUN!'
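In the hunk above, the count argument of each MPI_Reduce call changes from 1 to nspec, so the per-species counter arrays are summed element-wise across all processes onto the root rank. Below is a minimal, self-contained sketch of that reduction pattern under assumed values; only the MPI routine names, MPI_IN_PLACE, MPI_SUM and MPI_INTEGER8 are real API names, while nspec, id_root and the dummy data are illustrative.

program reduce_sketch
  use mpi
  implicit none
  integer, parameter :: nspec = 3                      ! illustrative species count
  integer, parameter :: i8 = selected_int_kind(18)     ! matches MPI_INTEGER8
  integer, parameter :: id_root = 0
  integer(i8) :: counts(nspec), dummy(nspec)
  integer :: myrank, ierr

  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, myrank, ierr)
  counts(:) = int(myrank+1, i8)                        ! dummy per-species counters

  if (myrank == id_root) then
    ! Root passes MPI_IN_PLACE: its own counters are both input and result
    call MPI_Reduce(MPI_IN_PLACE, counts, nspec, MPI_INTEGER8, MPI_SUM, &
         id_root, MPI_COMM_WORLD, ierr)
    write(*,*) 'element-wise sums over all ranks:', counts
  else
    ! Other ranks only contribute; their receive buffer is ignored
    call MPI_Reduce(counts, dummy, nspec, MPI_INTEGER8, MPI_SUM, &
         id_root, MPI_COMM_WORLD, ierr)
  end if

  call MPI_Finalize(ierr)
end program reduce_sketch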
...
...
src/boundcond_domainfill_mpi.f90
...
...
@@ -20,28 +20,39 @@
!**********************************************************************

subroutine boundcond_domainfill(itime,loutend)
!                                  i      i
!*****************************************************************************
!                                                                            *
! Particles are created by this subroutine continuously throughout the      *
! simulation at the boundaries of the domain-filling box.                    *
! All particles carry the same amount of mass which alltogether comprises the*
! mass of air within the box, which remains (more or less) constant.        *
!                                                                            *
!     Author: A. Stohl                                                       *
!                                                                            *
!     16 October 2002                                                        *
!                                                                            *
!*****************************************************************************
!                                                                            *
! Variables:                                                                 *
!                                                                            *
! nx_we(2)       grid indices for western and eastern boundary of domain-   *
!                filling trajectory calculations                            *
! ny_sn(2)       grid indices for southern and northern boundary of domain- *
!                filling trajectory calculations                            *
!                                                                            *
!*****************************************************************************
! CHANGES
!   08/2016 eso: MPI version:
!
!   -Root process release particles and distributes to other processes.
!    Temporary arrays are used, also for the non-root (receiving) processes.
!   -The scheme can be improved by having all processes report numpart
!    (keeping track of how many particles have left the domain), so that
!    a proportional amount of new particles can be distributed (however
!    we have a separate function called from timemanager that will
!    redistribute particles among processes if there are imbalances)
!*****************************************************************************
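The CHANGES note above describes the new release scheme only in prose: the root process creates all new particles in temporary arrays and then hands every other process its share. The program below is a rough, hypothetical illustration of that send/receive pattern, not FLEXPART code; only the MPI routine names and the MPI_INTEGER/MPI_DOUBLE_PRECISION datatypes are real API names, while the even split, the tag value and the variable names are invented stand-ins for the *_tmp arrays and numrel_mpi used in this file.

program root_release_sketch
  use mpi
  implicit none
  integer, parameter :: maxnew = 8            ! illustrative number of new particles
  integer, parameter :: mtag   = 100          ! illustrative message tag
  double precision :: xtra1_tmp(maxnew)       ! root-side temporary positions
  double precision, allocatable :: xlocal(:)  ! share kept/received by each process
  integer, allocatable :: numrel(:), istart(:)
  integer :: nprocs, myrank, ierr, dest, nrecv

  call MPI_Init(ierr)
  call MPI_Comm_size(MPI_COMM_WORLD, nprocs, ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, myrank, ierr)

  if (myrank == 0) then
    ! Root "releases" all new particles into a temporary array,
    call random_number(xtra1_tmp)
    ! decides how many each process gets (here simply an even split),
    allocate(numrel(0:nprocs-1), istart(0:nprocs-1))
    numrel(:) = maxnew/nprocs
    numrel(0) = numrel(0) + mod(maxnew,nprocs)
    istart(0) = 1
    do dest = 1, nprocs-1
      istart(dest) = istart(dest-1) + numrel(dest-1)
    end do
    ! keeps its own share and sends every other process its slice.
    allocate(xlocal(numrel(0)))
    xlocal = xtra1_tmp(1:numrel(0))
    do dest = 1, nprocs-1
      call MPI_Send(numrel(dest), 1, MPI_INTEGER, dest, mtag, &
           MPI_COMM_WORLD, ierr)
      call MPI_Send(xtra1_tmp(istart(dest)), numrel(dest), &
           MPI_DOUBLE_PRECISION, dest, mtag+1, MPI_COMM_WORLD, ierr)
    end do
  else
    ! Receiving processes first learn their count, then receive the data.
    call MPI_Recv(nrecv, 1, MPI_INTEGER, 0, mtag, MPI_COMM_WORLD, &
         MPI_STATUS_IGNORE, ierr)
    allocate(xlocal(nrecv))
    call MPI_Recv(xlocal, nrecv, MPI_DOUBLE_PRECISION, 0, mtag+1, &
         MPI_COMM_WORLD, MPI_STATUS_IGNORE, ierr)
  end if

  write(*,*) 'rank', myrank, 'holds', size(xlocal), 'new particles'
  call MPI_Finalize(ierr)
end program root_release_sketch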
  use point_mod
  use par_mod
...
...
@@ -54,7 +65,8 @@ subroutine boundcond_domainfill(itime,loutend)
  real :: dz,dz1,dz2,dt1,dt2,dtt,ylat,xm,cosfact,accmasst
  integer :: itime,in,indz,indzp,i,loutend
  integer :: j,k,ix,jy,m,indzh,indexh,minpart,ipart,mmass
  integer :: numactiveparticles
  integer :: numactiveparticles,numpart_total,rel_counter
  integer,allocatable,dimension(:) :: numrel_mpi !, numactiveparticles_mpi

  real :: windl(2),rhol(2)
  real :: windhl(2),rhohl(2)
...
...
@@ -65,26 +77,37 @@ subroutine boundcond_domainfill(itime,loutend)
  real :: pvpart,ddx,ddy,rddx,rddy,p1,p2,p3,p4,y1(2),yh1(2)
  integer :: idummy = -11
  integer :: mtag
  logical :: first_call=.true.
  ! Sizes of temporary arrays are maxpartfract*maxpart. Increase maxpartfract if
  ! needed.
  real,parameter :: maxpartfract=0.1
  integer :: tmp_size = int(maxpartfract*maxpart)
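  ! Example sizing with made-up numbers: if par_mod sets maxpart = 1000000,
  ! then maxpartfract = 0.1 gives tmp_size = int(0.1*1000000) = 100000 slots
  ! in each temporary array; maxpartfract has to be raised if a single call of
  ! this routine may release more new particles than that.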
  ! Use different seed for each process
  if (first_call) then
    idummy=idummy+mp_seed
    first_call=.false.
  end if

  ! If domain-filling is global, no boundary conditions are needed
  !***************************************************************

  if (gdomainfill) return

  accmasst=0.
  numactiveparticles=0
  ! Keep track of active particles on each process
  allocate(numrel_mpi(0:mp_partgroup_np-1))
  ! numactiveparticles_mpi(0:mp_partgroup_np-1)

  ! Terminate trajectories that have left the domain, if domain-filling
  ! trajectory calculation domain is not global
  !********************************************************************

  ! New particles to be released on each process
  numrel_mpi(:) = 0

  ! Terminate trajectories that have left the domain, if domain-filling
  ! trajectory calculation domain is not global. Done for all processes
  !********************************************************************

  do i=1,numpart
    if (itra1(i).eq.itime) then
...
...
@@ -97,504 +120,681 @@ subroutine boundcond_domainfill(itime,loutend)
      if (itra1(i).ne.-999999999) numactiveparticles = &
           numactiveparticles+1
  end do
  ! numactiveparticles_mpi(mp_partid) = numactiveparticles

  ! Determine auxiliary variables for time interpolation
  !*****************************************************

  ! Collect number of active particles from all processes
  ! call MPI_Allgather(numactiveparticles, 1, MPI_INTEGER, &
  !      &numactiveparticles_mpi, 1, MPI_INTEGER, mp_comm_used, mp_ierr)

  dt1=real(itime-memtime(1))
  dt2=real(memtime(2)-itime)
  dtt=1./(dt1+dt2)

  ! Initialize auxiliary variable used to search for vacant storage space
  !**********************************************************************

  ! Total number of new releases
  numpart_total=0

  minpart=1

  !***************************************
  ! Western and eastern boundary condition
  !***************************************

  ! This section only done by root process
  !***************************************

  if (lroot) then

  ! Use separate arrays for newly released particles
  !*************************************************

    allocate(itra1_tmp(tmp_size),npoint_tmp(tmp_size),nclass_tmp(tmp_size),&
         & idt_tmp(tmp_size),itramem_tmp(tmp_size),itrasplit_tmp(tmp_size),&
         & xtra1_tmp(tmp_size),ytra1_tmp(tmp_size),ztra1_tmp(tmp_size),&
         & xmass1_tmp(tmp_size,maxspec))

    ! Initialize all particles as non-existent
    itra1_tmp(:)=-999999999

    ! Determine auxiliary variables for time interpolation
    !*****************************************************

  ! Loop from south to north
  !*************************

    dt1=real(itime-memtime(1))
    dt2=real(memtime(2)-itime)
    dtt=1./(dt1+dt2)

  do jy=ny_sn(1),ny_sn(2)

    ! Initialize auxiliary variable used to search for vacant storage space
    !**********************************************************************

    ! Loop over western (index 1) and eastern (index 2) boundary
    !***********************************************************

    minpart=1

    do k=1,2

    !***************************************
    ! Western and eastern boundary condition
    !***************************************

      ! Loop over all release locations in a column
      !********************************************

    ! Loop from south to north
    !*************************

      do j=1,numcolumn_we(k,jy)

    do jy=ny_sn(1),ny_sn(2)

  ! Determine, for each release location, the area of the corresponding boundary
  !*****************************************************************************

      ! Loop over western (index 1) and eastern (index 2) boundary
      !***********************************************************

        if (j.eq.1) then
          deltaz=(zcolumn_we(k,jy,2)+zcolumn_we(k,jy,1))/2.
        else if (j.eq.numcolumn_we(k,jy)) then
  ! deltaz=height(nz)-(zcolumn_we(k,jy,j-1)+
  !    + zcolumn_we(k,jy,j))/2.
  ! In order to avoid taking a very high column for very many particles,
  ! use the deltaz from one particle below instead
          deltaz=(zcolumn_we(k,jy,j)-zcolumn_we(k,jy,j-2))/2.
        else
          deltaz=(zcolumn_we(k,jy,j+1)-zcolumn_we(k,jy,j-1))/2.
        endif
        if ((jy.eq.ny_sn(1)).or.(jy.eq.ny_sn(2))) then
          boundarea=deltaz*111198.5/2.*dy
        else
          boundarea=deltaz*111198.5*dy
        endif

      do k=1,2

        ! Loop over all release locations in a column
        !********************************************

  ! Interpolate the wind velocity and density to the release location
  !******************************************************************

        do j=1,numcolumn_we(k,jy)

  ! Determine the model level below the release position
  !*****************************************************

          ! Determine, for each release location, the area of the corresponding boundary
          !*****************************************************************************

        do i=2,nz
          if (height(i).gt.zcolumn_we(k,jy,j)) then
            indz=i-1
            indzp=i
            goto 6

          if (j.eq.1) then
            deltaz=(zcolumn_we(k,jy,2)+zcolumn_we(k,jy,1))/2.
          else if (j.eq.numcolumn_we(k,jy)) then
  ! deltaz=height(nz)-(zcolumn_we(k,jy,j-1)+
  !    + zcolumn_we(k,jy,j))/2.
  ! In order to avoid taking a very high column for very many particles,
  ! use the deltaz from one particle below instead
            deltaz=(zcolumn_we(k,jy,j)-zcolumn_we(k,jy,j-2))/2.
          else
            deltaz=(zcolumn_we(k,jy,j+1)-zcolumn_we(k,jy,j-1))/2.
          endif
          if ((jy.eq.ny_sn(1)).or.(jy.eq.ny_sn(2))) then
            boundarea=deltaz*111198.5/2.*dy
          else
            boundarea=deltaz*111198.5*dy
          endif
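  ! Background note: 111198.5 is the length of one degree of latitude in
  ! metres, so deltaz*111198.5*dy approximates the area (m2) of the boundary
  ! face belonging to one release location; at the southern- and northernmost
  ! rows (jy equal to ny_sn(1) or ny_sn(2)) presumably only half a grid cell
  ! lies inside the domain, hence the additional factor of /2.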
          end do
6         continue

  ! Vertical distance to the level below and above current position
  !****************************************************************

          dz1=zcolumn_we(k,jy,j)-height(indz)
          dz2=height(indzp)-zcolumn_we(k,jy,j)
          dz=1./(dz1+dz2)

  ! Interpolate the wind velocity and density to the release location
  !******************************************************************

  ! Vertical and temporal interpolation
  !************************************

  ! Determine the model level below the release position
  !*****************************************************

          do m=1,2
            indexh=memind(m)
            do in=1,2
              indzh=indz+in-1
              windl(in)=uu(nx_we(k),jy,indzh,indexh)
              rhol(in)=rho(nx_we(k),jy,indzh,indexh)

          do i=2,nz
            if (height(i).gt.zcolumn_we(k,jy,j)) then
              indz=i-1
              indzp=i
              goto 6
            endif
          end do
6         continue

            windhl(m)=(dz2*windl(1)+dz1*windl(2))*dz
            rhohl(m)=(dz2*rhol(1)+dz1*rhol(2))*dz
          end do

  ! Vertical distance to the level below and above current position
  !****************************************************************

          windx=(windhl(1)*dt2+windhl(2)*dt1)*dtt
          rhox=(rhohl(1)*dt2+rhohl(2)*dt1)*dtt

          dz1=zcolumn_we(k,jy,j)-height(indz)
          dz2=height(indzp)-zcolumn_we(k,jy,j)
          dz=1./(dz1+dz2)

  ! Calculate mass flux, divided by number of processes
  !****************************************************

  ! Vertical and temporal interpolation
  !************************************

          fluxofmass=windx*rhox*boundarea*real(lsynctime)/mp_partgroup_np

          do m=1,2
            indexh=memind(m)
            do in=1,2
              indzh=indz+in-1
              windl(in)=uu(nx_we(k),jy,indzh,indexh)
              rhol(in)=rho(nx_we(k),jy,indzh,indexh)
            end do

            windhl(m)=(dz2*windl(1)+dz1*windl(2))*dz
            rhohl(m)=(dz2*rhol(1)+dz1*rhol(2))*dz
          end do

          windx=(windhl(1)*dt2+windhl(2)*dt1)*dtt
          rhox=(rhohl(1)*dt2+rhohl(2)*dt1)*dtt

  ! Calculate mass flux
  !********************

          fluxofmass=windx*rhox*boundarea*real(lsynctime)

  ! If the mass flux is directed into the domain, add it to previous mass fluxes;
  ! if it is out of the domain, set accumulated mass flux to zero
  !******************************************************************************

          if (k.eq.1) then
            if (fluxofmass.ge.0.) then
              acc_mass_we(k,jy,j)=acc_mass_we(k,jy,j)+fluxofmass

  ! If the mass flux is directed into the domain, add it to previous mass fluxes;
  ! if it is out of the domain, set accumulated mass flux to zero
  !******************************************************************************

          if (k.eq.1) then
            if (fluxofmass.ge.0.) then
              acc_mass_we(k,jy,j)=acc_mass_we(k,jy,j)+fluxofmass
            else
              acc_mass_we(k,jy,j)=0.
            endif
          else
            acc_mass_we(k,jy,j)=0.
            if (fluxofmass.le.0.) then
              acc_mass_we(k,jy,j)=acc_mass_we(k,jy,j)+abs(fluxofmass)
            else
              acc_mass_we(k,jy,j)=0.
            endif
          endif
          else
            if (fluxofmass.le.0.) then
              acc_mass_we(k,jy,j)=acc_mass_we(k,jy,j)+abs(fluxofmass)

          accmasst=accmasst+acc_mass_we(k,jy,j)

  ! If the accumulated mass exceeds half the mass that each particle shall carry,
  ! one (or more) particle(s) is (are) released and the accumulated mass is
  ! reduced by the mass of this (these) particle(s)
  !******************************************************************************

          if (acc_mass_we(k,jy,j).ge.xmassperparticle/2.) then
            mmass=int((acc_mass_we(k,jy,j)+xmassperparticle/2.)/ &
                 xmassperparticle)
            acc_mass_we(k,jy,j)=acc_mass_we(k,jy,j)- &
                 real(mmass)*xmassperparticle
          else
            acc_mass_we(k,jy,j)=0.
            mmass=0
          endif
          endif

          accmasst=accmasst+acc_mass_we(k,jy,j)

  ! If the accumulated mass exceeds half the mass that each particle shall carry,
  ! one (or more) particle(s) is (are) released and the accumulated mass is
  ! reduced by the mass of this (these) particle(s)
  !******************************************************************************

          if (acc_mass_we(k,jy,j).ge.xmassperparticle/2.) then
            mmass=int((acc_mass_we(k,jy,j)+xmassperparticle/2.)/ &
                 xmassperparticle)
            acc_mass_we(k,jy,j)=acc_mass_we(k,jy,j)- &
                 real(mmass)*xmassperparticle
          else
            mmass=0
          endif
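  ! Worked example with made-up numbers: if acc_mass_we has grown to
  ! 1.7*xmassperparticle, then mmass = int((1.7+0.5)*xmassperparticle/
  ! xmassperparticle) = int(2.2) = 2 particles are released and acc_mass_we is
  ! reduced by 2*xmassperparticle, leaving -0.3*xmassperparticle to be carried
  ! over; adding xmassperparticle/2. before the int() makes the release round
  ! to the nearest whole particle instead of always rounding down.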
          do m=1,mmass
            do ipart=minpart,maxpart_mpi

  ! If a vacant storage space is found, attribute everything to this array element
  !*****************************************************************************

              if (itra1(ipart).ne.itime) then

  ! Assign particle positions
  !**************************

                xtra1(ipart)=real(nx_we(k))
                if (jy.eq.ny_sn(1)) then
                  ytra1(ipart)=real(jy)+0.5*ran1(idummy)
                else if (jy.eq.ny_sn(2)) then
                  ytra1(ipart)=real(jy)-0.5*ran1(idummy)
                else
                  ytra1(ipart)=real(jy)+(ran1(idummy)-.5)
                endif
                if (j.eq.1) then
                  ztra1(ipart)=zcolumn_we(k,jy,1)+(zcolumn_we(k,jy,2)- &
                       zcolumn_we(k,jy,1))/4.
                else if (j.eq.numcolumn_we(k,jy)) then
                  ztra1(ipart)=(2.*zcolumn_we(k,jy,j)+ &
                       zcolumn_we(k,jy,j-1)+height(nz))/4.
                else
                  ztra1(ipart)=zcolumn_we(k,jy,j-1)+ran1(idummy)* &
                       (zcolumn_we(k,jy,j+1)-zcolumn_we(k,jy,j-1))
                endif

  ! Interpolate PV to the particle position
  !****************************************
                ixm=int(xtra1(ipart))
                jym=int(ytra1(ipart))
                ixp=ixm+1
                jyp=jym+1
                ddx=xtra1(ipart)-real(ixm)
                ddy=ytra1(ipart)-real(jym)
                rddx=1.-ddx
                rddy=1.-ddy
                p1=rddx*rddy
                p2=ddx*rddy
                p3=rddx*ddy
                p4=ddx*ddy
                do i=2,nz
                  if (height(i).gt.ztra1(ipart)) then
                    indzm=i-1
                    indzp=i
                    goto 26

          do m=1,mmass
            do ipart=minpart,maxpart

  ! If a vacant storage space is found, attribute everything to this array element
  ! TODO: for the MPI version this test can be removed, as all
  ! elements in _tmp arrays are initialized to zero
  !*****************************************************************************