flexpart / flexpart / Commits

Commit 16b61a5e
authored Oct 14, 2016 by Espen Sollum

Reworked the domain-filling option (MPI). Fixed a slow loop which had errors in loop counter (MPI)

parent 861805ae
11 changed files
src/FLEXPART_MPI.f90

...
@@ -54,15 +54,15 @@ program flexpart
   character(len=256) :: inline_options  !pathfile, flexversion, arg2

 ! Initialize mpi
 !*********************
   call mpif_init
   if (mp_measure_time) call mpif_mtime('flexpart',0)

 ! Initialize arrays in com_mod
 !*****************************
-  call com_mod_allocate_part(maxpart_mpi)
+  if (.not.lmpreader) call com_mod_allocate_part(maxpart_mpi)

 ! Generate a large number of random numbers
 !******************************************
...
@@ -305,9 +305,11 @@ program flexpart
     print*,'Initialize all particles to non-existent'
   endif

-  do j=1, size(itra1) ! maxpart_mpi
-    itra1(j)=-999999999
-  end do
+  if (.not.lmpreader) then
+    do j=1, size(itra1) ! maxpart_mpi
+      itra1(j)=-999999999
+    end do
+  end if

 ! For continuation of previous run, read in particle positions
 !*************************************************************
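Both hunks above follow the same pattern: when a dedicated reader process is used, only the worker processes need per-particle storage, so the allocation and the sentinel initialization are skipped on the reader rank. A minimal sketch of that split, assuming (as the hunks suggest) that lmpreader is set on exactly one rank and that com_mod_allocate_part allocates the per-particle arrays such as itra1:

  program reader_worker_sketch
    use mpi
    implicit none
    integer :: ierr, my_rank, n_ranks
    logical :: lmpreader
    call MPI_Init(ierr)
    call MPI_Comm_rank(MPI_COMM_WORLD, my_rank, ierr)
    call MPI_Comm_size(MPI_COMM_WORLD, n_ranks, ierr)
    ! Last rank is reserved for reading wind fields (mirrors id_read = mp_np-1
    ! in mpi_mod.f90 below); it never holds particles.
    lmpreader = (my_rank == n_ranks-1)
    if (.not. lmpreader) then
      ! Workers allocate particle arrays here and mark every slot as
      ! non-existent, e.g. itra1(:) = -999999999, as the second hunk does.
    end if
    call MPI_Finalize(ierr)
  end program reader_worker_sketch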
...
@@ -462,8 +464,10 @@ program flexpart
   if (lroot) then
     write(*,*) '**********************************************'
-    write(*,*) 'Total number of occurences of below-cloud scavenging', tot_blc_count
-    write(*,*) 'Total number of occurences of in-cloud scavenging', tot_inc_count
+    write(*,*) 'Total number of occurences of below-cloud scavenging', &
+         & tot_blc_count
+    write(*,*) 'Total number of occurences of in-cloud scavenging', &
+         & tot_inc_count
     write(*,*) '**********************************************'
     write(*,*) 'CONGRATULATIONS: YOU HAVE SUCCESSFULLY COMPLETED A FLE&
...
src/boundcond_domainfill_mpi.f90
(diff collapsed)
src/conccalc.f90

...
@@ -125,7 +125,7 @@ subroutine conccalc(itime,weight)
 ! Take density from 2nd wind field in memory (accurate enough, no time interpolation needed)
 !*****************************************************************************
         do ind=indz,indzp
-          rhoprof(ind-indz+1)=p1*rho(ix ,jy ,ind,2) &
+          rhoprof(ind-indz+1)=p1*rho(ix ,jy ,ind,memind(2)) &
               +p2*rho(ixp,jy ,ind,2) &
               +p3*rho(ix ,jyp,ind,2) &
               +p4*rho(ixp,jyp,ind,2)
...
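The fix replaces a hardcoded time-level index with memind(2). A sketch of the assumed semantics: memind maps the logical order of the wind fields held in memory to their physical storage slots, so the slots can be swapped in place when a new field is read instead of copying field data; hardcoding slot 2 silently breaks once that mapping rotates.

  integer :: memind(2)
  memind = (/1, 2/)   ! logical field 1 -> slot 1, logical field 2 -> slot 2
  ! When a new field arrives, swap the roles instead of moving the data:
  memind = (/memind(2), memind(1)/)
  ! Readers must then index rho(..., memind(2)), never rho(..., 2).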
src/concoutput.f90

...
@@ -625,24 +625,24 @@ subroutine concoutput(itime,outnum,gridtotalunc,wetgridtotalunc, &
 ! Reinitialization of grid
 !*************************
 ! do ks=1,nspec
 !   do kp=1,maxpointspec_act
 !     do i=1,numreceptor
 !       creceptor(i,ks)=0.
 !     end do
 !     do jy=0,numygrid-1
 !       do ix=0,numxgrid-1
 !         do l=1,nclassunc
 !           do nage=1,nageclass
 !             do kz=1,numzgrid
 !               gridunc(ix,jy,kz,ks,kp,l,nage)=0.
 !             end do
 !           end do
 !         end do
 !       end do
 !     end do
 !   end do
 ! end do
   creceptor(:,:)=0.
   gridunc(:,:,:,:,:,:,:)=0.
...
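The commented-out loop nest and the two array assignments that replace it are equivalent: Fortran whole-array assignment zeroes every element of an allocated array in one statement and lets the compiler pick the traversal order. A minimal illustration:

  real, allocatable :: grid(:,:,:)
  allocate(grid(10,10,10))
  grid(:,:,:) = 0.   ! same effect as three nested do-loops over every index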
src/concoutput_nest_mpi.f90

...
@@ -104,9 +104,6 @@ subroutine concoutput_nest(itime,outnum)
 ! Measure execution time
   if (mp_measure_time) call mpif_mtime('iotime',0)
-!    call cpu_time(mp_root_time_beg)
-!    mp_root_wtime_beg = mpi_wtime()
-!  end if

   if (verbosity.eq.1) then
     print*,'inside concoutput_surf '
...
src/concoutput_surf.f90
(diff collapsed)
src/init_domainfill_mpi.f90
(diff collapsed)
src/mean_mod.f90

...
@@ -63,9 +63,9 @@ contains
 !    real(sp) :: x_sp(number),xm,xs,xl,xq,xaux
 !    real(sp),parameter :: eps=1.0e-30

+    integer,intent(in) :: number
     real(sp), intent(in) :: x_sp(number)
     real(sp), intent(out) :: xm,xs
-    integer,intent(in) :: number
     real(sp) :: xl,xq,xaux
     real(sp),parameter :: eps=1.0e-30
     integer :: i
...
@@ -115,9 +115,9 @@ contains
     implicit none

+    integer,intent(in) :: number
     real(dp), intent(in) :: x_dp(number)
     real(dp), intent(out) :: xm,xs
-    integer,intent(in) :: number
     real(dp) :: xl,xq,xaux
     real(dp),parameter :: eps=1.0e-30
     integer :: i
...
@@ -167,9 +167,9 @@ contains
     implicit none

+    integer,intent(in) :: number
     real(dp), intent(in) :: x_dp(number)
     real(sp), intent(out) :: xm,xs
-    integer,intent(in) :: number
     real(sp) :: xl,xq,xaux
     real(sp),parameter :: eps=1.0e-30
     integer :: i
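All three hunks make the same fix: a dummy argument used as the bound of an explicit-shape array must be declared before the array declaration that references it, otherwise the specification expression refers to a name whose type is not yet established (an error under implicit none). The pattern the reordering restores, as a minimal sketch:

  subroutine mean_sketch(number, x, xm)
    implicit none
    integer, intent(in)  :: number      ! declare the bound first
    real,    intent(in)  :: x(number)   ! then the array dimensioned by it
    real,    intent(out) :: xm
    xm = sum(x) / real(number)
  end subroutine mean_sketch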
src/mpi_mod.f90

...
@@ -121,7 +121,7 @@ module mpi_mod
   logical, parameter :: mp_dbg_mode = .false.
   logical, parameter :: mp_dev_mode = .false.
   logical, parameter :: mp_dbg_out = .false.
-  logical, parameter :: mp_time_barrier=.false.
+  logical, parameter :: mp_time_barrier=.true.
   logical, parameter :: mp_measure_time=.false.
   logical, parameter :: mp_exact_numpart=.true.
...
@@ -250,7 +250,7 @@ contains
          & 'all procs call getfields. Setting lmp_sync=.true.'
     write(*,FMT='(80("#"))')
   end if
-  lmp_sync=.true.
+! :DBG: eso fix this...
+  lmp_sync=.true.
 end if

 ! TODO: Add more warnings for unimplemented flexpart features
...
@@ -260,6 +260,7 @@ contains
 ! as running with one process less but not using separate read process
 !**********************************************************************
+!    id_read = min(mp_np-1, 1)
     id_read = mp_np-1

     if (mp_pid.eq.id_read) lmpreader=.true.
...
@@ -485,6 +486,19 @@ contains
     integer, intent(in) :: num_part
     integer :: i,jj, addone

+! Exit if too many particles
+    if (num_part.gt.maxpart_mpi) then
+      write(*,*) '#####################################################'
+      write(*,*) '#### ERROR - TOTAL NUMBER OF PARTICLES REQUIRED ####'
+      write(*,*) '#### EXCEEDS THE MAXIMUM ALLOWED NUMBER. REDUCE ####'
+      write(*,*) '#### EITHER NUMBER OF PARTICLES PER RELEASE POINT####'
+      write(*,*) '#### OR INCREASE MAXPART. ####'
+      write(*,*) '#####################################################'
+!      call MPI_FINALIZE(mp_ierr)
+      stop
+    end if
+
 ! Time for MPI communications
 !****************************
     if (mp_measure_time) call mpif_mtime('commtime',0)
...
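This guard stops the run before any MPI transfer when the requested particle count exceeds the per-process capacity. In the MPI version each process holds only its share of the particle arrays, so (assuming the usual FLEXPART convention, not shown in this diff) the limit is the global maximum divided among the particle-holding ranks:

  ! Assumed relation between the global and per-process limits:
  integer :: maxpart_mpi
  maxpart_mpi = maxpart / mp_partgroup_np   ! mp_partgroup_np = number of worker ranks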
@@ -526,7 +540,6 @@ contains
     end do

     if (mp_measure_time) call mpif_mtime('commtime',1)
-    write(*,*) "PID ", mp_partid, "ending MPI_Scatter operation"
     goto 601
...
@@ -534,7 +547,7 @@ contains
     stop

 ! After the transfer of particles it it possible that the value of
-! "numpart" is larger than the actual, so we reduce it if there are
+! "numpart" is larger than the actual used, so we reduce it if there are
 ! invalid particles at the end of the arrays
 601 do i=num_part, 1, -1
       if (itra1(i).eq.-999999999) then
...
@@ -627,6 +640,10 @@ contains
         if (mp_partid.eq.idx_arr(m).or.mp_partid.eq.idx_arr(i)) then
+          if (numparticles_mpi(idx_arr(m)).gt.mp_min_redist.and.&
+               & real(num_trans)/real(numparticles_mpi(idx_arr(m))).gt.mp_redist_fract) then
+! DBG
+! write(*,*) 'mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, numparticles_mpi', &
+! &mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, numparticles_mpi
+! DBG
             call mpif_redist_part(itime, idx_arr(m), idx_arr(i), num_trans/2)
+          end if
         end if
...
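The added condition makes the load balancing conditional: particles move only when the overloaded rank holds more than mp_min_redist particles and the planned transfer is a large enough fraction of its load (mp_redist_fract) to justify the communication cost. The criterion in isolation, as a hedged sketch reusing the same names:

  logical function worth_redistributing(n_src, num_trans, mp_min_redist, mp_redist_fract)
    implicit none
    integer, intent(in) :: n_src            ! particles on the overloaded rank
    integer, intent(in) :: num_trans        ! particles that would be moved
    integer, intent(in) :: mp_min_redist    ! minimum load before balancing kicks in
    real,    intent(in) :: mp_redist_fract  ! minimum worthwhile transfer fraction
    worth_redistributing = n_src > mp_min_redist .and. &
         real(num_trans)/real(n_src) > mp_redist_fract
  end function worth_redistributing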
@@ -660,6 +677,14 @@ contains
     integer :: mtag ! MPI message tag
     integer :: i, j, minpart, ipart, maxnumpart

+! Check for invalid input arguments
+!**********************************
+    if (src_proc.eq.dest_proc) then
+      write(*,*) '<mpi_mod::mpif_redist_part>: Error: &
+           &src_proc.eq.dest_proc'
+      stop
+    end if
+
 ! Measure time for MPI communications
 !************************************
     if (mp_measure_time) call mpif_mtime('commtime',0)
...
@@ -673,11 +698,11 @@ contains
     ll=numpart-num_trans+1
     ul=numpart

-!    if (mp_dev_mode) then
-!      write(*,FMT='(72("#"))')
-!      write(*,*) "Sending ", num_trans, "particles (from/to)", src_proc, dest_proc
-!      write(*,*) "numpart before: ", numpart
-!    end if
+    if (mp_dev_mode) then
+      write(*,FMT='(72("#"))')
+      write(*,*) "Sending ", num_trans, "particles (from/to)", src_proc, dest_proc
+      write(*,*) "numpart before: ", numpart
+    end if

     call MPI_SEND(nclass(ll:ul),num_trans,&
          & MPI_INTEGER,dest_proc,mtag+1,mp_comm_used,mp_ierr)
...
@@ -716,10 +741,10 @@ contains
     numpart = numpart-num_trans

-!    if (mp_dev_mode) then
-!      write(*,*) "numpart after: ", numpart
-!      write(*,FMT='(72("#"))')
-!    end if
+    if (mp_dev_mode) then
+      write(*,*) "numpart after: ", numpart
+      write(*,FMT='(72("#"))')
+    end if

   else if (mp_partid.eq.dest_proc) then
...
@@ -730,11 +755,11 @@ contains
     ll=numpart+1
     ul=numpart+num_trans

-!    if (mp_dev_mode) then
-!      write(*,FMT='(72("#"))')
-!      write(*,*) "Receiving ", num_trans, "particles (from/to)", src_proc, dest_proc
-!      write(*,*) "numpart before: ", numpart
-!    end if
+    if (mp_dev_mode) then
+      write(*,FMT='(72("#"))')
+      write(*,*) "Receiving ", num_trans, "particles (from/to)", src_proc, dest_proc
+      write(*,*) "numpart before: ", numpart
+    end if

 ! We could receive the data directly at nclass(ll:ul) etc., but this leaves
 ! vacant spaces at lower indices. Using temporary arrays instead.
...
@@ -784,34 +809,35 @@ contains
     minpart=1
     do i=1, num_trans
 ! Take into acount that we may have transferred invalid particles
-      if (itra1_tmp(minpart).ne.itime) goto 200
+      if (itra1_tmp(i).ne.itime) cycle
       do ipart=minpart, maxnumpart
         if (itra1(ipart).ne.itime) then
-          itra1(ipart) = itra1_tmp(minpart)
-          npoint(ipart) = npoint_tmp(minpart)
-          nclass(ipart) = nclass_tmp(minpart)
-          idt(ipart) = idt_tmp(minpart)
-          itramem(ipart) = itramem_tmp(minpart)
-          itrasplit(ipart) = itrasplit_tmp(minpart)
-          xtra1(ipart) = xtra1_tmp(minpart)
-          ytra1(ipart) = ytra1_tmp(minpart)
-          ztra1(ipart) = ztra1_tmp(minpart)
-          xmass1(ipart,:) = xmass1_tmp(minpart,:)
-! Increase numpart, if necessary
-          numpart=max(numpart,ipart)
+          itra1(ipart) = itra1_tmp(i)
+          npoint(ipart) = npoint_tmp(i)
+          nclass(ipart) = nclass_tmp(i)
+          idt(ipart) = idt_tmp(i)
+          itramem(ipart) = itramem_tmp(i)
+          itrasplit(ipart) = itrasplit_tmp(i)
+          xtra1(ipart) = xtra1_tmp(i)
+          ytra1(ipart) = ytra1_tmp(i)
+          ztra1(ipart) = ztra1_tmp(i)
+          xmass1(ipart,:) = xmass1_tmp(i,:)
           goto 200 ! Storage space has been found, stop searching
         end if
+! :TODO: add check for if too many particles requiried
       end do
-200   minpart=minpart+1
+200   minpart=ipart+1
     end do
+! Increase numpart, if necessary
+    numpart=max(numpart,ipart)

     deallocate(itra1_tmp, npoint_tmp, nclass_tmp, idt_tmp, itramem_tmp, itrasplit_tmp,&
          & xtra1_tmp, ytra1_tmp, ztra1_tmp, xmass1_tmp)

-!    if (mp_dev_mode) then
-!      write(*,*) "numpart after: ", numpart
-!      write(*,FMT='(72("#"))')
-!    end if
+    if (mp_dev_mode) then
+      write(*,*) "numpart after: ", numpart
+      write(*,FMT='(72("#"))')
+    end if

   else
 ! This routine should only be called by the two participating processes
...
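This hunk is the "slow loop which had errors in loop counter" from the commit message. The old code used minpart both as the index into the temporary receive arrays and as the starting point of the free-slot search, so the inner scan restarted from a position unrelated to the last filled slot (slow, re-visiting occupied slots) and the source index could desynchronize from the outer counter i; it also re-evaluated numpart=max(numpart,ipart) on every insertion. The fix reads from slot i, resumes the search at ipart+1, and updates numpart once after the loop. The same compaction pattern in a self-contained sketch (hypothetical names; a named loop with cycle replaces the original goto 200):

  subroutine fill_free_slots(n, src, m, dest, empty)
    implicit none
    integer, intent(in)    :: n, m
    integer, intent(in)    :: empty     ! sentinel marking an unused slot
    integer, intent(in)    :: src(n)    ! received (possibly invalid) particles
    integer, intent(inout) :: dest(m)   ! local particle array with holes
    integer :: i, ipart, minpart
    minpart = 1
    outer: do i = 1, n
      if (src(i) == empty) cycle outer  ! skip invalid transfers
      do ipart = minpart, m
        if (dest(ipart) == empty) then
          dest(ipart) = src(i)          ! read with the outer counter i
          minpart = ipart + 1           ! resume search after the slot just filled
          cycle outer
        end if
      end do
      exit outer                        ! no free slot left
    end do outer
  end subroutine fill_free_slots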
@@ -2726,8 +2752,9 @@ contains
 !      & mp_vt_wtime_total
 !    write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR VERTTRANSFORM:',&
 !      & mp_vt_time_total
-! NB: the 'flush' function is possibly a gfortran-specific extension
-        call flush()
+! NB: the 'flush' function is possibly a gfortran-specific extension,
+! comment out if it gives problems
+!        call flush()
       end if
     end do
   end if
...
src/releaseparticles_mpi.f90

...
@@ -415,7 +415,6 @@ subroutine releaseparticles(itime)
           goto 34      ! Storage space has been found, stop searching
         endif
       end do
-
 ! ESO TODO: Here we could use dynamic allocation and increase array sizes
       if (ipart.gt.maxpart_mpi) goto 996
 34    minpart=ipart+1
...
src/timemanager_mpi.f90

...
@@ -577,7 +577,7 @@ subroutine timemanager
 !      end if
     end if

-! Write particles for all processes
+! Write number of particles for all processes
     if (mp_dev_mode) write(*,*) "PID, itime, numpart", mp_pid, itime, numpart
...
@@ -869,7 +869,8 @@ subroutine timemanager
     deallocate(drygridunc,wetgridunc)
   endif
   deallocate(gridunc)
-  deallocate(xpoint1,xpoint2,ypoint1,ypoint2,zpoint1,zpoint2,xmass,checklifetime)
+  deallocate(xpoint1,xpoint2,ypoint1,ypoint2,zpoint1,zpoint2,xmass)
+  if (allocated(checklifetime)) deallocate(checklifetime)
   deallocate(ireleasestart,ireleaseend,npart,kindz)
   deallocate(xmasssave)
   if (nested_output.eq.1) then
...
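Splitting checklifetime out of the combined deallocate matters because deallocating an array that was never allocated is a runtime error in Fortran; checklifetime is presumably only allocated under certain run options, so the cleanup must test it first:

  real, allocatable :: checklifetime(:)
  ! ...may or may not have been allocated, depending on run options...
  if (allocated(checklifetime)) deallocate(checklifetime)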