Commit fb0d416b
authored Apr 20, 2015 by Espen Sollum

    OH change suggested by Xuekun

parent b5d0e7e3

Changes 10
README_PARALLEL.md

-FLEXPART VERSION 9.2.0 (MPI)
+FLEXPART VERSION 10.0 beta (MPI)

 Description
 -----------
...
@@ -61,7 +61,7 @@ Implementation
 among the running processes. In the code, variables like 'maxpart' and
 'numpart' are complemented by variables 'maxpart_mpi' and 'numpart_mpi'
 which are the run-time determined number of particles per process, i.e.,
-maxpart_mpi = maxpart/[number of processes]. The variable 'numpart'
+maxpart_mpi = maxpart/np, where np is the number of processes. The variable 'numpart'
 is still used in the code, but redefined to mean 'number of particles
 per MPI process'
...
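The division above is plain integer division of the total particle budget by the process count. A minimal stand-alone sketch of this bookkeeping (maxpart and np take illustrative demo values, not the ones set in par_mod/mpi_mod):

    program maxpart_demo
    ! Illustrates the per-process particle bookkeeping described above.
    ! maxpart and np are demo values, not the values used in par_mod/mpi_mod.
      implicit none
      integer, parameter :: maxpart = 40000000 ! total particle slots (illustrative)
      integer :: np, maxpart_mpi

      np = 4                        ! number of MPI processes (illustrative)
      maxpart_mpi = maxpart/np      ! slots per process: 10000000

      write(*,'(A,I0)') 'maxpart_mpi = ', maxpart_mpi
    end program maxpart_demo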
@@ -78,7 +78,7 @@ Implementation
 be faster than running with np=3 and no dedicated 'reader' process.
 But it is also possible that the
 program will run even faster if the 4th process is participating in
-the calculation of particle trajectories. This will largely depend on
+the calculation of particle trajectories instead. This will largely depend on
 the problem size (total number of particles in the simulation, resolution
 of grids etc) and hardware being used (disk speed/buffering, memory
 bandwidth etc).
...
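A stand-alone sketch of how such a role split can be expressed. The threshold name read_grp_min comes from the getfields_mpi.f90 comments in this commit, and reserving the last rank is suggested by a comment in mpi_mod.f90; the exact decision logic in mpi_mod.f90 may differ, so treat this as illustrative:

    program reader_role_demo
    ! Sketch: with np >= read_grp_min, the last rank is reserved as a
    ! dedicated 'reader' process; otherwise all ranks compute trajectories.
    ! Illustrative only -- the real decision is made in mpi_mod.f90.
      use mpi
      implicit none
      integer :: mp_ierr, mp_np, mp_pid, read_grp_min
      logical :: lmp_use_reader, lmpreader

      call MPI_Init(mp_ierr)
      call MPI_Comm_size(MPI_COMM_WORLD, mp_np, mp_ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD, mp_pid, mp_ierr)

      read_grp_min = 4                                 ! illustrative threshold
      lmp_use_reader = (mp_np >= read_grp_min)         ! enough ranks for a reader?
      lmpreader = lmp_use_reader .and. (mp_pid == mp_np-1) ! last rank reads

      if (lmpreader) then
        write(*,'(A,I0,A)') 'rank ', mp_pid, ': reading wind fields'
      else
        write(*,'(A,I0,A)') 'rank ', mp_pid, ': computing trajectories'
      end if
      call MPI_Finalize(mp_ierr)
    end program reader_role_demo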
@@ -172,7 +172,7 @@ What is implemented in the MPI version
 * Domain-filling trajectory calculations
 * Nested wind fields

-The following will probably/possibly not work (untested/under developement):
+The following will most probably not work (untested/under development):

 * Backward runs
...
options/RELEASES

@@ -15,7 +15,7 @@
 Z2= 3000.000000 ,
 ZKIND= 1,
 MASS= 2.0000E8 ,
-PARTS= 2000000
+PARTS= 20000000
 COMMENT="TEST1 ",
 /
 &RELEASE
...
@@ -31,6 +31,6 @@
 Z2= 1000.000000 ,
 ZKIND= 1,
 MASS= 2.0000E8 ,
-PARTS= 2000000
+PARTS= 20000000
 COMMENT="TEST2 ",
 /
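Both test releases go from 2,000,000 to 20,000,000 particles. The file is in Fortran namelist format, so PARTS is read as an integer of the &RELEASE group. A minimal sketch of reading one such record, trimmed to the fields visible in this diff (the real &RELEASE group has more fields, such as dates and coordinates, which the actual reader in readreleases.f90 declares):

    program releases_demo
    ! Reads PARTS/MASS/COMMENT from a namelist record like the ones above.
    ! Variable set is trimmed to the fields shown in this diff; illustrative only.
      implicit none
      integer :: zkind, parts, ios
      real :: z2, mass
      character(len=40) :: comment
      namelist /release/ z2, zkind, mass, parts, comment

      open(10, file='RELEASES', status='old', action='read')
      read(10, nml=release, iostat=ios)
      if (ios == 0) write(*,'(A,I0)') 'particles in this release: ', parts
      close(10)
    end program releases_demo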
src/FLEXPART_MPI.f90

@@ -335,6 +335,10 @@ program flexpart
 ! and open files that are to be kept open throughout the simulation
 !******************************************************************

+  if (mp_measure_time) call mpif_mtime('iotime',0)
+
+! :DEV: was a bug here (all processes writing)?
+  if (lroot) then ! MPI: this part root process only
   if (lnetcdfout.eq.1) then
     call writeheader_netcdf(lnest=.false.)
   else
...
@@ -349,7 +353,7 @@ program flexpart
     endif
   endif

-  if (lroot) then ! MPI: this part root process only
+!
   if (verbosity.gt.0) then
     print*,'call writeheader'
   endif
...
@@ -363,9 +367,9 @@ program flexpart
   if (nested_output.ne.1.and.surf_only.eq.1) call writeheader_surf
   end if ! (mpif_pid == 0)

-  !open(unitdates,file=path(2)(1:length(2))//'dates')
+  if (mp_measure_time) call mpif_mtime('iotime',0)

   !open(unitdates,file=path(2)(1:length(2))//'dates')

   if (verbosity.gt.0 .and. lroot) then
     print*,'call openreceptors'
...
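The fix moves the 'if (lroot)' guard so that it also covers the NetCDF header write, following the usual MPI root-only I/O pattern: derive lroot once from the process rank, then wrap shared-file writes in it. A minimal stand-alone sketch of that pattern (the real lroot is set in mpi_mod.f90):

    program lroot_demo
    ! The root-only output pattern used by the fix above: without the
    ! 'if (lroot)' guard, every rank would write the same header file.
      use mpi
      implicit none
      integer :: mp_ierr, mp_pid
      logical :: lroot

      call MPI_Init(mp_ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD, mp_pid, mp_ierr)
      lroot = (mp_pid == 0)   ! only rank 0 performs shared-file I/O

      if (lroot) then
        write(*,*) 'root process: writing output header'
      end if
      call MPI_Finalize(mp_ierr)
    end program lroot_demo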
src/getfields_mpi.f90

@@ -42,10 +42,13 @@ subroutine getfields(itime,nstop,memstat)
 !   Function of nstop extended.
 !
 ! eso 2014:
-!   MPI version. 3 fields instead of 2, to allow reading the newest in
-!   the background.
-!   Only one process (lmpreader=.true.) does the actual reading, while the
-!   rest call this routine only to update memind, memstat etc.
+!   MPI version.
+!   If running with number of processes >= mpi_mod::read_grp_min,
+!   only one process (mpi_mod::lmpreader=.true.) does the actual reading, while
+!   the rest call this routine only to update memind, memstat etc.
+!
+!   If mpi_mod::lmp_sync=.true., uses 3 fields instead of 2, to allow reading
+!   the newest in the background.
 !
 ! Return memstat, which is the sum of
 !
...
@@ -60,8 +63,8 @@ subroutine getfields(itime,nstop,memstat)
 ! lwindinterval [s]  time difference between the two wind fields read in   *
 ! indj               indicates the number of the wind field to be read in  *
 ! indmin             remembers the number of wind fields already treated   *
-! memind(2)          pointer, on which place the wind fields are stored    *
-! memtime(2) [s]     times of the wind fields, which are kept in memory    *
+! memind(2[3])       pointer, on which place the wind fields are stored    *
+! memtime(2[3]) [s]  times of the wind fields, which are kept in memory    *
 ! itime [s]          current time since start date of trajectory calcu-    *
 !                    lation                                                *
 ! nstop              > 0, if trajectory has to be terminated               *
...
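The bracketed sizes mean the arrays hold two slots in the standard case and a third slot when a field is read ahead in the background. A stand-alone sketch of cycling such a pointer array round-robin, so the oldest slot is always the one recycled (illustrative only; the real update happens in getfields together with memstat):

    program memind_demo
    ! Cycling wind-field storage slots: the oldest slot is reused for the
    ! newly read field. Illustrative round-robin only; nfields = 3 in the
    ! read-ahead case described above, else 2.
      implicit none
      integer :: memind(3), nfields, step, tmp

      nfields = 3
      memind = (/1, 2, 3/)

      do step = 1, 4     ! each step: oldest slot is recycled for the new field
        tmp = memind(1)
        memind(1:nfields-1) = memind(2:nfields)
        memind(nfields) = tmp
        write(*,'(A,I0,A,3I2)') 'step ', step, ': memind =', memind(1:nfields)
      end do
    end program memind_demo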
src/gethourlyOH.f90

@@ -119,7 +119,7 @@ subroutine gethourlyOH(itime)
     m1=(jjjjmmdd-(jjjjmmdd/10000)*10000)/100
     memOHtime(1)=0.

-    jul2=bdate+real(1./24.,kind=dp) ! date for next hour
+    jul2=bdate+ldirect*real(1./24.,kind=dp) ! date for next hour
     call caldate(jul2,jjjjmmdd,hhmmss)
     m2=(jjjjmmdd-(jjjjmmdd/10000)*10000)/100
     memOHtime(2)=ldirect*3600.
...
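The fix multiplies the one-hour increment by ldirect (+1 for forward runs, -1 for backward runs), so that a backward run steps its OH field time one hour earlier instead of later, consistent with memOHtime(2)=ldirect*3600. on the following line. A small sketch of the two cases (dp as a double-precision kind; the julian date value is illustrative):

    program ldirect_demo
    ! Shows the effect of the fix above: without ldirect, a backward run
    ! (ldirect=-1) would step its OH field time in the wrong direction.
      implicit none
      integer, parameter :: dp = kind(1.d0)
      real(kind=dp) :: bdate, jul2
      integer :: ldirect

      bdate = 2457000.5_dp                          ! illustrative julian date

      ldirect = 1                                   ! forward run
      jul2 = bdate + ldirect*real(1./24., kind=dp)  ! one hour later
      write(*,'(A,F14.5)') 'forward  next hour: ', jul2

      ldirect = -1                                  ! backward run
      jul2 = bdate + ldirect*real(1./24., kind=dp)  ! one hour earlier
      write(*,'(A,F14.5)') 'backward next hour: ', jul2
    end program ldirect_demo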
src/makefile

@@ -35,12 +35,12 @@ FLEXPART-GFS-MPI = FP_gfs_MPI

 ## COMPILERS
-F90       = /usr/bin/gfortran
-#F90       = ${HOME}/gcc-4.9.1/bin/gfortran
+#F90       = /usr/bin/gfortran
+F90       = ${HOME}/gcc-4.9.1/bin/gfortran
 #MPIF90    = ${HOME}/opt/bin/mpifort
 #MPIF90    = mpif90.mpich
-MPIF90    = mpif90.openmpi
-#MPIF90    = mpifort
+#MPIF90    = mpif90.openmpi
+MPIF90    = mpifort

 ## OPTIMIZATION LEVEL
 O_LEV = 2 # [0,1,2,3,g,s,fast]
...
@@ -50,21 +50,21 @@ O_LEV_DBG = 0 # [0,g]

 LIBS = -lgrib_api_f90 -lgrib_api -lm -ljasper -lnetcdff # -fopenmp # -llapack -lnetcdf

 ## 1) System libraries at NILU
-INCPATH1 = /usr/include
-INCPATH1 = /xnilu_wrk/flex_wrk/bin64/grib_api/include
-LIBPATH1 = /xnilu_wrk/flex_wrk/bin64/grib_api/lib
-LIBPATH2 = /usr/lib/x86_64-linux-gnu
+#INCPATH1 = /usr/include
+#INCPATH1 = /xnilu_wrk/flex_wrk/bin64/grib_api/include
+#LIBPATH1 = /xnilu_wrk/flex_wrk/bin64/grib_api/lib
+#LIBPATH2 = /usr/lib/x86_64-linux-gnu

 ## 2) Home-made libraries
-#INCPATH1 = ${HOME}/include
-#INCPATH2 = /homevip/flexpart/include/
-#LIBPATH2 = /homevip/flexpart/lib/
-#LIBPATH1 = ${HOME}/lib
+INCPATH1 = ${HOME}/include
+INCPATH2 = /homevip/flexpart/include/
+LIBPATH2 = /homevip/flexpart/lib/
+LIBPATH1 = ${HOME}/lib
 #LIBPATH2 = ${HOME}/lib

 FFLAGS   = -I$(INCPATH1) -I$(INCPATH2) -O$(O_LEV) -g -m64 -mcmodel=medium -fconvert=little-endian -frecord-marker=4 -fmessage-length=0 -flto=jobserver -O$(O_LEV) -mtune=native -fuse-linker-plugin $(FUSER) # -march=native

 DBGFLAGS = -I$(INCPATH1) -I$(INCPATH2) -O$(O_LEV_DBG) -g3 -ggdb3 -m64 -mcmodel=medium -fconvert=little-endian -frecord-marker=4 -fmessage-length=0 -flto=jobserver -O$(O_LEV_DBG) -fbacktrace -Warray-bounds -Wall -fcheck=all $(FUSER) # -ffpe-trap=invalid,overflow,denormal,underflow,zero -fdump-core

 LDFLAGS  = $(FFLAGS) -L$(LIBPATH1) -L$(LIBPATH2) $(LIBS)
 LDDEBUG  = $(DBGFLAGS) -L$(LIBPATH1) -L$(LIBPATH2) $(LIBS)
...
src/mpi_mod.f90

@@ -89,7 +89,7 @@ module mpi_mod
 ! MPI tags/requests for send/receive operation
   integer :: tm1
   integer, parameter :: nvar_async=27 !29 :DBG:
   !integer, dimension(:), allocatable :: tags
   integer, dimension(:), allocatable :: reqs
...
@@ -1242,7 +1242,7 @@ contains
 !*****************************************************
     do dest=0,mp_np-1 ! mp_np-2 will also work if last proc reserved for reading
 ! TODO: use mp_partgroup_np here
       if (dest.eq.id_read) cycle
       i=dest*nvar_async
       call MPI_Isend(uu(:,:,:,mind),d3s1,mp_pp,dest,tm1,MPI_COMM_WORLD,reqs(i),mp_ierr)
...
@@ -1338,8 +1338,8 @@ contains
         call MPI_Isend(ciwc(:,:,:,mind),d3s1,mp_pp,dest,tm1,&
              &MPI_COMM_WORLD,reqs(i),mp_ierr)
         if (mp_ierr /= 0) goto 600
 ! else
 !   i=i+2
       end if
     end do
...
@@ -1388,10 +1388,10 @@ contains
 !*******************************************************************************
 ! :TODO: don't need these
 ! d3s1=d3_size1
 ! d3s2=d3_size2
 ! d2s1=d2_size1
 ! d2s2=d2_size2

 ! At the time this immediate receive is posted, memstat is the state of
 ! windfield indices at the previous/current time. From this, the future
...
@@ -1592,15 +1592,15 @@ contains
 ! if (readclouds) then
     call MPI_Waitall(n_req,reqs,MPI_STATUSES_IGNORE,mp_ierr)
 ! endif
 ! else
 !   do i = 0, nvar_async*mp_np-1
 !     if (mod(i,27).eq.0 .or. mod(i,28).eq.0) then
 !       call MPI_Cancel(reqs(i),mp_ierr)
 !       cycle
 !     end if
 !     call MPI_Wait(reqs(i),MPI_STATUS_IGNORE,mp_ierr)
 !   end do
 ! end if

     if (mp_ierr /= 0) goto 600
...
@@ -1908,7 +1908,7 @@ contains
 !***********************************************************************
-    if (mp_measure_time) then
+    IF (mp_measure_time) THEN
       do ip=0,mp_np-1
         call MPI_BARRIER(MPI_COMM_WORLD,mp_ierr)
...
@@ -1961,17 +1961,28 @@ contains
       end do
     end if

+! This call to barrier is for correctly formatting output
+    call MPI_BARRIER(MPI_COMM_WORLD,mp_ierr)
+
+    if (lroot) then
+      write(*,FMT='(72("#"))')
+      WRITE(*,*) "To turn off output of time measurements, set "
+      WRITE(*,*) "    mp_measure_time=.false."
+      WRITE(*,*) "in file mpi_mod.f90"
+      write(*,FMT='(72("#"))')
+    end if

 ! j=mp_pid*nvar_async
 ! In the implementation with 3 fields, the processes may have posted
 ! MPI_Irecv requests that should be cancelled here
 !! TODO:
 ! if (.not.lmp_sync) then
 !   r=mp_pid*nvar_async
 !   do j=r,r+nvar_async-1
 !     call MPI_Cancel(j,mp_ierr)
 !     if (mp_ierr /= 0) write(*,*) '#### mpif_finalize::MPI_Cancel> ERROR ####'
 !   end do
 ! end if

     call MPI_FINALIZE(mp_ierr)
     if (mp_ierr /= 0) then
...
@@ -1980,61 +1991,61 @@ contains
     end if
   end subroutine mpif_finalize


   subroutine get_lun(my_lun)
 !***********************************************************************
 ! get_lun:
 !   Starting from 100, get next free logical unit number
 !***********************************************************************

     implicit none

     integer, intent(inout) :: my_lun
     integer, save :: free_lun=100
     logical :: exists, iopen
 !***********************************************************************

     loop1: do
       inquire(UNIT=free_lun, EXIST=exists, OPENED=iopen)
       if (exists.and..not.iopen) exit loop1
       free_lun = free_lun+1
     end do loop1
     my_lun = free_lun

   end subroutine get_lun


   subroutine write_data_dbg(array_in, array_name, tstep, ident)
 !***********************************************************************
 ! Write one-dimensional arrays to disk (for debugging purposes)
 !***********************************************************************

     implicit none

     real, intent(in), dimension(:) :: array_in
     integer, intent(in) :: tstep
     integer :: lios
     character(LEN=*), intent(in) :: ident, array_name
     character(LEN=8) :: c_ts
     character(LEN=40) :: fn_1, fn_2
 !***********************************************************************

     write(c_ts, FMT='(I8.8,BZ)') tstep
     fn_1 = '-'//trim(adjustl(c_ts))//'-'//trim(ident)
     write(c_ts, FMT='(I2.2,BZ)') mp_np
     fn_2 = trim(adjustl(array_name))//trim(adjustl(fn_1))//'-np'//trim(adjustl(c_ts))//'.dat'

     call get_lun(dat_lun)
     open(UNIT=dat_lun, FILE=fn_2, IOSTAT=lios, ACTION='WRITE', &
          FORM='UNFORMATTED', STATUS='REPLACE')
     write(UNIT=dat_lun, IOSTAT=lios) array_in
     close(UNIT=dat_lun)

   end subroutine write_data_dbg

 end module mpi_mod
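A short usage sketch for get_lun as reproduced above: obtain a free logical unit, then open a file on it. Stand-alone copy of the subroutine plus a caller; the file name is illustrative:

    program get_lun_demo
    ! Uses the get_lun logic shown above to pick a free unit before open().
      implicit none
      integer :: my_unit

      call get_lun(my_unit)
      open(UNIT=my_unit, FILE='scratch_demo.dat', STATUS='REPLACE', ACTION='WRITE')
      write(my_unit,*) 'hello'
      close(my_unit, STATUS='DELETE')   ! clean up the demo file
      write(*,'(A,I0)') 'used logical unit ', my_unit

    contains

      subroutine get_lun(my_lun)
      ! Same search as mpi_mod::get_lun above: first free unit from 100 upward.
        integer, intent(inout) :: my_lun
        integer, save :: free_lun = 100
        logical :: exists, iopen
        loop1: do
          inquire(UNIT=free_lun, EXIST=exists, OPENED=iopen)
          if (exists .and. .not. iopen) exit loop1
          free_lun = free_lun + 1
        end do loop1
        my_lun = free_lun
      end subroutine get_lun

    end program get_lun_demo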
src/readOHfield.f90

@@ -45,7 +45,6 @@ subroutine readOHfield
   use par_mod
   use com_mod

   implicit none

   include 'netcdf.inc'
...
src/readspecies.f90

@@ -210,7 +210,7 @@ subroutine readspecies(id_spec,pos_spec)
     weightmolar(pos_spec)=pweightmolar
     ohcconst(pos_spec)=pohcconst
     ohdconst(pos_spec)=pohdconst
-    ohdconst(pos_spec)=pohnconst
+    ohnconst(pos_spec)=pohnconst
     spec_ass(pos_spec)=pspec_ass
     kao(pos_spec)=pkao
...
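The removed line was a copy-paste slip: pohnconst, the third OH parameter read from the SPECIES file, was stored into ohdconst, overwriting the value assigned on the line before, while ohnconst was never set. A reduced sketch of the before/after behaviour (array names from the diff; values illustrative):

    program ohconst_demo
    ! Demonstrates the bug fixed above, reduced to one species slot.
      implicit none
      real :: ohcconst(1), ohdconst(1), ohnconst(1)
      real :: pohcconst, pohdconst, pohnconst

      pohcconst = 1.0e-12; pohdconst = 500.0; pohnconst = 2.0  ! illustrative

      ! Before the fix: ohdconst ends up holding pohnconst, ohnconst is unset.
      ohcconst(1) = pohcconst
      ohdconst(1) = pohdconst
      ohdconst(1) = pohnconst          ! bug: wrong target array
      write(*,*) 'buggy ohdconst:', ohdconst(1)

      ! After the fix: each parameter lands in its own array.
      ohdconst(1) = pohdconst
      ohnconst(1) = pohnconst
      write(*,*) 'fixed ohdconst:', ohdconst(1), ' ohnconst:', ohnconst(1)
    end program ohconst_demo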
src/timemanager_mpi.f90

@@ -207,16 +207,22 @@ subroutine timemanager
       write(*,*) 'timemanager> call getfields'
     endif

+! This time measure includes reading/MPI communication (for the reader process),
+! or MPI communication time only (for other processes)
     if (mp_measure_time) call mpif_mtime('getfields',0)

     call getfields(itime,nstop1,memstat)

     if (mp_measure_time) call mpif_mtime('getfields',1)

 ! Broadcast fields to all MPI processes
 ! Skip if all processes have called getfields or if no new fields
 !*****************************************************************
+    if (mp_measure_time.and..not.(lmpreader.and.lmp_use_reader)) call mpif_mtime('getfields',0)
+
+! Version 1 (lmp_sync=.true.) uses a read-ahead process where send/recv is done
+! in sync at start of each new field time interval
     if (lmp_sync.and.lmp_use_reader.and.memstat.gt.0) then
...
@@ -258,6 +264,9 @@ subroutine timemanager
     end if

+    if (mp_measure_time.and..not.(lmpreader.and.lmp_use_reader)) call mpif_mtime('getfields',1)
+
 !*******************************************************************************

     if (lmpreader.and.nstop1.gt.1) stop 'NO METEO FIELDS AVAILABLE'
...
@@ -445,6 +454,7 @@ subroutine timemanager
 !**************************************
         call mpif_tm_reduce_grid

+        if (mp_measure_time) call mpif_mtime('iotime',0)
         if (surf_only.ne.1) then
           if (lroot) then
             if (lnetcdfout.eq.1) then
...
@@ -469,6 +479,7 @@ subroutine timemanager
           gridunc(:,:,:,:,:,:,:)=0.
         endif
       endif
+      if (mp_measure_time) call mpif_mtime('iotime',1)

 ! :TODO: Correct calling of conc_surf above?
...
@@ -480,6 +491,8 @@ subroutine timemanager
 ! MPI: Root process collects/sums nested grids
 !*********************************************
         call mpif_tm_reduce_grid_nest

+        if (mp_measure_time) call mpif_mtime('iotime',0)
+
         if (lnetcdfout.eq.0) then
           if (surf_only.ne.1) then
...
@@ -514,11 +527,14 @@ subroutine timemanager
           end if
         end if
         outnum = 0.
       endif

       if ((iout.eq.4).or.(iout.eq.5)) call plumetraj(itime)
       if (iflux.eq.1) call fluxoutput(itime)
+
+      if (mp_measure_time) call mpif_mtime('iotime',1)
+
       if (lroot) write(*,45) itime,numpart*mp_partgroup_np,gridtotalunc,&
            &wetgridtotalunc,drygridtotalunc
 ! if (lroot) write(*,46) float(itime)/3600,itime,numpart*mp_partgroup_np
...
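All of the calls added above follow the same convention: mpif_mtime(timer_name, 0) starts a measurement and mpif_mtime(timer_name, 1) stops it and accumulates the elapsed time under that name. A minimal sketch of such a start/stop accumulator, assuming MPI_Wtime-based timing (the actual implementation lives in mpi_mod.f90 and tracks several named timers):

    module mtime_demo_mod
    ! Minimal start/stop timer in the style of mpi_mod::mpif_mtime.
    ! One timer only; the real routine dispatches on its name argument.
      use mpi
      implicit none
      real(kind=8) :: time_beg = 0.d0, time_total = 0.d0
    contains
      subroutine demo_mtime(imode)
        integer, intent(in) :: imode
        select case (imode)
        case (0)                      ! start: remember current wall-clock time
          time_beg = MPI_Wtime()
        case (1)                      ! stop: accumulate elapsed interval
          time_total = time_total + (MPI_Wtime() - time_beg)
        end select
      end subroutine demo_mtime
    end module mtime_demo_mod

    program mtime_demo
      use mtime_demo_mod
      implicit none
      integer :: mp_ierr
      call MPI_Init(mp_ierr)
      call demo_mtime(0)              ! start
      call demo_mtime(1)              ! stop immediately; accumulates ~0 s
      write(*,*) 'accumulated time [s]:', time_total
      call MPI_Finalize(mp_ierr)
    end program mtime_demo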