Commit 0ecc1fed authored Nov 30, 2017 by Espen Sollum

Changed handling of nested input fields to be consistent with non-nested
parent c2bd55ea

7 changed files
src/interpol_rain.f90
@@ -55,7 +55,7 @@ subroutine interpol_rain(yy1,yy2,yy3,nxmax,nymax,nzmax,nx, &
   ! itime2               time of the second wind field                      *
   ! ix,jy                x,y coordinates of lower left subgrid point        *
   ! level                level at which interpolation shall be done         *
-  ! memind(3)            points to the places of the wind fields            *
+  ! iwftouse             points to the place of the wind field              *
   ! nx,ny                actual field dimensions in x,y and z direction     *
   ! nxmax,nymax,nzmax    maximum field dimensions in x,y and z direction    *
   ! xt                   current x coordinate                               *
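The documented argument changes from the index array memind(3) to a single index iwftouse: the caller now decides which wind field slot to use before calling. A minimal standalone sketch of the two interface styles, with hypothetical routine names (not FLEXPART's actual code):

program interface_sketch
  implicit none
  integer :: memind(2) = (/1, 2/)

  call doc_old(memind)       ! old style: routine receives all wind-field slots
  call doc_new(memind(2))    ! new style: caller picks one slot (iwftouse)

contains

  subroutine doc_old(memind)
    integer, intent(in) :: memind(:)
    write(*,*) 'fields available in slots', memind
  end subroutine doc_old

  subroutine doc_new(iwftouse)
    integer, intent(in) :: iwftouse
    write(*,*) 'using the field in slot', iwftouse
  end subroutine doc_new

end program interface_sketch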
src/interpol_rain_nests.f90
@@ -20,7 +20,7 @@
 !**********************************************************************
 subroutine interpol_rain_nests(yy1,yy2,yy3,nxmaxn,nymaxn,nzmax, &
-     maxnests,ngrid,nxn,nyn,memind,xt,yt,level,itime1,itime2,itime, &
+     maxnests,ngrid,nxn,nyn,iwftouse,xt,yt,level,itime1,itime2,itime, &
      yint1,yint2,yint3)
 !                                    i   i   i     i      i     i
 !     i     i   i   i     i     i  i    i      i      i      i
@@ -59,7 +59,7 @@ subroutine interpol_rain_nests(yy1,yy2,yy3,nxmaxn,nymaxn,nzmax, &
   ! itime2               time of the second wind field                      *
   ! ix,jy                x,y coordinates of lower left subgrid point        *
   ! level                level at which interpolation shall be done         *
-  ! memind(3)            points to the places of the wind fields            *
+  ! iwftouse             points to the place of the wind field              *
   ! nx,ny                actual field dimensions in x,y and z direction     *
   ! nxmax,nymax,nzmax    maximum field dimensions in x,y and z direction    *
   ! xt                   current x coordinate                               *
@@ -74,7 +74,7 @@ subroutine interpol_rain_nests(yy1,yy2,yy3,nxmaxn,nymaxn,nzmax, &
   implicit none

   integer :: maxnests,ngrid
-  integer :: nxn(maxnests),nyn(maxnests),nxmaxn,nymaxn,nzmax,memind(numwfmem)
+  integer :: nxn(maxnests),nyn(maxnests),nxmaxn,nymaxn,nzmax,iwftouse
   integer :: m,ix,jy,ixp,jyp,itime,itime1,itime2,level,indexh
   real :: yy1(0:nxmaxn-1,0:nymaxn-1,nzmax,numwfmem,maxnests)
   real :: yy2(0:nxmaxn-1,0:nymaxn-1,nzmax,numwfmem,maxnests)
@@ -124,35 +124,39 @@ subroutine interpol_rain_nests(yy1,yy2,yy3,nxmaxn,nymaxn,nzmax, &
   ! Loop over 2 time steps
   !***********************

-  do m=1,2
-    indexh=memind(m)
+  ! do m=1,2
+  !   indexh=memind(m)
+  indexh=iwftouse

-    y1(m)=p1*yy1(ix ,jy ,level,indexh,ngrid) &
+  y1(1)=p1*yy1(ix ,jy ,level,indexh,ngrid) &
        + p2*yy1(ixp,jy ,level,indexh,ngrid) &
        + p3*yy1(ix ,jyp,level,indexh,ngrid) &
        + p4*yy1(ixp,jyp,level,indexh,ngrid)
-    y2(m)=p1*yy2(ix ,jy ,level,indexh,ngrid) &
+  y2(1)=p1*yy2(ix ,jy ,level,indexh,ngrid) &
        + p2*yy2(ixp,jy ,level,indexh,ngrid) &
        + p3*yy2(ix ,jyp,level,indexh,ngrid) &
        + p4*yy2(ixp,jyp,level,indexh,ngrid)
-    y3(m)=p1*yy3(ix ,jy ,level,indexh,ngrid) &
+  y3(1)=p1*yy3(ix ,jy ,level,indexh,ngrid) &
        + p2*yy3(ixp,jy ,level,indexh,ngrid) &
        + p3*yy3(ix ,jyp,level,indexh,ngrid) &
        + p4*yy3(ixp,jyp,level,indexh,ngrid)
-  end do
+  ! end do

   !************************************
   ! 2.) Temporal interpolation (linear)
   !************************************

-  dt1=real(itime-itime1)
-  dt2=real(itime2-itime)
-  dt=dt1+dt2
+  ! dt1=real(itime-itime1)
+  ! dt2=real(itime2-itime)
+  ! dt=dt1+dt2

-  yint1=(y1(1)*dt2+y1(2)*dt1)/dt
-  yint2=(y2(1)*dt2+y2(2)*dt1)/dt
-  yint3=(y3(1)*dt2+y3(2)*dt1)/dt
+  ! yint1=(y1(1)*dt2+y1(2)*dt1)/dt
+  ! yint2=(y2(1)*dt2+y2(2)*dt1)/dt
+  ! yint3=(y3(1)*dt2+y3(2)*dt1)/dt
+  yint1=y1(1)
+  yint2=y2(1)
+  yint3=y3(1)

 end subroutine interpol_rain_nests
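In the old version the two wind fields bracketing itime were combined by linear time interpolation, yint = (y(1)*dt2 + y(2)*dt1)/dt with dt1 = itime-itime1 and dt2 = itime2-itime; after this change the routine simply returns the values of the single field selected by iwftouse. A small standalone sketch of the difference, with hypothetical values (not FLEXPART code):

program interp_sketch
  implicit none
  real    :: y(2), yint_linear, yint_single
  real    :: dt1, dt2, dt
  integer :: itime, itime1, itime2, iwftouse

  y(1) = 2.0            ! value taken from the first wind field (hypothetical)
  y(2) = 4.0            ! value taken from the second wind field (hypothetical)
  itime1 = 0
  itime2 = 3600
  itime  = 900

  ! Old behaviour: weight the two fields by their temporal distance to itime
  dt1 = real(itime - itime1)
  dt2 = real(itime2 - itime)
  dt  = dt1 + dt2
  yint_linear = (y(1)*dt2 + y(2)*dt1)/dt     ! 2.5 for these numbers

  ! New behaviour: use only the field selected by iwftouse
  iwftouse = 1
  yint_single = y(iwftouse)                  ! 2.0

  print *, 'linear in time:', yint_linear, '  single field:', yint_single
end program interp_sketch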
src/mpi_mod.f90
@@ -2535,6 +2535,7 @@ contains
 !      & mp_comm_used, mp_ierr)
 !      if (mp_ierr /= 0) goto 600

 #ifdef USE_MPIINPLACE
+    ! Using in-place reduction
     if (lroot) then
       call MPI_Reduce(MPI_IN_PLACE, griduncn, grid_size3d, mp_sp, MPI_SUM, id_root, &
@@ -2543,8 +2544,17 @@ contains
     else
       call MPI_Reduce(griduncn, 0, grid_size3d, mp_sp, MPI_SUM, id_root, &
            & mp_comm_used, mp_ierr)
       if (mp_ierr /= 0) goto 600
     end if
+#else
+    call MPI_Reduce(griduncn, griduncn0, grid_size3d, mp_sp, MPI_SUM, id_root, &
+         & mp_comm_used, mp_ierr)
+    if (mp_ierr /= 0) goto 600
+    if (lroot) griduncn = griduncn0
 #endif

     if ((WETDEP).and.(ldirect.gt.0)) then
       call MPI_Reduce(wetgriduncn, wetgriduncn0, grid_size2d, mp_cp, MPI_SUM, id_root, &
            & mp_comm_used, mp_ierr)
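Both branches compute the same sum over MPI ranks; they differ only in whether the root rank reuses its send buffer (MPI_IN_PLACE) or reduces into the auxiliary array (griduncn0) and copies it back. A standalone sketch of the two variants, assuming a preprocessed source file (e.g. .F90 or -cpp) and hypothetical array names and sizes; this is not FLEXPART's mpi_mod:

program reduce_sketch
  use mpi
  implicit none
  integer, parameter :: n = 8
  real :: grid(n)    ! per-rank partial field
  real :: grid0(n)   ! separate receive buffer, only meaningful on the root
  integer :: ierr, rank

  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
  grid = real(rank + 1)

#ifdef USE_MPIINPLACE
  ! Variant 1: root reduces into its own send buffer, no aux array required.
  if (rank == 0) then
    call MPI_Reduce(MPI_IN_PLACE, grid, n, MPI_REAL, MPI_SUM, 0, MPI_COMM_WORLD, ierr)
  else
    call MPI_Reduce(grid, grid0, n, MPI_REAL, MPI_SUM, 0, MPI_COMM_WORLD, ierr)
  end if
#else
  ! Variant 2: every rank reduces into the aux buffer; the root copies it back.
  call MPI_Reduce(grid, grid0, n, MPI_REAL, MPI_SUM, 0, MPI_COMM_WORLD, ierr)
  if (rank == 0) grid = grid0
#endif

  if (rank == 0) write(*,*) 'sum over ranks of first element:', grid(1)
  call MPI_Finalize(ierr)
end program reduce_sketch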
src/netcdf_output_mod.f90
@@ -272,7 +272,6 @@ subroutine writeheader_netcdf(lnest)
   character(len=10) :: fprefix
   character(len=3)  :: anspec
   CHARACTER         :: adate*8,atime*6,timeunit*32
-  ! ESO DBG: WHY IS THIS HARDCODED TO 1000?
   !REAL, DIMENSION(1000) :: coord
   real, allocatable, dimension(:) :: coord
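The declaration swaps a fixed DIMENSION(1000) work array for an allocatable one that can be sized from the actual output grid at run time. A minimal standalone sketch of that pattern with hypothetical names and sizes (not the FLEXPART routine):

program coord_sketch
  implicit none
  real, allocatable, dimension(:) :: coord
  integer :: numgrid, i

  numgrid = 360                      ! run-time grid size (hypothetical)
  allocate(coord(numgrid))           ! sized to what is actually needed
  do i = 1, numgrid
    coord(i) = -179.5 + real(i-1)    ! e.g. cell-centre longitudes
  end do
  print *, 'first/last coordinate:', coord(1), coord(numgrid)
  deallocate(coord)
end program coord_sketch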
src/outgrid_init.f90
@@ -209,35 +209,39 @@ subroutine outgrid_init
   ! gridunc,griduncn        uncertainty of outputted concentrations
   allocate(gridunc(0:numxgrid-1,0:numygrid-1,numzgrid,maxspec, &
        maxpointspec_act,nclassunc,maxageclass),stat=stat)
   if (stat.ne.0) write(*,*)'ERROR: could not allocate gridunc'
   if (ldirect.gt.0) then
     allocate(wetgridunc(0:numxgrid-1,0:numygrid-1,maxspec, &
          maxpointspec_act,nclassunc,maxageclass),stat=stat)
     if (stat.ne.0) write(*,*)'ERROR: could not allocate wetgridunc'
     allocate(drygridunc(0:numxgrid-1,0:numygrid-1,maxspec, &
          maxpointspec_act,nclassunc,maxageclass),stat=stat)
     if (stat.ne.0) write(*,*)'ERROR: could not allocate drygridunc'
   endif

-  ! Extra field for totals at MPI root process
-  if (lroot.and.mpi_mode.gt.0) then
+#ifdef USE_MPIINPLACE
+#else
+  ! If MPI_IN_PLACE option is not used in mpi_mod.f90::mpif_tm_reduce_grid(),
+  ! then an aux array is needed for parallel grid reduction
+  ! Extra field for totals at MPI root process
+  if (lroot.and.mpi_mode.gt.0) then
-  ! If MPI_IN_PLACE option is not used in mpi_mod.f90::mpif_tm_reduce_grid(),
-  ! then an aux array is needed for parallel grid reduction
     allocate(gridunc0(0:numxgrid-1,0:numygrid-1,numzgrid,maxspec, &
          maxpointspec_act,nclassunc,maxageclass),stat=stat)
     if (stat.ne.0) write(*,*)'ERROR: could not allocate gridunc0'
   else if (.not.lroot.and.mpi_mode.gt.0) then
     allocate(gridunc0(1,1,1,1,1,1,1),stat=stat)
     if (stat.ne.0) write(*,*)'ERROR: could not allocate gridunc0'
   end if
+#endif

-  if (ldirect.gt.0) then
-    allocate(wetgridunc0(0:numxgrid-1,0:numygrid-1,maxspec, &
-         maxpointspec_act,nclassunc,maxageclass),stat=stat)
-    if (stat.ne.0) write(*,*)'ERROR: could not allocate wetgridunc0'
-    allocate(drygridunc0(0:numxgrid-1,0:numygrid-1,maxspec, &
-         maxpointspec_act,nclassunc,maxageclass),stat=stat)
-    if (stat.ne.0) write(*,*)'ERROR: could not allocate drygridunc0'
-  endif
+! if (ldirect.gt.0) then
+  if (lroot.and.mpi_mode.gt.0) then
+    allocate(wetgridunc0(0:numxgrid-1,0:numygrid-1,maxspec, &
+         maxpointspec_act,nclassunc,maxageclass),stat=stat)
+    if (stat.ne.0) write(*,*)'ERROR: could not allocate wetgridunc0'
+    allocate(drygridunc0(0:numxgrid-1,0:numygrid-1,maxspec, &
+         maxpointspec_act,nclassunc,maxageclass),stat=stat)
+    if (stat.ne.0) write(*,*)'ERROR: could not allocate drygridunc0'

   ! allocate a dummy to avoid compilator complaints
   else if (.not.lroot.and.mpi_mode.gt.0) then
     allocate(wetgridunc0(1,1,1,1,1,1),stat=stat)
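All of these allocations pass stat= so a failed allocate is reported instead of terminating the program on the spot. A minimal standalone sketch of the pattern, with hypothetical names and sizes:

program alloc_sketch
  implicit none
  real, allocatable :: grid(:,:,:)
  integer :: stat, nx, ny, nz

  nx = 360; ny = 180; nz = 10          ! hypothetical output grid dimensions
  allocate(grid(0:nx-1, 0:ny-1, nz), stat=stat)
  if (stat.ne.0) write(*,*) 'ERROR: could not allocate grid'
  if (stat.eq.0) grid = 0.0            ! only touch the array if allocation succeeded
end program alloc_sketch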
src/outgrid_init_nest.f90
@@ -68,20 +68,29 @@ subroutine outgrid_init_nest
     if (stat.ne.0) write(*,*)'ERROR:could not allocate nested gridunc'
   endif

+#ifdef USE_MPIINPLACE
+#else
   ! Extra field for totals at MPI root process
   if (lroot.and.mpi_mode.gt.0) then
-! allocate(griduncn0(0:numxgridn-1,0:numygridn-1,numzgrid,maxspec, &
-!      maxpointspec_act,nclassunc,maxageclass),stat=stat)
-! if (stat.ne.0) write(*,*)'ERROR:could not allocate nested gridunc'
-    if (ldirect.gt.0) then
-      allocate(wetgriduncn0(0:numxgridn-1,0:numygridn-1,maxspec, &
-           maxpointspec_act,nclassunc,maxageclass),stat=stat)
-      if (stat.ne.0) write(*,*)'ERROR:could not allocate nested gridunc'
-      allocate(drygriduncn0(0:numxgridn-1,0:numygridn-1,maxspec, &
-           maxpointspec_act,nclassunc,maxageclass),stat=stat)
-      if (stat.ne.0) write(*,*)'ERROR:could not allocate nested gridunc'
-    endif
+  ! If MPI_IN_PLACE option is not used in mpi_mod.f90::mpif_tm_reduce_grid_nest(),
+  ! then an aux array is needed for parallel grid reduction
+    allocate(griduncn0(0:numxgridn-1,0:numygridn-1,numzgrid,maxspec, &
+         maxpointspec_act,nclassunc,maxageclass),stat=stat)
+    if (stat.ne.0) write(*,*)'ERROR:could not allocate nested gridunc'
+  ! allocate a dummy to avoid compilator complaints
+  else if (.not.lroot.and.mpi_mode.gt.0) then
+    allocate(griduncn0(1,1,1,1,1,1,1),stat=stat)
+  end if
+#endif
+! if (ldirect.gt.0) then
+  if (lroot.and.mpi_mode.gt.0) then
+    allocate(wetgriduncn0(0:numxgridn-1,0:numygridn-1,maxspec, &
+         maxpointspec_act,nclassunc,maxageclass),stat=stat)
+    if (stat.ne.0) write(*,*)'ERROR:could not allocate nested gridunc'
+    allocate(drygriduncn0(0:numxgridn-1,0:numygridn-1,maxspec, &
+         maxpointspec_act,nclassunc,maxageclass),stat=stat)
+    if (stat.ne.0) write(*,*)'ERROR:could not allocate nested gridunc'
+! endif
   ! allocate a dummy to avoid compilator complaints
   else if (.not.lroot.and.mpi_mode.gt.0) then
     allocate(wetgriduncn0(1,1,1,1,1,1),stat=stat)
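As in outgrid_init.f90, the auxiliary receive buffer is compiled in only when USE_MPIINPLACE is not defined, and non-root ranks allocate a 1-element dummy so the array is always allocated wherever it later appears in a reduction call. A standalone sketch of that conditional-compilation pattern, with hypothetical names (compile with the preprocessor enabled, e.g. -cpp, and optionally -DUSE_MPIINPLACE):

program guard_sketch
  implicit none
  real, allocatable :: buf0(:,:,:)
  logical :: is_root
  integer :: stat

  is_root = .true.                          ! stand-in for FLEXPART's lroot flag
#ifdef USE_MPIINPLACE
  ! In-place reduction: no separate receive buffer is needed at all.
#else
  if (is_root) then
    allocate(buf0(360,180,10), stat=stat)   ! full-size buffer on the root rank
  else
    allocate(buf0(1,1,1), stat=stat)        ! 1-element dummy keeps later calls legal
  end if
  if (stat.ne.0) write(*,*) 'ERROR: could not allocate buf0'
#endif
end program guard_sketch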
src/unc_mod.f90
@@ -37,6 +37,7 @@ module unc_mod
   ! If MPI_IN_PLACE option is not used in mpi_mod.f90::mpif_tm_reduce_grid(),
   ! then an aux array is needed for parallel grid reduction
   real,allocatable, dimension (:,:,:,:,:,:,:) :: gridunc0
+  real,allocatable, dimension (:,:,:,:,:,:,:) :: griduncn0
 #endif
   real,allocatable, dimension (:,:,:,:,:,:,:) :: griduncn
   real(dep_prec),allocatable, dimension (:,:,:,:,:,:) :: drygridunc