Commit 0ecc1fed authored by Espen Sollum

Changed handling of nested input fields to be consistent with the non-nested case (see the sketch below).

parent c2bd55ea
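In essence, interpol_rain_nests now mirrors the non-nested interpol_rain: instead of looping over the two wind-field time slots indexed by memind(1:2) and interpolating linearly in time, it samples only the single field selected by iwftouse. The following standalone sketch (toy scalar arrays, not the FLEXPART routine itself) contrasts the old and new logic shown in the hunks below:

```fortran
! Minimal sketch of the logic change; yy1 here is a toy stand-in for
! yy1(ix,jy,level,indexh,ngrid) in the real routine.
program nested_field_sketch
  implicit none
  integer, parameter :: numwfmem = 2
  integer :: memind(numwfmem), iwftouse, m, indexh
  integer :: itime, itime1, itime2
  real :: yy1(numwfmem)
  real :: y1(2), yint1, dt1, dt2, dt

  memind = (/ 1, 2 /)
  iwftouse = 1
  itime1 = 0 ; itime2 = 3600 ; itime = 900
  yy1 = (/ 1.0, 3.0 /)

  ! Old handling: sample both time slots and interpolate linearly in time
  do m = 1, 2
    indexh = memind(m)
    y1(m) = yy1(indexh)
  end do
  dt1 = real(itime - itime1)
  dt2 = real(itime2 - itime)
  dt  = dt1 + dt2
  yint1 = (y1(1)*dt2 + y1(2)*dt1)/dt
  print *, 'old (time-interpolated) value: ', yint1

  ! New handling: use only the field selected by iwftouse, as in interpol_rain
  indexh = iwftouse
  y1(1) = yy1(indexh)
  yint1 = y1(1)
  print *, 'new (single-field) value:      ', yint1
end program nested_field_sketch
```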
@@ -55,7 +55,7 @@ subroutine interpol_rain(yy1,yy2,yy3,nxmax,nymax,nzmax,nx, &
! itime2 time of the second wind field *
! ix,jy x,y coordinates of lower left subgrid point *
! level level at which interpolation shall be done *
! memind(3) points to the places of the wind fields *
! iwftouse points to the place of the wind field *
! nx,ny actual field dimensions in x,y and z direction *
! nxmax,nymax,nzmax maximum field dimensions in x,y and z direction *
! xt current x coordinate *
......
@@ -20,7 +20,7 @@
!**********************************************************************
subroutine interpol_rain_nests(yy1,yy2,yy3,nxmaxn,nymaxn,nzmax, &
maxnests,ngrid,nxn,nyn,memind,xt,yt,level,itime1,itime2,itime, &
maxnests,ngrid,nxn,nyn,iwftouse,xt,yt,level,itime1,itime2,itime, &
yint1,yint2,yint3)
! i i i i i i
! i i i i i i i i i i i
@@ -59,7 +59,7 @@ subroutine interpol_rain_nests(yy1,yy2,yy3,nxmaxn,nymaxn,nzmax, &
! itime2 time of the second wind field *
! ix,jy x,y coordinates of lower left subgrid point *
! level level at which interpolation shall be done *
! memind(3) points to the places of the wind fields *
! iwftouse points to the place of the wind field *
! nx,ny actual field dimensions in x,y and z direction *
! nxmax,nymax,nzmax maximum field dimensions in x,y and z direction *
! xt current x coordinate *
@@ -74,7 +74,7 @@ subroutine interpol_rain_nests(yy1,yy2,yy3,nxmaxn,nymaxn,nzmax, &
implicit none
integer :: maxnests,ngrid
integer :: nxn(maxnests),nyn(maxnests),nxmaxn,nymaxn,nzmax,memind(numwfmem)
integer :: nxn(maxnests),nyn(maxnests),nxmaxn,nymaxn,nzmax,iwftouse
integer :: m,ix,jy,ixp,jyp,itime,itime1,itime2,level,indexh
real :: yy1(0:nxmaxn-1,0:nymaxn-1,nzmax,numwfmem,maxnests)
real :: yy2(0:nxmaxn-1,0:nymaxn-1,nzmax,numwfmem,maxnests)
@@ -124,35 +124,39 @@ subroutine interpol_rain_nests(yy1,yy2,yy3,nxmaxn,nymaxn,nzmax, &
! Loop over 2 time steps
!***********************
do m=1,2
indexh=memind(m)
! do m=1,2
! indexh=memind(m)
indexh=iwftouse
y1(m)=p1*yy1(ix ,jy ,level,indexh,ngrid) &
y1(1)=p1*yy1(ix ,jy ,level,indexh,ngrid) &
+ p2*yy1(ixp,jy ,level,indexh,ngrid) &
+ p3*yy1(ix ,jyp,level,indexh,ngrid) &
+ p4*yy1(ixp,jyp,level,indexh,ngrid)
y2(m)=p1*yy2(ix ,jy ,level,indexh,ngrid) &
y2(1)=p1*yy2(ix ,jy ,level,indexh,ngrid) &
+ p2*yy2(ixp,jy ,level,indexh,ngrid) &
+ p3*yy2(ix ,jyp,level,indexh,ngrid) &
+ p4*yy2(ixp,jyp,level,indexh,ngrid)
y3(m)=p1*yy3(ix ,jy ,level,indexh,ngrid) &
y3(1)=p1*yy3(ix ,jy ,level,indexh,ngrid) &
+ p2*yy3(ixp,jy ,level,indexh,ngrid) &
+ p3*yy3(ix ,jyp,level,indexh,ngrid) &
+ p4*yy3(ixp,jyp,level,indexh,ngrid)
end do
! end do
!************************************
! 2.) Temporal interpolation (linear)
!************************************
dt1=real(itime-itime1)
dt2=real(itime2-itime)
dt=dt1+dt2
! dt1=real(itime-itime1)
! dt2=real(itime2-itime)
! dt=dt1+dt2
yint1=(y1(1)*dt2+y1(2)*dt1)/dt
yint2=(y2(1)*dt2+y2(2)*dt1)/dt
yint3=(y3(1)*dt2+y3(2)*dt1)/dt
! yint1=(y1(1)*dt2+y1(2)*dt1)/dt
! yint2=(y2(1)*dt2+y2(2)*dt1)/dt
! yint3=(y3(1)*dt2+y3(2)*dt1)/dt
yint1=y1(1)
yint2=y2(1)
yint3=y3(1)
end subroutine interpol_rain_nests
@@ -2535,6 +2535,7 @@ contains
! & mp_comm_used, mp_ierr)
! if (mp_ierr /= 0) goto 600
#ifdef USE_MPIINPLACE
! Using in-place reduction
if (lroot) then
call MPI_Reduce(MPI_IN_PLACE, griduncn, grid_size3d, mp_sp, MPI_SUM, id_root, &
@@ -2543,8 +2544,17 @@ contains
else
call MPI_Reduce(griduncn, 0, grid_size3d, mp_sp, MPI_SUM, id_root, &
& mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600
end if
#else
call MPI_Reduce(griduncn, griduncn0, grid_size3d, mp_sp, MPI_SUM, id_root, &
& mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600
if (lroot) griduncn = griduncn0
#endif
if ((WETDEP).and.(ldirect.gt.0)) then
call MPI_Reduce(wetgriduncn, wetgriduncn0, grid_size2d, mp_cp, MPI_SUM, id_root, &
& mp_comm_used, mp_ierr)
......
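The hunk above adds a fallback reduction path for builds where USE_MPIINPLACE is not defined: every rank contributes griduncn into a separate receive buffer griduncn0 on the root, which then copies the totals back, whereas the in-place variant lets the root reuse griduncn itself as the receive buffer. A minimal, self-contained sketch of the two patterns (toy 4-element array and generic names, not FLEXPART's mpi_mod; compile with preprocessing enabled, e.g. mpif90 -cpp):

```fortran
program reduce_inplace_sketch
  use mpi
  implicit none
  integer :: ierr, rank
  real :: grid(4), grid0(4)

  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
  grid = real(rank + 1)          ! each rank contributes its own values
  grid0 = 0.0

#ifdef USE_MPIINPLACE
  ! In-place variant: the root passes MPI_IN_PLACE as the send buffer and the
  ! summed result overwrites its own grid array; no auxiliary buffer is needed.
  if (rank == 0) then
    call MPI_Reduce(MPI_IN_PLACE, grid, size(grid), MPI_REAL, MPI_SUM, 0, &
         MPI_COMM_WORLD, ierr)
  else
    call MPI_Reduce(grid, grid0, size(grid), MPI_REAL, MPI_SUM, 0, &
         MPI_COMM_WORLD, ierr)
  end if
#else
  ! Aux-buffer variant: all ranks reduce into a separate buffer grid0 that is
  ! only significant on the root, which then copies the totals back.
  call MPI_Reduce(grid, grid0, size(grid), MPI_REAL, MPI_SUM, 0, &
       MPI_COMM_WORLD, ierr)
  if (rank == 0) grid = grid0
#endif

  if (rank == 0) print *, 'reduced grid on root:', grid
  call MPI_Finalize(ierr)
end program reduce_inplace_sketch
```

This mirrors, for the nested uncertainty grid griduncn, the aux-buffer path that already existed for gridunc.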
@@ -272,7 +272,6 @@ subroutine writeheader_netcdf(lnest)
character(len=10) :: fprefix
character(len=3) :: anspec
CHARACTER :: adate*8,atime*6,timeunit*32
! ESO DBG: WHY IS THIS HARDCODED TO 1000?
!REAL, DIMENSION(1000) :: coord
real, allocatable, dimension(:) :: coord
......
@@ -209,35 +209,39 @@ subroutine outgrid_init
! gridunc,griduncn uncertainty of outputted concentrations
allocate(gridunc(0:numxgrid-1,0:numygrid-1,numzgrid,maxspec, &
maxpointspec_act,nclassunc,maxageclass),stat=stat)
if (stat.ne.0) write(*,*)'ERROR: could not allocate gridunc'
if (stat.ne.0) write(*,*)'ERROR: could not allocate gridunc'
if (ldirect.gt.0) then
allocate(wetgridunc(0:numxgrid-1,0:numygrid-1,maxspec, &
maxpointspec_act,nclassunc,maxageclass),stat=stat)
maxpointspec_act,nclassunc,maxageclass),stat=stat)
if (stat.ne.0) write(*,*)'ERROR: could not allocate wetgridunc'
allocate(drygridunc(0:numxgrid-1,0:numygrid-1,maxspec, &
maxpointspec_act,nclassunc,maxageclass),stat=stat)
allocate(drygridunc(0:numxgrid-1,0:numygrid-1,maxspec, &
maxpointspec_act,nclassunc,maxageclass),stat=stat)
if (stat.ne.0) write(*,*)'ERROR: could not allocate drygridunc'
endif
! Extra field for totals at MPI root process
if (lroot.and.mpi_mode.gt.0) then
#ifdef USE_MPIINPLACE
#else
! If MPI_IN_PLACE option is not used in mpi_mod.f90::mpif_tm_reduce_grid(),
! then an aux array is needed for parallel grid reduction
! Extra field for totals at MPI root process
if (lroot.and.mpi_mode.gt.0) then
! If MPI_IN_PLACE option is not used in mpi_mod.f90::mpif_tm_reduce_grid(),
! then an aux array is needed for parallel grid reduction
allocate(gridunc0(0:numxgrid-1,0:numygrid-1,numzgrid,maxspec, &
maxpointspec_act,nclassunc,maxageclass),stat=stat)
if (stat.ne.0) write(*,*)'ERROR: could not allocate gridunc0'
else if (.not.lroot.and.mpi_mode.gt.0) then
allocate(gridunc0(1,1,1,1,1,1,1),stat=stat)
if (stat.ne.0) write(*,*)'ERROR: could not allocate gridunc0'
end if
#endif
if (ldirect.gt.0) then
allocate(wetgridunc0(0:numxgrid-1,0:numygrid-1,maxspec, &
maxpointspec_act,nclassunc,maxageclass),stat=stat)
if (stat.ne.0) write(*,*)'ERROR: could not allocate wetgridunc0'
allocate(drygridunc0(0:numxgrid-1,0:numygrid-1,maxspec, &
maxpointspec_act,nclassunc,maxageclass),stat=stat)
if (stat.ne.0) write(*,*)'ERROR: could not allocate drygridunc0'
endif
! if (ldirect.gt.0) then
if (lroot.and.mpi_mode.gt.0) then
allocate(wetgridunc0(0:numxgrid-1,0:numygrid-1,maxspec, &
maxpointspec_act,nclassunc,maxageclass),stat=stat)
if (stat.ne.0) write(*,*)'ERROR: could not allocate wetgridunc0'
allocate(drygridunc0(0:numxgrid-1,0:numygrid-1,maxspec, &
maxpointspec_act,nclassunc,maxageclass),stat=stat)
if (stat.ne.0) write(*,*)'ERROR: could not allocate drygridunc0'
! allocate a dummy to avoid compiler complaints
else if (.not.lroot.and.mpi_mode.gt.0) then
allocate(wetgridunc0(1,1,1,1,1,1),stat=stat)
......
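The allocation change above follows one pattern in both outgrid_init and outgrid_init_nest: only the root rank allocates full-size auxiliary arrays for the grid reduction, while the other ranks allocate 1-element dummies so the arrays are always in an allocated state. A minimal sketch of that pattern (toy extents and a single array; lroot and mpi_mode stand in for the module variables used in the diff):

```fortran
program alloc_pattern_sketch
  implicit none
  logical :: lroot
  integer :: mpi_mode, stat
  real, allocatable :: wetgridunc0(:,:,:,:,:,:)

  lroot = .true.     ! pretend this is the MPI root rank
  mpi_mode = 1       ! pretend this is an MPI run

  if (lroot .and. mpi_mode > 0) then
    ! Root: full-size auxiliary array receiving the reduced totals (toy extents)
    allocate(wetgridunc0(0:9,0:9,1,1,1,1), stat=stat)
    if (stat /= 0) write(*,*) 'ERROR: could not allocate wetgridunc0'
  else if (.not. lroot .and. mpi_mode > 0) then
    ! Workers: 1-element dummy so the array is still allocated
    allocate(wetgridunc0(1,1,1,1,1,1), stat=stat)
    if (stat /= 0) write(*,*) 'ERROR: could not allocate wetgridunc0'
  end if

  print *, 'allocated extents:', shape(wetgridunc0)
end program alloc_pattern_sketch
```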
@@ -68,20 +68,29 @@ subroutine outgrid_init_nest
if (stat.ne.0) write(*,*)'ERROR:could not allocate nested gridunc'
endif
#ifdef USE_MPIINPLACE
#else
! Extra field for totals at MPI root process
if (lroot.and.mpi_mode.gt.0) then
! allocate(griduncn0(0:numxgridn-1,0:numygridn-1,numzgrid,maxspec, &
! maxpointspec_act,nclassunc,maxageclass),stat=stat)
! if (stat.ne.0) write(*,*)'ERROR:could not allocate nested gridunc'
if (ldirect.gt.0) then
allocate(wetgriduncn0(0:numxgridn-1,0:numygridn-1,maxspec, &
maxpointspec_act,nclassunc,maxageclass),stat=stat)
if (stat.ne.0) write(*,*)'ERROR:could not allocate nested gridunc'
allocate(drygriduncn0(0:numxgridn-1,0:numygridn-1,maxspec, &
maxpointspec_act,nclassunc,maxageclass),stat=stat)
if (stat.ne.0) write(*,*)'ERROR:could not allocate nested gridunc'
endif
! If MPI_IN_PLACE option is not used in mpi_mod.f90::mpif_tm_reduce_grid_nest(),
! then an aux array is needed for parallel grid reduction
allocate(griduncn0(0:numxgridn-1,0:numygridn-1,numzgrid,maxspec, &
maxpointspec_act,nclassunc,maxageclass),stat=stat)
if (stat.ne.0) write(*,*)'ERROR:could not allocate nested gridunc'
! allocate a dummy to avoid compiler complaints
else if (.not.lroot.and.mpi_mode.gt.0) then
allocate(griduncn0(1,1,1,1,1,1,1),stat=stat)
end if
#endif
! if (ldirect.gt.0) then
if (lroot.and.mpi_mode.gt.0) then
allocate(wetgriduncn0(0:numxgridn-1,0:numygridn-1,maxspec, &
maxpointspec_act,nclassunc,maxageclass),stat=stat)
if (stat.ne.0) write(*,*)'ERROR:could not allocate nested gridunc'
allocate(drygriduncn0(0:numxgridn-1,0:numygridn-1,maxspec, &
maxpointspec_act,nclassunc,maxageclass),stat=stat)
if (stat.ne.0) write(*,*)'ERROR:could not allocate nested gridunc'
! endif
! allocate a dummy to avoid compiler complaints
else if (.not.lroot.and.mpi_mode.gt.0) then
allocate(wetgriduncn0(1,1,1,1,1,1),stat=stat)
......
@@ -37,6 +37,7 @@ module unc_mod
! If MPI_IN_PLACE option is not used in mpi_mod.f90::mpif_tm_reduce_grid(),
! then an aux array is needed for parallel grid reduction
real,allocatable, dimension (:,:,:,:,:,:,:) :: gridunc0
real,allocatable, dimension (:,:,:,:,:,:,:) :: griduncn0
#endif
real,allocatable, dimension (:,:,:,:,:,:,:) :: griduncn
real(dep_prec),allocatable, dimension (:,:,:,:,:,:) :: drygridunc
......