Commit 03978611 authored by Espen Sollum

parallel version: fix for receptor concentrations

parent 93a369ca
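
The MPI_Reduce of receptor concentrations was previously performed with MPI_IN_PLACE unconditionally, outside the USE_MPIINPLACE conditional. It is now moved inside the USE_MPIINPLACE branch, and for builds without MPI_IN_PLACE a receive buffer creceptor0(maxreceptor,maxspec) is added to com_mod, allocated on root in mpif_init, and used to reduce and copy back creceptor, mirroring the existing gridunc/gridunc0 handling. In calcpar, the gfs_dummy_arg/ecmwf_dummy_arg workaround in the obukhov() calls is dropped: the GFS call passes akm, bkm and akz(llev) directly, and the ECMWF call passes a scalar akzdummy in place of akz.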
@@ -45,12 +45,6 @@ subroutine calcpar(n,uuh,vvh,pvh,metdata_format)
! Marian Harustak, 12.5.2017 *
! - Merged calcpar and calcpar_gfs into one routine using if-then *
! for meteo-type dependent code *
! *
! *
! Don Morton, 13.10.2017 *
! - Repairing problems from merger and documenting the merger of *
! Harustak *
! *
!*****************************************************************************
!*****************************************************************************
@@ -81,19 +75,12 @@ subroutine calcpar(n,uuh,vvh,pvh,metdata_format)
integer :: n,ix,jy,i,kz,lz,kzmin,llev,loop_start
real :: ttlev(nuvzmax),qvlev(nuvzmax),obukhov,scalev,ol,hmixplus
real :: ulev(nuvzmax),vlev(nuvzmax),ew,rh,vd(maxspec),subsceff,ylat
real :: altmin,tvold,pold,zold,pint,tv,zlev(nuvzmax),hmixdummy
real :: altmin,tvold,pold,zold,pint,tv,zlev(nuvzmax),hmixdummy,akzdummy
real :: uuh(0:nxmax-1,0:nymax-1,nuvzmax)
real :: vvh(0:nxmax-1,0:nymax-1,nuvzmax)
real :: pvh(0:nxmax-1,0:nymax-1,nuvzmax)
real,parameter :: const=r_air/ga
!! DJM - using these as meaningless arguments to the obukhov function call
!! For the GFS version, gfs_dummy_arg(nwzmax) is used in place of the
!! akm(nwzmax) and bkm(nwzmax) used in the call to the ECMWF version
!! For the ECMWF version, ecmwf_dummy_arg is used in place of the
!! akz(llev) used in the call to the GFS version.
REAL :: ecmwf_dummy_arg, gfs_dummy_arg(nwzmax)
!write(*,*) 'in calcpar writing snowheight'
!***********************************
! for test: write out snow depths
@@ -137,36 +124,6 @@ subroutine calcpar(n,uuh,vvh,pvh,metdata_format)
! 2) Calculation of inverse Obukhov length scale
!***********************************************
!! ..... Documentation by Don Morton, 13 Oct 2017 .....
!
!
! This subroutine is a result of merging an ECMWF and a GFS version.
! In the case of the call to the obukhov() function, originally the
! call for ECMWF looked like:
!
! ol=obukhov(ps(ix,jy,1,n),tt2(ix,jy,1,n),td2(ix,jy,1,n), &
! tth(ix,jy,2,n),ustar(ix,jy,1,n),sshf(ix,jy,1,n),akm,bkm)
!
!
! and the call for GFS looked like:
!
! ol=obukhov(ps(ix,jy,1,n),tt2(ix,jy,1,n),td2(ix,jy,1,n), &
! tth(ix,jy,llev,n),ustar(ix,jy,1,n),sshf(ix,jy,1,n),akz(llev))
!
! Harustak had also merged the ECMWF and GFS obukhov functions, and the
! new "merged" parameter list looked something like
!
! ol=obukhov(ps(ix,jy,1,n),tt2(ix,jy,1,n),td2(ix,jy,1,n), &
! tth(ix,jy,llev,n),ustar(ix,jy,1,n),sshf(ix,jy,1,n),akm,bkm,
! akz(llev),metdata_format)
!
! For the ECMWF call, the akz(llev) argument was problematic, and the
! logic behind the argument lists was confusing and not documented. I've
! tried to resolve this by creating two new variables, gfs_dummy_arg and
! ecmwf_dummy_arg, and using those where appropriate in the call to the
! obukhov function
!
if (metdata_format.eq.GRIBFILE_CENTRE_NCEP) then
! NCEP version: find first level above ground
llev = 0
@@ -179,13 +136,11 @@ subroutine calcpar(n,uuh,vvh,pvh,metdata_format)
! calculate inverse Obukhov length scale with tth(llev)
ol=obukhov(ps(ix,jy,1,n),tt2(ix,jy,1,n),td2(ix,jy,1,n), &
& tth(ix,jy,llev,n),ustar(ix,jy,1,n),sshf(ix,jy,1,n), &
& gfs_dummy_arg, gfs_dummy_arg, akz(llev), metdata_format)
tth(ix,jy,llev,n),ustar(ix,jy,1,n),sshf(ix,jy,1,n),akm,bkm,akz(llev),metdata_format)
else
llev=0
ol=obukhov(ps(ix,jy,1,n),tt2(ix,jy,1,n),td2(ix,jy,1,n), &
tth(ix,jy,2,n),ustar(ix,jy,1,n),sshf(ix,jy,1,n),akm,bkm, &
& ecmwf_dummy_arg, metdata_format)
tth(ix,jy,2,n),ustar(ix,jy,1,n),sshf(ix,jy,1,n),akm,bkm,akzdummy,metdata_format)
end if
if (ol.ne.0.) then
......
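
For context on the hunks above: the merged obukhov() takes both the ECMWF (akm, bkm) and GFS (akz) vertical-coordinate arguments, and each caller passes a stand-in for the set its data source lacks, which the callee never references. A minimal standalone sketch of that calling pattern, with hypothetical names (level_coord is illustrative, not a FLEXPART routine):

program dummy_arg_sketch
  implicit none
  integer, parameter :: ECMWF = 1, GFS = 2
  real :: akm(3), bkm(3), akzdummy, akz_gfs
  akm = (/0.0, 0.5, 1.0/)
  bkm = (/1.0, 0.5, 0.0/)
  akzdummy = 0.0   ! placeholder only; never read in the ECMWF branch
  akz_gfs = 0.25
  ! ECMWF caller: the akz slot is filled with a never-read scalar dummy
  print *, level_coord(ECMWF, akm(2), bkm(2), akzdummy)
  ! GFS caller: the akm/bkm slots go unused, akz carries the real value
  print *, level_coord(GFS, akm(2), bkm(2), akz_gfs)
contains
  real function level_coord(src, akm_l, bkm_l, akz_l)
    integer, intent(in) :: src
    real, intent(in) :: akm_l, bkm_l, akz_l
    if (src == ECMWF) then
      level_coord = akm_l + bkm_l   ! GFS argument akz_l not referenced
    else
      level_coord = akz_l           ! ECMWF arguments not referenced
    end if
  end function level_coord
end program dummy_arg_sketch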
@@ -652,6 +652,7 @@ module com_mod
real :: xreceptor(maxreceptor),yreceptor(maxreceptor)
real :: receptorarea(maxreceptor)
real :: creceptor(maxreceptor,maxspec)
real, allocatable, dimension(:,:) :: creceptor0
character(len=16) :: receptorname(maxreceptor)
integer :: numreceptor
......
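
The new allocatable array pairs with the reduction added in mpi_mod below. A minimal self-contained sketch of that two-buffer pattern, assuming illustrative names and sizes (nrec, nspec) rather than FLEXPART's maxreceptor/maxspec:

! Sketch: without MPI_IN_PLACE, root needs a distinct receive buffer
! for MPI_Reduce, then copies the summed result back.
program creceptor_reduce_sketch
  use mpi
  implicit none
  integer, parameter :: nrec = 4, nspec = 2
  real :: creceptor(nrec,nspec)
  real, allocatable :: creceptor0(:,:)
  integer :: rank, ierr

  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)

  creceptor = real(rank + 1)        ! each rank's partial receptor sums

  ! The patch allocates this on root only (MPI_Reduce's recvbuf is
  ! significant only at root); the sketch allocates it on every rank
  ! to stay strictly standard-conforming.
  allocate(creceptor0(nrec,nspec))

  call MPI_Reduce(creceptor, creceptor0, nrec*nspec, MPI_REAL, &
       MPI_SUM, 0, MPI_COMM_WORLD, ierr)

  if (rank == 0) creceptor = creceptor0   ! copy the total back on root
  if (rank == 0) print *, 'sum at receptor 1, species 1:', creceptor(1,1)

  call MPI_Finalize(ierr)
end program creceptor_reduce_sketch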
@@ -189,8 +189,8 @@ contains
! mpi_mode default 0, set to 2/3 if running MPI version
! mp_np number of running processes, decided at run-time
!***********************************************************************
use par_mod, only: maxpart, numwfmem, dep_prec
use com_mod, only: mpi_mode, verbosity
use par_mod, only: maxpart, numwfmem, dep_prec, maxreceptor, maxspec
use com_mod, only: mpi_mode, verbosity, creceptor0
implicit none
@@ -336,7 +336,7 @@ contains
end if
! Set maxpart per process
! eso 08/2016: Increase maxpart per process, in case of unbalanced distribution
! ESO 08/2016: Increase maxpart per process, in case of unbalanced distribution
maxpart_mpi=int(mp_maxpart_factor*real(maxpart)/real(mp_partgroup_np))
if (mp_np == 1) maxpart_mpi = maxpart
@@ -364,6 +364,13 @@ contains
reqs(:)=MPI_REQUEST_NULL
end if
! Write whether MPI_IN_PLACE is used or not
#ifdef USE_MPIINPLACE
if (lroot) write(*,*) 'Using MPI_IN_PLACE operations'
#else
if (lroot) allocate(creceptor0(maxreceptor,maxspec))
if (lroot) write(*,*) 'Not using MPI_IN_PLACE operations'
#endif
goto 101
100 write(*,*) '#### mpi_mod::mpif_init> ERROR ####', mp_ierr
@@ -2461,8 +2468,6 @@ contains
& mp_comm_used, mp_ierr)
end if
!CGZ MOVED THIS PART HERE FROM OUTSIDE MPI_IN_PLACE (see below)
!**********************************************************
! Receptor concentrations
if (lroot) then
call MPI_Reduce(MPI_IN_PLACE,creceptor,rcpt_size,mp_sp,MPI_SUM,id_root, &
@@ -2472,14 +2477,19 @@ contains
call MPI_Reduce(creceptor,0,rcpt_size,mp_sp,MPI_SUM,id_root, &
& mp_comm_used,mp_ierr)
end if
!**********************************************************
#else
call MPI_Reduce(gridunc, gridunc0, grid_size3d, mp_sp, MPI_SUM, id_root, &
& mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600
if (lroot) gridunc = gridunc0
call MPI_Reduce(creceptor, creceptor0,rcpt_size,mp_sp,MPI_SUM,id_root, &
& mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
if (lroot) creceptor = creceptor0
#endif
if ((WETDEP).and.(ldirect.gt.0)) then
@@ -2494,18 +2504,6 @@ contains
if (mp_ierr /= 0) goto 600
end if
!CGZ MOVED THIS PART TO MPI_IN_PLACE (line 2467)
!**********************************************************
! Receptor concentrations
! if (lroot) then
! call MPI_Reduce(MPI_IN_PLACE,creceptor,rcpt_size,mp_sp,MPI_SUM,id_root, &
! & mp_comm_used,mp_ierr)
! if (mp_ierr /= 0) goto 600
! else
! call MPI_Reduce(creceptor,0,rcpt_size,mp_sp,MPI_SUM,id_root, &
! & mp_comm_used,mp_ierr)
! end if
!**********************************************************
if (mp_measure_time) call mpif_mtime('commtime',1)
......
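
For comparison, the USE_MPIINPLACE branch above needs no second buffer: root passes MPI_IN_PLACE as the send buffer and accumulates directly into creceptor. A matching minimal sketch, with illustrative names (recvdummy stands in for the receive argument that the MPI standard ignores on non-root ranks):

program creceptor_inplace_sketch
  use mpi
  implicit none
  integer, parameter :: nrec = 4, nspec = 2
  real :: creceptor(nrec,nspec), recvdummy(1)
  integer :: rank, ierr

  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
  creceptor = real(rank + 1)

  if (rank == 0) then
    ! Root: MPI_IN_PLACE as sendbuf, so creceptor is both input and output
    call MPI_Reduce(MPI_IN_PLACE, creceptor, nrec*nspec, MPI_REAL, &
         MPI_SUM, 0, MPI_COMM_WORLD, ierr)
  else
    ! Non-root: recvbuf is not significant here, so a small dummy suffices
    call MPI_Reduce(creceptor, recvdummy, nrec*nspec, MPI_REAL, &
         MPI_SUM, 0, MPI_COMM_WORLD, ierr)
  end if

  if (rank == 0) print *, 'in-place total at receptor 1:', creceptor(1,1)
  call MPI_Finalize(ierr)
end program creceptor_inplace_sketch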
@@ -531,7 +531,7 @@ subroutine timemanager(metdata_format)
griduncn(:,:,:,:,:,:,:)=0.
end if
else ! :TODO: check for zeroing in the netcdf module
else
call concoutput_surf_nest(itime,outnum)
end if
else
......