Commit b0434e19 authored by Espen Sollum

Initial code to handle cloud water in nested wind fields (it is not completely implemented in this commit)
parent 9484483e
@@ -55,8 +55,8 @@ program flexpart
! Initialize arrays in com_mod
!*****************************
call com_mod_allocate(maxpart)
call com_mod_allocate_part(maxpart)
! Generate a large number of random numbers
!******************************************
@@ -170,6 +170,14 @@ program flexpart
endif
call readavailable
! If nested wind fields are used, allocate arrays
!************************************************
if (verbosity.gt.0) then
write(*,*) 'call com_mod_allocate_nests'
endif
call com_mod_allocate_nests
! Read the model grid specifications,
! both for the mother domain and eventual nests
!**********************************************
......
@@ -62,7 +62,7 @@ program flexpart
! Initialize arrays in com_mod
!*****************************
call com_mod_allocate(maxpart_mpi)
call com_mod_allocate_part(maxpart_mpi)
! Generate a large number of random numbers
!******************************************
@@ -194,7 +194,14 @@ program flexpart
write(*,*) 'call readavailable'
endif
call readavailable
!end if
! If nested wind fields are used, allocate arrays
!************************************************
if (verbosity.gt.0 .and. lroot) then
write(*,*) 'call com_mod_allocate_nests'
endif
call com_mod_allocate_nests
! Read the model grid specifications,
! both for the mother domain and eventual nests
@@ -444,15 +451,15 @@ program flexpart
! NIK 16.02.2005
if (lroot) then
call MPI_Reduce(MPI_IN_PLACE, tot_blc_count, 1, MPI_INTEGER, MPI_SUM, id_root, &
call MPI_Reduce(MPI_IN_PLACE, tot_blc_count, 1, MPI_INTEGER8, MPI_SUM, id_root, &
& mp_comm_used, mp_ierr)
call MPI_Reduce(MPI_IN_PLACE, tot_inc_count, 1, MPI_INTEGER, MPI_SUM, id_root, &
call MPI_Reduce(MPI_IN_PLACE, tot_inc_count, 1, MPI_INTEGER8, MPI_SUM, id_root, &
& mp_comm_used, mp_ierr)
else
if (mp_partgroup_pid.ge.0) then ! Skip for readwind process
call MPI_Reduce(tot_blc_count, tot_blc_count, 1, MPI_INTEGER, MPI_SUM, id_root, &
call MPI_Reduce(tot_blc_count, tot_blc_count, 1, MPI_INTEGER8, MPI_SUM, id_root, &
& mp_comm_used, mp_ierr)
call MPI_Reduce(tot_inc_count, tot_inc_count, 1, MPI_INTEGER, MPI_SUM, id_root, &
call MPI_Reduce(tot_inc_count, tot_inc_count, 1, MPI_INTEGER8, MPI_SUM, id_root, &
& mp_comm_used, mp_ierr)
end if
end if
......
@@ -137,8 +137,7 @@ module com_mod
!NIK 16.02.2015
integer :: tot_blc_count=0, tot_inc_count=0
integer(selected_int_kind(16)) :: tot_blc_count=0, tot_inc_count=0
!*********************************************************************
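The kind change above goes hand in hand with the switch from MPI_INTEGER to MPI_INTEGER8 in the MPI_Reduce calls earlier in this commit: selected_int_kind(16) requests at least 16 decimal digits, which common compilers satisfy with an 8-byte integer, and the counter width must match the MPI datatype used in the reduction. A minimal standalone sketch (not part of the commit) that checks this assumption:

program check_counter_kind
  implicit none
  ! same kind request as tot_blc_count/tot_inc_count in com_mod
  integer, parameter :: ik = selected_int_kind(16)
  integer(ik) :: tot_blc_count = 0
  ! storage_size (Fortran 2008) reports bits; 64 is expected here, matching MPI_INTEGER8
  write(*,*) 'kind =', ik, ', bits =', storage_size(tot_blc_count)
end program check_counter_kind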
@@ -486,18 +485,10 @@ module com_mod
! 3d nested fields
!*****************
real :: uun(0:nxmaxn-1,0:nymaxn-1,nzmax,numwfmem,maxnests)
real :: vvn(0:nxmaxn-1,0:nymaxn-1,nzmax,numwfmem,maxnests)
real :: wwn(0:nxmaxn-1,0:nymaxn-1,nzmax,numwfmem,maxnests)
real :: ttn(0:nxmaxn-1,0:nymaxn-1,nzmax,numwfmem,maxnests)
real :: qvn(0:nxmaxn-1,0:nymaxn-1,nzmax,numwfmem,maxnests)
real :: pvn(0:nxmaxn-1,0:nymaxn-1,nzmax,numwfmem,maxnests)
integer(kind=1) :: cloudsn(0:nxmaxn-1,0:nymaxn-1,0:nzmax,numwfmem,maxnests)
integer :: cloudsnh(0:nxmaxn-1,0:nymaxn-1,numwfmem,maxnests)
real :: rhon(0:nxmaxn-1,0:nymaxn-1,nzmax,numwfmem,maxnests)
real :: drhodzn(0:nxmaxn-1,0:nymaxn-1,nzmax,numwfmem,maxnests)
real :: tthn(0:nxmaxn-1,0:nymaxn-1,nuvzmax,numwfmem,maxnests)
real :: qvhn(0:nxmaxn-1,0:nymaxn-1,nuvzmax,numwfmem,maxnests)
real,allocatable,dimension(:,:,:,:,:) :: uun, vvn, wwn, ttn, qvn, pvn,&
& rhon, drhodzn, tthn, qvhn
integer,allocatable,dimension(:,:,:,:) :: cloudsnh
integer(kind=1),allocatable,dimension(:,:,:,:,:) :: cloudsn
! 2d nested fields
!*****************
@@ -751,18 +742,18 @@ module com_mod
!*****************************************************************
integer :: mpi_mode=0 ! .gt. 0 if running MPI version
logical :: lroot=.true. ! true if serial version, or if MPI .and. root process
contains
subroutine com_mod_allocate(nmpart)
!*******************************************************************************
! Dynamic allocation of arrays
!
! For FLEXPART version 9.2 and earlier these arrays were statically declared
! with size maxpart. This function is introduced so that the MPI version
! can declare these arrays with smaller size ("maxpart per process"), while
! the serial version allocate at run-time with size maxpart
!
!*******************************************************************************
contains
subroutine com_mod_allocate_part(nmpart)
!*******************************************************************************
! Dynamic allocation of arrays
!
! For FLEXPART version 9.2 and earlier these arrays were statically declared
! with size maxpart. This function is introduced so that the MPI version
! can declare these arrays with smaller size ("maxpart per process"), while
! the serial version allocate at run-time with size maxpart
!
!*******************************************************************************
implicit none
integer, intent(in) :: nmpart ! maximum number of particles (per process)
@@ -778,7 +769,33 @@ module com_mod
allocate(uap(nmpart),ucp(nmpart),uzp(nmpart),us(nmpart),&
& vs(nmpart),ws(nmpart),cbt(nmpart))
end subroutine com_mod_allocate
end subroutine com_mod_allocate_part
subroutine com_mod_allocate_nests
!*******************************************************************************
! Dynamic allocation of arrays
!
! For nested wind fields.
!
!*******************************************************************************
implicit none
allocate(uun(0:nxmaxn-1,0:nymaxn-1,nzmax,numwfmem,numbnests))
allocate(vvn(0:nxmaxn-1,0:nymaxn-1,nzmax,numwfmem,numbnests))
allocate(wwn(0:nxmaxn-1,0:nymaxn-1,nzmax,numwfmem,numbnests))
allocate(ttn(0:nxmaxn-1,0:nymaxn-1,nzmax,numwfmem,numbnests))
allocate(qvn(0:nxmaxn-1,0:nymaxn-1,nzmax,numwfmem,numbnests))
allocate(pvn(0:nxmaxn-1,0:nymaxn-1,nzmax,numwfmem,numbnests))
allocate(cloudsn(0:nxmaxn-1,0:nymaxn-1,0:nzmax,numwfmem,numbnests))
allocate(cloudsnh(0:nxmaxn-1,0:nymaxn-1,numwfmem,numbnests))
allocate(rhon(0:nxmaxn-1,0:nymaxn-1,nzmax,numwfmem,numbnests))
allocate(drhodzn(0:nxmaxn-1,0:nymaxn-1,nzmax,numwfmem,numbnests))
allocate(tthn(0:nxmaxn-1,0:nymaxn-1,nuvzmax,numwfmem,numbnests))
allocate(qvhn(0:nxmaxn-1,0:nymaxn-1,nuvzmax,numwfmem,numbnests))
end subroutine com_mod_allocate_nests
end module com_mod
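A possible hardening of com_mod_allocate_nests, not done in this commit, would be to test the allocation status the same way readreleases already does for specnum_rel. A sketch for one of the arrays (the error text is illustrative only):

  integer :: stat
  allocate(uun(0:nxmaxn-1,0:nymaxn-1,nzmax,numwfmem,numbnests),stat=stat)
  if (stat.ne.0) then
    write(*,*) 'ERROR: could not allocate nested wind field array uun'
    stop
  end if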
@@ -48,7 +48,7 @@ module wind_mod
! Maximum dimensions of the nested input grids
!*********************************************
! integer,parameter :: maxnests=0,nxmaxn=351,nymaxn=351 !ECMWF
integer,parameter :: maxnests=1,nxmaxn=361,nymaxn=181 !ECMWF
end module wind_mod
@@ -43,4 +43,5 @@ module wind_mod
integer,parameter :: nxmax=721,nymax=361,nuvzmax=64,nwzmax=64,nzmax=64
integer,parameter :: nxshift=0 ! for GFS or FNL
integer,parameter :: maxnests=1,nxmaxn=361,nymaxn=181
end module wind_mod
@@ -104,10 +104,6 @@ subroutine gridcheck
character(len=24) :: gribErrorMsg = 'Error reading grib file'
character(len=20) :: gribFunction = 'gridcheck'
!NIK 16.02.2015
tot_blc_count=0 !count for total number of occurrences of below cloud scavenging
tot_inc_count=0 !count for total number of occurrences of in cloud scavenging
iumax=0
iwmax=0
......
@@ -169,6 +169,18 @@ subroutine gridcheck_nests
isec1(6)=132 ! indicatorOfParameter
elseif ((parCat.eq.1).and.(parNum.eq.0).and.(typSurf.eq.105)) then ! Q
isec1(6)=133 ! indicatorOfParameter
elseif ((parCat.eq.1).and.(parNum.eq.83).and.(typSurf.eq.105)) then ! clwc
isec1(6)=246 ! indicatorOfParameter
! readclouds=.true.
! sumclouds=.false.
elseif ((parCat.eq.1).and.(parNum.eq.84).and.(typSurf.eq.105)) then ! ciwc
isec1(6)=247 ! indicatorOfParameter
!ZHG end
! ESO qc(=clwc+ciwc)
elseif ((parCat.eq.201).and.(parNum.eq.31).and.(typSurf.eq.105)) then ! qc
isec1(6)=201031 ! indicatorOfParameter
! readclouds=.true.
! sumclouds=.true.
elseif ((parCat.eq.3).and.(parNum.eq.0).and.(typSurf.eq.1)) then !SP
isec1(6)=134 ! indicatorOfParameter
elseif ((parCat.eq.2).and.(parNum.eq.32)) then ! W, actually eta dot
......
@@ -38,7 +38,7 @@ FLEXPART-GFS = FP_gfs_gfortran
FLEXPART-GFS-MPI = FP_gfs_MPI
ifeq ($(gcc), 4.9)
# Compiled libraries under users ~flexpart (or ~espen), gfortran v4.9
# Compiled libraries under users ~flexpart, gfortran v4.9
ROOT_DIR = /homevip/flexpart/
# ROOT_DIR = /homevip/espen/
@@ -62,14 +62,14 @@ endif
## OPTIMIZATION LEVEL
O_LEV = 2 # [0,1,2,3,g,s,fast]
O_LEV_DBG = 0 # [0,g]
O_LEV_DBG = g # [0,g]
## LIBRARIES
LIBS = -lgrib_api_f90 -lgrib_api -lm -ljasper -lnetcdff -llapack # -fopenmp # -llapack
LIBS = -lgrib_api_f90 -lgrib_api -lm -ljasper -lnetcdff -llapack # -fopenmp
FFLAGS = -I$(INCPATH1) -I$(INCPATH2) -O$(O_LEV) -g -m64 -mcmodel=medium -fconvert=little-endian -frecord-marker=4 -fmessage-length=0 -flto=jobserver -O$(O_LEV) $(FUSER) # -march=native
DBGFLAGS = -I$(INCPATH1) -I$(INCPATH2) -O$(O_LEV_DBG) -g3 -ggdb3 -m64 -mcmodel=medium -fconvert=little-endian -frecord-marker=4 -fmessage-length=0 -flto=jobserver -O$(O_LEV_DBG) -fbacktrace -Warray-bounds -Wall -fcheck=all $(FUSER) # -ffpe-trap=invalid,overflow,denormal,underflow,zero -fdump-core
DBGFLAGS = -I$(INCPATH1) -I$(INCPATH2) -O$(O_LEV_DBG) -g3 -ggdb3 -m64 -mcmodel=medium -fconvert=little-endian -frecord-marker=4 -fmessage-length=0 -flto=jobserver -O$(O_LEV_DBG) -fbacktrace -Wall -fdump-core $(FUSER) # -ffpe-trap=invalid,overflow,denormal,underflow,zero -Warray-bounds -fcheck=all
LDFLAGS = $(FFLAGS) -L$(LIBPATH1) $(LIBS) #-L$(LIBPATH2)
LDDEBUG = $(DBGFLAGS) -L$(LIBPATH1) $(LIBS) #-L$(LIBPATH2)
......
@@ -171,7 +171,7 @@ module par_mod
!*********************************************
!integer,parameter :: maxnests=0, nxmaxn=0, nymaxn=0
integer,parameter :: maxnests=0,nxmaxn=351,nymaxn=351 !ECMWF
!integer,parameter :: maxnests=0,nxmaxn=351,nymaxn=351 !ECMWF
!integer,parameter :: maxnests=1, nxmaxn=201, nymaxn=161 ! FNL XF
! maxnests maximum number of nested grids
......
@@ -224,7 +224,9 @@ subroutine readreleases
allocate(specnum_rel2(nspec),stat=stat)
if (stat.ne.0) write(*,*)'ERROR: could not allocate specnum_rel2'
specnum_rel2=specnum_rel(1:nspec)
deallocate(specnum_rel)
deallocate(specnum_rel)
! eso: BUG, crashes here for nspec=12 and maxspec=6,
! TODO: catch error and exit
allocate(specnum_rel(nspec),stat=stat)
if (stat.ne.0) write(*,*)'ERROR: could not allocate specnum_rel'
specnum_rel=specnum_rel2
......
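The BUG/TODO note above (crash for nspec=12 with maxspec=6) asks for the error to be caught and the run terminated; one way to read that TODO is to validate nspec against maxspec and to check the allocation status before continuing. A rough sketch, with illustrative error messages only:

  if (nspec.gt.maxspec) then
    write(*,*) 'ERROR: nspec exceeds maxspec (',nspec,'>',maxspec,')'
    stop
  end if
  allocate(specnum_rel(nspec),stat=stat)
  if (stat.ne.0) then
    write(*,*) 'ERROR: could not allocate specnum_rel'
    stop
  end if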
@@ -189,7 +189,7 @@ subroutine readwind(indj,n,uuh,vvh,wwh)
isec1(6)=132 ! indicatorOfParameter
elseif ((parCat.eq.1).and.(parNum.eq.0).and.(typSurf.eq.105)) then ! Q
isec1(6)=133 ! indicatorOfParameter
! ESO Cloud water is in a) fields CLWC and CIWC, *or* b) field qc
! ESO Cloud water is in a) fields CLWC and CIWC, *or* b) field QC
elseif ((parCat.eq.1).and.(parNum.eq.83).and.(typSurf.eq.105)) then ! clwc
isec1(6)=246 ! indicatorOfParameter
elseif ((parCat.eq.1).and.(parNum.eq.84).and.(typSurf.eq.105)) then ! ciwc
@@ -376,7 +376,6 @@ subroutine readwind(indj,n,uuh,vvh,wwh)
clwch(i,j,nlev_ec-k+2,n)=zsec4(nxfield*(ny-j-1)+i+1)
readclouds=.true.
sumclouds=.true.
!if (clwch(i,j,nlev_ec-k+2,n) .gt. 0) write(*,*) 'readwind: found water!', clwch(i,j,nlev_ec-k+2,n)
endif
end do
......
@@ -172,6 +172,14 @@ subroutine readwind_nests(indj,n,uuhn,vvhn,wwhn)
isec1(6)=132 ! indicatorOfParameter
elseif ((parCat.eq.1).and.(parNum.eq.0).and.(typSurf.eq.105)) then ! Q
isec1(6)=133 ! indicatorOfParameter
! ESO Cloud water is in a) fields CLWC and CIWC, *or* b) field QC
elseif ((parCat.eq.1).and.(parNum.eq.83).and.(typSurf.eq.105)) then ! clwc
isec1(6)=246 ! indicatorOfParameter
elseif ((parCat.eq.1).and.(parNum.eq.84).and.(typSurf.eq.105)) then ! ciwc
isec1(6)=247 ! indicatorOfParameter
! ESO qc(=clwc+ciwc):
elseif ((parCat.eq.201).and.(parNum.eq.31).and.(typSurf.eq.105)) then ! qc
isec1(6)=201031 ! indicatorOfParameter
elseif ((parCat.eq.3).and.(parNum.eq.0).and.(typSurf.eq.1)) then !SP
isec1(6)=134 ! indicatorOfParameter
elseif ((parCat.eq.2).and.(parNum.eq.32)) then ! W, actually eta dot !
@@ -202,9 +210,9 @@ subroutine readwind_nests(indj,n,uuhn,vvhn,wwhn)
isec1(6)=146 ! indicatorOfParameter
elseif ((parCat.eq.4).and.(parNum.eq.9).and.(typSurf.eq.1)) then ! SR
isec1(6)=176 ! indicatorOfParameter
elseif ((parCat.eq.2).and.(parNum.eq.17) .or. parId .eq. 180) then ! EWSS !added by mc to make it consistent with new readwind.f90
elseif ((parCat.eq.2).and.(parNum.eq.38) .or. parId .eq. 180) then ! EWSS !added by mc to make it consistent with new readwind.f90
isec1(6)=180 ! indicatorOfParameter
elseif ((parCat.eq.2).and.(parNum.eq.18) .or. parId .eq. 181) then ! NSSS !added by mc to make it consistent with new readwind.f90
elseif ((parCat.eq.2).and.(parNum.eq.37) .or. parId .eq. 181) then ! NSSS !added by mc to make it consistent with new readwind.f90
isec1(6)=181 ! indicatorOfParameter
elseif ((parCat.eq.3).and.(parNum.eq.4)) then ! ORO
isec1(6)=129 ! indicatorOfParameter
@@ -335,6 +343,28 @@ subroutine readwind_nests(indj,n,uuhn,vvhn,wwhn)
if(isec1(6).eq.131) iumax=max(iumax,nlev_ec-k+1)
if(isec1(6).eq.135) iwmax=max(iwmax,nlev_ec-k+1)
! ESO TODO:
! -add check for if one of clwc/ciwc missing (error),
! also if all 3 cw fields present, use qc and disregard the others
! -use same flags readclouds/sumclouds as in mother grid? this assumes
! that both the nested and mother grids contain CW in same format
if(isec1(6).eq.246) then !! CLWC Cloud liquid water content [kg/kg]
clwch(i,j,nlev_ec-k+2,n)=zsec4(nxfield*(ny-j-1)+i+1)
! readclouds=.true.
! sumclouds=.false.
endif
if(isec1(6).eq.247) then !! CIWC Cloud ice water content
ciwch(i,j,nlev_ec-k+2,n)=zsec4(nxfield*(ny-j-1)+i+1)
endif
!ZHG end
!ESO read qc (=clwc+ciwc)
if(isec1(6).eq.201031) then !! QC Cloud liquid water content [kg/kg]
clwch(i,j,nlev_ec-k+2,n)=zsec4(nxfield*(ny-j-1)+i+1)
! readclouds=.true.
! sumclouds=.true.
endif
end do
end do
......
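The ESO TODO in this hunk (reject a nest where only one of CLWC/CIWC is present, and prefer QC when all three cloud-water fields exist) could be implemented with presence flags recorded while the GRIB messages are decoded. A rough sketch; the logicals lclwc, lciwc and lqc are hypothetical and do not exist in the current code:

  ! hypothetical flags, set to .true. when isec1(6) = 246, 247 and 201031 are read
  logical :: lclwc, lciwc, lqc
  if (lqc) then
    ! qc already holds clwc+ciwc: keep it and disregard the separate fields
  else if (lclwc.neqv.lciwc) then
    write(*,*) 'ERROR: nested wind field contains only one of CLWC/CIWC'
    stop
  end if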
@@ -577,7 +577,7 @@ subroutine verttransform(n,uuh,vvh,wwh,pvh)
! to scavenging. Also clouds that are not precipitating are defined, which may be
! used to include future cloud processing by non-precipitating clouds.
!***********************************************************************************
write(*,*) 'using cloud water from ECMWF'
write(*,*) 'Global ECMWF fields: using cloud water'
clw(:,:,:,n)=0.0
icloud_stats(:,:,:,n)=0.0
clouds(:,:,:,n)=0
@@ -634,12 +634,16 @@ subroutine verttransform(n,uuh,vvh,wwh,pvh)
endif ! precipitation
end do
end do
! eso: copy the relevant data to clw4 to reduce amount of communicated data for MPI
clw4(:,:,n) = icloud_stats(:,:,4,n)
!**************************************************************************
else ! use old definitions
!**************************************************************************
! create a cloud and rainout/washout field, clouds occur where rh>80%
! total cloudheight is stored at level 0
write(*,*) 'using cloud water from Parameterization'
write(*,*) 'Global fields: using cloud water from Parameterization'
do jy=0,nymin1
do ix=0,nxmin1
! OLD METHOD
@@ -680,8 +684,6 @@ subroutine verttransform(n,uuh,vvh,wwh,pvh)
end do
endif !readclouds
! eso: copy the relevant data to clw4 to reduce amount of communicated data for MPI
clw4(:,:,n) = icloud_stats(:,:,4,n)
!********* TEST ***************
! WRITE OUT SOME TEST VARIABLES
......
@@ -281,6 +281,7 @@ subroutine verttransform_nests(n,uuhn,vvhn,wwhn,pvhn)
!write (*,*) 'initializing nested cloudsn, n:',n
! create a cloud and rainout/washout field, cloudsn occur where rh>80%
write(*,*) 'Nested fields: using cloud water from Parameterization'
do jy=0,nyn(l)-1
do ix=0,nxn(l)-1
rain_cloud_above=0
......