Commit d005a67c authored by Sabine's avatar Sabine

Merge remote-tracking branch 'refs/remotes/origin/dev' into dev

parents 5d74ed91 0c8c7f2f
......@@ -18,7 +18,7 @@
CTL= -5.0000000, ! CTL>1, ABL time step = (Lagrangian timescale (TL))/CTL, uses LSYNCTIME if CTL<0
IFINE= 4, ! Reduction for time step in vertical transport, used only if CTL>1
IOUT= 1, ! Output type: [1]mass 2]pptv 3]1&2 4]plume 5]1&4, +8 for NetCDF output
IPOUT= 0, ! Particle position output: 0]no 1]every output 2]only at end
IPOUT= 0, ! Particle position output: 0]no 1]every output 2]only at end 3]time averaged
LSUBGRID= 0, ! Increase of ABL heights due to sub-grid scale orographic variations;[0]off 1]on
LCONVECTION= 1, ! Switch for convection parameterization;0]off [1]on
LAGESPECTRA= 0, ! Switch for calculation of age spectra (needs AGECLASSES);[0]off 1]on
......
......@@ -67,13 +67,7 @@ program flexpart
integer :: metdata_format = GRIBFILE_CENTRE_UNKNOWN
integer :: detectformat
! Initialize arrays in com_mod
!*****************************
call com_mod_allocate_part(maxpart)
! Generate a large number of random numbers
!******************************************
......@@ -171,6 +165,11 @@ program flexpart
endif
endif
! Initialize arrays in com_mod
!*****************************
call com_mod_allocate_part(maxpart)
! Read the age classes to be used
!********************************
if (verbosity.gt.0) then
......
......@@ -76,12 +76,7 @@ program flexpart
if (mp_measure_time) call mpif_mtime('flexpart',0)
! Initialize arrays in com_mod
!*****************************
if(.not.(lmpreader.and.lmp_use_reader)) call com_mod_allocate_part(maxpart_mpi)
! Generate a large number of random numbers
!******************************************
......@@ -179,6 +174,11 @@ program flexpart
endif
endif
! Initialize arrays in com_mod
!*****************************
if(.not.(lmpreader.and.lmp_use_reader)) call com_mod_allocate_part(maxpart_mpi)
! Read the age classes to be used
!********************************
......@@ -412,7 +412,7 @@ program flexpart
if (nested_output.ne.1.and.surf_only.eq.1) call writeheader_surf
end if ! (mpif_pid == 0)
if (mp_measure_time) call mpif_mtime('iotime',0)
if (mp_measure_time) call mpif_mtime('iotime',1)
if (verbosity.gt.0 .and. lroot) then
print*,'call openreceptors'
......
......@@ -18,6 +18,8 @@ module com_mod
implicit none
!****************************************************************
! Variables defining where FLEXPART input/output files are stored
!****************************************************************
......@@ -68,7 +70,7 @@ module com_mod
! outstep = real(abs(loutstep))
real :: ctl,fine
integer :: ifine,iout,ipout,ipin,iflux,mdomainfill
integer :: ifine,iout,ipout,ipin,iflux,mdomainfill,ipoutfac
integer :: mquasilag,nested_output,ind_source,ind_receptor
integer :: ind_rel,ind_samp,ioutputforeachrelease,linit_cond,surf_only
logical :: turbswitch
......@@ -81,6 +83,7 @@ module com_mod
! iflux flux calculation options: 1 calculation of fluxes, 2 no fluxes
! iout output options: 1 conc. output (ng/m3), 2 mixing ratio (pptv), 3 both
! ipout particle dump options: 0 no, 1 every output interval, 2 only at end
! ipoutfac increase particle dump interval by factor (default 1)
! ipin read in particle positions from dumped file from a previous run
! fine real(ifine)
! mdomainfill 0: normal run
......@@ -127,7 +130,6 @@ module com_mod
logical :: gdomainfill
! gdomainfill .T., if domain-filling is global, .F. if not
!ZHG SEP 2015 whether or not to read clouds from GRIB
......@@ -650,6 +652,7 @@ module com_mod
real :: xreceptor(maxreceptor),yreceptor(maxreceptor)
real :: receptorarea(maxreceptor)
real :: creceptor(maxreceptor,maxspec)
real, allocatable, dimension(:,:) :: creceptor0
character(len=16) :: receptorname(maxreceptor)
integer :: numreceptor
......@@ -673,6 +676,14 @@ module com_mod
real, allocatable, dimension(:,:) :: xmass1
real, allocatable, dimension(:,:) :: xscav_frac1
! Variables used for writing out interval averages for partoutput
!****************************************************************
integer, allocatable, dimension(:) :: npart_av
real, allocatable, dimension(:) :: part_av_cartx,part_av_carty,part_av_cartz,part_av_z,part_av_topo
real, allocatable, dimension(:) :: part_av_pv,part_av_qv,part_av_tt,part_av_rho,part_av_tro,part_av_hmix
real, allocatable, dimension(:) :: part_av_uu,part_av_vv,part_av_energy
! eso: Moved from timemanager
real, allocatable, dimension(:) :: uap,ucp,uzp,us,vs,ws
integer(kind=2), allocatable, dimension(:) :: cbt
......@@ -779,13 +790,21 @@ contains
allocate(itra1(nmpart),npoint(nmpart),nclass(nmpart),&
& idt(nmpart),itramem(nmpart),itrasplit(nmpart),&
& xtra1(nmpart),ytra1(nmpart),ztra1(nmpart),&
& xmass1(nmpart, maxspec),&
& checklifetime(nmpart,maxspec), species_lifetime(maxspec,2))!CGZ-lifetime
& xmass1(nmpart, maxspec)) ! ,&
! & checklifetime(nmpart,maxspec), species_lifetime(maxspec,2))!CGZ-lifetime
if (ipout.eq.3) then
allocate(npart_av(nmpart),part_av_cartx(nmpart),part_av_carty(nmpart),&
& part_av_cartz(nmpart),part_av_z(nmpart),part_av_topo(nmpart))
allocate(part_av_pv(nmpart),part_av_qv(nmpart),part_av_tt(nmpart),&
& part_av_rho(nmpart),part_av_tro(nmpart),part_av_hmix(nmpart))
allocate(part_av_uu(nmpart),part_av_vv(nmpart),part_av_energy(nmpart))
end if
allocate(uap(nmpart),ucp(nmpart),uzp(nmpart),us(nmpart),&
& vs(nmpart),ws(nmpart),cbt(nmpart))
end subroutine com_mod_allocate_part
......
......@@ -86,6 +86,10 @@ subroutine init_domainfill
endif
endif
! Exit here if resuming a run from particle dump
!***********************************************
if (gdomainfill.and.ipin.ne.0) return
! Do not release particles twice (i.e., not in both the leftmost and the rightmost
! grid cell) for a global domain
!*****************************************************************************
......@@ -413,7 +417,7 @@ subroutine init_domainfill
! This overrides any previous calculations.
!***************************************************************************
if (ipin.eq.1) then
if ((ipin.eq.1).and.(.not.gdomainfill)) then
open(unitboundcond,file=path(2)(1:length(2))//'boundcond.bin', &
form='unformatted')
read(unitboundcond) numcolumn_we,numcolumn_sn, &
......
......@@ -109,6 +109,10 @@ subroutine init_domainfill
endif
endif
! Exit here if resuming a run from particle dump
!***********************************************
if (gdomainfill.and.ipin.ne.0) return
! Do not release particles twice (i.e., not in both the leftmost and the rightmost
! grid cell) for a global domain
!*****************************************************************************
......@@ -212,7 +216,6 @@ subroutine init_domainfill
pp(nz)=rho(ix,jy,nz,1)*r_air*tt(ix,jy,nz,1)
colmass(ix,jy)=(pp(1)-pp(nz))/ga*gridarea(jy)
colmasstotal=colmasstotal+colmass(ix,jy)
end do
end do
......@@ -465,7 +468,7 @@ subroutine init_domainfill
!***************************************************************************
! eso TODO: only needed for root process
if (ipin.eq.1) then
if ((ipin.eq.1).and.(.not.gdomainfill)) then
open(unitboundcond,file=path(2)(1:length(2))//'boundcond.bin', &
form='unformatted')
read(unitboundcond) numcolumn_we,numcolumn_sn, &
......@@ -473,27 +476,33 @@ subroutine init_domainfill
close(unitboundcond)
endif
numpart = numpart/mp_partgroup_np
if (mod(numpart,mp_partgroup_np).ne.0) numpart=numpart+1
else ! Allocate dummy arrays for receiving processes
allocate(itra1_tmp(nullsize),npoint_tmp(nullsize),nclass_tmp(nullsize),&
& idt_tmp(nullsize),itramem_tmp(nullsize),itrasplit_tmp(nullsize),&
& xtra1_tmp(nullsize),ytra1_tmp(nullsize),ztra1_tmp(nullsize),&
& xmass1_tmp(nullsize, nullsize))
if (ipin.eq.0) then
numpart = numpart/mp_partgroup_np
if (mod(numpart,mp_partgroup_np).ne.0) numpart=numpart+1
end if
else ! Allocate dummy arrays for receiving processes
if (ipin.eq.0) then
allocate(itra1_tmp(nullsize),npoint_tmp(nullsize),nclass_tmp(nullsize),&
& idt_tmp(nullsize),itramem_tmp(nullsize),itrasplit_tmp(nullsize),&
& xtra1_tmp(nullsize),ytra1_tmp(nullsize),ztra1_tmp(nullsize),&
& xmass1_tmp(nullsize, nullsize))
end if
end if ! end if(lroot)
end if ! end if(lroot)
! Distribute particles to other processes (numpart is 'per-process', not total)
call MPI_Bcast(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
! eso TODO: xmassperparticle: not necessary to send
call MPI_Bcast(xmassperparticle, 1, mp_sp, id_root, mp_comm_used, mp_ierr)
call mpif_send_part_properties(numpart)
! Only if not restarting from previous run
if (ipin.eq.0) then
call MPI_Bcast(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
call mpif_send_part_properties(npart(1)/mp_partgroup_np)
! Deallocate the temporary arrays used for all particles
deallocate(itra1_tmp,npoint_tmp,nclass_tmp,idt_tmp,itramem_tmp,&
deallocate(itra1_tmp,npoint_tmp,nclass_tmp,idt_tmp,itramem_tmp,&
& itrasplit_tmp,xtra1_tmp,ytra1_tmp,ztra1_tmp,xmass1_tmp)
end if
end subroutine init_domainfill
......@@ -117,6 +117,7 @@ mpi_mod.o
## Serial versions (MPI version with same functionality and name '_mpi.f90' exists)
OBJECTS_SERIAL = \
releaseparticles.o partoutput.o \
partoutput_average.o \
conccalc.o \
init_domainfill.o concoutput.o \
timemanager.o FLEXPART.o \
......@@ -131,7 +132,7 @@ OBJECTS_SERIAL = \
## For MPI version
OBJECTS_MPI = releaseparticles_mpi.o partoutput_mpi.o \
conccalc_mpi.o \
partoutput_average_mpi.o conccalc_mpi.o \
init_domainfill_mpi.o concoutput_mpi.o \
timemanager_mpi.o FLEXPART_MPI.o \
readpartpositions_mpi.o \
......@@ -148,7 +149,7 @@ OBJECTS_NCF = netcdf_output_mod.o
OBJECTS = \
advance.o initialize.o \
writeheader.o writeheader_txt.o \
writeprecip.o \
partpos_average.o writeprecip.o \
writeheader_surf.o assignland.o\
part0.o gethourlyOH.o\
caldate.o partdep.o \
......@@ -347,7 +348,10 @@ outgrid_init.o: com_mod.o flux_mod.o oh_mod.o outg_mod.o par_mod.o unc_mod.o
outgrid_init_nest.o: com_mod.o outg_mod.o par_mod.o unc_mod.o
part0.o: par_mod.o
partdep.o: par_mod.o
partpos_average.o: com_mod.o par_mod.o
partoutput.o: com_mod.o par_mod.o
partoutput_average.o: com_mod.o par_mod.o
partoutput_average_mpi.o: com_mod.o par_mod.o mpi_mod.o
partoutput_mpi.o: com_mod.o mpi_mod.o par_mod.o
partoutput_short.o: com_mod.o par_mod.o
partoutput_short_mpi.o: com_mod.o mpi_mod.o par_mod.o
......
......@@ -87,6 +87,7 @@ module mpi_mod
! Variables for MPI processes in the 'particle' group
integer, allocatable, dimension(:) :: mp_partgroup_rank
integer, allocatable, dimension(:) :: npart_per_process
integer :: mp_partgroup_comm, mp_partgroup_pid, mp_partgroup_np
integer :: mp_seed=0
......@@ -124,7 +125,7 @@ module mpi_mod
! mp_measure_time Measure cpu/wall time, write out at end of run
! mp_time_barrier Measure MPI barrier time
! mp_exact_numpart Use an extra MPI communication to give the exact number of particles
! to standard output (this does *not* otherwise affect the simulation)
! to standard output (this does not otherwise affect the simulation)
logical, parameter :: mp_dbg_mode = .false.
logical, parameter :: mp_dev_mode = .false.
logical, parameter :: mp_dbg_out = .false.
......@@ -189,8 +190,8 @@ contains
! mpi_mode default 0, set to 2/3 if running MPI version
! mp_np number of running processes, decided at run-time
!***********************************************************************
use par_mod, only: maxpart, numwfmem, dep_prec
use com_mod, only: mpi_mode, verbosity
use par_mod, only: maxpart, numwfmem, dep_prec, maxreceptor, maxspec
use com_mod, only: mpi_mode, verbosity, creceptor0
implicit none
......@@ -336,7 +337,7 @@ contains
end if
! Set maxpart per process
! eso 08/2016: Increase maxpart per process, in case of unbalanced distribution
! ESO 08/2016: Increase maxpart per process, in case of unbalanced distribution
maxpart_mpi=int(mp_maxpart_factor*real(maxpart)/real(mp_partgroup_np))
if (mp_np == 1) maxpart_mpi = maxpart
......@@ -364,6 +365,16 @@ contains
reqs(:)=MPI_REQUEST_NULL
end if
! Allocate array for number of particles per process
allocate(npart_per_process(0:mp_partgroup_np-1))
! Write whether MPI_IN_PLACE is used or not
#ifdef USE_MPIINPLACE
if (lroot) write(*,*) 'Using MPI_IN_PLACE operations'
#else
if (lroot) allocate(creceptor0(maxreceptor,maxspec))
if (lroot) write(*,*) 'Not using MPI_IN_PLACE operations'
#endif
goto 101
100 write(*,*) '#### mpi_mod::mpif_init> ERROR ####', mp_ierr
......@@ -558,7 +569,7 @@ contains
! "numpart" is larger than the actual used, so we reduce it if there are
! invalid particles at the end of the arrays
601 do i=num_part, 1, -1
601 do i=numpart, 1, -1
if (itra1(i).eq.-999999999) then
numpart=numpart-1
else
......@@ -597,7 +608,7 @@ contains
real :: pmin,z
integer :: i,jj,nn, num_part=1,m,imin, num_trans
logical :: first_iter
integer,allocatable,dimension(:) :: numparticles_mpi, idx_arr
integer,allocatable,dimension(:) :: idx_arr
real,allocatable,dimension(:) :: sorted ! TODO: we don't really need this
! Exit if running with only 1 process
......@@ -606,20 +617,22 @@ contains
! All processes exchange information on number of particles
!****************************************************************************
allocate(numparticles_mpi(0:mp_partgroup_np-1), &
&idx_arr(0:mp_partgroup_np-1), sorted(0:mp_partgroup_np-1))
allocate( idx_arr(0:mp_partgroup_np-1), sorted(0:mp_partgroup_np-1))
call MPI_Allgather(numpart, 1, MPI_INTEGER, numparticles_mpi, &
call MPI_Allgather(numpart, 1, MPI_INTEGER, npart_per_process, &
& 1, MPI_INTEGER, mp_comm_used, mp_ierr)
! Sort from lowest to highest
! Initial guess: correct order
sorted(:) = numparticles_mpi(:)
sorted(:) = npart_per_process(:)
do i=0, mp_partgroup_np-1
idx_arr(i) = i
end do
! Do not rebalance particles for ipout=3
if (ipout.eq.3) return
! For each successive element in index array, see if a lower value exists
do i=0, mp_partgroup_np-2
pmin=sorted(i)
......@@ -644,13 +657,13 @@ contains
m=mp_partgroup_np-1 ! index for last sorted process (most particles)
do i=0,mp_partgroup_np/2-1
num_trans = numparticles_mpi(idx_arr(m)) - numparticles_mpi(idx_arr(i))
num_trans = npart_per_process(idx_arr(m)) - npart_per_process(idx_arr(i))
if (mp_partid.eq.idx_arr(m).or.mp_partid.eq.idx_arr(i)) then
if ( numparticles_mpi(idx_arr(m)).gt.mp_min_redist.and.&
& real(num_trans)/real(numparticles_mpi(idx_arr(m))).gt.mp_redist_fract) then
if ( npart_per_process(idx_arr(m)).gt.mp_min_redist.and.&
& real(num_trans)/real(npart_per_process(idx_arr(m))).gt.mp_redist_fract) then
! DBG
! write(*,*) 'mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, numparticles_mpi', &
! &mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, numparticles_mpi
! write(*,*) 'mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, npart_per_process', &
! &mp_partid, idx_arr(m), idx_arr(i), mp_min_redist, num_trans, npart_per_process
! DBG
call mpif_redist_part(itime, idx_arr(m), idx_arr(i), num_trans/2)
end if
......@@ -658,7 +671,7 @@ contains
m=m-1
end do
deallocate(numparticles_mpi, idx_arr, sorted)
deallocate(idx_arr, sorted)
end subroutine mpif_calculate_part_redist
......@@ -1960,7 +1973,7 @@ contains
! For now assume that data at all steps either have or do not have water
if (readclouds) then
j=j+1
call MPI_Irecv(ctwc(:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
call MPI_Irecv(ctwc(:,:,mind),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
&MPI_COMM_WORLD,reqs(j),mp_ierr)
if (mp_ierr /= 0) goto 600
end if
......@@ -2325,7 +2338,7 @@ contains
! For now assume that data at all steps either have or do not have water
if (readclouds) then
j=j+1
call MPI_Irecv(ctwcn(:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
call MPI_Irecv(ctwcn(:,:,mind,k),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
&MPI_COMM_WORLD,reqs(j),mp_ierr)
if (mp_ierr /= 0) goto 600
end if
......@@ -2461,12 +2474,28 @@ contains
& mp_comm_used, mp_ierr)
end if
! Receptor concentrations
if (lroot) then
call MPI_Reduce(MPI_IN_PLACE,creceptor,rcpt_size,mp_sp,MPI_SUM,id_root, &
& mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
else
call MPI_Reduce(creceptor,0,rcpt_size,mp_sp,MPI_SUM,id_root, &
& mp_comm_used,mp_ierr)
end if
#else
call MPI_Reduce(gridunc, gridunc0, grid_size3d, mp_sp, MPI_SUM, id_root, &
& mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600
if (lroot) gridunc = gridunc0
call MPI_Reduce(creceptor, creceptor0,rcpt_size,mp_sp,MPI_SUM,id_root, &
& mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
if (lroot) creceptor = creceptor0
#endif
if ((WETDEP).and.(ldirect.gt.0)) then
......@@ -2481,15 +2510,6 @@ contains
if (mp_ierr /= 0) goto 600
end if
! Receptor concentrations
if (lroot) then
call MPI_Reduce(MPI_IN_PLACE,creceptor,rcpt_size,mp_sp,MPI_SUM,id_root, &
& mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
else
call MPI_Reduce(creceptor,0,rcpt_size,mp_sp,MPI_SUM,id_root, &
& mp_comm_used,mp_ierr)
end if
if (mp_measure_time) call mpif_mtime('commtime',1)
......@@ -2699,19 +2719,19 @@ contains
& mp_vt_time_beg)
end if
case ('readwind')
if (imode.eq.0) then
call cpu_time(mp_readwind_time_beg)
mp_readwind_wtime_beg = mpi_wtime()
else
call cpu_time(mp_readwind_time_end)
mp_readwind_wtime_end = mpi_wtime()
case ('readwind')
if (imode.eq.0) then
call cpu_time(mp_readwind_time_beg)
mp_readwind_wtime_beg = mpi_wtime()
else
call cpu_time(mp_readwind_time_end)
mp_readwind_wtime_end = mpi_wtime()
mp_readwind_time_total = mp_readwind_time_total + &
&(mp_readwind_time_end - mp_readwind_time_beg)
mp_readwind_wtime_total = mp_readwind_wtime_total + &
&(mp_readwind_wtime_end - mp_readwind_wtime_beg)
end if
mp_readwind_time_total = mp_readwind_time_total + &
&(mp_readwind_time_end - mp_readwind_time_beg)
mp_readwind_wtime_total = mp_readwind_wtime_total + &
&(mp_readwind_wtime_end - mp_readwind_wtime_beg)
end if
case ('commtime')
if (imode.eq.0) then
......@@ -2787,10 +2807,10 @@ contains
& mp_getfields_wtime_total
write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR GETFIELDS:',&
& mp_getfields_time_total
write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR READWIND:',&
& mp_readwind_wtime_total
write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR READWIND:',&
& mp_readwind_time_total
! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR READWIND:',&
! & mp_readwind_wtime_total
! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR READWIND:',&
! & mp_readwind_time_total
write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR FILE IO:',&
& mp_io_wtime_total
write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR FILE IO:',&
......
......@@ -92,7 +92,7 @@ module netcdf_output_mod
logical, parameter :: min_size = .false. ! if set true, redundant fields (topography) are not written to minimize file size
character(len=255), parameter :: institution = 'NILU'
integer :: tpointer
integer :: tpointer=0
character(len=255) :: ncfname, ncfnamen
! netcdf dimension and variable IDs for main and nested output grid
......
......@@ -279,7 +279,7 @@ module par_mod
!************************************
integer,parameter :: unitpath=1, unitcommand=1, unitageclasses=1, unitgrid=1
integer,parameter :: unitavailab=1, unitreleases=88, unitpartout=93
integer,parameter :: unitavailab=1, unitreleases=88, unitpartout=93, unitpartout_average=105
integer,parameter :: unitpartin=93, unitflux=98, unitouttraj=96
integer,parameter :: unitvert=1, unitoro=1, unitpoin=1, unitreceptor=1
integer,parameter :: unitoutgrid=97, unitoutgridppt=99, unitoutinfo=1
......
......@@ -70,7 +70,7 @@ subroutine partoutput(itime)
! Open output file and write the output
!**************************************
if (ipout.eq.1) then
if (ipout.eq.1.or.ipout.eq.3) then
open(unitpartout,file=path(2)(1:length(2))//'partposit_'//adate// &
atime,form='unformatted')
else
......
!**********************************************************************
! Copyright 1998,1999,2000,2001,2002,2005,2007,2008,2009,2010 *
! Andreas Stohl, Petra Seibert, A. Frank, Gerhard Wotawa, *
! Caroline Forster, Sabine Eckhardt, John Burkhart, Harald Sodemann *
! *
! This file is part of FLEXPART. *
! *
! FLEXPART is free software: you can redistribute it and/or modify *
! it under the terms of the GNU General Public License as published by*
! the Free Software Foundation, either version 3 of the License, or *
! (at your option) any later version. *
! *
! FLEXPART is distributed in the hope that it will be useful, *
! but WITHOUT ANY WARRANTY; without even the implied warranty of *
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
! GNU General Public License for more details. *
! *
! You should have received a copy of the GNU General Public License *
! along with FLEXPART. If not, see <http://www.gnu.org/licenses/>. *
!**********************************************************************
subroutine partoutput_average(itime)
! i
!*****************************************************************************
! *
! Dump all particle positions *
! *
! Author: A. Stohl *
! *
! 12 March 1999 *
! *
!*****************************************************************************
! *
! Variables: *
! *
!*****************************************************************************
use par_mod
use com_mod
implicit none
real(kind=dp) :: jul
integer :: itime,i,j,jjjjmmdd,ihmmss
integer :: ix,jy,ixp,jyp,indexh,m,il,ind,indz,indzp
real :: xlon,ylat
real :: dt1,dt2,dtt,ddx,ddy,rddx,rddy,p1,p2,p3,p4,dz1,dz2,dz
real :: topo,hm(2),hmixi,pv1(2),pvprof(2),pvi,qv1(2),qvprof(2),qvi
real :: tt1(2),ttprof(2),tti,rho1(2),rhoprof(2),rhoi
real :: tr(2),tri,zlim
character :: adate*8,atime*6
integer(kind=2) :: ishort_xlon(maxpart),ishort_ylat(maxpart),ishort_z(maxpart)
integer(kind=2) :: ishort_topo(maxpart),ishort_tro(maxpart),ishort_hmix(maxpart)
integer(kind=2) :: ishort_pv(maxpart),ishort_rho(maxpart),ishort_qv(maxpart)
integer(kind=2) :: ishort_tt(maxpart),ishort_uu(maxpart),ishort_vv(maxpart)
integer(kind=2) :: ishort_energy(maxpart)
! Determine current calendar date, needed for the file name
!**********************************************************
jul=bdate+real(itime,kind=dp)/86400._dp
call caldate(jul,jjjjmmdd,ihmmss)
write(adate,'(i8.8)') jjjjmmdd
write(atime,'(i6.6)') ihmmss
! Some variables needed for temporal interpolation
!*************************************************
dt1=real(itime-memtime(1))
dt2=real(memtime(2)-itime)
dtt=1./(dt1+dt2)
! Open output file and write the output
!**************************************
open(unitpartout_average,file=path(2)(1:length(2))//'partposit_average_'//adate// &
atime,form='unformatted',access='direct',status='replace',recl=24)
! Write current time to file
!***************************
! write(unitpartout_average) itime,numpart
do i=1,numpart
! Take only valid particles
!**************************
if (itra1(i).eq.itime) then
part_av_topo(i)=part_av_topo(i)/float(npart_av(i))
part_av_z(i)=part_av_z(i)/float(npart_av(i))
part_av_pv(i)=part_av_pv(i)/float(npart_av(i))
part_av_qv(i)=part_av_qv(i)/float(npart_av(i))
part_av_tt(i)=part_av_tt(i)/float(npart_av(i))
part_av_uu(i)=part_av_uu(i)/float(npart_av(i))
part_av_vv(i)=part_av_vv(i)/float(npart_av(i))
part_av_rho(i)=part_av_rho(i)/float(npart_av(i))
part_av_tro(i)=part_av_tro(i)/float(npart_av(i))
part_av_hmix(i)=part_av_hmix(i)/float(npart_av(i))
part_av_energy(i)=part_av_energy(i)/float(npart_av(i))
part_av_cartx(i)=part_av_cartx(i)/float(npart_av(i))
part_av_carty(i)=part_av_carty(i)/float(npart_av(i))
part_av_cartz(i)=part_av_cartz(i)/float(npart_av(i))
! Project Cartesian coordinates back onto Earth's surface
! #######################################################
xlon=atan2(part_av_cartx(i),-1.*part_av_carty(i))
ylat=atan2(part_av_cartz(i),sqrt(part_av_cartx(i)*part_av_cartx(i)+ &
part_av_carty(i)*part_av_carty(i)))
xlon=xlon/pi180
ylat=ylat/pi180
! Convert all data to integer*2 variables (from -32768 to 32767) for compressed output
!*************************************************************************************
if (xlon.gt.180.) xlon=xlon-360.
if (xlon.lt.-180.) xlon=xlon+360.
ishort_xlon(i)=nint(xlon*180.)
ishort_ylat(i)=nint(ylat*360.)
zlim=(part_av_z(i)*2.-32000.)
zlim=min(zlim,32766.)
zlim=max(zlim,-32766.)
ishort_z(i)=nint(zlim)
zlim=(part_av_topo(i)*2.-32000.)
zlim=min(zlim,32766.)
zlim=max(zlim,-32766.)
ishort_topo(i)=nint(zlim)
zlim=(part_av_tro(i)*2.-32000.)
zlim=min(zlim,32766.)
zlim=max(zlim,-32766.)
ishort_tro(i)=nint(zlim)
zlim=(part_av_hmix(i)*2.-32000.)
zlim=min(zlim,32766.)
zlim=max(zlim,-32766.)
ishort_hmix(i)=nint(zlim)
zlim=(part_av_rho(i)*20000.-32000.)
zlim=min(zlim,32766.)
zlim=max(zlim,-32766.)
ishort_rho(i)=nint(zlim)
zlim=(part_av_qv(i)*1000000.-32000.)
zlim=min(zlim,32766.)
zlim=max(zlim,-32766.)
ishort_qv(i)=nint(zlim)
zlim=(part_av_pv(i)*100.)
zlim=min(zlim,32766.)
zlim=max(zlim,-32766.)
ishort_pv(i)=nint(zlim)
zlim=((part_av_tt(i)-273.15))*300.
zlim=min(zlim,32766.)
zlim=max(zlim,-32766.)
ishort_tt(i)=nint(zlim)
zlim=(part_av_uu(i)*200.)
zlim=min(zlim,32766.)
zlim=max(zlim,-32766.)
ishort_uu(