Commit 328fdf90 authored by Espen Sollum

bugfix: particles were lost at the start of a global domain-filling run; error when restarting a domain-filling run from a particle dump
parent 77783e3d
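
The diff below guards the domain-filling initialisation with the gdomainfill flag. The following is a minimal illustrative sketch (not the FLEXPART source) of that guard logic, using the same names as the diff (ipin, gdomainfill); the subroutine name sketch_init_domainfill and the placeholder comments are hypothetical.

    ! Sketch of the restart guard introduced by this commit.
    subroutine sketch_init_domainfill(ipin, gdomainfill)
      implicit none
      integer, intent(in) :: ipin          ! 1 = restart from particle dump, 0 = cold start
      logical, intent(in) :: gdomainfill   ! .true. = global domain-filling run

      ! A resumed global domain-filling run already carries its particles in the
      ! dump file, so the initial release must not be repeated.
      if (gdomainfill .and. ipin.ne.0) return

      ! Reading boundcond.bin only applies when the restarted run is not a
      ! global domain-filling run.
      if ((ipin.eq.1) .and. (.not.gdomainfill)) then
        ! ... read numcolumn_we, numcolumn_sn, ... from boundcond.bin
      end if

      ! ... release the initial particle distribution here ...
    end subroutine sketch_init_domainfill
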
......@@ -650,6 +650,7 @@ module com_mod
real :: xreceptor(maxreceptor),yreceptor(maxreceptor)
real :: receptorarea(maxreceptor)
real :: creceptor(maxreceptor,maxspec)
real, allocatable, dimension(:,:) :: creceptor0
character(len=16) :: receptorname(maxreceptor)
integer :: numreceptor
......
......@@ -413,7 +413,7 @@ subroutine init_domainfill
! This overrides any previous calculations.
!***************************************************************************
if (ipin.eq.1) then
if ((ipin.eq.1).and.(.not.gdomainfill)) then
open(unitboundcond,file=path(2)(1:length(2))//'boundcond.bin', &
form='unformatted')
read(unitboundcond) numcolumn_we,numcolumn_sn, &
......
......@@ -109,6 +109,10 @@ subroutine init_domainfill
endif
endif
! Exit here if resuming a run from particle dump
!***********************************************
if (gdomainfill.and.ipin.ne.0) return
! Do not release particles twice (i.e., not in both the leftmost and the rightmost
! grid cell) for a global domain
!*****************************************************************************
......@@ -212,7 +216,6 @@ subroutine init_domainfill
pp(nz)=rho(ix,jy,nz,1)*r_air*tt(ix,jy,nz,1)
colmass(ix,jy)=(pp(1)-pp(nz))/ga*gridarea(jy)
colmasstotal=colmasstotal+colmass(ix,jy)
end do
end do
......@@ -465,7 +468,7 @@ subroutine init_domainfill
!***************************************************************************
! eso TODO: only needed for root process
if (ipin.eq.1) then
if ((ipin.eq.1).and.(.not.gdomainfill)) then
open(unitboundcond,file=path(2)(1:length(2))//'boundcond.bin', &
form='unformatted')
read(unitboundcond) numcolumn_we,numcolumn_sn, &
......@@ -473,27 +476,33 @@ subroutine init_domainfill
close(unitboundcond)
endif
numpart = numpart/mp_partgroup_np
if (mod(numpart,mp_partgroup_np).ne.0) numpart=numpart+1
else ! Allocate dummy arrays for receiving processes
allocate(itra1_tmp(nullsize),npoint_tmp(nullsize),nclass_tmp(nullsize),&
& idt_tmp(nullsize),itramem_tmp(nullsize),itrasplit_tmp(nullsize),&
& xtra1_tmp(nullsize),ytra1_tmp(nullsize),ztra1_tmp(nullsize),&
& xmass1_tmp(nullsize, nullsize))
if (ipin.eq.0) then
numpart = numpart/mp_partgroup_np
if (mod(numpart,mp_partgroup_np).ne.0) numpart=numpart+1
end if
else ! Allocate dummy arrays for receiving processes
if (ipin.eq.0) then
allocate(itra1_tmp(nullsize),npoint_tmp(nullsize),nclass_tmp(nullsize),&
& idt_tmp(nullsize),itramem_tmp(nullsize),itrasplit_tmp(nullsize),&
& xtra1_tmp(nullsize),ytra1_tmp(nullsize),ztra1_tmp(nullsize),&
& xmass1_tmp(nullsize, nullsize))
end if
end if ! end if(lroot)
end if ! end if(lroot)
! Distribute particles to other processes (numpart is 'per-process', not total)
call MPI_Bcast(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
! eso TODO: xmassperparticle: not necessary to send
call MPI_Bcast(xmassperparticle, 1, mp_sp, id_root, mp_comm_used, mp_ierr)
call mpif_send_part_properties(numpart)
! Only if not restarting from previous run
if (ipin.eq.0) then
call MPI_Bcast(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
call mpif_send_part_properties(npart(1)/mp_partgroup_np)
! Deallocate the temporary arrays used for all particles
deallocate(itra1_tmp,npoint_tmp,nclass_tmp,idt_tmp,itramem_tmp,&
deallocate(itra1_tmp,npoint_tmp,nclass_tmp,idt_tmp,itramem_tmp,&
& itrasplit_tmp,xtra1_tmp,ytra1_tmp,ztra1_tmp,xmass1_tmp)
end if
end subroutine init_domainfill
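
The hunk above restricts the per-process particle count and its broadcast to cold starts (ipin.eq.0). Below is a condensed sketch, not the FLEXPART source, of that flow; variable names follow the diff (numpart, mp_partgroup_np, id_root, mp_comm_used), while the subroutine name distribute_particles is hypothetical and the root/worker split is collapsed into one routine.

    subroutine distribute_particles(numpart, ipin, mp_partgroup_np, id_root, mp_comm_used)
      use mpi
      implicit none
      integer, intent(inout) :: numpart
      integer, intent(in)    :: ipin, mp_partgroup_np, id_root, mp_comm_used
      integer :: mp_ierr

      ! Cold start: derive a per-process particle count from the total on the
      ! root process (mirroring the rounding in the hunk above), then broadcast
      ! it so every rank uses the same per-process value.
      if (ipin.eq.0) then
        numpart = numpart/mp_partgroup_np
        if (mod(numpart,mp_partgroup_np).ne.0) numpart = numpart+1
        call MPI_Bcast(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
      end if
      ! Restart from a particle dump (ipin.ne.0): counts are restored from the
      ! dump, so nothing is broadcast or redistributed here.
    end subroutine distribute_particles
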
......@@ -124,13 +124,15 @@ module mpi_mod
! mp_measure_time Measure cpu/wall time, write out at end of run
! mp_time_barrier Measure MPI barrier time
! mp_exact_numpart Use an extra MPI communication to give the exact number of particles
! to standard output (this does *not* otherwise affect the simulation)
! to standard output (this does *not* otherwise affect the simulation)
! mp_rebalance Attempt to rebalance particles between processes
logical, parameter :: mp_dbg_mode = .false.
logical, parameter :: mp_dev_mode = .false.
logical, parameter :: mp_dbg_out = .false.
logical, parameter :: mp_time_barrier=.true.
logical, parameter :: mp_measure_time=.false.
logical, parameter :: mp_exact_numpart=.true.
logical, parameter :: mp_rebalance=.true.
! for measuring CPU/Wall time
real(sp),private :: mp_comm_time_beg, mp_comm_time_end, mp_comm_time_total=0.
......@@ -143,8 +145,8 @@ module mpi_mod
real(sp),private :: tm_tot_beg, tm_tot_end, tm_tot_total=0.
real(dp),private :: mp_getfields_wtime_beg, mp_getfields_wtime_end, mp_getfields_wtime_total=0.
real(sp),private :: mp_getfields_time_beg, mp_getfields_time_end, mp_getfields_time_total=0.
real(dp),private :: mp_readwind_wtime_beg, mp_readwind_wtime_end, mp_readwind_wtime_total=0.
real(sp),private :: mp_readwind_time_beg, mp_readwind_time_end, mp_readwind_time_total=0.
! real(dp),private :: mp_readwind_wtime_beg, mp_readwind_wtime_end, mp_readwind_wtime_total=0.
! real(sp),private :: mp_readwind_time_beg, mp_readwind_time_end, mp_readwind_time_total=0.
real(dp),private :: mp_io_wtime_beg, mp_io_wtime_end, mp_io_wtime_total=0.
real(sp),private :: mp_io_time_beg, mp_io_time_end, mp_io_time_total=0.
real(dp),private :: mp_wetdepo_wtime_beg, mp_wetdepo_wtime_end, mp_wetdepo_wtime_total=0.
......@@ -189,8 +191,8 @@ contains
! mpi_mode default 0, set to 2/3 if running MPI version
! mp_np number of running processes, decided at run-time
!***********************************************************************
use par_mod, only: maxpart, numwfmem, dep_prec
use com_mod, only: mpi_mode, verbosity
use par_mod, only: maxpart, numwfmem, dep_prec, maxreceptor, maxspec
use com_mod, only: mpi_mode, verbosity, creceptor0
implicit none
......@@ -336,7 +338,7 @@ contains
end if
! Set maxpart per process
! eso 08/2016: Increase maxpart per process, in case of unbalanced distribution
! ESO 08/2016: Increase maxpart per process, in case of unbalanced distribution
maxpart_mpi=int(mp_maxpart_factor*real(maxpart)/real(mp_partgroup_np))
if (mp_np == 1) maxpart_mpi = maxpart
......@@ -364,6 +366,13 @@ contains
reqs(:)=MPI_REQUEST_NULL
end if
! Write whether MPI_IN_PLACE is used or not
#ifdef USE_MPIINPLACE
if (lroot) write(*,*) 'Using MPI_IN_PLACE operations'
#else
if (lroot) allocate(creceptor0(maxreceptor,maxspec))
if (lroot) write(*,*) 'Not using MPI_IN_PLACE operations'
#endif
goto 101
100 write(*,*) '#### mpi_mod::mpif_init> ERROR ####', mp_ierr
......@@ -558,7 +567,7 @@ contains
! "numpart" is larger than the actual used, so we reduce it if there are
! invalid particles at the end of the arrays
601 do i=num_part, 1, -1
601 do i=numpart, 1, -1
if (itra1(i).eq.-999999999) then
numpart=numpart-1
else
......@@ -1960,7 +1969,7 @@ contains
! For now assume that data at all steps either have or do not have water
if (readclouds) then
j=j+1
call MPI_Irecv(ctwc(:,:,mind),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
call MPI_Irecv(ctwc(:,:,mind),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
&MPI_COMM_WORLD,reqs(j),mp_ierr)
if (mp_ierr /= 0) goto 600
end if
......@@ -2325,7 +2334,7 @@ contains
! For now assume that data at all steps either have or do not have water
if (readclouds) then
j=j+1
call MPI_Irecv(ctwcn(:,:,mind,k),d2s1,mp_sp,id_read,MPI_ANY_TAG,&
call MPI_Irecv(ctwcn(:,:,mind,k),d2s1*5,mp_sp,id_read,MPI_ANY_TAG,&
&MPI_COMM_WORLD,reqs(j),mp_ierr)
if (mp_ierr /= 0) goto 600
end if
......@@ -2461,12 +2470,28 @@ contains
& mp_comm_used, mp_ierr)
end if
! Receptor concentrations
if (lroot) then
call MPI_Reduce(MPI_IN_PLACE,creceptor,rcpt_size,mp_sp,MPI_SUM,id_root, &
& mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
else
call MPI_Reduce(creceptor,0,rcpt_size,mp_sp,MPI_SUM,id_root, &
& mp_comm_used,mp_ierr)
end if
#else
call MPI_Reduce(gridunc, gridunc0, grid_size3d, mp_sp, MPI_SUM, id_root, &
& mp_comm_used, mp_ierr)
if (mp_ierr /= 0) goto 600
if (lroot) gridunc = gridunc0
call MPI_Reduce(creceptor, creceptor0,rcpt_size,mp_sp,MPI_SUM,id_root, &
& mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
if (lroot) creceptor = creceptor0
#endif
if ((WETDEP).and.(ldirect.gt.0)) then
......@@ -2481,15 +2506,6 @@ contains
if (mp_ierr /= 0) goto 600
end if
! Receptor concentrations
if (lroot) then
call MPI_Reduce(MPI_IN_PLACE,creceptor,rcpt_size,mp_sp,MPI_SUM,id_root, &
& mp_comm_used,mp_ierr)
if (mp_ierr /= 0) goto 600
else
call MPI_Reduce(creceptor,0,rcpt_size,mp_sp,MPI_SUM,id_root, &
& mp_comm_used,mp_ierr)
end if
if (mp_measure_time) call mpif_mtime('commtime',1)
......@@ -2699,19 +2715,19 @@ contains
& mp_vt_time_beg)
end if
case ('readwind')
if (imode.eq.0) then
call cpu_time(mp_readwind_time_beg)
mp_readwind_wtime_beg = mpi_wtime()
else
call cpu_time(mp_readwind_time_end)
mp_readwind_wtime_end = mpi_wtime()
mp_readwind_time_total = mp_readwind_time_total + &
&(mp_readwind_time_end - mp_readwind_time_beg)
mp_readwind_wtime_total = mp_readwind_wtime_total + &
&(mp_readwind_wtime_end - mp_readwind_wtime_beg)
end if
! case ('readwind')
! if (imode.eq.0) then
! call cpu_time(mp_readwind_time_beg)
! mp_readwind_wtime_beg = mpi_wtime()
! else
! call cpu_time(mp_readwind_time_end)
! mp_readwind_wtime_end = mpi_wtime()
!
! mp_readwind_time_total = mp_readwind_time_total + &
! &(mp_readwind_time_end - mp_readwind_time_beg)
! mp_readwind_wtime_total = mp_readwind_wtime_total + &
! &(mp_readwind_wtime_end - mp_readwind_wtime_beg)
! end if
case ('commtime')
if (imode.eq.0) then
......@@ -2787,10 +2803,10 @@ contains
& mp_getfields_wtime_total
write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR GETFIELDS:',&
& mp_getfields_time_total
write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR READWIND:',&
& mp_readwind_wtime_total
write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR READWIND:',&
& mp_readwind_time_total
! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR READWIND:',&
! & mp_readwind_wtime_total
! write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR READWIND:',&
! & mp_readwind_time_total
write(*,FMT='(A60,TR1,F9.2)') 'TOTAL WALL TIME FOR FILE IO:',&
& mp_io_wtime_total
write(*,FMT='(A60,TR1,F9.2)') 'TOTAL CPU TIME FOR FILE IO:',&
......
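
The mpi_mod hunks above move the receptor-concentration reduction inside the USE_MPIINPLACE conditional and introduce the receive buffer creceptor0 for the non-in-place path. The standalone program below is a sketch only, not FLEXPART code, of that pattern; the array sizes nrcpt and nspec are made up, it needs cpp preprocessing for the #ifdef (as the original does), and on non-root ranks it passes a real buffer where the original passes a dummy.

    program reduce_sketch
      use mpi
      implicit none
      integer, parameter :: nrcpt = 4, nspec = 2
      real :: creceptor(nrcpt,nspec), creceptor0(nrcpt,nspec)
      integer :: ierr, rank

      call MPI_Init(ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
      creceptor  = real(rank+1)   ! each rank's partial receptor sums
      creceptor0 = 0.

    #ifdef USE_MPIINPLACE
      ! Root sums directly into creceptor; the other ranks only contribute.
      if (rank == 0) then
        call MPI_Reduce(MPI_IN_PLACE, creceptor, nrcpt*nspec, MPI_REAL, MPI_SUM, &
             &          0, MPI_COMM_WORLD, ierr)
      else
        call MPI_Reduce(creceptor, creceptor0, nrcpt*nspec, MPI_REAL, MPI_SUM, &
             &          0, MPI_COMM_WORLD, ierr)
      end if
    #else
      ! Without MPI_IN_PLACE the sum lands in the extra buffer creceptor0 and
      ! is copied back on the root process, as in the hunks above.
      call MPI_Reduce(creceptor, creceptor0, nrcpt*nspec, MPI_REAL, MPI_SUM, &
           &          0, MPI_COMM_WORLD, ierr)
      if (rank == 0) creceptor = creceptor0
    #endif

      if (rank == 0) write(*,*) 'sum at receptor 1, species 1:', creceptor(1,1)
      call MPI_Finalize(ierr)
    end program reduce_sketch
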
......@@ -343,7 +343,7 @@ subroutine timemanager(metdata_format)
! Check if particles should be redistributed among processes
!***********************************************************
call mpif_calculate_part_redist(itime)
if (mp_rebalance) call mpif_calculate_part_redist(itime)
! Compute convective mixing for forward runs
......@@ -531,7 +531,7 @@ subroutine timemanager(metdata_format)
griduncn(:,:,:,:,:,:,:)=0.
end if
else ! :TODO: check for zeroing in the netcdf module
else
call concoutput_surf_nest(itime,outnum)
end if
else
......