!**********************************************************************
! Copyright 1998,1999,2000,2001,2002,2005,2007,2008,2009,2010         *
! Andreas Stohl, Petra Seibert, A. Frank, Gerhard Wotawa,             *
! Caroline Forster, Sabine Eckhardt, John Burkhart, Harald Sodemann   *
!                                                                     *
! This file is part of FLEXPART.                                      *
!                                                                     *
! FLEXPART is free software: you can redistribute it and/or modify    *
! it under the terms of the GNU General Public License as published by*
! the Free Software Foundation, either version 3 of the License, or   *
! (at your option) any later version.                                 *
!                                                                     *
! FLEXPART is distributed in the hope that it will be useful,         *
! but WITHOUT ANY WARRANTY; without even the implied warranty of      *
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the       *
! GNU General Public License for more details.                        *
!                                                                     *
! You should have received a copy of the GNU General Public License   *
! along with FLEXPART.  If not, see <http://www.gnu.org/licenses/>.   *
!**********************************************************************

module mpi_mod

!*****************************************************************************
!                                                                            *
!  DESCRIPTION                                                               *
!    This module contains subroutines and common variables used for the      *
!    MPI parallelization of FLEXPART.                                        *
!                                                                            *
!  NOTE                                                                      *
!    Depending on the MPI library installed on your system (e.g. mpich2,     *
!    OpenMPI) you may need to choose below in this file between              *
!      use mpi                                                               *
!    (if the MPI library comes with the file 'mpi.mod'); or                  *
!      include 'mpif.h'                                                      *
!                                                                            *
!                                                                            *
!*****************************************************************************
!                                                                            *
! Variables:                                                                 *
!                                                                            *
! mp_ierr                 MPI error code                                     *
! mp_np                   Number of MPI processes                            *
! mp_pid                  Process ID of each MPI process                     *
! mp_seed                 Parameter for random number seed                   *
! read_grp_min            Minimum number of processes at which one will be   *
!                         used as reader                                     *
! numpart_mpi,            Number of particles per node                       *
! maxpart_mpi                                                                *
! mp_partid               MPI process ID for particle calculation            *
! mp_partgroup_           Refers to the subset of processors performing      *
!                         loops over particles. Will be all processes        *
!                         unless a dedicated process runs getfields/readwind *
! lmp_sync                If .false., use asynchronous MPI                   *
! mp_cp                   Real precision to use for deposition fields        *
!                                                                            *
!                                                                            *
!                                                                            *
!                                                                            *
!*****************************************************************************

  use mpi 
  use par_mod, only: dp,sp
  use com_mod, only: lroot

  implicit none

!  include 'mpif.h'

  public

! Set aside a process for reading windfields if using at least this many processes
!==================================================
  integer, parameter, private :: read_grp_min=4
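! Example (illustrative): with read_grp_min=4, a run on 4 or more MPI
! processes sets lmp_use_reader=.true. in mpif_init and reserves the last
! rank (id_read) for getfields/readwind, leaving the rest for particles.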
!==================================================

! Variables for each MPI process in the world group
  integer :: mp_ierr, mp_np, mp_pid, mp_partid
  integer, private :: world_group_id

! Variables for MPI processes in the 'particle' group
  integer, allocatable, dimension(:) :: mp_partgroup_rank
  integer :: mp_partgroup_comm, mp_partgroup_pid, mp_partgroup_np

  integer :: mp_seed=0
  integer, parameter :: mp_sp=MPI_REAL4, mp_dp=MPI_REAL8
  integer :: mp_cp
  integer, parameter :: id_root=0 ! master process

! MPI tags/requests for send/receive operation
  integer :: tm1
  integer, parameter :: nvar_async=26
!integer, dimension(:), allocatable :: tags
  integer, dimension(:), allocatable :: reqs

! Status array used for certain MPI operations (MPI_RECV)
  integer, dimension(MPI_STATUS_SIZE) :: mp_status


  integer :: id_read   ! readwind/getfield process
  integer :: numpart_mpi,maxpart_mpi ! number of particles per node
  integer :: tot_numpart=0
  integer :: mp_comm_used ! global or subgroup communicator

  logical :: lmpreader=.false. ! is set to true for reading process(es) only.
  logical :: lmp_use_reader=.false. ! true if separate readwind process is used

! .true. if only using synchronous MPI send/recv (default)
! If setting this to .false., numwfmem must be set to 3
!===============================================================================
  logical :: lmp_sync=.true. 
!===============================================================================

! mp_dbg_mode       Used for debugging MPI.
! mp_dev_mode       various checks related to debugging the parallel code
! mp_dbg_out        write some arrays to data file for debugging
! mp_measure_time   Measure cpu/wall time, write out at end of run
! mp_time_barrier   Measure MPI barrier time
! mp_exact_numpart  Use an extra MPI communication to give the exact number of particles
!                   to standard output (this does *not* otherwise affect the simulation) 
  logical, parameter :: mp_dbg_mode = .false.
  logical, parameter :: mp_dev_mode = .false.
  logical, parameter :: mp_dbg_out = .false.
  logical, parameter :: mp_time_barrier=.false.
  logical, parameter :: mp_measure_time=.false.
  logical, parameter :: mp_exact_numpart=.true.

! for measuring CPU/Wall time
  real(sp),private :: mp_comm_time_beg, mp_comm_time_end, mp_comm_time_total=0.
  real(dp),private :: mp_comm_wtime_beg, mp_comm_wtime_end, mp_comm_wtime_total=0.
  real(sp),private :: mp_root_time_beg, mp_root_time_end, mp_root_time_total=0.
  real(dp),private :: mp_root_wtime_beg, mp_root_wtime_end, mp_root_wtime_total=0.
  real(sp),private :: mp_barrier_time_beg, mp_barrier_time_end, mp_barrier_time_total=0.
  real(dp),private :: mp_barrier_wtime_beg, mp_barrier_wtime_end, mp_barrier_wtime_total=0.
  real(sp),private :: tm_nploop_beg, tm_nploop_end, tm_nploop_total=0.
  real(sp),private :: tm_tot_beg, tm_tot_end, tm_tot_total=0.
  real(dp),private :: mp_getfields_wtime_beg, mp_getfields_wtime_end, mp_getfields_wtime_total=0.
  real(sp),private :: mp_getfields_time_beg, mp_getfields_time_end, mp_getfields_time_total=0.
  real(dp),private :: mp_readwind_wtime_beg, mp_readwind_wtime_end, mp_readwind_wtime_total=0.
  real(sp),private :: mp_readwind_time_beg, mp_readwind_time_end, mp_readwind_time_total=0.
  real(dp),private :: mp_io_wtime_beg, mp_io_wtime_end, mp_io_wtime_total=0.
  real(sp),private :: mp_io_time_beg, mp_io_time_end, mp_io_time_total=0.
  real(dp),private :: mp_wetdepo_wtime_beg, mp_wetdepo_wtime_end, mp_wetdepo_wtime_total=0.
  real(sp),private :: mp_wetdepo_time_beg, mp_wetdepo_time_end, mp_wetdepo_time_total=0.
  real(dp),private :: mp_advance_wtime_beg, mp_advance_wtime_end, mp_advance_wtime_total=0.
  real(dp),private :: mp_conccalc_time_beg, mp_conccalc_time_end, mp_conccalc_time_total=0.
  real(dp),private :: mp_total_wtime_beg, mp_total_wtime_end, mp_total_wtime_total=0.
  real(dp),private :: mp_vt_wtime_beg, mp_vt_wtime_end, mp_vt_wtime_total
  real(sp),private :: mp_vt_time_beg, mp_vt_time_end, mp_vt_time_total

! dat_lun           logical unit number for i/o
  integer, private :: dat_lun 

! Temporary arrays for particles (allocated and deallocated as needed)
  integer, allocatable, dimension(:) :: nclass_tmp, npoint_tmp, itra1_tmp, idt_tmp, &
       & itramem_tmp, itrasplit_tmp
  real(kind=dp), allocatable, dimension(:) :: xtra1_tmp, ytra1_tmp
  real, allocatable, dimension(:) :: ztra1_tmp 
  real, allocatable, dimension(:,:) :: xmass1_tmp

! mp_redist_fract        Exchange particles between processes if the relative
!                        difference exceeds this value. A good value is between 0.0 and 0.5
! mp_maxpart_factor      Allocate more memory per process than strictly needed
!                        (safety factor). Recommended value between 1.5 and 2.5
! mp_min_redist          Do not redistribute particles if the process with most particles
!                        holds fewer than this limit
  real, parameter :: mp_redist_fract=0.2, mp_maxpart_factor=1.5
  integer,parameter :: mp_min_redist=100000
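! Illustrative example with the values above: two processes holding 150000
! and 100000 particles differ by 50000/150000 = 1/3 > mp_redist_fract, and the
! larger count exceeds mp_min_redist, so 50000/2 = 25000 particles are moved.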


contains

  subroutine mpif_init
!***********************************************************************
! DESCRIPTION
!   Initialize MPI.
!   
!   Create the global communicator MPI_COMM_WORLD
!   If using a separate MPI process for getfields/readwind, a subgroup
!   is created for the other processes.
!
! VARIABLES
!   mpi_mode    default 0, set to 2/3 if running MPI version
!   mp_np       number of running processes, decided at run-time
!***********************************************************************
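! Example launch (illustrative only; the executable name depends on the build):
!   mpirun -np 8 ./FLEXPART_MPI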
    use par_mod, only: maxpart, numwfmem, dep_prec
    use com_mod, only: mpi_mode, verbosity

    implicit none

    integer :: i,j,s,addmaxpart=0

! Each process gets an ID (mp_pid) in the range 0,..,mp_np-1
    call MPI_INIT(mp_ierr)
    if (mp_ierr /= 0) goto 100
    call MPI_COMM_RANK(MPI_COMM_WORLD, mp_pid, mp_ierr)
    if (mp_ierr /= 0) goto 100
    call MPI_COMM_SIZE(MPI_COMM_WORLD, mp_np, mp_ierr)
    if (mp_ierr /= 0) goto 100


! Variable mpi_mode is used to handle subroutines common to parallel/serial version
    if (lmp_sync) then
      mpi_mode=2 ! hold 2 windfields in memory
    else
      mpi_mode=3 ! hold 3 windfields in memory
    end if

    if (mp_pid.ne.0) then
      lroot = .false.
    end if

! Set MPI precision to use for transferring deposition fields
!************************************************************
    if (dep_prec==dp) then
      mp_cp = MPI_REAL8
! TODO: write info message for serial version as well 
      if (lroot.and.verbosity>0) write(*,*) 'Using double precision for deposition fields'
    else if (dep_prec==sp) then
      mp_cp = MPI_REAL4
      if (lroot.and.verbosity>0) write(*,*) 'Using single precision for deposition fields'
    else
      write(*,*) 'ERROR: something went wrong setting MPI real precision'
      stop
    end if

! Check for sensible combination of parameters
!*********************************************

    if (.not.lmp_sync.and.numwfmem.ne.3) then
      if (lroot) then
        write(*,FMT='(80("#"))')
        write(*,*) '#### mpi_mod::mpif_init> ERROR: ', &
             & 'numwfmem must be set to 3 for asynchronous reading ####'
        write(*,FMT='(80("#"))')
      end if
      call MPI_FINALIZE(mp_ierr)
      stop
    else if (lmp_sync.and.numwfmem.ne.2.and.lroot) then
      write(*,FMT='(80("#"))')
      write(*,*) '#### mpi_mod::mpif_init> WARNING: ', &
           & 'numwfmem should be set to 2 for synchronous'
      write(*,*) ' reading. Results will still be valid, but unnecessary memory &
           &is allocated.'
      write(*,FMT='(80("#"))')
! Force "synchronized" version if all processes will call getfields
    else if ((.not.lmp_sync.and.mp_np.lt.read_grp_min).or.(mp_np.eq.1)) then
      if (lroot) then
        write(*,FMT='(80("#"))')
        write(*,*) '#### mpi_mod::mpif_init> WARNING: ', &
             & 'all procs call getfields. Setting lmp_sync=.true.'
        write(*,FMT='(80("#"))')
      end if
      lmp_sync=.true. ! :DBG: eso fix this...
    end if

! TODO: Add more warnings for unimplemented flexpart features

! Set ID of process that calls getfield/readwind. 
! Using the last process in the group ensures statistically identical results
! to running with one process fewer and no separate read process
!**********************************************************************

    id_read = mp_np-1

    if (mp_pid.eq.id_read) lmpreader=.true.

    call MPI_Comm_group (MPI_COMM_WORLD, world_group_id, mp_ierr)

! Create an MPI group/communicator that will calculate trajectories.
! Skip this step if program is run with only a few processes
!************************************************************************
    allocate(mp_partgroup_rank(0:mp_np-2))

! This allows checking for allocation of arrays when no subgroup is used
    mp_partgroup_pid=0

    if (read_grp_min.lt.2) then
      write(*,*) '#### mpi_mod::mpif_init> ERROR ####', &
           & 'read_grp_min should be at least 2. Exiting'
      stop
    end if

    if (mp_np.ge.read_grp_min) then
      lmp_use_reader = .true.

! Map the subgroup IDs= 0,1,2,...,mp_np-2, skipping reader process
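! Illustrative example: with mp_np=4 and id_read=3 (the last world rank),
! the particle group is built from world ranks 0,1,2.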
      j=-1 
      do i=0, mp_np-2 !loop over all (subgroup) IDs
        j=j+1
        if (i.eq.id_read) then
          j=j+1
        end if
        mp_partgroup_rank(i) = j
      end do

      call MPI_Group_incl (world_group_id, mp_np-1, mp_partgroup_rank, &
           &mp_partgroup_pid, mp_ierr)
      if (mp_ierr /= 0) goto 100
      call MPI_Comm_create (MPI_COMM_WORLD, mp_partgroup_pid, mp_partgroup_comm, mp_ierr)
      if (mp_ierr /= 0) goto 100

      if (mp_pid.ne.id_read) then
        call MPI_Comm_rank (mp_partgroup_comm, mp_partgroup_pid, mp_ierr)
        if (mp_ierr /= 0) goto 100

! Get size of new group (=mp_np-1)
        call MPI_COMM_SIZE(mp_partgroup_comm, mp_partgroup_np, mp_ierr)
        if (mp_ierr /= 0) goto 100
        if (mp_partgroup_np.ne.mp_np-1) then
          write(*,*)  '#### mpi_mod:: mpif_init> ERROR ####&
               & mp_partgroup_np.ne.mp_np-1'
          stop
        endif

      else
        mp_partgroup_pid = -1
      end if
    end if

    if (lmp_use_reader) then
      mp_comm_used = mp_partgroup_comm
      mp_partid = mp_partgroup_pid
      mp_partgroup_np=mp_np-1
    else
      mp_comm_used = MPI_COMM_WORLD
      mp_partgroup_np = mp_np
      mp_partid = mp_pid
    end if

! Set maxpart per process
! eso 08/2016: Increase maxpart per process, in case of unbalanced distribution
    maxpart_mpi=int(mp_maxpart_factor*maxpart/mp_partgroup_np)
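! Illustrative example: maxpart=10000000, mp_partgroup_np=4 and
! mp_maxpart_factor=1.5 give maxpart_mpi=3750000 particles per process.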

! Do not allocate particle data arrays for readwind process
    if (lmpreader.and.lmp_use_reader) then
      maxpart_mpi=0
    end if

! Set random seed for each non-root process
    if (mp_pid.gt.0) then
      s = 244
      mp_seed = -abs(mod((s*181)*((mp_pid-83)*359), 104729))
    end if
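! The formula above gives every non-root process a distinct, non-positive
! seed (the random number generator is assumed to be initialized with a
! negative value, as in the serial version).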
    if (mp_dbg_mode) then
      mp_seed=0
      if (lroot) write(*,*) 'MPI: setting seed=0'
    end if

! Allocate request array for asynchronous MPI
    if (.not.lmp_sync) then
      allocate(reqs(0:nvar_async*mp_np-1))
      reqs(:)=MPI_REQUEST_NULL
    else
      allocate(reqs(0:1))
      reqs(:)=MPI_REQUEST_NULL
    end if

    goto 101

100 write(*,*) '#### mpi_mod::mpif_init> ERROR ####', mp_ierr
    stop

101 end subroutine mpif_init


  subroutine mpif_mpi_barrier
!***********************************************************************
! Set MPI barrier (all processes are synchronized here), with the
! possible exception of a separate 'readwind' process.
! Optionally measure cpu/wall time.
!
!***********************************************************************
    implicit none

    if (mp_time_barrier) then
      call cpu_time(mp_barrier_time_beg)
      mp_barrier_wtime_beg = mpi_wtime()
    endif

    call MPI_BARRIER(mp_comm_used, mp_ierr)
    if (mp_ierr /= 0) goto 100

    if (mp_time_barrier) then
      call cpu_time(mp_barrier_time_end)
      mp_barrier_wtime_end = mpi_wtime()

      mp_barrier_time_total=mp_barrier_time_total+&
           &(mp_barrier_time_end-mp_barrier_time_beg)
      mp_barrier_wtime_total=mp_barrier_wtime_total+&
           &(mp_barrier_wtime_end-mp_barrier_wtime_beg)
    end if

    goto 101

100 write(*,*) '#### mpi_mod::mpif_mpi_barrier> ERROR ####', mp_ierr
    stop

101 end subroutine mpif_mpi_barrier


  subroutine mpif_com_mod_allocate
!*******************************************************************************    
! Dynamic allocation of arrays (in serial code these have size maxpart)
!
! Not in use anymore, moved to com_mod for interoperability with serial version
! 
! TODO: remove 
!*******************************************************************************
    use com_mod

    implicit none 

    integer :: array_size

    array_size = maxpart_mpi

    allocate(itra1(array_size),npoint(array_size),&
         & nclass(array_size),idt(array_size),&
         & itramem(array_size),itrasplit(array_size),&
         & xtra1(array_size),ytra1(array_size),&
         & ztra1(array_size),xmass1(array_size, maxspec))

    allocate(uap(array_size),ucp(array_size),&
         & uzp(array_size),us(array_size),&
         & vs(array_size),&
         & ws(array_size),cbt(array_size))


  end subroutine mpif_com_mod_allocate


  subroutine mpif_tm_send_dims
!***********************************************************************
! Distribute array dimensions from pid0 to all processes.
! numpart_mpi/numpart is sent to allow dynamic allocation
!
! Currently not in use.
!
! Variables of similar type (integer, real) are placed in an array
! and sent collectively, to avoid the overhead associated with individual
! MPI send/receives
!
!
!***********************************************************************
    use com_mod

    implicit none

    integer :: add_part=0

    call MPI_Bcast(numpart, 1, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)

! MPI subgroup does the particle-loop 
    if (lmp_use_reader) then
      if (mod(numpart,mp_partgroup_np).ne.0) add_part=1
      numpart_mpi=int(numpart/mp_partgroup_np)+add_part
    else
      if (mod(numpart,mp_np).ne.0) add_part=1
      numpart_mpi=int(numpart/mp_np)+add_part
    end if
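! Illustrative example: numpart=1000003 distributed over 4 processes gives
! numpart_mpi=250001, i.e. slightly more than numpart/4 so that the rounded-up
! chunks cover all particles.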


! redefine numpart as 'numpart per process' throughout the code
!**************************************************************
    numpart = numpart_mpi

  end subroutine mpif_tm_send_dims


  subroutine mpif_send_part_properties(num_part)
!***********************************************************************
! Distribute particle properties from root to all processes.
!  
! Used by init_domainfill_mpi
!
! Variables:
! 
! num_part        input, number of particles per process (rounded up)
!
!***********************************************************************
    use com_mod

    implicit none

    integer,intent(in) :: num_part
    integer :: i,jj, addone

! Time for MPI communications
!****************************
    if (mp_measure_time) call mpif_mtime('commtime',0)

! Distribute variables, send from pid 0 to other processes (including itself)
!****************************************************************************
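! Note: MPI_SCATTER sends one num_part-sized chunk to every process in
! mp_comm_used (including the root itself), so the *_tmp send buffers on the
! root must hold num_part*mp_partgroup_np elements in total.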

    call MPI_SCATTER(nclass_tmp,num_part,MPI_INTEGER,nclass,&
         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
    if (mp_ierr /= 0) goto 600 
    call MPI_SCATTER(npoint_tmp,num_part,MPI_INTEGER,npoint,&
         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
    if (mp_ierr /= 0) goto 600 
    call MPI_SCATTER(itra1_tmp,num_part,MPI_INTEGER,itra1,&
         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
    if (mp_ierr /= 0) goto 600 
    call MPI_SCATTER(idt_tmp,num_part,MPI_INTEGER,idt,&
         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
    if (mp_ierr /= 0) goto 600 
    call MPI_SCATTER(itramem_tmp,num_part,MPI_INTEGER,itramem,&
         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
    if (mp_ierr /= 0) goto 600 
    call MPI_SCATTER(itrasplit_tmp,num_part,MPI_INTEGER,itrasplit,&
         &num_part,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
    if (mp_ierr /= 0) goto 600 
    call MPI_SCATTER(xtra1_tmp,num_part,mp_dp,xtra1,&
         &num_part,mp_dp,id_root,mp_comm_used,mp_ierr)
    if (mp_ierr /= 0) goto 600 
    call MPI_SCATTER(ytra1_tmp,num_part,mp_dp,ytra1,&
         &num_part,mp_dp,id_root,mp_comm_used,mp_ierr)
    if (mp_ierr /= 0) goto 600 
    call MPI_SCATTER(ztra1_tmp,num_part,mp_sp,ztra1,&
         &num_part,mp_sp,id_root,mp_comm_used,mp_ierr)
    if (mp_ierr /= 0) goto 600 
    do i=1,nspec
      call MPI_SCATTER(xmass1_tmp(:,i),num_part,mp_sp,xmass1(:,i),&
           &num_part,mp_sp,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600 
    end do

    if (mp_measure_time) call mpif_mtime('commtime',1)
    write(*,*) "PID ", mp_partid, "ending MPI_Scatter operation"

    goto 601

600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr
    stop

! After the transfer of particles it is possible that the value of
! "numpart" is larger than the actual number, so we reduce it if there are
! invalid particles at the end of the arrays
601 do i=num_part, 1, -1
      if (itra1(i).eq.-999999999) then
        numpart=numpart-1
      else
        exit
      end if
    end do


!601 end subroutine mpif_send_part_properties
  end subroutine mpif_send_part_properties


  subroutine mpif_calculate_part_redist(itime)
!***********************************************************************
! Calculate number of particles to redistribute between processes. This routine 
! can be called at regular time intervals to keep the number of
! particles on each process approximately even.
!
! First, all processes report their local "numpart" to each other, which is
! stored in array "numparticles_mpi(np)". This array is sorted from low to
! high values, along with a corresponding process ID array "idx_arr(np)".
! If the relative difference between the highest and lowest "numpart_mpi" value
! is above a threshold, particles are transferred from process idx_arr(np-1)
! to process idx_arr(0). Repeat for processes idx_arr(np-i) and idx_arr(i)
! for all valid i. 
! Note: If np is an odd number, the process with the median
! number of particles is left unchanged
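! Illustrative example: numparticles_mpi=(3000,1000,2000,500) for processes
! 0..3 gives idx_arr=(3,1,2,0); process 0 may then send particles to process 3
! and process 2 to process 1, provided the thresholds below are exceeded.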
!
! VARIABLES
! itime        input, current time
!***********************************************************************
    use com_mod

    implicit none
    
    integer, intent(in) :: itime
    real :: pmin,z
    integer :: i,jj,nn, num_part=1,m,imin, num_trans
    logical :: first_iter
    integer,allocatable,dimension(:) :: numparticles_mpi, idx_arr
    real,allocatable,dimension(:) :: sorted ! TODO: we don't really need this

! Exit if running with only 1 process
!************************************************************************
    if (mp_np.eq.1) return

! All processes exchange information on number of particles
!****************************************************************************
    allocate(numparticles_mpi(0:mp_partgroup_np-1), &
         &idx_arr(0:mp_partgroup_np-1), sorted(0:mp_partgroup_np-1))

    call MPI_Allgather(numpart, 1, MPI_INTEGER, numparticles_mpi, &
         & 1, MPI_INTEGER, mp_comm_used, mp_ierr)


! Sort from lowest to highest
! Initial guess: correct order
    sorted(:) = numparticles_mpi(:)
    do i=0, mp_partgroup_np-1
      idx_arr(i) = i
    end do

! For each successive element in index array, see if a lower value exists
    do i=0, mp_partgroup_np-2
      pmin=sorted(i)
      imin=idx_arr(i)
      m=i+1
      do jj=m, mp_partgroup_np-1
        if (pmin.le.sorted(jj)) cycle
        z=pmin
        pmin=sorted(jj)
        sorted(jj)=z

        nn=imin
        imin=idx_arr(jj)
        idx_arr(jj)=nn
      end do
      sorted(i)=pmin
      idx_arr(i)=imin
    end do

! For each pair of processes, transfer particles if the difference is above a
! limit, and numpart at sending process large enough

    m=mp_partgroup_np-1 ! index for last sorted process (most particles)
    do i=0,mp_partgroup_np/2-1
      num_trans = numparticles_mpi(idx_arr(m)) - numparticles_mpi(idx_arr(i))
      if (mp_partid.eq.idx_arr(m).or.mp_partid.eq.idx_arr(i)) then
        if ( numparticles_mpi(idx_arr(m)).gt.mp_min_redist.and.&
             & real(num_trans)/real(numparticles_mpi(idx_arr(m))).gt.mp_redist_fract) then
          call mpif_redist_part(itime, idx_arr(m), idx_arr(i), num_trans/2)
        end if
      end if
      m=m-1
    end do

    deallocate(numparticles_mpi, idx_arr, sorted)

  end subroutine mpif_calculate_part_redist


  subroutine mpif_redist_part(itime, src_proc, dest_proc, num_trans)
!***********************************************************************
! Transfer particle properties between two arbitrary processes.
! 
! VARIABLES
!
! itime           input, current time
! src_proc        input, ID of source process
! dest_proc       input, ID of destination process
! num_trans       input, number of particles to transfer
!
!************************************************************************
    use com_mod !TODO:  ,only: nclass etc

    implicit none 

    integer, intent(in) :: itime, src_proc, dest_proc, num_trans
    integer :: ll, ul ! lower and upper indices in arrays
    integer :: arr_sz ! size of temporary arrays
    integer :: mtag   ! MPI message tag
    integer :: i, j, minpart, ipart, maxnumpart
 
! Measure time for MPI communications
!************************************
    if (mp_measure_time) call mpif_mtime('commtime',0)

! Send particles
!***************
    if (mp_partid.eq.src_proc) then
      mtag = 2000

! Array indices for the transferred particles
      ll=numpart-num_trans+1
      ul=numpart

      ! if (mp_dev_mode) then
      !   write(*,FMT='(72("#"))')
      !   write(*,*) "Sending ", num_trans, "particles (from/to)", src_proc, dest_proc
      !   write(*,*) "numpart before: ", numpart
      ! end if

      call MPI_SEND(nclass(ll:ul),num_trans,&
           & MPI_INTEGER,dest_proc,mtag+1,mp_comm_used,mp_ierr)

      call MPI_SEND(npoint(ll:ul),num_trans,&
           & MPI_INTEGER,dest_proc,mtag+2,mp_comm_used,mp_ierr)

      call MPI_SEND(itra1(ll:ul),num_trans, &
           & MPI_INTEGER,dest_proc,mtag+3,mp_comm_used,mp_ierr)

      call MPI_SEND(idt(ll:ul),num_trans, &
           & MPI_INTEGER,dest_proc,mtag+4,mp_comm_used,mp_ierr)

      call MPI_SEND(itramem(ll:ul),num_trans, &
           & MPI_INTEGER,dest_proc,mtag+5,mp_comm_used,mp_ierr)

      call MPI_SEND(itrasplit(ll:ul),num_trans,&
           & MPI_INTEGER,dest_proc,mtag+6,mp_comm_used,mp_ierr)

      call MPI_SEND(xtra1(ll:ul),num_trans, &
           & mp_dp,dest_proc,mtag+7,mp_comm_used,mp_ierr)

      call MPI_SEND(ytra1(ll:ul),num_trans,&
           & mp_dp,dest_proc,mtag+8,mp_comm_used,mp_ierr)

      call MPI_SEND(ztra1(ll:ul),num_trans,&
           & mp_sp,dest_proc,mtag+9,mp_comm_used,mp_ierr)

      do j=1,nspec
        call MPI_SEND(xmass1(ll:ul,j),num_trans,&
             & mp_sp,dest_proc,mtag+(9+j),mp_comm_used,mp_ierr)
      end do

! Terminate transferred particles, update numpart
      itra1(ll:ul) = -999999999

      numpart = numpart-num_trans

      ! if (mp_dev_mode) then
      !   write(*,*) "numpart after: ", numpart
      !   write(*,FMT='(72("#"))')
      ! end if

    else if (mp_partid.eq.dest_proc) then

! Receive particles
!******************
      mtag = 2000
      ! Array indices for the transferred particles
      ll=numpart+1
      ul=numpart+num_trans

      ! if (mp_dev_mode) then
      !   write(*,FMT='(72("#"))')
      !   write(*,*) "Receiving ", num_trans, "particles (from/to)", src_proc, dest_proc
      !   write(*,*) "numpart before: ", numpart
      ! end if

! We could receive the data directly at nclass(ll:ul) etc., but this leaves
! vacant spaces at lower indices. Using temporary arrays instead.
      arr_sz = ul-ll+1
      allocate(itra1_tmp(arr_sz),npoint_tmp(arr_sz),nclass_tmp(arr_sz),&
           & idt_tmp(arr_sz),itramem_tmp(arr_sz),itrasplit_tmp(arr_sz),&
           & xtra1_tmp(arr_sz),ytra1_tmp(arr_sz),ztra1_tmp(arr_sz),&
           & xmass1_tmp(arr_sz, maxspec))
      
      call MPI_RECV(nclass_tmp,num_trans,&
           & MPI_INTEGER,src_proc,mtag+1,mp_comm_used,mp_status,mp_ierr)

      call MPI_RECV(npoint_tmp,num_trans,&
           & MPI_INTEGER,src_proc,mtag+2,mp_comm_used,mp_status,mp_ierr)

      call MPI_RECV(itra1_tmp,num_trans, &
           & MPI_INTEGER,src_proc,mtag+3,mp_comm_used,mp_status,mp_ierr)

      call MPI_RECV(idt_tmp,num_trans, &
           & MPI_INTEGER,src_proc,mtag+4,mp_comm_used,mp_status,mp_ierr)

      call MPI_RECV(itramem_tmp,num_trans, &
           & MPI_INTEGER,src_proc,mtag+5,mp_comm_used,mp_status,mp_ierr)

      call MPI_RECV(itrasplit_tmp,num_trans,&
           & MPI_INTEGER,src_proc,mtag+6,mp_comm_used,mp_status,mp_ierr)

      call MPI_RECV(xtra1_tmp,num_trans, &
           & mp_dp,src_proc,mtag+7,mp_comm_used,mp_status,mp_ierr)

      call MPI_RECV(ytra1_tmp,num_trans,&
           & mp_dp,src_proc,mtag+8,mp_comm_used,mp_status,mp_ierr)

      call MPI_RECV(ztra1_tmp,num_trans,&
           & mp_sp,src_proc,mtag+9,mp_comm_used,mp_status,mp_ierr)

      do j=1,nspec
        call MPI_RECV(xmass1_tmp(:,j),num_trans,&
             & mp_sp,src_proc,mtag+(9+j),mp_comm_used,mp_status,mp_ierr)
      end do

! This is the maximum value numpart can possibly have after the transfer
      maxnumpart=numpart+num_trans

! Search for vacant space and copy from temporary storage
!********************************************************
      minpart=1
      do i=1, num_trans
! Take into account that we may have transferred invalid particles
        if (itra1_tmp(minpart).ne.itime) goto 200
        do ipart=minpart,maxnumpart
          if (itra1(ipart).ne.itime) then
            itra1(ipart) = itra1_tmp(minpart)
            npoint(ipart) = npoint_tmp(minpart)
            nclass(ipart) = nclass_tmp(minpart)
            idt(ipart) = idt_tmp(minpart)
            itramem(ipart) = itramem_tmp(minpart)
            itrasplit(ipart) = itrasplit_tmp(minpart)
            xtra1(ipart) = xtra1_tmp(minpart)
            ytra1(ipart) = ytra1_tmp(minpart)
            ztra1(ipart) = ztra1_tmp(minpart)
            xmass1(ipart,:) = xmass1_tmp(minpart,:)
! Increase numpart, if necessary
            numpart=max(numpart,ipart)
            goto 200 ! Storage space has been found, stop searching
          end if
        end do
200     minpart=minpart+1
      end do

      deallocate(itra1_tmp,npoint_tmp,nclass_tmp,idt_tmp,itramem_tmp,itrasplit_tmp,&
           & xtra1_tmp,ytra1_tmp,ztra1_tmp,xmass1_tmp)

      ! if (mp_dev_mode) then
      !   write(*,*) "numpart after: ", numpart
      !   write(*,FMT='(72("#"))')
      ! end if

    else
! This routine should only be called by the two participating processes
      write(*,*) "ERROR: wrong process has entered mpi_mod::mpif_redist_part"
      stop
      return
    end if

! Measure time for MPI communications
!************************************
    if (mp_measure_time) call mpif_mtime('commtime',1)

  end subroutine mpif_redist_part


  subroutine mpif_tm_send_vars
!***********************************************************************
! Distribute particle variables from pid0 to all processes.
! Called from timemanager
! *NOT IN USE* at the moment, but can be useful for debugging
!
!***********************************************************************
    use com_mod

    implicit none

    integer :: i

! Time for MPI communications
!****************************
    if (mp_measure_time) call mpif_mtime('commtime',0)

! Distribute variables, send from pid 0 to other processes
!**********************************************************************

! integers
    if (lroot) then
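! Note: on the root process MPI_IN_PLACE is passed as the receive buffer,
! so the root keeps its own chunk of the full send arrays without copying.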
      call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,MPI_IN_PLACE,&
           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600

! int2
      call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,MPI_IN_PLACE,&
           & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600

! real
      call MPI_SCATTER(uap,numpart_mpi,mp_sp,MPI_IN_PLACE,&
           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_SCATTER(ucp,numpart_mpi,mp_sp,MPI_IN_PLACE,&
           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_SCATTER(uzp,numpart_mpi,mp_sp,MPI_IN_PLACE,&
           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600

      call MPI_SCATTER(us,numpart_mpi,mp_sp,MPI_IN_PLACE,&
           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_SCATTER(vs,numpart_mpi,mp_sp,MPI_IN_PLACE,&
           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_SCATTER(ws,numpart_mpi,mp_sp,MPI_IN_PLACE,&
           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600

      call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,MPI_IN_PLACE,&
           & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,MPI_IN_PLACE,&
           & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,MPI_IN_PLACE,&
           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600

      do i=1,nspec
        call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,MPI_IN_PLACE,&
             & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
        if (mp_ierr /= 0) goto 600
      end do

    else ! (mp_pid >= 1)
! integers
      call MPI_SCATTER(npoint,numpart_mpi,MPI_INTEGER,npoint,&
           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_SCATTER(idt,numpart_mpi,MPI_INTEGER,idt,&
           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_SCATTER(itra1,numpart_mpi,MPI_INTEGER,itra1,&
           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_SCATTER(nclass,numpart_mpi,MPI_INTEGER,nclass,&
           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_SCATTER(itramem,numpart_mpi,MPI_INTEGER,itramem,&
           & numpart_mpi,MPI_INTEGER,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600

! int2
      call MPI_SCATTER(cbt,numpart_mpi,MPI_INTEGER2,cbt,&
           & numpart_mpi,MPI_INTEGER2,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600

! reals
      call MPI_SCATTER(uap,numpart_mpi,mp_sp,uap,&
           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_SCATTER(ucp,numpart_mpi,mp_sp,ucp,&
           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_SCATTER(uzp,numpart_mpi,mp_sp,uzp,&
           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600

      call MPI_SCATTER(us,numpart_mpi,mp_sp,us,&
           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_SCATTER(vs,numpart_mpi,mp_sp,vs,&
           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_SCATTER(ws,numpart_mpi,mp_sp,ws,&
           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600

      call MPI_SCATTER(xtra1,numpart_mpi,mp_dp,xtra1,&
           & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_SCATTER(ytra1,numpart_mpi,mp_dp,ytra1,&
           & numpart_mpi,mp_dp,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_SCATTER(ztra1,numpart_mpi,mp_sp,ztra1,&
           & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
      if (mp_ierr /= 0) goto 600

      do i=1,nspec
        call MPI_SCATTER(xmass1(:,i),numpart_mpi,mp_sp,xmass1(:,i),&
             & numpart_mpi,mp_sp,id_root,mp_comm_used,mp_ierr)
        if (mp_ierr /= 0) goto 600 
      end do

    end if !(lroot)

    if (mp_measure_time) call mpif_mtime('commtime',1)

    goto 601

600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr
    stop
601 end subroutine mpif_tm_send_vars


  subroutine mpif_tm_collect_vars
!*******************************************************************************
! Collect particle data to PID 0 from all processes.                           *
!                                                                              *
! This can be called from timemanager_mpi, before calculating                 *
! concentrations/writing output grids.                                         *
!                                                                              *
! Currently *not in use*, as each process calculates concentrations            *
! separately, but can be useful for debugging                                  *
!                                                                              *
! To use this routine (together with mpif_tm_send_vars) to transfer data       *
! to the MPI root process (useful for debugging), insert code like this        *
! (in timemanager_mpi.f90)                                                     *
!                                                                              *
!      if (lroot) tot_numpart=numpart ! root stores total numpart              *
!      call mpif_tm_send_dims                                                  *
!      if (numpart>1) then                                                     *
!        call mpif_tm_send_vars                                                *
!      end if                                                                  *
!                                                                              *
!    To collect data from all processes to root process after a parallel       *
!    region:                                                                   *
!                                                                              *
!      if (numpart>0) then                                                     *
!          if (lroot) numpart = tot_numpart                                    *
!       call mpif_tm_collect_vars                                              *
!      end if                                                                  *
!                                                                              *
! Then a section that begins with "if (lroot) ..." will behave like            *
! serial flexpart                                                              *
!                                                                              *
!*******************************************************************************
    use com_mod !, only: numpart, nspec, itra1, npoint, nclass

    implicit none

    integer :: i

    logical :: use_mp_vars = .false.! use mp_... type allocated vars
    logical :: use_in_place = .true.! use MPI_IN_PLACE to collect vars.
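! Note: with use_in_place the root passes MPI_IN_PLACE as its send buffer, so
! MPI_GATHER takes the root's own contribution directly from (and leaves it
! in) the receive arrays.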


! Time for MPI communications
    if (mp_measure_time) call mpif_mtime('commtime',0)

! Collect variables from all processes to pid 0
!**********************************************************************

    if (.not. use_mp_vars.and..not.use_in_place) then

! Integers:
      call MPI_GATHER(npoint, numpart_mpi, MPI_INTEGER, npoint, &
           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
      if (mp_ierr /= 0) goto 600 
      call MPI_GATHER(idt, numpart_mpi, MPI_INTEGER, idt, &
           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
      if (mp_ierr /= 0) goto 600 
      call MPI_GATHER(itra1, numpart_mpi, MPI_INTEGER, itra1, &
           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
      if (mp_ierr /= 0) goto 600 
      call MPI_GATHER(nclass, numpart_mpi, MPI_INTEGER, nclass, &
           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
      if (mp_ierr /= 0) goto 600 
      call MPI_GATHER(itramem, numpart_mpi, MPI_INTEGER, itramem, &
           &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
      if (mp_ierr /= 0) goto 600 

! int2
      call MPI_GATHER(cbt, numpart_mpi, MPI_INTEGER2, cbt, &
           &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
      if (mp_ierr /= 0) goto 600 

! Reals:
      call MPI_GATHER(uap, numpart_mpi, mp_sp, uap, &
           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_GATHER(ucp, numpart_mpi, mp_sp, ucp, &
           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_GATHER(uzp, numpart_mpi, mp_sp, uzp, &
           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
      if (mp_ierr /= 0) goto 600

      call MPI_GATHER(us, numpart_mpi, mp_sp, us, &
           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_GATHER(vs, numpart_mpi, mp_sp, vs, &
           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_GATHER(ws, numpart_mpi, mp_sp, ws, &
           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
      if (mp_ierr /= 0) goto 600

      call MPI_GATHER(xtra1, numpart_mpi, mp_dp, xtra1, &
           &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_GATHER(ytra1, numpart_mpi, mp_dp, ytra1, &
           &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_GATHER(ztra1, numpart_mpi, mp_sp, ztra1, &
           &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
      if (mp_ierr /= 0) goto 600

      do i=1, nspec
        call MPI_GATHER(xmass1(:,i), numpart_mpi, mp_sp, xmass1(:,i), &
             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600 

      end do

! Use variables npoint etc directly for communications
!***********************************************************************
    else if (use_in_place.and..not.use_mp_vars) then
      if (lroot) then
! Integers:
        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, npoint, &
             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600 
        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, idt, &
             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600 
        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, itra1, &
             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600 
        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, nclass, &
             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600 
        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER, itramem, &
             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600 

! int2
        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, MPI_INTEGER2, cbt, &
             &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600 

! Reals:
        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, uap, &
             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600
        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, ucp, &
             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600
        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, uzp, &
             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600

        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, us, &
             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600
        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, vs, &
             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600
        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, ws, &
             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600

        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_dp, xtra1, &
             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600
        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_dp, ytra1, &
             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600
        call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, ztra1, &
             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600

        do i=1, nspec
          call MPI_GATHER(MPI_IN_PLACE, numpart_mpi, mp_sp, xmass1(:,i), &
               &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
          if (mp_ierr /= 0) goto 600 
        end do

      else ! (for mp_pid >= 1)

! Integers:
        call MPI_GATHER(npoint, numpart_mpi, MPI_INTEGER, npoint, &
             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600 
        call MPI_GATHER(idt, numpart_mpi, MPI_INTEGER, idt, &
             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600 
        call MPI_GATHER(itra1, numpart_mpi, MPI_INTEGER, itra1, &
             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600 
        call MPI_GATHER(nclass, numpart_mpi, MPI_INTEGER, nclass, &
             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600 
        call MPI_GATHER(itramem, numpart_mpi, MPI_INTEGER, itramem, &
             &numpart_mpi, MPI_INTEGER, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600 

! int2
        call MPI_GATHER(cbt, numpart_mpi, MPI_INTEGER2, cbt, &
             &numpart_mpi, MPI_INTEGER2, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600 

! Reals:
        call MPI_GATHER(uap, numpart_mpi, mp_sp, uap, &
             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600
        call MPI_GATHER(ucp, numpart_mpi, mp_sp, ucp, &
             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600
        call MPI_GATHER(uzp, numpart_mpi, mp_sp, uzp, &
             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600

        call MPI_GATHER(us, numpart_mpi, mp_sp, us, &
             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600
        call MPI_GATHER(vs, numpart_mpi, mp_sp, vs, &
             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600
        call MPI_GATHER(ws, numpart_mpi, mp_sp, ws, &
             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600

        call MPI_GATHER(xtra1, numpart_mpi, mp_dp, xtra1, &
             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600
        call MPI_GATHER(ytra1, numpart_mpi, mp_dp, ytra1, &
             &numpart_mpi, mp_dp, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600
        call MPI_GATHER(ztra1, numpart_mpi, mp_sp, ztra1, &
             &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
        if (mp_ierr /= 0) goto 600

        do i=1, nspec
          call MPI_GATHER(xmass1(:,i), numpart_mpi, mp_sp, xmass1(:,i), &
               &numpart_mpi, mp_sp, id_root, mp_comm_used, mp_ierr)
          if (mp_ierr /= 0) goto 600 
        end do
      end if ! (lroot)
    end if !  (use_in_place)

    if (mp_measure_time) call mpif_mtime('commtime',1)

    goto 601

600 write(*,*) "mpi_mod> mp_ierr \= 0", mp_ierr
    stop
601 end subroutine mpif_tm_collect_vars


  subroutine mpif_gf_send_vars(memstat)
!*******************************************************************************
! DESCRIPTION
!   Distribute 'getfield' variables from reader process
! 
!   Called from timemanager
!
! NOTE
!   This subroutine distributes windfields read from the reader process to
!   all other processes. Usually one set of fields is transferred, but at
!   the first timestep, when there are no fields in memory, both are transferred.
!   MPI_Bcast is used, so implicitly all processes are synchronized at this
!   step
!
!
!*******************************************************************************
    use com_mod
    use par_mod,only: numwfmem

    implicit none

    integer, intent(in) :: memstat

! Common array sizes used for communications
    integer, parameter :: d3_size1 = nxmax*nymax*nzmax
    integer, parameter :: d3_size2 = nxmax*nymax*nuvzmax
    integer, parameter :: d2_size1 = nxmax*nymax
    integer, parameter :: d2_size2 = nxmax*nymax*maxspec
    integer, parameter :: d2_size3 = nxmax*nymax
    integer, parameter :: d1_size1 = maxwf

    integer :: d3s1,d3s2,d2s1,d2s2

! li,ui    lower,upper indices of fields to be transferred (1-3, ui>=li)
    integer :: li,ui

! The first time the routine is called, the unchangeable fields are transferred
    logical :: first_call=.true.

!**********************************************************************

! Sizes of arrays transferred
    d3s1=d3_size1
    d3s2=d3_size2
    d2s1=d2_size1 
    d2s2=d2_size2

! Decide which field(s) need to be transferred
    if (memstat.ge.32) then ! distribute 2 fields, to 1st/2nd indices 
      li=1; ui=2
      d3s1=2*d3_size1
      d3s2=2*d3_size2
      d2s1=2*d2_size1 
      d2s2=2*d2_size2
    else if (memstat.eq.17) then ! distribute 1 field, place on 1st index
      li=1; ui=1
    else if (memstat.eq.18) then ! distribute 1 field, place on 2nd index
      li=2; ui=2
    else if (memstat.eq.19) then ! distribute 1 field, place on 3rd index
      li=3; ui=3
    else
      write(*,*) '#### mpi_mod::mpif_gf_send_vars> ERROR: ', &
           & 'wrong value of memstat, exiting ####', memstat
      stop
    end if


! Time for MPI communications
    if (mp_measure_time) call mpif_mtime('commtime',0)

! Send variables from getfield process (id_read) to other processes
!**********************************************************************

! The non-reader processes need to know if cloud water was read.
    call MPI_Bcast(readclouds,1,MPI_LOGICAL,id_read,MPI_COMM_WORLD,mp_ierr)
    if (mp_ierr /= 0) goto 600

! Static fields/variables sent only at startup
    if (first_call) then
      call MPI_Bcast(oro(:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_Bcast(excessoro(:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_Bcast(lsm(:,:),d2_size3,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_Bcast(xlanduse(:,:,:),d2_size3*numclass,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_Bcast(wftime,d1_size1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_Bcast(numbwf,1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_Bcast(nmixz,1,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
      if (mp_ierr /= 0) goto 600
      call MPI_Bcast(height,nzmax,MPI_INTEGER,id_read,MPI_COMM_WORLD,mp_ierr)
      if (mp_ierr /= 0) goto 600

      first_call=.false.
    endif

    call MPI_Bcast(uu(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    if (mp_ierr /= 0) goto 600
    call MPI_Bcast(vv(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    if (mp_ierr /= 0) goto 600
    call MPI_Bcast(uupol(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    if (mp_ierr /= 0) goto 600
    call MPI_Bcast(vvpol(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    if (mp_ierr /= 0) goto 600
    call MPI_Bcast(ww(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    if (mp_ierr /= 0) goto 600
    call MPI_Bcast(tt(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    if (mp_ierr /= 0) goto 600
    call MPI_Bcast(rho(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    if (mp_ierr /= 0) goto 600
    call MPI_Bcast(drhodz(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    if (mp_ierr /= 0) goto 600
    call MPI_Bcast(tth(:,:,:,li:ui),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    if (mp_ierr /= 0) goto 600
    call MPI_Bcast(qvh(:,:,:,li:ui),d3s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    if (mp_ierr /= 0) goto 600
    call MPI_Bcast(qv(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    if (mp_ierr /= 0) goto 600
    call MPI_Bcast(pv(:,:,:,li:ui),d3s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    if (mp_ierr /= 0) goto 600
    call MPI_Bcast(clouds(:,:,:,li:ui),d3s1,MPI_INTEGER1,id_read,MPI_COMM_WORLD,mp_ierr)
    if (mp_ierr /= 0) goto 600

! cloud water/ice:
    if (readclouds) then
      call MPI_Bcast(ctwc(:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
      if (mp_ierr /= 0) goto 600
    end if

! 2D fields
    call MPI_Bcast(cloudsh(:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    if (mp_ierr /= 0) goto 600
    call MPI_Bcast(vdep(:,:,:,li:ui),d2s2,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    if (mp_ierr /= 0) goto 600
    call MPI_Bcast(ps(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    if (mp_ierr /= 0) goto 600
    call MPI_Bcast(sd(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    if (mp_ierr /= 0) goto 600
    call MPI_Bcast(tcc(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    if (mp_ierr /= 0) goto 600
    call MPI_Bcast(tt2(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    if (mp_ierr /= 0) goto 600
    call MPI_Bcast(td2(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    if (mp_ierr /= 0) goto 600
    call MPI_Bcast(lsprec(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    if (mp_ierr /= 0) goto 600
    call MPI_Bcast(convprec(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    if (mp_ierr /= 0) goto 600
    call MPI_Bcast(ustar(:,:,:,li:ui),d2s1,mp_sp,id_read,MPI_COMM_WORLD,mp_ierr)
    if (mp_ierr /= 0) goto 600