source: LMDZ6/branches/Amaury_dev/libf/dyn3dmem/parallel_lmdz.F90 @ 5411

Last change on this file since 5411 was 5225, checked in by abarral, 3 months ago

Merge r5206 r5207

  • Property copyright set to
    Name of program: LMDZ
    Creation date: 1984
    Version: LMDZ5
    License: CeCILL version 2
    Holder: Laboratoire de météorologie dynamique, CNRS, UMR 8539
    See the license file in the root directory
File size: 18.8 KB
Line 
1! $Id$
2
MODULE parallel_lmdz
  ! 1-D (latitude-band) MPI domain decomposition of the LMDZ dynamics grid,
  ! plus OpenMP bookkeeping.  Each MPI process owns a contiguous range of
  ! latitude rows; TYPE(distrib) bundles the bounds of one such decomposition.
  USE mod_const_mpi
  USE lmdz_mpi, ONLY: using_mpi
  USE IOIPSL
  ! Maximum halo width (in latitude rows) used to size the extended
  ! jjb/jje and ijb/ije bounds below
  INTEGER, PARAMETER :: halo_max = 3

  LOGICAL, SAVE :: using_omp ! .TRUE. if using OpenMP
  LOGICAL, SAVE :: is_master ! .TRUE. if the core is both MPI & OpenMP master
  !$OMP THREADPRIVATE(is_master)

  INTEGER, save :: mpi_size  ! number of MPI processes
  INTEGER, save :: mpi_rank  ! MPI rank of this process
  INTEGER, save :: jj_begin  ! first latitude row owned by this process
  INTEGER, save :: jj_end    ! last latitude row owned by this process
  INTEGER, save :: jj_nb     ! number of latitude rows owned
  INTEGER, save :: ij_begin  ! first owned grid-point index (iip1 points per row)
  INTEGER, save :: ij_end    ! last owned grid-point index
  logical, save :: pole_nord ! .TRUE. on the process holding the north pole (rank 0)
  logical, save :: pole_sud  ! .TRUE. on the process holding the south pole (last rank)

  ! Halo-extended latitude bounds: u-grid (clipped to jjp1 rows) ...
  INTEGER, save :: jjb_u
  INTEGER, save :: jje_u
  INTEGER, save :: jjnb_u
  ! ... and v-grid (clipped to jjm rows)
  INTEGER, save :: jjb_v
  INTEGER, save :: jje_v
  INTEGER, save :: jjnb_v

  ! Halo-extended point-index bounds on the u-grid
  INTEGER, save :: ijb_u
  INTEGER, save :: ije_u
  INTEGER, save :: ijnb_u

  ! Halo-extended point-index bounds on the v-grid
  INTEGER, save :: ijb_v
  INTEGER, save :: ije_v
  INTEGER, save :: ijnb_v

  ! Per-process latitude-band tables, indexed 0..mpi_size-1 (set in init_parallel)
  INTEGER, ALLOCATABLE, SAVE, DIMENSION(:) :: jj_begin_para
  INTEGER, ALLOCATABLE, SAVE, DIMENSION(:) :: jj_end_para
  INTEGER, ALLOCATABLE, SAVE, DIMENSION(:) :: jj_nb_para
  INTEGER, save :: OMP_CHUNK ! OpenMP chunk size for vertical-level loops (see init_parallel)
  INTEGER, save :: omp_rank  ! OpenMP thread number (one value per thread)
  INTEGER, save :: omp_size  ! number of OpenMP threads
  !$OMP THREADPRIVATE(omp_rank)

  ! Complete description of one latitude-band decomposition; the fields
  ! mirror the module-level variables of the same names.
  TYPE distrib
    INTEGER :: jj_begin
    INTEGER :: jj_end
    INTEGER :: jj_nb
    INTEGER :: ij_begin
    INTEGER :: ij_end

    INTEGER :: jjb_u
    INTEGER :: jje_u
    INTEGER :: jjnb_u
    INTEGER :: jjb_v
    INTEGER :: jje_v
    INTEGER :: jjnb_v

    INTEGER :: ijb_u
    INTEGER :: ije_u
    INTEGER :: ijnb_u

    INTEGER :: ijb_v
    INTEGER :: ije_v
    INTEGER :: ijnb_v

    ! Per-process band tables (allocated on demand in create_distrib)
    INTEGER, pointer :: jj_begin_para(:) => NULL()
    INTEGER, pointer :: jj_end_para(:) => NULL()
    INTEGER, pointer :: jj_nb_para(:) => NULL()
  END TYPE distrib

  ! Assigning one distrib to another performs a deep copy (see copy_distrib)
  INTERFACE ASSIGNMENT (=)
    MODULE PROCEDURE copy_distrib
  END INTERFACE
  TYPE(distrib), SAVE :: current_dist ! decomposition currently in effect

CONTAINS
79
  SUBROUTINE init_parallel
    ! Initialise the parallel environment of the dynamics:
    !  - query MPI size/rank (or fall back to a serial 1-process setup),
    !  - redirect text output of non-master processes to lmdz.out_NNNN,
    !  - distribute the jjm+1 latitude rows over the MPI processes,
    !  - derive local bounds (poles, halo-extended ranges) and OpenMP settings,
    !  - build current_dist from the resulting band distribution.
    USE lmdz_vampir
    USE lmdz_mpi
    USE lmdz_iniprint, ONLY: lunout, prt_level
    USE lmdz_dimensions, ONLY: iim, jjm, llm, ndm
  USE lmdz_paramet
    IMPLICIT NONE

    INTEGER :: ierr
    INTEGER :: i, j
    INTEGER :: type_size
    INTEGER, DIMENSION(3) :: blocklen, type
    INTEGER :: comp_id
    CHARACTER(LEN = 4) :: num
    CHARACTER(LEN = 20) :: filename

#ifdef CPP_OMP
      ! OpenMP runtime queries, declared EXTERNAL (no omp_lib module used here)
      INTEGER :: OMP_GET_NUM_THREADS
      EXTERNAL OMP_GET_NUM_THREADS
      INTEGER :: OMP_GET_THREAD_NUM
      EXTERNAL OMP_GET_THREAD_NUM
#endif

#ifdef CPP_OMP
       using_OMP=.TRUE.
#else
    using_OMP = .FALSE.
#endif

    CALL InitVampir

    IF (using_mpi) THEN
      CALL MPI_COMM_SIZE(COMM_LMDZ, mpi_size, ierr)
      CALL MPI_COMM_RANK(COMM_LMDZ, mpi_rank, ierr)
    ELSE
      ! Serial run: behave as a single MPI process
      mpi_size = 1
      mpi_rank = 0
    ENDIF

    ! Open text output file with mpi_rank in suffix of file name
    ! (only when lunout is not stdin/stdout; rank 0 keeps the default unit)
    IF (lunout /= 5 .AND. lunout /= 6) THEN
      WRITE(num, '(I4.4)') mpi_rank
      filename = 'lmdz.out_' // num
      IF (mpi_rank /= 0) THEN
        OPEN(UNIT = lunout, FILE = TRIM(filename), ACTION = 'write', &
                STATUS = 'unknown', FORM = 'formatted', IOSTAT = ierr)
      ENDIF
    ENDIF

    allocate(jj_begin_para(0:mpi_size - 1))
    allocate(jj_end_para(0:mpi_size - 1))
    allocate(jj_nb_para(0:mpi_size - 1))

    ! Spread the jjm+1 latitude rows as evenly as possible: the first
    ! MOD(jjm+1, mpi_size) processes each get one extra row.
    DO i = 0, mpi_size - 1
      jj_nb_para(i) = (jjm + 1) / mpi_size
      IF (i < MOD((jjm + 1), mpi_size)) jj_nb_para(i) = jj_nb_para(i) + 1

      ! Every process needs at least 2 latitude rows; abort otherwise
      IF (jj_nb_para(i) <= 1) THEN
        WRITE(lunout, *)"Arret : le nombre de bande de lattitude par process est trop faible (<2)."
        WRITE(lunout, *)" ---> diminuez le nombre de CPU ou augmentez la taille en lattitude"

        IF (using_mpi) CALL MPI_ABORT(COMM_LMDZ, -1, ierr)

      endif

    enddo

    ! (debug) example of a hand-tuned distribution:
    !      jj_nb_para(0)=11
    !      jj_nb_para(1)=25
    !      jj_nb_para(2)=25
    !      jj_nb_para(3)=12

    j = 1

    ! Convert per-process row counts into contiguous [begin, end] row ranges
    DO i = 0, mpi_size - 1

      jj_begin_para(i) = j
      jj_end_para(i) = j + jj_Nb_para(i) - 1
      j = j + jj_Nb_para(i)

    enddo

    jj_begin = jj_begin_para(mpi_rank)
    jj_end = jj_end_para(mpi_rank)
    jj_nb = jj_nb_para(mpi_rank)

    ! Point-index range of the local domain: iip1 points per latitude row
    ij_begin = (jj_begin - 1) * iip1 + 1
    ij_end = jj_end * iip1

    ! The first process holds the north pole, the last one the south pole
    IF (mpi_rank==0) THEN
      pole_nord = .TRUE.
    else
      pole_nord = .FALSE.
    endif

    IF (mpi_rank==mpi_size - 1) THEN
      pole_sud = .TRUE.
    else
      pole_sud = .FALSE.
    endif

    WRITE(lunout, *)"init_parallel: jj_begin", jj_begin
    WRITE(lunout, *)"init_parallel: jj_end", jj_end
    WRITE(lunout, *)"init_parallel: ij_begin", ij_begin
    WRITE(lunout, *)"init_parallel: ij_end", ij_end
    ! Halo-extended bounds, clipped to the global grid
    ! (u-grid: jjp1 rows / ip1jmp1 points; v-grid: jjm rows / ip1jm points)
    jjb_u = MAX(jj_begin - halo_max, 1)
    jje_u = MIN(jj_end + halo_max, jjp1)
    jjnb_u = jje_u - jjb_u + 1

    jjb_v = MAX(jj_begin - halo_max, 1)
    jje_v = MIN(jj_end + halo_max, jjm)
    jjnb_v = jje_v - jjb_v + 1

    ijb_u = MAX(ij_begin - halo_max * iip1, 1)
    ije_u = MIN(ij_end + halo_max * iip1, ip1jmp1)
    ijnb_u = ije_u - ijb_u + 1

    ijb_v = MAX(ij_begin - halo_max * iip1, 1)
    ije_v = MIN(ij_end + halo_max * iip1, ip1jm)
    ijnb_v = ije_v - ijb_v + 1

    !$OMP PARALLEL

#ifdef CPP_OMP
!$OMP MASTER
        omp_size=OMP_GET_NUM_THREADS()
!$OMP END MASTER
!$OMP BARRIER
        ! omp_rank is THREADPRIVATE: each thread records its own number
        omp_rank=OMP_GET_THREAD_NUM()

!Config  Key  = omp_chunk
!Config  Desc = size of the OpenMP blocks
!Config  Def  = 1
!Config  Help = defines the size of the OpenMP iteration packets
!Config         handed to each task on entry into a
!Config         parallelised loop

!$OMP MASTER
      ! Default chunk = ceiling((llm+1)/omp_size); may be overridden
      ! by an 'omp_chunk' entry in the run definition file (getin)
      omp_chunk=(llm+1)/omp_size
      IF (MOD(llm+1,omp_size)/=0) omp_chunk=omp_chunk+1
      CALL getin('omp_chunk',omp_chunk)
!$OMP END MASTER
!$OMP BARRIER
#else
    ! No OpenMP: a single thread
    omp_size = 1
    omp_rank = 0
#endif
    !$OMP END PARALLEL
    CALL create_distrib(jj_nb_para, current_dist)

    IF ((mpi_rank==0).AND.(omp_rank==0)) THEN
      is_master = .TRUE.
    ELSE
      is_master = .FALSE.
    ENDIF

  END SUBROUTINE  init_parallel
240
241  SUBROUTINE create_distrib(jj_nb_new, d)
242    USE lmdz_dimensions, ONLY: iim, jjm, llm, ndm
243  USE lmdz_paramet
244    IMPLICIT NONE
245
246
247
248    INTEGER, INTENT(IN) :: jj_Nb_New(0:MPI_Size - 1)
249    TYPE(distrib), INTENT(INOUT) :: d
250    INTEGER :: i
251
252    IF (.NOT. ASSOCIATED(d%jj_nb_para)) ALLOCATE(d%jj_nb_para(0:MPI_Size - 1))
253    IF (.NOT. ASSOCIATED(d%jj_begin_para)) ALLOCATE(d%jj_begin_para(0:MPI_Size - 1))
254    IF (.NOT. ASSOCIATED(d%jj_end_para)) ALLOCATE(d%jj_end_para(0:MPI_Size - 1))
255
256    d%jj_Nb_Para = jj_Nb_New
257
258    d%jj_begin_para(0) = 1
259    d%jj_end_para(0) = d%jj_Nb_Para(0)
260
261    DO i = 1, mpi_size - 1
262
263      d%jj_begin_para(i) = d%jj_end_para(i - 1) + 1
264      d%jj_end_para(i) = d%jj_begin_para(i) + d%jj_Nb_para(i) - 1
265
266    enddo
267
268    d%jj_begin = d%jj_begin_para(mpi_rank)
269    d%jj_end = d%jj_end_para(mpi_rank)
270    d%jj_nb = d%jj_nb_para(mpi_rank)
271
272    d%ij_begin = (d%jj_begin - 1) * iip1 + 1
273    d%ij_end = d%jj_end * iip1
274
275    d%jjb_u = MAX(d%jj_begin - halo_max, 1)
276    d%jje_u = MIN(d%jj_end + halo_max, jjp1)
277    d%jjnb_u = d%jje_u - d%jjb_u + 1
278
279    d%jjb_v = MAX(d%jj_begin - halo_max, 1)
280    d%jje_v = MIN(d%jj_end + halo_max, jjm)
281    d%jjnb_v = d%jje_v - d%jjb_v + 1
282
283    d%ijb_u = MAX(d%ij_begin - halo_max * iip1, 1)
284    d%ije_u = MIN(d%ij_end + halo_max * iip1, ip1jmp1)
285    d%ijnb_u = d%ije_u - d%ijb_u + 1
286
287    d%ijb_v = MAX(d%ij_begin - halo_max * iip1, 1)
288    d%ije_v = MIN(d%ij_end + halo_max * iip1, ip1jm)
289    d%ijnb_v = d%ije_v - d%ijb_v + 1
290
291  END SUBROUTINE create_distrib
292
293
294  SUBROUTINE Set_Distrib(d)
295    USE lmdz_dimensions, ONLY: iim, jjm, llm, ndm
296  USE lmdz_paramet
297    IMPLICIT NONE
298
299
300    TYPE(distrib), INTENT(IN) :: d
301
302    jj_begin = d%jj_begin
303    jj_end = d%jj_end
304    jj_nb = d%jj_nb
305    ij_begin = d%ij_begin
306    ij_end = d%ij_end
307
308    jjb_u = d%jjb_u
309    jje_u = d%jje_u
310    jjnb_u = d%jjnb_u
311    jjb_v = d%jjb_v
312    jje_v = d%jje_v
313    jjnb_v = d%jjnb_v
314
315    ijb_u = d%ijb_u
316    ije_u = d%ije_u
317    ijnb_u = d%ijnb_u
318
319    ijb_v = d%ijb_v
320    ije_v = d%ije_v
321    ijnb_v = d%ijnb_v
322
323    jj_begin_para(:) = d%jj_begin_para(:)
324    jj_end_para(:) = d%jj_end_para(:)
325    jj_nb_para(:) = d%jj_nb_para(:)
326    current_dist = d
327
328  END SUBROUTINE Set_Distrib
329
330  SUBROUTINE copy_distrib(dist, new_dist)
331    USE lmdz_dimensions, ONLY: iim, jjm, llm, ndm
332  USE lmdz_paramet
333    IMPLICIT NONE
334
335
336    TYPE(distrib), INTENT(INOUT) :: dist
337    TYPE(distrib), INTENT(IN) :: new_dist
338
339    dist%jj_begin = new_dist%jj_begin
340    dist%jj_end = new_dist%jj_end
341    dist%jj_nb = new_dist%jj_nb
342    dist%ij_begin = new_dist%ij_begin
343    dist%ij_end = new_dist%ij_end
344
345    dist%jjb_u = new_dist%jjb_u
346    dist%jje_u = new_dist%jje_u
347    dist%jjnb_u = new_dist%jjnb_u
348    dist%jjb_v = new_dist%jjb_v
349    dist%jje_v = new_dist%jje_v
350    dist%jjnb_v = new_dist%jjnb_v
351
352    dist%ijb_u = new_dist%ijb_u
353    dist%ije_u = new_dist%ije_u
354    dist%ijnb_u = new_dist%ijnb_u
355
356    dist%ijb_v = new_dist%ijb_v
357    dist%ije_v = new_dist%ije_v
358    dist%ijnb_v = new_dist%ijnb_v
359
360    dist%jj_begin_para(:) = new_dist%jj_begin_para(:)
361    dist%jj_end_para(:) = new_dist%jj_end_para(:)
362    dist%jj_nb_para(:) = new_dist%jj_nb_para(:)
363
364  END SUBROUTINE copy_distrib
365
366
367  SUBROUTINE get_current_distrib(d)
368    USE lmdz_dimensions, ONLY: iim, jjm, llm, ndm
369  USE lmdz_paramet
370    IMPLICIT NONE
371
372
373    TYPE(distrib), INTENT(OUT) :: d
374
375    d = current_dist
376
377  END SUBROUTINE get_current_distrib
378
  SUBROUTINE Finalize_parallel
    ! Shut the parallel environment down cleanly: release the band tables,
    ! close XIOS contexts when enabled and, depending on the ocean setup,
    ! terminate either the OASIS coupler or MPI itself.
    USE lmdz_mpi
    USE lmdz_wxios  ! ug: for XIOS output
    USE lmdz_dimensions, ONLY: iim, jjm, llm, ndm
    USE lmdz_paramet
    USE control_mod, only : ok_dyn_xios


#ifdef CPP_COUPLE
! Use of Oasis-MCT coupler
#if defined CPP_OMCT
    USE mod_prism
#else
    USE mod_prism_proto
#endif
! Ehouarn: surface_data module is in 'phylmd' ...
      USE surface_data, ONLY: type_ocean
      IMPLICIT NONE
#else
      IMPLICIT NONE
    ! without the surface_data module, we declare (and set) a dummy 'type_ocean'
    CHARACTER(LEN = 6), parameter :: type_ocean = "dummy"
#endif

    INTEGER :: ierr
    INTEGER :: i

    ! Release the per-process band tables allocated in init_parallel
    IF (allocated(jj_begin_para)) deallocate(jj_begin_para)
    IF (allocated(jj_end_para))   deallocate(jj_end_para)
    IF (allocated(jj_nb_para))    deallocate(jj_nb_para)

    IF (type_ocean == 'couple') THEN
#ifdef CPP_COUPLE
        IF (using_xios) THEN
          ! Clean XIOS shutdown
          ! close xios dynamic context if is defined (call LMDZDYN)
          IF (ok_dyn_xios) THEN
             CALL xios_context_finalize()
          ENDIF
          CALL wxios_close()
          ! Let OASIS terminate the coupled run
          CALL prism_terminate_proto(ierr)
          IF (ierr .NE. PRISM_Ok) THEN
            CALL abort_gcm('Finalize_parallel',' Probleme dans prism_terminate_proto ',1)
          ENDIF
        ELSE
           CALL prism_terminate_proto(ierr)
           IF (ierr .NE. PRISM_Ok) THEN
              CALL abort_gcm('Finalize_parallel',' Probleme dans prism_terminate_proto ',1)
           endif
        ENDIF
#else
      CALL abort_gcm('Finalize_parallel', 'type_ocean = couple but CPP_COUPLE not defined', 1)
#endif

    ELSE
      IF (using_xios) THEN
        ! Clean XIOS shutdown
        IF (ok_dyn_xios) THEN
          CALL xios_context_finalize()
        END IF

        CALL wxios_close()
      END IF
      ! No coupler: finalise MPI directly
      IF (using_mpi) CALL MPI_FINALIZE(ierr)
    END IF

  END SUBROUTINE  Finalize_parallel
446
447  SUBROUTINE Pack_Data(Field, ij, ll, row, Buffer)
448    USE lmdz_dimensions, ONLY: iim, jjm, llm, ndm
449  USE lmdz_paramet
450    IMPLICIT NONE
451
452
453
454    INTEGER, INTENT(IN) :: ij, ll, row
455    REAL, DIMENSION(ij, ll), INTENT(IN) :: Field
456    REAL, DIMENSION(ll * iip1 * row), INTENT(OUT) :: Buffer
457
458    INTEGER :: Pos
459    INTEGER :: i, l
460
461    Pos = 0
462    DO l = 1, ll
463      DO i = 1, row * iip1
464        Pos = Pos + 1
465        Buffer(Pos) = Field(i, l)
466      enddo
467    enddo
468
469  END SUBROUTINE  Pack_data
470
471  SUBROUTINE Unpack_Data(Field, ij, ll, row, Buffer)
472    USE lmdz_dimensions, ONLY: iim, jjm, llm, ndm
473  USE lmdz_paramet
474    IMPLICIT NONE
475
476
477
478    INTEGER, INTENT(IN) :: ij, ll, row
479    REAL, DIMENSION(ij, ll), INTENT(OUT) :: Field
480    REAL, DIMENSION(ll * iip1 * row), INTENT(IN) :: Buffer
481
482    INTEGER :: Pos
483    INTEGER :: i, l
484
485    Pos = 0
486
487    DO l = 1, ll
488      DO i = 1, row * iip1
489        Pos = Pos + 1
490        Field(i, l) = Buffer(Pos)
491      enddo
492    enddo
493
494  END SUBROUTINE  UnPack_data
495
496
497  SUBROUTINE barrier
498    USE lmdz_mpi
499    IMPLICIT NONE
500    INTEGER :: ierr
501
502    !$OMP CRITICAL (MPI)
503    IF (using_mpi) CALL MPI_Barrier(COMM_LMDZ, ierr)
504    !$OMP END CRITICAL (MPI)
505
506  END SUBROUTINE barrier
507
508
  SUBROUTINE exchange_hallo(Field, ij, ll, up, down)
    ! Exchange halo rows of Field with the two neighbouring MPI processes.
    !   up   : depth (in latitude rows) of the halo above each local domain;
    !          received from rank-1 into rows ij_begin-up*iip1 .. ij_begin-1
    !   down : depth of the halo below each local domain;
    !          received from rank+1 into rows ij_end+1 .. ij_end+down*iip1
    ! The matching border rows of the local domain are sent to the
    ! neighbours with non-blocking MPI calls, completed by MPI_WAITALL.
    ! All processes must call this with the same up/down values.
    USE lmdz_mpi
    USE lmdz_vampir
    USE lmdz_dimensions, ONLY: iim, jjm, llm, ndm
  USE lmdz_paramet
    IMPLICIT NONE

    INTEGER :: ij, ll
    REAL, DIMENSION(ij, ll) :: Field
    INTEGER :: up, down

    INTEGER :: ierr
    LOGICAL :: SendUp, SendDown
    LOGICAL :: RecvUp, RecvDown
    INTEGER, DIMENSION(4) :: Request
    INTEGER, DIMENSION(MPI_STATUS_SIZE, 4) :: Status

    INTEGER :: NbRequest
    REAL, DIMENSION(:), ALLOCATABLE :: Buffer_Send_up, Buffer_Send_down
    REAL, DIMENSION(:), ALLOCATABLE :: Buffer_Recv_up, Buffer_Recv_down
    INTEGER :: Buffer_size

    IF (using_mpi) THEN

      CALL barrier

      CALL VTb(VThallo)  ! Vampir trace: enter halo-exchange region

      SendUp = .TRUE.
      SendDown = .TRUE.
      RecvUp = .TRUE.
      RecvDown = .TRUE.

      ! No neighbour beyond the poles
      IF (pole_nord) THEN
        SendUp = .FALSE.
        RecvUp = .FALSE.
      ENDIF

      IF (pole_sud) THEN
        SendDown = .FALSE.
        RecvDown = .FALSE.
      ENDIF

      ! up = 0 disables the up-halo exchange everywhere: no receive from
      ! rank-1 and no matching send to rank+1 (which would feed its up-halo)
      IF (up==0) THEN
        SendDown = .FALSE.
        RecvUp = .FALSE.
      endif

      ! down = 0 similarly disables the down-halo exchange
      IF (down==0) THEN
        SendUp = .FALSE.
        RecvDown = .FALSE.
      endif

      NbRequest = 0

      IF (SendUp) THEN
        NbRequest = NbRequest + 1
        ! Our first `down` rows feed the down-halo of rank-1
        buffer_size = down * iip1 * ll
        allocate(Buffer_Send_up(Buffer_size))
        CALL PACK_Data(Field(ij_begin, 1), ij, ll, down, Buffer_Send_up)
        !$OMP CRITICAL (MPI)
        CALL MPI_ISEND(Buffer_send_up, Buffer_Size, MPI_REAL8, MPI_Rank - 1, 1, &
                COMM_LMDZ, Request(NbRequest), ierr)
        !$OMP END CRITICAL (MPI)
      ENDIF

      IF (SendDown) THEN
        NbRequest = NbRequest + 1

        ! Our last `up` rows feed the up-halo of rank+1
        buffer_size = up * iip1 * ll
        allocate(Buffer_Send_down(Buffer_size))
        CALL PACK_Data(Field(ij_end + 1 - up * iip1, 1), ij, ll, up, Buffer_send_down)

        !$OMP CRITICAL (MPI)
        CALL MPI_ISEND(Buffer_send_down, Buffer_Size, MPI_REAL8, MPI_Rank + 1, 1, &
                COMM_LMDZ, Request(NbRequest), ierr)
        !$OMP END CRITICAL (MPI)
      ENDIF

      IF (RecvUp) THEN
        NbRequest = NbRequest + 1
        ! Up-halo: `up` rows coming from rank-1
        buffer_size = up * iip1 * ll
        allocate(Buffer_recv_up(Buffer_size))

        !$OMP CRITICAL (MPI)
        CALL MPI_IRECV(Buffer_recv_up, Buffer_size, MPI_REAL8, MPI_Rank - 1, 1, &
                COMM_LMDZ, Request(NbRequest), ierr)
        !$OMP END CRITICAL (MPI)

      ENDIF

      IF (RecvDown) THEN
        NbRequest = NbRequest + 1
        ! Down-halo: `down` rows coming from rank+1
        buffer_size = down * iip1 * ll
        allocate(Buffer_recv_down(Buffer_size))

        !$OMP CRITICAL (MPI)
        CALL MPI_IRECV(Buffer_recv_down, Buffer_size, MPI_REAL8, MPI_Rank + 1, 1, &
                COMM_LMDZ, Request(NbRequest), ierr)
        !$OMP END CRITICAL (MPI)

      ENDIF

      ! Complete all pending transfers, then copy received halos into Field
      IF (NbRequest > 0) CALL MPI_WAITALL(NbRequest, Request, Status, ierr)
      IF (RecvUp)  CALL Unpack_Data(Field(ij_begin - up * iip1, 1), ij, ll, up, Buffer_Recv_up)
      IF (RecvDown) CALL Unpack_Data(Field(ij_end + 1, 1), ij, ll, down, Buffer_Recv_down)

      CALL VTe(VThallo)
      CALL barrier

    ENDIF  ! using_mpi

  END SUBROUTINE  exchange_Hallo
623
624
  SUBROUTINE Gather_Field(Field, ij, ll, rank)
    ! Gather a field distributed over the latitude bands onto process `rank`.
    !   ij = ip1jmp1 : field on the u-grid (jjp1 rows)
    !   ij = ip1jm   : field on the v-grid (jjm rows)
    ! Each process packs its locally-owned rows, then MPI_GATHERV assembles
    ! them in latitude order into Field on the target process.
    USE lmdz_mpi
    USE lmdz_iniprint, ONLY: lunout, prt_level
    USE lmdz_dimensions, ONLY: iim, jjm, llm, ndm
  USE lmdz_paramet
    IMPLICIT NONE

    INTEGER :: ij, ll, rank
    REAL, DIMENSION(ij, ll) :: Field
    REAL, DIMENSION(:), ALLOCATABLE :: Buffer_send
    REAL, DIMENSION(:), ALLOCATABLE :: Buffer_Recv
    INTEGER, DIMENSION(0:MPI_Size - 1) :: Recv_count, displ
    INTEGER :: ierr
    INTEGER :: i

    IF (using_mpi) THEN

      ! Pack the locally-owned rows.  The v-grid has one row less than the
      ! u-grid, hence the min(jj_end, jjm) clipping on the last process.
      IF (ij==ip1jmp1) THEN
        allocate(Buffer_send(iip1 * ll * (jj_end - jj_begin + 1)))
        CALL Pack_Data(Field(ij_begin, 1), ij, ll, jj_end - jj_begin + 1, Buffer_send)
      ELSE IF (ij==ip1jm) THEN
        allocate(Buffer_send(iip1 * ll * (min(jj_end, jjm) - jj_begin + 1)))
        CALL Pack_Data(Field(ij_begin, 1), ij, ll, min(jj_end, jjm) - jj_begin + 1, Buffer_send)
      else
        WRITE(lunout, *)ij
        CALL abort_gcm("parallel_lmdz", "erreur dans Gather_Field", 1)
      endif

      IF (MPI_Rank==rank) THEN
        allocate(Buffer_Recv(ij * ll))

        ! Per-process receive counts and displacements for MPI_GATHERV
        !CDIR NOVECTOR
        DO i = 0, MPI_Size - 1

          IF (ij==ip1jmp1) THEN
            Recv_count(i) = (jj_end_para(i) - jj_begin_para(i) + 1) * ll * iip1
          ELSE IF (ij==ip1jm) THEN
            Recv_count(i) = (min(jj_end_para(i), jjm) - jj_begin_para(i) + 1) * ll * iip1
          else
            CALL abort_gcm("parallel_lmdz", "erreur dans Gather_Field", 1)
          endif

          IF (i==0) THEN
            displ(i) = 0
          else
            displ(i) = displ(i - 1) + Recv_count(i - 1)
          endif

        enddo

      else
        ! Ehouarn: When in debug mode, ifort complains (for CALL MPI_GATHERV
        !          below) about Buffer_Recv() being not allocated.
        !          So make a dummy allocation.
        allocate(Buffer_Recv(1))
      endif ! of if (MPI_Rank==rank)

      !$OMP CRITICAL (MPI)
      CALL MPI_GATHERV(Buffer_send, (min(ij_end, ij) - ij_begin + 1) * ll, MPI_REAL8, &
              Buffer_Recv, Recv_count, displ, MPI_REAL8, rank, COMM_LMDZ, ierr)
      !$OMP END CRITICAL (MPI)

      ! On the target process, scatter each contribution back into Field
      ! at the row range owned by its sender
      IF (MPI_Rank==rank) THEN
        IF (ij==ip1jmp1) THEN
          DO i = 0, MPI_Size - 1
            CALL Unpack_Data(Field((jj_begin_para(i) - 1) * iip1 + 1, 1), ij, ll, &
                    jj_end_para(i) - jj_begin_para(i) + 1, Buffer_Recv(displ(i) + 1))
          enddo
        ELSE IF (ij==ip1jm) THEN
          DO i = 0, MPI_Size - 1
            CALL Unpack_Data(Field((jj_begin_para(i) - 1) * iip1 + 1, 1), ij, ll, &
                    min(jj_end_para(i), jjm) - jj_begin_para(i) + 1, Buffer_Recv(displ(i) + 1))
          enddo
        endif
      endif
    ENDIF ! using_mpi

  END SUBROUTINE  Gather_Field
704
705
706  SUBROUTINE AllGather_Field(Field, ij, ll)
707    USE lmdz_mpi
708    USE lmdz_dimensions, ONLY: iim, jjm, llm, ndm
709  USE lmdz_paramet
710    IMPLICIT NONE
711
712
713    INTEGER :: ij, ll
714    REAL, DIMENSION(ij, ll) :: Field
715    INTEGER :: ierr
716
717    IF (using_mpi) THEN
718      CALL Gather_Field(Field, ij, ll, 0)
719      !$OMP CRITICAL (MPI)
720      CALL MPI_BCAST(Field, ij * ll, MPI_REAL8, 0, COMM_LMDZ, ierr)
721      !$OMP END CRITICAL (MPI)
722    ENDIF
723
724  END SUBROUTINE  AllGather_Field
725
726  SUBROUTINE Broadcast_Field(Field, ij, ll, rank)
727    USE lmdz_mpi
728    USE lmdz_dimensions, ONLY: iim, jjm, llm, ndm
729  USE lmdz_paramet
730    IMPLICIT NONE
731
732
733    INTEGER :: ij, ll
734    REAL, DIMENSION(ij, ll) :: Field
735    INTEGER :: rank
736    INTEGER :: ierr
737
738    IF (using_mpi) THEN
739
740      !$OMP CRITICAL (MPI)
741      CALL MPI_BCAST(Field, ij * ll, MPI_REAL8, rank, COMM_LMDZ, ierr)
742      !$OMP END CRITICAL (MPI)
743
744    ENDIF
745  END SUBROUTINE  Broadcast_Field
746
747END MODULE parallel_lmdz
Note: See TracBrowser for help on using the repository browser.