source: LMDZ6/branches/Amaury_dev/libf/dyn3dmem/parallel_lmdz.F90 @ 5423

Last change on this file since 5423 was 5225, checked in by abarral, 3 months ago

Merge r5206 r5207

  • Property copyright set to
    Name of program: LMDZ
    Creation date: 1984
    Version: LMDZ5
    License: CeCILL version 2
    Holder: Laboratoire de météorologie dynamique, CNRS, UMR 8539
    See the license file in the root directory
File size: 18.8 KB
! $Id$

MODULE parallel_lmdz
  USE mod_const_mpi
  USE lmdz_mpi, ONLY: using_mpi
  USE IOIPSL
  INTEGER, PARAMETER :: halo_max = 3

  LOGICAL, SAVE :: using_omp ! .TRUE. if using OpenMP
  LOGICAL, SAVE :: is_master ! .TRUE. if the core is both MPI & OpenMP master
  !$OMP THREADPRIVATE(is_master)

  INTEGER, SAVE :: mpi_size
  INTEGER, SAVE :: mpi_rank
  INTEGER, SAVE :: jj_begin
  INTEGER, SAVE :: jj_end
  INTEGER, SAVE :: jj_nb
  INTEGER, SAVE :: ij_begin
  INTEGER, SAVE :: ij_end
  LOGICAL, SAVE :: pole_nord
  LOGICAL, SAVE :: pole_sud

  INTEGER, SAVE :: jjb_u
  INTEGER, SAVE :: jje_u
  INTEGER, SAVE :: jjnb_u
  INTEGER, SAVE :: jjb_v
  INTEGER, SAVE :: jje_v
  INTEGER, SAVE :: jjnb_v

  INTEGER, SAVE :: ijb_u
  INTEGER, SAVE :: ije_u
  INTEGER, SAVE :: ijnb_u

  INTEGER, SAVE :: ijb_v
  INTEGER, SAVE :: ije_v
  INTEGER, SAVE :: ijnb_v

  INTEGER, ALLOCATABLE, SAVE, DIMENSION(:) :: jj_begin_para
  INTEGER, ALLOCATABLE, SAVE, DIMENSION(:) :: jj_end_para
  INTEGER, ALLOCATABLE, SAVE, DIMENSION(:) :: jj_nb_para
  INTEGER, SAVE :: omp_chunk
  INTEGER, SAVE :: omp_rank
  INTEGER, SAVE :: omp_size
  !$OMP THREADPRIVATE(omp_rank)

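  ! A 'distrib' bundles a complete latitude-band domain decomposition: the
  ! local jj/ij bounds, the halo-extended bounds on the u and v grids, and the
  ! per-rank begin/end/count arrays, so that a decomposition can be stored,
  ! activated (Set_Distrib) and restored as a whole.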
  TYPE distrib
    INTEGER :: jj_begin
    INTEGER :: jj_end
    INTEGER :: jj_nb
    INTEGER :: ij_begin
    INTEGER :: ij_end

    INTEGER :: jjb_u
    INTEGER :: jje_u
    INTEGER :: jjnb_u
    INTEGER :: jjb_v
    INTEGER :: jje_v
    INTEGER :: jjnb_v

    INTEGER :: ijb_u
    INTEGER :: ije_u
    INTEGER :: ijnb_u

    INTEGER :: ijb_v
    INTEGER :: ije_v
    INTEGER :: ijnb_v

    INTEGER, POINTER :: jj_begin_para(:) => NULL()
    INTEGER, POINTER :: jj_end_para(:) => NULL()
    INTEGER, POINTER :: jj_nb_para(:) => NULL()
  END TYPE distrib

  INTERFACE ASSIGNMENT (=)
    MODULE PROCEDURE copy_distrib
  END INTERFACE
  TYPE(distrib), SAVE :: current_dist

CONTAINS

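  ! Initialize the MPI/OpenMP setup: query ranks and sizes, distribute the
  ! latitude bands among processes, compute the local and halo bounds, and
  ! build the initial distribution (current_dist).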
  SUBROUTINE init_parallel
    USE lmdz_vampir
    USE lmdz_mpi
    USE lmdz_iniprint, ONLY: lunout, prt_level
    USE lmdz_dimensions, ONLY: iim, jjm, llm, ndm
    USE lmdz_paramet
    IMPLICIT NONE

    INTEGER :: ierr
    INTEGER :: i, j
    INTEGER :: type_size
    INTEGER, DIMENSION(3) :: blocklen, type
    INTEGER :: comp_id
    CHARACTER(LEN = 4) :: num
    CHARACTER(LEN = 20) :: filename

#ifdef CPP_OMP
    INTEGER :: OMP_GET_NUM_THREADS
    EXTERNAL OMP_GET_NUM_THREADS
    INTEGER :: OMP_GET_THREAD_NUM
    EXTERNAL OMP_GET_THREAD_NUM
#endif

#ifdef CPP_OMP
    using_omp = .TRUE.
#else
    using_omp = .FALSE.
#endif

    CALL InitVampir

    IF (using_mpi) THEN
      CALL MPI_COMM_SIZE(COMM_LMDZ, mpi_size, ierr)
      CALL MPI_COMM_RANK(COMM_LMDZ, mpi_rank, ierr)
    ELSE
      mpi_size = 1
      mpi_rank = 0
    ENDIF

    ! Open the text output file with mpi_rank as suffix of the file name
    IF (lunout /= 5 .AND. lunout /= 6) THEN
      WRITE(num, '(I4.4)') mpi_rank
      filename = 'lmdz.out_' // num
      IF (mpi_rank /= 0) THEN
        OPEN(UNIT = lunout, FILE = TRIM(filename), ACTION = 'write', &
                STATUS = 'unknown', FORM = 'formatted', IOSTAT = ierr)
      ENDIF
    ENDIF

    ALLOCATE(jj_begin_para(0:mpi_size - 1))
    ALLOCATE(jj_end_para(0:mpi_size - 1))
    ALLOCATE(jj_nb_para(0:mpi_size - 1))

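    ! Distribute the jjm+1 latitude rows among the MPI processes: every rank
    ! gets (jjm+1)/mpi_size rows and the first MOD(jjm+1, mpi_size) ranks get
    ! one extra row.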
    DO i = 0, mpi_size - 1
      jj_nb_para(i) = (jjm + 1) / mpi_size
      IF (i < MOD((jjm + 1), mpi_size)) jj_nb_para(i) = jj_nb_para(i) + 1

      IF (jj_nb_para(i) <= 1) THEN
        WRITE(lunout, *)"Abort: the number of latitude bands per process is too small (<2)."
        WRITE(lunout, *)" ---> decrease the number of CPUs or increase the latitude dimension"

        IF (using_mpi) CALL MPI_ABORT(COMM_LMDZ, -1, ierr)

      ENDIF

    ENDDO

    !      jj_nb_para(0)=11
    !      jj_nb_para(1)=25
    !      jj_nb_para(2)=25
    !      jj_nb_para(3)=12

    j = 1

    DO i = 0, mpi_size - 1

      jj_begin_para(i) = j
      jj_end_para(i) = j + jj_nb_para(i) - 1
      j = j + jj_nb_para(i)

    ENDDO

    jj_begin = jj_begin_para(mpi_rank)
    jj_end = jj_end_para(mpi_rank)
    jj_nb = jj_nb_para(mpi_rank)

    ij_begin = (jj_begin - 1) * iip1 + 1
    ij_end = jj_end * iip1

    pole_nord = (mpi_rank == 0)
    pole_sud = (mpi_rank == mpi_size - 1)

    WRITE(lunout, *)"init_parallel: jj_begin", jj_begin
    WRITE(lunout, *)"init_parallel: jj_end", jj_end
    WRITE(lunout, *)"init_parallel: ij_begin", ij_begin
    WRITE(lunout, *)"init_parallel: ij_end", ij_end
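    ! Halo-extended bounds: the local domain is widened by up to halo_max
    ! rows on each side, clipped to the u grid (rows 1..jjp1) and the v grid
    ! (rows 1..jjm) respectively.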
    jjb_u = MAX(jj_begin - halo_max, 1)
    jje_u = MIN(jj_end + halo_max, jjp1)
    jjnb_u = jje_u - jjb_u + 1

    jjb_v = MAX(jj_begin - halo_max, 1)
    jje_v = MIN(jj_end + halo_max, jjm)
    jjnb_v = jje_v - jjb_v + 1

    ijb_u = MAX(ij_begin - halo_max * iip1, 1)
    ije_u = MIN(ij_end + halo_max * iip1, ip1jmp1)
    ijnb_u = ije_u - ijb_u + 1

    ijb_v = MAX(ij_begin - halo_max * iip1, 1)
    ije_v = MIN(ij_end + halo_max * iip1, ip1jm)
    ijnb_v = ije_v - ijb_v + 1

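    ! Inside a parallel region, each thread records its rank; the master
    ! thread also queries the number of threads and the omp_chunk block size.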
    !$OMP PARALLEL

#ifdef CPP_OMP
!$OMP MASTER
    omp_size = OMP_GET_NUM_THREADS()
!$OMP END MASTER
!$OMP BARRIER
    omp_rank = OMP_GET_THREAD_NUM()

!Config  Key  = omp_chunk
!Config  Desc = size of the OpenMP blocks
!Config  Def  = 1
!Config  Help = defines the size of the OpenMP iteration chunks handed to
!Config         each thread when entering a parallelized loop

!$OMP MASTER
    omp_chunk = (llm + 1) / omp_size
    IF (MOD(llm + 1, omp_size) /= 0) omp_chunk = omp_chunk + 1
    CALL getin('omp_chunk', omp_chunk)
!$OMP END MASTER
!$OMP BARRIER
#else
    omp_size = 1
    omp_rank = 0
#endif
    !$OMP END PARALLEL
    CALL create_distrib(jj_nb_para, current_dist)

    is_master = (mpi_rank == 0) .AND. (omp_rank == 0)

  END SUBROUTINE init_parallel

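  ! Fill a distrib structure from a per-rank band-count array, recomputing
  ! every begin/end index and halo bound with the same formulas as
  ! init_parallel.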
  SUBROUTINE create_distrib(jj_nb_new, d)
    USE lmdz_dimensions, ONLY: iim, jjm, llm, ndm
    USE lmdz_paramet
    IMPLICIT NONE

    INTEGER, INTENT(IN) :: jj_nb_new(0:mpi_size - 1)
    TYPE(distrib), INTENT(INOUT) :: d
    INTEGER :: i

    IF (.NOT. ASSOCIATED(d%jj_nb_para)) ALLOCATE(d%jj_nb_para(0:mpi_size - 1))
    IF (.NOT. ASSOCIATED(d%jj_begin_para)) ALLOCATE(d%jj_begin_para(0:mpi_size - 1))
    IF (.NOT. ASSOCIATED(d%jj_end_para)) ALLOCATE(d%jj_end_para(0:mpi_size - 1))

    d%jj_nb_para = jj_nb_new

    d%jj_begin_para(0) = 1
    d%jj_end_para(0) = d%jj_nb_para(0)

    DO i = 1, mpi_size - 1

      d%jj_begin_para(i) = d%jj_end_para(i - 1) + 1
      d%jj_end_para(i) = d%jj_begin_para(i) + d%jj_nb_para(i) - 1

    ENDDO

    d%jj_begin = d%jj_begin_para(mpi_rank)
    d%jj_end = d%jj_end_para(mpi_rank)
    d%jj_nb = d%jj_nb_para(mpi_rank)

    d%ij_begin = (d%jj_begin - 1) * iip1 + 1
    d%ij_end = d%jj_end * iip1

    d%jjb_u = MAX(d%jj_begin - halo_max, 1)
    d%jje_u = MIN(d%jj_end + halo_max, jjp1)
    d%jjnb_u = d%jje_u - d%jjb_u + 1

    d%jjb_v = MAX(d%jj_begin - halo_max, 1)
    d%jje_v = MIN(d%jj_end + halo_max, jjm)
    d%jjnb_v = d%jje_v - d%jjb_v + 1

    d%ijb_u = MAX(d%ij_begin - halo_max * iip1, 1)
    d%ije_u = MIN(d%ij_end + halo_max * iip1, ip1jmp1)
    d%ijnb_u = d%ije_u - d%ijb_u + 1

    d%ijb_v = MAX(d%ij_begin - halo_max * iip1, 1)
    d%ije_v = MIN(d%ij_end + halo_max * iip1, ip1jm)
    d%ijnb_v = d%ije_v - d%ijb_v + 1

  END SUBROUTINE create_distrib

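  ! Make 'd' the active distribution: copy its bounds into the module
  ! variables and record it as current_dist.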
  SUBROUTINE Set_Distrib(d)
    USE lmdz_dimensions, ONLY: iim, jjm, llm, ndm
    USE lmdz_paramet
    IMPLICIT NONE

    TYPE(distrib), INTENT(IN) :: d

    jj_begin = d%jj_begin
    jj_end = d%jj_end
    jj_nb = d%jj_nb
    ij_begin = d%ij_begin
    ij_end = d%ij_end

    jjb_u = d%jjb_u
    jje_u = d%jje_u
    jjnb_u = d%jjnb_u
    jjb_v = d%jjb_v
    jje_v = d%jje_v
    jjnb_v = d%jjnb_v

    ijb_u = d%ijb_u
    ije_u = d%ije_u
    ijnb_u = d%ijnb_u

    ijb_v = d%ijb_v
    ije_v = d%ije_v
    ijnb_v = d%ijnb_v

    jj_begin_para(:) = d%jj_begin_para(:)
    jj_end_para(:) = d%jj_end_para(:)
    jj_nb_para(:) = d%jj_nb_para(:)
    current_dist = d

  END SUBROUTINE Set_Distrib

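  ! Component-wise copy implementing the overloaded assignment
  ! distrib = distrib; the para arrays are copied by value into the
  ! already-associated pointer targets.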
  SUBROUTINE copy_distrib(dist, new_dist)
    USE lmdz_dimensions, ONLY: iim, jjm, llm, ndm
    USE lmdz_paramet
    IMPLICIT NONE

    TYPE(distrib), INTENT(INOUT) :: dist
    TYPE(distrib), INTENT(IN) :: new_dist

    dist%jj_begin = new_dist%jj_begin
    dist%jj_end = new_dist%jj_end
    dist%jj_nb = new_dist%jj_nb
    dist%ij_begin = new_dist%ij_begin
    dist%ij_end = new_dist%ij_end

    dist%jjb_u = new_dist%jjb_u
    dist%jje_u = new_dist%jje_u
    dist%jjnb_u = new_dist%jjnb_u
    dist%jjb_v = new_dist%jjb_v
    dist%jje_v = new_dist%jje_v
    dist%jjnb_v = new_dist%jjnb_v

    dist%ijb_u = new_dist%ijb_u
    dist%ije_u = new_dist%ije_u
    dist%ijnb_u = new_dist%ijnb_u

    dist%ijb_v = new_dist%ijb_v
    dist%ije_v = new_dist%ije_v
    dist%ijnb_v = new_dist%ijnb_v

    dist%jj_begin_para(:) = new_dist%jj_begin_para(:)
    dist%jj_end_para(:) = new_dist%jj_end_para(:)
    dist%jj_nb_para(:) = new_dist%jj_nb_para(:)

  END SUBROUTINE copy_distrib

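  ! Return a copy of the distribution currently in use.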
  SUBROUTINE get_current_distrib(d)
    USE lmdz_dimensions, ONLY: iim, jjm, llm, ndm
    USE lmdz_paramet
    IMPLICIT NONE

    TYPE(distrib), INTENT(OUT) :: d

    d = current_dist

  END SUBROUTINE get_current_distrib

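  ! Release the distribution arrays, then shut down XIOS, the Oasis coupler
  ! and MPI as required by the configuration.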
  SUBROUTINE Finalize_parallel
    USE lmdz_mpi
    USE lmdz_wxios  ! ug For the XIOS output
    USE lmdz_dimensions, ONLY: iim, jjm, llm, ndm
    USE lmdz_paramet
    USE control_mod, ONLY: ok_dyn_xios

#ifdef CPP_COUPLE
! Use of the Oasis-MCT coupler
#if defined CPP_OMCT
    USE mod_prism
#else
    USE mod_prism_proto
#endif
! Ehouarn: surface_data module is in 'phylmd' ...
    USE surface_data, ONLY: type_ocean
    IMPLICIT NONE
#else
    IMPLICIT NONE
    ! without the surface_data module, we declare (and set) a dummy 'type_ocean'
    CHARACTER(LEN = 6), PARAMETER :: type_ocean = "dummy"
#endif

    INTEGER :: ierr
    INTEGER :: i

    IF (ALLOCATED(jj_begin_para)) DEALLOCATE(jj_begin_para)
    IF (ALLOCATED(jj_end_para))   DEALLOCATE(jj_end_para)
    IF (ALLOCATED(jj_nb_para))    DEALLOCATE(jj_nb_para)

    IF (type_ocean == 'couple') THEN
#ifdef CPP_COUPLE
      IF (using_xios) THEN
        ! Proper shutdown of XIOS
        ! close the XIOS dynamics context if it is defined (call LMDZDYN)
        IF (ok_dyn_xios) THEN
          CALL xios_context_finalize()
        ENDIF
        CALL wxios_close()
        CALL prism_terminate_proto(ierr)
        IF (ierr /= PRISM_Ok) THEN
          CALL abort_gcm('Finalize_parallel', ' Problem in prism_terminate_proto ', 1)
        ENDIF
      ELSE
        CALL prism_terminate_proto(ierr)
        IF (ierr /= PRISM_Ok) THEN
          CALL abort_gcm('Finalize_parallel', ' Problem in prism_terminate_proto ', 1)
        ENDIF
      ENDIF
#else
      CALL abort_gcm('Finalize_parallel', 'type_ocean = couple but CPP_COUPLE not defined', 1)
#endif

    ELSE
      IF (using_xios) THEN
        ! Proper shutdown of XIOS
        IF (ok_dyn_xios) THEN
          CALL xios_context_finalize()
        ENDIF

        CALL wxios_close()
      ENDIF
      IF (using_mpi) CALL MPI_FINALIZE(ierr)
    ENDIF

  END SUBROUTINE Finalize_parallel

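  ! Copy 'row' latitude rows of Field (row*iip1 points per level) into a
  ! contiguous buffer, level by level.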
  SUBROUTINE Pack_Data(Field, ij, ll, row, Buffer)
    USE lmdz_dimensions, ONLY: iim, jjm, llm, ndm
    USE lmdz_paramet
    IMPLICIT NONE

    INTEGER, INTENT(IN) :: ij, ll, row
    REAL, DIMENSION(ij, ll), INTENT(IN) :: Field
    REAL, DIMENSION(ll * iip1 * row), INTENT(OUT) :: Buffer

    INTEGER :: Pos
    INTEGER :: i, l

    Pos = 0
    DO l = 1, ll
      DO i = 1, row * iip1
        Pos = Pos + 1
        Buffer(Pos) = Field(i, l)
      ENDDO
    ENDDO

  END SUBROUTINE Pack_Data

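  ! Inverse of Pack_Data: scatter a contiguous buffer back into 'row'
  ! latitude rows of Field.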
  SUBROUTINE Unpack_Data(Field, ij, ll, row, Buffer)
    USE lmdz_dimensions, ONLY: iim, jjm, llm, ndm
    USE lmdz_paramet
    IMPLICIT NONE

    INTEGER, INTENT(IN) :: ij, ll, row
    REAL, DIMENSION(ij, ll), INTENT(OUT) :: Field
    REAL, DIMENSION(ll * iip1 * row), INTENT(IN) :: Buffer

    INTEGER :: Pos
    INTEGER :: i, l

    Pos = 0

    DO l = 1, ll
      DO i = 1, row * iip1
        Pos = Pos + 1
        Field(i, l) = Buffer(Pos)
      ENDDO
    ENDDO

  END SUBROUTINE Unpack_Data

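  ! MPI barrier; the critical section ensures that only one OpenMP thread at
  ! a time issues the MPI call.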
  SUBROUTINE barrier
    USE lmdz_mpi
    IMPLICIT NONE
    INTEGER :: ierr

    !$OMP CRITICAL (MPI)
    IF (using_mpi) CALL MPI_Barrier(COMM_LMDZ, ierr)
    !$OMP END CRITICAL (MPI)

  END SUBROUTINE barrier

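  ! Fill the halo of the local domain: receive 'up' rows from the process
  ! above and 'down' rows from the process below, sending the matching
  ! boundary rows in return; polar processes skip the missing side.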
  SUBROUTINE exchange_hallo(Field, ij, ll, up, down)
    USE lmdz_mpi
    USE lmdz_vampir
    USE lmdz_dimensions, ONLY: iim, jjm, llm, ndm
    USE lmdz_paramet
    IMPLICIT NONE

    INTEGER :: ij, ll
    REAL, DIMENSION(ij, ll) :: Field
    INTEGER :: up, down

    INTEGER :: ierr
    LOGICAL :: SendUp, SendDown
    LOGICAL :: RecvUp, RecvDown
    INTEGER, DIMENSION(4) :: Request
    INTEGER, DIMENSION(MPI_STATUS_SIZE, 4) :: Status

    INTEGER :: NbRequest
    REAL, DIMENSION(:), ALLOCATABLE :: Buffer_Send_up, Buffer_Send_down
    REAL, DIMENSION(:), ALLOCATABLE :: Buffer_Recv_up, Buffer_Recv_down
    INTEGER :: Buffer_size

    IF (using_mpi) THEN

      CALL barrier

      CALL VTb(VThallo)

      SendUp = .TRUE.
      SendDown = .TRUE.
      RecvUp = .TRUE.
      RecvDown = .TRUE.

      IF (pole_nord) THEN
        SendUp = .FALSE.
        RecvUp = .FALSE.
      ENDIF

      IF (pole_sud) THEN
        SendDown = .FALSE.
        RecvDown = .FALSE.
      ENDIF

      IF (up == 0) THEN
        SendDown = .FALSE.
        RecvUp = .FALSE.
      ENDIF

      IF (down == 0) THEN
        SendUp = .FALSE.
        RecvDown = .FALSE.
      ENDIF

      NbRequest = 0

      IF (SendUp) THEN
        NbRequest = NbRequest + 1
        Buffer_size = down * iip1 * ll
        ALLOCATE(Buffer_Send_up(Buffer_size))
        CALL Pack_Data(Field(ij_begin, 1), ij, ll, down, Buffer_Send_up)
        !$OMP CRITICAL (MPI)
        CALL MPI_ISEND(Buffer_Send_up, Buffer_size, MPI_REAL8, mpi_rank - 1, 1, &
                COMM_LMDZ, Request(NbRequest), ierr)
        !$OMP END CRITICAL (MPI)
      ENDIF

      IF (SendDown) THEN
        NbRequest = NbRequest + 1

        Buffer_size = up * iip1 * ll
        ALLOCATE(Buffer_Send_down(Buffer_size))
        CALL Pack_Data(Field(ij_end + 1 - up * iip1, 1), ij, ll, up, Buffer_Send_down)

        !$OMP CRITICAL (MPI)
        CALL MPI_ISEND(Buffer_Send_down, Buffer_size, MPI_REAL8, mpi_rank + 1, 1, &
                COMM_LMDZ, Request(NbRequest), ierr)
        !$OMP END CRITICAL (MPI)
      ENDIF

      IF (RecvUp) THEN
        NbRequest = NbRequest + 1
        Buffer_size = up * iip1 * ll
        ALLOCATE(Buffer_Recv_up(Buffer_size))

        !$OMP CRITICAL (MPI)
        CALL MPI_IRECV(Buffer_Recv_up, Buffer_size, MPI_REAL8, mpi_rank - 1, 1, &
                COMM_LMDZ, Request(NbRequest), ierr)
        !$OMP END CRITICAL (MPI)

      ENDIF

      IF (RecvDown) THEN
        NbRequest = NbRequest + 1
        Buffer_size = down * iip1 * ll
        ALLOCATE(Buffer_Recv_down(Buffer_size))

        !$OMP CRITICAL (MPI)
        CALL MPI_IRECV(Buffer_Recv_down, Buffer_size, MPI_REAL8, mpi_rank + 1, 1, &
                COMM_LMDZ, Request(NbRequest), ierr)
        !$OMP END CRITICAL (MPI)

      ENDIF

      IF (NbRequest > 0) CALL MPI_WAITALL(NbRequest, Request, Status, ierr)
      IF (RecvUp)   CALL Unpack_Data(Field(ij_begin - up * iip1, 1), ij, ll, up, Buffer_Recv_up)
      IF (RecvDown) CALL Unpack_Data(Field(ij_end + 1, 1), ij, ll, down, Buffer_Recv_down)

      CALL VTe(VThallo)
      CALL barrier

    ENDIF  ! using_mpi

  END SUBROUTINE exchange_hallo

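  ! Gather a distributed field onto process 'rank' with MPI_GATHERV; ij
  ! selects the grid (ip1jmp1 for the u grid, ip1jm for the v grid).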
  SUBROUTINE Gather_Field(Field, ij, ll, rank)
    USE lmdz_mpi
    USE lmdz_iniprint, ONLY: lunout, prt_level
    USE lmdz_dimensions, ONLY: iim, jjm, llm, ndm
    USE lmdz_paramet
    IMPLICIT NONE

    INTEGER :: ij, ll, rank
    REAL, DIMENSION(ij, ll) :: Field
    REAL, DIMENSION(:), ALLOCATABLE :: Buffer_send
    REAL, DIMENSION(:), ALLOCATABLE :: Buffer_Recv
    INTEGER, DIMENSION(0:mpi_size - 1) :: Recv_count, displ
    INTEGER :: ierr
    INTEGER :: i

    IF (using_mpi) THEN

      IF (ij == ip1jmp1) THEN
        ALLOCATE(Buffer_send(iip1 * ll * (jj_end - jj_begin + 1)))
        CALL Pack_Data(Field(ij_begin, 1), ij, ll, jj_end - jj_begin + 1, Buffer_send)
      ELSE IF (ij == ip1jm) THEN
        ALLOCATE(Buffer_send(iip1 * ll * (MIN(jj_end, jjm) - jj_begin + 1)))
        CALL Pack_Data(Field(ij_begin, 1), ij, ll, MIN(jj_end, jjm) - jj_begin + 1, Buffer_send)
      ELSE
        WRITE(lunout, *)ij
        CALL abort_gcm("parallel_lmdz", "error in Gather_Field", 1)
      ENDIF

      IF (mpi_rank == rank) THEN
        ALLOCATE(Buffer_Recv(ij * ll))

        !CDIR NOVECTOR
        DO i = 0, mpi_size - 1

          IF (ij == ip1jmp1) THEN
            Recv_count(i) = (jj_end_para(i) - jj_begin_para(i) + 1) * ll * iip1
          ELSE IF (ij == ip1jm) THEN
            Recv_count(i) = (MIN(jj_end_para(i), jjm) - jj_begin_para(i) + 1) * ll * iip1
          ELSE
            CALL abort_gcm("parallel_lmdz", "error in Gather_Field", 1)
          ENDIF

          IF (i == 0) THEN
            displ(i) = 0
          ELSE
            displ(i) = displ(i - 1) + Recv_count(i - 1)
          ENDIF

        ENDDO

      ELSE
        ! Ehouarn: When in debug mode, ifort complains (for CALL MPI_GATHERV
        !          below) about Buffer_Recv() being not allocated.
        !          So make a dummy allocation.
        ALLOCATE(Buffer_Recv(1))
      ENDIF ! of if (mpi_rank == rank)

      !$OMP CRITICAL (MPI)
      CALL MPI_GATHERV(Buffer_send, (MIN(ij_end, ij) - ij_begin + 1) * ll, MPI_REAL8, &
              Buffer_Recv, Recv_count, displ, MPI_REAL8, rank, COMM_LMDZ, ierr)
      !$OMP END CRITICAL (MPI)

      IF (mpi_rank == rank) THEN
        IF (ij == ip1jmp1) THEN
          DO i = 0, mpi_size - 1
            CALL Unpack_Data(Field((jj_begin_para(i) - 1) * iip1 + 1, 1), ij, ll, &
                    jj_end_para(i) - jj_begin_para(i) + 1, Buffer_Recv(displ(i) + 1))
          ENDDO
        ELSE IF (ij == ip1jm) THEN
          DO i = 0, mpi_size - 1
            CALL Unpack_Data(Field((jj_begin_para(i) - 1) * iip1 + 1, 1), ij, ll, &
                    MIN(jj_end_para(i), jjm) - jj_begin_para(i) + 1, Buffer_Recv(displ(i) + 1))
          ENDDO
        ENDIF
      ENDIF
    ENDIF ! using_mpi

  END SUBROUTINE Gather_Field

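  ! Gather the field on rank 0, then broadcast it so that every process holds
  ! the complete field.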
  SUBROUTINE AllGather_Field(Field, ij, ll)
    USE lmdz_mpi
    USE lmdz_dimensions, ONLY: iim, jjm, llm, ndm
    USE lmdz_paramet
    IMPLICIT NONE

    INTEGER :: ij, ll
    REAL, DIMENSION(ij, ll) :: Field
    INTEGER :: ierr

    IF (using_mpi) THEN
      CALL Gather_Field(Field, ij, ll, 0)
      !$OMP CRITICAL (MPI)
      CALL MPI_BCAST(Field, ij * ll, MPI_REAL8, 0, COMM_LMDZ, ierr)
      !$OMP END CRITICAL (MPI)
    ENDIF

  END SUBROUTINE AllGather_Field

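  ! Broadcast a complete field from process 'rank' to all other processes.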
  SUBROUTINE Broadcast_Field(Field, ij, ll, rank)
    USE lmdz_mpi
    USE lmdz_dimensions, ONLY: iim, jjm, llm, ndm
    USE lmdz_paramet
    IMPLICIT NONE

    INTEGER :: ij, ll
    REAL, DIMENSION(ij, ll) :: Field
    INTEGER :: rank
    INTEGER :: ierr

    IF (using_mpi) THEN

      !$OMP CRITICAL (MPI)
      CALL MPI_BCAST(Field, ij * ll, MPI_REAL8, rank, COMM_LMDZ, ierr)
      !$OMP END CRITICAL (MPI)

    ENDIF
  END SUBROUTINE Broadcast_Field

END MODULE parallel_lmdz