source: lmdz_wrf/WRFV3/external/atm_ocn/cmpcomm.F @ 1

Last change on this file since 1 was 1, checked in by lfita, 10 years ago
  -- Opening of the WRF+LMDZ coupling repository --

WRF: version v3.3
LMDZ: version v1818

More details in:

#if defined( DM_PARALLEL )
      MODULE CMP_COMM

      implicit none

! MPI variables
      include 'mpif.h'

      integer Coupler_id /0/   ! this is Coupler's id, used to address
                               ! Coupler. This is a default value,
                               ! possibly to be redefined later
!
!     Make Coupler's id 0 if it is active (i.e. communicating with
! the Component.) Otherwise, make it a negative integer; in this case,
! the Component is standalone.
!

      integer ibuffer_size
      parameter (ibuffer_size=10)
      integer Coupler_rank,my_id,COMM_local, &
     &component_master_rank_global,process_rank_global, &
     &component_master_rank_local,process_rank_local,&
     &component_nprocs,FlexLev,ibuffer(ibuffer_size),nprocs_global

      integer kind_REAL,kind_INTEGER,MPI_kind_REAL,&
     &kind_alt_REAL,MPI_kind_alt_REAL
      parameter (kind_REAL=8,kind_INTEGER=4)
      parameter (kind_alt_REAL=12-kind_REAL)
!       kind_INTEGER must be the number of bytes implied by the
!     MPI_INTEGER MPI constant; all integers sent/received are of this
!     kind. No value other than 4 is anticipated as of now.
!       kind_REAL is the kind of real data to communicate. The
!     corresponding MPI data type variable MPI_kind_REAL is assigned
!     in CMP_INIT.
!       kind_alt_REAL is the alternative kind of real data to
!     communicate. The corresponding MPI data type variable
!     MPI_kind_alt_REAL is assigned in CMP_INIT. (It is used in
!     subroutines CMP_alt_SEND and CMP_alt_RECV.)

      save

      END MODULE CMP_COMM
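!
!     Typical life cycle of a Component using this module (a minimal
!     sketch, not part of the library; the component id MY_ID and the
!     field SST with its length N are hypothetical):
!
!       integer, parameter :: MY_ID=2, N=10000
!       real(kind=8) SST(N)
!       logical izd
!       integer ierr
!       call CMP_INIT(MY_ID,1)      ! register; flexibility level 1
!       call CMP_INTRO(0)           ! master process has local rank 0
!       call CMP_SEND(SST,N)        ! master sends a field to Coupler
!       call CMP_RECV(SST,N)        ! master receives a field back
!       call CMP_FINALIZE(izd,ierr)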
!
!***********************************************************************
!
      SUBROUTINE CMP_INIT(id,flex)
!                         in  in
!
!     This subroutine must be called by every Component right upon
!     calling MPI_INIT. It assigns a value to the Component communicator
!     COMM_local (which is a global variable in module CMP_COMM), to be
!     thereafter used by the Component in place of
!     MPI_COMM_WORLD wherever the latter is used by the Component's
!     standalone version. Besides, it stores the Component's id,
!     the process's ranks, and the "flexibility level" (flex) requested
!     by the Component in global variables. (The latter parameter
!     affects the mode of communications; for its description, see
!     CMP_SEND and CMP_RECV.) Finally, it starts handshaking with
!     Coupler, receiving the unique (global, i.e. in MPI_COMM_WORLD)
!     Coupler process rank Coupler_rank from Coupler.
                                        ! ibuffer may include additional
                                        ! info to be received
!
      USE CMP_COMM

      implicit none

      integer id,flex

      integer ierr,color,key,status(MPI_STATUS_SIZE),tag,dummy
      character*10 s
      logical izd
!

!        Determine if MPI is initialized; if not, initialize
      call MPI_INITIALIZED(izd,ierr)
      if (.not.izd) call MPI_INIT(ierr)

!        Determine MPI send/receive types according to prescribed
!        types for arrays to be communicated
      if (kind_REAL.eq.8) then
        MPI_kind_REAL=MPI_REAL8
        MPI_kind_alt_REAL=MPI_REAL4
      else if (kind_REAL.eq.4) then
        MPI_kind_REAL=MPI_REAL4
        MPI_kind_alt_REAL=MPI_REAL8
      else
        write(s,'(i0)') kind_REAL
        call GLOB_ABORT(1, &
     &  'CMP_INIT: illegal value of kind_REAL='//s,1)
      end if
      if (kind_INTEGER.ne.4) then
        write(s,'(i0)') kind_INTEGER
        call GLOB_ABORT(1, &
     &  'CMP_INIT: illegal value of kind_INTEGER='//s,1)
      end if

!        Store the Component's id
!
      my_id=id

!        Store the Component's "flexibility level"
!
      FlexLev=flex

!        Assign a value to the Component communicator
!        COMM_local, to be thereafter used by the Component in place of
!        MPI_COMM_WORLD wherever it is used by the Component's
!        standalone version
!
      color=id
      key=1
!           print*,'CMP_INIT: to call MPI_COMM_SPLIT, color=',color
      call MPI_COMM_SPLIT(MPI_COMM_WORLD,color,key,COMM_local,ierr)
      call GLOB_ABORT(ierr,'CMP_INIT: error in MPI_COMM_SPLIT',1)

!        Store the process's global and local ranks
!
!           print*,'CMP_INIT: to call MPI_COMM_RANK for global rank'
      call MPI_COMM_RANK(MPI_COMM_WORLD,process_rank_global,ierr)
      call GLOB_ABORT(ierr, &
     &'CMP_INIT: error in MPI_COMM_RANK(MPI_COMM_WORLD...)',1)
!           print*,'CMP_INIT: to call MPI_COMM_RANK for local rank'
      call MPI_COMM_RANK(COMM_local,process_rank_local,ierr)
      call GLOB_ABORT(ierr, &
     &'CMP_INIT: error in MPI_COMM_RANK(COMM_local...)',1)

!        Store component_nprocs - the Component's number of processes;
!        calculate the global number of processes;
!        determine whether this is standalone mode and if it is, make
!        Coupler's id negative and return
!
      call MPI_COMM_SIZE(COMM_local,component_nprocs,ierr)
      call MPI_COMM_SIZE(MPI_COMM_WORLD,nprocs_global,ierr)
      if (component_nprocs.eq.nprocs_global) then
        if(process_rank_local.eq.0) print*,'CMP_INIT: standalone mode'
        Coupler_id=-1
        RETURN
      end if

!        Start handshaking with Coupler (all processes):
!        receive the unique (global, i.e. in MPI_COMM_WORLD) Coupler
!        process rank Coupler_rank from Coupler
!
      tag=Coupler_id+23456
!           print*,'CMP_INIT: to call MPI_RECV'
      call MPI_RECV(ibuffer,ibuffer_size,MPI_INTEGER,MPI_ANY_SOURCE,tag, &
     &MPI_COMM_WORLD,status,ierr)
      call GLOB_ABORT(ierr,'CMP_INIT: error in MPI_RECV',1)
      Coupler_rank=ibuffer(2)
      if (ibuffer(1).ne.Coupler_id) then
        print*,'CMP_INIT: stopped, rcvd ibuffer(1) value is not C id: ', &
     &  ibuffer(1)
        CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)
      end if
      if (ibuffer(3).ne.ibuffer_size) then
        print*,'CMP_INIT: stopped, rcvd ibuffer(3) value ',ibuffer(3), &
     &  ' is not ibuffer_size=',ibuffer_size
        CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)
      end if

!        Inform Coupler that this Component exists and is active
!
      call MPI_GATHER(id,1,MPI_INTEGER,dummy,1,MPI_INTEGER, &
     &Coupler_rank,MPI_COMM_WORLD,ierr)

!
!     print*,
!    >'CMP_INIT: ranks: process local, global, Coupler; Coupler_id: ',
!    >process_rank_local,process_rank_global,Coupler_rank,Coupler_id

      return
      END
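!
!     Example (a minimal sketch; the component id 2 and flexibility
!     level 0 are hypothetical values that must match what Coupler
!     expects):
!
!       call MPI_INIT(ierr)    ! optional: CMP_INIT initializes MPI
!                              ! itself if it is not yet initialized
!       call CMP_INIT(2,0)
!       if (Coupler_id.lt.0) print*,'no Coupler: standalone mode'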
!
!***********************************************************************
!
      SUBROUTINE CMP_INTRO(master_rank_local)
!                                in
!       This routine must be called by all Component's processes,
!       which must all know the local rank of Component's master
!       process (master_rank_local)
!          Alternatively, SUBROUTINE CMP_INTRO_m can be called
!      from Component's master process only, and SUBROUTINE CMP_INTRO_s
!      from all other processes. In this case, the local rank of
!      Component's master process will be determined and broadcast
!      automatically

      USE CMP_COMM

      implicit none

      integer master_rank_local,ierr,ibuf(3),color,key,tag
!

!     print*,'CMP_INTRO: entered ',master_rank_local,process_rank_local
!    >,Coupler_rank

      component_master_rank_local=master_rank_local

      if (Coupler_id.lt.0) return    !   <- standalone mode

!        If this process is the Component's master process,
!        complete handshaking with Coupler:
!        "register", i.e. send Component master process global rank
!        to Coupler. Also, send the requested "flexibility level".
!        (Sending Component's id (in ibuf(1)) is for double-check only.)
!
      if (process_rank_local.eq.master_rank_local) then
        component_master_rank_global=process_rank_global
        ibuf(1)=my_id  ! redundant, sent for control only
        ibuf(2)=process_rank_global
        ibuf(3)=FlexLev
        tag=my_id+54321
            print*,'CMP_INTRO: to call MPI_SEND ',process_rank_local, &
     &      process_rank_global
        call MPI_SEND(ibuf,3,MPI_INTEGER,Coupler_rank,tag, &
     &  MPI_COMM_WORLD,ierr)
        if (ierr.ne.0) then
          print*,'CMP_INTRO: error in MPI_SEND, process ', &
     &    process_rank_global
          CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)
        end if
      end if
!           print*,'CMP_INTRO: returning ',process_rank_local,
!    >      process_rank_global,Coupler_rank
      return
      END
!
!***********************************************************************
!
      SUBROUTINE CMP_INTRO_m
!
!      This routine must be called by Component's master process (only),
!      if CMP_INTRO is not called (see comments in CMP_INTRO)

      USE CMP_COMM

      implicit none

      integer ierr,ibuf(3),color,key,tag,i
!

!     print*,'CMP_INTRO_m: entered, process_rank_local=',
!    >process_rank_local

      component_master_rank_local=process_rank_local
      component_master_rank_global=process_rank_global

      tag=abs(my_id)+12345
      do i=0,component_nprocs-1
        if (i.ne.component_master_rank_local) then
          ibuf(1)=component_master_rank_local
          ibuf(2)=component_master_rank_global
          call MPI_SEND(ibuf,2,MPI_INTEGER,i,tag,COMM_local,ierr)
          if (ierr.ne.0) then
            print*,'CMP_INTRO_m: error in 1st MPI_SEND, i=',i
            CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)
          end if
        end if
      end do

      if (Coupler_id.lt.0) return    !   <- standalone mode

!        Complete handshaking with Coupler:
!        "register", i.e. send Component master process global rank
!        to Coupler. Also, send the requested "flexibility level".
!        (Sending Component's id (in ibuf(1)) is for double-check only.)
!
      tag=my_id+54321
      ibuf(1)=my_id  ! redundant, sent for control only
      ibuf(2)=process_rank_global
      ibuf(3)=FlexLev
!         print*,'CMP_INTRO_m: to call MPI_SEND ',process_rank_local,
!    >    process_rank_global
      call MPI_SEND(ibuf,3,MPI_INTEGER,Coupler_rank,tag, &
     &MPI_COMM_WORLD,ierr)
      if (ierr.ne.0) then
        print*,'CMP_INTRO_m: error in MPI_SEND, process ', &
     &  process_rank_global
        CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)
      end if
!         print*,'CMP_INTRO_m: returning ',process_rank_local,
!    >    process_rank_global
      return
      END
!
!***********************************************************************
!
      SUBROUTINE CMP_INTRO_s
!
!      This routine must be called by all Component's processes other
!      than the master process,
!      if CMP_INTRO is not called (see comments in CMP_INTRO)

      USE CMP_COMM

      implicit none

      integer ierr,ibuf(3),color,key,tag,i,status(MPI_STATUS_SIZE)
!

!     print*,'CMP_INTRO_s: entered, process_rank_local=',
!    >process_rank_local

      tag=abs(my_id)+12345
      call MPI_RECV(ibuf,2,MPI_INTEGER,MPI_ANY_SOURCE,tag, &
     &COMM_local,status,ierr)
      if (ierr.ne.0) then
        print*,'CMP_INTRO_s: error in MPI_RECV ',process_rank_local
        CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)
      end if
      component_master_rank_local=ibuf(1)
      component_master_rank_global=ibuf(2)
! Dead code, apparently an erroneous copy of the CMP_INTRO_m loop
! (non-master processes have nothing to send here):
!      do i=0,component_nprocs-1
!        if (i.ne.component_master_rank_local) then
!          ibuf(1)=component_master_rank_local
!          ibuf(2)=component_master_rank_global
!          call MPI_SEND(ibuf,2,MPI_INTEGER,i,tag,COMM_local,ierr)
!        end if
!      end do

!         print*,'CMP_INTRO_s: returning ',process_rank_local,
!    >    process_rank_global,component_master_rank_local,
!    >    component_master_rank_global
      return
      END
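!
!     Example (a sketch): the two equivalent ways to register with
!     Coupler after CMP_INIT. Either every process calls CMP_INTRO
!     with the master's local rank,
!
!       call CMP_INTRO(0)
!
!     or the master process and the others call different routines:
!
!       if (process_rank_local.eq.0) then
!         call CMP_INTRO_m
!       else
!         call CMP_INTRO_s
!       end if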
!
!***********************************************************************
!
      SUBROUTINE CMP_SEND(F,N)
!
      USE CMP_COMM

      implicit none

      integer N,ierr,tag
      real(kind=kind_REAL) F(N)
!
      if (Coupler_id.lt.0) return    !   <- standalone mode

!           call CMP_DBG_CR(6,'CMP_SEND: entered')

      if (process_rank_local.ne.component_master_rank_local) then
        if (FlexLev.eq.0) then
!         With "flexibility level" FlexLev=0, only Component master
!         process is supposed to call this subroutine.
          print '("*** CMP_SEND: process_rank_local=",i4,"  ***"/ &
     &    "*** and component_master_rank_local=",i4," differ:  ***"/ &
     &    "*** STOPPED ***")', &
     &    process_rank_local,component_master_rank_local
          CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)
        else if (FlexLev.eq.1) then
!         With "flexibility level" FlexLev=1, any Component process is
!         allowed to call this subroutine but only the Component
!         master process can actually send data (so the
!         others just make a dummy call), as the Coupler process only
!         receives data from the Component master process.
          return
        else if (FlexLev.ne.2 .and. FlexLev.ne.3) then
          print '("*** CMP_SEND: illegal value of FlexLev",i9/ &
     &    "*** STOPPED")',FlexLev
          CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)
        end if
!         With "flexibility level" FlexLev=2 or FlexLev=3, any
!         Component process is allowed to actually send data.
!         [In this case, the Coupler process (in CPL_RECV) receives
!         from MPI_ANY_SOURCE rather than component_master_rank_global,
!         and it is only identification by  tag  which enables Coupler
!         to receive the data from the right source.]
!         But in any case only one Component process may actually be
!         engaged in a particular exchange of data with Coupler.
      end if

      tag=my_id

      call MPI_SEND(F,N,MPI_kind_REAL,Coupler_rank,tag, &
     &MPI_COMM_WORLD,ierr)
      call GLOB_ABORT(ierr,'CMP_SEND: error in MPI_SEND',1)

!           call CMP_DBG_CR(6,'CMP_SEND: exiting')
      return
      END
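!
!     Usage sketch (the array F and its length N are hypothetical):
!     with FlexLev=1 every process may make the call, but only the
!     master actually sends:
!
!       real(kind=kind_REAL) F(N)
!       call CMP_SEND(F,N)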
!
!***********************************************************************
!
      SUBROUTINE CMP_alt_SEND(F,N)
!
      USE CMP_COMM

      implicit none

      integer N,ierr,tag
      real(kind=kind_alt_REAL) F(N)
!
      if (Coupler_id.lt.0) return    !   <- standalone mode

!           call CMP_DBG_CR(6,'CMP_alt_SEND: entered')

      if (process_rank_local.ne.component_master_rank_local) then
        if (FlexLev.eq.0) then
!         With "flexibility level" FlexLev=0, only Component master
!         process is supposed to call this subroutine.
          print '("*** CMP_alt_SEND: process_rank_local=",i4,"  ***"/ &
     &    "*** and component_master_rank_local=",i4," differ:  ***"/ &
     &    "*** STOPPED ***")', &
     &    process_rank_local,component_master_rank_local
          CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)
        else if (FlexLev.eq.1) then
!         With "flexibility level" FlexLev=1, any Component process is
!         allowed to call this subroutine but only the Component
!         master process can actually send data (so the
!         others just make a dummy call), as the Coupler process only
!         receives data from the Component master process.
          return
        else if (FlexLev.ne.2 .and. FlexLev.ne.3) then
          print '("*** CMP_alt_SEND: illegal value of FlexLev",i9/ &
     &    "*** STOPPED")',FlexLev
          CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)
        end if
!         With "flexibility level" FlexLev=2 or FlexLev=3, any
!         Component process is allowed to actually send data.
!         [In this case, the Coupler process (in CPL_RECV) receives
!         from MPI_ANY_SOURCE rather than component_master_rank_global,
!         and it is only identification by  tag  which enables Coupler
!         to receive the data from the right source.]
!         But in any case only one Component process may actually be
!         engaged in a particular exchange of data with Coupler.
      end if

      tag=my_id

      call MPI_SEND(F,N,MPI_kind_alt_REAL,Coupler_rank,tag, &
     &MPI_COMM_WORLD,ierr)
      call GLOB_ABORT(ierr,'CMP_alt_SEND: error in MPI_SEND',1)

!           call CMP_DBG_CR(6,'CMP_alt_SEND: exiting')
      return
      END
!
!***********************************************************************
!
      SUBROUTINE CMP_gnr_SEND(F,N,MPI_DATATYPE)
!
      USE CMP_COMM

      implicit none

      integer N,MPI_DATATYPE
      integer F(1)

      integer ierr,tag
!

      if (Coupler_id.lt.0) return    !   <- standalone mode

!           call CMP_DBG_CR(6,'CMP_gnr_SEND: entered')

      if (process_rank_local.ne.component_master_rank_local) then
        if (FlexLev.eq.0) then
!         With "flexibility level" FlexLev=0, only Component master
!         process is supposed to call this subroutine.
          print '("*** CMP_gnr_SEND: process_rank_local=",i4,"  ***"/ &
     &    "*** and component_master_rank_local=",i4," differ:  ***"/ &
     &    "*** STOPPED ***")', &
     &    process_rank_local,component_master_rank_local
          CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)
        else if (FlexLev.eq.1) then
!         With "flexibility level" FlexLev=1, any Component process is
!         allowed to call this subroutine but only the Component
!         master process can actually send data (so the
!         others just make a dummy call), as the Coupler process only
!         receives data from the Component master process.
          return
        else if (FlexLev.ne.2 .and. FlexLev.ne.3) then
          print '("*** CMP_gnr_SEND: illegal value of FlexLev",i9/ &
     &    "*** STOPPED")',FlexLev
          CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)
        end if
!         With "flexibility level" FlexLev=2 or FlexLev=3, any
!         Component process is allowed to actually send data.
!         [In this case, the Coupler process (in CPL_RECV) receives
!         from MPI_ANY_SOURCE rather than component_master_rank_global,
!         and it is only identification by  tag  which enables Coupler
!         to receive the data from the right source.]
!         But in any case only one Component process may actually be
!         engaged in a particular exchange of data with Coupler.
      end if

      tag=my_id

      call MPI_SEND(F,N,MPI_DATATYPE,Coupler_rank,tag, &
     &MPI_COMM_WORLD,ierr)
      call GLOB_ABORT(ierr,'CMP_gnr_SEND: error in MPI_SEND',1)

!           call CMP_DBG_CR(6,'CMP_gnr_SEND: exiting')
      return
      END
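!
!     Usage sketch: sending with an explicitly specified MPI datatype
!     (the buffer name and length are hypothetical):
!
!       real(kind=4) buf(N)
!       call CMP_gnr_SEND(buf,N,MPI_REAL4)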
!
!***********************************************************************
!
      SUBROUTINE CMP_INTEGER_SEND(F,N)
!
      USE CMP_COMM

      implicit none

      integer N,ierr,tag
      integer F(N)
!
      if (Coupler_id.lt.0) return    !   <- standalone mode

!           print*,'CMP_INTEGER_SEND: entered with N=',N,' F=',F,
!    >      '; my_id=',my_id,'Coupler_rank=',Coupler_rank

      if (process_rank_local.ne.component_master_rank_local) then
        if (FlexLev.eq.0) then
!         With "flexibility level" FlexLev=0, only Component master
!         process is supposed to call this subroutine.
          print '("*** CMP_INTEGER_SEND: process_rank_local=",i4,"  ***"/ &
     &    "*** and component_master_rank_local=",i4," differ:  ***"/ &
     &    "*** STOPPED ***")', &
     &    process_rank_local,component_master_rank_local
          CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)
        else if (FlexLev.eq.1) then
!         With "flexibility level" FlexLev=1, any Component process is
!         allowed to call this subroutine but only the Component
!         master process can actually send data (so the
!         others just make a dummy call), as the Coupler process only
!         receives data from the Component master process.
          return
        else if (FlexLev.ne.2 .and. FlexLev.ne.3) then
          print '("*** CMP_INTEGER_SEND: illegal value of FlexLev",i9/ &
     &    "*** STOPPED")',FlexLev
          CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)
        end if
!         With "flexibility level" FlexLev=2 or FlexLev=3, any
!         Component process is allowed to actually send data.
!         [In this case, the Coupler process (in CPL_RECV) receives
!         from MPI_ANY_SOURCE rather than component_master_rank_global,
!         and it is only identification by  tag  which enables Coupler
!         to receive the data from the right source.]
!         But in any case only one Component process may actually be
!         engaged in a particular exchange of data with Coupler.
      end if

      tag=my_id
            print*,'CMP_INTEGER_SEND: to call MPI_SEND; F=', &
     &      F,' N=',N,' Coupler_rank=',Coupler_rank,' tag=',tag
      call MPI_SEND(F,N,MPI_INTEGER,Coupler_rank,tag, &
     &MPI_COMM_WORLD,ierr)
      call GLOB_ABORT(ierr,'CMP_INTEGER_SEND: error in MPI_SEND',1)
            print*,'CMP_INTEGER_SEND: to return'

      return
      END
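!
!     Usage sketch: sending (hypothetical) grid dimensions to Coupler:
!
!       integer dims(2)
!       dims(1)=IM
!       dims(2)=JM
!       call CMP_INTEGER_SEND(dims,2)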
!
!***********************************************************************
!
      SUBROUTINE CMP_RECV(F,N)
!
      USE CMP_COMM

      implicit none

      integer N,ierr,tag,ibuf(3),status(MPI_STATUS_SIZE)
      real(kind=kind_REAL) F(N)
!
      if (Coupler_id.lt.0) return    !   <- standalone mode

!           call CMP_DBG_CR(6,'CMP_RECV: entered')

      if (process_rank_local.ne.component_master_rank_local) then

        if (FlexLev.eq.0) then

!         With "flexibility level" FlexLev=0, only Component master
!         process is supposed to call this subroutine.

          print '("*** CMP_RECV: process_rank_local=",i4,"  ***"/ &
     &    "*** and component_master_rank_local=",i4," differ:  ***"/ &
     &    "*** STOPPED ***")', &
     &    process_rank_local,component_master_rank_local
          CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)

        else if (FlexLev.eq.1 .or. FlexLev.eq.2) then

!         With "flexibility level" FlexLev=1 or FlexLev=2, any
!         Component process is allowed to call this subroutine but
!         only the Component master process is supposed to actually
!         receive data (so the others just make a dummy call), as
!         the Coupler process only sends data to the Component master
!         process.

          return

        else if (FlexLev.eq.3) then

!         With "flexibility level" FlexLev=3, any Component process
!         may actually receive data.
!         [In this case, the Coupler process (in CPL_SEND) first
!         receives the Component process global rank
!         (process_rank_global) from this subroutine, the source being
!         MPI_ANY_SOURCE, so it is only identification by  tag  which
!         enables Coupler to receive process_rank_global from the right
!         source. Upon the receipt, the Coupler process (in CPL_SEND)
!         sends the data to this Component process, rather than to
!         the Component master process as is the case with lower
!         "flexibility levels".]
!         But in any case only one Component process may actually be
!         engaged in a particular exchange of data with Coupler.

          ibuf(1)=my_id
          ibuf(2)=process_rank_global
          tag=my_id
          call MPI_SEND(ibuf,2,MPI_INTEGER,Coupler_rank,tag, &
     &    MPI_COMM_WORLD,ierr)
          call GLOB_ABORT(ierr,'CMP_RECV: error in MPI_SEND',1)

        else

          print '("*** CMP_RECV: illegal value of FlexLev",i9/ &
     &    "*** STOPPED")',FlexLev
          CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)

        end if

      end if

      tag=my_id
      call MPI_RECV(F,N,MPI_kind_REAL,Coupler_rank,tag, &
     &MPI_COMM_WORLD,status,ierr)
      call GLOB_ABORT(ierr,'CMP_RECV: error in MPI_RECV',1)

!           call CMP_DBG_CR(6,'CMP_RECV: exiting')

      return
      END
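!
!     Usage sketch (the array F and its length N are hypothetical):
!     with FlexLev=1 or 2 only the master actually receives; with
!     FlexLev=3 the calling process first reports its global rank to
!     Coupler and then receives the data itself:
!
!       real(kind=kind_REAL) F(N)
!       call CMP_RECV(F,N)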
!
!***********************************************************************
!
      SUBROUTINE CMP_alt_RECV(F,N)
!
      USE CMP_COMM

      implicit none

      integer N,ierr,tag,ibuf(3),status(MPI_STATUS_SIZE)
      real(kind=kind_alt_REAL) F(N)
!
      if (Coupler_id.lt.0) return    !   <- standalone mode

!           call CMP_DBG_CR(6,'CMP_alt_RECV: entered')

      if (process_rank_local.ne.component_master_rank_local) then

        if (FlexLev.eq.0) then

!         With "flexibility level" FlexLev=0, only Component master
!         process is supposed to call this subroutine.

          print '("*** CMP_alt_RECV: process_rank_local=",i4,"  ***"/ &
     &    "*** and component_master_rank_local=",i4," differ:  ***"/ &
     &    "*** STOPPED ***")', &
     &    process_rank_local,component_master_rank_local
          CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)

        else if (FlexLev.eq.1 .or. FlexLev.eq.2) then

!         With "flexibility level" FlexLev=1 or FlexLev=2, any
!         Component process is allowed to call this subroutine but
!         only the Component master process is supposed to actually
!         receive data (so the others just make a dummy call), as
!         the Coupler process only sends data to the Component master
!         process.

          return

        else if (FlexLev.eq.3) then

!         With "flexibility level" FlexLev=3, any Component process
!         may actually receive data.
!         [In this case, the Coupler process (in CPL_SEND) first
!         receives the Component process global rank
!         (process_rank_global) from this subroutine, the source being
!         MPI_ANY_SOURCE, so it is only identification by  tag  which
!         enables Coupler to receive process_rank_global from the right
!         source. Upon the receipt, the Coupler process (in CPL_SEND)
!         sends the data to this Component process, rather than to
!         the Component master process as is the case with lower
!         "flexibility levels".]
!         But in any case only one Component process may actually be
!         engaged in a particular exchange of data with Coupler.

          ibuf(1)=my_id
          ibuf(2)=process_rank_global
          tag=my_id
          call MPI_SEND(ibuf,2,MPI_INTEGER,Coupler_rank,tag, &
     &    MPI_COMM_WORLD,ierr)
          call GLOB_ABORT(ierr,'CMP_alt_RECV: error in MPI_SEND',1)

        else

          print '("*** CMP_alt_RECV: illegal value of FlexLev",i9/ &
     &    "*** STOPPED")',FlexLev
          CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)

        end if

      end if

      tag=my_id
      call MPI_RECV(F,N,MPI_kind_alt_REAL,Coupler_rank,tag, &
     &MPI_COMM_WORLD,status,ierr)
      call GLOB_ABORT(ierr,'CMP_alt_RECV: error in MPI_RECV',1)

!           call CMP_DBG_CR(6,'CMP_alt_RECV: exiting')

      return
      END
!
!***********************************************************************
!
      SUBROUTINE CMP_gnr_RECV(F,N,MPI_DATATYPE)
!
      USE CMP_COMM

      implicit none

      integer N,MPI_DATATYPE
      integer F(1)

      integer ierr,tag,ibuf(3),status(MPI_STATUS_SIZE)
!

      if (Coupler_id.lt.0) return    !   <- standalone mode

!           call CMP_DBG_CR(6,'CMP_gnr_RECV: entered')

      if (process_rank_local.ne.component_master_rank_local) then

        if (FlexLev.eq.0) then

!         With "flexibility level" FlexLev=0, only Component master
!         process is supposed to call this subroutine.

          print '("*** CMP_gnr_RECV: process_rank_local=",i4,"  ***"/ &
     &    "*** and component_master_rank_local=",i4," differ:  ***"/ &
     &    "*** STOPPED ***")', &
     &    process_rank_local,component_master_rank_local
          CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)

        else if (FlexLev.eq.1 .or. FlexLev.eq.2) then

!         With "flexibility level" FlexLev=1 or FlexLev=2, any
!         Component process is allowed to call this subroutine but
!         only the Component master process is supposed to actually
!         receive data (so the others just make a dummy call), as
!         the Coupler process only sends data to the Component master
!         process.

          return

        else if (FlexLev.eq.3) then

!         With "flexibility level" FlexLev=3, any Component process
!         may actually receive data.
!         [In this case, the Coupler process (in CPL_SEND) first
!         receives the Component process global rank
!         (process_rank_global) from this subroutine, the source being
!         MPI_ANY_SOURCE, so it is only identification by  tag  which
!         enables Coupler to receive process_rank_global from the right
!         source. Upon the receipt, the Coupler process (in CPL_SEND)
!         sends the data to this Component process, rather than to
!         the Component master process as is the case with lower
!         "flexibility levels".]
!         But in any case only one Component process may actually be
!         engaged in a particular exchange of data with Coupler.

          ibuf(1)=my_id
          ibuf(2)=process_rank_global
          tag=my_id
          call MPI_SEND(ibuf,2,MPI_INTEGER,Coupler_rank,tag, &
     &    MPI_COMM_WORLD,ierr)
          call GLOB_ABORT(ierr,'CMP_gnr_RECV: error in MPI_SEND',1)

        else

          print '("*** CMP_gnr_RECV: illegal value of FlexLev",i9/ &
     &    "*** STOPPED")',FlexLev
          CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)

        end if

      end if

      tag=my_id
      call MPI_RECV(F,N,MPI_DATATYPE,Coupler_rank,tag, &
     &MPI_COMM_WORLD,status,ierr)
      call GLOB_ABORT(ierr,'CMP_gnr_RECV: error in MPI_RECV',1)

!           call CMP_DBG_CR(6,'CMP_gnr_RECV: exiting')

      return
      END
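!
!     Usage sketch: receiving an integer array with an explicit MPI
!     datatype (the array name and length are hypothetical):
!
!       integer mask(N)
!       call CMP_gnr_RECV(mask,N,MPI_INTEGER)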
!
!***********************************************************************
!
      SUBROUTINE CMP_ANNOUNCE(nunit,s)
!
      USE CMP_COMM

      implicit none

      character*(*) s

      integer nunit,ierr
!

      if (process_rank_local.eq.component_master_rank_local) then
        write(nunit,*) trim(s)
      else if (FlexLev.eq.0) then

!         With "flexibility level" FlexLev=0, only Component master
!         process is supposed to call this subroutine.

          print '("*** CMP_ANNOUNCE: process_rank_local=",i4,"  ***"/ &
     &    "*** and component_master_rank_local=",i4," differ:  ***"/ &
     &    "*** STOPPED ***")', &
     &    process_rank_local,component_master_rank_local
          CALL MPI_ABORT(MPI_COMM_WORLD,2,ierr)

      end if

      return
      END
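!
!     Usage sketch (the message text is hypothetical); non-master
!     processes may also make this call when FlexLev>0, in which case
!     it is a no-op:
!
!       call CMP_ANNOUNCE(6,'exchange step completed')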
!
!***********************************************************************
!
      SUBROUTINE CMP_STDOUT(s)
!
!     USE CMP_COMM, ONLY: Coupler_id,process_rank_global
        ! <- These variables may not have the right values by this
        ! moment, as this routine may be called before CMP_INIT  - 02/23/05

      implicit none

      character*(*) s
      integer ios
      character*4 mess
!

! -> For debugging:
      OPEN(12345, &
     &file='/nfsuser/g01/wx20ds/C/cmp.stdout', &
     &form='formatted',status='old',iostat=ios)
      if (ios.eq.0) then
        read(12345,*) mess
        if (mess.eq.'mess') then
!         print*,'CMP_STDOUT: unit 6 left alone, process ',
!    >    process_rank_global
        ! <- process_rank_global may be undefined by this moment, as
        !    this routine may be called before CMP_INIT  - 02/23/05
          RETURN
        end if
        CLOSE(12345)
      end if
! <- for debugging

!     if (Coupler_id.lt.0) RETURN    ! Nothing is to occur if there is
                                     ! no communication with Coupler,
                                     ! i.e. if Component is standalone
        ! <- Coupler_id may not have the right value by this moment,
        ! as this routine may be called before CMP_INIT  - 02/23/05

      if (len_trim(s).eq.0) RETURN

      close(6)

      open(6,file=trim(s),form='formatted',status='unknown')

      print*,'CMP_STDOUT: unit 6 closed, reopened as '//trim(s)

      return
      END
!
!***********************************************************************
!
      SUBROUTINE CMP_DBG_CR(nunit,s)
!
!       Debugging routine: mainly, prints Coupler_rank
!
      USE CMP_COMM

      implicit none

      character*(*) s
      integer nunit

      integer ncall/0/,ncallmax/5000/
      save
!

      if (s(5:6).eq.'m:') then
        if (process_rank_local .ne. component_master_rank_local) RETURN
      end if

      if (ncall.ge.ncallmax) RETURN
      ncall=ncall+1

      write(nunit,*)process_rank_global,ncall,Coupler_id,Coupler_rank,s

! The following assumes that Coupler_rank must be =0; comment it out
! if this is not the case
      call GLOB_ABORT(Coupler_rank, &
     &'CMP_DBG_CR: Coupler_rank.ne.0, aborting',1)

      return
      END
!
!***********************************************************************
!
      SUBROUTINE CMP_FLUSH(nunit)

      USE CMP_COMM

      implicit none

      integer nunit

      integer i,ierr
!

      do i=0,component_nprocs-1
        call MPI_BARRIER(COMM_local,ierr)
        call GLOB_ABORT(ierr,'CMP_FLUSH: MPI_BARRIER failed, aborting', &
     &  1)
        if (i.eq.process_rank_local) call FLUSH(nunit)
      end do

      return
      END
!
!***********************************************************************
!
      subroutine CMP_FINALIZE(izd,ierr)

      USE CMP_COMM

      implicit none

      logical izd
      integer ierr

      integer ierr1,ierr2
!

      ierr=0
      ierr2=0
      call MPI_INITIALIZED(izd,ierr1)
      if (izd) call MPI_FINALIZE(ierr2)
      ierr=abs(ierr1)+abs(ierr2)

      return
      END
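!
!     Usage sketch: shutting down at the end of the run:
!
!       logical izd
!       integer ierr
!       call CMP_FINALIZE(izd,ierr)
!       if (ierr.ne.0) print*,'CMP_FINALIZE: MPI error, ierr=',ierr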
#endif