source: lmdz_wrf/trunk/WRFV3/frame/module_io_quilt.F @ 354

Last change on this file since 354 was 1, checked in by lfita, 10 years ago
  • -- --- Opening of the WRF+LMDZ coupling repository --- -- -

WRF: version v3.3
LMDZ: version v1818

More details in:

File size: 196.6 KB
!WRF:DRIVER_LAYER:IO
!
#define DEBUG_LVL 50
!#define mpi_x_comm_size(i,j,k)  Mpi_Comm_Size ( i,j,k ) ; write(0,*) __LINE__
#define mpi_x_comm_size(i,j,k)  Mpi_Comm_Size ( i,j,k )

MODULE module_wrf_quilt
!<DESCRIPTION>
!<PRE>
! This module contains WRF-specific I/O quilt routines called by both
! client (compute) and server (I/O quilt) tasks.  I/O quilt servers are
! a run-time optimization that allows I/O operations, executed on the I/O
! quilt server tasks, to be overlapped with useful computation, executed on
! the compute tasks.  Since I/O operations are often quite slow compared to
! computation, this performance optimization can increase parallel
! efficiency. 
!
! Currently, one group of I/O servers can be specified at run-time.  Namelist
! variable "nio_tasks_per_group" is used to specify the number of I/O server
! tasks in this group.  In most cases, parallel efficiency is optimized when
! the minimum number of I/O server tasks is used.  If the memory needed to cache
! I/O operations fits on a single processor, then set nio_tasks_per_group=1. 
! If not, increase the number of I/O server tasks until I/O operations fit in
! memory.  In the future, multiple groups of I/O server tasks will be
! supported.  The number of groups will be specified by namelist variable
! "nio_groups".  For now, nio_groups must be set to 1.  Currently, I/O servers
! only support overlap of output operations with computation.  Also, only I/O
! packages that do not support native parallel I/O may be used with I/O server
! tasks.  This excludes PHDF5 and MCEL. 
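!
! As an illustrative sketch, quilting would typically be requested through
! the quilting group of the WRF namelist (values here are hypothetical):
!   &namelist_quilt
!    nio_tasks_per_group = 2,
!    nio_groups          = 1,
!   /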
!
! In this module, the I/O quilt server tasks call package-dependent
! WRF-specific I/O interfaces to perform I/O operations requested by the
! client (compute) tasks.  All of these calls occur inside subroutine
! quilt(). 
!
! The client (compute) tasks call package-independent WRF-specific "quilt I/O"
! interfaces that send requests to the I/O quilt servers.  All of these calls
! are made from module_io.F. 
!
! These routines have the same names and (roughly) the same arguments as those
! specified in the WRF I/O API except that:
! - "Quilt I/O" routines defined in this file and called by routines in
!   module_io.F have the "wrf_quilt_" prefix.
! - Package-dependent routines called from routines in this file are defined
!   in the external I/O packages and have the "ext_" prefix.
!
! Both client (compute) and server tasks call routine init_module_wrf_quilt()
! which then calls setup_quilt_servers() to determine which tasks are compute
! tasks and which are server tasks.  Before the end of init_module_wrf_quilt()
! server tasks call routine quilt() and remain there for the rest of the model
! run.  Compute tasks return from init_module_wrf_quilt() to perform model
! computations. 
!
! See http://www.mmm.ucar.edu/wrf/WG2/software_2.0/IOAPI.doc for the latest
! version of the WRF I/O API.  This document includes detailed descriptions
! of subroutines and their arguments that are not duplicated here.
!</PRE>
!</DESCRIPTION>
  USE module_internal_header_util
  USE module_timing

  INTEGER, PARAMETER :: int_num_handles = 99
  LOGICAL, DIMENSION(int_num_handles) :: okay_to_write, int_handle_in_use, okay_to_commit
  INTEGER, DIMENSION(int_num_handles) :: int_num_bytes_to_write, io_form
  REAL, POINTER,SAVE :: int_local_output_buffer(:)
  INTEGER,      SAVE :: int_local_output_cursor
  LOGICAL          :: quilting_enabled
  LOGICAL          :: disable_quilt = .FALSE.
  INTEGER          :: prev_server_for_handle = -1
  INTEGER          :: server_for_handle(int_num_handles)
  INTEGER          :: reduced(2), reduced_dummy(2)
  LOGICAL, EXTERNAL :: wrf_dm_on_monitor
  INTEGER nio_groups
#ifdef DM_PARALLEL
  INTEGER :: mpi_comm_local
  LOGICAL :: compute_node
  LOGICAL :: compute_group_master(100)
  INTEGER :: mpi_comm_io_groups(100)
  INTEGER :: nio_tasks_in_group
  INTEGER :: nio_tasks_per_group
  INTEGER :: ncompute_tasks
  INTEGER :: ntasks
  INTEGER :: mytask

  INTEGER, PARAMETER           :: onebyte = 1
  INTEGER comm_io_servers, iserver, hdrbufsize, obufsize
  INTEGER, DIMENSION(4096)     :: hdrbuf
  INTEGER, DIMENSION(int_num_handles)     :: handle
#endif

  CONTAINS

#if  defined(DM_PARALLEL)  &&  !defined( STUBMPI )
    INTEGER FUNCTION get_server_id ( dhandle )
!<DESCRIPTION>
! Logic in the client side to know which io server
! group to send to. If the unit corresponds to a file that's
! already been opened, then we have no choice but to send the
! data to that group again, regardless of whether there are
! other server-groups. If it's a new file, we can choose a new
! server group. I.e. opening a file locks it onto a server
! group. Closing the file unlocks it.
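!
! A minimal walk-through (assuming nio_groups = 2 and no handles assigned
! yet; prev_server_for_handle starts at -1, so new files are assigned
! round-robin):
!   get_server_id(5) -> 1   (new handle 5 is locked onto server group 1)
!   get_server_id(7) -> 2   (next new handle goes to server group 2)
!   get_server_id(5) -> 1   (handle 5 is already locked onto group 1)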
!</DESCRIPTION>
      IMPLICIT NONE
      INTEGER, INTENT(IN) :: dhandle
      IF ( dhandle .GE. 1 .AND. dhandle .LE. int_num_handles ) THEN
        IF ( server_for_handle ( dhandle ) .GE. 1 ) THEN
          get_server_id = server_for_handle ( dhandle )
        ELSE
          prev_server_for_handle = mod ( prev_server_for_handle + 1 , nio_groups )
          server_for_handle( dhandle ) = prev_server_for_handle+1
          get_server_id = prev_server_for_handle+1
        ENDIF
      ELSE
         CALL wrf_message('module_io_quilt: get_server_id bad dhandle' )
      ENDIF
    END FUNCTION get_server_id
#endif

    SUBROUTINE set_server_id ( dhandle, value )
       IMPLICIT NONE
       INTEGER, INTENT(IN) :: dhandle, value
       IF ( dhandle .GE. 1 .AND. dhandle .LE. int_num_handles ) THEN
         server_for_handle(dhandle) = value
       ELSE
         CALL wrf_message('module_io_quilt: set_server_id bad dhandle' )
       ENDIF
    END SUBROUTINE set_server_id

#if defined( DM_PARALLEL ) && !defined( STUBMPI )
    SUBROUTINE int_get_fresh_handle( retval )
!<DESCRIPTION>
! Find an unused "client file handle" and return it in retval.
! The "client file handle" is used to remember how a file was opened
! so clients do not need to ask the I/O quilt servers for this information.
! It is also used as a file identifier in communications with the I/O
! server task.
!
! Note that client tasks know nothing about package-specific handles.
! Only the I/O quilt servers know about them.
!</DESCRIPTION>
      INTEGER i, retval
      retval = -1
      DO i = 1, int_num_handles
        IF ( .NOT. int_handle_in_use(i) )  THEN
          retval = i
          GOTO 33
        ENDIF
      ENDDO
33    CONTINUE
      IF ( retval < 0 )  THEN
        CALL wrf_error_fatal("frame/module_io_quilt.F: int_get_fresh_handle() can not")
      ENDIF
      int_handle_in_use(i) = .TRUE.
      NULLIFY ( int_local_output_buffer )
    END SUBROUTINE int_get_fresh_handle
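! Usage sketch (hypothetical caller):
!   INTEGER :: i
!   CALL int_get_fresh_handle( i )   ! on return, slot i is marked in use
! The slot stays marked in int_handle_in_use(i) until it is reset by the
! code that releases the handle.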

    SUBROUTINE setup_quilt_servers ( nio_tasks_per_group,     &
                                     mytask,                  &
                                     ntasks,                  &
                                     n_groups_arg,            &
                                     nio,                     &
                                     mpi_comm_wrld,           &
                                     mpi_comm_local,          &
                                     mpi_comm_io_groups)
!<DESCRIPTION>
! Both client (compute) and server tasks call this routine to
! determine which tasks are compute tasks and which are I/O server tasks. 
!
! Module variables MPI_COMM_LOCAL and MPI_COMM_IO_GROUPS(:) are set up to
! contain MPI communicators as follows: 
!
! MPI_COMM_LOCAL is the communicator for the local group of tasks. For the
! compute tasks it is the group of compute tasks; for a server group it is the
! communicator of tasks in the server group.
!
! Elements of MPI_COMM_IO_GROUPS are communicators that each contain one or
! more compute tasks and a single I/O server assigned to those compute tasks. 
! The I/O server task is always the last task in these communicators. 
! On a compute task, which has a single associate in each of the server
! groups, MPI_COMM_IO_GROUPS is treated as an array; each element corresponds
! to a different server group.
! On a server task only the first element of MPI_COMM_IO_GROUPS is used
! because each server task is part of only one io_group. 
!
! I/O server tasks in each I/O server group are divided among compute tasks as
! evenly as possible. 
!
! When multiple I/O server groups are used, each must have the same number of
! tasks.  When the total number of extra I/O tasks does not divide evenly by
! the number of io server groups requested, the remainder tasks are not used
! (wasted). 
!
! For example, communicator membership for 18 tasks with nio_groups=2 and
! nio_tasks_per_group=3 is shown below: 
!
!<PRE>
! Membership for MPI_COMM_LOCAL communicators:
!   COMPUTE TASKS:          0   1   2   3   4   5   6   7   8   9  10  11
!   1ST I/O SERVER GROUP:  12  13  14
!   2ND I/O SERVER GROUP:  15  16  17
!
! Membership for MPI_COMM_IO_GROUPS(1): 
!   COMPUTE TASKS 0, 3, 6, 9:   0   3   6   9  12
!   COMPUTE TASKS 1, 4, 7,10:   1   4   7  10  13
!   COMPUTE TASKS 2, 5, 8,11:   2   5   8  11  14
!   I/O SERVER TASK       12:   0   3   6   9  12
!   I/O SERVER TASK       13:   1   4   7  10  13
!   I/O SERVER TASK       14:   2   5   8  11  14
!   I/O SERVER TASK       15:   0   3   6   9  15
!   I/O SERVER TASK       16:   1   4   7  10  16
!   I/O SERVER TASK       17:   2   5   8  11  17
!
! Membership for MPI_COMM_IO_GROUPS(2): 
!   COMPUTE TASKS 0, 3, 6, 9:   0   3   6   9  15
!   COMPUTE TASKS 1, 4, 7,10:   1   4   7  10  16
!   COMPUTE TASKS 2, 5, 8,11:   2   5   8  11  17
!   I/O SERVER TASK       12:  ** not used **
!   I/O SERVER TASK       13:  ** not used **
!   I/O SERVER TASK       14:  ** not used **
!   I/O SERVER TASK       15:  ** not used **
!   I/O SERVER TASK       16:  ** not used **
!   I/O SERVER TASK       17:  ** not used **
!</PRE>
!</DESCRIPTION>
      USE module_configure
#ifdef DM_PARALLEL
      USE module_dm, ONLY : compute_mesh
#endif
      IMPLICIT NONE
      INCLUDE 'mpif.h'
      INTEGER,                      INTENT(IN)  :: nio_tasks_per_group, mytask, ntasks, &
                                                   n_groups_arg, mpi_comm_wrld
      INTEGER,  INTENT(OUT)                     :: mpi_comm_local, nio
      INTEGER, DIMENSION(100),      INTENT(OUT) :: mpi_comm_io_groups
! Local
      INTEGER                     :: i, j, ii, comdup, ierr, niotasks, n_groups, iisize
      INTEGER, DIMENSION(ntasks)  :: icolor
      CHARACTER*128 mess

      INTEGER :: io_form_setting
      INTEGER :: me
      INTEGER :: k, m, nprocx, nprocy
      LOGICAL :: reorder_mesh

!check the namelist and make sure there are no output forms specified
!that cannot be quilted
      CALL nl_get_io_form_history(1,   io_form_setting) ; call sokay( 'history', io_form_setting )
      CALL nl_get_io_form_restart(1,   io_form_setting) ; call sokay( 'restart', io_form_setting )
      CALL nl_get_io_form_auxhist1(1,  io_form_setting) ; call sokay( 'auxhist1', io_form_setting )
      CALL nl_get_io_form_auxhist2(1,  io_form_setting) ; call sokay( 'auxhist2', io_form_setting )
      CALL nl_get_io_form_auxhist3(1,  io_form_setting) ; call sokay( 'auxhist3', io_form_setting )
      CALL nl_get_io_form_auxhist4(1,  io_form_setting) ; call sokay( 'auxhist4', io_form_setting )
      CALL nl_get_io_form_auxhist5(1,  io_form_setting) ; call sokay( 'auxhist5', io_form_setting )
      CALL nl_get_io_form_auxhist6(1,  io_form_setting) ; call sokay( 'auxhist6', io_form_setting )
      CALL nl_get_io_form_auxhist7(1,  io_form_setting) ; call sokay( 'auxhist7', io_form_setting )
      CALL nl_get_io_form_auxhist8(1,  io_form_setting) ; call sokay( 'auxhist8', io_form_setting )
      CALL nl_get_io_form_auxhist9(1,  io_form_setting) ; call sokay( 'auxhist9', io_form_setting )
      CALL nl_get_io_form_auxhist10(1, io_form_setting) ; call sokay( 'auxhist10', io_form_setting )
      CALL nl_get_io_form_auxhist11(1, io_form_setting) ; call sokay( 'auxhist11', io_form_setting )
      n_groups = n_groups_arg
      IF ( n_groups .LT. 1 ) n_groups = 1

      compute_node = .TRUE.

!<DESCRIPTION>
! nio is the number of io tasks per group.  If there aren't enough tasks to satisfy
! the requirement that there be at least as many compute tasks as io tasks in
! each group, then just print a warning and dump out of quilting.
!</DESCRIPTION>

      nio = nio_tasks_per_group
      ncompute_tasks = ntasks - (nio * n_groups)
      IF ( ncompute_tasks .LT. nio ) THEN
        WRITE(mess,'("Not enough tasks to have ",I3," groups of ",I3," I/O tasks. No quilting.")')n_groups,nio
        nio            = 0
        ncompute_tasks = ntasks
      ELSE
        WRITE(mess,'("Quilting with ",I3," groups of ",I3," I/O tasks.")')n_groups,nio
      ENDIF
      CALL wrf_message(mess)

      IF ( nio .LT. 0 ) THEN
        nio = 0
      ENDIF
      IF ( nio .EQ. 0 ) THEN
        quilting_enabled = .FALSE.
        mpi_comm_local = mpi_comm_wrld
        mpi_comm_io_groups = mpi_comm_wrld
        RETURN
      ENDIF
      quilting_enabled = .TRUE.

! First construct the local communicators
! prepare to split the communicator by designating compute-only tasks
      DO i = 1, ncompute_tasks
        icolor(i) = 0
      ENDDO
      ii = 1
! and designating the groups of i/o tasks
      DO i = ncompute_tasks+1, ntasks, nio
        DO j = i, i+nio-1
          icolor(j) = ii
        ENDDO
        ii = ii+1
      ENDDO
      CALL MPI_Comm_dup(mpi_comm_wrld,comdup,ierr)
      CALL MPI_Comm_split(comdup,icolor(mytask+1),mytask,mpi_comm_local,ierr)
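! For the 18-task example in the description above (12 compute tasks,
! nio_groups=2, nio_tasks_per_group=3), this split colors tasks 0-11 with 0,
! tasks 12-14 with 1, and tasks 15-17 with 2, yielding the three
! MPI_COMM_LOCAL communicators shown in the membership table.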

! Now construct the communicators for the io_groups
      CALL nl_get_reorder_mesh(1,reorder_mesh)
      IF ( reorder_mesh ) THEN
        reorder_mesh = .FALSE.
        CALL nl_set_reorder_mesh(1,reorder_mesh)
        CALL wrf_message('Warning: reorder_mesh does not work with quilting. Disabled reorder_mesh.')
      ENDIF
      ! assign the compute tasks to the i/o tasks in full rows
      CALL compute_mesh( ncompute_tasks, nprocx, nprocy )

      nio = min(nio,nprocy)
      m = mod(nprocy,nio)  ! divide up remainder, 1 row per, until gone
      ii = 1
      DO j = 1, nio, 1
         DO k = 1,nprocy/nio+min(m,1)
           DO i = 1, nprocx
             icolor(ii) = j - 1
             ii = ii + 1
           ENDDO
         ENDDO
         m = max(m-1,0)
      ENDDO
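! Worked example of the row distribution (hypothetical mesh): if
! compute_mesh returns nprocx=4, nprocy=5 and nio=2, then m = mod(5,2) = 1,
! so the first server is colored onto 3 rows (5/2 + 1) and the second onto
! the remaining 2 rows; the single leftover row goes to the lowest-numbered
! server, as the "divide up remainder" note above says.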

! ... and add the io servers as the last task in each group
      DO j = 1, n_groups
        ! TBH:  each I/O group will contain only one I/O server
        DO i = ncompute_tasks+1,ntasks
          icolor(i) = MPI_UNDEFINED
        ENDDO
        ii = 0
        DO i = ncompute_tasks+(j-1)*nio+1,ncompute_tasks+j*nio
          icolor(i) = ii
          ii = ii+1
        ENDDO
        CALL MPI_Comm_dup(mpi_comm_wrld,comdup,ierr)
        CALL MPI_Comm_split(comdup,icolor(mytask+1),mytask, &
                            mpi_comm_io_groups(j),ierr)
      ENDDO

      compute_group_master = .FALSE.
      compute_node         = .FALSE.

      DO j = 1, n_groups

         IF ( mytask .LT. ncompute_tasks .OR.                                                  &    ! I am a compute task
              (ncompute_tasks+(j-1)*nio .LE. mytask .AND. mytask .LT. ncompute_tasks+j*nio) &    ! I am the I/O server for this group
            ) THEN

         CALL MPI_Comm_Size( mpi_comm_io_groups(j) , iisize, ierr )
         ! Get the rank of this compute task in the compute+io
         ! communicator to which it belongs
         CALL MPI_Comm_Rank( mpi_comm_io_groups(j) , me , ierr )

         ! If I am an I/O server for this group then make that group's
         ! communicator the first element in the mpi_comm_io_groups array
         ! (I will ignore all of the other elements).
         IF ( me+1 .EQ. iisize ) THEN
            mpi_comm_io_groups(1) = mpi_comm_io_groups(j)
         ELSE
            compute_node = .TRUE.
            ! If I am a compute task, check whether I am the member of my
            ! group that will communicate things that should be sent just
            ! once (e.g. commands) to the IO server of my group.
            compute_group_master(j) = (me .EQ. 0)

            IF( compute_group_master(j) ) WRITE(*,*) mytask,': ARPDBG : I will talk to IO server in group ',j
         ENDIF
         ENDIF
      ENDDO

    END SUBROUTINE setup_quilt_servers

    SUBROUTINE sokay ( stream, io_form )
    USE module_state_description
    CHARACTER*(*) stream
    CHARACTER*256 mess
    INTEGER io_form

    SELECT CASE (io_form)
#ifdef NETCDF
      CASE ( IO_NETCDF   )
         RETURN
#endif
#ifdef INTIO
      CASE ( IO_INTIO   )
         RETURN
#endif
#ifdef YYY
      CASE ( IO_YYY )
         RETURN
#endif
#ifdef GRIB1
      CASE ( IO_GRIB1 )
         RETURN
#endif
#ifdef GRIB2
      CASE ( IO_GRIB2 )
         RETURN
#endif
      CASE (0)
         RETURN
      CASE DEFAULT
         WRITE(mess,*)' An output format has been specified that is incompatible with quilting: io_form: ',io_form,' ',TRIM(stream)
         CALL wrf_error_fatal(mess)
    END SELECT
    END SUBROUTINE sokay
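! Behavior sketch: sokay( 'history', IO_NETCDF ) simply returns in a build
! with NETCDF defined, while an io_form that matches none of the quiltable
! cases above (for example a parallel-I/O package such as PHDF5, which the
! module description excludes) falls through to CASE DEFAULT and aborts
! via wrf_error_fatal.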

    SUBROUTINE quilt
!<DESCRIPTION>
! I/O server tasks call this routine and remain in it for the rest of the
! model run.  I/O servers receive I/O requests from compute tasks and
! perform requested I/O operations by calling package-dependent WRF-specific
! I/O interfaces.  Requests are sent in the form of "data headers".  Each
! request has a unique "header" message associated with it.  For requests that
! contain large amounts of data, the data is appended to the header.  See
! file module_internal_header_util.F for detailed descriptions of all
! headers. 
!
! We wish to be able to link to different packages depending on whether
! the I/O is restart, initial, history, or boundary.
!</DESCRIPTION>
      USE module_state_description
      USE module_quilt_outbuf_ops
      IMPLICIT NONE
      INCLUDE 'mpif.h'
#include "intio_tags.h"
#include "wrf_io_flags.h"
      INTEGER itag, ninbuf, ntasks_io_group, ntasks_local_group, mytask_local, ierr
      INTEGER istat
      INTEGER mytask_io_group
      INTEGER   :: nout_set = 0
      INTEGER   :: obufsize, bigbufsize, chunksize, sz
      REAL, DIMENSION(1)      :: dummy
      INTEGER, ALLOCATABLE, DIMENSION(:) :: obuf, bigbuf
      REAL,    ALLOCATABLE, DIMENSION(:) :: RDATA
      INTEGER, ALLOCATABLE, DIMENSION(:) :: IDATA
      CHARACTER (LEN=512) :: CDATA
      CHARACTER (LEN=80) :: fname
      INTEGER icurs, hdrbufsize, itypesize, ftypesize, rtypesize, Status, fstat, io_form_arg
      INTEGER :: DataHandle, FieldType, Comm, IOComm, DomainDesc, code, Count
      INTEGER, DIMENSION(3) :: DomainStart , DomainEnd , MemoryStart , MemoryEnd , PatchStart , PatchEnd
      INTEGER :: dummybuf(1)
      INTEGER :: num_noops, num_commit_messages, num_field_training_msgs, hdr_tag
      CHARACTER (len=256) :: DateStr , Element, VarName, MemoryOrder , Stagger , DimNames(3), FileName, SysDepInfo, mess
      INTEGER, EXTERNAL :: use_package
      LOGICAL           :: stored_write_record, retval
      INTEGER iii, jjj, vid, CC, DD

logical okay_to_w
character*120 sysline

! If we've been built with PNETCDF_QUILT defined then we use parallel I/O
! within the group of I/O servers rather than gathering the data onto the
! root I/O server. Unfortunately, this approach means that we can no longer
! select different I/O layers for use with quilting at run time. ARPDBG.
! This code is sufficiently different that it is kept in the separate
! quilt_pnc() routine.
#ifdef PNETCDF_QUILT
      CALL quilt_pnc()
      RETURN
#endif

! Call ext_pkg_ioinit() routines to initialize I/O packages. 
      SysDepInfo = " "
#ifdef NETCDF
      CALL ext_ncd_ioinit( SysDepInfo, ierr)
#endif
#ifdef INTIO
      CALL ext_int_ioinit( SysDepInfo, ierr )
#endif
#ifdef XXX
      CALL ext_xxx_ioinit( SysDepInfo, ierr)
#endif
#ifdef YYY
      CALL ext_yyy_ioinit( SysDepInfo, ierr)
#endif
#ifdef ZZZ
      CALL ext_zzz_ioinit( SysDepInfo, ierr)
#endif
#ifdef GRIB1
      CALL ext_gr1_ioinit( SysDepInfo, ierr)
#endif
#ifdef GRIB2
      CALL ext_gr2_ioinit( SysDepInfo, ierr)
#endif

      okay_to_commit = .false.
      stored_write_record = .false.
      ninbuf = 0
      ! get info. about the I/O server group that this I/O server task
      ! belongs to
      ! Last task in this I/O server group is the I/O server "root"
      ! The I/O server "root" actually writes data to disk
      ! TBH:  WARNING:  This is also implicit in the call to collect_on_comm().
      CALL mpi_x_comm_size( mpi_comm_io_groups(1), ntasks_io_group,    ierr )
      CALL MPI_COMM_RANK( mpi_comm_io_groups(1), mytask_io_group,    ierr )
      CALL mpi_x_comm_size( mpi_comm_local,        ntasks_local_group, ierr )
      CALL MPI_COMM_RANK( mpi_comm_local,        mytask_local,       ierr )

      CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
      IF ( itypesize <= 0 ) THEN
        CALL wrf_error_fatal("external/RSL/module_dm.F: quilt: type size <= 0 invalid")
      ENDIF

! Work out whether this i/o server processor has one fewer associated compute proc than
! the most any processor has. Can happen when the number of i/o tasks does not evenly divide
! the number of compute tasks. This is needed to keep the i/o tasks synchronized on the
! same message when they start communicating to stitch together an output.
!
! Compute processes associated with this task:
       CC = ntasks_io_group - 1
! Number of compute tasks per I/O task (less remainder)
       DD = ncompute_tasks / ntasks_local_group
!
! CC-DD is 1 on servers with the maximum number of compute clients,
!          0 on servers with one less than the maximum
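! Worked example (hypothetical counts): with ncompute_tasks = 14 and
! ntasks_local_group = 4 servers, DD = 14/4 = 3; the two servers with
! four compute clients have CC = 4 (CC-DD = 1), and the two servers with
! three clients have CC = 3 (CC-DD = 0).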


! infinite loop until shutdown message received
! This is the main request-handling loop.  I/O quilt servers stay in this loop
! until the model run ends. 
okay_to_w = .false.
      DO WHILE (.TRUE.)  ! {

!<DESCRIPTION>
! Each I/O server receives requests from its compute tasks.  Each request
! is contained in a data header (see module_internal_header_util.F for
! detailed descriptions of data headers).
! Each request is sent in two phases.  First, the sizes of all messages that
! will be sent from the compute tasks to this I/O server are summed on the
! I/O server via MPI_Reduce().  The I/O server then allocates buffer "obuf"
! and receives concatenated messages from the compute tasks into it via the
! call to collect_on_comm().  Note that "sizes" are generally expressed in
! *bytes* in this code so conversion to "count" (number of Fortran words) is
! required for Fortran indexing and MPI calls. 
!</DESCRIPTION>
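! Schematic of the two-phase exchange as seen from this server (the names
! match the code below; the compute-task side is the mirror image):
!   CALL MPI_Reduce( reduced_dummy, reduced, 2, MPI_INTEGER, MPI_SUM, ... )
!                                   ! phase 1: summed byte count -> obufsize
!   ALLOCATE( obuf( (obufsize+1)/itypesize ) )     ! bytes -> integer words
!   CALL collect_on_comm_debug( ..., obuf, obufsize )
!                                   ! phase 2: concatenated client messages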
        ! wait for info from compute tasks in the I/O group that we're ready to rock
        ! obufsize will contain number of *bytes*
!CALL start_timing()
        ! first element of reduced is obufsize, second is DataHandle
        ! if needed (currently needed only for ioclose).
        reduced_dummy = 0
        CALL MPI_Reduce( reduced_dummy, reduced, 2, MPI_INTEGER,  &
                         MPI_SUM, mytask_io_group,          &
                         mpi_comm_io_groups(1), ierr )
        obufsize = reduced(1)
!CALL end_timing("MPI_Reduce at top of forever loop")
!JMDEBUGwrite(0,*)'obufsize = ',obufsize
! Negative obufsize will trigger I/O server exit. 
        IF ( obufsize .LT. 0 ) THEN
          IF ( obufsize .EQ. -100 ) THEN         ! magic number
#ifdef NETCDF
            CALL ext_ncd_ioexit( Status )
#endif
#ifdef INTIO
            CALL ext_int_ioexit( Status )
#endif
#ifdef XXX
            CALL ext_xxx_ioexit( Status )
#endif
#ifdef YYY
            CALL ext_yyy_ioexit( Status )
#endif
#ifdef ZZZ
            CALL ext_zzz_ioexit( Status )
#endif
#ifdef GRIB1
            CALL ext_gr1_ioexit( Status )
#endif
#ifdef GRIB2
            CALL ext_gr2_ioexit( Status )
#endif
            CALL wrf_message ( 'I/O QUILT SERVERS DONE' )
            CALL mpi_finalize(ierr)
            STOP
          ELSE
            WRITE(mess,*)'Possible 32-bit overflow on output server. Try larger nio_tasks_per_group in namelist.'
            CALL wrf_error_fatal(mess)
          ENDIF
        ENDIF

!        CALL start_timing()
! Obufsize of zero signals a close

! Allocate buffer obuf to be big enough for the data the compute tasks
! will send.  Note: obufsize is in *bytes* so we need to pare this
! down, since the buffer is INTEGER. 
        IF ( obufsize .GT. 0 ) THEN
          ALLOCATE( obuf( (obufsize+1)/itypesize ) )

! let's roll; get the data from the compute procs and put in obuf
          CALL collect_on_comm_debug(__FILE__,__LINE__, mpi_comm_io_groups(1),        &
                                onebyte,                      &
                                dummy, 0,                     &
                                obuf, obufsize )
!          CALL end_timing( "quilt on server: collecting data from compute procs" )
        ELSE
          ! Necessarily, the compute processes send the ioclose signal,
          ! if there is one, after the iosync, which means they
          ! will stall on the ioclose message waiting for the quilt
          ! processes if we handle it the way other messages are collected,
          ! using collect_on_comm.  This avoids that, but we need
          ! a special signal (obufsize zero) and the DataHandle
          ! of the file to be closed. That handle is sent as the second
          ! word of the io_close message received by the MPI_Reduce above.
          ! Then a header representing the ioclose message is constructed
          ! here and handled below as if it were received from the
          ! compute processes. The clients (compute processes) must be
          ! careful to send this correctly (one compute process sends the actual
          ! handle and everyone else sends a zero, so the result sums to
          ! the value of the handle).
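          ! For illustration (hypothetical handle number): if four compute
          ! tasks close DataHandle 3, their contributions to the MPI_SUM
          ! reduction above are 3, 0, 0 and 0, so reduced(2) arrives here
          ! as 3.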
          !
          ALLOCATE( obuf( 4096 ) )
          ! DataHandle is provided as second element of reduced
          CALL int_gen_handle_header( obuf, obufsize, itypesize, &
                                      reduced(2) , int_ioclose )
        ENDIF

!write(0,*)'calling init_store_piece_of_field'
! Now all messages received from the compute clients are stored in
! obuf.  Scan through obuf and extract headers and field data and store in
! internal buffers.  The scan is done twice, first to determine sizes of
! internal buffers required for storage of headers and fields and second to
! actually store the headers and fields.  This bit of code does not do the
! "quilting" (assembly of patches into full domains).  For each field, it
! simply concatenates all received patches for the field into a separate
! internal buffer (i.e. one buffer per field).  Quilting is done later by
! routine store_patch_in_outbuf(). 
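! Both passes share the same skeleton (schematic, using the calls that
! appear below):
!   pass 1:  DO over headers in obuf
!              CALL add_to_bufsize_for_field( VarName, <size> )  ! sizing only
!   pass 2:  DO over headers in obuf
!              CALL store_piece_of_field( obuf(...), VarName, <size> )  ! copy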
        CALL init_store_piece_of_field
        CALL mpi_type_size ( MPI_INTEGER , itypesize , ierr )
!write(0,*)'mpi_type_size returns ', itypesize
! Scan obuf the first time to calculate the size of the buffer required for
! each field.  Calls to add_to_bufsize_for_field() accumulate sizes. 
        vid = 0
        icurs = itypesize
        num_noops = 0
        num_commit_messages = 0
        num_field_training_msgs = 0
        DO WHILE ( icurs .lt. obufsize ) ! {
          hdr_tag = get_hdr_tag( obuf ( icurs / itypesize ) )
          SELECT CASE ( hdr_tag )
            CASE ( int_field )
              CALL int_get_write_field_header ( obuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize,  &
                                                DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
                                                DomainDesc , MemoryOrder , Stagger , DimNames ,              &
                                                DomainStart , DomainEnd ,                                    &
                                                MemoryStart , MemoryEnd ,                                    &
                                                PatchStart , PatchEnd )
              chunksize = (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
                          (PatchEnd(3)-PatchStart(3)+1)*ftypesize

              IF ( DomainDesc .EQ. 333933 ) THEN  ! Training write, only one per group of tasks
                 IF ( num_field_training_msgs .EQ. 0 ) THEN
                   call add_to_bufsize_for_field( VarName, hdrbufsize )
!write(0,*) 'X-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
                 ENDIF
                 num_field_training_msgs = num_field_training_msgs + 1
              ELSE
                 call add_to_bufsize_for_field( VarName, hdrbufsize )
!write(0,*) 'X-2a', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
              ENDIF
              icurs = icurs + hdrbufsize

!write(0,*) 'X-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)

              ! If this is a real write (i.e. not a training write), accumulate
              ! buffersize for this field.
              IF ( DomainDesc .NE. 333933 ) THEN   ! magic number
!write(0,*) 'X-1a', chunksize, TRIM(VarName)
                call add_to_bufsize_for_field( VarName, chunksize )
                icurs = icurs + chunksize
              ENDIF
            CASE ( int_open_for_write_commit )  ! only one per group of tasks
              hdrbufsize = obuf(icurs/itypesize)
              IF (num_commit_messages.EQ.0) THEN
                call add_to_bufsize_for_field( 'COMMIT', hdrbufsize )
              ENDIF
              num_commit_messages = num_commit_messages + 1
              icurs = icurs + hdrbufsize
            CASE DEFAULT
              hdrbufsize = obuf(icurs/itypesize)

! This logic and the logic in the loop below is used to determine whether
! to let the noop records sent by the compute processes go through.
! The purpose is to make sure that the communications between this
! server and the other servers in this quilt group stay synchronized in
! the collection loop below, even when the servers are serving different
! numbers of clients. Here are some conditions (a worked example follows):
!
!   1. The number of compute clients served will not differ by more than 1
!   2. The servers with +1 number of compute clients begin with task 0
!      of mpi_comm_local, the communicator shared by this group of servers
!
!   3. For each collective field or metadata output from the compute tasks,
!      there will be one record sent to the associated i/o server task. The
!      i/o server task collects these records and stores them contiguously
!      in a buffer (obuf) using collect_on_comm above.  Thus, obuf on this
!      server task will contain one record from each associated compute
!      task, in order.
!
!   4. In the case of replicated output from the compute tasks
!      (e.g. put_dom_ti records and control records like
!      open_for_write_commit type records), compute task 0 is the only
!      one that sends the record. The other compute tasks send noop
!      records. Thus, obuf on server task zero will contain the output
!      record from task 0 followed by noop records from the rest of the
!      compute tasks associated with task 0.  Obuf on the other server
!      tasks will contain nothing but noop records.
!
!   5. The logic below will not allow any noop records from server task 0.
!      It allows only one noop record from each of the other server tasks
!      in the i/o group.  This way, for replicated output, when the records
!      are collected on one server task below, using collect_on_comm on
!      mpi_comm_local, each task will provide exactly one record for each
!      call to collect_on_comm: 1 bona fide output record from server task
!      0 and noops from the rest.
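!
! Worked example (hypothetical layout): suppose two servers share
! mpi_comm_local and each serves two compute tasks, with compute task 0
! attached to server task 0.  For a replicated put_dom_ti record,
! server 0's obuf holds [record, noop] and server 1's obuf holds
! [noop, noop].  Rule 5 lets server 0 contribute only the bona fide
! record and server 1 exactly one noop, so each call to collect_on_comm
! on mpi_comm_local receives exactly one record per server.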

              IF ((hdr_tag.EQ.int_noop.AND.mytask_local.NE.0.AND.num_noops.LE.0)  &
                  .OR.hdr_tag.NE.int_noop) THEN
                write(VarName,'(I5.5)')vid
!write(0,*) 'X-2', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
                call add_to_bufsize_for_field( VarName, hdrbufsize )
                vid = vid+1
              ENDIF
              IF ( hdr_tag .EQ. int_noop ) num_noops = num_noops + 1
              icurs = icurs + hdrbufsize
          END SELECT
        ENDDO ! }
! Store the headers and field data in internal buffers.  The first call to
! store_piece_of_field() allocates internal buffers using sizes computed by
! calls to add_to_bufsize_for_field(). 
        vid = 0
        icurs = itypesize
        num_noops = 0
        num_commit_messages = 0
        num_field_training_msgs = 0
        DO WHILE ( icurs .lt. obufsize ) !{
!write(0,*) 'A icurs ', icurs, ' obufsize ', obufsize
          hdr_tag = get_hdr_tag( obuf ( icurs / itypesize ) )
          SELECT CASE ( hdr_tag )
            CASE ( int_field )
              CALL int_get_write_field_header ( obuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize,  &
                                                DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
                                                DomainDesc , MemoryOrder , Stagger , DimNames ,              &
                                                DomainStart , DomainEnd ,                                    &
                                                MemoryStart , MemoryEnd ,                                    &
                                                PatchStart , PatchEnd )
              chunksize = (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
                          (PatchEnd(3)-PatchStart(3)+1)*ftypesize

              IF ( DomainDesc .EQ. 333933 ) THEN  ! Training write, only one per group of tasks
                 IF ( num_field_training_msgs .EQ. 0 ) THEN
                   call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
!write(0,*) 'A-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
                 ENDIF
                 num_field_training_msgs = num_field_training_msgs + 1
              ELSE
                 call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
!write(0,*) 'A-2a', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
              ENDIF
              icurs = icurs + hdrbufsize
              ! If this is a real write (i.e. not a training write), store
              ! this piece of this field.
              IF ( DomainDesc .NE. 333933 ) THEN   ! magic number
!write(0,*) 'A-1a', chunksize, TRIM(VarName),PatchStart(1:3),PatchEnd(1:3)
                call store_piece_of_field( obuf(icurs/itypesize), VarName, chunksize )
                icurs = icurs + chunksize
              ENDIF
            CASE ( int_open_for_write_commit )  ! only one per group of tasks
              hdrbufsize = obuf(icurs/itypesize)
              IF (num_commit_messages.EQ.0) THEN
                call store_piece_of_field( obuf(icurs/itypesize), 'COMMIT', hdrbufsize )
              ENDIF
              num_commit_messages = num_commit_messages + 1
              icurs = icurs + hdrbufsize
            CASE DEFAULT
              hdrbufsize = obuf(icurs/itypesize)
              IF ((hdr_tag.EQ.int_noop.AND.mytask_local.NE.0.AND.num_noops.LE.0)  &
                  .OR.hdr_tag.NE.int_noop) THEN
                write(VarName,'(I5.5)')vid
!write(0,*) 'A-2b', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
                call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
                vid = vid+1
              ENDIF
              IF ( hdr_tag .EQ. int_noop ) num_noops = num_noops + 1
              icurs = icurs + hdrbufsize
          END SELECT
        ENDDO !}

! Now, for each field, retrieve headers and patches (data) from the internal
! buffers and collect them all on the I/O quilt server "root" task.
        CALL init_retrieve_pieces_of_field
! Retrieve header and all patches for the first field from the internal
! buffers. 
        CALL retrieve_pieces_of_field ( obuf , VarName, obufsize, sz, retval )
! Sum sizes of all headers and patches (data) for this field from all I/O
! servers in this I/O server group onto the I/O server "root".
        CALL MPI_Reduce( sz, bigbufsize, 1, MPI_INTEGER,  &
                         MPI_SUM, ntasks_local_group-1,         &
                         mpi_comm_local, ierr )
!write(0,*)'seed: sz ',sz,' bigbufsize ',bigbufsize,' VarName ', TRIM(VarName),' retval ',retval

! Loop until there are no more fields to retrieve from the internal buffers.
        DO WHILE ( retval ) !{
#if 0
#else

! I/O server "root" allocates space to collect headers and fields from all
! other servers in this I/O server group.
          IF ( mytask_local .EQ. ntasks_local_group-1 ) THEN
            ALLOCATE( bigbuf( (bigbufsize+1)/itypesize ) )
          ENDIF

! Collect buffers and fields from all I/O servers in this I/O server group
! onto the I/O server "root"
          CALL collect_on_comm_debug2(__FILE__,__LINE__,Trim(VarName),        &
                                get_hdr_tag(obuf),sz,get_hdr_rec_size(obuf),  &
                                mpi_comm_local,                               &
                                onebyte,                                      &
                                obuf, sz,                                     &
                                bigbuf, bigbufsize )
! The I/O server "root" now handles collected requests from all compute
! tasks served by this I/O server group (i.e. all compute tasks). 
          IF ( mytask_local .EQ. ntasks_local_group-1 ) THEN
!jjj = 4
!do iii = 1, ntasks_local_group
!  write(0,*)'i,j,tag,size ', iii, jjj, get_hdr_tag(bigbuf(jjj/4)),get_hdr_rec_size(bigbuf(jjj/4))
!  jjj = jjj + get_hdr_rec_size(bigbuf(jjj/4))
!enddo

            icurs = itypesize  ! icurs is a byte counter, but buffer is integer

            stored_write_record = .false.

! The I/O server "root" loops over the collected requests. 
            DO WHILE ( icurs .lt. bigbufsize ) !{
              CALL mpi_type_size ( MPI_INTEGER , itypesize , ierr )

!write(0,*)'B tag,size ',icurs,get_hdr_tag( bigbuf(icurs/itypesize) ),get_hdr_rec_size( bigbuf(icurs/itypesize) )
! The I/O server "root" gets the request out of the next header and
! handles it by, in most cases, calling the appropriate external I/O package
! interface.
              SELECT CASE ( get_hdr_tag( bigbuf(icurs/itypesize) ) )
! The I/O server "root" handles the "noop" (do nothing) request.  This is
! actually quite easy.  "Noop" requests exist to help avoid race conditions. 
! In some cases, only one compute task will know everything about a request,
! so other compute tasks send "noop" requests. 
                CASE ( int_noop )
                  CALL int_get_noop_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize )
                  icurs = icurs + hdrbufsize

! The I/O server "root" handles the "put_dom_td_real" request.
                CASE ( int_dom_td_real )
                  CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
                  ALLOCATE( RData( bigbuf(icurs/itypesize + 4 ) ) )      ! 5 is the count of data items for this record ; defined in collect_on_comm.c
                  CALL int_get_td_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
                                          DataHandle, DateStr, Element, RData, Count, code )
                  icurs = icurs + hdrbufsize

                  SELECT CASE (use_package(io_form(DataHandle)))
#ifdef NETCDF
                    CASE ( IO_NETCDF   )
                      CALL ext_ncd_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
#endif
#ifdef INTIO
                    CASE ( IO_INTIO   )
                      CALL ext_int_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
#endif
#ifdef YYY
                 CASE ( IO_YYY )
                    CALL ext_yyy_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
#endif
#ifdef GRIB1
                 CASE ( IO_GRIB1 )
                    CALL ext_gr1_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
#endif
#ifdef GRIB2
                 CASE ( IO_GRIB2 )
                    CALL ext_gr2_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
#endif
                    CASE DEFAULT
                      Status = 0
                  END SELECT

                  DEALLOCATE( RData )
! The I/O server "root" handles the "put_dom_ti_real" request.
                CASE ( int_dom_ti_real )
!write(0,*)' int_dom_ti_real '
                  CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
                  ALLOCATE( RData( bigbuf(icurs/itypesize + 4 ) ) )      ! 5 is the count of data items for this record ; defined in collect_on_comm.c
                  CALL int_get_ti_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
                                          DataHandle, Element, RData, Count, code )
                  icurs = icurs + hdrbufsize

                  SELECT CASE (use_package(io_form(DataHandle)))
#ifdef NETCDF
                    CASE ( IO_NETCDF   )
                      CALL ext_ncd_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
!write(0,*)'ext_ncd_put_dom_ti_real ',handle(DataHandle),TRIM(Element),RData,Status
#endif
#ifdef INTIO
                    CASE ( IO_INTIO   )
                      CALL ext_int_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
#endif
#ifdef YYY
                 CASE ( IO_YYY )
                    CALL ext_yyy_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
#endif
#ifdef GRIB1
                 CASE ( IO_GRIB1 )
                    CALL ext_gr1_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
#endif
#ifdef GRIB2
                 CASE ( IO_GRIB2 )
                    CALL ext_gr2_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
#endif
                    CASE DEFAULT
                      Status = 0
                  END SELECT

                  DEALLOCATE( RData )

! The I/O server "root" handles the "put_dom_td_integer" request.
                CASE ( int_dom_td_integer )
!write(0,*)' int_dom_td_integer '
                  CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
                  ALLOCATE( IData( bigbuf(icurs/itypesize + 4 ) ) )      ! 5 is the count of data items for this record ; defined in collect_on_comm.c
                  CALL int_get_td_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
                                          DataHandle, DateStr, Element, IData, Count, code )
                  icurs = icurs + hdrbufsize

                  SELECT CASE (use_package(io_form(DataHandle)))
#ifdef NETCDF
                    CASE ( IO_NETCDF   )
                      CALL ext_ncd_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
#endif
#ifdef INTIO
                    CASE ( IO_INTIO   )
                      CALL ext_int_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
#endif
#ifdef YYY
                 CASE ( IO_YYY )
                    CALL ext_yyy_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
#endif
#ifdef GRIB1
                 CASE ( IO_GRIB1 )
                    CALL ext_gr1_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
#endif
#ifdef GRIB2
                 CASE ( IO_GRIB2 )
                    CALL ext_gr2_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
#endif
                    CASE DEFAULT
                      Status = 0
                  END SELECT

                  DEALLOCATE( IData )

! The I/O server "root" handles the "put_dom_ti_integer" request.
                CASE ( int_dom_ti_integer )
!write(0,*)' int_dom_ti_integer '

                  CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
                  ALLOCATE( IData( bigbuf(icurs/itypesize + 4 ) ) )      ! 5 is the count of data items for this record ; defined in collect_on_comm.c
                  CALL int_get_ti_header( bigbuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
                                          DataHandle, Element, IData, Count, code )
                  icurs = icurs + hdrbufsize
                  SELECT CASE (use_package(io_form(DataHandle)))
#ifdef NETCDF
                    CASE ( IO_NETCDF   )
                      CALL ext_ncd_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
!write(0,*)'ext_ncd_put_dom_ti_integer ',handle(DataHandle),TRIM(Element),IData,Status
#endif
#ifdef INTIO
                    CASE ( IO_INTIO   )
                      CALL ext_int_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
#endif
#ifdef YYY
                 CASE ( IO_YYY )
                    CALL ext_yyy_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
#endif
#ifdef GRIB1
                 CASE ( IO_GRIB1 )
                    CALL ext_gr1_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
#endif
#ifdef GRIB2
                 CASE ( IO_GRIB2 )
                    CALL ext_gr2_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
#endif

                    CASE DEFAULT
                      Status = 0
                  END SELECT

                  DEALLOCATE( IData)

! The I/O server "root" handles the "set_time" request.
                CASE ( int_set_time )
!write(0,*)' int_set_time '
                  CALL int_get_ti_header_char( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
                                               DataHandle, Element, VarName, CData, code )
                  SELECT CASE (use_package(io_form(DataHandle)))
#ifdef INTIO
                    CASE ( IO_INTIO   )
                      CALL ext_int_set_time ( handle(DataHandle), TRIM(CData), Status)
#endif
                    CASE DEFAULT
                      Status = 0
                  END SELECT

                  icurs = icurs + hdrbufsize

! The I/O server "root" handles the "put_dom_ti_char" request.
                CASE ( int_dom_ti_char )
!write(0,*)' before int_get_ti_header_char '
                  CALL int_get_ti_header_char( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
                                               DataHandle, Element, VarName, CData, code )
!write(0,*)' after int_get_ti_header_char ',VarName

                  SELECT CASE (use_package(io_form(DataHandle)))
#ifdef NETCDF
                    CASE ( IO_NETCDF   )
                      CALL ext_ncd_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
#endif
#ifdef INTIO
                    CASE ( IO_INTIO   )
                      CALL ext_int_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
#endif
#ifdef YYY
                 CASE ( IO_YYY )
                    CALL ext_yyy_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
#endif
#ifdef GRIB1
                 CASE ( IO_GRIB1 )
                    CALL ext_gr1_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
#endif
#ifdef GRIB2
                 CASE ( IO_GRIB2 )
                    CALL ext_gr2_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
#endif
                    CASE DEFAULT
                      Status = 0
                  END SELECT

                  icurs = icurs + hdrbufsize

! The I/O server "root" handles the "put_var_ti_char" request.
                CASE ( int_var_ti_char )
!write(0,*)' int_var_ti_char '
                  CALL int_get_ti_header_char( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
                                               DataHandle, Element, VarName, CData, code )

                  SELECT CASE (use_package(io_form(DataHandle)))
#ifdef NETCDF
                    CASE ( IO_NETCDF   )
                      CALL ext_ncd_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
#endif
#ifdef INTIO
                    CASE ( IO_INTIO   )
                      CALL ext_int_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
#endif
#ifdef YYY
                 CASE ( IO_YYY )
                    CALL ext_yyy_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
#endif
#ifdef GRIB1
                 CASE ( IO_GRIB1 )
                    CALL ext_gr1_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
#endif
#ifdef GRIB2
                 CASE ( IO_GRIB2 )
                    CALL ext_gr2_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
#endif
                    CASE DEFAULT
                      Status = 0
                  END SELECT

                  icurs = icurs + hdrbufsize

                CASE ( int_ioexit )
! ioexit is now handled by sending negative message length to server
                  CALL wrf_error_fatal( &
                         "quilt: should have handled int_ioexit already")
! The I/O server "root" handles the "ioclose" request.
                CASE ( int_ioclose )
                  CALL int_get_handle_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
                                              DataHandle , code )
                  icurs = icurs + hdrbufsize

                  IF ( DataHandle .GE. 1 ) THEN
!JMDEBUGwrite(0,*)'closing DataHandle ',DataHandle,' io_form ',io_form(DataHandle)

                  SELECT CASE (use_package(io_form(DataHandle)))
#ifdef NETCDF
                    CASE ( IO_NETCDF   )
                      CALL ext_ncd_inquire_filename( handle(DataHandle), fname, fstat, Status )
                      IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
                        CALL ext_ncd_ioclose(handle(DataHandle),Status)
                      ENDIF
#endif
#ifdef PNETCDF
                    CASE ( IO_PNETCDF   )
                      CALL ext_pnc_inquire_filename( handle(DataHandle), fname, fstat, Status )
                      IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
                        CALL ext_pnc_ioclose(handle(DataHandle),Status)
                      ENDIF
#endif
#ifdef INTIO
                    CASE ( IO_INTIO   )
                      CALL ext_int_inquire_filename( handle(DataHandle), fname, fstat, Status )
                      IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
                        CALL ext_int_ioclose(handle(DataHandle),Status)
                      ENDIF
#endif
#ifdef YYY
                 CASE ( IO_YYY )
                    CALL ext_yyy_inquire_filename( handle(DataHandle), fname, fstat, Status )
                    IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
                      CALL ext_yyy_ioclose(handle(DataHandle),Status)
                    ENDIF
#endif
#ifdef GRIB1
                 CASE ( IO_GRIB1 )
                    CALL ext_gr1_inquire_filename( handle(DataHandle), fname, fstat, Status )
                    IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
                      CALL ext_gr1_ioclose(handle(DataHandle),Status)
                    ENDIF
#endif
#ifdef GRIB2
                 CASE ( IO_GRIB2 )
                    CALL ext_gr2_inquire_filename( handle(DataHandle), fname, fstat, Status )
                    IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
                      CALL ext_gr2_ioclose(handle(DataHandle),Status)
                    ENDIF
#endif
                    CASE DEFAULT
                      Status = 0
                  END SELECT
                  ENDIF

1151! The I/O server "root" handles the "open_for_write_begin" request.
1152                CASE ( int_open_for_write_begin )
1153
1154                  CALL int_get_ofwb_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
1155                                            FileName,SysDepInfo,io_form_arg,DataHandle )
1156
1157!write(0,*)' int_open_for_write_begin itypesize ',itypesize,' itypesize ',itypesize
1158!write(0,*)' int_open_for_write_begin icurs ', icurs, hdrbufsize
1159!JMDEBUGwrite(0,*)' int_open_for_write_begin FileName ',TRIM(FileName) , ' DataHandle ', DataHandle
1160!write(0,*)' int_open_for_write_begin SysDepInfo ',TRIM(SysDepInfo)
1161                  icurs = icurs + hdrbufsize
1162!write(0,*)' int_open_for_write_begin new icurs,tag,size ', icurs, get_hdr_tag( bigbuf(icurs/itypesize) ),get_hdr_rec_size( bigbuf(icurs/itypesize) )
1163               
1164                  io_form(DataHandle) = io_form_arg
1165
1166                  SELECT CASE (use_package(io_form(DataHandle)))
1167#ifdef NETCDF
1168                    CASE ( IO_NETCDF   )
1169                      CALL ext_ncd_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
1170!write(0,*)'ext_ncd_open_for_write_begin ',Trim(FileName),DataHandle,handle(DataHandle),Status
1171#endif
1172#ifdef INTIO
1173                    CASE ( IO_INTIO   )
1174                      CALL ext_int_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
1175#endif
1176#ifdef YYY
1177                    CASE ( IO_YYY )
1178                       CALL ext_yyy_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
1179#endif
1180#ifdef GRIB1
1181                    CASE ( IO_GRIB1 )
1182                       CALL ext_gr1_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
1183#endif
1184#ifdef GRIB2
1185                    CASE ( IO_GRIB2 )
1186                       CALL ext_gr2_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
1187#endif
1188                    CASE DEFAULT
1189                      Status = 0
1190                  END SELECT
1191               
1192                  okay_to_write(DataHandle) = .false.
1193
1194! The I/O server "root" handles the "open_for_write_commit" request.
1195! In this case, the "okay_to_commit" is simply set to .true. so "write_field"
1196! requests will initiate writes to disk.  Actual commit will be done after
1197! all requests in this batch have been handled.
1198                CASE ( int_open_for_write_commit )
1199
1200                  CALL int_get_handle_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
1201                                              DataHandle , code )
1202                  icurs = icurs + hdrbufsize
1203                  okay_to_commit(DataHandle) = .true.
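! To make the deferred-commit protocol concrete, here is a sketch of the
! life cycle of one output file as this server sees it (an illustrative
! summary of the logic in this file, not additional behavior):
!   int_open_for_write_begin  -> ext_*_open_for_write_begin(); okay_to_write(h) = .false.
!   int_field (training)      -> ext_*_write_field() records metadata only
!   int_open_for_write_commit -> okay_to_commit(h) = .true.
!   end of batch              -> ext_*_open_for_write_commit(); okay_to_write(h) = .true.
!   int_field (real)          -> store_patch_in_outbuf(); write_outbuf() flushes to disk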
1204
1205! The I/O server "root" handles the "write_field" (int_field) request.
1206! If okay_to_write(DataHandle) is .true. then the patch in the
1207! header (bigbuf) is written to a globally-sized internal output buffer via
1208! the call to store_patch_in_outbuf().  Note that this is where the actual
1209! "quilting" (reassembly of patches onto a full-size domain) is done.  If
1210! okay_to_write(DataHandle) is .false. then external I/O package interfaces
1211! are called to write metadata for I/O formats that support native metadata.
1212!
1213! NOTE that the I/O server "root" will only see write_field (int_field)
1214! requests AFTER an "iosync" request.
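! As a concrete (hypothetical) example of the quilting step: with a 2-D
! decomposition, a compute task owning PatchStart=(/1,1,1/), PatchEnd=(/50,40,1/)
! and its neighbor owning PatchStart=(/51,1,1/), PatchEnd=(/100,40,1/) each
! contribute one patch; store_patch_in_outbuf() copies both into the proper
! windows of a single 100x40 domain-sized buffer, which write_outbuf() later
! hands to the I/O package as one record.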
1215                CASE ( int_field )
1216                  CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
1217                  CALL int_get_write_field_header ( bigbuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize,  &
1218                                                    DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
1219                                                    DomainDesc , MemoryOrder , Stagger , DimNames ,              &
1220                                                    DomainStart , DomainEnd ,                                    &
1221                                                    MemoryStart , MemoryEnd ,                                    &
1222                                                    PatchStart , PatchEnd )
1223!write(0,*)' int_field ',TRIM(VarName),DataHandle,okay_to_write(DataHandle)
1224                  icurs = icurs + hdrbufsize
1225
1226                  IF ( okay_to_write(DataHandle) ) THEN
1227
1228!                    WRITE(0,*)'>>> ',TRIM(DateStr), ' ', TRIM(VarName), ' ', TRIM(MemoryOrder), ' ', &
1229!                        (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)*(PatchEnd(3)-PatchStart(3)+1)
1230
1231                    IF ( FieldType .EQ. WRF_FLOAT .OR. FieldType .EQ. WRF_DOUBLE)  THEN
1232                      ! Note that the WRF_DOUBLE branch of this IF statement must come first since
1233                      ! WRF_FLOAT is set equal to WRF_DOUBLE during autopromotion builds. 
1234                      IF ( FieldType .EQ. WRF_DOUBLE)  THEN
1235! this branch has not been tested TBH: 20050406
1236                        CALL mpi_type_size( MPI_DOUBLE_PRECISION, ftypesize, ierr )
1237                      ELSE
1238                        CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
1239                      ENDIF
1240                      stored_write_record = .true.
1241                      CALL store_patch_in_outbuf ( bigbuf(icurs/itypesize), dummybuf, TRIM(DateStr), TRIM(VarName) , &
1242                                                   FieldType, TRIM(MemoryOrder), TRIM(Stagger), DimNames, &
1243                                                   DomainStart , DomainEnd , &
1244                                                   MemoryStart , MemoryEnd , &
1245                                                   PatchStart , PatchEnd )
1246
1247                    ELSE IF ( FieldType .EQ. WRF_INTEGER ) THEN
1248                      CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
1249                      stored_write_record = .true.
1250                      CALL store_patch_in_outbuf ( dummybuf, bigbuf(icurs/itypesize), TRIM(DateStr), TRIM(VarName) , &
1251                                                   FieldType, TRIM(MemoryOrder), TRIM(Stagger), DimNames, &
1252                                                   DomainStart , DomainEnd , &
1253                                                   MemoryStart , MemoryEnd , &
1254                                                   PatchStart , PatchEnd )
1255                    ELSE IF ( FieldType .EQ. WRF_LOGICAL ) THEN
1256                      ftypesize = LWORDSIZE
1257                    ENDIF
1258                    icurs = icurs + (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
1259                                    (PatchEnd(3)-PatchStart(3)+1)*ftypesize
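! e.g. for a hypothetical 50x40x1 REAL patch with ftypesize = 4, the byte
! cursor advances 50*40*1*4 = 8000 bytes past the patch data to the next
! header in bigbuf.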
1260                  ELSE
1261                    SELECT CASE (use_package(io_form(DataHandle)))
1262#ifdef NETCDF
1263                      CASE ( IO_NETCDF   )
1264                        CALL ext_ncd_write_field ( handle(DataHandle) , TRIM(DateStr) ,         &
1265                                   TRIM(VarName) , dummy , FieldType , Comm , IOComm,           &
1266                                   DomainDesc , TRIM(MemoryOrder) , TRIM(Stagger) , DimNames ,  &
1267                                   DomainStart , DomainEnd ,                                    &
1268                                   DomainStart , DomainEnd ,                                    &
1269                                   DomainStart , DomainEnd ,                                    &
1270                                   Status )
1271#endif
1272#if 0
1273! since this is training and the grib output doesn't need training, disable this branch.
1274#ifdef YYY
1275                 CASE ( IO_YYY )
1276                      CALL ext_YYY_write_field ( handle(DataHandle) , TRIM(DateStr) ,         &
1277                                 TRIM(VarName) , dummy , FieldType , Comm , IOComm,           &
1278                                 DomainDesc , TRIM(MemoryOrder) , TRIM(Stagger) , DimNames ,  &
1279                                 DomainStart , DomainEnd ,                                    &
1280                                 DomainStart , DomainEnd ,                                    &
1281                                 DomainStart , DomainEnd ,                                    &
1282                                 Status )
1283#endif
1284#endif
1285                      CASE DEFAULT
1286                        Status = 0
1287                    END SELECT
1288                  ENDIF
1289                CASE ( int_iosync )
1290                  CALL int_get_handle_header( bigbuf(icurs/itypesize), hdrbufsize, itypesize, &
1291                                            DataHandle , code )
1292                  icurs = icurs + hdrbufsize
1293                CASE DEFAULT
1294                  WRITE(mess,*)'quilt: bad tag: ',get_hdr_tag( bigbuf(icurs/itypesize) ),' icurs ',icurs/itypesize
1295                  CALL wrf_error_fatal( mess )
1296              END SELECT
1297
1298            ENDDO !}
1299! Now, the I/O server "root" has finished handling all commands from the latest
1300! call to retrieve_pieces_of_field().
1301
1302            IF (stored_write_record) THEN
1303! If any fields have been stored in a globally-sized internal output buffer
1304! (via a call to store_patch_in_outbuf()) then call write_outbuf() to write
1305! them to disk now.
1306! NOTE that the I/O server "root" will only have called
1307! store_patch_in_outbuf() when handling write_field (int_field)
1308! commands which only arrive AFTER an "iosync" command.
1309!              CALL start_timing
1310              CALL write_outbuf ( handle(DataHandle), use_package(io_form(DataHandle)))
1311!              CALL end_timing( "quilt: call to write_outbuf" )
1312            ENDIF
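! write_outbuf() lives in module_quilt_outbuf_ops (USE'd by these routines);
! the expectation here is that it emits each fully quilted field through the
! selected package's write interface and then releases the buffer space.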
1313
1314! If one or more "open_for_write_commit" commands were encountered from the
1315! latest call to retrieve_pieces_of_field() then call the package-specific
1316! routine to do the commit.
1317            IF (okay_to_commit(DataHandle)) THEN
1318
1319              SELECT CASE (use_package(io_form(DataHandle)))
1320#ifdef NETCDF
1321                CASE ( IO_NETCDF   )
1322                  CALL ext_ncd_inquire_filename( handle(DataHandle), fname, fstat, Status )
1323                  IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1324                    CALL ext_ncd_open_for_write_commit(handle(DataHandle),Status)
1325                    okay_to_write(DataHandle) = .true.
1326                  ENDIF
1327#endif
1328#ifdef INTIO
1329                CASE ( IO_INTIO   )
1330                  CALL ext_int_inquire_filename( handle(DataHandle), fname, fstat, Status )
1331                  IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1332                    CALL ext_int_open_for_write_commit(handle(DataHandle),Status)
1333                    okay_to_write(DataHandle) = .true.
1334                  ENDIF
1335#endif
1336#ifdef YYY
1337                 CASE ( IO_YYY )
1338                    CALL ext_yyy_inquire_filename( handle(DataHandle), fname, fstat, Status )
1339                    IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1340                       CALL ext_yyy_open_for_write_commit(handle(DataHandle),Status)
1341                       okay_to_write(DataHandle) = .true.
1342                    ENDIF
1343#endif
1344#ifdef GRIB1
1345                 CASE ( IO_GRIB1 )
1346                    CALL ext_gr1_inquire_filename( handle(DataHandle), fname, fstat, Status )
1347                    IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1348                       CALL ext_gr1_open_for_write_commit(handle(DataHandle),Status)
1349                       okay_to_write(DataHandle) = .true.
1350                    ENDIF
1351#endif
1352#ifdef GRIB2
1353                 CASE ( IO_GRIB2 )
1354                    CALL ext_gr2_inquire_filename( handle(DataHandle), fname, fstat, Status )
1355                    IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
1356                       CALL ext_gr2_open_for_write_commit(handle(DataHandle),Status)
1357                       okay_to_write(DataHandle) = .true.
1358                    ENDIF
1359#endif
1360
1361                CASE DEFAULT
1362                  Status = 0
1363              END SELECT
1364
1365            okay_to_commit(DataHandle) = .false.
1366          ENDIF
1367          DEALLOCATE( bigbuf )
1368        ENDIF
1369#endif
1370
1371! Retrieve header and all patches for the next field from the internal
1372! buffers. 
1373        CALL retrieve_pieces_of_field ( obuf , VarName, obufsize, sz, retval )
1374! Sum sizes of all headers and patches (data) for this field from all I/O
1375! servers in this I/O server group onto the I/O server "root".
1376        CALL MPI_Reduce( sz, bigbufsize, 1, MPI_INTEGER,  &
1377                         MPI_SUM, ntasks_local_group-1,         &
1378                         mpi_comm_local, ierr )
1379! Then, return to the top of the loop to collect headers and data from all
1380! I/O servers in this I/O server group onto the I/O server "root" and handle
1381! the next batch of commands. 
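! For instance, if three servers hold sz = 4000, 0 and 2400 bytes of pieces
! for this field, the reduce above leaves bigbufsize = 6400 on the "root"
! (task ntasks_local_group-1 of mpi_comm_local); bigbufsize on the other
! ranks is not significant after MPI_Reduce.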
1382      END DO !}
1383
1384      DEALLOCATE( obuf )
1385
1386      ! flush output files if needed
1387      IF (stored_write_record) THEN
1388!         CALL start_timing()
1389        SELECT CASE ( use_package(io_form(DataHandle)) )
1390#ifdef NETCDF
1391          CASE ( IO_NETCDF   )
1392            CALL ext_ncd_iosync( handle(DataHandle), Status )
1393#endif
1394#ifdef XXX
1395          CASE ( IO_XXX   )
1396            CALL ext_xxx_iosync( handle(DataHandle), Status )
1397#endif
1398#ifdef YYY
1399          CASE ( IO_YYY   )
1400            CALL ext_yyy_iosync( handle(DataHandle), Status )
1401#endif
1402#ifdef ZZZ
1403          CASE ( IO_ZZZ   )
1404            CALL ext_zzz_iosync( handle(DataHandle), Status )
1405#endif
1406#ifdef GRIB1
1407          CASE ( IO_GRIB1   )
1408            CALL ext_gr1_iosync( handle(DataHandle), Status )
1409#endif
1410#ifdef GRIB2
1411          CASE ( IO_GRIB2   )
1412            CALL ext_gr2_iosync( handle(DataHandle), Status )
1413#endif
1414#ifdef INTIO
1415          CASE ( IO_INTIO   )
1416            CALL ext_int_iosync( handle(DataHandle), Status )
1417#endif
1418          CASE DEFAULT
1419            Status = 0
1420        END SELECT
1421!CALL end_timing( "quilt: flush" )
1422      ENDIF
1423
1424      END DO ! }
1425
1426    END SUBROUTINE quilt
1427
1428    SUBROUTINE quilt_pnc
1429!<DESCRIPTION>
1430! Same as the quilt() routine except that _all_ of the I/O servers that call
1431! it actually write data to disk, using pNetCDF. This version is only used
1432! when the code is compiled with PNETCDF_QUILT defined.
1433!</DESCRIPTION>
1434      USE module_state_description
1435      USE module_quilt_outbuf_ops
1436      IMPLICIT NONE
1437      INCLUDE 'mpif.h'
1438#include "intio_tags.h"
1439#include "wrf_io_flags.h"
1440      INTEGER itag, ninbuf, ntasks_io_group, ntasks_local_group, mytask_local, ierr
1441      INTEGER istat
1442      INTEGER mytask_io_group
1443      INTEGER   :: nout_set = 0
1444      INTEGER   :: obufsize, bigbufsize, chunksize, sz
1445      REAL,                 DIMENSION(1) :: dummy
1446      INTEGER, ALLOCATABLE, DIMENSION(:) :: obuf, bigbuf
1447      REAL,    ALLOCATABLE, DIMENSION(:) :: RDATA
1448      INTEGER, ALLOCATABLE, DIMENSION(:) :: IDATA
1449      CHARACTER (LEN=512) :: CDATA
1450      CHARACTER (LEN=80) :: fname
1451      INTEGER icurs, hdrbufsize, itypesize, ftypesize, rtypesize, Status, fstat, io_form_arg
1452      INTEGER :: DataHandle, FieldType, Comm, IOComm, DomainDesc, code, Count
1453      INTEGER, DIMENSION(3) :: DomainStart , DomainEnd , MemoryStart , MemoryEnd , PatchStart , PatchEnd
1454      INTEGER :: dummybuf(1)
1455      INTEGER :: num_noops, num_commit_messages, num_field_training_msgs, hdr_tag
1456      CHARACTER (len=256) :: DateStr , Element, VarName, MemoryOrder , Stagger , DimNames(3), FileName, SysDepInfo, mess
1457      INTEGER, EXTERNAL :: use_package
1458      LOGICAL           :: stored_write_record, retval, written_record
1459      INTEGER iii, jjj, vid, CC, DD
1460
1461!      logical okay_to_w
1462!      character*120 sysline
1463
1464! Call ext_pkg_ioinit() routines to initialize I/O packages. 
1465      SysDepInfo = " "
1466#ifdef NETCDF
1467      CALL ext_ncd_ioinit( SysDepInfo, ierr)
1468#endif
1469#ifdef PNETCDF_QUILT
1470      CALL ext_pnc_ioinit( SysDepInfo, ierr)
1471#endif
1472#ifdef INTIO
1473      CALL ext_int_ioinit( SysDepInfo, ierr )
1474#endif
1475#ifdef XXX
1476      CALL ext_xxx_ioinit( SysDepInfo, ierr)
1477#endif
1478#ifdef YYY
1479      CALL ext_yyy_ioinit( SysDepInfo, ierr)
1480#endif
1481#ifdef ZZZ
1482      CALL ext_zzz_ioinit( SysDepInfo, ierr)
1483#endif
1484#ifdef GRIB1
1485      CALL ext_gr1_ioinit( SysDepInfo, ierr)
1486#endif
1487#ifdef GRIB2
1488      CALL ext_gr2_ioinit( SysDepInfo, ierr)
1489#endif
1490
1491      okay_to_commit = .false.
1492      stored_write_record = .false.
1493      ninbuf = 0
1494      ! get info. about the I/O server group that this I/O server task
1495      ! belongs to
1496      CALL mpi_x_comm_size( mpi_comm_io_groups(1), ntasks_io_group,    ierr )
1497      CALL MPI_COMM_RANK( mpi_comm_io_groups(1), mytask_io_group,    ierr )
1498      CALL mpi_x_comm_size( mpi_comm_local,        ntasks_local_group, ierr )
1499      CALL MPI_COMM_RANK( mpi_comm_local,        mytask_local,       ierr )
1500
1501      CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
1502      IF ( itypesize <= 0 ) THEN
1503        CALL wrf_error_fatal("frame/module_io_quilt.F: quilt_pnc: type size <= 0 invalid")
1504      ENDIF
1505
1506! Work out whether this I/O server processor has one fewer associated compute proc than
1507! the most any processor has.  This can happen when the number of I/O tasks does not
1508! evenly divide the number of compute tasks.  It is needed to keep the I/O tasks
1509! synchronized on the same message when they start communicating to stitch together
1510! an output.
1511! Compute processes associated with this task:
1512       CC = ntasks_io_group - 1
1513! Number of compute tasks per I/O task (less remainder)
1514       DD = ncompute_tasks / ntasks_local_group
1515!
1516! CC-DD is 1 on servers with the maximum number of compute clients and
1517!         0 on servers with one fewer than the maximum.
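! Hypothetical example: ncompute_tasks = 10 and ntasks_local_group = 3 give
! DD = 10/3 = 3; the one server with four compute clients (task 0 of
! mpi_comm_local) has CC = 4, so CC-DD = 1, while the other two servers have
! CC = 3 and CC-DD = 0.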
1518
1519
1520! infinite loop until shutdown message received
1521! This is the main request-handling loop.  I/O quilt servers stay in this loop
1522! until the model run ends. 
1523!okay_to_w = .false.
1524      DO WHILE (.TRUE.)  ! {
1525
1526!<DESCRIPTION>
1527! Each I/O server receives requests from its compute tasks.  Each request
1528! is contained in a data header (see module_internal_header_util.F for
1529! detailed descriptions of data headers).
1530! Each request is sent in two phases.  First, sizes of all messages that
1531! will be sent from the compute tasks to this I/O server are summed on the
1532! I/O server via MPI_reduce().  The I/O server then allocates buffer "obuf"
1533! and receives concatenated messages from the compute tasks in it via the
1534! call to collect_on_comm().  Note that "sizes" are generally expressed in
1535! *bytes* in this code so conversion to "count" (number of Fortran words) is
1536! required for Fortran indexing and MPI calls. 
1537!</DESCRIPTION>
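! A small worked example of the byte/word bookkeeping used throughout:
! with 4-byte integers (itypesize = 4), a 4096-byte message occupies
! 4096/4 = 1024 elements of the INTEGER buffer obuf, and a byte cursor
! icurs addresses element icurs/itypesize.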
1538        ! wait for info from compute tasks in the I/O group that we're ready to rock
1539        ! obufsize will contain number of *bytes*
1540!CALL start_timing
1541        ! first element of reduced is obufsize, second is DataHandle
1542        ! if needed (currently needed only for ioclose).
1543        reduced_dummy = 0
1544        CALL MPI_Reduce( reduced_dummy, reduced, 2, MPI_INTEGER,  &
1545                         MPI_SUM, mytask_io_group,          &
1546                         mpi_comm_io_groups(1), ierr )
1547        obufsize = reduced(1)
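! e.g. three compute clients contributing 512, 768 and 256 bytes give
! obufsize = reduced(1) = 1536 here; reduced(2) is used only for ioclose,
! when exactly one client contributes the nonzero handle value and the
! rest send zero.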
1548!CALL end_timing("MPI_Reduce at top of forever loop")
1549!JMDEBUGwrite(0,*)'obufsize = ',obufsize
1550! Negative obufsize will trigger I/O server exit. 
1551        IF ( obufsize .LT. 0 ) THEN
1552          IF ( obufsize .EQ. -100 ) THEN         ! magic number
1553#ifdef NETCDF
1554            CALL ext_ncd_ioexit( Status )
1555#endif
1556#ifdef PNETCDF_QUILT
1557            CALL ext_pnc_ioexit( Status )
1558#endif
1559#ifdef INTIO
1560            CALL ext_int_ioexit( Status )
1561#endif
1562#ifdef XXX
1563            CALL ext_xxx_ioexit( Status )
1564#endif
1565#ifdef YYY
1566            CALL ext_yyy_ioexit( Status )
1567#endif
1568#ifdef ZZZ
1569            CALL ext_zzz_ioexit( Status )
1570#endif
1571#ifdef GRIB1
1572            CALL ext_gr1_ioexit( Status )
1573#endif
1574#ifdef GRIB2
1575            CALL ext_gr2_ioexit( Status )
1576#endif
1577            CALL wrf_message ( 'I/O QUILT SERVERS DONE' )
1578            CALL mpi_finalize(ierr)
1579            STOP
1580          ELSE
1581            WRITE(mess,*)'Possible 32-bit overflow on output server. Try larger nio_tasks_per_group in namelist.'
1582            CALL wrf_error_fatal(mess)
1583          ENDIF
1584        ENDIF
1585
1586!        CALL start_timing
1587! Obufsize of zero signals a close
1588
1589! Allocate buffer obuf to be big enough for the data the compute tasks
1590! will send.  Note: obufsize is in *bytes*, so it is pared down by
1591! itypesize because the buffer is INTEGER.
1592        IF ( obufsize .GT. 0 ) THEN
1593          ALLOCATE( obuf( (obufsize+1)/itypesize ) )
1594
1595! let's roll; get the data from the compute procs and put in obuf
1596          CALL collect_on_comm_debug(__FILE__,__LINE__, mpi_comm_io_groups(1),        &
1597                                onebyte,                      &
1598                                dummy, 0,                     &
1599                                obuf, obufsize )
1600!          CALL end_timing( "quilt on server: collecting data from compute procs" )
1601        ELSE
1602          ! Necessarily, the compute processes send the ioclose signal,
1603          ! if there is one, after the iosync, which means they
1604          ! would stall on the ioclose message waiting for the quilt
1605          ! processes if it were handled the way other messages are
1606          ! collected, using collect_on_comm.  This special case avoids
1607          ! that, but it requires a special signal (obufsize zero) and
1608          ! the DataHandle of the file to be closed.  That handle is sent
1609          ! as the second word of the io_close message received by the
1610          ! MPI_Reduce above.  Then a header representing the ioclose
1611          ! message is constructed here and handled below as if it were
1612          ! received from the compute processes.  The clients (compute
1613          ! processes) must be careful to send this correctly: one
1614          ! compute process sends the actual handle and everyone else
1615          ! sends a zero, so the reduction sums to the value of the handle.
1616          !
1617          ALLOCATE( obuf( 4096 ) )
1618          ! DataHandle is provided as second element of reduced
1619          CALL int_gen_handle_header( obuf, obufsize, itypesize, &
1620                                      reduced(2) , int_ioclose )
1621        ENDIF
1622
1623!write(0,*)'calling init_store_piece_of_field'
1624! Now all messages received from the compute clients are stored in
1625! obuf.  Scan through obuf and extract headers and field data and store in
1626! internal buffers.  The scan is done twice, first to determine sizes of
1627! internal buffers required for storage of headers and fields and second to
1628! actually store the headers and fields.  This bit of code does not do any
1629! "quilting" (assembly of patches into full domains).  For each field, it
1630! simply writes all received patches for the field to disk.
1631! ARPDBG we can vastly reduce the number of writes to disk by stitching
1632! any contiguous patches together first. Has implications for synchronisation
1633! of pNetCDF calls though.
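! Sketch of the two-pass pattern (names are the routines used below):
!   pass 1: add_to_bufsize_for_field(VarName, nbytes) for every header/patch,
!           so per-field internal buffers can be sized;
!   pass 2: store_piece_of_field(obuf(...), VarName, nbytes) to copy the bytes.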
1634        CALL init_store_piece_of_field
1635        CALL mpi_type_size ( MPI_INTEGER , itypesize , ierr )
1636!write(0,*)'mpi_type_size returns ', itypesize
1637! Scan obuf the first time to calculate the size of the buffer required for
1638! each field.  Calls to add_to_bufsize_for_field() accumulate sizes. 
1639        vid = 0
1640        icurs = itypesize
1641        num_noops = 0
1642        num_commit_messages = 0
1643        num_field_training_msgs = 0
1644        DO WHILE ( icurs .lt. obufsize ) ! {
1645          hdr_tag = get_hdr_tag( obuf ( icurs / itypesize ) )
1646          SELECT CASE ( hdr_tag )
1647            CASE ( int_field )
1648              CALL int_get_write_field_header ( obuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize,  &
1649                                                DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
1650                                                DomainDesc , MemoryOrder , Stagger , DimNames ,              &
1651                                                DomainStart , DomainEnd ,                                    &
1652                                                MemoryStart , MemoryEnd ,                                    &
1653                                                PatchStart , PatchEnd )
1654              chunksize = (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
1655                          (PatchEnd(3)-PatchStart(3)+1)*ftypesize
1656
1657              IF ( DomainDesc .EQ. 333933 ) THEN  ! Training write, only one per group of tasks
1658                 IF ( num_field_training_msgs .EQ. 0 ) THEN
1659                   call add_to_bufsize_for_field( VarName, hdrbufsize )
1660!write(0,*) 'X-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
1661                 ENDIF
1662                 num_field_training_msgs = num_field_training_msgs + 1
1663              ELSE
1664                 call add_to_bufsize_for_field( VarName, hdrbufsize )
1665!write(0,*) 'X-2a', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
1666              ENDIF
1667              icurs = icurs + hdrbufsize
1668
1669!write(0,*) 'X-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
1670
1671              ! If this is a real write (i.e. not a training write), accumulate
1672              ! the buffer size for this field.
1673              IF ( DomainDesc .NE. 333933 ) THEN   ! magic number
1674!write(0,*) 'X-1a', chunksize, TRIM(VarName)
1675                call add_to_bufsize_for_field( VarName, chunksize )
1676                icurs = icurs + chunksize
1677              ENDIF
1678            CASE ( int_open_for_write_commit )  ! only one per group of tasks
1679              hdrbufsize = obuf(icurs/itypesize)
1680              IF (num_commit_messages.EQ.0) THEN
1681                call add_to_bufsize_for_field( 'COMMIT', hdrbufsize )
1682              ENDIF
1683              num_commit_messages = num_commit_messages + 1
1684              icurs = icurs + hdrbufsize
1685            CASE DEFAULT
1686              hdrbufsize = obuf(icurs/itypesize)
1687
1688! This logic and the logic in the loop below are used to determine whether
1689! to allow the noop records sent by the compute processes to go
1690! through. The purpose is to make sure that the communications between this
1691! server and the other servers in this quilt group stay synchronized in
1692! the collection loop below, even when the servers are serving different
1693! numbers of clients. Here are some conditions:
1694!
1695!   1. The number of compute clients served will not differ by more than 1.
1696!   2. The servers with +1 number of compute clients begin with task 0
1697!      of mpi_comm_local, the communicator shared by this group of servers.
1698!
1699!   3. For each collective field or metadata output from the compute tasks,
1700!      there will be one record sent to the associated i/o server task. The
1701!      i/o server task collects these records and stores them contiguously
1702!      in a buffer (obuf) using collect_on_comm above.  Thus, obuf on this
1703!      server task will contain one record from each associated compute
1704!      task, in order.
1705!
1706!   4. In the case of replicated output from the compute tasks
1707!      (e.g. put_dom_ti records and control records like
1708!      open_for_write_commit type records), only compute tasks for which
1709!      (compute_group_master == .TRUE.) send the record. The other compute
1710!      tasks send noop records. This is done so that each server task
1711!      receives exactly one record plus noops from the other compute tasks.
1712!
1713!   5. The logic below does not allow any noop records through, since each
1714!      I/O server task now receives a valid record from the 'compute-group
1715!      master' when doing replicated output (see the worked example below).
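! Worked example of condition 4 (hypothetical counts): with three compute
! clients, a replicated put_dom_ti record arrives once, from the
! compute-group master, together with two int_noop records; the real record
! is stored under a generated name ('00000', '00001', ... via
! write(VarName,'(I5.5)')vid below) and the noops are merely counted in
! num_noops.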
1716              IF (hdr_tag.NE.int_noop) THEN
1717                write(VarName,'(I5.5)')vid
1718!write(0,*) 'X-2', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
1719                call add_to_bufsize_for_field( VarName, hdrbufsize )
1720                vid = vid+1
1721              ENDIF
1722              IF ( hdr_tag .EQ. int_noop ) num_noops = num_noops + 1
1723              icurs = icurs + hdrbufsize
1724
1725          END SELECT
1726        ENDDO ! }
1727! Store the headers and field data in internal buffers.  The first call to
1728! store_piece_of_field() allocates internal buffers using sizes computed by
1729! calls to add_to_bufsize_for_field(). 
1730        vid = 0
1731        icurs = itypesize
1732        num_noops = 0
1733        num_commit_messages = 0
1734        num_field_training_msgs = 0
1735        DO WHILE ( icurs .lt. obufsize ) !{
1736!write(0,*) 'A icurs ', icurs, ' obufsize ', obufsize
1737          hdr_tag = get_hdr_tag( obuf ( icurs / itypesize ) )
1738          SELECT CASE ( hdr_tag )
1739            CASE ( int_field )
1740              CALL int_get_write_field_header ( obuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize,  &
1741                                                DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
1742                                                DomainDesc , MemoryOrder , Stagger , DimNames ,              &
1743                                                DomainStart , DomainEnd ,                                    &
1744                                                MemoryStart , MemoryEnd ,                                    &
1745                                                PatchStart , PatchEnd )
1746              chunksize = (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
1747                          (PatchEnd(3)-PatchStart(3)+1)*ftypesize
1748
1749              IF ( DomainDesc .EQ. 333933 ) THEN  ! Training write, only one per group of tasks
1750                 IF ( num_field_training_msgs .EQ. 0 ) THEN
1751                   call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
1752!write(0,*) 'A-1', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
1753                 ENDIF
1754                 num_field_training_msgs = num_field_training_msgs + 1
1755              ELSE
1756                 call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
1757!write(0,*) 'A-2a', icurs, hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
1758              ENDIF
1759              icurs = icurs + hdrbufsize
1760              ! If this is a real write (i.e. not a training write), store
1761              ! this piece of this field.
1762              IF ( DomainDesc .NE. 333933 ) THEN   ! magic number
1763                call store_piece_of_field( obuf(icurs/itypesize), VarName, chunksize )
1764                icurs = icurs + chunksize
1765!write(0,*) 'A-1a',TRIM(VarName),' icurs ',icurs,PatchStart(1:3),PatchEnd(1:3)
1766              ENDIF
1767            CASE ( int_open_for_write_commit )  ! only one per group of tasks
1768              hdrbufsize = obuf(icurs/itypesize)
1769              IF (num_commit_messages.EQ.0) THEN
1770                call store_piece_of_field( obuf(icurs/itypesize), 'COMMIT', hdrbufsize )
1771              ENDIF
1772              num_commit_messages = num_commit_messages + 1
1773              icurs = icurs + hdrbufsize
1774            CASE DEFAULT
1775              hdrbufsize = obuf(icurs/itypesize)
1776              IF (hdr_tag.NE.int_noop) THEN
1777
1778                write(VarName,'(I5.5)')vid
1779!write(0,*) 'A-2b', hdrbufsize, get_hdr_tag( obuf ( icurs / itypesize ) ) , get_hdr_rec_size( obuf ( icurs / itypesize ) ), TRIM(VarName)
1780                call store_piece_of_field( obuf(icurs/itypesize), VarName, hdrbufsize )
1781                vid = vid+1
1782              ENDIF
1783              IF ( hdr_tag .EQ. int_noop ) num_noops = num_noops + 1
1784              icurs = icurs + hdrbufsize
1785          END SELECT
1786       ENDDO !} while(icurs < obufsize)
1787
1788! Now, for each field, retrieve headers and patches (data) from the internal
1789! buffers and collect them all on the I/O quilt server "root" task.
1790       CALL init_retrieve_pieces_of_field
1791! Retrieve header and all patches for the first field from the internal
1792! buffers. 
1793       CALL retrieve_pieces_of_field ( obuf , VarName, obufsize, sz, retval )
1794       written_record = .false.
1795
1796! Loop until there are no more fields to retrieve from the internal buffers.
1797       DO WHILE ( retval ) !{
1798
1799! This I/O server now handles the collected requests from the compute
1800! tasks it serves
1801
1802            icurs = itypesize  ! icurs is a byte counter, but buffer is integer
1803
1804            stored_write_record = .false.
1805
1806! ALL I/O servers in this group loop over the collected requests they have
1807! received.
1808            DO WHILE ( icurs .lt. sz ) !{
1809
1810! The I/O server gets the request out of the next header and
1811! handles it by, in most cases, calling the appropriate external I/O package
1812! interface.
1813!write(0,*)__FILE__,__LINE__,'get_hdr_tag ',icurs,sz,get_hdr_tag( obuf(icurs/itypesize) )
1814              SELECT CASE ( get_hdr_tag( obuf(icurs/itypesize) ) )
1815! The I/O server handles the "noop" (do nothing) request.  This is
1816! actually quite easy.  "Noop" requests exist to help avoid race conditions. 
1817                CASE ( int_noop )
1818                  CALL int_get_noop_header( obuf(icurs/itypesize), &
1819                                            hdrbufsize, itypesize )
1820                  icurs = icurs + hdrbufsize
1821
1822! The I/O server "root" handles the "put_dom_td_real" request.
1823                CASE ( int_dom_td_real )
1824                  CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
1825                  ALLOCATE( RData( obuf(icurs/itypesize + 4 ) ) )      ! word 5 (offset 4) of this record holds the count of data items ; defined in collect_on_comm.c
1826                  CALL int_get_td_header( obuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
1827                                          DataHandle, DateStr, Element, RData, Count, code )
1828                  icurs = icurs + hdrbufsize
1829
1830                  SELECT CASE (use_package(io_form(DataHandle)))
1831#ifdef PNETCDF_QUILT
1832                    CASE (IO_PNETCDF  )
1833                      CALL ext_pnc_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
1834#endif
1835#ifdef NETCDF
1836                    CASE ( IO_NETCDF   )
1837                      CALL ext_ncd_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
1838#endif
1839#ifdef INTIO
1840                    CASE ( IO_INTIO   )
1841                      CALL ext_int_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
1842#endif
1843#ifdef YYY
1844                 CASE ( IO_YYY )
1845                    CALL ext_yyy_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
1846#endif
1847#ifdef GRIB1
1848                 CASE ( IO_GRIB1 )
1849                    CALL ext_gr1_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
1850#endif
1851#ifdef GRIB2
1852                 CASE ( IO_GRIB2 )
1853                    CALL ext_gr2_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
1854#endif
1855                     CASE DEFAULT
1856                      Status = 0
1857                  END SELECT
1858
1859                  DEALLOCATE( RData )
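! The same decode / dispatch / free pattern repeats for the remaining
! metadata requests below (put_dom_ti_real, put_dom_td_integer,
! put_dom_ti_integer, set_time, put_dom_ti_char, put_var_ti_char):
! unpack the header, advance icurs by hdrbufsize, SELECT CASE on
! use_package(io_form(DataHandle)), and deallocate any temporary.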
1860! Every I/O server handles the "put_dom_ti_real" request.
1861                CASE ( int_dom_ti_real )
1862
1863                  CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
1864                  ALLOCATE( RData( obuf(icurs/itypesize + 4 ) ) )      ! word 5 (offset 4) of this record holds the count of data items ; defined in collect_on_comm.c
1865                  CALL int_get_ti_header( obuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
1866                                          DataHandle, Element, RData, Count, code )
1867                  icurs = icurs + hdrbufsize
1868
1869                  SELECT CASE (use_package(io_form(DataHandle)))
1870#ifdef PNETCDF_QUILT
1871                    CASE (IO_PNETCDF  )
1872                      CALL ext_pnc_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
1873#endif
1874#ifdef NETCDF
1875                    CASE ( IO_NETCDF   )
1876                      CALL ext_ncd_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
1877#endif
1878#ifdef INTIO
1879                    CASE ( IO_INTIO   )
1880                      CALL ext_int_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
1881#endif
1882#ifdef YYY
1883                 CASE ( IO_YYY )
1884                    CALL ext_yyy_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
1885#endif
1886#ifdef GRIB1
1887                 CASE ( IO_GRIB1 )
1888                    CALL ext_gr1_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
1889#endif
1890#ifdef GRIB2
1891                 CASE ( IO_GRIB2 )
1892                    CALL ext_gr2_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
1893#endif
1894                    CASE DEFAULT
1895                      Status = 0
1896                  END SELECT
1897
1898                  DEALLOCATE( RData )
1899
1900! Every I/O server handles the "put_dom_td_integer" request.
1901                CASE ( int_dom_td_integer )
1902
1903                  CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
1904                  ALLOCATE( IData( obuf(icurs/itypesize + 4 ) ) )      ! word 5 (offset 4) of this record holds the count of data items ; defined in collect_on_comm.c
1905                  CALL int_get_td_header( obuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
1906                                          DataHandle, DateStr, Element, IData, Count, code )
1907                  icurs = icurs + hdrbufsize
1908
1909                  SELECT CASE (use_package(io_form(DataHandle)))
1910#ifdef PNETCDF_QUILT
1911                  CASE (IO_PNETCDF  )
1912                      CALL ext_pnc_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
1913#endif
1914#ifdef NETCDF
1915                   CASE ( IO_NETCDF   )
1916                      CALL ext_ncd_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
1917#endif
1918#ifdef INTIO
1919                   CASE ( IO_INTIO   )
1920                      CALL ext_int_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
1921#endif
1922#ifdef YYY
1923                   CASE ( IO_YYY )
1924                      CALL ext_yyy_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
1925#endif
1926#ifdef GRIB1
1927                   CASE ( IO_GRIB1 )
1928                      CALL ext_gr1_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
1929#endif
1930#ifdef GRIB2
1931                   CASE ( IO_GRIB2 )
1932                      CALL ext_gr2_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
1933#endif
1934                   CASE DEFAULT
1935                      Status = 0
1936                   END SELECT
1937
1938                   DEALLOCATE( IData )
1939
1940! Every I/O server handles the "put_dom_ti_integer" request.
1941                CASE ( int_dom_ti_integer )
1942
1943                  CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
1944                  ALLOCATE( IData( obuf(icurs/itypesize + 4 ) ) )      ! word 5 (offset 4) of this record holds the count of data items ; defined in collect_on_comm.c
1945                  CALL int_get_ti_header( obuf(icurs/itypesize:), hdrbufsize, itypesize, ftypesize, &
1946                                          DataHandle, Element, IData, Count, code )
1947                  icurs = icurs + hdrbufsize
1948                  SELECT CASE (use_package(io_form(DataHandle)))
1949#ifdef PNETCDF_QUILT
1950                    CASE (IO_PNETCDF  )
1951                      CALL ext_pnc_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
1952#endif
1953#ifdef NETCDF
1954                    CASE ( IO_NETCDF   )
1955                      CALL ext_ncd_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
1956#endif
1957#ifdef INTIO
1958                    CASE ( IO_INTIO   )
1959                      CALL ext_int_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
1960#endif
1961#ifdef YYY
1962                 CASE ( IO_YYY )
1963                    CALL ext_yyy_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
1964#endif
1965#ifdef GRIB1
1966                 CASE ( IO_GRIB1 )
1967                    CALL ext_gr1_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
1968#endif
1969#ifdef GRIB2
1970                 CASE ( IO_GRIB2 )
1971                    CALL ext_gr2_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
1972#endif
1973
1974                    CASE DEFAULT
1975                      Status = 0
1976                  END SELECT
1977
1978                  DEALLOCATE( IData)
1979 
1980! Every I/O server handles the "set_time" request.
1981                CASE ( int_set_time )
1982
1983                  CALL int_get_ti_header_char( obuf(icurs/itypesize), hdrbufsize, itypesize, &
1984                                               DataHandle, Element, VarName, CData, code )
1985                  SELECT CASE (use_package(io_form(DataHandle)))
1986#ifdef INTIO
1987                    CASE ( IO_INTIO   )
1988                      CALL ext_int_set_time ( handle(DataHandle), TRIM(CData), Status)
1989#endif
1990                    CASE DEFAULT
1991                      Status = 0
1992                  END SELECT
1993
1994                  icurs = icurs + hdrbufsize
1995
1996! Every I/O server handles the "put_dom_ti_char" request.
1997                CASE ( int_dom_ti_char )
1998
1999                  CALL int_get_ti_header_char( obuf(icurs/itypesize), hdrbufsize, itypesize, &
2000                                               DataHandle, Element, VarName, CData, code )
2001
2002                  SELECT CASE (use_package(io_form(DataHandle)))
2003#ifdef PNETCDF_QUILT
2004                    CASE (IO_PNETCDF  )
2005                      CALL ext_pnc_put_dom_ti_char ( handle(DataHandle), TRIM(Element), Trim(CData), Status)
2006#endif
2007#ifdef NETCDF
2008                    CASE ( IO_NETCDF   )
2009                      CALL ext_ncd_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
2010#endif
2011#ifdef INTIO
2012                    CASE ( IO_INTIO   )
2013                      CALL ext_int_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
2014#endif
2015#ifdef YYY
2016                   CASE ( IO_YYY )
2017                      CALL ext_yyy_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
2018#endif
2019#ifdef GRIB1
2020                   CASE ( IO_GRIB1 )
2021                      CALL ext_gr1_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
2022#endif
2023#ifdef GRIB2
2024                   CASE ( IO_GRIB2 )
2025                      CALL ext_gr2_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
2026#endif
2027                   CASE DEFAULT
2028                      Status = 0
2029                   END SELECT
2030
2031                  icurs = icurs + hdrbufsize
2032
2033! Every I/O server handles the "put_var_ti_char" request.
2034                CASE ( int_var_ti_char )
2035
2036                  CALL int_get_ti_header_char( obuf(icurs/itypesize), hdrbufsize, itypesize, &
2037                                               DataHandle, Element, VarName, CData, code )
2038
2039                  SELECT CASE (use_package(io_form(DataHandle)))
2040#ifdef PNETCDF_QUILT
2041                    CASE (IO_PNETCDF  )
2042                      CALL ext_pnc_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status )
2043#endif
2044#ifdef NETCDF
2045                    CASE ( IO_NETCDF   )
2046                      CALL ext_ncd_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
2047#endif
2048#ifdef INTIO
2049                    CASE ( IO_INTIO   )
2050                      CALL ext_int_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
2051#endif
2052#ifdef YYY
2053                   CASE ( IO_YYY )
2054                      CALL ext_yyy_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
2055#endif
2056#ifdef GRIB1
2057                   CASE ( IO_GRIB1 )
2058                      CALL ext_gr1_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
2059#endif
2060#ifdef GRIB2
2061                   CASE ( IO_GRIB2 )
2062                      CALL ext_gr2_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
2063#endif
2064                   CASE DEFAULT
2065                      Status = 0
2066                   END SELECT
2067
2068                  icurs = icurs + hdrbufsize
2069
2070                CASE ( int_ioexit )
2071! ioexit is now handled by sending negative message length to server
2072                  CALL wrf_error_fatal( &
2073                         "quilt: should have handled int_ioexit already")
2074! Every I/O server handles the "ioclose" request.
2075                CASE ( int_ioclose )
2076                  CALL int_get_handle_header( obuf(icurs/itypesize), hdrbufsize, itypesize, &
2077                                              DataHandle , code )
2078                  icurs = icurs + hdrbufsize
2079
2080                  IF ( DataHandle .GE. 1 ) THEN
2081
2082                     SELECT CASE (use_package(io_form(DataHandle)))
2083#ifdef PNETCDF_QUILT
2084                    CASE ( IO_PNETCDF   )
2085                      CALL ext_pnc_inquire_filename( handle(DataHandle), fname, fstat, Status )
2086                      IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2087                        CALL ext_pnc_ioclose(handle(DataHandle),Status)
2088                      ENDIF
2089#endif
2090#ifdef NETCDF
2091                     CASE ( IO_NETCDF   )
2092                        CALL ext_ncd_inquire_filename( handle(DataHandle), fname, fstat, Status )
2093                        IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2094                           CALL ext_ncd_ioclose(handle(DataHandle),Status)
2095                        ENDIF
2096#endif
2097#ifdef INTIO
2098                     CASE ( IO_INTIO   )
2099                        CALL ext_int_inquire_filename( handle(DataHandle), fname, fstat, Status )
2100                        IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2101                           CALL ext_int_ioclose(handle(DataHandle),Status)
2102                        ENDIF
2103#endif
2104#ifdef YYY
2105                     CASE ( IO_YYY )
2106                        CALL ext_yyy_inquire_filename( handle(DataHandle), fname, fstat, Status )
2107                        IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2108                           CALL ext_yyy_ioclose(handle(DataHandle),Status)
2109                        ENDIF
2110#endif
2111#ifdef GRIB1
2112                     CASE ( IO_GRIB1 )
2113                        CALL ext_gr1_inquire_filename( handle(DataHandle), fname, fstat, Status )
2114                        IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2115                           CALL ext_gr1_ioclose(handle(DataHandle),Status)
2116                        ENDIF
2117#endif
2118#ifdef GRIB2
2119                     CASE ( IO_GRIB2 )
2120                        CALL ext_gr2_inquire_filename( handle(DataHandle), fname, fstat, Status )
2121                        IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2122                           CALL ext_gr2_ioclose(handle(DataHandle),Status)
2123                        ENDIF
2124#endif
2125                     CASE DEFAULT
2126                        Status = 0
2127                     END SELECT
2128                  ENDIF
2129
2130! Every I/O server handles the "open_for_write_begin" request.
2131                CASE ( int_open_for_write_begin )
2132
2133                  CALL int_get_ofwb_header( obuf(icurs/itypesize), hdrbufsize, itypesize, &
2134                                            FileName,SysDepInfo,io_form_arg,DataHandle )
2135
2136!write(0,*)' int_open_for_write_begin itypesize ',itypesize,' itypesize ',itypesize
2137!write(0,*)' int_open_for_write_begin icurs ', icurs, hdrbufsize
2138!JMDEBUGwrite(0,*)' int_open_for_write_begin FileName ',TRIM(FileName) , ' DataHandle ', DataHandle
2139!write(0,*)' int_open_for_write_begin SysDepInfo ',TRIM(SysDepInfo)
2140                  icurs = icurs + hdrbufsize
2141!write(0,*)' int_open_for_write_begin new icurs,tag,size ', icurs, get_hdr_tag( bigbuf(icurs/itypesize) ),get_hdr_rec_size( bigbuf(icurs/itypesize) )
2142               
2143                  io_form(DataHandle) = io_form_arg
2144
2145                  SELECT CASE (use_package(io_form(DataHandle)))
2146#ifdef PNETCDF_QUILT
2147                    CASE (IO_PNETCDF  )
2148                      CALL ext_pnc_open_for_write_begin(FileName,mpi_comm_local,mpi_comm_local,SysDepInfo,handle(DataHandle),Status )
2149#endif
2150#ifdef NETCDF
2151                    CASE ( IO_NETCDF   )
2152                      CALL ext_ncd_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
2153!write(0,*)'ext_ncd_open_for_write_begin ',Trim(FileName),DataHandle,handle(DataHandle),Status
2154#endif
2155#ifdef INTIO
2156                    CASE ( IO_INTIO   )
2157                      CALL ext_int_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
2158#endif
2159#ifdef YYY
2160                    CASE ( IO_YYY )
2161                       CALL ext_yyy_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
2162#endif
2163#ifdef GRIB1
2164                    CASE ( IO_GRIB1 )
2165                       CALL ext_gr1_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
2166#endif
2167#ifdef GRIB2
2168                    CASE ( IO_GRIB2 )
2169                       CALL ext_gr2_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
2170#endif
2171                    CASE DEFAULT
2172                      Status = 0
2173                  END SELECT
2174               
2175                  okay_to_write(DataHandle) = .false.
2176
2177! Every I/O server handles the "open_for_write_commit" request.
2178! In this case, the "okay_to_commit" is simply set to .true. so "write_field"
2179! (int_field) requests will initiate writes to disk.  Actual commit will be done after
2180! all requests in this batch have been handled.
2181                CASE ( int_open_for_write_commit )
2182
2183                  CALL int_get_handle_header( obuf(icurs/itypesize), hdrbufsize, itypesize, &
2184                                              DataHandle , code )
2185                  icurs = icurs + hdrbufsize
2186                  okay_to_commit(DataHandle) = .true.
2187
2188! Every I/O server handles the "write_field" (int_field) request.
2189! If okay_to_write(DataHandle) is .true. then the patch in the
2190! header (obuf) is stored for writing to disk using pNetCDF.  As noted
2191! above, no reassembly onto a full-size domain is done here.  If
2192! okay_to_write(DataHandle) is .false. then external I/O package interfaces
2193! are called to write metadata for I/O formats that support native metadata.
2194!
2195! NOTE that the I/O servers will only see write_field (int_field)
2196! requests AFTER an "iosync" request.
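! Unlike quilt(), nothing is gathered onto a single "root" task here: each
! server keeps its own patches via store_patch_in_outbuf_pnc() and the group
! later writes them collectively through the pNetCDF interface, per this
! routine's header description.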
2197                CASE ( int_field )
2198                  CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
2199                  CALL int_get_write_field_header ( obuf(icurs/itypesize), hdrbufsize, itypesize, ftypesize,  &
2200                                                    DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
2201                                                    DomainDesc , MemoryOrder , Stagger , DimNames ,              &
2202                                                    DomainStart , DomainEnd ,                                    &
2203                                                    MemoryStart , MemoryEnd ,                                    &
2204                                                    PatchStart , PatchEnd )
2205!write(0,*)' int_field ',TRIM(VarName),DataHandle,okay_to_write(DataHandle)
2206                  icurs = icurs + hdrbufsize
2207
2208                  IF ( okay_to_write(DataHandle) ) THEN
2209
2210!!$                    WRITE(0,FMT="('>>> ',(A),1x,(A),1x,A2,I6,1x,3('[',I3,',',I3,'] '))") &
2211!!$                          TRIM(DateStr), TRIM(VarName), TRIM(MemoryOrder), &
2212!!$                        (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)*(PatchEnd(3)-PatchStart(3)+1), &
2213!!$PatchStart(1),PatchEnd(1),PatchStart(2),PatchEnd(2),PatchStart(3),PatchEnd(3)
2214!!$                    WRITE(0,FMT="('>>> ',(A),1x,(A),1x,I6,1x,3('[',I3,',',I3,'] '))") &
2215!!$                          TRIM(DateStr), TRIM(VarName),  DomainDesc, &
2216!!$                          DomainStart(1),DomainEnd(1),DomainStart(2),DomainEnd(2),DomainStart(3),DomainEnd(3)
2217
2218                    IF ( FieldType .EQ. WRF_FLOAT .OR. FieldType .EQ. WRF_DOUBLE)  THEN
2219                      ! Note that the WRF_DOUBLE branch of this IF statement must come first since
2220                      ! WRF_FLOAT is set equal to WRF_DOUBLE during autopromotion builds. 
2221                      IF ( FieldType .EQ. WRF_DOUBLE)  THEN
2222! this branch has not been tested TBH: 20050406
2223                        CALL mpi_type_size( MPI_DOUBLE_PRECISION, ftypesize, ierr )
2224                      ELSE
2225                        CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
2226                      ENDIF
2227
2228#ifdef PNETCDF_QUILT
2229!                      WRITE(mess,FMT="('>>> ',(A),1x,(A),1x,I6,1x,3('[',I3,',',I3,'] '))") &
2230!                          TRIM(DateStr), TRIM(VarName),  DomainDesc, &
2231!                          DomainStart(1),DomainEnd(1), &
2232!                          DomainStart(2),DomainEnd(2),DomainStart(3),DomainEnd(3)
2233!                      CALL wrf_message(mess)
2234
2235                      CALL store_patch_in_outbuf_pnc(obuf(icurs/itypesize), &
2236                                                     dummybuf, TRIM(DateStr), &
2237                                                     TRIM(VarName) , &
2238                                                     FieldType,      &
2239                                                     TRIM(MemoryOrder), &
2240                                                     TRIM(Stagger), &
2241                                                     DimNames, &
2242                                                     DomainStart , DomainEnd ,&
2243                                                     MemoryStart , MemoryEnd ,&
2244                                                     PatchStart , PatchEnd )
2245                      stored_write_record = .true.
2246
2247!!$                      IF(VarName .eq. "PSFC")THEN
2248!!$                         CALL dump_real_array_c(obuf(icurs/itypesize), DomainStart,&
2249!!$                                                DomainEnd, PatchStart, PatchEnd,   &
2250!!$                                                mytask_local, DomainDesc)
2251!!$                      ENDIF
2252
2253#endif
2254                    ELSE IF ( FieldType .EQ. WRF_INTEGER ) THEN
2255                      CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
2256#ifdef PNETCDF_QUILT
2257                      CALL store_patch_in_outbuf_pnc ( dummybuf, &
2258                                                   obuf(icurs/itypesize), &
2259                                                   TRIM(DateStr), &
2260                                                   TRIM(VarName) , &
2261                                                   FieldType, &
2262                                                   TRIM(MemoryOrder), &
2263                                                   TRIM(Stagger), DimNames,  &
2264                                                   DomainStart , DomainEnd , &
2265                                                   MemoryStart , MemoryEnd , &
2266                                                   PatchStart , PatchEnd )
2267                      stored_write_record = .true.
2268#endif
2269                    ELSE IF ( FieldType .EQ. WRF_LOGICAL ) THEN
2270                      ftypesize = LWORDSIZE
2271                    ENDIF
2272
2273                    icurs = icurs + (PatchEnd(1)-PatchStart(1)+1)* &
2274                                    (PatchEnd(2)-PatchStart(2)+1)* &
2275                                    (PatchEnd(3)-PatchStart(3)+1)*ftypesize
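! (Worked example of the cursor arithmetic above: icurs is advanced past the
!  field payload that follows the header in obuf, i.e. the patch volume times
!  the element size in bytes.  A 10x10x5 patch of 4-byte reals skips
!  10*10*5*4 = 2000 bytes.)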
2276
2277                  ELSE ! Write metadata only (or do 'training'?)
2278
2279                    SELECT CASE (use_package(io_form(DataHandle)))
2280
2281#ifdef PNETCDF_QUILT
2282                      CASE ( IO_PNETCDF )
2283                        CALL ext_pnc_write_field ( handle(DataHandle) , TRIM(DateStr),        &
2284                                   TRIM(VarName) , dummy , FieldType , mpi_comm_local , mpi_comm_local,         &
2285                                   DomainDesc , TRIM(MemoryOrder) , TRIM(Stagger), DimNames , &
2286                                   DomainStart , DomainEnd ,                                  &
2287                                   MemoryStart , MemoryEnd ,                                  &
2288                                   PatchStart ,  PatchEnd,                                  &
2289                                   Status )
2290#endif
2291#ifdef NETCDF
2292                      CASE ( IO_NETCDF   )
2293                        CALL ext_ncd_write_field ( handle(DataHandle) , TRIM(DateStr) ,         &
2294                                   TRIM(VarName) , dummy , FieldType , Comm , IOComm,           &
2295                                   DomainDesc , TRIM(MemoryOrder) , TRIM(Stagger) , DimNames ,  &
2296                                   DomainStart , DomainEnd ,                                    &
2297                                   DomainStart , DomainEnd ,                                    &
2298                                   DomainStart , DomainEnd ,                                    &
2299                                   Status )
2300#endif
2301#if 0
2302! since this is training and the grib output doesn't need training, disable this branch.
2303#ifdef YYY
2304                 CASE ( IO_YYY )
2305                      CALL ext_YYY_write_field ( handle(DataHandle) , TRIM(DateStr) ,         &
2306                                 TRIM(VarName) , dummy , FieldType , Comm , IOComm,           &
2307                                 DomainDesc , TRIM(MemoryOrder) , TRIM(Stagger) , DimNames ,  &
2308                                 DomainStart , DomainEnd ,                                    &
2309                                 DomainStart , DomainEnd ,                                    &
2310                                 DomainStart , DomainEnd ,                                    &
2311                                 Status )
2312#endif
2313#endif
2314                      CASE DEFAULT
2315                        Status = 0
2316                    END SELECT
2317                  ENDIF
2318                CASE ( int_iosync )
2319                  CALL int_get_handle_header( obuf(icurs/itypesize), hdrbufsize, itypesize, &
2320                                            DataHandle , code )
2321                  icurs = icurs + hdrbufsize
2322                CASE DEFAULT
2323                  WRITE(mess,*)'quilt: bad tag: ',                            &
2324                               get_hdr_tag( obuf(icurs/itypesize) ),' icurs ',&
2325                               icurs/itypesize
2326                  CALL wrf_error_fatal( mess )
2327              END SELECT
2328
2329            ENDDO !}
2330! Now, we have finished handling all commands from the latest
2331! call to retrieve_pieces_of_field().
2332
2333            IF (stored_write_record) THEN
2334! If any field patches have been stored in internal output buffers
2335! (via a call to store_patch_in_outbuf_pnc()) then call write_outbuf_pnc()
2336! to write them to disk now.
2337! NOTE that the I/O server will only have called
2338! store_patch_in_outbuf_pnc() when handling write_field (int_field)
2339! commands which only arrive AFTER an "iosync" command.
2340!              CALL start_timing
2341#ifdef PNETCDF_QUILT
2342              CALL write_outbuf_pnc( handle(DataHandle), &
2343                                     use_package(io_form(DataHandle)), &
2344                                     mpi_comm_local, mytask_local,     &
2345                                     ntasks_local_group)
2346#endif
2347!              CALL end_timing( "quilt_pnc: call to write_outbuf_pnc" )
2348              stored_write_record = .false.
2349              written_record = .true.
2350            ENDIF
2351
2352! If one or more "open_for_write_commit" commands were encountered from the
2353! latest call to retrieve_pieces_of_field() then call the package-specific
2354! routine to do the commit.
2355            IF (okay_to_commit(DataHandle)) THEN
2356
2357              SELECT CASE (use_package(io_form(DataHandle)))
2358#ifdef PNETCDF_QUILT
2359                CASE ( IO_PNETCDF   )
2360                  CALL ext_pnc_inquire_filename( handle(DataHandle), fname, fstat, Status )
2361                  IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2362                    CALL ext_pnc_open_for_write_commit(handle(DataHandle),Status)
2363                    okay_to_write(DataHandle) = .true.
2364                  ENDIF
2365#endif
2366#ifdef NETCDF
2367                CASE ( IO_NETCDF   )
2368                  CALL ext_ncd_inquire_filename( handle(DataHandle), fname, fstat, Status )
2369                  IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2370                    CALL ext_ncd_open_for_write_commit(handle(DataHandle),Status)
2371                    okay_to_write(DataHandle) = .true.
2372                  ENDIF
2373#endif
2374#ifdef INTIO
2375                CASE ( IO_INTIO   )
2376                  CALL ext_int_inquire_filename( handle(DataHandle), fname, fstat, Status )
2377                  IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2378                    CALL ext_int_open_for_write_commit(handle(DataHandle),Status)
2379                    okay_to_write(DataHandle) = .true.
2380                  ENDIF
2381#endif
2382#ifdef YYY
2383                 CASE ( IO_YYY )
2384                    CALL ext_yyy_inquire_filename( handle(DataHandle), fname, fstat, Status )
2385                    IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2386                       CALL ext_yyy_open_for_write_commit(handle(DataHandle),Status)
2387                       okay_to_write(DataHandle) = .true.
2388                    ENDIF
2389#endif
2390#ifdef GRIB1
2391                 CASE ( IO_GRIB1 )
2392                    CALL ext_gr1_inquire_filename( handle(DataHandle), fname, fstat, Status )
2393                    IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2394                       CALL ext_gr1_open_for_write_commit(handle(DataHandle),Status)
2395                       okay_to_write(DataHandle) = .true.
2396                    ENDIF
2397#endif
2398#ifdef GRIB2
2399                 CASE ( IO_GRIB2 )
2400                    CALL ext_gr2_inquire_filename( handle(DataHandle), fname, fstat, Status )
2401                    IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
2402                       CALL ext_gr2_open_for_write_commit(handle(DataHandle),Status)
2403                       okay_to_write(DataHandle) = .true.
2404                    ENDIF
2405#endif
2406
2407                CASE DEFAULT
2408                  Status = 0
2409              END SELECT
2410
2411              okay_to_commit(DataHandle) = .false.
2412            ENDIF
2413!!endif
2414
2415! Retrieve header and all patches for the next field from the internal
2416! buffers. 
2417        CALL retrieve_pieces_of_field ( obuf , VarName, obufsize, sz, retval )
2418      END DO !}
2419
2420      DEALLOCATE( obuf )
2421
2422      ! flush output files if needed
2423      IF (written_record) THEN
2424!CALL start_timing
2425        SELECT CASE ( use_package(io_form(DataHandle)) )
2426#ifdef PNETCDF_QUILT
2427          CASE ( IO_PNETCDF   )
2428            CALL ext_pnc_iosync( handle(DataHandle), Status )
2429#endif
2430          CASE DEFAULT
2431            Status = 0
2432        END SELECT
2433        written_record = .false.
2434!CALL end_timing( "quilt_pnc: flush" )
2435      ENDIF
2436
2437      END DO ! }
2438
2439    END SUBROUTINE quilt_pnc
2440
2441! closing #endif for DM_PARALLEL
2442#endif
2443
2444    SUBROUTINE init_module_wrf_quilt
2445!<DESCRIPTION>
2446! Both client (compute) and server tasks call this routine to initialize the
2447! module.  Routine setup_quilt_servers() is called from this routine to
2448! determine which tasks are compute tasks and which are server tasks.  Server
2449! tasks then call routine quilt() and remain there for the rest of the model
2450! run.  Compute tasks return from init_module_wrf_quilt() to perform model
2451! computations. 
2452!</DESCRIPTION>
2453#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2454      IMPLICIT NONE
2455      INCLUDE 'mpif.h'
2456      INTEGER i
2457      NAMELIST /namelist_quilt/ nio_tasks_per_group, nio_groups
2458      INTEGER ntasks, mytask, ierr, io_status
2459#  if defined(_OPENMP) && defined(MPI2_THREAD_SUPPORT)
2460      INTEGER thread_support_provided, thread_support_requested
2461#endif
2462      INTEGER mpi_comm_here
2463      LOGICAL mpi_inited
2464      LOGICAL esmf_coupling
2465
2466!TODO:  Change this to run-time switch
2467#ifdef ESMFIO
2468      esmf_coupling = .TRUE.
2469#else
2470      esmf_coupling = .FALSE.
2471#endif
2472
2473      quilting_enabled = .FALSE.
2474      IF ( disable_quilt ) RETURN
2475
2476      DO i = 1,int_num_handles
2477        okay_to_write(i) = .FALSE.
2478        int_handle_in_use(i) = .FALSE.
2479        server_for_handle(i) = 0
2480        int_num_bytes_to_write(i) = 0
2481      ENDDO
2482
2483      CALL MPI_INITIALIZED( mpi_inited, ierr )
2484      IF ( .NOT. mpi_inited ) THEN
2485#  if defined(_OPENMP) && defined(MPI2_THREAD_SUPPORT)
2486        thread_support_requested = MPI_THREAD_FUNNELED
2487        CALL mpi_init_thread ( thread_support_requested, thread_support_provided, ierr )
2488        IF ( thread_support_provided .lt. thread_support_requested ) THEN
2489           CALL WRF_ERROR_FATAL( "failed to initialize MPI thread support")
2490        ENDIF
2491#  else
2492        CALL mpi_init ( ierr )
2493#  endif
2494        CALL wrf_set_dm_communicator( MPI_COMM_WORLD )
2495        CALL wrf_termio_dup
2496      ENDIF
2497      CALL wrf_get_dm_communicator( mpi_comm_here )
2498
2499      CALL MPI_Comm_rank ( mpi_comm_here, mytask, ierr ) ;
2500      CALL mpi_x_comm_size ( mpi_comm_here, ntasks, ierr ) ;
2501
2502      IF ( mytask .EQ. 0 ) THEN
2503        OPEN ( unit=27, file="namelist.input", form="formatted", status="old" )
2504        nio_groups = 1
2505        nio_tasks_per_group  = 0
2506        READ ( 27 , NML = namelist_quilt, IOSTAT=io_status )
2507        IF (io_status .NE. 0) THEN
2508          CALL wrf_error_fatal( "ERROR reading namelist namelist_quilt" )
2509        ENDIF
2510        CLOSE ( 27 )
2511        IF ( esmf_coupling ) THEN
2512          IF ( nio_tasks_per_group > 0 ) THEN
2513            CALL wrf_error_fatal("frame/module_io_quilt.F: cannot use "// &
2514                                 "ESMF coupling with quilt tasks") ;
2515          ENDIF
2516        ENDIF
2517      ENDIF
2518      CALL mpi_bcast( nio_tasks_per_group  , 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
2519      CALL mpi_bcast( nio_groups , 1 , MPI_INTEGER , 0 , mpi_comm_here, ierr )
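! A minimal sketch of the namelist group read above, as it might appear in
! namelist.input (the values are illustrative, not defaults):
!
!   &namelist_quilt
!    nio_tasks_per_group = 2,
!    nio_groups = 1,
!   /
!
! With these settings, nio_groups*nio_tasks_per_group = 2 ranks of the MPI
! job become I/O servers and the remaining ranks stay compute tasks.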
2520
2521      CALL setup_quilt_servers( nio_tasks_per_group,            &
2522                                mytask,               &
2523                                ntasks,               &
2524                                nio_groups,           &
2525                                nio_tasks_in_group,   &
2526                                mpi_comm_here,       &
2527                                mpi_comm_local,       &
2528                                mpi_comm_io_groups)
2529
2530       ! provide the communicator for the integration tasks to RSL
2531       IF ( compute_node ) THEN
2532          CALL wrf_set_dm_communicator( mpi_comm_local )
2533       ELSE
2534          CALL quilt    ! will not return on io server tasks
2535       ENDIF
2536#endif
2537      RETURN
2538    END SUBROUTINE init_module_wrf_quilt
2539END MODULE module_wrf_quilt
2540
2541!<DESCRIPTION>
2542! Remaining routines in this file are defined outside of the module
2543! either to defeat arg/param type checking or to avoid an explicit use
2544! dependence.
2545!</DESCRIPTION>
2546
2547SUBROUTINE disable_quilting
2548!<DESCRIPTION>
2549! Call this in programs that should never use quilting (e.g. real).
2550! It must be called before init_module_wrf_quilt(). 
2551!</DESCRIPTION>
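! Illustrative call sequence in a program that must not quilt:
!
!   CALL disable_quilting          ! sets disable_quilt = .TRUE.
!   CALL init_module_wrf_quilt    ! then returns without starting servers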
2552  USE module_wrf_quilt
2553  disable_quilt = .TRUE.
2554  RETURN
2555END SUBROUTINE disable_quilting
2556
2557LOGICAL FUNCTION  use_output_servers()
2558!<DESCRIPTION>
2559! Returns .TRUE. if I/O quilt servers are in use for write operations.
2560! This routine is called only by client (compute) tasks. 
2561!</DESCRIPTION>
2562  USE module_wrf_quilt
2563  use_output_servers = quilting_enabled
2564  RETURN
2565END FUNCTION use_output_servers
2566
2567LOGICAL FUNCTION  use_input_servers()
2568!<DESCRIPTION>
2569! Returns .TRUE. if I/O quilt servers are in use for read operations.
2570! This routine is called only by client (compute) tasks. 
2571!</DESCRIPTION>
2572  USE module_wrf_quilt
2573  use_input_servers = .FALSE.
2574  RETURN
2575END FUNCTION use_input_servers
2576
2577SUBROUTINE wrf_quilt_open_for_write_begin( FileName , Comm_compute, Comm_io, SysDepInfo, &
2578                                     DataHandle , io_form_arg, Status )
2579!<DESCRIPTION>
2580! Instruct the I/O quilt servers to begin data definition ("training") phase
2581! for writing to WRF dataset FileName.  io_form_arg indicates file format.
2582! This routine is called only by client (compute) tasks. 
2583!</DESCRIPTION>
2584#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2585  USE module_wrf_quilt
2586  USE module_state_description, ONLY: IO_PNETCDF
2587  IMPLICIT NONE
2588  INCLUDE 'mpif.h'
2589#include "intio_tags.h"
2590  CHARACTER *(*), INTENT(IN)  :: FileName
2591  INTEGER ,       INTENT(IN)  :: Comm_compute , Comm_io
2592  CHARACTER *(*), INTENT(IN)  :: SysDepInfo
2593  INTEGER ,       INTENT(OUT) :: DataHandle
2594  INTEGER ,       INTENT(IN)  :: io_form_arg
2595  INTEGER ,       INTENT(OUT) :: Status
2596! Local
2597  CHARACTER*132   :: locFileName, locSysDepInfo
2598  INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group
2599  REAL dummy
2600  INTEGER, EXTERNAL :: use_package
2601
2602  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_open_for_write_begin' )
2603  CALL int_get_fresh_handle(i)
2604  okay_to_write(i) = .false.
2605  DataHandle = i
2606
2607  locFileName = FileName
2608  locSysDepInfo = SysDepInfo
2609
2610  CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
2611
2612  SELECT CASE(use_package(io_form_arg))
2613
2614#ifdef PNETCDF_QUILT
2615  CASE(IO_PNETCDF)
2616     IF(compute_group_master(1)) THEN
2617        CALL int_gen_ofwb_header( hdrbuf, hdrbufsize, itypesize, &
2618                                  locFileName,locSysDepInfo,io_form_arg,&
2619                                  DataHandle )
2620     ELSE
2621        CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
2622     END IF
2623#endif
2624  CASE DEFAULT
2625
2626     IF ( wrf_dm_on_monitor() ) THEN
2627        CALL int_gen_ofwb_header( hdrbuf, hdrbufsize, itypesize, &
2628                                  locFileName,locSysDepInfo,io_form_arg,DataHandle )
2629     ELSE
2630        CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
2631     ENDIF
2632
2633  END SELECT
2634
2635  iserver = get_server_id ( DataHandle )
2636!JMDEBUGwrite(0,*)'wrf_quilt_open_for_write_begin iserver = ', iserver
2637  CALL get_mpi_comm_io_groups( comm_io_group , iserver )
2638!JMDEBUGwrite(0,*)'wrf_quilt_open_for_write_begin comm_io_group  = ', comm_io_group
2639
2640  CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
2641!JMDEBUGwrite(0,*)'mpi_x_comm_size tasks_in_group ',tasks_in_group, ierr
2642
2643!!JMTIMING  CALL start_timing
2644  ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
2645  reduced = 0
2646  reduced(1) = hdrbufsize
2647#ifdef PNETCDF_QUILT
2648  IF ( compute_group_master(1) ) reduced(2) = i
2649#else
2650  IF ( wrf_dm_on_monitor() )  reduced(2) = i
2651#endif
2652  CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER,  &
2653                   MPI_SUM, tasks_in_group-1,          &   ! root = tasks_in_group-1, the I/O server
2654                   comm_io_group, ierr )
2655!!JMTIMING   CALL end_timing("MPI_Reduce in wrf_quilt_open_for_write_begin")
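! The reduce just above implements a small request protocol that recurs in
! the wrf_quilt_* routines below: every task in comm_io_group contributes a
! 2-element integer vector, but only one task sets nonzero values, so the
! MPI_SUM arriving at the root (rank tasks_in_group-1, the I/O server) is
!   reduced(1) = number of bytes about to be sent,
!   reduced(2) = the DataHandle the request refers to.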
2656
2657  ! send data to the i/o processor
2658  CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group,            &
2659                        onebyte,                       &
2660                        hdrbuf, hdrbufsize , &
2661                        dummy, 0 )
2662
2663  Status = 0
2664
2665
2666#endif
2667  RETURN 
2668END SUBROUTINE wrf_quilt_open_for_write_begin
2669
2670SUBROUTINE wrf_quilt_open_for_write_commit( DataHandle , Status )
2671!<DESCRIPTION>
2672! Instruct the I/O quilt servers to switch an internal flag to enable output
2673! for the dataset referenced by DataHandle.  The call to
2674! wrf_quilt_open_for_write_commit() must be paired with a call to
2675! wrf_quilt_open_for_write_begin().
2676! This routine is called only by client (compute) tasks. 
2677!</DESCRIPTION>
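! A sketch of the expected client-side pairing (the calls normally originate
! in module_io.F):
!
!   CALL wrf_quilt_open_for_write_begin ( FileName, Comm_compute, Comm_io,   &
!                                         SysDepInfo, DataHandle, io_form_arg, Status )
!   ! ... "training" writes: metadata only, okay_to_write(DataHandle) is .FALSE. ...
!   CALL wrf_quilt_open_for_write_commit( DataHandle, Status )
!   ! ... later writes on DataHandle produce actual output ...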
2678#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2679  USE module_wrf_quilt
2680  IMPLICIT NONE
2681  INCLUDE 'mpif.h'
2682#include "intio_tags.h"
2683  INTEGER ,       INTENT(IN ) :: DataHandle
2684  INTEGER ,       INTENT(OUT) :: Status
2685  INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group
2686  REAL dummy
2687
2688  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_open_for_write_commit' )
2689  IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
2690    IF ( int_handle_in_use( DataHandle ) ) THEN
2691      okay_to_write( DataHandle ) = .true.
2692    ENDIF
2693  ENDIF
2694
2695  CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
2696
2697#ifdef PNETCDF_QUILT
2698!ARP Only want one command to be received by each IO server when using
2699!ARP parallel IO
2700  IF(compute_group_master(1)) THEN
2701     CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
2702                                 DataHandle, int_open_for_write_commit )
2703  ELSE
2704     CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
2705  END IF
2706#else
2707
2708  IF ( wrf_dm_on_monitor() ) THEN
2709     CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
2710                                 DataHandle, int_open_for_write_commit )
2711  ELSE
2712     CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
2713  ENDIF
2714#endif
2715
2716  iserver = get_server_id ( DataHandle )
2717  CALL get_mpi_comm_io_groups( comm_io_group , iserver )
2718
2719  CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
2720
2721!!JMTIMING  CALL start_timing
2722  ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
2723  reduced = 0
2724  reduced(1) = hdrbufsize
2725#ifdef PNETCDF_QUILT
2726  IF ( compute_group_master(1) ) reduced(2) = DataHandle
2727#else
2728  IF ( wrf_dm_on_monitor() )  reduced(2) = DataHandle
2729#endif
2730  CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER,  &
2731                   MPI_SUM, tasks_in_group-1,          &   ! root = tasks_in_group-1, the I/O server
2732                   comm_io_group, ierr )
2733!!JMTIMING   CALL end_timing("MPI_Reduce in wrf_quilt_open_for_write_commit")
2734
2735  ! send data to the i/o processor
2736  CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group,            &
2737                        onebyte,                       &
2738                        hdrbuf, hdrbufsize , &
2739                        dummy, 0 )
2740
2741  Status = 0
2742
2743#endif
2744  RETURN 
2745END SUBROUTINE wrf_quilt_open_for_write_commit
2746
2747SUBROUTINE wrf_quilt_open_for_read ( FileName , Comm_compute, Comm_io, SysDepInfo, &
2748                               DataHandle , Status )
2749!<DESCRIPTION>
2750! Instruct the I/O quilt servers to open WRF dataset FileName for reading.
2751! This routine is called only by client (compute) tasks. 
2752! This is not yet supported.
2753!</DESCRIPTION>
2754#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2755  IMPLICIT NONE
2756  CHARACTER *(*), INTENT(IN)  :: FileName
2757  INTEGER ,       INTENT(IN)  :: Comm_compute , Comm_io
2758  CHARACTER *(*), INTENT(IN)  :: SysDepInfo
2759  INTEGER ,       INTENT(OUT) :: DataHandle
2760  INTEGER ,       INTENT(OUT) :: Status
2761
2762  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_open_for_read' )
2763  DataHandle = -1
2764  Status = -1
2765  CALL wrf_error_fatal ( "frame/module_io_quilt.F: wrf_quilt_open_for_read not yet supported" )
2766#endif
2767  RETURN 
2768END SUBROUTINE wrf_quilt_open_for_read
2769
2770SUBROUTINE wrf_quilt_inquire_opened ( DataHandle, FileName , FileStatus, Status )
2771!<DESCRIPTION>
2772! Inquire if the dataset referenced by DataHandle is open.
2773! Does not require communication with I/O servers.
2774! This routine is called only by client (compute) tasks. 
2775!</DESCRIPTION>
2776#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2777  USE module_wrf_quilt
2778  IMPLICIT NONE
2779#include "wrf_io_flags.h"
2780  INTEGER ,       INTENT(IN)  :: DataHandle
2781  CHARACTER *(*), INTENT(IN)  :: FileName
2782  INTEGER ,       INTENT(OUT) :: FileStatus
2783  INTEGER ,       INTENT(OUT) :: Status
2784
2785  Status = 0
2786
2787  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_inquire_opened' )
2788  IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
2789    IF ( int_handle_in_use( DataHandle ) ) THEN
2790      IF ( okay_to_write( DataHandle ) ) THEN
2791        FileStatus = WRF_FILE_OPENED_FOR_WRITE
2792      ENDIF
2793    ENDIF
2794  ENDIF
2795  Status = 0
2796 
2797#endif
2798  RETURN
2799END SUBROUTINE wrf_quilt_inquire_opened
2800
2801SUBROUTINE wrf_quilt_inquire_filename ( DataHandle, FileName , FileStatus, Status )
2802!<DESCRIPTION>
2803! Return the Filename and FileStatus associated with DataHandle.
2804! Does not require communication with I/O servers.
2805!
2806! Note that the current implementation does not actually return FileName.
2807! Currently, WRF does not use this returned value.  Fixing this would simply
2808! require saving the file names on the client tasks in an array similar to
2809! okay_to_write().
2810! This routine is called only by client (compute) tasks. 
2811!</DESCRIPTION>
2812#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2813  USE module_wrf_quilt
2814  IMPLICIT NONE
2815#include "wrf_io_flags.h"
2816  INTEGER ,       INTENT(IN)  :: DataHandle
2817  CHARACTER *(*), INTENT(OUT) :: FileName
2818  INTEGER ,       INTENT(OUT) :: FileStatus
2819  INTEGER ,       INTENT(OUT) :: Status
2820  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_inquire_filename' )
2821  Status = 0
2822  IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
2823    IF ( int_handle_in_use( DataHandle ) ) THEN
2824      IF ( okay_to_write( DataHandle ) ) THEN
2825        FileStatus = WRF_FILE_OPENED_FOR_WRITE
2826      ELSE
2827        FileStatus = WRF_FILE_OPENED_NOT_COMMITTED
2828      ENDIF
2829    ELSE
2830      FileStatus = WRF_FILE_NOT_OPENED
2831    ENDIF
2832    Status = 0
2833    FileName = "bogusfornow"
2834  ELSE
2835    Status = -1
2836  ENDIF
2837#endif
2838  RETURN
2839END SUBROUTINE wrf_quilt_inquire_filename
2840
2841SUBROUTINE wrf_quilt_iosync ( DataHandle, Status )
2842!<DESCRIPTION>
2843! Instruct the I/O quilt servers to synchronize the disk copy of a dataset
2844! with memory buffers.
2845!
2846! After the "iosync" header (request) is sent to the I/O quilt server,
2847! the compute tasks will then send the entire contents (headers and data) of
2848! int_local_output_buffer to their I/O quilt server.  This communication is
2849! done in subroutine send_to_io_quilt_servers().  After the I/O quilt servers
2850! receive this data, they will write all accumulated fields to disk.
2851!
2852! Significant time may be required for the I/O quilt servers to organize
2853! fields and write them to disk.  Therefore, the "iosync" request should be
2854! sent only when the compute tasks are ready to run for a while without
2855! needing to communicate with the servers.  Otherwise, the compute tasks
2856! will end up waiting for the servers to finish writing to disk, thus wasting
2857! any performance benefits of having servers at all.
2858!
2859! This routine is called only by client (compute) tasks. 
2860!</DESCRIPTION>
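! Sketch of the resulting message flow (names as in the description above):
!
!   compute tasks --- "iosync" header -------------------> I/O server
!   compute tasks --- int_local_output_buffer contents --> I/O server
!   I/O server    --- writes all accumulated fields to disk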
2861#if  defined( DM_PARALLEL ) && ! defined (STUBMPI)
2862  USE module_wrf_quilt
2863  IMPLICIT NONE
2864  include "mpif.h"
2865  INTEGER ,       INTENT(IN)  :: DataHandle
2866  INTEGER ,       INTENT(OUT) :: Status
2867
2868  INTEGER locsize , itypesize
2869  INTEGER ierr, tasks_in_group, comm_io_group, dummy, i
2870
2871  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_iosync' )
2872
2873!  CALL start_timing
2874  IF ( associated ( int_local_output_buffer ) ) THEN
2875
2876    iserver = get_server_id ( DataHandle )
2877    CALL get_mpi_comm_io_groups( comm_io_group , iserver )
2878
2879    CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
2880
2881    locsize = int_num_bytes_to_write(DataHandle)
2882
2883!    CALL start_timing
2884    ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
2885    reduced = 0
2886    reduced(1) = locsize
2887#ifdef PNETCDF_QUILT
2888! ARP Only want one command per IOServer if doing parallel IO
2889    IF ( compute_group_master(1) ) reduced(2) = DataHandle
2890#else
2891    IF ( wrf_dm_on_monitor() )  reduced(2) = DataHandle
2892#endif
2893    CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER,  &
2894                     MPI_SUM, tasks_in_group-1,          &   ! root = tasks_in_group-1, the I/O server
2895                     comm_io_group, ierr )
2896!    CALL end_timing("MPI_Reduce in wrf_quilt_iosync")
2897
2898    ! send data to the i/o processor
2899#ifdef DEREF_KLUDGE
2900    CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group,            &
2901                          onebyte,                       &
2902                          int_local_output_buffer(1), locsize , &
2903                          dummy, 0 )
2904#else
2905    CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group,            &
2906                          onebyte,                       &
2907                          int_local_output_buffer, locsize , &
2908                          dummy, 0 )
2909#endif
2910
2911
2912    int_local_output_cursor = 1
2913!    int_num_bytes_to_write(DataHandle) = 0
2914    DEALLOCATE ( int_local_output_buffer )
2915    NULLIFY ( int_local_output_buffer )
2916  ELSE
2917    CALL wrf_message ("frame/module_io_quilt.F: wrf_quilt_iosync: no buffer allocated")
2918  ENDIF
2919!  CALL end_timing("wrf_quilt_iosync")
2920  Status = 0
2921#endif
2922  RETURN
2923END SUBROUTINE wrf_quilt_iosync
2924
2925SUBROUTINE wrf_quilt_ioclose ( DataHandle, Status )
2926!<DESCRIPTION>
2927! Instruct the I/O quilt servers to close the dataset referenced by
2928! DataHandle.
2929! This routine also clears the client file handle and, if needed, deallocates
2930! int_local_output_buffer.
2931! This routine is called only by client (compute) tasks. 
2932!</DESCRIPTION>
2933#if defined( DM_PARALLEL ) && ! defined( STUBMPI)
2934  USE module_wrf_quilt
2935  USE module_timing
2936  IMPLICIT NONE
2937  INCLUDE 'mpif.h'
2938#include "intio_tags.h"
2939  INTEGER ,       INTENT(IN)  :: DataHandle
2940  INTEGER ,       INTENT(OUT) :: Status
2941  INTEGER i, itypesize, tasks_in_group, comm_io_group, ierr
2942  REAL dummy
2943
2944!!JMTIMING  CALL start_timing
2945  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_ioclose' )
2946  CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
2947
2948! If we're using pnetcdf then each IO server will need to receive the
2949! handle just once, as there is no longer a reduce over the IO servers
2950! to get it.
2951#ifdef PNETCDF_QUILT
2952  IF ( compute_group_master(1) )THEN
2953     CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
2954                                 DataHandle, int_ioclose )
2955  ELSE
2956     CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
2957  ENDIF
2958#else
2959  IF ( wrf_dm_on_monitor() ) THEN
2960     CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
2961                                 DataHandle , int_ioclose )
2962  ELSE
2963     CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
2964  ENDIF
2965#endif
2966
2967  iserver = get_server_id ( DataHandle )
2968  CALL get_mpi_comm_io_groups( comm_io_group , iserver )
2969
2970  CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
2971
2972!!JMTIMING  CALL start_timing
2973  ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
2974  reduced = 0
2975#ifdef PNETCDF_QUILT
2976! If we're using pnetcdf then each IO server will need the handle as there is
2977! no longer a reduce over the IO servers to get it.
2978  IF ( compute_group_master(1) ) reduced(2) = DataHandle
2979#else
2980  IF ( wrf_dm_on_monitor() )  reduced(2) = DataHandle
2981#endif
2982  CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER,  &
2983                   MPI_SUM, tasks_in_group-1,          &   ! root = tasks_in_group-1, the I/O server
2984                   comm_io_group, ierr )
2985!!JMTIMING   CALL end_timing("MPI_Reduce in ioclose")
2986
2987#if 0
2988  ! send data to the i/o processor
2989!!JMTIMING  CALL start_timing
2990  CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group,            &
2991                        onebyte,                       &
2992                        hdrbuf, hdrbufsize , &
2993                        dummy, 0 )
2994!!JMTIMING   CALL end_timing("collect_on_comm in io_close")
2995#endif
2996
2997  int_handle_in_use(DataHandle) = .false.
2998  CALL set_server_id( DataHandle, 0 )
2999  okay_to_write(DataHandle) = .false.
3000  okay_to_commit(DataHandle) = .false.
3001  int_local_output_cursor = 1
3002  int_num_bytes_to_write(DataHandle) = 0
3003  IF ( associated ( int_local_output_buffer ) ) THEN
3004    DEALLOCATE ( int_local_output_buffer )
3005    NULLIFY ( int_local_output_buffer )
3006  ENDIF
3007
3008  Status = 0
3009!!JMTIMING   CALL end_timing( "wrf_quilt_ioclose" )
3010
3011#endif
3012  RETURN
3013END SUBROUTINE wrf_quilt_ioclose
3014
3015SUBROUTINE wrf_quilt_ioexit( Status )
3016!<DESCRIPTION>
3017! Instruct the I/O quilt servers to shut down the WRF I/O system.
3018! Do not call any wrf_quilt_*() routines after this routine has been called.
3019! This routine is called only by client (compute) tasks. 
3020!</DESCRIPTION>
3021#if defined( DM_PARALLEL ) && ! defined (STUBMPI )
3022  USE module_wrf_quilt
3023  IMPLICIT NONE
3024  INCLUDE 'mpif.h'
3025#include "intio_tags.h"
3026  INTEGER ,       INTENT(OUT) :: Status
3027  INTEGER                     :: DataHandle
3028  INTEGER i, itypesize, tasks_in_group, comm_io_group, me, ierr
3029  REAL dummy
3030
3031  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_ioexit' )
3032  CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
3033
3034!ARPDBG - potential bug: we have no access to the type of I/O being used for
3035! this data, so if PNETCDF_QUILT is defined we assume that is what is being used.
3036#ifdef PNETCDF_QUILT
3037!ARP Send the ioexit message just once to each IOServer when using parallel IO
3038  IF( compute_group_master(1) ) THEN
3039     CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
3040                                 DataHandle, int_ioexit )
3041  ELSE
3042     CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3043  END IF
3044#else
3045
3046  IF ( wrf_dm_on_monitor() ) THEN
3047     CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
3048                                 DataHandle , int_ioexit )  ! Handle is dummy
3049  ELSE
3050     CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3051  ENDIF
3052#endif
3053
3054  DO iserver = 1, nio_groups
3055    CALL get_mpi_comm_io_groups( comm_io_group , iserver )
3056
3057    CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
3058    CALL mpi_comm_rank( comm_io_group , me , ierr )
3059
3060! BY SENDING A NEGATIVE SIZE WE GET THE SERVERS TO SHUT DOWN
3061    hdrbufsize = -100
3062    reduced = 0
3063    IF ( me .eq. 0 ) reduced(1) = hdrbufsize
3064    CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER,  &
3065                     MPI_SUM, tasks_in_group-1,          &   ! root = tasks_in_group-1, the I/O server
3066                     comm_io_group, ierr )
3067
3068  ENDDO
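! The shutdown handshake above relies on the sign of reduced(1): the servers
! treat a negative incoming buffer size (-100 here) as an order to shut down
! rather than as a request payload, so no header is collected after the
! reduce.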
3069  Status = 0
3070
3071#endif
3072  RETURN 
3073END SUBROUTINE wrf_quilt_ioexit
3074
3075SUBROUTINE wrf_quilt_get_next_time ( DataHandle, DateStr, Status )
3076!<DESCRIPTION>
3077! Instruct the I/O quilt servers to return the next time stamp.
3078! This is not yet supported.
3079! This routine is called only by client (compute) tasks. 
3080!</DESCRIPTION>
3081#if defined( DM_PARALLEL ) && ! defined (STUBMPI)
3082  IMPLICIT NONE
3083  INTEGER ,       INTENT(IN)  :: DataHandle
3084  CHARACTER*(*)               :: DateStr
3085  INTEGER                     :: Status
3086#endif
3087  RETURN
3088END SUBROUTINE wrf_quilt_get_next_time
3089
3090SUBROUTINE wrf_quilt_get_previous_time ( DataHandle, DateStr, Status )
3091!<DESCRIPTION>
3092! Instruct the I/O quilt servers to return the previous time stamp.
3093! This is not yet supported.
3094! This routine is called only by client (compute) tasks. 
3095!</DESCRIPTION>
3096#if defined( DM_PARALLEL ) && ! defined (STUBMPI)
3097  IMPLICIT NONE
3098  INTEGER ,       INTENT(IN)  :: DataHandle
3099  CHARACTER*(*)               :: DateStr
3100  INTEGER                     :: Status
3101#endif
3102  RETURN
3103END SUBROUTINE wrf_quilt_get_previous_time
3104
3105SUBROUTINE wrf_quilt_set_time ( DataHandle, Data,  Status )
3106!<DESCRIPTION>
3107! Instruct the I/O quilt servers to set the time stamp in the dataset
3108! referenced by DataHandle.
3109! This routine is called only by client (compute) tasks. 
3110!</DESCRIPTION>
3111#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3112  USE module_wrf_quilt
3113  USE module_state_description, ONLY: IO_PNETCDF
3114  IMPLICIT NONE
3115  INCLUDE 'mpif.h'
3116#include "intio_tags.h"
3117  INTEGER ,       INTENT(IN)  :: DataHandle
3118  CHARACTER*(*) , INTENT(IN)  :: Data
3119  INTEGER                     :: Status
3120  INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group
3121  REAL dummy
3122  INTEGER                 :: Count
3123  INTEGER, EXTERNAL       :: use_package
3124!
3125  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_set_time' )
3126
3127  IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
3128    IF ( int_handle_in_use( DataHandle ) ) THEN
3129      CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
3130      Count = 0   ! there is no count for character strings
3131
3132!ARPDBG - potential bug: we have no access to the type of I/O being used for
3133! this data, so if PNETCDF_QUILT is defined we assume that is what is being used.
3134#ifdef PNETCDF_QUILT
3135      IF(compute_group_master(1) )THEN
3136! ARPDBG - we actually only want to send one time header to each IO server,
3137! since the IO servers themselves cannot tell which headers are time headers
3138         CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
3139                                      DataHandle, "TIMESTAMP", "", Data, int_set_time )
3140      ELSE
3141         CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3142      END IF
3143#else
3144      IF ( wrf_dm_on_monitor() ) THEN
3145         CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
3146                                      DataHandle, "TIMESTAMP", "", Data, int_set_time )
3147      ELSE
3148         CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3149      ENDIF
3150#endif
3151
3152      iserver = get_server_id ( DataHandle )
3153      CALL get_mpi_comm_io_groups( comm_io_group , iserver )
3154      CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
3155
3156      ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
3157      reduced = 0
3158      reduced(1) = hdrbufsize
3159#ifdef PNETCDF_QUILT
3160      IF ( compute_group_master(1) ) reduced(2) = DataHandle
3161#else
3162      IF ( wrf_dm_on_monitor() )  reduced(2) = DataHandle
3163#endif
3164      CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER,  &
3165                       MPI_SUM, tasks_in_group-1,          &   ! root = tasks_in_group-1, the I/O server
3166                       comm_io_group, ierr )
3167      ! send data to the i/o processor
3168      CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group,            &
3169                            onebyte,                       &
3170                            hdrbuf, hdrbufsize , &
3171                            dummy, 0 )
3172    ENDIF
3173  ENDIF
3174
3175#endif
3176RETURN
3177END SUBROUTINE wrf_quilt_set_time
3178
3179SUBROUTINE wrf_quilt_get_next_var ( DataHandle, VarName, Status )
3180!<DESCRIPTION>
3181! When reading, instruct the I/O quilt servers to return the name of the next
3182! variable in the current time frame.
3183! This is not yet supported.
3184! This routine is called only by client (compute) tasks. 
3185!</DESCRIPTION>
3186#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3187  IMPLICIT NONE
3188  INTEGER ,       INTENT(IN)  :: DataHandle
3189  CHARACTER*(*)               :: VarName
3190  INTEGER                     :: Status
3191#endif
3192  RETURN
3193END SUBROUTINE wrf_quilt_get_next_var
3194
3195SUBROUTINE wrf_quilt_get_dom_ti_real ( DataHandle,Element,   Data, Count, Outcount, Status )
3196!<DESCRIPTION>
3197! Instruct the I/O quilt servers to attempt to read Count words of time
3198! independent domain metadata named "Element"
3199! from the open dataset described by DataHandle.
3200! Metadata of type real are
3201! stored in array Data.
3202! Actual number of words read is returned in OutCount.
3203! This routine is called only by client (compute) tasks. 
3204
3205!
3206!</DESCRIPTION>
3207#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3208  IMPLICIT NONE
3209  INTEGER ,       INTENT(IN)  :: DataHandle
3210  CHARACTER*(*) , INTENT(IN)  :: Element
3211  REAL,            INTENT(IN) :: Data(*)
3212  INTEGER ,       INTENT(IN)  :: Count
3213  INTEGER                     :: Outcount
3214  INTEGER                     :: Status
3215  CALL wrf_message('wrf_quilt_get_dom_ti_real not supported yet')
3216#endif
3217RETURN
3218END SUBROUTINE wrf_quilt_get_dom_ti_real
3219
3220SUBROUTINE wrf_quilt_put_dom_ti_real ( DataHandle,Element,   Data, Count,  Status )
3221!<DESCRIPTION>
3222! Instruct the I/O quilt servers to write Count words of time independent
3223! domain metadata named "Element"
3224! to the open dataset described by DataHandle.
3225! Metadata of type real are
3226! copied from array Data.
3227! This routine is called only by client (compute) tasks. 
3228!</DESCRIPTION>
3229#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3230  USE module_wrf_quilt
3231  IMPLICIT NONE
3232  INCLUDE 'mpif.h'
3233#include "intio_tags.h"
3234  INTEGER ,       INTENT(IN)  :: DataHandle
3235  CHARACTER*(*) , INTENT(IN)  :: Element
3236  REAL ,          INTENT(IN)  :: Data(*)
3237  INTEGER ,       INTENT(IN)  :: Count
3238  INTEGER                     :: Status
3239!Local
3240  CHARACTER*132   :: locElement
3241  INTEGER i, typesize, itypesize, tasks_in_group, ierr, comm_io_group
3242  REAL dummy
3243!
3244!!JMTIMING  CALL start_timing
3245  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_put_dom_ti_real' )
3246  CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
3247  locElement = Element
3248
3249  IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
3250    IF ( int_handle_in_use( DataHandle ) ) THEN
3251      CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
3252      CALL MPI_TYPE_SIZE( MPI_REAL, typesize, ierr )
3253
3254#ifdef PNETCDF_QUILT
3255      IF ( compute_group_master(1) ) THEN
3256         CALL int_gen_ti_header( hdrbuf, hdrbufsize, itypesize, typesize, &
3257                                 DataHandle, locElement, Data, Count, int_dom_ti_real )
3258      ELSE
3259         CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3260      ENDIF
3261#else
3262      IF ( wrf_dm_on_monitor() ) THEN
3263         CALL int_gen_ti_header( hdrbuf, hdrbufsize, itypesize, typesize, &
3264                                 DataHandle, locElement, Data, Count, int_dom_ti_real )
3265      ELSE
3266         CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3267      ENDIF
3268#endif
3269
3270      iserver = get_server_id ( DataHandle )
3271      CALL get_mpi_comm_io_groups( comm_io_group , iserver )
3272      CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
3273
3274!!JMTIMING      CALL start_timing
3275      ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
3276      reduced = 0
3277      reduced(1) = hdrbufsize
3278#ifdef PNETCDF_QUILT
3279      IF( compute_group_master(1) )  reduced(2) = DataHandle
3280#else
3281      IF ( wrf_dm_on_monitor() )  reduced(2) = DataHandle
3282#endif
3283      CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER,  &
3284                       MPI_SUM, tasks_in_group-1,          &   ! root = tasks_in_group-1, the I/O server
3285                       comm_io_group, ierr )
3286!!JMTIMING       CALL end_timing("MPI_Reduce in wrf_quilt_put_dom_ti_real")
3287      ! send data to the i/o processor
3288      CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group,            &
3289                            onebyte,                       &
3290                            hdrbuf, hdrbufsize , &
3291                            dummy, 0 )
3292    ENDIF
3293  ENDIF
3294
3295  Status = 0
3296!!JMTIMING   CALL end_timing("wrf_quilt_put_dom_ti_real")
3297#endif
3298RETURN
3299END SUBROUTINE wrf_quilt_put_dom_ti_real
3300
3301SUBROUTINE wrf_quilt_get_dom_ti_double ( DataHandle,Element,   Data, Count, Outcount, Status )
3302!<DESCRIPTION>
3303! Instruct the I/O quilt servers to attempt to read Count words of time
3304! independent domain metadata named "Element"
3305! from the open dataset described by DataHandle.
3306! Metadata of type double are
3307! stored in array Data.
3308! Actual number of words read is returned in OutCount.
3309! This routine is called only by client (compute) tasks. 
3310!
3311! This is not yet supported.
3312!</DESCRIPTION>
3313#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3314  IMPLICIT NONE
3315  INTEGER ,       INTENT(IN)  :: DataHandle
3316  CHARACTER*(*) , INTENT(IN)  :: Element
3317  real*8                      :: Data(*)
3318  INTEGER ,       INTENT(IN)  :: Count
3319  INTEGER                     :: OutCount
3320  INTEGER                     :: Status
3321  CALL wrf_error_fatal('wrf_quilt_get_dom_ti_double not supported yet')
3322#endif
3323RETURN
3324END SUBROUTINE wrf_quilt_get_dom_ti_double
3325
3326SUBROUTINE wrf_quilt_put_dom_ti_double ( DataHandle,Element,   Data, Count,  Status )
3327!<DESCRIPTION>
3328! Instruct the I/O quilt servers to write Count words of time independent
3329! domain metadata named "Element"
3330! to the open dataset described by DataHandle.
3331! Metadata of type double are
3332! copied from array Data.
3333! This routine is called only by client (compute) tasks. 
3334!
3335! This is not yet supported.
3336!</DESCRIPTION>
3337#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3338  IMPLICIT NONE
3339  INTEGER ,       INTENT(IN)  :: DataHandle
3340  CHARACTER*(*) , INTENT(IN)  :: Element
3341  REAL*8 ,        INTENT(IN)  :: Data(*)
3342  INTEGER ,       INTENT(IN)  :: Count
3343  INTEGER                     :: Status
3344  CALL wrf_error_fatal('wrf_quilt_put_dom_ti_double not supported yet')
3345#endif
3346RETURN
3347END SUBROUTINE wrf_quilt_put_dom_ti_double
3348
3349SUBROUTINE wrf_quilt_get_dom_ti_integer ( DataHandle,Element,   Data, Count, Outcount, Status )
3350!<DESCRIPTION>
3351! Instruct the I/O quilt servers to attempt to read Count words of time
3352! independent domain metadata named "Element"
3353! from the open dataset described by DataHandle.
3354! Metadata of type integer are
3355! stored in array Data.
3356! Actual number of words read is returned in OutCount.
3357! This routine is called only by client (compute) tasks. 
3358!
3359! This is not yet supported.
3360!</DESCRIPTION>
3361#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3362  IMPLICIT NONE
3363  INTEGER ,       INTENT(IN)  :: DataHandle
3364  CHARACTER*(*) , INTENT(IN)  :: Element
3365  integer                     :: Data(*)
3366  INTEGER ,       INTENT(IN)  :: Count
3367  INTEGER                      :: OutCount
3368  INTEGER                     :: Status
3369  CALL wrf_message('wrf_quilt_get_dom_ti_integer not supported yet')
3370#endif
3371RETURN
3372END SUBROUTINE wrf_quilt_get_dom_ti_integer
3373
3374SUBROUTINE wrf_quilt_put_dom_ti_integer ( DataHandle,Element,   Data, Count,  Status )
3375!<DESCRIPTION>
3376! Instruct the I/O quilt servers to write Count words of time independent
3377! domain metadata named "Element"
3378! to the open dataset described by DataHandle.
3379! Metadata of type integer are
3380! copied from array Data.
3381! This routine is called only by client (compute) tasks. 
3382!</DESCRIPTION>
3383#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3384  USE module_wrf_quilt
3385  USE module_state_description, ONLY: IO_PNETCDF
3386  IMPLICIT NONE
3387  INCLUDE 'mpif.h'
3388#include "intio_tags.h"
3389  INTEGER ,       INTENT(IN)  :: DataHandle
3390  CHARACTER*(*) , INTENT(IN)  :: Element
3391  INTEGER ,       INTENT(IN)  :: Data(*)
3392  INTEGER ,       INTENT(IN)  :: Count
3393  INTEGER                     :: Status
3394! Local
3395  CHARACTER*132   :: locElement
3396  INTEGER i, typesize, itypesize, tasks_in_group, ierr, comm_io_group
3397  REAL dummy
3398  INTEGER, EXTERNAL :: use_package
3399!
3400
3401!!JMTIMING  CALL start_timing
3402  locElement = Element
3403
3404  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_put_dom_ti_integer' )
3405
3406  IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
3407    IF ( int_handle_in_use( DataHandle ) ) THEN
3408      CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
3409      CALL MPI_TYPE_SIZE( MPI_INTEGER, typesize, ierr )
3410
3411!ARPDBG - potential bug: we have no access to the type of I/O being used for
3412! this data, so if PNETCDF_QUILT is defined we assume that is what is being used.
3413#ifdef PNETCDF_QUILT
3414      IF ( compute_group_master(1) )THEN
3415         CALL int_gen_ti_header( hdrbuf, hdrbufsize, itypesize, typesize, &
3416                                 DataHandle, locElement, Data, Count,     &
3417                                 int_dom_ti_integer )
3418      ELSE
3419         CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3420      ENDIF
3421#else
3422      IF ( wrf_dm_on_monitor() ) THEN
3423         CALL int_gen_ti_header( hdrbuf, hdrbufsize, itypesize, typesize, &
3424                                 DataHandle, locElement, Data, Count,     &
3425                                 int_dom_ti_integer )
3426      ELSE
3427         CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3428      ENDIF
3429#endif
3430
3431      iserver = get_server_id ( DataHandle )
3432      CALL get_mpi_comm_io_groups( comm_io_group , iserver )
3433      CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
3434
3435!!JMTIMING      CALL start_timing
3436      ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
3437      reduced = 0
3438      reduced(1) = hdrbufsize
3439#ifdef PNETCDF_QUILT
3440      IF ( compute_group_master(1) ) reduced(2) = DataHandle
3441#else
3442      IF ( wrf_dm_on_monitor() )  reduced(2) = DataHandle
3443#endif
3444      CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER,  &
3445                       MPI_SUM, tasks_in_group-1,          &   ! root = tasks_in_group-1, the I/O server
3446                       comm_io_group, ierr )
3447
3448!!JMTIMING       CALL end_timing("MPI_Reduce in wrf_quilt_put_dom_ti_integer")
3449      ! send data to the i/o processor
3450      CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group,            &
3451                            onebyte,                       &
3452                            hdrbuf, hdrbufsize , &
3453                            dummy, 0 )
3454    ENDIF
3455  ENDIF
3456  CALL wrf_debug ( DEBUG_LVL, 'returning from wrf_quilt_put_dom_ti_integer' )
3457!!JMTIMING   CALL end_timing("wrf_quilt_put_dom_ti_integer" )
3458
3459#endif
3460RETURN
3461END SUBROUTINE wrf_quilt_put_dom_ti_integer
3462
3463SUBROUTINE wrf_quilt_get_dom_ti_logical ( DataHandle,Element,   Data, Count, Outcount, Status )
3464!<DESCRIPTION>
3465! Instruct the I/O quilt servers to attempt to read Count words of time
3466! independent domain metadata named "Element"
3467! from the open dataset described by DataHandle.
3468! Metadata of type logical are
3469! stored in array Data.
3470! Actual number of words read is returned in OutCount.
3471! This routine is called only by client (compute) tasks. 
3472!
3473! This is not yet supported.
3474!</DESCRIPTION>
3475#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3476  IMPLICIT NONE
3477  INTEGER ,       INTENT(IN)  :: DataHandle
3478  CHARACTER*(*) , INTENT(IN)  :: Element
3479  logical                     :: Data(*)
3480  INTEGER ,       INTENT(IN)  :: Count
3481  INTEGER                      :: OutCount
3482  INTEGER                     :: Status
3483!  CALL wrf_message('wrf_quilt_get_dom_ti_logical not supported yet')
3484#endif
3485RETURN
3486END SUBROUTINE wrf_quilt_get_dom_ti_logical
3487
3488SUBROUTINE wrf_quilt_put_dom_ti_logical ( DataHandle,Element,   Data, Count,  Status )
3489!<DESCRIPTION>
3490! Instruct the I/O quilt servers to write Count words of time independent
3491! domain metadata named "Element"
3492! to the open dataset described by DataHandle.
3493! Metadata of type logical are
3494! copied from array Data.
3495! This routine is called only by client (compute) tasks. 
3496!
3497! This is not yet supported.
3498!</DESCRIPTION>
3499#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3500  IMPLICIT NONE
3501  INTEGER ,       INTENT(IN)  :: DataHandle
3502  CHARACTER*(*) , INTENT(IN)  :: Element
3503  logical ,            INTENT(IN) :: Data(*)
3504  INTEGER ,       INTENT(IN)  :: Count
3505  INTEGER                     :: Status
3506! Local
3507  INTEGER i
3508  INTEGER one_or_zero(Count)
3509
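! Logicals are shipped as 0/1 integers so that this routine can reuse the
! integer metadata path (wrf_quilt_put_dom_ti_integer, below) rather than
! defining a separate header encoding for logicals.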
3510  DO i = 1, Count
3511    IF ( Data(i) ) THEN
3512      one_or_zero(i) = 1
3513    ELSE
3514      one_or_zero(i) = 0
3515    ENDIF
3516  ENDDO
3517
3518  CALL wrf_quilt_put_dom_ti_integer ( DataHandle,Element,   one_or_zero, Count,  Status )
3519#endif
3520RETURN
3521END SUBROUTINE wrf_quilt_put_dom_ti_logical
3522
3523SUBROUTINE wrf_quilt_get_dom_ti_char ( DataHandle,Element,   Data,  Status )
3524!<DESCRIPTION>
3525! Instruct the I/O quilt servers to attempt to read time independent
3526! domain metadata named "Element"
3527! from the open dataset described by DataHandle.
3528! Metadata of type char are
3529! stored in string Data.
3530! This routine is called only by client (compute) tasks. 
3531!
3532! This is not yet supported.
3533!</DESCRIPTION>
3534#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3535  IMPLICIT NONE
3536  INTEGER ,       INTENT(IN)  :: DataHandle
3537  CHARACTER*(*) , INTENT(IN)  :: Element
3538  CHARACTER*(*)               :: Data
3539  INTEGER                     :: Status
3540  CALL wrf_message('wrf_quilt_get_dom_ti_char not supported yet')
3541#endif
3542RETURN
3543END SUBROUTINE wrf_quilt_get_dom_ti_char
3544
3545SUBROUTINE wrf_quilt_put_dom_ti_char ( DataHandle, Element,  Data,  Status )
3546!<DESCRIPTION>
3547! Instruct the I/O quilt servers to write time independent
3548! domain metadata named "Element"
3549! to the open dataset described by DataHandle.
3550! Metadata of type char are
3551! copied from string Data.
3552! This routine is called only by client (compute) tasks. 
3553!</DESCRIPTION>
3554#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3555  USE module_wrf_quilt
3556  IMPLICIT NONE
3557  INCLUDE 'mpif.h'
3558#include "intio_tags.h"
3559  INTEGER ,       INTENT(IN)  :: DataHandle
3560  CHARACTER*(*) , INTENT(IN)  :: Element
3561  CHARACTER*(*) , INTENT(IN)  :: Data
3562  INTEGER                     :: Status
3563  INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group, me
3564  REAL dummy
3565!
3566!!JMTIMING  CALL start_timing
3567  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_put_dom_ti_char' )
3568
3569  IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
3570    IF ( int_handle_in_use( DataHandle ) ) THEN
3571      CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
3572
3573!ARPDBG - potential bug: we have no access to the type of I/O being used for
3574! this data, so if PNETCDF_QUILT is defined we assume that is what is being used.
3575#ifdef PNETCDF_QUILT
3576      IF(compute_group_master(1))THEN
3577         CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
3578                                      DataHandle, Element, "", Data, &
3579                                      int_dom_ti_char )
3580      ELSE
3581         CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3582      END IF
3583#else
3584      IF ( wrf_dm_on_monitor() ) THEN
3585         CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
3586                                      DataHandle, Element, "", Data, int_dom_ti_char )
3587      ELSE
3588         CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
3589      ENDIF
3590#endif
3591
3592      iserver = get_server_id ( DataHandle )
3593!  write(0,*)'wrf_quilt_put_dom_ti_char ',iserver
3594      CALL get_mpi_comm_io_groups( comm_io_group , iserver )
3595      CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
3596      ! send the size of my local buffer to the i/o task (obufsize doesn't mean anything here)
3597!!JMTIMING!  CALL start_timing
3598!write(0,*)'calling MPI_Barrier'
3599!  CALL MPI_Barrier( mpi_comm_local, ierr )
3600!write(0,*)'back from MPI_Barrier'
3601!!JMTIMING!   CALL end_timing("MPI_Barrier in wrf_quilt_put_dom_ti_char")
3602
3603!!JMTIMING      CALL start_timing
3604      ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
3605      reduced_dummy = 0
3606      reduced = 0
3607      reduced(1) = hdrbufsize
3608#ifdef PNETCDF_QUILT
3609      IF(compute_group_master(1))    reduced(2) = DataHandle
3610#else
3611      IF ( wrf_dm_on_monitor() )  reduced(2) = DataHandle
3612#endif
3613!call mpi_comm_rank( comm_io_group , me, ierr )
3614
3615      CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER,  &
3616                       MPI_SUM, tasks_in_group-1,          &   ! root = tasks_in_group-1 is the I/O server
3617                       comm_io_group, ierr )
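      ! (From this reduction the I/O server learns the header buffer size
      ! and, from the one compute task that set it, the DataHandle, before
      ! the gather below.)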
3618
3619!!JMTIMING       CALL end_timing("MPI_Reduce in wrf_quilt_put_dom_ti_char")
3620      ! send data to the i/o processor
3621!!JMTIMING  CALL start_timing
3622
3623      CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group,            &
3624                            onebyte,                       &
3625                            hdrbuf, hdrbufsize , &
3626                            dummy, 0 )
3627!!JMTIMING   CALL end_timing("collect_on_comm in wrf_quilt_put_dom_ti_char")
3628    ENDIF
3629  ENDIF
3630!!JMTIMING   CALL end_timing("wrf_quilt_put_dom_ti_char")
3631
3632#endif
3633RETURN
3634END SUBROUTINE wrf_quilt_put_dom_ti_char
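!
! Illustrative sketch (not part of the original source): compute tasks
! normally reach wrf_quilt_put_dom_ti_char through the generic metadata
! interfaces in module_io.F, but a direct call on an open quilted dataset
! handle "dh" (a hypothetical name) would look like:
!
!   INTEGER :: dh, ierr
!   CALL wrf_quilt_put_dom_ti_char( dh, 'TITLE', 'OUTPUT FROM WRF', ierr )
!
! Only the monitor task (or the compute-group master under PNETCDF_QUILT)
! generates a real header; all other compute tasks contribute no-op
! headers so that collect_on_comm sees a contribution from every rank.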
3635
3636SUBROUTINE wrf_quilt_get_dom_td_real ( DataHandle,Element, DateStr,  Data, Count, Outcount, Status )
3637!<DESCRIPTION>
3638! Instruct the I/O quilt servers to attempt to read Count words of time
3639! dependent domain metadata named "Element" valid at time DateStr
3640! from the open dataset described by DataHandle.
3641! Metadata of type real are
3642! stored in array Data.
3643! Actual number of words read is returned in OutCount.
3644! This routine is called only by client (compute) tasks. 
3645!
3646! This is not yet supported.
3647!</DESCRIPTION>
3648#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3649  IMPLICIT NONE
3650  INTEGER ,       INTENT(IN)  :: DataHandle
3651  CHARACTER*(*) , INTENT(IN)  :: Element
3652  CHARACTER*(*) , INTENT(IN)  :: DateStr
3653  real                        :: Data(*)
3654  INTEGER ,       INTENT(IN)  :: Count
3655  INTEGER                     :: OutCount
3656  INTEGER                     :: Status
3657#endif
3658RETURN
3659END SUBROUTINE wrf_quilt_get_dom_td_real
3660
3661SUBROUTINE wrf_quilt_put_dom_td_real ( DataHandle,Element, DateStr,  Data, Count,  Status )
3662!<DESCRIPTION>
3663! Instruct the I/O quilt servers to write Count words of time dependent
3664! domain metadata named "Element" valid at time DateStr
3665! to the open dataset described by DataHandle.
3666! Metadata of type real are
3667! copied from array Data.
3668! This routine is called only by client (compute) tasks. 
3669!
3670! This is not yet supported.
3671!</DESCRIPTION>
3672#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3673  IMPLICIT NONE
3674  INTEGER ,       INTENT(IN)  :: DataHandle
3675  CHARACTER*(*) , INTENT(IN)  :: Element
3676  CHARACTER*(*) , INTENT(IN)  :: DateStr
3677  real ,            INTENT(IN) :: Data(*)
3678  INTEGER ,       INTENT(IN)  :: Count
3679  INTEGER                     :: Status
3680#endif
3681RETURN
3682END SUBROUTINE wrf_quilt_put_dom_td_real
3683
3684SUBROUTINE wrf_quilt_get_dom_td_double ( DataHandle,Element, DateStr,  Data, Count, Outcount, Status )
3685!<DESCRIPTION>
3686! Instruct the I/O quilt servers to attempt to read Count words of time
3687! dependent domain metadata named "Element" valid at time DateStr
3688! from the open dataset described by DataHandle.
3689! Metadata of type double are
3690! stored in array Data.
3691! Actual number of words read is returned in OutCount.
3692! This routine is called only by client (compute) tasks. 
3693!
3694! This is not yet supported.
3695!</DESCRIPTION>
3696#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3697  IMPLICIT NONE
3698  INTEGER ,       INTENT(IN)  :: DataHandle
3699  CHARACTER*(*) , INTENT(IN)  :: Element
3700  CHARACTER*(*) , INTENT(IN)  :: DateStr
3701  real*8                          :: Data(*)
3702  INTEGER ,       INTENT(IN)  :: Count
3703  INTEGER                      :: OutCount
3704  INTEGER                     :: Status
3705#endif
3706  CALL wrf_error_fatal('wrf_quilt_get_dom_td_double not supported yet')
3707RETURN
3708END SUBROUTINE wrf_quilt_get_dom_td_double
3709
3710SUBROUTINE wrf_quilt_put_dom_td_double ( DataHandle,Element, DateStr,  Data, Count,  Status )
3711!<DESCRIPTION>
3712! Instruct the I/O quilt servers to write Count words of time dependent
3713! domain metadata named "Element" valid at time DateStr
3714! to the open dataset described by DataHandle.
3715! Metadata of type double are
3716! copied from array Data.
3717! This routine is called only by client (compute) tasks. 
3718!
3719! This is not yet supported.
3720!</DESCRIPTION>
3721#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3722  IMPLICIT NONE
3723  INTEGER ,       INTENT(IN)  :: DataHandle
3724  CHARACTER*(*) , INTENT(IN)  :: Element
3725  CHARACTER*(*) , INTENT(IN)  :: DateStr
3726  real*8 ,            INTENT(IN) :: Data(*)
3727  INTEGER ,       INTENT(IN)  :: Count
3728  INTEGER                     :: Status
3729#endif
3730  CALL wrf_error_fatal('wrf_quilt_put_dom_td_double not supported yet')
3731RETURN
3732END SUBROUTINE wrf_quilt_put_dom_td_double
3733
3734SUBROUTINE wrf_quilt_get_dom_td_integer ( DataHandle,Element, DateStr,  Data, Count, Outcount, Status )
3735!<DESCRIPTION>
3736! Instruct the I/O quilt servers to attempt to read Count words of time
3737! dependent domain metadata named "Element" valid at time DateStr
3738! from the open dataset described by DataHandle.
3739! Metadata of type integer are
3740! stored in array Data.
3741! Actual number of words read is returned in OutCount.
3742! This routine is called only by client (compute) tasks. 
3743!
3744! This is not yet supported.
3745!</DESCRIPTION>
3746#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3747  IMPLICIT NONE
3748  INTEGER ,       INTENT(IN)  :: DataHandle
3749  CHARACTER*(*) , INTENT(IN)  :: Element
3750  CHARACTER*(*) , INTENT(IN)  :: DateStr
3751  integer                          :: Data(*)
3752  INTEGER ,       INTENT(IN)  :: Count
3753  INTEGER                      :: OutCount
3754  INTEGER                     :: Status
3755#endif
3756RETURN
3757END SUBROUTINE wrf_quilt_get_dom_td_integer
3758
3759SUBROUTINE wrf_quilt_put_dom_td_integer ( DataHandle,Element, DateStr,  Data, Count,  Status )
3760!<DESCRIPTION>
3761! Instruct the I/O quilt servers to write Count words of time dependent
3762! domain metadata named "Element" valid at time DateStr
3763! to the open dataset described by DataHandle.
3764! Metadata of type integer are
3765! copied from array Data.
3766! This routine is called only by client (compute) tasks. 
3767!
3768! This is not yet supported.
3769!</DESCRIPTION>
3770#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3771  IMPLICIT NONE
3772  INTEGER ,       INTENT(IN)  :: DataHandle
3773  CHARACTER*(*) , INTENT(IN)  :: Element
3774  CHARACTER*(*) , INTENT(IN)  :: DateStr
3775  integer ,            INTENT(IN) :: Data(*)
3776  INTEGER ,       INTENT(IN)  :: Count
3777  INTEGER                     :: Status
3778#endif
3779RETURN
3780END SUBROUTINE wrf_quilt_put_dom_td_integer
3781
3782SUBROUTINE wrf_quilt_get_dom_td_logical ( DataHandle,Element, DateStr,  Data, Count, Outcount, Status )
3783!<DESCRIPTION>
3784! Instruct the I/O quilt servers to attempt to read Count words of time
3785! dependent domain metadata named "Element" valid at time DateStr
3786! from the open dataset described by DataHandle.
3787! Metadata of type logical are
3788! stored in array Data.
3789! Actual number of words read is returned in OutCount.
3790! This routine is called only by client (compute) tasks. 
3791!
3792! This is not yet supported.
3793!</DESCRIPTION>
3794#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3795  IMPLICIT NONE
3796  INTEGER ,       INTENT(IN)  :: DataHandle
3797  CHARACTER*(*) , INTENT(IN)  :: Element
3798  CHARACTER*(*) , INTENT(IN)  :: DateStr
3799  logical                          :: Data(*)
3800  INTEGER ,       INTENT(IN)  :: Count
3801  INTEGER                      :: OutCount
3802  INTEGER                     :: Status
3803#endif
3804RETURN
3805END SUBROUTINE wrf_quilt_get_dom_td_logical
3806
3807SUBROUTINE wrf_quilt_put_dom_td_logical ( DataHandle,Element, DateStr,  Data, Count,  Status )
3808!<DESCRIPTION>
3809! Instruct the I/O quilt servers to write Count words of time dependent
3810! domain metadata named "Element" valid at time DateStr
3811! to the open dataset described by DataHandle.
3812! Metadata of type logical are
3813! copied from array Data.
3814! This routine is called only by client (compute) tasks. 
3815!
3816! This is not yet supported.
3817!</DESCRIPTION>
3818#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3819  IMPLICIT NONE
3820  INTEGER ,       INTENT(IN)  :: DataHandle
3821  CHARACTER*(*) , INTENT(IN)  :: Element
3822  CHARACTER*(*) , INTENT(IN)  :: DateStr
3823  logical ,            INTENT(IN) :: Data(*)
3824  INTEGER ,       INTENT(IN)  :: Count
3825  INTEGER                     :: Status
3826#endif
3827RETURN
3828END SUBROUTINE wrf_quilt_put_dom_td_logical
3829
3830SUBROUTINE wrf_quilt_get_dom_td_char ( DataHandle,Element, DateStr,  Data,  Status )
3831!<DESCRIPTION>
3832! Instruct the I/O quilt servers to attempt to read time dependent
3833! domain metadata named "Element" valid at time DateStr
3834! from the open dataset described by DataHandle.
3835! Metadata of type char are
3836! stored in string Data.
3837! This routine is called only by client (compute) tasks. 
3838!
3839! This is not yet supported.
3840!</DESCRIPTION>
3841#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3842  IMPLICIT NONE
3843  INTEGER ,       INTENT(IN)  :: DataHandle
3844  CHARACTER*(*) , INTENT(IN)  :: Element
3845  CHARACTER*(*) , INTENT(IN)  :: DateStr
3846  CHARACTER*(*)               :: Data
3847  INTEGER                     :: Status
3848#endif
3849RETURN
3850END SUBROUTINE wrf_quilt_get_dom_td_char
3851
3852SUBROUTINE wrf_quilt_put_dom_td_char ( DataHandle,Element, DateStr,  Data,  Status )
3853!<DESCRIPTION>
3854! Instruct the I/O quilt servers to write time dependent
3855! domain metadata named "Element" valid at time DateStr
3856! to the open dataset described by DataHandle.
3857! Metadata of type char are
3858! copied from string Data.
3859! This routine is called only by client (compute) tasks. 
3860!
3861! This is not yet supported.
3862!</DESCRIPTION>
3863#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3864  IMPLICIT NONE
3865  INTEGER ,       INTENT(IN)  :: DataHandle
3866  CHARACTER*(*) , INTENT(IN)  :: Element
3867  CHARACTER*(*) , INTENT(IN)  :: DateStr
3868  CHARACTER*(*) , INTENT(IN) :: Data
3869  INTEGER                          :: Status
3870#endif
3871RETURN
3872END SUBROUTINE wrf_quilt_put_dom_td_char
3873
3874SUBROUTINE wrf_quilt_get_var_ti_real ( DataHandle,Element,  Varname, Data, Count, Outcount, Status )
3875!<DESCRIPTION>
3876! Instruct the I/O quilt servers to attempt to read Count words of time
3877! independent attribute "Element" of variable "Varname"
3878! from the open dataset described by DataHandle.
3879! Attribute of type real is
3880! stored in array Data.
3881! Actual number of words read is returned in OutCount.
3882! This routine is called only by client (compute) tasks. 
3883!
3884! This is not yet supported.
3885!</DESCRIPTION>
3886#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3887  IMPLICIT NONE
3888  INTEGER ,       INTENT(IN)  :: DataHandle
3889  CHARACTER*(*) , INTENT(IN)  :: Element
3890  CHARACTER*(*) , INTENT(IN)  :: VarName
3891  real                          :: Data(*)
3892  INTEGER ,       INTENT(IN)  :: Count
3893  INTEGER                     :: OutCount
3894  INTEGER                     :: Status
3895#endif
3896RETURN
3897END SUBROUTINE wrf_quilt_get_var_ti_real
3898
3899SUBROUTINE wrf_quilt_put_var_ti_real ( DataHandle,Element,  Varname, Data, Count,  Status )
3900!<DESCRIPTION>
3901! Instruct the I/O quilt servers to write Count words of time independent
3902! attribute "Element" of variable "Varname"
3903! to the open dataset described by DataHandle.
3904! Attribute of type real is
3905! copied from array Data.
3906! This routine is called only by client (compute) tasks. 
3907!
3908! This is not yet supported.
3909!</DESCRIPTION>
3910#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3911  IMPLICIT NONE
3912  INTEGER ,       INTENT(IN)  :: DataHandle
3913  CHARACTER*(*) , INTENT(IN)  :: Element
3914  CHARACTER*(*) , INTENT(IN)  :: VarName
3915  real ,            INTENT(IN) :: Data(*)
3916  INTEGER ,       INTENT(IN)  :: Count
3917  INTEGER                     :: Status
3918#endif
3919RETURN
3920END SUBROUTINE wrf_quilt_put_var_ti_real
3921
3922SUBROUTINE wrf_quilt_get_var_ti_double ( DataHandle,Element,  Varname, Data, Count, Outcount, Status )
3923!<DESCRIPTION>
3924! Instruct the I/O quilt servers to attempt to read Count words of time
3925! independent attribute "Element" of variable "Varname"
3926! from the open dataset described by DataHandle.
3927! Attribute of type double is
3928! stored in array Data.
3929! Actual number of words read is returned in OutCount.
3930! This routine is called only by client (compute) tasks. 
3931!
3932! This is not yet supported.
3933!</DESCRIPTION>
3934#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3935  IMPLICIT NONE
3936  INTEGER ,       INTENT(IN)  :: DataHandle
3937  CHARACTER*(*) , INTENT(IN)  :: Element
3938  CHARACTER*(*) , INTENT(IN)  :: VarName
3939  real*8                      :: Data(*)
3940  INTEGER ,       INTENT(IN)  :: Count
3941  INTEGER                     :: OutCount
3942  INTEGER                     :: Status
3943#endif
3944  CALL wrf_error_fatal('wrf_quilt_get_var_ti_double not supported yet')
3945RETURN
3946END SUBROUTINE wrf_quilt_get_var_ti_double
3947
3948SUBROUTINE wrf_quilt_put_var_ti_double ( DataHandle,Element,  Varname, Data, Count,  Status )
3949!<DESCRIPTION>
3950! Instruct the I/O quilt servers to write Count words of time independent
3951! attribute "Element" of variable "Varname"
3952! to the open dataset described by DataHandle.
3953! Attribute of type double is
3954! copied from array Data.
3955! This routine is called only by client (compute) tasks. 
3956!
3957! This is not yet supported.
3958!</DESCRIPTION>
3959#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3960  IMPLICIT NONE
3961  INTEGER ,       INTENT(IN)  :: DataHandle
3962  CHARACTER*(*) , INTENT(IN)  :: Element
3963  CHARACTER*(*) , INTENT(IN)  :: VarName
3964  real*8 ,        INTENT(IN) :: Data(*)
3965  INTEGER ,       INTENT(IN)  :: Count
3966  INTEGER                     :: Status
3967#endif
3968  CALL wrf_error_fatal('wrf_quilt_put_var_ti_double not supported yet')
3969RETURN
3970END SUBROUTINE wrf_quilt_put_var_ti_double
3971
3972SUBROUTINE wrf_quilt_get_var_ti_integer ( DataHandle,Element,  Varname, Data, Count, Outcount, Status )
3973!<DESCRIPTION>
3974! Instruct the I/O quilt servers to attempt to read Count words of time
3975! independent attribute "Element" of variable "Varname"
3976! from the open dataset described by DataHandle.
3977! Attribute of type integer is
3978! stored in array Data.
3979! Actual number of words read is returned in OutCount.
3980! This routine is called only by client (compute) tasks. 
3981!
3982! This is not yet supported.
3983!</DESCRIPTION>
3984#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3985  IMPLICIT NONE
3986  INTEGER ,       INTENT(IN)  :: DataHandle
3987  CHARACTER*(*) , INTENT(IN)  :: Element
3988  CHARACTER*(*) , INTENT(IN)  :: VarName
3989  integer                     :: Data(*)
3990  INTEGER ,       INTENT(IN)  :: Count
3991  INTEGER                     :: OutCount
3992  INTEGER                     :: Status
3993#endif
3994RETURN
3995END SUBROUTINE wrf_quilt_get_var_ti_integer
3996
3997SUBROUTINE wrf_quilt_put_var_ti_integer ( DataHandle,Element,  Varname, Data, Count,  Status )
3998!<DESCRIPTION>
3999! Instruct the I/O quilt servers to write Count words of time independent
4000! attribute "Element" of variable "Varname"
4001! to the open dataset described by DataHandle.
4002! Attribute of type integer is
4003! copied from array Data.
4004! This routine is called only by client (compute) tasks. 
4005!
4006! This is not yet supported.
4007!</DESCRIPTION>
4008#if defined( DM_PARALLEL ) && !defined( STUBMPI )
4009  IMPLICIT NONE
4010  INTEGER ,       INTENT(IN)  :: DataHandle
4011  CHARACTER*(*) , INTENT(IN)  :: Element
4012  CHARACTER*(*) , INTENT(IN)  :: VarName
4013  integer ,            INTENT(IN) :: Data(*)
4014  INTEGER ,       INTENT(IN)  :: Count
4015  INTEGER                     :: Status
4016#endif
4017RETURN
4018END SUBROUTINE wrf_quilt_put_var_ti_integer
4019
4020SUBROUTINE wrf_quilt_get_var_ti_logical ( DataHandle,Element,  Varname, Data, Count, Outcount, Status )
4021!<DESCRIPTION>
4022! Instruct the I/O quilt servers to attempt to read Count words of time
4023! independent attribute "Element" of variable "Varname"
4024! from the open dataset described by DataHandle.
4025! Attribute of type logical is
4026! stored in array Data.
4027! Actual number of words read is returned in OutCount.
4028! This routine is called only by client (compute) tasks. 
4029!
4030! This is not yet supported.
4031!</DESCRIPTION>
4032#if defined( DM_PARALLEL ) && !defined( STUBMPI )
4033  IMPLICIT NONE
4034  INTEGER ,       INTENT(IN)  :: DataHandle
4035  CHARACTER*(*) , INTENT(IN)  :: Element
4036  CHARACTER*(*) , INTENT(IN)  :: VarName
4037  logical                     :: Data(*)
4038  INTEGER ,       INTENT(IN)  :: Count
4039  INTEGER                     :: OutCount
4040  INTEGER                     :: Status
4041#endif
4042RETURN
4043END SUBROUTINE wrf_quilt_get_var_ti_logical
4044
4045SUBROUTINE wrf_quilt_put_var_ti_logical ( DataHandle,Element,  Varname, Data, Count,  Status )
4046!<DESCRIPTION>
4047! Instruct the I/O quilt servers to write Count words of time independent
4048! attribute "Element" of variable "Varname"
4049! to the open dataset described by DataHandle.
4050! Attribute of type logical is
4051! copied from array Data.
4052! This routine is called only by client (compute) tasks. 
4053!
4054! This is not yet supported.
4055!</DESCRIPTION>
4056#if defined( DM_PARALLEL ) && !defined( STUBMPI )
4057  IMPLICIT NONE
4058  INTEGER ,       INTENT(IN)  :: DataHandle
4059  CHARACTER*(*) , INTENT(IN)  :: Element
4060  CHARACTER*(*) , INTENT(IN)  :: VarName
4061  logical ,            INTENT(IN) :: Data(*)
4062  INTEGER ,       INTENT(IN)  :: Count
4063  INTEGER                     :: Status
4064#endif
4065RETURN
4066END SUBROUTINE wrf_quilt_put_var_ti_logical
4067
4068SUBROUTINE wrf_quilt_get_var_ti_char ( DataHandle,Element,  Varname, Data,  Status )
4069!<DESCRIPTION>
4070! Instruct the I/O quilt servers to attempt to read time independent
4071! attribute "Element" of variable "Varname"
4072! from the open dataset described by DataHandle.
4073! Attribute of type char is
4074! stored in string Data.
4075! This routine is called only by client (compute) tasks. 
4076!
4077! This is not yet supported.
4078!</DESCRIPTION>
4079#if defined( DM_PARALLEL ) && !defined( STUBMPI )
4080  IMPLICIT NONE
4081  INTEGER ,       INTENT(IN)  :: DataHandle
4082  CHARACTER*(*) , INTENT(IN)  :: Element
4083  CHARACTER*(*) , INTENT(IN)  :: VarName
4084  CHARACTER*(*)               :: Data
4085  INTEGER                     :: Status
4086#endif
4087RETURN
4088END SUBROUTINE wrf_quilt_get_var_ti_char
4089
4090SUBROUTINE wrf_quilt_put_var_ti_char ( DataHandle,Element,  Varname, Data,  Status )
4091!<DESCRIPTION>
4092! Instruct the I/O quilt servers to write time independent
4093! attribute "Element" of variable "Varname"
4094! to the open dataset described by DataHandle.
4095! Attribute of type char is
4096! copied from string Data.
4097! This routine is called only by client (compute) tasks. 
4098!</DESCRIPTION>
4099
4100#if defined( DM_PARALLEL ) && !defined( STUBMPI )
4101  USE module_wrf_quilt
4102  IMPLICIT NONE
4103  INCLUDE 'mpif.h'
4104#include "intio_tags.h"
4105  INTEGER ,       INTENT(IN)  :: DataHandle
4106  CHARACTER*(*) , INTENT(IN)  :: Element
4107  CHARACTER*(*) , INTENT(IN)  :: VarName
4108  CHARACTER*(*) , INTENT(IN)  :: Data
4109  INTEGER                     :: Status
4110  INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group
4111  REAL dummy
4112!
4113
4114!!JMTIMING  CALL start_timing
4115  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_put_var_ti_char' )
4116
4117  IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
4118    IF ( int_handle_in_use( DataHandle ) ) THEN
4119      CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
4120
4121#ifdef PNETCDF_QUILT
4122      IF ( compute_group_master(1) ) THEN
4123         CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
4124                                      DataHandle, TRIM(Element),     &
4125                                      TRIM(VarName), TRIM(Data), int_var_ti_char )
4126      ELSE
4127         CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
4128      ENDIF
4129#else
4130      IF ( wrf_dm_on_monitor() ) THEN
4131         CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
4132                                      DataHandle, TRIM(Element),     &
4133                                      TRIM(VarName), TRIM(Data), int_var_ti_char )
4134      ELSE
4135         CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
4136      ENDIF
4137#endif
4138
4139      iserver = get_server_id ( DataHandle )
4140      CALL get_mpi_comm_io_groups( comm_io_group , iserver )
4141      CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
4142
4143!!JMTIMING      CALL start_timing
4144      ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
4145      reduced = 0
4146      reduced(1) = hdrbufsize
4147#ifdef PNETCDF_QUILT
4148      IF ( compute_group_master(1) ) reduced(2) = DataHandle
4149#else
4150      IF ( wrf_dm_on_monitor() )  reduced(2) = DataHandle
4151#endif
4152      CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER,  &
4153                       MPI_SUM, tasks_in_group-1,          &   ! root = tasks_in_group-1 is the I/O server
4154                       comm_io_group, ierr )
4155!!JMTIMING       CALL end_timing("MPI_Reduce in wrf_quilt_put_var_ti_char")
4156      ! send data to the i/o processor
4157      CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group,            &
4158                            onebyte,                       &
4159                            hdrbuf, hdrbufsize , &
4160                            dummy, 0 )
4161    ENDIF
4162  ENDIF
4163!!JMTIMING   CALL end_timing("wrf_quilt_put_var_ti_char" )
4164
4165#endif
4166RETURN
4167END SUBROUTINE wrf_quilt_put_var_ti_char
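!
! Illustrative sketch (not part of the original source): attaching a time
! independent character attribute, such as a units string, to a variable
! in an open quilted dataset (handle "dh" is a hypothetical name):
!
!   INTEGER :: dh, ierr
!   CALL wrf_quilt_put_var_ti_char( dh, 'units', 'T2', 'K', ierr )
!
! As with the domain metadata routine above, one task generates the real
! header and the rest send no-op headers, which are then gathered onto
! the I/O server with collect_on_comm.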
4168
4169SUBROUTINE wrf_quilt_get_var_td_real ( DataHandle,Element,  DateStr,Varname, Data, Count, Outcount, Status )
4170!<DESCRIPTION>
4171! Instruct the I/O quilt servers to attempt to read Count words of time
4172! dependent attribute "Element" of variable "Varname" valid at time DateStr
4173! from the open dataset described by DataHandle.
4174! Attribute of type real is
4175! stored in array Data.
4176! Actual number of words read is returned in OutCount.
4177! This routine is called only by client (compute) tasks. 
4178!
4179! This is not yet supported.
4180!</DESCRIPTION>
4181#if defined( DM_PARALLEL ) && !defined( STUBMPI )
4182  IMPLICIT NONE
4183  INTEGER ,       INTENT(IN)  :: DataHandle
4184  CHARACTER*(*) , INTENT(IN)  :: Element
4185  CHARACTER*(*) , INTENT(IN)  :: DateStr
4186  CHARACTER*(*) , INTENT(IN)  :: VarName
4187  real                        :: Data(*)
4188  INTEGER ,       INTENT(IN)  :: Count
4189  INTEGER                     :: OutCount
4190  INTEGER                     :: Status
4191#endif
4192RETURN
4193END SUBROUTINE wrf_quilt_get_var_td_real
4194
4195SUBROUTINE wrf_quilt_put_var_td_real ( DataHandle,Element,  DateStr,Varname, Data, Count,  Status )
4196!<DESCRIPTION>
4197! Instruct the I/O quilt servers to write Count words of time dependent
4198! attribute "Element" of variable "Varname" valid at time DateStr
4199! to the open dataset described by DataHandle.
4200! Attribute of type real is
4201! copied from array Data.
4202! This routine is called only by client (compute) tasks. 
4203!
4204! This is not yet supported.
4205!</DESCRIPTION>
4206#if defined( DM_PARALLEL ) && !defined( STUBMPI )
4207  IMPLICIT NONE
4208  INTEGER ,       INTENT(IN)  :: DataHandle
4209  CHARACTER*(*) , INTENT(IN)  :: Element
4210  CHARACTER*(*) , INTENT(IN)  :: DateStr
4211  CHARACTER*(*) , INTENT(IN)  :: VarName
4212  real ,            INTENT(IN) :: Data(*)
4213  INTEGER ,       INTENT(IN)  :: Count
4214  INTEGER                     :: Status
4215#endif
4216RETURN
4217END SUBROUTINE wrf_quilt_put_var_td_real
4218
4219SUBROUTINE wrf_quilt_get_var_td_double ( DataHandle,Element,  DateStr,Varname, Data, Count, Outcount, Status )
4220!<DESCRIPTION>
4221! Instruct the I/O quilt servers to attempt to read Count words of time
4222! dependent attribute "Element" of variable "Varname" valid at time DateStr
4223! from the open dataset described by DataHandle.
4224! Attribute of type double is
4225! stored in array Data.
4226! Actual number of words read is returned in OutCount.
4227! This routine is called only by client (compute) tasks. 
4228!
4229! This is not yet supported.
4230!</DESCRIPTION>
4231#if defined( DM_PARALLEL ) && !defined( STUBMPI )
4232  IMPLICIT NONE
4233  INTEGER ,       INTENT(IN)  :: DataHandle
4234  CHARACTER*(*) , INTENT(IN)  :: Element
4235  CHARACTER*(*) , INTENT(IN)  :: DateStr
4236  CHARACTER*(*) , INTENT(IN)  :: VarName
4237  real*8                      :: Data(*)
4238  INTEGER ,       INTENT(IN)  :: Count
4239  INTEGER                     :: OutCount
4240  INTEGER                     :: Status
4241#endif
4242  CALL wrf_error_fatal('wrf_quilt_get_var_td_double not supported yet')
4243RETURN
4244END SUBROUTINE wrf_quilt_get_var_td_double
4245
4246SUBROUTINE wrf_quilt_put_var_td_double ( DataHandle,Element,  DateStr,Varname, Data, Count,  Status )
4247!<DESCRIPTION>
4248! Instruct the I/O quilt servers to write Count words of time dependent
4249! attribute "Element" of variable "Varname" valid at time DateStr
4250! to the open dataset described by DataHandle.
4251! Attribute of type double is
4252! copied from array Data.
4253! This routine is called only by client (compute) tasks. 
4254!
4255! This is not yet supported.
4256!</DESCRIPTION>
4257#if defined( DM_PARALLEL ) && !defined( STUBMPI )
4258  IMPLICIT NONE
4259  INTEGER ,       INTENT(IN)  :: DataHandle
4260  CHARACTER*(*) , INTENT(IN)  :: Element
4261  CHARACTER*(*) , INTENT(IN)  :: DateStr
4262  CHARACTER*(*) , INTENT(IN)  :: VarName
4263  real*8 ,            INTENT(IN) :: Data(*)
4264  INTEGER ,       INTENT(IN)  :: Count
4265  INTEGER                     :: Status
4266#endif
4267  CALL wrf_error_fatal('wrf_quilt_put_var_td_double not supported yet')
4268RETURN
4269END SUBROUTINE wrf_quilt_put_var_td_double
4270
4271SUBROUTINE wrf_quilt_get_var_td_integer ( DataHandle,Element,  DateStr,Varname, Data, Count, Outcount,Status)
4272!<DESCRIPTION>
4273! Instruct the I/O quilt servers to attempt to read Count words of time
4274! dependent attribute "Element" of variable "Varname" valid at time DateStr
4275! from the open dataset described by DataHandle.
4276! Attribute of type integer is
4277! stored in array Data.
4278! Actual number of words read is returned in OutCount.
4279! This routine is called only by client (compute) tasks. 
4280!
4281! This is not yet supported.
4282!</DESCRIPTION>
4283#if defined( DM_PARALLEL ) && !defined( STUBMPI )
4284  IMPLICIT NONE
4285  INTEGER ,       INTENT(IN)  :: DataHandle
4286  CHARACTER*(*) , INTENT(IN)  :: Element
4287  CHARACTER*(*) , INTENT(IN)  :: DateStr
4288  CHARACTER*(*) , INTENT(IN)  :: VarName
4289  integer                     :: Data(*)
4290  INTEGER ,       INTENT(IN)  :: Count
4291  INTEGER                     :: OutCount
4292  INTEGER                     :: Status
4293#endif
4294RETURN
4295END SUBROUTINE wrf_quilt_get_var_td_integer
4296
4297SUBROUTINE wrf_quilt_put_var_td_integer ( DataHandle,Element,  DateStr,Varname, Data, Count,  Status )
4298!<DESCRIPTION>
4299! Instruct the I/O quilt servers to write Count words of time dependent
4300! attribute "Element" of variable "Varname" valid at time DateStr
4301! to the open dataset described by DataHandle.
4302! Attribute of type integer is
4303! copied from array Data.
4304! This routine is called only by client (compute) tasks. 
4305!
4306! This is not yet supported.
4307!</DESCRIPTION>
4308#if defined( DM_PARALLEL ) && !defined( STUBMPI )
4309  IMPLICIT NONE
4310  INTEGER ,       INTENT(IN)  :: DataHandle
4311  CHARACTER*(*) , INTENT(IN)  :: Element
4312  CHARACTER*(*) , INTENT(IN)  :: DateStr
4313  CHARACTER*(*) , INTENT(IN)  :: VarName
4314  integer ,       INTENT(IN)  :: Data(*)
4315  INTEGER ,       INTENT(IN)  :: Count
4316  INTEGER                     :: Status
4317#endif
4318RETURN
4319END SUBROUTINE wrf_quilt_put_var_td_integer
4320
4321SUBROUTINE wrf_quilt_get_var_td_logical ( DataHandle,Element,  DateStr,Varname, Data, Count, Outcount, Status )
4322!<DESCRIPTION>
4323! Instruct the I/O quilt servers to attempt to read Count words of time
4324! dependent attribute "Element" of variable "Varname" valid at time DateStr
4325! from the open dataset described by DataHandle.
4326! Attribute of type logical is
4327! stored in array Data.
4328! Actual number of words read is returned in OutCount.
4329! This routine is called only by client (compute) tasks. 
4330!
4331! This is not yet supported.
4332!</DESCRIPTION>
4333#if defined( DM_PARALLEL ) && !defined( STUBMPI )
4334  IMPLICIT NONE
4335  INTEGER ,       INTENT(IN)  :: DataHandle
4336  CHARACTER*(*) , INTENT(IN)  :: Element
4337  CHARACTER*(*) , INTENT(IN)  :: DateStr
4338  CHARACTER*(*) , INTENT(IN)  :: VarName
4339  logical                          :: Data(*)
4340  INTEGER ,       INTENT(IN)  :: Count
4341  INTEGER                      :: OutCount
4342  INTEGER                     :: Status
4343#endif
4344RETURN
4345END SUBROUTINE wrf_quilt_get_var_td_logical
4346
4347SUBROUTINE wrf_quilt_put_var_td_logical ( DataHandle,Element,  DateStr,Varname, Data, Count,  Status )
4348!<DESCRIPTION>
4349! Instruct the I/O quilt servers to write Count words of time dependent
4350! attribute "Element" of variable "Varname" valid at time DateStr
4351! to the open dataset described by DataHandle.
4352! Attribute of type logical is
4353! copied from array Data.
4354! This routine is called only by client (compute) tasks. 
4355!
4356! This is not yet supported.
4357!</DESCRIPTION>
4358#if defined( DM_PARALLEL ) && !defined( STUBMPI )
4359  IMPLICIT NONE
4360  INTEGER ,       INTENT(IN)  :: DataHandle
4361  CHARACTER*(*) , INTENT(IN)  :: Element
4362  CHARACTER*(*) , INTENT(IN)  :: DateStr
4363  CHARACTER*(*) , INTENT(IN)  :: VarName
4364  logical ,            INTENT(IN) :: Data(*)
4365  INTEGER ,       INTENT(IN)  :: Count
4366  INTEGER                     :: Status
4367#endif
4368RETURN
4369END SUBROUTINE wrf_quilt_put_var_td_logical
4370
4371SUBROUTINE wrf_quilt_get_var_td_char ( DataHandle,Element,  DateStr,Varname, Data,  Status )
4372!<DESCRIPTION>
4373! Instruct the I/O quilt servers to attempt to read time dependent
4374! attribute "Element" of variable "Varname" valid at time DateStr
4375! from the open dataset described by DataHandle.
4376! Attribute of type char is
4377! stored in string Data.
4378! This routine is called only by client (compute) tasks. 
4379!
4380! This is not yet supported.
4381!</DESCRIPTION>
4382#if defined( DM_PARALLEL ) && !defined( STUBMPI )
4383  IMPLICIT NONE
4384  INTEGER ,       INTENT(IN)  :: DataHandle
4385  CHARACTER*(*) , INTENT(IN)  :: Element
4386  CHARACTER*(*) , INTENT(IN)  :: DateStr
4387  CHARACTER*(*) , INTENT(IN)  :: VarName
4388  CHARACTER*(*)               :: Data
4389  INTEGER                     :: Status
4390#endif
4391RETURN
4392END SUBROUTINE wrf_quilt_get_var_td_char
4393
4394SUBROUTINE wrf_quilt_put_var_td_char ( DataHandle,Element,  DateStr,Varname, Data,  Status )
4395!<DESCRIPTION>
4396! Instruct the I/O quilt servers to write time dependent
4397! attribute "Element" of variable "Varname" valid at time DateStr
4398! to the open dataset described by DataHandle.
4399! Attribute of type char is
4400! copied from string Data.
4401! This routine is called only by client (compute) tasks. 
4402!
4403! This is not yet supported.
4404!</DESCRIPTION>
4405#if defined( DM_PARALLEL ) && !defined( STUBMPI )
4406  IMPLICIT NONE
4407  INTEGER ,       INTENT(IN)  :: DataHandle
4408  CHARACTER*(*) , INTENT(IN)  :: Element
4409  CHARACTER*(*) , INTENT(IN)  :: DateStr
4410  CHARACTER*(*) , INTENT(IN)  :: VarName
4411  CHARACTER*(*) , INTENT(IN) :: Data
4412  INTEGER                    :: Status
4413#endif
4414RETURN
4415END SUBROUTINE wrf_quilt_put_var_td_char
4416
4417SUBROUTINE wrf_quilt_read_field ( DataHandle , DateStr , VarName , Field , FieldType , Comm , IOComm, &
4418                            DomainDesc , MemoryOrder , Stagger , DimNames ,              &
4419                            DomainStart , DomainEnd ,                                    &
4420                            MemoryStart , MemoryEnd ,                                    &
4421                            PatchStart , PatchEnd ,                                      &
4422                            Status )
4423!<DESCRIPTION>
4424! Instruct the I/O quilt servers to read the variable named VarName from the
4425! dataset pointed to by DataHandle.
4426! This routine is called only by client (compute) tasks. 
4427!
4428! This is not yet supported.
4429!</DESCRIPTION>
4430#if defined( DM_PARALLEL ) && !defined( STUBMPI )
4431  IMPLICIT NONE
4432  INTEGER ,       INTENT(IN)    :: DataHandle
4433  CHARACTER*(*) , INTENT(INOUT) :: DateStr
4434  CHARACTER*(*) , INTENT(INOUT) :: VarName
4435  INTEGER ,       INTENT(INOUT) :: Field(*)
4436  integer                       ,intent(in)    :: FieldType
4437  integer                       ,intent(inout) :: Comm
4438  integer                       ,intent(inout) :: IOComm
4439  integer                       ,intent(in)    :: DomainDesc
4440  character*(*)                 ,intent(in)    :: MemoryOrder
4441  character*(*)                 ,intent(in)    :: Stagger
4442  character*(*) , dimension (*) ,intent(in)    :: DimNames
4443  integer ,dimension(*)         ,intent(in)    :: DomainStart, DomainEnd
4444  integer ,dimension(*)         ,intent(in)    :: MemoryStart, MemoryEnd
4445  integer ,dimension(*)         ,intent(in)    :: PatchStart,  PatchEnd
4446  integer                       ,intent(out)   :: Status
4447  Status = 0
4448#endif
4449RETURN
4450END SUBROUTINE wrf_quilt_read_field
4451
4452SUBROUTINE wrf_quilt_write_field ( DataHandle , DateStr , VarName , Field , FieldType , Comm , IOComm,  &
4453                             DomainDesc , MemoryOrder , Stagger , DimNames ,              &
4454                             DomainStart , DomainEnd ,                                    &
4455                             MemoryStart , MemoryEnd ,                                    &
4456                             PatchStart , PatchEnd ,                                      &
4457                             Status )
4458!<DESCRIPTION>
4459! Prepare instructions for the I/O quilt servers to write the variable named
4460! VarName to the dataset pointed to by DataHandle.
4461!
4462! During a "training" write this routine accumulates the number and sizes of
4463! the messages that will be sent to the I/O server associated with this compute
4464! (client) task.
4465!
4466! During a "real" write, this routine begins by allocating
4467! int_local_output_buffer if it has not already been allocated.  Sizes
4468! accumulated during "training" are used to determine how big
4469! int_local_output_buffer must be.  This routine then stores "int_field"
4470! headers and associated field data in int_local_output_buffer.  The contents
4471! of int_local_output_buffer are actually sent to the I/O quilt server in
4472! routine wrf_quilt_iosync().  This scheme allows output of multiple variables
4473! to be aggregated into a single "iosync" operation.
4474! This routine is called only by client (compute) tasks. 
4475!</DESCRIPTION>
4476#if defined( DM_PARALLEL ) && !defined( STUBMPI )
4477  USE module_state_description
4478  USE module_wrf_quilt
4479  IMPLICIT NONE
4480  INCLUDE 'mpif.h'
4481#include "wrf_io_flags.h"
4482  INTEGER ,       INTENT(IN)    :: DataHandle
4483  CHARACTER*(*) , INTENT(IN)    :: DateStr
4484  CHARACTER*(*) , INTENT(IN)    :: VarName
4485!  INTEGER ,       INTENT(IN)    :: Field(*)
4486  integer                       ,intent(in)    :: FieldType
4487  integer                       ,intent(inout) :: Comm
4488  integer                       ,intent(inout) :: IOComm
4489  integer                       ,intent(in)    :: DomainDesc
4490  character*(*)                 ,intent(in)    :: MemoryOrder
4491  character*(*)                 ,intent(in)    :: Stagger
4492  character*(*) , dimension (*) ,intent(in)    :: DimNames
4493  integer ,dimension(*)         ,intent(in)    :: DomainStart, DomainEnd
4494  integer ,dimension(*)         ,intent(in)    :: MemoryStart, MemoryEnd
4495  integer ,dimension(*)         ,intent(in)    :: PatchStart,  PatchEnd
4496  integer                       ,intent(out)   :: Status
4497
4498  integer ii,jj,kk,myrank
4499
4500  REAL, DIMENSION( MemoryStart(1):MemoryEnd(1), &
4501                   MemoryStart(2):MemoryEnd(2), &
4502                   MemoryStart(3):MemoryEnd(3) ) :: Field
4503  INTEGER locsize , typesize, itypesize
4504  INTEGER ierr, tasks_in_group, comm_io_group, dummy, i
4505  INTEGER, EXTERNAL :: use_package
4506
4507!!ARPTIMING  CALL start_timing
4508  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_write_field' )
4509
4510  IF ( .NOT. (DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles) ) THEN
4511    CALL wrf_error_fatal("frame/module_io_quilt.F: wrf_quilt_write_field: invalid data handle" )
4512  ENDIF
4513  IF ( .NOT. int_handle_in_use( DataHandle ) ) THEN
4514    CALL wrf_error_fatal("frame/module_io_quilt.F: wrf_quilt_write_field: DataHandle not opened" )
4515  ENDIF
4516
4517  locsize = (PatchEnd(1)-PatchStart(1)+1)* &
4518            (PatchEnd(2)-PatchStart(2)+1)* &
4519            (PatchEnd(3)-PatchStart(3)+1)
4520
4521  CALL mpi_type_size( MPI_INTEGER, itypesize, ierr )
4522  ! Note that the WRF_DOUBLE branch of this IF statement must come first since
4523  ! WRF_FLOAT is set equal to WRF_DOUBLE during autopromotion builds. 
4524  IF ( FieldType .EQ. WRF_DOUBLE ) THEN
4525    CALL mpi_type_size( MPI_DOUBLE_PRECISION, typesize, ierr )
4526  ELSE IF ( FieldType .EQ. WRF_FLOAT ) THEN
4527    CALL mpi_type_size( MPI_REAL, typesize, ierr )
4528  ELSE IF ( FieldType .EQ. WRF_INTEGER ) THEN
4529    CALL mpi_type_size( MPI_INTEGER, typesize, ierr )
4530  ELSE IF ( FieldType .EQ. WRF_LOGICAL ) THEN
4531    CALL mpi_type_size( MPI_LOGICAL, typesize, ierr )
4532  ENDIF
4533
4534  IF ( .NOT. okay_to_write( DataHandle ) ) THEN
4535
4536      ! This is a "training" write.
4537      ! It is not okay to actually write; what we do here is just bookkeeping: count up
4538      ! the number and size of the messages that we will output to the I/O server
4539      ! associated with this task.
4540
4541      CALL int_gen_write_field_header ( hdrbuf, hdrbufsize, itypesize, typesize,           &
4542                               DataHandle , DateStr , VarName , Field , FieldType , Comm , IOComm,  &
4543                               333933         , MemoryOrder , Stagger , DimNames ,              &   ! 333933 means training; magic number
4544                               DomainStart , DomainEnd ,                                    &
4545                               MemoryStart , MemoryEnd ,                                    &
4546                               PatchStart , PatchEnd )
4547
4548      int_num_bytes_to_write(DataHandle) = int_num_bytes_to_write(DataHandle) + locsize * typesize + hdrbufsize
4549
4550      ! Send the hdr for the write in case the interface is calling the I/O API in "learn" mode
4551
4552      iserver = get_server_id ( DataHandle )
4553!JMDEBUGwrite(0,*)'wrf_quilt_write_field (dryrun) ',iserver
4554      CALL get_mpi_comm_io_groups( comm_io_group , iserver )
4555      ! send the size of my local buffer to the i/o task (obufsize doesn't mean anything here)
4556
4557      CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
4558
4559#if 0
4560      IF ( .NOT. wrf_dm_on_monitor() ) THEN     ! only one task in compute grid sends this message; send noops on others
4561        CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
4562      ENDIF
4563#endif
4564
4565
4566!!ARPTIMING      CALL start_timing
4567      ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
4568      reduced = 0
4569      reduced(1) = hdrbufsize
4570#ifdef PNETCDF_QUILT
4571      IF ( compute_group_master(1) ) reduced(2) = DataHandle
4572#else
4573      IF ( wrf_dm_on_monitor() )  reduced(2) = DataHandle
4574#endif
4575      CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER,  &
4576                       MPI_SUM, tasks_in_group-1,          &   ! root = tasks_in_group-1 is the I/O server
4577                       comm_io_group, ierr )
4578!!ARPTIMING      CALL end_timing("MPI_Reduce in wrf_quilt_write_field dryrun")
4579      ! send data to the i/o processor
4580
4581      CALL collect_on_comm_debug(__FILE__,__LINE__, comm_io_group,                   &
4582                            onebyte,                          &
4583                            hdrbuf, hdrbufsize ,                 &
4584                            dummy, 0 )
4585
4586  ELSE
4587
4588    IF ( .NOT. associated( int_local_output_buffer ) ) THEN
4589      ALLOCATE ( int_local_output_buffer( (int_num_bytes_to_write( DataHandle )+1)/itypesize ), Stat=ierr )
4590      IF(ierr /= 0)THEN
4591         CALL wrf_error_fatal("frame/module_io_quilt.F: wrf_quilt_write_field: allocate of int_local_output_buffer failed" )
4592      END IF
4593      int_local_output_cursor = 1
4594    ENDIF
4595      iserver = get_server_id ( DataHandle )
4596!JMDEBUGwrite(0,*)'wrf_quilt_write_field (writing) ',iserver
4597
4598    ! This is NOT a "training" write.  It is OK to write now.
4599    CALL int_gen_write_field_header ( hdrbuf, hdrbufsize, itypesize, typesize,           &
4600                             DataHandle , DateStr , VarName , Field , FieldType , Comm , IOComm,  &
4601                             0          , MemoryOrder , Stagger , DimNames ,              &   ! anything other than the magic number 333933 means okay to write
4602                             DomainStart , DomainEnd ,                                    &
4603                             MemoryStart , MemoryEnd ,                                    &
4604                             PatchStart , PatchEnd )
4605
4606    ! Pack header into int_local_output_buffer.  It will be sent to the
4607    ! I/O servers during the next "iosync" operation. 
4608#ifdef DEREF_KLUDGE
4609    CALL int_pack_data ( hdrbuf , hdrbufsize , int_local_output_buffer(1), int_local_output_cursor )
4610#else
4611    CALL int_pack_data ( hdrbuf , hdrbufsize , int_local_output_buffer, int_local_output_cursor )
4612#endif
4613
4614    ! Pack field data into int_local_output_buffer.  It will be sent to the
4615    ! I/O servers during the next "iosync" operation. 
4616#ifdef DEREF_KLUDGE
4617    CALL int_pack_data ( Field(PatchStart(1):PatchEnd(1),PatchStart(2):PatchEnd(2),PatchStart(3):PatchEnd(3) ), &
4618                                  locsize * typesize , int_local_output_buffer(1), int_local_output_cursor )
4619#else
4620    CALL int_pack_data ( Field(PatchStart(1):PatchEnd(1),PatchStart(2):PatchEnd(2),PatchStart(3):PatchEnd(3) ), &
4621                                  locsize * typesize , int_local_output_buffer, int_local_output_cursor )
4622#endif
4623
4624  ENDIF
4625  Status = 0
4626!!ARPTIMING  CALL end_timing("wrf_quilt_write_field")
4627
4628#endif
4629  RETURN
4630END SUBROUTINE wrf_quilt_write_field
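!
! Illustrative sketch (not part of the original source) of the two-pass
! protocol implemented above. While okay_to_write(dh) is .FALSE. the call
! is a "training" write that only accumulates byte counts; once the
! dataset has been committed, the same call packs the header and patch
! data into int_local_output_buffer for the next iosync (argument lists
! abbreviated; "dh" and "t2" are hypothetical names):
!
!   ! pass 1: training -- adds locsize*typesize + hdrbufsize to the tally
!   CALL wrf_quilt_write_field( dh, DateStr, 'T2', t2, WRF_FLOAT, ... )
!   ! ... dataset committed; okay_to_write(dh) becomes .TRUE. ...
!   ! pass 2: packs header + field data for shipment at the next iosync
!   CALL wrf_quilt_write_field( dh, DateStr, 'T2', t2, WRF_FLOAT, ... )
!
! The buffered output finally travels to the I/O servers in
! wrf_quilt_iosync(), which lets many fields share a single transfer.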
4631
4632SUBROUTINE wrf_quilt_get_var_info ( DataHandle , VarName , NDim , MemoryOrder , Stagger , &
4633                              DomainStart , DomainEnd , Status )
4634!<DESCRIPTION>
4635! This routine applies only to a dataset that is open for read.  It instructs
4636! the I/O quilt servers to return information about variable VarName.
4637! This routine is called only by client (compute) tasks. 
4638!
4639! This is not yet supported.
4640!</DESCRIPTION>
4641#if defined( DM_PARALLEL ) && !defined( STUBMPI )
4642  IMPLICIT NONE
4643  integer               ,intent(in)     :: DataHandle
4644  character*(*)         ,intent(in)     :: VarName
4645  integer                               :: NDim
4646  character*(*)                         :: MemoryOrder
4647  character*(*)                         :: Stagger
4648  integer ,dimension(*)                 :: DomainStart, DomainEnd
4649  integer                               :: Status
4650#endif
4651RETURN
4652END SUBROUTINE wrf_quilt_get_var_info
4653
4654SUBROUTINE get_mpi_comm_io_groups( retval, isrvr )
4655!<DESCRIPTION>
4656! This routine returns the compute+io communicator to which this
4657! compute task belongs for I/O server group "isrvr".
4658! This routine is called only by client (compute) tasks. 
4659!</DESCRIPTION>
4660#if defined( DM_PARALLEL ) && !defined( STUBMPI )
4661      USE module_wrf_quilt
4662      IMPLICIT NONE
4663      INTEGER, INTENT(IN ) :: isrvr
4664      INTEGER, INTENT(OUT) :: retval
4665      retval = mpi_comm_io_groups(isrvr)
4666#endif
4667      RETURN
4668END SUBROUTINE get_mpi_comm_io_groups
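!
! Illustrative sketch (not part of the original source): a client can
! recover the communicator for its I/O server group and, from it, the
! number of participating tasks, exactly as the quilted put/write
! routines above do ("dh" is a hypothetical open dataset handle):
!
!   INTEGER :: dh, iserver, comm_io_group, ntasks, ierr
!   iserver = get_server_id( dh )
!   CALL get_mpi_comm_io_groups( comm_io_group, iserver )
!   CALL mpi_x_comm_size( comm_io_group, ntasks, ierr )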
4669
4670SUBROUTINE get_nio_tasks_in_group( retval )
4671!<DESCRIPTION>
4672! This routine returns the number of I/O server tasks in each
4673! I/O server group.  It can be called by both clients and
4674! servers. 
4675!</DESCRIPTION>
4676#if defined( DM_PARALLEL ) && !defined( STUBMPI )
4677      USE module_wrf_quilt
4678      IMPLICIT NONE
4679      INTEGER, INTENT(OUT) :: retval
4680      retval = nio_tasks_in_group
4681#endif
4682      RETURN
4683END SUBROUTINE get_nio_tasks_in_group
4684
4685SUBROUTINE collect_on_comm_debug(file,line, comm_io_group,   &
4686                        sze,                                 &
4687                        hdrbuf, hdrbufsize ,                 &
4688                        outbuf, outbufsize                   )
4689  IMPLICIT NONE
4690  CHARACTER*(*) file
4691  INTEGER line
4692  INTEGER comm_io_group
4693  INTEGER sze
4694  INTEGER hdrbuf(*), outbuf(*)
4695  INTEGER hdrbufsize, outbufsize
4696
4697  !write(0,*)'collect_on_comm_debug ',trim(file),line,sze,hdrbufsize,outbufsize
4698  CALL collect_on_comm( comm_io_group,                       &
4699                        sze,                                 &
4700                        hdrbuf, hdrbufsize ,                 &
4701                        outbuf, outbufsize                   )
4702  !write(0,*)trim(file),line,'returning'
4703  RETURN
4704END SUBROUTINE collect_on_comm_debug
4705
4706
4707SUBROUTINE collect_on_comm_debug2(file,line,var,tag,sz,hdr_rec_size, &
4708                        comm_io_group,                       &
4709                        sze,                                 &
4710                        hdrbuf, hdrbufsize ,                 &
4711                        outbuf, outbufsize                   )
4712  IMPLICIT NONE
4713  CHARACTER*(*) file,var
4714  INTEGER line,tag,sz,hdr_rec_size
4715  INTEGER comm_io_group
4716  INTEGER sze
4717  INTEGER hdrbuf(*), outbuf(*)
4718  INTEGER hdrbufsize, outbufsize
4719
4720!  write(0,*)'collect_on_comm_debug2 ',trim(file),line,trim(var),tag,sz,hdr_rec_size,sze,hdrbufsize,outbufsize
4721  CALL collect_on_comm( comm_io_group,                       &
4722                        sze,                                 &
4723                        hdrbuf, hdrbufsize ,                 &
4724                        outbuf, outbufsize                   )
4725!  write(0,*)'collect_on_comm_debug2 ',trim(file),line,'returning for ',trim(var)
4726  RETURN
4727END SUBROUTINE collect_on_comm_debug2