source: trunk/WRF.COMMON/WRFV2/frame/module_io_quilt.F

Last change on this file was 11, checked in by aslmd, 14 years ago

spiga@svn-planeto: adds the Martian mesoscale model

File size: 131.3 KB
!WRF:DRIVER_LAYER:IO
!
#define DEBUG_LVL 50
!#define mpi_x_comm_size(i,j,k)  Mpi_Comm_Size ( i,j,k ) ; write(0,*) __LINE__
#define mpi_x_comm_size(i,j,k)  Mpi_Comm_Size ( i,j,k )

MODULE module_wrf_quilt
!<DESCRIPTION>
!<PRE>
! This module contains WRF-specific I/O quilt routines called by both
! client (compute) and server (I/O quilt) tasks.  I/O quilt servers are
! a run-time optimization that allows I/O operations, executed on the I/O
! quilt server tasks, to be overlapped with useful computation, executed on
! the compute tasks.  Since I/O operations are often quite slow compared to
! computation, this performance optimization can increase parallel
! efficiency.
!
! Currently, one group of I/O servers can be specified at run-time.  Namelist
! variable "nio_tasks_per_group" is used to specify the number of I/O server
! tasks in this group.  In most cases, parallel efficiency is optimized when
! the minimum number of I/O server tasks is used.  If memory needed to cache
! I/O operations fits on a single processor, then set nio_tasks_per_group=1.
! If not, increase the number of I/O server tasks until I/O operations fit in
! memory.  In the future, multiple groups of I/O server tasks will be
! supported.  The number of groups will be specified by namelist variable
! "nio_groups".  For now, nio_groups must be set to 1.  Currently, I/O servers
! only support overlap of output operations with computation.  Also, only I/O
! packages that do not support native parallel I/O may be used with I/O server
! tasks.  This excludes PHDF5 and MCEL.
!
! In this module, the I/O quilt server tasks call package-dependent
! WRF-specific I/O interfaces to perform I/O operations requested by the
! client (compute) tasks.  All of these calls occur inside subroutine
! quilt().
!
! The client (compute) tasks call package-independent WRF-specific "quilt I/O"
! interfaces that send requests to the I/O quilt servers.  All of these calls
! are made from module_io.F.
!
! These routines have the same names and (roughly) the same arguments as those
! specified in the WRF I/O API except that:
! - "Quilt I/O" routines defined in this file and called by routines in
!   module_io.F have the "wrf_quilt_" prefix.
! - Package-dependent routines called from routines in this file are defined
!   in the external I/O packages and have the "ext_" prefix.
!
! Both client (compute) and server tasks call routine init_module_wrf_quilt()
! which then calls setup_quilt_servers() to determine which tasks are compute
! tasks and which are server tasks.  Before the end of init_module_wrf_quilt()
! server tasks call routine quilt() and remain there for the rest of the model
! run.  Compute tasks return from init_module_wrf_quilt() to perform model
! computations.
!
! See http://www.mmm.ucar.edu/wrf/WG2/software_2.0/IOAPI.doc for the latest
! version of the WRF I/O API.  This document includes detailed descriptions
! of subroutines and their arguments that are not duplicated here.
!</PRE>
!</DESCRIPTION>
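!
! For reference, a minimal sketch of how quilting is requested at run time:
! the two namelist variables named above live in the &namelist_quilt group of
! namelist.input.  The values below are only illustrative (one group of one
! I/O server task); appropriate values depend on the size of the run.
!
!   &namelist_quilt
!    nio_tasks_per_group = 1,
!    nio_groups          = 1,
!   /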
  USE module_internal_header_util
  USE module_timing

  INTEGER, PARAMETER :: int_num_handles = 99
  LOGICAL, DIMENSION(int_num_handles) :: okay_to_write, int_handle_in_use, okay_to_commit
  INTEGER, DIMENSION(int_num_handles) :: int_num_bytes_to_write, io_form
  REAL, POINTER    :: int_local_output_buffer(:)
  INTEGER          :: int_local_output_cursor
  LOGICAL          :: quilting_enabled
  LOGICAL          :: disable_quilt = .FALSE.
  INTEGER          :: prev_server_for_handle = -1
  INTEGER          :: server_for_handle(int_num_handles)
  INTEGER          :: reduced(2), reduced_dummy(2)
  LOGICAL, EXTERNAL :: wrf_dm_on_monitor

  INTEGER nio_groups
#ifdef DM_PARALLEL
  INTEGER mpi_comm_local
  INTEGER mpi_comm_io_groups(100)
  INTEGER nio_tasks_in_group
  INTEGER nio_tasks_per_group
  INTEGER ncompute_tasks
  INTEGER ntasks
  INTEGER mytask

  INTEGER, PARAMETER           :: onebyte = 1
  INTEGER comm_io_servers, iserver, hdrbufsize, obufsize
  INTEGER, DIMENSION(4096)     :: hdrbuf
  INTEGER, DIMENSION(int_num_handles)     :: handle
#endif

  CONTAINS

#if  defined(DM_PARALLEL)  &&  !defined( STUBMPI )
    INTEGER FUNCTION get_server_id ( dhandle )
!<DESCRIPTION>
! Logic on the client side to know which io server
! group to send to. If the unit corresponds to a file that's
! already been opened, then we have no choice but to send the
! data to that group again, regardless of whether there are
! other server-groups. If it's a new file, we can choose a new
! server group. I.e. opening a file locks it onto a server
! group. Closing the file unlocks it.
!</DESCRIPTION>
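!
! A minimal worked example of the round-robin assignment below, assuming
! nio_groups = 2 and no file handles assigned yet (prev_server_for_handle
! starts at -1):
!   mod(-1+1, 2) + 1 = 1   -> first  new file handle goes to server group 1
!   mod( 0+1, 2) + 1 = 2   -> second new file handle goes to server group 2
!   mod( 1+1, 2) + 1 = 1   -> third  new file handle goes to server group 1
! A handle that is already assigned simply returns its recorded group.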
      IMPLICIT NONE
      INTEGER, INTENT(IN) :: dhandle
      IF ( dhandle .GE. 1 .AND. dhandle .LE. int_num_handles ) THEN
        IF ( server_for_handle ( dhandle ) .GE. 1 ) THEN
          get_server_id = server_for_handle ( dhandle )
        ELSE
          prev_server_for_handle = mod ( prev_server_for_handle + 1 , nio_groups )
          server_for_handle( dhandle ) = prev_server_for_handle+1
          get_server_id = prev_server_for_handle+1
        ENDIF
      ELSE
         CALL wrf_message('module_io_quilt: get_server_id bad dhandle' )
         get_server_id = -1   ! give the function result a defined value on the error path
      ENDIF
    END FUNCTION get_server_id
#endif

    SUBROUTINE set_server_id ( dhandle, value )
       IMPLICIT NONE
       INTEGER, INTENT(IN) :: dhandle, value
       IF ( dhandle .GE. 1 .AND. dhandle .LE. int_num_handles ) THEN
         server_for_handle(dhandle) = value
       ELSE
         CALL wrf_message('module_io_quilt: set_server_id bad dhandle' )
       ENDIF
    END SUBROUTINE set_server_id

#if defined( DM_PARALLEL ) && !defined( STUBMPI )
    SUBROUTINE int_get_fresh_handle( retval )
!<DESCRIPTION>
! Find an unused "client file handle" and return it in retval.
! The "client file handle" is used to remember how a file was opened
! so clients do not need to ask the I/O quilt servers for this information.
! It is also used as a file identifier in communications with the I/O
! server task.
!
! Note that client tasks know nothing about package-specific handles.
! Only the I/O quilt servers know about them.
!</DESCRIPTION>
      INTEGER i, retval
      retval = -1
      DO i = 1, int_num_handles
        IF ( .NOT. int_handle_in_use(i) )  THEN
          retval = i
          GOTO 33
        ENDIF
      ENDDO
33    CONTINUE
      IF ( retval < 0 )  THEN
        CALL wrf_error_fatal("frame/module_io_quilt.F: int_get_fresh_handle() can not find a free handle")
      ENDIF
      int_handle_in_use(i) = .TRUE.
      NULLIFY ( int_local_output_buffer )
    END SUBROUTINE int_get_fresh_handle

    SUBROUTINE setup_quilt_servers ( nio_tasks_per_group,     &
                                     mytask,                  &
                                     ntasks,                  &
                                     n_groups_arg,            &
                                     nio,                     &
                                     mpi_comm_wrld,           &
                                     mpi_comm_local,          &
                                     mpi_comm_io_groups)
!<DESCRIPTION>
! Both client (compute) and server tasks call this routine to
! determine which tasks are compute tasks and which are I/O server tasks.
!
! Module variables MPI_COMM_LOCAL and MPI_COMM_IO_GROUPS(:) are set up to
! contain MPI communicators as follows:
!
! MPI_COMM_LOCAL is the communicator for the local groups of tasks. For the
! compute tasks it is the group of compute tasks; for a server group it is the
! communicator of tasks in the server group.
!
! Elements of MPI_COMM_IO_GROUPS are communicators that each contain one or
! more compute tasks and a single I/O server assigned to those compute tasks.
! The I/O server task is always the last task in these communicators.
! On a compute task, which has a single associated server task in each of the
! server groups, MPI_COMM_IO_GROUPS is treated as an array; each element
! corresponds to a different server group.
! On a server task only the first element of MPI_COMM_IO_GROUPS is used
! because each server task is part of only one io_group.
!
! I/O server tasks in each I/O server group are divided among compute tasks as
! evenly as possible.
!
! When multiple I/O server groups are used, each must have the same number of
! tasks.  When the total number of extra I/O tasks does not divide evenly by
! the number of io server groups requested, the remainder tasks are not used
! (wasted).
!
! For example, communicator membership for 18 tasks with nio_groups=2 and
! nio_tasks_per_group=3 is shown below:
!
!<PRE>
! Membership for MPI_COMM_LOCAL communicators:
!   COMPUTE TASKS:          0   1   2   3   4   5   6   7   8   9  10  11
!   1ST I/O SERVER GROUP:  12  13  14
!   2ND I/O SERVER GROUP:  15  16  17
!
! Membership for MPI_COMM_IO_GROUPS(1):
!   COMPUTE TASKS 0, 3, 6, 9:   0   3   6   9  12
!   COMPUTE TASKS 1, 4, 7,10:   1   4   7  10  13
!   COMPUTE TASKS 2, 5, 8,11:   2   5   8  11  14
!   I/O SERVER TASK       12:   0   3   6   9  12
!   I/O SERVER TASK       13:   1   4   7  10  13
!   I/O SERVER TASK       14:   2   5   8  11  14
!   I/O SERVER TASK       15:   0   3   6   9  15
!   I/O SERVER TASK       16:   1   4   7  10  16
!   I/O SERVER TASK       17:   2   5   8  11  17
!
! Membership for MPI_COMM_IO_GROUPS(2):
!   COMPUTE TASKS 0, 3, 6, 9:   0   3   6   9  15
!   COMPUTE TASKS 1, 4, 7,10:   1   4   7  10  16
!   COMPUTE TASKS 2, 5, 8,11:   2   5   8  11  17
!   I/O SERVER TASK       12:  ** not used **
!   I/O SERVER TASK       13:  ** not used **
!   I/O SERVER TASK       14:  ** not used **
!   I/O SERVER TASK       15:  ** not used **
!   I/O SERVER TASK       16:  ** not used **
!   I/O SERVER TASK       17:  ** not used **
!</PRE>
!</DESCRIPTION>
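!
! A minimal sketch of the MPI_Comm_split coloring that produces the
! MPI_COMM_LOCAL membership shown above for the 18-task example
! (icolor is indexed by task rank + 1; nio_groups = 2, nio_tasks_per_group = 3):
!
!   icolor(1:12)  = 0    ! all compute tasks share one local communicator
!   icolor(13:15) = 1    ! first I/O server group
!   icolor(16:18) = 2    ! second I/O server group
!   CALL MPI_Comm_split( comdup, icolor(mytask+1), mytask, mpi_comm_local, ierr )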
      IMPLICIT NONE
      INCLUDE 'mpif.h'
      INTEGER,                      INTENT(IN)  :: nio_tasks_per_group, mytask, ntasks, &
                                                   n_groups_arg, mpi_comm_wrld
      INTEGER,  INTENT(OUT)                     :: mpi_comm_local, nio
      INTEGER, DIMENSION(100),      INTENT(OUT) :: mpi_comm_io_groups
! Local
      INTEGER                     :: i, j, ii, comdup, ierr, niotasks, n_groups, iisize
      INTEGER, DIMENSION(ntasks)  :: icolor
      CHARACTER*128 mess

      n_groups = n_groups_arg
      IF ( n_groups .LT. 1 ) n_groups = 1

!<DESCRIPTION>
! nio is the number of io tasks per group.  If there aren't enough tasks to satisfy
! the requirement that there be at least as many compute tasks as io tasks in
! each group, then just print a warning and disable quilting.
!</DESCRIPTION>

      nio = nio_tasks_per_group
      ncompute_tasks = ntasks - (nio * n_groups)
      IF ( ncompute_tasks .LT. nio ) THEN
        WRITE(mess,'("Not enough tasks to have ",I3," groups of ",I3," I/O tasks. No quilting.")')n_groups,nio
        nio            = 0
        ncompute_tasks = ntasks
      ELSE
        WRITE(mess,'("Quilting with ",I3," groups of ",I3," I/O tasks.")')n_groups,nio
      ENDIF
      CALL wrf_message(mess)

      IF ( nio .LT. 0 ) THEN
        nio = 0
      ENDIF
      IF ( nio .EQ. 0 ) THEN
        quilting_enabled = .FALSE.
        mpi_comm_local = MPI_COMM_WORLD
        mpi_comm_io_groups = MPI_COMM_WORLD
        RETURN
      ENDIF
      quilting_enabled = .TRUE.

! First construct the local communicators
! prepare to split the communicator by designating compute-only tasks
      DO i = 1, ncompute_tasks
        icolor(i) = 0
      ENDDO
      ii = 1
! and designating the groups of i/o tasks
      DO i = ncompute_tasks+1, ntasks, nio
        DO j = i, i+nio-1
          icolor(j) = ii
        ENDDO
        ii = ii+1
      ENDDO
      CALL MPI_Comm_dup(mpi_comm_wrld,comdup,ierr)
      CALL MPI_Comm_split(comdup,icolor(mytask+1),mytask,mpi_comm_local,ierr)

! Now construct the communicators for the io_groups; round-robining the compute tasks
      DO i = 1, ncompute_tasks
        icolor(i) = mod(i-1,nio)
      ENDDO
! ... and add the io servers as the last task in each group
      DO j = 1, n_groups
        ! TBH:  each I/O group will contain only one I/O server
        DO i = ncompute_tasks+1,ntasks
          icolor(i) = MPI_UNDEFINED
        ENDDO
        ii = 0
        DO i = ncompute_tasks+(j-1)*nio+1,ncompute_tasks+j*nio
          icolor(i) = ii
          ii = ii+1
        ENDDO
        CALL MPI_Comm_dup(mpi_comm_wrld,comdup,ierr)
        CALL MPI_Comm_split(comdup,icolor(mytask+1),mytask,mpi_comm_io_groups(j),ierr)
!CALL MPI_Comm_Size( mpi_comm_io_groups(j) , iisize, ierr )
      ENDDO
! If I am an I/O server, figure out which group I'm in and make that group's
! communicator the first element in the mpi_comm_io_groups array (I will ignore
! all of the other elements).
      IF ( mytask+1 .GT. ncompute_tasks ) THEN
        niotasks = ntasks - ncompute_tasks
        i = mytask - ncompute_tasks
        j = i / nio + 1
        mpi_comm_io_groups(1) = mpi_comm_io_groups(j)
      ENDIF

    END SUBROUTINE setup_quilt_servers

    SUBROUTINE quilt
!<DESCRIPTION>
! I/O server tasks call this routine and remain in it for the rest of the
! model run.  I/O servers receive I/O requests from compute tasks and
! perform requested I/O operations by calling package-dependent WRF-specific
! I/O interfaces.  Requests are sent in the form of "data headers".  Each
! request has a unique "header" message associated with it.  For requests that
! contain large amounts of data, the data is appended to the header.  See
! file module_internal_header_util.F for detailed descriptions of all
! headers.
!
! We wish to be able to link to different packages depending on whether
! the I/O is restart, initial, history, or boundary.
!</DESCRIPTION>
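!
! As a rough sketch only (module_internal_header_util.F is authoritative),
! each request record in the byte stream handled below is assumed to be laid
! out as:
!   word 1 : record length in bytes   (read back with get_hdr_rec_size())
!   word 2 : integer request tag      (read back with get_hdr_tag())
!   word 3+: request-specific values, optionally followed by field data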
      USE module_state_description
      USE module_quilt_outbuf_ops
      IMPLICIT NONE
      INCLUDE 'mpif.h'
#include "intio_tags.h"
#include "wrf_io_flags.h"
      INTEGER itag, ninbuf, ntasks_io_group, ntasks_local_group, mytask_local, ierr
      INTEGER istat
      INTEGER mytask_io_group
      INTEGER   :: nout_set = 0
      INTEGER   :: obufsize, bigbufsize, inttypesize, chunksize, sz
      REAL, DIMENSION(1)      :: dummy
      INTEGER, ALLOCATABLE, DIMENSION(:) :: obuf, bigbuf
      REAL,    ALLOCATABLE, DIMENSION(:) :: RDATA
      INTEGER, ALLOCATABLE, DIMENSION(:) :: IDATA
      CHARACTER (LEN=512) :: CDATA
      CHARACTER (LEN=80) :: fname
      INTEGER icurs, hdrbufsize, itypesize, ftypesize, Status, fstat, io_form_arg
      INTEGER :: DataHandle, FieldType, Comm, IOComm, DomainDesc, code, Count
      INTEGER, DIMENSION(3) :: DomainStart , DomainEnd , MemoryStart , MemoryEnd , PatchStart , PatchEnd
      INTEGER :: dummybuf(1)
      CHARACTER (len=80) :: DateStr , Element, VarName, MemoryOrder , Stagger , DimNames(3), FileName, SysDepInfo, mess
      INTEGER, EXTERNAL :: use_package
      LOGICAL           :: stored_write_record, retval
      INTEGER iii, jjj, vid

!

! Call ext_pkg_ioinit() routines to initialize I/O packages.
      SysDepInfo = " "
#ifdef NETCDF
      CALL ext_ncd_ioinit( SysDepInfo, ierr)
#endif
#ifdef INTIO
      CALL ext_int_ioinit( SysDepInfo, ierr )
#endif
#ifdef XXX
      CALL ext_xxx_ioinit( SysDepInfo, ierr)
#endif
#ifdef YYY
      CALL ext_yyy_ioinit( SysDepInfo, ierr)
#endif
#ifdef ZZZ
      CALL ext_zzz_ioinit( SysDepInfo, ierr)
#endif
#ifdef GRIB1
      CALL ext_gr1_ioinit( SysDepInfo, ierr)
#endif
#ifdef GRIB2
      CALL ext_gr2_ioinit( SysDepInfo, ierr)
#endif

      okay_to_commit = .false.
      stored_write_record = .false.
      ninbuf = 0
      ! get info about the I/O server group that this I/O server task
      ! belongs to
      ! Last task in this I/O server group is the I/O server "root"
      ! The I/O server "root" actually writes data to disk
      ! TBH:  WARNING:  This is also implicit in the call to collect_on_comm().
      CALL mpi_x_comm_size( mpi_comm_io_groups(1), ntasks_io_group,    ierr )
      CALL MPI_COMM_RANK( mpi_comm_io_groups(1), mytask_io_group,    ierr )
      CALL mpi_x_comm_size( mpi_comm_local,        ntasks_local_group, ierr )
      CALL MPI_COMM_RANK( mpi_comm_local,        mytask_local,       ierr )

      CALL MPI_TYPE_SIZE( MPI_INTEGER, inttypesize, ierr )
      IF ( inttypesize <= 0 ) THEN
        CALL wrf_error_fatal("external/RSL/module_dm.F: quilt: type size <= 0 invalid")
      ENDIF
! infinite loop until shutdown message received
! This is the main request-handling loop.  I/O quilt servers stay in this loop
! until the model run ends.
      DO WHILE (.TRUE.)

!<DESCRIPTION>
! Each I/O server receives requests from its compute tasks.  Each request
! is contained in a data header (see module_internal_header_util.F for
! detailed descriptions of data headers).
! Each request is sent in two phases.  First, sizes of all messages that
! will be sent from the compute tasks to this I/O server are summed on the
! I/O server via MPI_reduce().  The I/O server then allocates buffer "obuf"
! and receives concatenated messages from the compute tasks in it via the
! call to collect_on_comm().  Note that "sizes" are generally expressed in
! *bytes* in this code so conversion to "count" (number of Fortran words) is
! required for Fortran indexing and MPI calls.
!</DESCRIPTION>
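! For example, with 4-byte default integers (inttypesize = 4) a 1000-byte
! message stream occupies (1000+1)/4 = 250 words of the INTEGER buffer obuf,
! and a byte cursor icurs is converted to a Fortran index as icurs/inttypesize.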
        ! wait for info from compute tasks in the I/O group that we're ready to rock
        ! obufsize will contain number of *bytes*
!JMTIMINGCALL start_timing
        ! first element of reduced is obufsize, second is DataHandle
        ! if needed (currently needed only for ioclose).
        reduced_dummy = 0
        CALL MPI_Reduce( reduced_dummy, reduced, 2, MPI_INTEGER,  &
                         MPI_SUM, mytask_io_group,          &
                         mpi_comm_io_groups(1), ierr )
        obufsize = reduced(1)
!JMTIMING CALL end_timing("MPI_Reduce at top of forever loop")
!JMDEBUGwrite(0,*)'obufsize = ',obufsize
!write(0,*)'ninbuf ',ninbuf,' obufsize ', obufsize
! Negative obufsize will trigger I/O server exit.
        IF ( obufsize .LT. 0 ) THEN
          IF ( obufsize .EQ. -100 ) THEN         ! magic number
#ifdef NETCDF
            CALL ext_ncd_ioexit( Status )
#endif
#ifdef INTIO
            CALL ext_int_ioexit( Status )
#endif
#ifdef XXX
            CALL ext_xxx_ioexit( Status )
#endif
#ifdef YYY
            CALL ext_yyy_ioexit( Status )
#endif
#ifdef ZZZ
            CALL ext_zzz_ioexit( Status )
#endif
#ifdef GRIB1
            CALL ext_gr1_ioexit( Status )
#endif
#ifdef GRIB2
            CALL ext_gr2_ioexit( Status )
#endif
            CALL wrf_message ( 'I/O QUILT SERVERS DONE' )
            CALL mpi_finalize(ierr)
            STOP
          ELSE
            CALL wrf_error_fatal('Possible 32-bit overflow on output server. Try larger nio_tasks_per_group in namelist.')
          ENDIF
        ENDIF

!JMTIMING        CALL start_timing
! An obufsize of zero signals a close.

! Allocate buffer obuf to be big enough for the data the compute tasks
! will send.  Note: obufsize is in *bytes*, so we convert it to a word
! count here, since the buffer is INTEGER.
        IF ( obufsize .GT. 0 ) THEN
          ALLOCATE( obuf( (obufsize+1)/inttypesize ) )

! let's roll; get the data from the compute procs and put in obuf
          CALL collect_on_comm( mpi_comm_io_groups(1),        &
                                onebyte,                      &
                                dummy, 0,                     &
                                obuf, obufsize )
!JMTIMING           CALL end_timing( "quilt on server: collecting data from compute procs" )
        ELSE
          ! Necessarily, the compute processes send the ioclose signal,
          ! if there is one, after the iosync, which means they
          ! will stall on the ioclose message waiting for the quilt
          ! processes if we handle it the way other messages are collected,
          ! using collect_on_comm.  This special case avoids that, but we need
          ! a special signal (obufsize zero) and the DataHandle of the file
          ! to be closed. That handle is sent as the second
          ! word of the io_close message received by the MPI_Reduce above.
          ! Then a header representing the ioclose message is constructed
          ! here and handled below as if it were received from the
          ! compute processes. The clients (compute processes) must be
          ! careful to send this correctly (one compute process sends the actual
          ! handle and everyone else sends a zero, so the result sums to
          ! the value of the handle).
          !
          ALLOCATE( obuf( 4096 ) )
          ! DataHandle is provided as second element of reduced
          CALL int_gen_handle_header( obuf, obufsize, itypesize, &
                                      reduced(2) , int_ioclose )

        ENDIF

!write(0,*)'calling init_store_piece_of_field'
! Now all messages received from the compute clients are stored in
! obuf.  Scan through obuf and extract headers and field data and store in
! internal buffers.  The scan is done twice, first to determine sizes of
! internal buffers required for storage of headers and fields and second to
! actually store the headers and fields.  This bit of code does not do the
! "quilting" (assembly of patches into full domains).  For each field, it
! simply concatenates all received patches for the field into a separate
! internal buffer (i.e. one buffer per field).  Quilting is done later by
! routine store_patch_in_outbuf().
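! In outline, the two passes over obuf below reduce to (sketch of the calls only):
!   pass 1:  CALL add_to_bufsize_for_field( VarName, size_of_piece )    ! sizing
!   pass 2:  CALL store_piece_of_field( piece, VarName, size_of_piece ) ! copying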
        CALL init_store_piece_of_field
        CALL mpi_type_size ( MPI_INTEGER , itypesize , ierr )
!write(0,*)'mpi_type_size returns ', itypesize
! Scan obuf the first time to calculate the size of the buffer required for
! each field.  Calls to add_to_bufsize_for_field() accumulate sizes.
        vid = 0
        icurs = inttypesize
        DO WHILE ( icurs .lt. obufsize )
          SELECT CASE ( get_hdr_tag( obuf ( icurs / inttypesize ) ) )
            CASE ( int_field )
              CALL int_get_write_field_header ( obuf(icurs/inttypesize), hdrbufsize, itypesize, ftypesize,  &
                                                DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
                                                DomainDesc , MemoryOrder , Stagger , DimNames ,              &
                                                DomainStart , DomainEnd ,                                    &
                                                MemoryStart , MemoryEnd ,                                    &
                                                PatchStart , PatchEnd )
              chunksize = (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
                          (PatchEnd(3)-PatchStart(3)+1)*ftypesize

!write(0,*) 'X-1', hdrbufsize, get_hdr_tag( obuf ( icurs / inttypesize ) ) , get_hdr_rec_size( obuf ( icurs / inttypesize ) ), TRIM(VarName)
              call add_to_bufsize_for_field( VarName, hdrbufsize )
              icurs = icurs + hdrbufsize
              ! If this is a real write (i.e. not a training write), accumulate
              ! buffersize for this field.
              IF ( DomainDesc .NE. 333933 ) THEN   ! magic number
!write(0,*) 'X-1a', chunksize, TRIM(VarName)
                call add_to_bufsize_for_field( VarName, chunksize )
                icurs = icurs + chunksize
              ENDIF
            CASE DEFAULT
              hdrbufsize = obuf(icurs/inttypesize)
              write(VarName,'(I5.5)')vid
!write(0,*) 'X-2', hdrbufsize, get_hdr_tag( obuf ( icurs / inttypesize ) ) , get_hdr_rec_size( obuf ( icurs / inttypesize ) ), TRIM(VarName)
              call add_to_bufsize_for_field( VarName, hdrbufsize )
              icurs = icurs + hdrbufsize
              vid = vid+1
          END SELECT
        ENDDO
! Store the headers and field data in internal buffers.  The first call to
! store_piece_of_field() allocates internal buffers using sizes computed by
! calls to add_to_bufsize_for_field().
        vid = 0
        icurs = inttypesize
        DO WHILE ( icurs .lt. obufsize )
!write(0,*) 'A icurs ', icurs, ' obufsize ', obufsize
          SELECT CASE ( get_hdr_tag( obuf ( icurs / inttypesize ) ) )
            CASE ( int_field )
              CALL int_get_write_field_header ( obuf(icurs/inttypesize), hdrbufsize, itypesize, ftypesize,  &
                                                DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
                                                DomainDesc , MemoryOrder , Stagger , DimNames ,              &
                                                DomainStart , DomainEnd ,                                    &
                                                MemoryStart , MemoryEnd ,                                    &
                                                PatchStart , PatchEnd )
              chunksize = (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
                          (PatchEnd(3)-PatchStart(3)+1)*ftypesize


              call store_piece_of_field( obuf(icurs/inttypesize), VarName, hdrbufsize )
!write(0,*) 'A-1', hdrbufsize, get_hdr_tag( obuf ( icurs / inttypesize ) ) , get_hdr_rec_size( obuf ( icurs / inttypesize ) ), TRIM(VarName)
              icurs = icurs + hdrbufsize
              ! If this is a real write (i.e. not a training write), store
              ! this piece of this field.
              IF ( DomainDesc .NE. 333933 ) THEN   ! magic number
!write(0,*) 'A-1a', chunksize, TRIM(VarName)
                call store_piece_of_field( obuf(icurs/inttypesize), VarName, chunksize )
                icurs = icurs + chunksize
              ENDIF
            CASE DEFAULT
              hdrbufsize = obuf(icurs/inttypesize)
              write(VarName,'(I5.5)')vid
              call store_piece_of_field( obuf(icurs/inttypesize), VarName, hdrbufsize )
!write(0,*) 'A-2', hdrbufsize, get_hdr_tag( obuf ( icurs / inttypesize ) ) , get_hdr_rec_size( obuf ( icurs / inttypesize ) ), TRIM(VarName)
              icurs = icurs + hdrbufsize
              vid = vid+1
          END SELECT
        ENDDO

!call mpi_comm_size( mpi_comm_local, iii, ierr )
!write(0,*)'mpi_comm_size mpi_comm_local ',iii
!call mpi_comm_rank( mpi_comm_local, iii, ierr )
!write(0,*)'mpi_comm_rank mpi_comm_local ',iii

!write(0,*)'calling init_retrieve_pieces_of_field '
! Now, for each field, retrieve headers and patches (data) from the internal
! buffers and collect them all on the I/O quilt server "root" task.
        CALL init_retrieve_pieces_of_field
! Retrieve header and all patches for the first field from the internal
! buffers.
        CALL retrieve_pieces_of_field ( obuf , VarName, obufsize, sz, retval )
!write(0,*)'calling first retrieve_pieces_of_field ',TRIM(VarName),obufsize,sz,retval
! Sum sizes of all headers and patches (data) for this field from all I/O
! servers in this I/O server group onto the I/O server "root".
        CALL MPI_Reduce( sz, bigbufsize, 1, MPI_INTEGER,  &
                         MPI_SUM, ntasks_local_group-1,         &
                         mpi_comm_local, ierr )
!write(0,*)'after MPI_Reduce ',sz, bigbufsize

! Loop until there are no more fields to retrieve from the internal buffers.
        DO WHILE ( retval )

!write(0,*)' VarName ',TRIM(VarName),' sz ',sz,' bigbufsize ',bigbufsize

! I/O server "root" allocates space to collect headers and fields from all
! other servers in this I/O server group.
          IF ( mytask_local .EQ. ntasks_local_group-1 ) THEN
!write(0,*)'allocating bufbuf ', (bigbufsize+1)/inttypesize
            ALLOCATE( bigbuf( (bigbufsize+1)/inttypesize ) )
          ENDIF

!write(0,*)'before collect_on_comm tag,size ',Trim(VarName),get_hdr_tag(obuf),get_hdr_rec_size(obuf)
! Collect buffers and fields from all I/O servers in this I/O server group
! onto the I/O server "root"
          CALL collect_on_comm( mpi_comm_local,                    &
                                onebyte,                           &
                                obuf, sz,  &
                                bigbuf, bigbufsize )
!write(0,*)'after collect_on_comm ', sz, bigbufsize
! The I/O server "root" now handles collected requests from all compute
! tasks served by this I/O server group (i.e. all compute tasks).
          IF ( mytask_local .EQ. ntasks_local_group-1 ) THEN
!jjj = 4
!do iii = 1, ntasks_local_group
!  write(0,*)'i,j,tag,size ', iii, jjj, get_hdr_tag(bigbuf(jjj/4)),get_hdr_rec_size(bigbuf(jjj/4))
!  jjj = jjj + get_hdr_rec_size(bigbuf(jjj/4))
!enddo

            icurs = inttypesize  ! icurs is a byte counter, but buffer is integer

            stored_write_record = .false.

! The I/O server "root" loops over the collected requests.
            DO WHILE ( icurs .lt. bigbufsize )
              CALL mpi_type_size ( MPI_INTEGER , itypesize , ierr )

!write(0,*)'B tag,size ',icurs,get_hdr_tag( bigbuf(icurs/inttypesize) ),get_hdr_rec_size( bigbuf(icurs/inttypesize) )
!write(0,*)'   inttypesize ',inttypesize,' itypesize ',itypesize,' icurs/inttypesize ',icurs/inttypesize
! The I/O server "root" gets the request out of the next header and
! handles it by, in most cases, calling the appropriate external I/O package
! interface.
              SELECT CASE ( get_hdr_tag( bigbuf(icurs/inttypesize) ) )
! The I/O server "root" handles the "noop" (do nothing) request.  This is
! actually quite easy.  "Noop" requests exist to help avoid race conditions.
! In some cases, only one compute task will know everything about a request, so
! other compute tasks send "noop" requests.
                CASE ( int_noop )
!write(0,*)' int_noop '
                  CALL int_get_noop_header( bigbuf(icurs/inttypesize), hdrbufsize, itypesize )
                  icurs = icurs + hdrbufsize

! The I/O server "root" handles the "put_dom_td_real" request.
                CASE ( int_dom_td_real )
!write(0,*)' int_dom_td_real '
                  CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
                  ALLOCATE( RData( bigbuf(icurs/inttypesize + 4 ) ) )      ! 5 is the count of data items for this record ; defined in collect_on_comm.c
                  CALL int_get_td_header( bigbuf(icurs/inttypesize:), hdrbufsize, inttypesize, ftypesize, &
                                          DataHandle, DateStr, Element, RData, Count, code )
                  icurs = icurs + hdrbufsize

                  SELECT CASE (use_package(io_form(DataHandle)))
#ifdef NETCDF
                    CASE ( IO_NETCDF   )
                      CALL ext_ncd_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
#endif
#ifdef INTIO
                    CASE ( IO_INTIO   )
                      CALL ext_int_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
#endif
#ifdef YYY
                 CASE ( IO_YYY )
                    CALL ext_yyy_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
#endif
#ifdef GRIB1
                 CASE ( IO_GRIB1 )
                    CALL ext_gr1_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
#endif
#ifdef GRIB2
                 CASE ( IO_GRIB2 )
                    CALL ext_gr2_put_dom_td_real( handle(DataHandle),TRIM(Element),TRIM(DateStr),RData, Count, Status )
#endif
                     CASE DEFAULT
                      Status = 0
                  END SELECT

                  DEALLOCATE( RData )
! The I/O server "root" handles the "put_dom_ti_real" request.
                CASE ( int_dom_ti_real )
!write(0,*)' int_dom_ti_real '
                  CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
                  ALLOCATE( RData( bigbuf(icurs/inttypesize + 4 ) ) )      ! 5 is the count of data items for this record ; defined in collect_on_comm.c
                  CALL int_get_ti_header( bigbuf(icurs/inttypesize:), hdrbufsize, inttypesize, ftypesize, &
                                          DataHandle, Element, RData, Count, code )
                  icurs = icurs + hdrbufsize

                  SELECT CASE (use_package(io_form(DataHandle)))
#ifdef NETCDF
                    CASE ( IO_NETCDF   )
                      CALL ext_ncd_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
!write(0,*)'ext_ncd_put_dom_ti_real ',handle(DataHandle),TRIM(Element),RData,Status
#endif
#ifdef INTIO
                    CASE ( IO_INTIO   )
                      CALL ext_int_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
#endif
#ifdef YYY
                 CASE ( IO_YYY )
                    CALL ext_yyy_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
#endif
#ifdef GRIB1
                 CASE ( IO_GRIB1 )
                    CALL ext_gr1_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
#endif
#ifdef GRIB2
                 CASE ( IO_GRIB2 )
                    CALL ext_gr2_put_dom_ti_real( handle(DataHandle),TRIM(Element), RData, Count, Status )
#endif
                    CASE DEFAULT
                      Status = 0
                  END SELECT

                  DEALLOCATE( RData )

! The I/O server "root" handles the "put_dom_td_integer" request.
                CASE ( int_dom_td_integer )
!write(0,*)' int_dom_td_integer '
                  CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
                  ALLOCATE( IData( bigbuf(icurs/inttypesize + 4 ) ) )      ! 5 is the count of data items for this record ; defined in collect_on_comm.c
                  CALL int_get_td_header( bigbuf(icurs/inttypesize:), hdrbufsize, inttypesize, ftypesize, &
                                          DataHandle, DateStr, Element, IData, Count, code )
                  icurs = icurs + hdrbufsize

                  SELECT CASE (use_package(io_form(DataHandle)))
#ifdef NETCDF
                    CASE ( IO_NETCDF   )
                      CALL ext_ncd_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
#endif
#ifdef INTIO
                    CASE ( IO_INTIO   )
                      CALL ext_int_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
#endif
#ifdef YYY
                 CASE ( IO_YYY )
                    CALL ext_yyy_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
#endif
#ifdef GRIB1
                 CASE ( IO_GRIB1 )
                    CALL ext_gr1_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
#endif
#ifdef GRIB2
                 CASE ( IO_GRIB2 )
                    CALL ext_gr2_put_dom_td_integer( handle(DataHandle),TRIM(Element), Trim(DateStr), IData, Count, Status )
#endif
                    CASE DEFAULT
                      Status = 0
                  END SELECT

                  DEALLOCATE( IData )

! The I/O server "root" handles the "put_dom_ti_integer" request.
                CASE ( int_dom_ti_integer )
!write(0,*)' int_dom_ti_integer '

                  CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
                  ALLOCATE( IData( bigbuf(icurs/inttypesize + 4 ) ) )      ! 5 is the count of data items for this record ; defined in collect_on_comm.c
                  CALL int_get_ti_header( bigbuf(icurs/inttypesize:), hdrbufsize, inttypesize, ftypesize, &
                                          DataHandle, Element, IData, Count, code )
                  icurs = icurs + hdrbufsize
                  SELECT CASE (use_package(io_form(DataHandle)))
#ifdef NETCDF
                    CASE ( IO_NETCDF   )
                      CALL ext_ncd_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
!write(0,*)'ext_ncd_put_dom_ti_integer ',handle(DataHandle),TRIM(Element),IData,Status
#endif
#ifdef INTIO
                    CASE ( IO_INTIO   )
                      CALL ext_int_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
#endif
#ifdef YYY
                 CASE ( IO_YYY )
                    CALL ext_yyy_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
#endif
#ifdef GRIB1
                 CASE ( IO_GRIB1 )
                    CALL ext_gr1_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
#endif
#ifdef GRIB2
                 CASE ( IO_GRIB2 )
                    CALL ext_gr2_put_dom_ti_integer( handle(DataHandle),TRIM(Element), IData, Count, Status )
#endif

                    CASE DEFAULT
                      Status = 0
                  END SELECT

                  DEALLOCATE( IData)

! The I/O server "root" handles the "set_time" request.
                CASE ( int_set_time )
!write(0,*)' int_set_time '
                  CALL int_get_ti_header_char( bigbuf(icurs/inttypesize), hdrbufsize, inttypesize, &
                                               DataHandle, Element, VarName, CData, code )
                  SELECT CASE (use_package(io_form(DataHandle)))
#ifdef INTIO
                    CASE ( IO_INTIO   )
                      CALL ext_int_set_time ( handle(DataHandle), TRIM(CData), Status)
#endif
                    CASE DEFAULT
                      Status = 0
                  END SELECT

                  icurs = icurs + hdrbufsize

! The I/O server "root" handles the "put_dom_ti_char" request.
                CASE ( int_dom_ti_char )
!write(0,*)' before int_get_ti_header_char '
                  CALL int_get_ti_header_char( bigbuf(icurs/inttypesize), hdrbufsize, inttypesize, &
                                               DataHandle, Element, VarName, CData, code )
!write(0,*)' after int_get_ti_header_char ',VarName

                  SELECT CASE (use_package(io_form(DataHandle)))
#ifdef NETCDF
                    CASE ( IO_NETCDF   )
                      CALL ext_ncd_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
#endif
#ifdef INTIO
                    CASE ( IO_INTIO   )
                      CALL ext_int_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
#endif
#ifdef YYY
                 CASE ( IO_YYY )
                    CALL ext_yyy_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
#endif
#ifdef GRIB1
                 CASE ( IO_GRIB1 )
                    CALL ext_gr1_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
#endif
#ifdef GRIB2
                 CASE ( IO_GRIB2 )
                    CALL ext_gr2_put_dom_ti_char ( handle(DataHandle), TRIM(Element), TRIM(CData), Status)
#endif
                    CASE DEFAULT
                      Status = 0
                  END SELECT

                  icurs = icurs + hdrbufsize

! The I/O server "root" handles the "put_var_ti_char" request.
                CASE ( int_var_ti_char )
!write(0,*)' int_var_ti_char '
                  CALL int_get_ti_header_char( bigbuf(icurs/inttypesize), hdrbufsize, inttypesize, &
                                               DataHandle, Element, VarName, CData, code )

                  SELECT CASE (use_package(io_form(DataHandle)))
#ifdef NETCDF
                    CASE ( IO_NETCDF   )
                      CALL ext_ncd_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
#endif
#ifdef INTIO
                    CASE ( IO_INTIO   )
                      CALL ext_int_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
#endif
#ifdef YYY
                 CASE ( IO_YYY )
                    CALL ext_yyy_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
#endif
#ifdef GRIB1
                 CASE ( IO_GRIB1 )
                    CALL ext_gr1_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
#endif
#ifdef GRIB2
                 CASE ( IO_GRIB2 )
                    CALL ext_gr2_put_var_ti_char ( handle(DataHandle), TRIM(Element), TRIM(VarName), TRIM(CData), Status)
#endif
                    CASE DEFAULT
                      Status = 0
                  END SELECT

                  icurs = icurs + hdrbufsize

                CASE ( int_ioexit )
! ioexit is now handled by sending negative message length to server
                  CALL wrf_error_fatal( &
                         "quilt: should have handled int_ioexit already")
! The I/O server "root" handles the "ioclose" request.
                CASE ( int_ioclose )
                  CALL int_get_handle_header( bigbuf(icurs/inttypesize), hdrbufsize, itypesize, &
                                              DataHandle , code )
                  icurs = icurs + hdrbufsize

                  IF ( DataHandle .GE. 1 ) THEN
!JMDEBUGwrite(0,*)'closing DataHandle ',DataHandle

                  SELECT CASE (use_package(io_form(DataHandle)))
#ifdef NETCDF
                    CASE ( IO_NETCDF   )
                      CALL ext_ncd_inquire_filename( handle(DataHandle), fname, fstat, Status )
                      IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
                        CALL ext_ncd_ioclose(handle(DataHandle),Status)
                      ENDIF
#endif
#ifdef INTIO
                    CASE ( IO_INTIO   )
                      CALL ext_int_inquire_filename( handle(DataHandle), fname, fstat, Status )
                      IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
                        CALL ext_int_ioclose(handle(DataHandle),Status)
                      ENDIF
#endif
#ifdef YYY
                 CASE ( IO_YYY )
                    CALL ext_yyy_inquire_filename( handle(DataHandle), fname, fstat, Status )
                    IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
                      CALL ext_yyy_ioclose(handle(DataHandle),Status)
                    ENDIF
#endif
#ifdef GRIB1
                 CASE ( IO_GRIB1 )
                    CALL ext_gr1_inquire_filename( handle(DataHandle), fname, fstat, Status )
                    IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
                      CALL ext_gr1_ioclose(handle(DataHandle),Status)
                    ENDIF
#endif
#ifdef GRIB2
                 CASE ( IO_GRIB2 )
                    CALL ext_gr2_inquire_filename( handle(DataHandle), fname, fstat, Status )
                    IF ( fstat .EQ. WRF_FILE_OPENED_FOR_WRITE .OR. fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
                      CALL ext_gr2_ioclose(handle(DataHandle),Status)
                    ENDIF
#endif
                    CASE DEFAULT
                      Status = 0
                  END SELECT
                  ENDIF

! The I/O server "root" handles the "open_for_write_begin" request.
                CASE ( int_open_for_write_begin )

                  CALL int_get_ofwb_header( bigbuf(icurs/inttypesize), hdrbufsize, itypesize, &
                                            FileName,SysDepInfo,io_form_arg,DataHandle )

!write(0,*)' int_open_for_write_begin inttypesize ',inttypesize,' itypesize ',itypesize
!write(0,*)' int_open_for_write_begin icurs ', icurs, hdrbufsize
!JMDEBUGwrite(0,*)' int_open_for_write_begin FileName ',TRIM(FileName) , ' DataHandle ', DataHandle
!write(0,*)' int_open_for_write_begin SysDepInfo ',TRIM(SysDepInfo)
                  icurs = icurs + hdrbufsize
!write(0,*)' int_open_for_write_begin new icurs,tag,size ', icurs, get_hdr_tag( bigbuf(icurs/inttypesize) ),get_hdr_rec_size( bigbuf(icurs/inttypesize) )

                  io_form(DataHandle) = io_form_arg

                  SELECT CASE (use_package(io_form(DataHandle)))
#ifdef NETCDF
                    CASE ( IO_NETCDF   )
                      CALL ext_ncd_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
!write(0,*)'ext_ncd_open_for_write_begin ',Trim(FileName),DataHandle,handle(DataHandle),Status
#endif
#ifdef INTIO
                    CASE ( IO_INTIO   )
                      CALL ext_int_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
#endif
#ifdef YYY
                    CASE ( IO_YYY )
                       CALL ext_yyy_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
#endif
#ifdef GRIB1
                    CASE ( IO_GRIB1 )
                       CALL ext_gr1_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
#endif
#ifdef GRIB2
                    CASE ( IO_GRIB2 )
                       CALL ext_gr2_open_for_write_begin(FileName,Comm,IOComm,SysDepInfo,handle(DataHandle),Status)
#endif
                    CASE DEFAULT
                      Status = 0
                  END SELECT

                  okay_to_write(DataHandle) = .false.

! The I/O server "root" handles the "open_for_write_commit" request.
! In this case, the "okay_to_commit" is simply set to .true. so "write_field"
! requests will initiate writes to disk.  Actual commit will be done after
! all requests in this batch have been handled.
                CASE ( int_open_for_write_commit )

                  CALL int_get_handle_header( bigbuf(icurs/inttypesize), hdrbufsize, itypesize, &
                                              DataHandle , code )
!write(0,*)' int_open_for_write_commit icurs ', icurs, hdrbufsize
                  icurs = icurs + hdrbufsize
                  okay_to_commit(DataHandle) = .true.

! The I/O server "root" handles the "write_field" (int_field) request.
! If okay_to_write(DataHandle) is .true. then the patch in the
! header (bigbuf) is written to a globally-sized internal output buffer via
! the call to store_patch_in_outbuf().  Note that this is where the actual
! "quilting" (reassembly of patches onto a full-size domain) is done.  If
! okay_to_write(DataHandle) is .false. then external I/O package interfaces
! are called to write metadata for I/O formats that support native metadata.
!
! NOTE that the I/O server "root" will only see write_field (int_field)
! requests AFTER an "iosync" request.
                CASE ( int_field )
!write(0,*)' int_field '
                  CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
                  CALL int_get_write_field_header ( bigbuf(icurs/inttypesize), hdrbufsize, itypesize, ftypesize,  &
                                                    DataHandle , DateStr , VarName , Dummy , FieldType , Comm , IOComm, &
                                                    DomainDesc , MemoryOrder , Stagger , DimNames ,              &
                                                    DomainStart , DomainEnd ,                                    &
                                                    MemoryStart , MemoryEnd ,                                    &
                                                    PatchStart , PatchEnd )
                  icurs = icurs + hdrbufsize

                  IF ( okay_to_write(DataHandle) ) THEN

!                   WRITE(*,*)'>>> ',TRIM(DateStr), ' ', TRIM(VarName), ' ', TRIM(MemoryOrder), ' ', &
!                        (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)*(PatchEnd(3)-PatchStart(3)+1)

                    IF ( FieldType .EQ. WRF_FLOAT .OR. FieldType .EQ. WRF_DOUBLE)  THEN
                      ! Note that the WRF_DOUBLE branch of this IF statement must come first since
                      ! WRF_FLOAT is set equal to WRF_DOUBLE during autopromotion builds.
                      IF ( FieldType .EQ. WRF_DOUBLE)  THEN
! this branch has not been tested TBH: 20050406
                        CALL mpi_type_size( MPI_DOUBLE_PRECISION, ftypesize, ierr )
                      ELSE
                        CALL mpi_type_size( MPI_REAL, ftypesize, ierr )
                      ENDIF
                      stored_write_record = .true.
                      CALL store_patch_in_outbuf ( bigbuf(icurs/inttypesize), dummybuf, TRIM(DateStr), TRIM(VarName) , &
                                                   FieldType, TRIM(MemoryOrder), TRIM(Stagger), DimNames, &
                                                   DomainStart , DomainEnd , &
                                                   MemoryStart , MemoryEnd , &
                                                   PatchStart , PatchEnd )

                    ELSE IF ( FieldType .EQ. WRF_INTEGER ) THEN
                      CALL mpi_type_size( MPI_INTEGER, ftypesize, ierr )
                      stored_write_record = .true.
                      CALL store_patch_in_outbuf ( dummybuf, bigbuf(icurs/inttypesize), TRIM(DateStr), TRIM(VarName) , &
                                                   FieldType, TRIM(MemoryOrder), TRIM(Stagger), DimNames, &
                                                   DomainStart , DomainEnd , &
                                                   MemoryStart , MemoryEnd , &
                                                   PatchStart , PatchEnd )
                    ELSE IF ( FieldType .EQ. WRF_LOGICAL ) THEN
                      ftypesize = LWORDSIZE
                    ENDIF
                    icurs = icurs + (PatchEnd(1)-PatchStart(1)+1)*(PatchEnd(2)-PatchStart(2)+1)* &
                                    (PatchEnd(3)-PatchStart(3)+1)*ftypesize
                  ELSE
                    SELECT CASE (use_package(io_form(DataHandle)))
#ifdef NETCDF
                      CASE ( IO_NETCDF   )
                        CALL ext_ncd_write_field ( handle(DataHandle) , TRIM(DateStr) ,         &
                                   TRIM(VarName) , dummy , FieldType , Comm , IOComm,           &
                                   DomainDesc , TRIM(MemoryOrder) , TRIM(Stagger) , DimNames ,  &
                                   DomainStart , DomainEnd ,                                    &
                                   DomainStart , DomainEnd ,                                    &
                                   DomainStart , DomainEnd ,                                    &
                                   Status )
#endif
#if 0
! since this is training and the grib output doesn't need training, disable this branch.
#ifdef YYY
                 CASE ( IO_YYY )
                      CALL ext_YYY_write_field ( handle(DataHandle) , TRIM(DateStr) ,         &
                                 TRIM(VarName) , dummy , FieldType , Comm , IOComm,           &
                                 DomainDesc , TRIM(MemoryOrder) , TRIM(Stagger) , DimNames ,  &
                                 DomainStart , DomainEnd ,                                    &
                                 DomainStart , DomainEnd ,                                    &
                                 DomainStart , DomainEnd ,                                    &
                                 Status )
#endif
#endif
                      CASE DEFAULT
                        Status = 0
                    END SELECT
                  ENDIF
                CASE ( int_iosync )
!write(0,*)' int_iosync '
                  CALL int_get_handle_header( bigbuf(icurs/inttypesize), hdrbufsize, itypesize, &
                                            DataHandle , code )
                  icurs = icurs + hdrbufsize
                CASE DEFAULT
                  WRITE(mess,*)'quilt: bad tag: ',get_hdr_tag( bigbuf(icurs/inttypesize) ),' icurs ',icurs/inttypesize
                  CALL wrf_error_fatal( mess )
              END SELECT

            ENDDO
! Now, the I/O server "root" has finished handling all commands from the latest
! call to retrieve_pieces_of_field().

            IF (stored_write_record) THEN
!write(0,*)'calling write_outbuf ',DataHandle
! If any fields have been stored in a globally-sized internal output buffer
! (via a call to store_patch_in_outbuf()) then call write_outbuf() to write
! them to disk now.
! NOTE that the I/O server "root" will only have called
! store_patch_in_outbuf() when handling write_field (int_field)
! commands which only arrive AFTER an "iosync" command.
!JMTIMING              CALL start_timing
              CALL write_outbuf ( handle(DataHandle), use_package(io_form(DataHandle)))
!JMTIMING               CALL end_timing( "quilt: call to write_outbuf" )
!write(0,*)'back from write_outbuf ',DataHandle
            ENDIF

! If one or more "open_for_write_commit" commands were encountered from the
! latest call to retrieve_pieces_of_field() then call the package-specific
! routine to do the commit.
            IF (okay_to_commit(DataHandle)) THEN

              SELECT CASE (use_package(io_form(DataHandle)))
#ifdef NETCDF
                CASE ( IO_NETCDF   )
                  CALL ext_ncd_inquire_filename( handle(DataHandle), fname, fstat, Status )
!write(0,*)'preparing to commit ', DataHandle, fstat, fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED
                  IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
!write(0,*)'calling ext_ncd_open_for_write_commit ', handle(DataHandle), DataHandle
                    CALL ext_ncd_open_for_write_commit(handle(DataHandle),Status)
!write(0,*)'back from ext_ncd_open_for_write_commit ', Status
                    okay_to_write(DataHandle) = .true.
                  ENDIF
#endif
#ifdef INTIO
                CASE ( IO_INTIO   )
                  CALL ext_int_inquire_filename( handle(DataHandle), fname, fstat, Status )
                  IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
                    CALL ext_int_open_for_write_commit(handle(DataHandle),Status)
                    okay_to_write(DataHandle) = .true.
                  ENDIF
#endif
#ifdef YYY
                 CASE ( IO_YYY )
                    CALL ext_yyy_inquire_filename( handle(DataHandle), fname, fstat, Status )
                    IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
                       CALL ext_yyy_open_for_write_commit(handle(DataHandle),Status)
                       okay_to_write(DataHandle) = .true.
                    ENDIF
#endif
#ifdef GRIB1
                 CASE ( IO_GRIB1 )
                    CALL ext_gr1_inquire_filename( handle(DataHandle), fname, fstat, Status )
                    IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
                       CALL ext_gr1_open_for_write_commit(handle(DataHandle),Status)
                       okay_to_write(DataHandle) = .true.
                    ENDIF
#endif
#ifdef GRIB2
                 CASE ( IO_GRIB2 )
                    CALL ext_gr2_inquire_filename( handle(DataHandle), fname, fstat, Status )
                    IF ( fstat .EQ. WRF_FILE_OPENED_NOT_COMMITTED ) THEN
                       CALL ext_gr2_open_for_write_commit(handle(DataHandle),Status)
                       okay_to_write(DataHandle) = .true.
                    ENDIF
#endif

                CASE DEFAULT
                  Status = 0
              END SELECT

            okay_to_commit(DataHandle) = .false.
          ENDIF
          DEALLOCATE( bigbuf )
        ENDIF

! Retrieve header and all patches for the next field from the internal
! buffers.
        CALL retrieve_pieces_of_field ( obuf , VarName, obufsize, sz, retval )
!write(0,*)'calling next retrieve_pieces_of_field ',trim(VarName),obufsize,sz,retval
! Sum sizes of all headers and patches (data) for this field from all I/O
! servers in this I/O server group onto the I/O server "root".
        CALL MPI_Reduce( sz, bigbufsize, 1, MPI_INTEGER,  &
                         MPI_SUM, ntasks_local_group-1,         &
                         mpi_comm_local, ierr )
! Then, return to the top of the loop to collect headers and data from all
! I/O servers in this I/O server group onto the I/O server "root" and handle
! the next batch of commands.
!write(0,*)'after MPI_Reduce ',sz, bigbufsize
      END DO

      DEALLOCATE( obuf )

      ! flush output files if needed
      IF (stored_write_record) THEN
!JMTIMING        CALL start_timing
        SELECT CASE ( use_package(io_form) )
#ifdef NETCDF
          CASE ( IO_NETCDF   )
            CALL ext_ncd_iosync( handle(DataHandle), Status )
#endif
#ifdef XXX
          CASE ( IO_XXX   )
            CALL ext_xxx_iosync( handle(DataHandle), Status )
#endif
#ifdef YYY
          CASE ( IO_YYY   )
            CALL ext_yyy_iosync( handle(DataHandle), Status )
#endif
#ifdef ZZZ
          CASE ( IO_ZZZ   )
            CALL ext_zzz_iosync( handle(DataHandle), Status )
#endif
#ifdef GRIB1
          CASE ( IO_GRIB1   )
            CALL ext_gr1_iosync( handle(DataHandle), Status )
#endif
#ifdef GRIB2
          CASE ( IO_GRIB2   )
            CALL ext_gr2_iosync( handle(DataHandle), Status )
1209#endif
1210#ifdef INTIO
1211          CASE ( IO_INTIO   )
1212            CALL ext_int_iosync( handle(DataHandle), Status )
1213#endif
1214          CASE DEFAULT
1215            Status = 0
1216        END SELECT
1217!JMTIMING         CALL end_timing( "quilt: flush" )
1218      ENDIF
1219
1220      END DO
1221
1222    END SUBROUTINE quilt
1223
1224! matching #endif for DM_PARALLEL
1225#endif
1226
1227    SUBROUTINE init_module_wrf_quilt
1228!<DESCRIPTION>
1229! Both client (compute) and server tasks call this routine to initialize the
1230! module.  Routine setup_quilt_servers() is called from this routine to
1231! determine which tasks are compute tasks and which are server tasks.  Server
1232! tasks then call routine quilt() and remain there for the rest of the model
1233! run.  Compute tasks return from init_module_wrf_quilt() to perform model
1234! computations. 
1235!</DESCRIPTION>
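!
! An illustrative namelist.input entry for the quilt servers (a sketch only;
! the values shown are placeholders and must be chosen for the actual run):
!
!   &namelist_quilt
!    nio_tasks_per_group = 2,
!    nio_groups          = 1,
!   /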
1236#if defined( DM_PARALLEL ) && !defined( STUBMPI )
1237      IMPLICIT NONE
1238      INCLUDE 'mpif.h'
1239      INTEGER i
1240      NAMELIST /namelist_quilt/ nio_tasks_per_group, nio_groups
1241      INTEGER ntasks, mytask, ierr, io_status
1242      LOGICAL mpi_inited
1243
1244      quilting_enabled = .FALSE.
1245      IF ( disable_quilt ) RETURN
1246
1247      DO i = 1,int_num_handles
1248        okay_to_write(i) = .FALSE.
1249        int_handle_in_use(i) = .FALSE.
1250        server_for_handle(i) = 0
1251        int_num_bytes_to_write(i) = 0
1252      ENDDO
1253
1254      CALL MPI_INITIALIZED( mpi_inited, ierr )
1255      IF ( mpi_inited ) THEN
1256        CALL wrf_error_fatal("frame/module_io_quilt.F: quilt initialization "// &
1257                             "must be called before MPI_Init") ;
1258      ENDIF
1259
1260      CALL mpi_init ( ierr )
1261      CALL wrf_set_dm_communicator( MPI_COMM_WORLD )
1262      CALL wrf_termio_dup
1263      CALL MPI_Comm_rank ( MPI_COMM_WORLD, mytask, ierr ) ;
1264      CALL mpi_x_comm_size ( MPI_COMM_WORLD, ntasks, ierr ) ;
1265
1266      IF ( mytask .EQ. 0 ) THEN
1267        OPEN ( unit=27, file="namelist.input", form="formatted", status="old" )
1268        nio_groups = 1
1269        nio_tasks_per_group  = 0
1270        READ ( 27 , NML = namelist_quilt, IOSTAT=io_status )
1271        IF (io_status .NE. 0) THEN
1272          CALL wrf_error_fatal( "ERROR reading namelist namelist_quilt" )
1273        ENDIF
1274        CLOSE ( 27 )
1275      ENDIF
1276      CALL mpi_bcast( nio_tasks_per_group  , 1 , MPI_INTEGER , 0 , MPI_COMM_WORLD, ierr )
1277      CALL mpi_bcast( nio_groups , 1 , MPI_INTEGER , 0 , MPI_COMM_WORLD, ierr )
1278
1279      CALL setup_quilt_servers( nio_tasks_per_group,            &
1280                                mytask,               &
1281                                ntasks,               &
1282                                nio_groups,           &
1283                                nio_tasks_in_group,   &
1284                                MPI_COMM_WORLD,       &
1285                                mpi_comm_local,       &
1286                                mpi_comm_io_groups)
1287
1288       ! provide the communicator for the integration tasks to RSL
1289       IF ( mytask .lt. ncompute_tasks ) THEN
1290          CALL wrf_set_dm_communicator( mpi_comm_local )
1291       ELSE
1292          CALL quilt    ! will not return on io server tasks
1293       ENDIF
1294#endif
1295      RETURN
1296    END SUBROUTINE init_module_wrf_quilt
1297END MODULE module_wrf_quilt
1298
1299!<DESCRIPTION>
1300! Remaining routines in this file are defined outside of the module
1301! either to defeat arg/param type checking or to avoid an explicit use
1302! dependence.
1303!</DESCRIPTION>
1304
1305SUBROUTINE disable_quilting
1306!<DESCRIPTION>
1307! Call this from programs that should never use I/O quilting (e.g. real).
1308! It must be called before init_module_wrf_quilt().
1309!</DESCRIPTION>
1310  USE module_wrf_quilt
1311  disable_quilt = .TRUE.
1312  RETURN
1313END SUBROUTINE disable_quilting
1314
1315LOGICAL FUNCTION  use_output_servers()
1316!<DESCRIPTION>
1317! Returns .TRUE. if I/O quilt servers are in-use for write operations.
1318! This routine is called only by client (compute) tasks. 
1319!</DESCRIPTION>
1320  USE module_wrf_quilt
1321  use_output_servers = quilting_enabled
1322  RETURN
1323END FUNCTION use_output_servers
1324
1325LOGICAL FUNCTION  use_input_servers()
1326!<DESCRIPTION>
1327! Returns .TRUE. if I/O quilt servers are in-use for read operations.
1328! This routine is called only by client (compute) tasks. 
1329!</DESCRIPTION>
1330  USE module_wrf_quilt
1331  use_input_servers = .FALSE.
1332  RETURN
1333END FUNCTION use_input_servers
1334
1335SUBROUTINE wrf_quilt_open_for_write_begin( FileName , Comm_compute, Comm_io, SysDepInfo, &
1336                                     DataHandle , io_form_arg, Status )
1337!<DESCRIPTION>
1338! Instruct the I/O quilt servers to begin data definition ("training") phase
1339! for writing to WRF dataset FileName.  io_form_arg indicates file format.
1340! This routine is called only by client (compute) tasks. 
1341!</DESCRIPTION>
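!
! Illustrative client-side sequence (a sketch only; the real calls are made
! from module_io.F, and the argument values below are placeholders):
!
!   CALL wrf_quilt_open_for_write_begin( 'wrfout_d01', comm, comm, '',   &
!                                        dh, io_form, ierr )
!   ! ... "training" writes that define the dataset ...
!   CALL wrf_quilt_open_for_write_commit( dh, ierr )
!   ! ... subsequent writes produce actual output ...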
1342#if defined( DM_PARALLEL ) && !defined( STUBMPI )
1343  USE module_wrf_quilt
1344  IMPLICIT NONE
1345  INCLUDE 'mpif.h'
1346#include "intio_tags.h"
1347  CHARACTER *(*), INTENT(IN)  :: FileName
1348  INTEGER ,       INTENT(IN)  :: Comm_compute , Comm_io
1349  CHARACTER *(*), INTENT(IN)  :: SysDepInfo
1350  INTEGER ,       INTENT(OUT) :: DataHandle
1351  INTEGER ,       INTENT(IN)  :: io_form_arg
1352  INTEGER ,       INTENT(OUT) :: Status
1353! Local
1354  CHARACTER*132   :: locFileName, locSysDepInfo
1355  INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group
1356  REAL dummy
1357
1358  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_open_for_write_begin' )
1359  CALL int_get_fresh_handle(i)
1360  okay_to_write(i) = .false.
1361  DataHandle = i
1362
1363  locFileName = FileName
1364  locSysDepInfo = SysDepInfo
1365
1366  CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
1367  IF ( wrf_dm_on_monitor() ) THEN
1368    CALL int_gen_ofwb_header( hdrbuf, hdrbufsize, itypesize, &
1369                            locFileName,locSysDepInfo,io_form_arg,DataHandle )
1370  ELSE
1371    CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
1372  ENDIF
1373
1374  iserver = get_server_id ( DataHandle )
1375!JMDEBUGwrite(0,*)'wrf_quilt_open_for_write_begin iserver = ', iserver
1376  CALL get_mpi_comm_io_groups( comm_io_group , iserver )
1377!JMDEBUGwrite(0,*)'wrf_quilt_open_for_write_begin comm_io_group  = ', comm_io_group
1378
1379  CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
1380!JMDEBUGwrite(0,*)'mpi_x_comm_size tasks_in_group ',tasks_in_group, ierr
1381
1382!JMTIMING  CALL start_timing
1383  ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
1384  reduced = 0
1385  reduced(1) = hdrbufsize
1386  IF ( wrf_dm_on_monitor() )  reduced(2) = i
1387  CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER,  &
1388                   MPI_SUM, tasks_in_group-1,          &   ! root = nio_tasks_in_group-1 is me
1389                   comm_io_group, ierr )
1390!JMTIMING   CALL end_timing("MPI_Reduce in wrf_quilt_open_for_write_begin")
1391
1392  ! send data to the i/o processor
1393  CALL collect_on_comm( comm_io_group,            &
1394                        onebyte,                       &
1395                        hdrbuf, hdrbufsize , &
1396                        dummy, 0 )
1397
1398  Status = 0
1399
1400
1401#endif
1402  RETURN 
1403END SUBROUTINE wrf_quilt_open_for_write_begin
1404
1405SUBROUTINE wrf_quilt_open_for_write_commit( DataHandle , Status )
1406!<DESCRIPTION>
1407! Instruct the I/O quilt servers to switch an internal flag to enable output
1408! for the dataset referenced by DataHandle.  The call to
1409! wrf_quilt_open_for_write_commit() must be paired with a call to
1410! wrf_quilt_open_for_write_begin().
1411! This routine is called only by client (compute) tasks. 
1412!</DESCRIPTION>
1413#if defined( DM_PARALLEL ) && !defined( STUBMPI )
1414  USE module_wrf_quilt
1415  IMPLICIT NONE
1416  INCLUDE 'mpif.h'
1417#include "intio_tags.h"
1418  INTEGER ,       INTENT(IN ) :: DataHandle
1419  INTEGER ,       INTENT(OUT) :: Status
1420  INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group
1421  REAL dummy
1422
1423  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_open_for_write_commit' )
1424  IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
1425    IF ( int_handle_in_use( DataHandle ) ) THEN
1426      okay_to_write( DataHandle ) = .true.
1427    ENDIF
1428  ENDIF
1429
1430  CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
1431  CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
1432                              DataHandle, int_open_for_write_commit )
1433
1434  iserver = get_server_id ( DataHandle )
1435  CALL get_mpi_comm_io_groups( comm_io_group , iserver )
1436
1437  CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
1438
1439!JMTIMING  CALL start_timing
1440  ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
1441  reduced = 0
1442  reduced(1) = hdrbufsize
1443  IF ( wrf_dm_on_monitor() )  reduced(2) = DataHandle
1444  CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER,  &
1445                   MPI_SUM, tasks_in_group-1,          &   ! root = nio_tasks_in_group-1 is me
1446                   comm_io_group, ierr )
1447!JMTIMING   CALL end_timing("MPI_Reduce in wrf_quilt_open_for_write_commit")
1448
1449  ! send data to the i/o processor
1450  CALL collect_on_comm( comm_io_group,            &
1451                        onebyte,                       &
1452                        hdrbuf, hdrbufsize , &
1453                        dummy, 0 )
1454
1455  Status = 0
1456
1457#endif
1458  RETURN 
1459END SUBROUTINE wrf_quilt_open_for_write_commit
1460
1461SUBROUTINE wrf_quilt_open_for_read ( FileName , Comm_compute, Comm_io, SysDepInfo, &
1462                               DataHandle , Status )
1463!<DESCRIPTION>
1464! Instruct the I/O quilt servers to open WRF dataset FileName for reading.
1465! This routine is called only by client (compute) tasks. 
1466! This is not yet supported.
1467!</DESCRIPTION>
1468#if defined( DM_PARALLEL ) && !defined( STUBMPI )
1469  IMPLICIT NONE
1470  CHARACTER *(*), INTENT(IN)  :: FileName
1471  INTEGER ,       INTENT(IN)  :: Comm_compute , Comm_io
1472  CHARACTER *(*), INTENT(IN)  :: SysDepInfo
1473  INTEGER ,       INTENT(OUT) :: DataHandle
1474  INTEGER ,       INTENT(OUT) :: Status
1475
1476  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_open_for_read' )
1477  DataHandle = -1
1478  Status = -1
1479  CALL wrf_error_fatal ( "frame/module_io_quilt.F: wrf_quilt_open_for_read not yet supported" )
1480#endif
1481  RETURN 
1482END SUBROUTINE wrf_quilt_open_for_read
1483
1484SUBROUTINE wrf_quilt_inquire_opened ( DataHandle, FileName , FileStatus, Status )
1485!<DESCRIPTION>
1486! Inquire if the dataset referenced by DataHandle is open.
1487! Does not require communication with I/O servers.
1488! This routine is called only by client (compute) tasks. 
1489!</DESCRIPTION>
1490#if defined( DM_PARALLEL ) && !defined( STUBMPI )
1491  USE module_wrf_quilt
1492  IMPLICIT NONE
1493#include "wrf_io_flags.h"
1494  INTEGER ,       INTENT(IN)  :: DataHandle
1495  CHARACTER *(*), INTENT(IN)  :: FileName
1496  INTEGER ,       INTENT(OUT) :: FileStatus
1497  INTEGER ,       INTENT(OUT) :: Status
1498
1499  Status = 0
1500
1501  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_inquire_opened' )
1502  IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
1503    IF ( int_handle_in_use( DataHandle ) ) THEN
1504      IF ( okay_to_write( DataHandle ) ) THEN
1505        FileStatus = WRF_FILE_OPENED_FOR_WRITE
1506      ENDIF
1507    ENDIF
1508  ENDIF
1509  Status = 0
1510 
1511#endif
1512  RETURN
1513END SUBROUTINE wrf_quilt_inquire_opened
1514
1515SUBROUTINE wrf_quilt_inquire_filename ( DataHandle, FileName , FileStatus, Status )
1516!<DESCRIPTION>
1517! Return the Filename and FileStatus associated with DataHandle.
1518! Does not require communication with I/O servers.
1519!
1520! Note that the current implementation does not actually return FileName.
1521! Currently, WRF does not use this returned value.  Fixing this would simply
1522! require saving the file names on the client tasks in an array similar to
1523! okay_to_write().
1524! This routine is called only by client (compute) tasks. 
1525!</DESCRIPTION>
1526#if defined( DM_PARALLEL ) && !defined( STUBMPI )
1527  USE module_wrf_quilt
1528  IMPLICIT NONE
1529#include "wrf_io_flags.h"
1530  INTEGER ,       INTENT(IN)  :: DataHandle
1531  CHARACTER *(*), INTENT(OUT) :: FileName
1532  INTEGER ,       INTENT(OUT) :: FileStatus
1533  INTEGER ,       INTENT(OUT) :: Status
1534  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_inquire_filename' )
1535  Status = 0
1536  IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
1537    IF ( int_handle_in_use( DataHandle ) ) THEN
1538      IF ( okay_to_write( DataHandle ) ) THEN
1539        FileStatus = WRF_FILE_OPENED_FOR_WRITE
1540      ELSE
1541        FileStatus = WRF_FILE_OPENED_NOT_COMMITTED
1542      ENDIF
1543    ELSE
1544        FileStatus = WRF_FILE_NOT_OPENED
1545    ENDIF
1546    Status = 0
1547    FileName = "bogusfornow"
1548  ELSE
1549    Status = -1
1550  ENDIF
1551#endif
1552  RETURN
1553END SUBROUTINE wrf_quilt_inquire_filename
1554
1555SUBROUTINE wrf_quilt_iosync ( DataHandle, Status )
1556!<DESCRIPTION>
1557! Instruct the I/O quilt servers to synchronize the disk copy of a dataset
1558! with memory buffers.
1559!
1560! After the "iosync" header (request) is sent to the I/O quilt server,
1561! the compute tasks will then send the entire contents (headers and data) of
1562! int_local_output_buffer to their I/O quilt server.  This communication is
1563! done in subroutine send_to_io_quilt_servers().  After the I/O quilt servers
1564! receive this data, they will write all accumulated fields to disk.
1565!
1566! Significant time may be required for the I/O quilt servers to organize
1567! fields and write them to disk.  Therefore, the "iosync" request should be
1568! sent only when the compute tasks are ready to run for a while without
1569! needing to communicate with the servers.  Otherwise, the compute tasks
1570! will end up waiting for the servers to finish writing to disk, thus wasting
1571! any performance benefits of having servers at all.
1572!
1573! This routine is called only by client (compute) tasks. 
1574!</DESCRIPTION>
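!
! Illustrative ordering on a compute task (a sketch; dh and ierr are
! placeholders, and only the wrf_quilt_iosync call is shown literally):
!
!   ! ... buffered field writes accumulate in int_local_output_buffer ...
!   CALL wrf_quilt_iosync( dh, ierr )   ! ship the buffer to the servers
!   ! ... resume computation while the servers write to disk ...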
1575#if  defined( DM_PARALLEL ) && ! defined (STUBMPI)
1576  USE module_wrf_quilt
1577  IMPLICIT NONE
1578  include "mpif.h"
1579  INTEGER ,       INTENT(IN)  :: DataHandle
1580  INTEGER ,       INTENT(OUT) :: Status
1581
1582  INTEGER locsize , inttypesize
1583  INTEGER ierr, tasks_in_group, comm_io_group, dummy, i
1584
1585  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_iosync' )
1586
1587!JMTIMING  CALL start_timing
1588  IF ( associated ( int_local_output_buffer ) ) THEN
1589
1590    iserver = get_server_id ( DataHandle )
1591    CALL get_mpi_comm_io_groups( comm_io_group , iserver )
1592
1593    CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
1594
1595    locsize = int_num_bytes_to_write(DataHandle)
1596
1597!JMTIMING    CALL start_timing
1598    ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
1599    reduced = 0
1600    reduced(1) = locsize
1601    IF ( wrf_dm_on_monitor() )  reduced(2) = DataHandle
1602    CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER,  &
1603                     MPI_SUM, tasks_in_group-1,          &   ! root = nio_tasks_in_group-1 is me
1604                     comm_io_group, ierr )
1605!JMTIMING     CALL end_timing("MPI_Reduce in wrf_quilt_iosync")
1606
1607    ! send data to the i/o processor
1608#ifdef DEREF_KLUDGE
1609    CALL collect_on_comm( comm_io_group,            &
1610                          onebyte,                       &
1611                          int_local_output_buffer(1), locsize , &
1612                          dummy, 0 )
1613#else
1614    CALL collect_on_comm( comm_io_group,            &
1615                          onebyte,                       &
1616                          int_local_output_buffer, locsize , &
1617                          dummy, 0 )
1618#endif
1619
1620
1621    int_local_output_cursor = 1
1622!    int_num_bytes_to_write(DataHandle) = 0
1623    DEALLOCATE ( int_local_output_buffer )
1624    NULLIFY ( int_local_output_buffer )
1625  ELSE
1626    CALL wrf_message ("frame/module_io_quilt.F: wrf_quilt_iosync: no buffer allocated")
1627  ENDIF
1628!JMTIMING   CALL end_timing("wrf_quilt_iosync")
1629  Status = 0
1630#endif
1631  RETURN
1632END SUBROUTINE wrf_quilt_iosync
1633
1634SUBROUTINE wrf_quilt_ioclose ( DataHandle, Status )
1635!<DESCRIPTION>
1636! Instruct the I/O quilt servers to close the dataset referenced by
1637! DataHandle.
1638! This routine also clears the client file handle and, if needed, deallocates
1639! int_local_output_buffer.
1640! This routine is called only by client (compute) tasks. 
1641!</DESCRIPTION>
1642#if defined( DM_PARALLEL ) && ! defined( STUBMPI)
1643  USE module_wrf_quilt
1644  USE module_timing
1645  IMPLICIT NONE
1646  INCLUDE 'mpif.h'
1647#include "intio_tags.h"
1648  INTEGER ,       INTENT(IN)  :: DataHandle
1649  INTEGER ,       INTENT(OUT) :: Status
1650  INTEGER i, itypesize, tasks_in_group, comm_io_group, ierr
1651  REAL dummy
1652
1653!JMTIMING  CALL start_timing
1654  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_ioclose' )
1655  CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
1656
1657  IF ( wrf_dm_on_monitor() ) THEN
1658    CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
1659                                DataHandle , int_ioclose )
1660  ELSE
1661    CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
1662  ENDIF
1663
1664  iserver = get_server_id ( DataHandle )
1665  CALL get_mpi_comm_io_groups( comm_io_group , iserver )
1666
1667  CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
1668
1669!JMTIMING  CALL start_timing
1670  ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
1671  reduced = 0
1672  IF ( wrf_dm_on_monitor() )  reduced(2) = DataHandle
1673!JMDEBUGwrite(0,*)'before MPI_Reduce in ioclose: reduced ', reduced
1674  CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER,  &
1675                   MPI_SUM, tasks_in_group-1,          &   ! root = nio_tasks_in_group-1 is me
1676                   comm_io_group, ierr )
1677!JMTIMING   CALL end_timing("MPI_Reduce in ioclose")
1678
1679#if 0
1680  ! send data to the i/o processor
1681!JMTIMING  CALL start_timing
1682  CALL collect_on_comm( comm_io_group,            &
1683                        onebyte,                       &
1684                        hdrbuf, hdrbufsize , &
1685                        dummy, 0 )
1686!JMTIMING   CALL end_timing("collect_on_comm in io_close")
1687#endif
1688
1689  int_handle_in_use(DataHandle) = .false.
1690  CALL set_server_id( DataHandle, 0 )
1691  okay_to_write(DataHandle) = .false.
1692  okay_to_commit(DataHandle) = .false.
1693  int_local_output_cursor = 1
1694  int_num_bytes_to_write(DataHandle) = 0
1695  IF ( associated ( int_local_output_buffer ) ) THEN
1696    DEALLOCATE ( int_local_output_buffer )
1697    NULLIFY ( int_local_output_buffer )
1698  ENDIF
1699
1700  Status = 0
1701!JMTIMING   CALL end_timing( "wrf_quilt_ioclose" )
1702
1703#endif
1704  RETURN
1705END SUBROUTINE wrf_quilt_ioclose
1706
1707SUBROUTINE wrf_quilt_ioexit( Status )
1708!<DESCRIPTION>
1709! Instruct the I/O quilt servers to shut down the WRF I/O system.
1710! Do not call any wrf_quilt_*() routines after this routine has been called.
1711! This routine is called only by client (compute) tasks. 
1712!</DESCRIPTION>
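!
! Illustrative end-of-run sequence on the compute tasks (a sketch; dh and
! ierr are placeholders):
!
!   CALL wrf_quilt_ioclose( dh, ierr )   ! close each open dataset first
!   CALL wrf_quilt_ioexit( ierr )        ! then shut the I/O servers down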
1713#if defined( DM_PARALLEL ) && ! defined (STUBMPI )
1714  USE module_wrf_quilt
1715  IMPLICIT NONE
1716  INCLUDE 'mpif.h'
1717#include "intio_tags.h"
1718  INTEGER ,       INTENT(OUT) :: Status
1719  INTEGER                     :: DataHandle
1720  INTEGER i, itypesize, tasks_in_group, comm_io_group, me, ierr
1721  REAL dummy
1722
1723  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_ioexit' )
1724  CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
1725
1726  IF ( wrf_dm_on_monitor() ) THEN
1727    CALL int_gen_handle_header( hdrbuf, hdrbufsize, itypesize, &
1728                                DataHandle , int_ioexit )  ! Handle is dummy
1729  ELSE
1730    CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
1731  ENDIF
1732
1733  DO iserver = 1, nio_groups
1734    CALL get_mpi_comm_io_groups( comm_io_group , iserver )
1735
1736    CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
1737    CALL mpi_comm_rank( comm_io_group , me , ierr )
1738
1739! BY SENDING A NEGATIVE SIZE WE GET THE SERVERS TO SHUT DOWN
1740    hdrbufsize = -100
1741    reduced = 0
1742    IF ( me .eq. 0 ) reduced(1) = hdrbufsize
1743    CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER,  &
1744                     MPI_SUM, tasks_in_group-1,          &   ! root = nio_tasks_in_group-1 is me
1745                     comm_io_group, ierr )
1746
1747  ENDDO
1748  Status = 0
1749
1750#endif
1751  RETURN 
1752END SUBROUTINE wrf_quilt_ioexit
1753
1754SUBROUTINE wrf_quilt_get_next_time ( DataHandle, DateStr, Status )
1755!<DESCRIPTION>
1756! Instruct the I/O quilt servers to return the next time stamp.
1757! This is not yet supported.
1758! This routine is called only by client (compute) tasks. 
1759!</DESCRIPTION>
1760#if defined( DM_PARALLEL ) && ! defined (STUBMPI)
1761  IMPLICIT NONE
1762  INTEGER ,       INTENT(IN)  :: DataHandle
1763  CHARACTER*(*)               :: DateStr
1764  INTEGER                     :: Status
1765#endif
1766  RETURN
1767END SUBROUTINE wrf_quilt_get_next_time
1768
1769SUBROUTINE wrf_quilt_get_previous_time ( DataHandle, DateStr, Status )
1770!<DESCRIPTION>
1771! Instruct the I/O quilt servers to return the previous time stamp.
1772! This is not yet supported.
1773! This routine is called only by client (compute) tasks. 
1774!</DESCRIPTION>
1775#if defined( DM_PARALLEL ) && ! defined (STUBMPI)
1776  IMPLICIT NONE
1777  INTEGER ,       INTENT(IN)  :: DataHandle
1778  CHARACTER*(*)               :: DateStr
1779  INTEGER                     :: Status
1780#endif
1781  RETURN
1782END SUBROUTINE wrf_quilt_get_previous_time
1783
1784SUBROUTINE wrf_quilt_set_time ( DataHandle, Data,  Status )
1785!<DESCRIPTION>
1786! Instruct the I/O quilt servers to set the time stamp in the dataset
1787! referenced by DataHandle.
1788! This routine is called only by client (compute) tasks. 
1789!</DESCRIPTION>
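!
! Illustrative call (a sketch; dh, ierr and the date string are placeholders,
! the latter in WRF's usual YYYY-MM-DD_hh:mm:ss form):
!
!   CALL wrf_quilt_set_time( dh, '2024-01-01_00:00:00', ierr )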
1790#if defined( DM_PARALLEL ) && !defined( STUBMPI )
1791  USE module_wrf_quilt
1792  IMPLICIT NONE
1793  INCLUDE 'mpif.h'
1794#include "intio_tags.h"
1795  INTEGER ,       INTENT(IN)  :: DataHandle
1796  CHARACTER*(*) , INTENT(IN)  :: Data
1797  INTEGER                     :: Status
1798  INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group
1799  REAL dummy
1800  INTEGER                 :: Count
1801!
1802  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_set_time' )
1803
1804  IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
1805    IF ( int_handle_in_use( DataHandle ) ) THEN
1806      CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
1807      Count = 0   ! there is no count for character strings
1808      CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
1809                              DataHandle, "TIMESTAMP", "", Data, int_set_time )
1810      iserver = get_server_id ( DataHandle )
1811      CALL get_mpi_comm_io_groups( comm_io_group , iserver )
1812      CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
1813
1814      ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
1815      reduced = 0
1816      reduced(1) = hdrbufsize
1817      IF ( wrf_dm_on_monitor() )  reduced(2) = DataHandle
1818      CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER,  &
1819                       MPI_SUM, tasks_in_group-1,          &   ! root = nio_tasks_in_group-1 is me
1820                       comm_io_group, ierr )
1821      ! send data to the i/o processor
1822      CALL collect_on_comm( comm_io_group,            &
1823                            onebyte,                       &
1824                            hdrbuf, hdrbufsize , &
1825                            dummy, 0 )
1826    ENDIF
1827  ENDIF
1828
1829#endif
1830RETURN
1831END SUBROUTINE wrf_quilt_set_time
1832
1833SUBROUTINE wrf_quilt_get_next_var ( DataHandle, VarName, Status )
1834!<DESCRIPTION>
1835! When reading, instruct the I/O quilt servers to return the name of the next
1836! variable in the current time frame.
1837! This is not yet supported.
1838! This routine is called only by client (compute) tasks. 
1839!</DESCRIPTION>
1840#if defined( DM_PARALLEL ) && !defined( STUBMPI )
1841  IMPLICIT NONE
1842  INTEGER ,       INTENT(IN)  :: DataHandle
1843  CHARACTER*(*)               :: VarName
1844  INTEGER                     :: Status
1845#endif
1846  RETURN
1847END SUBROUTINE wrf_quilt_get_next_var
1848
1849SUBROUTINE wrf_quilt_get_dom_ti_real ( DataHandle,Element,   Data, Count, Outcount, Status )
1850!<DESCRIPTION>
1851! Instruct the I/O quilt servers to attempt to read Count words of time
1852! independent domain metadata named "Element"
1853! from the open dataset described by DataHandle.
1854! Metadata of type real are
1855! stored in array Data.
1856! Actual number of words read is returned in OutCount.
1857! This routine is called only by client (compute) tasks. 
1858
1859! This is not yet supported.
1860!</DESCRIPTION>
1861#if defined( DM_PARALLEL ) && !defined( STUBMPI )
1862  IMPLICIT NONE
1863  INTEGER ,       INTENT(IN)  :: DataHandle
1864  CHARACTER*(*) , INTENT(IN)  :: Element
1865  REAL,            INTENT(IN) :: Data(*)
1866  INTEGER ,       INTENT(IN)  :: Count
1867  INTEGER                     :: Outcount
1868  INTEGER                     :: Status
1869  CALL wrf_message('wrf_quilt_get_dom_ti_real not supported yet')
1870#endif
1871RETURN
1872END SUBROUTINE wrf_quilt_get_dom_ti_real
1873
1874SUBROUTINE wrf_quilt_put_dom_ti_real ( DataHandle,Element,   Data, Count,  Status )
1875!<DESCRIPTION>
1876! Instruct the I/O quilt servers to write Count words of time independent
1877! domain metadata named "Element"
1878! to the open dataset described by DataHandle.
1879! Metadata of type real are
1880! copied from array Data.
1881! This routine is called only by client (compute) tasks. 
1882!</DESCRIPTION>
1883#if defined( DM_PARALLEL ) && !defined( STUBMPI )
1884  USE module_wrf_quilt
1885  IMPLICIT NONE
1886  INCLUDE 'mpif.h'
1887#include "intio_tags.h"
1888  INTEGER ,       INTENT(IN)  :: DataHandle
1889  CHARACTER*(*) , INTENT(IN)  :: Element
1890  real ,            INTENT(IN) :: Data(*)
1891  INTEGER ,       INTENT(IN)  :: Count
1892  INTEGER                     :: Status
1893!Local
1894  CHARACTER*132   :: locElement
1895  INTEGER i, typesize, itypesize, tasks_in_group, ierr, comm_io_group
1896  REAL dummy
1897!
1898!JMTIMING  CALL start_timing
1899  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_put_dom_ti_real' )
1900  CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
1901  locElement = Element
1902
1903  IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
1904    IF ( int_handle_in_use( DataHandle ) ) THEN
1905      CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
1906      CALL MPI_TYPE_SIZE( MPI_REAL, typesize, ierr )
1907      IF ( wrf_dm_on_monitor() ) THEN
1908        CALL int_gen_ti_header( hdrbuf, hdrbufsize, itypesize, typesize, &
1909                                DataHandle, locElement, Data, Count, int_dom_ti_real )
1910      ELSE
1911        CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
1912      ENDIF
1913      iserver = get_server_id ( DataHandle )
1914      CALL get_mpi_comm_io_groups( comm_io_group , iserver )
1915      CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
1916
1917!JMTIMING      CALL start_timing
1918      ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
1919      reduced = 0
1920      reduced(1) = hdrbufsize
1921      IF ( wrf_dm_on_monitor() )  reduced(2) = DataHandle
1922      CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER,  &
1923                       MPI_SUM, tasks_in_group-1,          &   ! root = nio_tasks_in_group-1 is me
1924                       comm_io_group, ierr )
1925!JMTIMING       CALL end_timing("MPI_Reduce in wrf_quilt_put_dom_ti_real")
1926      ! send data to the i/o processor
1927      CALL collect_on_comm( comm_io_group,            &
1928                            onebyte,                       &
1929                            hdrbuf, hdrbufsize , &
1930                            dummy, 0 )
1931    ENDIF
1932  ENDIF
1933
1934  Status = 0
1935!JMTIMING   CALL end_timing("wrf_quilt_put_dom_ti_real")
1936#endif
1937RETURN
1938END SUBROUTINE wrf_quilt_put_dom_ti_real
1939
1940SUBROUTINE wrf_quilt_get_dom_ti_double ( DataHandle,Element,   Data, Count, Outcount, Status )
1941!<DESCRIPTION>
1942! Instruct the I/O quilt servers to attempt to read Count words of time
1943! independent domain metadata named "Element"
1944! from the open dataset described by DataHandle.
1945! Metadata of type double are
1946! stored in array Data.
1947! Actual number of words read is returned in OutCount.
1948! This routine is called only by client (compute) tasks. 
1949!
1950! This is not yet supported.
1951!</DESCRIPTION>
1952#if defined( DM_PARALLEL ) && !defined( STUBMPI )
1953  IMPLICIT NONE
1954  INTEGER ,       INTENT(IN)  :: DataHandle
1955  CHARACTER*(*) , INTENT(IN)  :: Element
1956  real*8                      :: Data(*)
1957  INTEGER ,       INTENT(IN)  :: Count
1958  INTEGER                     :: OutCount
1959  INTEGER                     :: Status
1960  CALL wrf_error_fatal('wrf_quilt_get_dom_ti_double not supported yet')
1961#endif
1962RETURN
1963END SUBROUTINE wrf_quilt_get_dom_ti_double
1964
1965SUBROUTINE wrf_quilt_put_dom_ti_double ( DataHandle,Element,   Data, Count,  Status )
1966!<DESCRIPTION>
1967! Instruct the I/O quilt servers to write Count words of time independent
1968! domain metadata named "Element"
1969! to the open dataset described by DataHandle.
1970! Metadata of type double are
1971! copied from array Data.
1972! This routine is called only by client (compute) tasks. 
1973!
1974! This is not yet supported.
1975!</DESCRIPTION>
1976#if defined( DM_PARALLEL ) && !defined( STUBMPI )
1977  IMPLICIT NONE
1978  INTEGER ,       INTENT(IN)  :: DataHandle
1979  CHARACTER*(*) , INTENT(IN)  :: Element
1980  real*8 ,            INTENT(IN) :: Data(*)
1981  INTEGER ,       INTENT(IN)  :: Count
1982  INTEGER                     :: Status
1983  CALL wrf_error_fatal('wrf_quilt_put_dom_ti_double not supported yet')
1984#endif
1985RETURN
1986END SUBROUTINE wrf_quilt_put_dom_ti_double
1987
1988SUBROUTINE wrf_quilt_get_dom_ti_integer ( DataHandle,Element,   Data, Count, Outcount, Status )
1989!<DESCRIPTION>
1990! Instruct the I/O quilt servers to attempt to read Count words of time
1991! independent domain metadata named "Element"
1992! from the open dataset described by DataHandle.
1993! Metadata of type integer are
1994! stored in array Data.
1995! Actual number of words read is returned in OutCount.
1996! This routine is called only by client (compute) tasks. 
1997!
1998! This is not yet supported.
1999!</DESCRIPTION>
2000#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2001  IMPLICIT NONE
2002  INTEGER ,       INTENT(IN)  :: DataHandle
2003  CHARACTER*(*) , INTENT(IN)  :: Element
2004  integer                     :: Data(*)
2005  INTEGER ,       INTENT(IN)  :: Count
2006  INTEGER                      :: OutCount
2007  INTEGER                     :: Status
2008  CALL wrf_message('wrf_quilt_get_dom_ti_integer not supported yet')
2009#endif
2010RETURN
2011END SUBROUTINE wrf_quilt_get_dom_ti_integer
2012
2013SUBROUTINE wrf_quilt_put_dom_ti_integer ( DataHandle,Element,   Data, Count,  Status )
2014!<DESCRIPTION>
2015! Instruct the I/O quilt servers to write Count words of time independent
2016! domain metadata named "Element"
2017! to the open dataset described by DataHandle.
2018! Metadata of type integer are
2019! copied from array Data.
2020! This routine is called only by client (compute) tasks. 
2021!</DESCRIPTION>
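!
! Illustrative call writing a single integer attribute (a sketch; dh, ierr,
! the element name and the value are placeholders):
!
!   INTEGER :: ibuf(1)
!   ibuf(1) = 3
!   CALL wrf_quilt_put_dom_ti_integer( dh, 'MY_ATTRIBUTE', ibuf, 1, ierr )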
2022#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2023  USE module_wrf_quilt
2024  IMPLICIT NONE
2025  INCLUDE 'mpif.h'
2026#include "intio_tags.h"
2027  INTEGER ,       INTENT(IN)  :: DataHandle
2028  CHARACTER*(*) , INTENT(IN)  :: Element
2029  INTEGER ,       INTENT(IN) :: Data(*)
2030  INTEGER ,       INTENT(IN)  :: Count
2031  INTEGER                     :: Status
2032! Local
2033  CHARACTER*132   :: locElement
2034  INTEGER i, typesize, itypesize, tasks_in_group, ierr, comm_io_group
2035  REAL dummy
2036!
2037
2038!JMTIMING  CALL start_timing
2039  locElement = Element
2040
2041  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_put_dom_ti_integer' )
2042
2043  IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
2044    IF ( int_handle_in_use( DataHandle ) ) THEN
2045      CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
2046      CALL MPI_TYPE_SIZE( MPI_INTEGER, typesize, ierr )
2047      IF ( wrf_dm_on_monitor() ) THEN
2048        CALL int_gen_ti_header( hdrbuf, hdrbufsize, itypesize, typesize, &
2049                                DataHandle, locElement, Data, Count, int_dom_ti_integer )
2050      ELSE
2051        CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
2052      ENDIF
2053      iserver = get_server_id ( DataHandle )
2054      CALL get_mpi_comm_io_groups( comm_io_group , iserver )
2055      CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
2056
2057!JMTIMING      CALL start_timing
2058      ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
2059      reduced = 0
2060      reduced(1) = hdrbufsize
2061      IF ( wrf_dm_on_monitor() )  reduced(2) = DataHandle
2062      CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER,  &
2063                       MPI_SUM, tasks_in_group-1,          &   ! root = nio_tasks_in_group-1 is me
2064                       comm_io_group, ierr )
2065
2066!JMTIMING       CALL end_timing("MPI_Reduce in wrf_quilt_put_dom_ti_integer")
2067      ! send data to the i/o processor
2068      CALL collect_on_comm( comm_io_group,            &
2069                            onebyte,                       &
2070                            hdrbuf, hdrbufsize , &
2071                            dummy, 0 )
2072    ENDIF
2073  ENDIF
2074  CALL wrf_debug ( DEBUG_LVL, 'returning from wrf_quilt_put_dom_ti_integer' )
2075!JMTIMING   CALL end_timing("wrf_quilt_put_dom_ti_integer" )
2076
2077#endif
2078RETURN
2079END SUBROUTINE wrf_quilt_put_dom_ti_integer
2080
2081SUBROUTINE wrf_quilt_get_dom_ti_logical ( DataHandle,Element,   Data, Count, Outcount, Status )
2082!<DESCRIPTION>
2083! Instruct the I/O quilt servers to attempt to read Count words of time
2084! independent domain metadata named "Element"
2085! from the open dataset described by DataHandle.
2086! Metadata of type logical are
2087! stored in array Data.
2088! Actual number of words read is returned in OutCount.
2089! This routine is called only by client (compute) tasks. 
2090!
2091! This is not yet supported.
2092!</DESCRIPTION>
2093#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2094  IMPLICIT NONE
2095  INTEGER ,       INTENT(IN)  :: DataHandle
2096  CHARACTER*(*) , INTENT(IN)  :: Element
2097  logical                     :: Data(*)
2098  INTEGER ,       INTENT(IN)  :: Count
2099  INTEGER                      :: OutCount
2100  INTEGER                     :: Status
2101!  CALL wrf_message('wrf_quilt_get_dom_ti_logical not supported yet')
2102#endif
2103RETURN
2104END SUBROUTINE wrf_quilt_get_dom_ti_logical
2105
2106SUBROUTINE wrf_quilt_put_dom_ti_logical ( DataHandle,Element,   Data, Count,  Status )
2107!<DESCRIPTION>
2108! Instruct the I/O quilt servers to write Count words of time independent
2109! domain metadata named "Element"
2110! to the open dataset described by DataHandle.
2111! Metadata of type logical are
2112! copied from array Data.
2113! This routine is called only by client (compute) tasks. 
2114!
2115! Logicals are converted to integers and passed on to wrf_quilt_put_dom_ti_integer().
2116!</DESCRIPTION>
2117#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2118  IMPLICIT NONE
2119  INTEGER ,       INTENT(IN)  :: DataHandle
2120  CHARACTER*(*) , INTENT(IN)  :: Element
2121  logical ,            INTENT(IN) :: Data(*)
2122  INTEGER ,       INTENT(IN)  :: Count
2123  INTEGER                     :: Status
2124! Local
2125  INTEGER i
2126  INTEGER one_or_zero(Count)
2127
2128  DO i = 1, Count
2129    IF ( Data(i) ) THEN
2130      one_or_zero(i) = 1
2131    ELSE
2132      one_or_zero(i) = 0
2133    ENDIF
2134  ENDDO
2135
2136  CALL wrf_quilt_put_dom_ti_integer ( DataHandle,Element,   one_or_zero, Count,  Status )
2137#endif
2138RETURN
2139END SUBROUTINE wrf_quilt_put_dom_ti_logical
2140
2141SUBROUTINE wrf_quilt_get_dom_ti_char ( DataHandle,Element,   Data,  Status )
2142!<DESCRIPTION>
2143! Instruct the I/O quilt servers to attempt to read time independent
2144! domain metadata named "Element"
2145! from the open dataset described by DataHandle.
2146! Metadata of type char are
2147! stored in string Data.
2148! This routine is called only by client (compute) tasks. 
2149!
2150! This is not yet supported.
2151!</DESCRIPTION>
2152#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2153  IMPLICIT NONE
2154  INTEGER ,       INTENT(IN)  :: DataHandle
2155  CHARACTER*(*) , INTENT(IN)  :: Element
2156  CHARACTER*(*)               :: Data
2157  INTEGER                     :: Status
2158  CALL wrf_message('wrf_quilt_get_dom_ti_char not supported yet')
2159#endif
2160RETURN
2161END SUBROUTINE wrf_quilt_get_dom_ti_char
2162
2163SUBROUTINE wrf_quilt_put_dom_ti_char ( DataHandle, Element,  Data,  Status )
2164!<DESCRIPTION>
2165! Instruct the I/O quilt servers to write time independent
2166! domain metadata named "Element"
2167! to the open dataset described by DataHandle.
2168! Metadata of type char are
2169! copied from string Data.
2170! This routine is called only by client (compute) tasks. 
2171!</DESCRIPTION>
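!
! Illustrative call (a sketch; dh, ierr, the element name and the string
! value are placeholders):
!
!   CALL wrf_quilt_put_dom_ti_char( dh, 'TITLE', 'OUTPUT FROM WRF', ierr )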
2172#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2173  USE module_wrf_quilt
2174  IMPLICIT NONE
2175  INCLUDE 'mpif.h'
2176#include "intio_tags.h"
2177  INTEGER ,       INTENT(IN)  :: DataHandle
2178  CHARACTER*(*) , INTENT(IN)  :: Element
2179  CHARACTER*(*) , INTENT(IN)  :: Data
2180  INTEGER                     :: Status
2181  INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group, me
2182  REAL dummy
2183!
2184!JMTIMING  CALL start_timing
2185  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_put_dom_ti_char' )
2186
2187  IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
2188    IF ( int_handle_in_use( DataHandle ) ) THEN
2189      CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
2190      IF ( wrf_dm_on_monitor() ) THEN
2191        CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
2192                                DataHandle, Element, "", Data, int_dom_ti_char )
2193      ELSE
2194        CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
2195      ENDIF
2196      iserver = get_server_id ( DataHandle )
2197!  write(0,*)'wrf_quilt_put_dom_ti_char ',iserver
2198      CALL get_mpi_comm_io_groups( comm_io_group , iserver )
2199      CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
2200      ! send the size of my local buffer to the i/o task (obufsize doesn't mean anything here)
2201!JMTIMING!  CALL start_timing
2202!write(0,*)'calling MPI_Barrier'
2203!  CALL MPI_Barrier( mpi_comm_local, ierr )
2204!write(0,*)'back from MPI_Barrier'
2205!JMTIMING!   CALL end_timing("MPI_Barrier in wrf_quilt_put_dom_ti_char")
2206
2207!JMTIMING      CALL start_timing
2208      ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on client side)
2209      reduced_dummy = 0
2210      reduced = 0
2211      reduced(1) = hdrbufsize
2212      IF ( wrf_dm_on_monitor() )  reduced(2) = DataHandle
2213
2214!call mpi_comm_rank( comm_io_group , me, ierr )
2215!write(0,*)'calling MPI_Reduce me and tasks_in_group and comm_io_group',me,tasks_in_group ,comm_io_group
2216
2217      CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER,  &
2218                       MPI_SUM, tasks_in_group-1,          &   ! nio_tasks_in_group-1 is me
2219                       comm_io_group, ierr )
2220
2221!JMTIMING       CALL end_timing("MPI_Reduce in wrf_quilt_put_dom_ti_char")
2222      ! send data to the i/o processor
2223!JMTIMING  CALL start_timing
2224
2225      CALL collect_on_comm( comm_io_group,            &
2226                            onebyte,                       &
2227                            hdrbuf, hdrbufsize , &
2228                            dummy, 0 )
2229!JMTIMING   CALL end_timing("collect_on_comm in wrf_quilt_put_dom_ti_char")
2230    ENDIF
2231  ENDIF
2232!JMTIMING   CALL end_timing("wrf_quilt_put_dom_ti_char")
2233
2234#endif
2235RETURN
2236END SUBROUTINE wrf_quilt_put_dom_ti_char
2237
2238SUBROUTINE wrf_quilt_get_dom_td_real ( DataHandle,Element, DateStr,  Data, Count, Outcount, Status )
2239!<DESCRIPTION>
2240! Instruct the I/O quilt servers to attempt to read Count words of time
2241! dependent domain metadata named "Element" valid at time DateStr
2242! from the open dataset described by DataHandle.
2243! Metadata of type real are
2244! stored in array Data.
2245! Actual number of words read is returned in OutCount.
2246! This routine is called only by client (compute) tasks. 
2247!
2248! This is not yet supported.
2249!</DESCRIPTION>
2250#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2251  IMPLICIT NONE
2252  INTEGER ,       INTENT(IN)  :: DataHandle
2253  CHARACTER*(*) , INTENT(IN)  :: Element
2254  CHARACTER*(*) , INTENT(IN)  :: DateStr
2255  real                        :: Data(*)
2256  INTEGER ,       INTENT(IN)  :: Count
2257  INTEGER                     :: OutCount
2258  INTEGER                     :: Status
2259#endif
2260RETURN
2261END SUBROUTINE wrf_quilt_get_dom_td_real
2262
2263SUBROUTINE wrf_quilt_put_dom_td_real ( DataHandle,Element, DateStr,  Data, Count,  Status )
2264!<DESCRIPTION>
2265! Instruct the I/O quilt servers to write Count words of time dependent
2266! domain metadata named "Element" valid at time DateStr
2267! to the open dataset described by DataHandle.
2268! Metadata of type real are
2269! copied from array Data.
2270! This routine is called only by client (compute) tasks. 
2271!
2272! This is not yet supported.
2273!</DESCRIPTION>
2274#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2275  IMPLICIT NONE
2276  INTEGER ,       INTENT(IN)  :: DataHandle
2277  CHARACTER*(*) , INTENT(IN)  :: Element
2278  CHARACTER*(*) , INTENT(IN)  :: DateStr
2279  real ,            INTENT(IN) :: Data(*)
2280  INTEGER ,       INTENT(IN)  :: Count
2281  INTEGER                     :: Status
2282#endif
2283RETURN
2284END SUBROUTINE wrf_quilt_put_dom_td_real
2285
2286SUBROUTINE wrf_quilt_get_dom_td_double ( DataHandle,Element, DateStr,  Data, Count, Outcount, Status )
2287!<DESCRIPTION>
2288! Instruct the I/O quilt servers to attempt to read Count words of time
2289! dependent domain metadata named "Element" valid at time DateStr
2290! from the open dataset described by DataHandle.
2291! Metadata of type double are
2292! stored in array Data.
2293! Actual number of words read is returned in OutCount.
2294! This routine is called only by client (compute) tasks. 
2295!
2296! This is not yet supported.
2297!</DESCRIPTION>
2298#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2299  IMPLICIT NONE
2300  INTEGER ,       INTENT(IN)  :: DataHandle
2301  CHARACTER*(*) , INTENT(IN)  :: Element
2302  CHARACTER*(*) , INTENT(IN)  :: DateStr
2303  real*8                          :: Data(*)
2304  INTEGER ,       INTENT(IN)  :: Count
2305  INTEGER                      :: OutCount
2306  INTEGER                     :: Status
2307#endif
2308  CALL wrf_error_fatal('wrf_quilt_get_dom_td_double not supported yet')
2309RETURN
2310END SUBROUTINE wrf_quilt_get_dom_td_double
2311
2312SUBROUTINE wrf_quilt_put_dom_td_double ( DataHandle,Element, DateStr,  Data, Count,  Status )
2313!<DESCRIPTION>
2314! Instruct the I/O quilt servers to write Count words of time dependent
2315! domain metadata named "Element" valid at time DateStr
2316! to the open dataset described by DataHandle.
2317! Metadata of type double are
2318! copied from array Data.
2319! This routine is called only by client (compute) tasks. 
2320!
2321! This is not yet supported.
2322!</DESCRIPTION>
2323#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2324  IMPLICIT NONE
2325  INTEGER ,       INTENT(IN)  :: DataHandle
2326  CHARACTER*(*) , INTENT(IN)  :: Element
2327  CHARACTER*(*) , INTENT(IN)  :: DateStr
2328  real*8 ,            INTENT(IN) :: Data(*)
2329  INTEGER ,       INTENT(IN)  :: Count
2330  INTEGER                     :: Status
2331#endif
2332  CALL wrf_error_fatal('wrf_quilt_put_dom_td_double not supported yet')
2333RETURN
2334END SUBROUTINE wrf_quilt_put_dom_td_double
2335
2336SUBROUTINE wrf_quilt_get_dom_td_integer ( DataHandle,Element, DateStr,  Data, Count, Outcount, Status )
2337!<DESCRIPTION>
2338! Instruct the I/O quilt servers to attempt to read Count words of time
2339! dependent domain metadata named "Element" valid at time DateStr
2340! from the open dataset described by DataHandle.
2341! Metadata of type integer are
2342! stored in array Data.
2343! Actual number of words read is returned in OutCount.
2344! This routine is called only by client (compute) tasks. 
2345!
2346! This is not yet supported.
2347!</DESCRIPTION>
2348#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2349  IMPLICIT NONE
2350  INTEGER ,       INTENT(IN)  :: DataHandle
2351  CHARACTER*(*) , INTENT(IN)  :: Element
2352  CHARACTER*(*) , INTENT(IN)  :: DateStr
2353  integer                          :: Data(*)
2354  INTEGER ,       INTENT(IN)  :: Count
2355  INTEGER                      :: OutCount
2356  INTEGER                     :: Status
2357#endif
2358RETURN
2359END SUBROUTINE wrf_quilt_get_dom_td_integer
2360
2361SUBROUTINE wrf_quilt_put_dom_td_integer ( DataHandle,Element, DateStr,  Data, Count,  Status )
2362!<DESCRIPTION>
2363! Instruct the I/O quilt servers to write Count words of time dependent
2364! domain metadata named "Element" valid at time DateStr
2365! to the open dataset described by DataHandle.
2366! Metadata of type integer are
2367! copied from array Data.
2368! This routine is called only by client (compute) tasks. 
2369!
2370! This is not yet supported.
2371!</DESCRIPTION>
2372#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2373  IMPLICIT NONE
2374  INTEGER ,       INTENT(IN)  :: DataHandle
2375  CHARACTER*(*) , INTENT(IN)  :: Element
2376  CHARACTER*(*) , INTENT(IN)  :: DateStr
2377  integer ,            INTENT(IN) :: Data(*)
2378  INTEGER ,       INTENT(IN)  :: Count
2379  INTEGER                     :: Status
2380#endif
2381RETURN
2382END SUBROUTINE wrf_quilt_put_dom_td_integer
2383
2384SUBROUTINE wrf_quilt_get_dom_td_logical ( DataHandle,Element, DateStr,  Data, Count, Outcount, Status )
2385!<DESCRIPTION>
2386! Instruct the I/O quilt servers to attempt to read Count words of time
2387! dependent domain metadata named "Element" valid at time DateStr
2388! from the open dataset described by DataHandle.
2389! Metadata of type logical are
2390! stored in array Data.
2391! Actual number of words read is returned in OutCount.
2392! This routine is called only by client (compute) tasks. 
2393!
2394! This is not yet supported.
2395!</DESCRIPTION>
2396#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2397  IMPLICIT NONE
2398  INTEGER ,       INTENT(IN)  :: DataHandle
2399  CHARACTER*(*) , INTENT(IN)  :: Element
2400  CHARACTER*(*) , INTENT(IN)  :: DateStr
2401  logical                          :: Data(*)
2402  INTEGER ,       INTENT(IN)  :: Count
2403  INTEGER                      :: OutCount
2404  INTEGER                     :: Status
2405#endif
2406RETURN
2407END SUBROUTINE wrf_quilt_get_dom_td_logical
2408
2409SUBROUTINE wrf_quilt_put_dom_td_logical ( DataHandle,Element, DateStr,  Data, Count,  Status )
2410!<DESCRIPTION>
2411! Instruct the I/O quilt servers to write Count words of time dependent
2412! domain metadata named "Element" valid at time DateStr
2413! to the open dataset described by DataHandle.
2414! Metadata of type logical are
2415! copied from array Data.
2416! This routine is called only by client (compute) tasks. 
2417!
2418! This is not yet supported.
2419!</DESCRIPTION>
2420#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2421  IMPLICIT NONE
2422  INTEGER ,       INTENT(IN)  :: DataHandle
2423  CHARACTER*(*) , INTENT(IN)  :: Element
2424  CHARACTER*(*) , INTENT(IN)  :: DateStr
2425  logical ,            INTENT(IN) :: Data(*)
2426  INTEGER ,       INTENT(IN)  :: Count
2427  INTEGER                     :: Status
2428#endif
2429RETURN
2430END SUBROUTINE wrf_quilt_put_dom_td_logical
2431
2432SUBROUTINE wrf_quilt_get_dom_td_char ( DataHandle,Element, DateStr,  Data,  Status )
2433!<DESCRIPTION>
2434! Instruct the I/O quilt servers to attempt to read time dependent
2435! domain metadata named "Element" valid at time DateStr
2436! from the open dataset described by DataHandle.
2437! Metadata of type char are
2438! stored in string Data.
2439! This routine is called only by client (compute) tasks. 
2440!
2441! This is not yet supported.
2442!</DESCRIPTION>
2443#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2444  IMPLICIT NONE
2445  INTEGER ,       INTENT(IN)  :: DataHandle
2446  CHARACTER*(*) , INTENT(IN)  :: Element
2447  CHARACTER*(*) , INTENT(IN)  :: DateStr
2448  CHARACTER*(*)               :: Data
2449  INTEGER                     :: Status
2450#endif
2451RETURN
2452END SUBROUTINE wrf_quilt_get_dom_td_char
2453
2454SUBROUTINE wrf_quilt_put_dom_td_char ( DataHandle,Element, DateStr,  Data,  Status )
2455!<DESCRIPTION>
2456! Instruct the I/O quilt servers to write time dependent
2457! domain metadata named "Element" valid at time DateStr
2458! to the open dataset described by DataHandle.
2459! Metadata of type char are
2460! copied from string Data.
2461! This routine is called only by client (compute) tasks. 
2462!
2463! This is not yet supported.
2464!</DESCRIPTION>
2465#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2466  IMPLICIT NONE
2467  INTEGER ,       INTENT(IN)  :: DataHandle
2468  CHARACTER*(*) , INTENT(IN)  :: Element
2469  CHARACTER*(*) , INTENT(IN)  :: DateStr
2470  CHARACTER*(*) , INTENT(IN) :: Data
2471  INTEGER                          :: Status
2472#endif
2473RETURN
2474END SUBROUTINE wrf_quilt_put_dom_td_char
2475
2476SUBROUTINE wrf_quilt_get_var_ti_real ( DataHandle,Element,  Varname, Data, Count, Outcount, Status )
2477!<DESCRIPTION>
2478! Instruct the I/O quilt servers to attempt to read Count words of time
2479! independent attribute "Element" of variable "Varname"
2480! from the open dataset described by DataHandle.
2481! Attribute of type real is
2482! stored in array Data.
2483! Actual number of words read is returned in OutCount.
2484! This routine is called only by client (compute) tasks. 
2485!
2486! This is not yet supported.
2487!</DESCRIPTION>
2488#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2489  IMPLICIT NONE
2490  INTEGER ,       INTENT(IN)  :: DataHandle
2491  CHARACTER*(*) , INTENT(IN)  :: Element
2492  CHARACTER*(*) , INTENT(IN)  :: VarName
2493  real                          :: Data(*)
2494  INTEGER ,       INTENT(IN)  :: Count
2495  INTEGER                     :: OutCount
2496  INTEGER                     :: Status
2497#endif
2498RETURN
2499END SUBROUTINE wrf_quilt_get_var_ti_real
2500
2501SUBROUTINE wrf_quilt_put_var_ti_real ( DataHandle,Element,  Varname, Data, Count,  Status )
2502!<DESCRIPTION>
2503! Instruct the I/O quilt servers to write Count words of time independent
2504! attribute "Element" of variable "Varname"
2505! to the open dataset described by DataHandle.
2506! Attribute of type real is
2507! copied from array Data.
2508! This routine is called only by client (compute) tasks. 
2509!
2510! This is not yet supported.
2511!</DESCRIPTION>
2512#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2513  IMPLICIT NONE
2514  INTEGER ,       INTENT(IN)  :: DataHandle
2515  CHARACTER*(*) , INTENT(IN)  :: Element
2516  CHARACTER*(*) , INTENT(IN)  :: VarName
2517  real ,            INTENT(IN) :: Data(*)
2518  INTEGER ,       INTENT(IN)  :: Count
2519  INTEGER                     :: Status
2520#endif
2521RETURN
2522END SUBROUTINE wrf_quilt_put_var_ti_real
2523
2524SUBROUTINE wrf_quilt_get_var_ti_double ( DataHandle,Element,  Varname, Data, Count, Outcount, Status )
2525!<DESCRIPTION>
2526! Instruct the I/O quilt servers to attempt to read Count words of time
2527! independent attribute "Element" of variable "Varname"
2528! from the open dataset described by DataHandle.
2529! Attribute of type double is
2530! stored in array Data.
2531! Actual number of words read is returned in OutCount.
2532! This routine is called only by client (compute) tasks. 
2533!
2534! This is not yet supported.
2535!</DESCRIPTION>
2536#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2537  IMPLICIT NONE
2538  INTEGER ,       INTENT(IN)  :: DataHandle
2539  CHARACTER*(*) , INTENT(IN)  :: Element
2540  CHARACTER*(*) , INTENT(IN)  :: VarName
2541  real*8                      :: Data(*)
2542  INTEGER ,       INTENT(IN)  :: Count
2543  INTEGER                     :: OutCount
2544  INTEGER                     :: Status
2545#endif
2546  CALL wrf_error_fatal('wrf_quilt_get_var_ti_double not supported yet')
2547RETURN
2548END SUBROUTINE wrf_quilt_get_var_ti_double
2549
2550SUBROUTINE wrf_quilt_put_var_ti_double ( DataHandle,Element,  Varname, Data, Count,  Status )
2551!<DESCRIPTION>
2552! Instruct the I/O quilt servers to write Count words of time independent
2553! attribute "Element" of variable "Varname"
2554! to the open dataset described by DataHandle.
2555! Attribute of type double is
2556! copied from array Data.
2557! This routine is called only by client (compute) tasks. 
2558!
2559! This is not yet supported.
2560!</DESCRIPTION>
2561#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2562  IMPLICIT NONE
2563  INTEGER ,       INTENT(IN)  :: DataHandle
2564  CHARACTER*(*) , INTENT(IN)  :: Element
2565  CHARACTER*(*) , INTENT(IN)  :: VarName
2566  real*8 ,        INTENT(IN) :: Data(*)
2567  INTEGER ,       INTENT(IN)  :: Count
2568  INTEGER                     :: Status
2569#endif
2570  CALL wrf_error_fatal('wrf_quilt_put_var_ti_double not supported yet')
2571RETURN
2572END SUBROUTINE wrf_quilt_put_var_ti_double
2573
2574SUBROUTINE wrf_quilt_get_var_ti_integer ( DataHandle,Element,  Varname, Data, Count, Outcount, Status )
2575!<DESCRIPTION>
2576! Instruct the I/O quilt servers to attempt to read Count words of time
2577! independent attribute "Element" of variable "Varname"
2578! from the open dataset described by DataHandle.
2579! Attribute of type integer is
2580! stored in array Data.
2581! Actual number of words read is returned in OutCount.
2582! This routine is called only by client (compute) tasks. 
2583!
2584! This is not yet supported.
2585!</DESCRIPTION>
2586#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2587  IMPLICIT NONE
2588  INTEGER ,       INTENT(IN)  :: DataHandle
2589  CHARACTER*(*) , INTENT(IN)  :: Element
2590  CHARACTER*(*) , INTENT(IN)  :: VarName
2591  integer                     :: Data(*)
2592  INTEGER ,       INTENT(IN)  :: Count
2593  INTEGER                     :: OutCount
2594  INTEGER                     :: Status
2595#endif
2596RETURN
2597END SUBROUTINE wrf_quilt_get_var_ti_integer
2598
2599SUBROUTINE wrf_quilt_put_var_ti_integer ( DataHandle,Element,  Varname, Data, Count,  Status )
2600!<DESCRIPTION>
2601! Instruct the I/O quilt servers to write Count words of time independent
2602! attribute "Element" of variable "Varname"
2603! to the open dataset described by DataHandle.
2604! Attribute of type integer is
2605! copied from array Data.
2606! This routine is called only by client (compute) tasks. 
2607!
2608! This is not yet supported.
2609!</DESCRIPTION>
2610#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2611  IMPLICIT NONE
2612  INTEGER ,       INTENT(IN)  :: DataHandle
2613  CHARACTER*(*) , INTENT(IN)  :: Element
2614  CHARACTER*(*) , INTENT(IN)  :: VarName
2615  integer ,            INTENT(IN) :: Data(*)
2616  INTEGER ,       INTENT(IN)  :: Count
2617  INTEGER                     :: Status
2618#endif
2619RETURN
2620END SUBROUTINE wrf_quilt_put_var_ti_integer
2621
2622SUBROUTINE wrf_quilt_get_var_ti_logical ( DataHandle,Element,  Varname, Data, Count, Outcount, Status )
2623!<DESCRIPTION>
2624! Instruct the I/O quilt servers to attempt to read Count words of time
2625! independent attribute "Element" of variable "Varname"
2626! from the open dataset described by DataHandle.
2627! Attribute of type logical is
2628! stored in array Data.
2629! Actual number of words read is returned in OutCount.
2630! This routine is called only by client (compute) tasks. 
2631!
2632! This is not yet supported.
2633!</DESCRIPTION>
2634#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2635  IMPLICIT NONE
2636  INTEGER ,       INTENT(IN)  :: DataHandle
2637  CHARACTER*(*) , INTENT(IN)  :: Element
2638  CHARACTER*(*) , INTENT(IN)  :: VarName
2639  logical                     :: Data(*)
2640  INTEGER ,       INTENT(IN)  :: Count
2641  INTEGER                     :: OutCount
2642  INTEGER                     :: Status
2643#endif
2644RETURN
2645END SUBROUTINE wrf_quilt_get_var_ti_logical
2646
2647SUBROUTINE wrf_quilt_put_var_ti_logical ( DataHandle,Element,  Varname, Data, Count,  Status )
2648!<DESCRIPTION>
2649! Instruct the I/O quilt servers to write Count words of time independent
2650! attribute "Element" of variable "Varname"
2651! to the open dataset described by DataHandle.
2652! Attribute of type logical is
2653! copied from array Data.
2654! This routine is called only by client (compute) tasks. 
2655!
2656! This is not yet supported.
2657!</DESCRIPTION>
2658#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2659  IMPLICIT NONE
2660  INTEGER ,       INTENT(IN)  :: DataHandle
2661  CHARACTER*(*) , INTENT(IN)  :: Element
2662  CHARACTER*(*) , INTENT(IN)  :: VarName
2663  logical ,            INTENT(IN) :: Data(*)
2664  INTEGER ,       INTENT(IN)  :: Count
2665  INTEGER                     :: Status
2666#endif
2667RETURN
2668END SUBROUTINE wrf_quilt_put_var_ti_logical
2669
2670SUBROUTINE wrf_quilt_get_var_ti_char ( DataHandle,Element,  Varname, Data,  Status )
2671!<DESCRIPTION>
2672! Instruct the I/O quilt servers to attempt to read time independent
2673! attribute "Element" of variable "Varname"
2674! from the open dataset described by DataHandle.
2675! Attribute of type char is
2676! stored in string Data.
2677! This routine is called only by client (compute) tasks. 
2678!
2679! This is not yet supported.
2680!</DESCRIPTION>
2681#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2682  IMPLICIT NONE
2683  INTEGER ,       INTENT(IN)  :: DataHandle
2684  CHARACTER*(*) , INTENT(IN)  :: Element
2685  CHARACTER*(*) , INTENT(IN)  :: VarName
2686  CHARACTER*(*)               :: Data
2687  INTEGER                     :: Status
2688#endif
2689RETURN
2690END SUBROUTINE wrf_quilt_get_var_ti_char
2691
2692SUBROUTINE wrf_quilt_put_var_ti_char ( DataHandle,Element,  Varname, Data,  Status )
2693!<DESCRIPTION>
2694! Instruct the I/O quilt servers to write time independent
2695! attribute "Element" of variable "Varname"
2696! to the open dataset described by DataHandle.
2697! Attribute of type char is
2698! copied from string Data.
2699! This routine is called only by client (compute) tasks. 
2700!</DESCRIPTION>
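!
! Illustrative call from a compute task (a minimal sketch; the handle and the
! attribute values shown here are assumptions, not taken from this file):
!
!   CALL wrf_quilt_put_var_ti_char( dh, 'units', 'T2', 'K', Status )
!
! The monitor task packs Element, VarName and Data into an int_var_ti_char
! header, the other compute tasks generate no-op headers, the header sizes are
! reduced onto the I/O server root, and the headers themselves are forwarded
! with collect_on_comm.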
2701
2702#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2703  USE module_wrf_quilt
2704  IMPLICIT NONE
2705  INCLUDE 'mpif.h'
2706#include "intio_tags.h"
2707  INTEGER ,       INTENT(IN)  :: DataHandle
2708  CHARACTER*(*) , INTENT(IN)  :: Element
2709  CHARACTER*(*) , INTENT(IN)  :: VarName
2710  CHARACTER*(*) , INTENT(IN)  :: Data
2711  INTEGER                     :: Status
2712  INTEGER i, itypesize, tasks_in_group, ierr, comm_io_group
2713  REAL dummy
2714!
2715
2716!JMTIMING  CALL start_timing
2717  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_put_var_ti_char' )
2718
2719  IF ( DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles ) THEN
2720    IF ( int_handle_in_use( DataHandle ) ) THEN
2721      CALL MPI_TYPE_SIZE( MPI_INTEGER, itypesize, ierr )
2722      IF ( wrf_dm_on_monitor() ) THEN
2723        CALL int_gen_ti_header_char( hdrbuf, hdrbufsize, itypesize, &
2724                                DataHandle, TRIM(Element), TRIM(VarName), TRIM(Data), int_var_ti_char )
2725      ELSE
2726        CALL int_gen_noop_header( hdrbuf, hdrbufsize, itypesize )
2727      ENDIF
2728      iserver = get_server_id ( DataHandle )
2729      CALL get_mpi_comm_io_groups( comm_io_group , iserver )
2730      CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
2731
2732!JMTIMING      CALL start_timing
2733      ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on the client side)
2734      reduced = 0
2735      reduced(1) = hdrbufsize
2736      IF ( wrf_dm_on_monitor() )  reduced(2) = DataHandle
2737      CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER,  &
2738                       MPI_SUM, tasks_in_group-1,          &   ! root = tasks_in_group-1 is the I/O server in this group
2739                       comm_io_group, ierr )
2740!JMTIMING       CALL end_timing("MPI_Reduce in wrf_quilt_put_var_ti_char")
2741      ! send data to the i/o processor
2742      CALL collect_on_comm( comm_io_group,            &
2743                            onebyte,                       &
2744                            hdrbuf, hdrbufsize , &
2745                            dummy, 0 )
2746    ENDIF
2747  ENDIF
2748!JMTIMING   CALL end_timing("wrf_quilt_put_var_ti_char" )
2749
2750#endif
2751RETURN
2752END SUBROUTINE wrf_quilt_put_var_ti_char
2753
2754SUBROUTINE wrf_quilt_get_var_td_real ( DataHandle,Element,  DateStr,Varname, Data, Count, Outcount, Status )
2755!<DESCRIPTION>
2756! Instruct the I/O quilt servers to attempt to read Count words of time
2757! dependent attribute "Element" of variable "Varname" valid at time DateStr
2758! from the open dataset described by DataHandle.
2759! Attribute of type real is
2760! stored in array Data.
2761! Actual number of words read is returned in OutCount.
2762! This routine is called only by client (compute) tasks. 
2763!
2764! This is not yet supported.
2765!</DESCRIPTION>
2766#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2767  IMPLICIT NONE
2768  INTEGER ,       INTENT(IN)  :: DataHandle
2769  CHARACTER*(*) , INTENT(IN)  :: Element
2770  CHARACTER*(*) , INTENT(IN)  :: DateStr
2771  CHARACTER*(*) , INTENT(IN)  :: VarName
2772  real                        :: Data(*)
2773  INTEGER ,       INTENT(IN)  :: Count
2774  INTEGER                     :: OutCount
2775  INTEGER                     :: Status
2776#endif
2777RETURN
2778END SUBROUTINE wrf_quilt_get_var_td_real
2779
2780SUBROUTINE wrf_quilt_put_var_td_real ( DataHandle,Element,  DateStr,Varname, Data, Count,  Status )
2781!<DESCRIPTION>
2782! Instruct the I/O quilt servers to write Count words of time dependent
2783! attribute "Element" of variable "Varname" valid at time DateStr
2784! to the open dataset described by DataHandle.
2785! Attribute of type real is
2786! copied from array Data.
2787! This routine is called only by client (compute) tasks. 
2788!
2789! This is not yet supported.
2790!</DESCRIPTION>
2791#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2792  IMPLICIT NONE
2793  INTEGER ,       INTENT(IN)  :: DataHandle
2794  CHARACTER*(*) , INTENT(IN)  :: Element
2795  CHARACTER*(*) , INTENT(IN)  :: DateStr
2796  CHARACTER*(*) , INTENT(IN)  :: VarName
2797  real ,            INTENT(IN) :: Data(*)
2798  INTEGER ,       INTENT(IN)  :: Count
2799  INTEGER                     :: Status
2800#endif
2801RETURN
2802END SUBROUTINE wrf_quilt_put_var_td_real
2803
2804SUBROUTINE wrf_quilt_get_var_td_double ( DataHandle,Element,  DateStr,Varname, Data, Count, Outcount, Status )
2805!<DESCRIPTION>
2806! Instruct the I/O quilt servers to attempt to read Count words of time
2807! dependent attribute "Element" of variable "Varname" valid at time DateStr
2808! from the open dataset described by DataHandle.
2809! Attribute of type double is
2810! stored in array Data.
2811! Actual number of words read is returned in OutCount.
2812! This routine is called only by client (compute) tasks. 
2813!
2814! This is not yet supported.
2815!</DESCRIPTION>
2816#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2817  IMPLICIT NONE
2818  INTEGER ,       INTENT(IN)  :: DataHandle
2819  CHARACTER*(*) , INTENT(IN)  :: Element
2820  CHARACTER*(*) , INTENT(IN)  :: DateStr
2821  CHARACTER*(*) , INTENT(IN)  :: VarName
2822  real*8                      :: Data(*)
2823  INTEGER ,       INTENT(IN)  :: Count
2824  INTEGER                     :: OutCount
2825  INTEGER                     :: Status
2826#endif
2827  CALL wrf_error_fatal('wrf_quilt_get_var_td_double not supported yet')
2828RETURN
2829END SUBROUTINE wrf_quilt_get_var_td_double
2830
2831SUBROUTINE wrf_quilt_put_var_td_double ( DataHandle,Element,  DateStr,Varname, Data, Count,  Status )
2832!<DESCRIPTION>
2833! Instruct the I/O quilt servers to write Count words of time dependent
2834! attribute "Element" of variable "Varname" valid at time DateStr
2835! to the open dataset described by DataHandle.
2836! Attribute of type double is
2837! copied from array Data.
2838! This routine is called only by client (compute) tasks. 
2839!
2840! This is not yet supported.
2841!</DESCRIPTION>
2842#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2843  IMPLICIT NONE
2844  INTEGER ,       INTENT(IN)  :: DataHandle
2845  CHARACTER*(*) , INTENT(IN)  :: Element
2846  CHARACTER*(*) , INTENT(IN)  :: DateStr
2847  CHARACTER*(*) , INTENT(IN)  :: VarName
2848  real*8 ,            INTENT(IN) :: Data(*)
2849  INTEGER ,       INTENT(IN)  :: Count
2850  INTEGER                     :: Status
2851#endif
2852  CALL wrf_error_fatal('wrf_quilt_put_var_td_double not supported yet')
2853RETURN
2854END SUBROUTINE wrf_quilt_put_var_td_double
2855
2856SUBROUTINE wrf_quilt_get_var_td_integer ( DataHandle,Element,  DateStr,Varname, Data, Count, Outcount,Status)
2857!<DESCRIPTION>
2858! Instruct the I/O quilt servers to attempt to read Count words of time
2859! dependent attribute "Element" of variable "Varname" valid at time DateStr
2860! from the open dataset described by DataHandle.
2861! Attribute of type integer is
2862! stored in array Data.
2863! Actual number of words read is returned in OutCount.
2864! This routine is called only by client (compute) tasks. 
2865!
2866! This is not yet supported.
2867!</DESCRIPTION>
2868#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2869  IMPLICIT NONE
2870  INTEGER ,       INTENT(IN)  :: DataHandle
2871  CHARACTER*(*) , INTENT(IN)  :: Element
2872  CHARACTER*(*) , INTENT(IN)  :: DateStr
2873  CHARACTER*(*) , INTENT(IN)  :: VarName
2874  integer                     :: Data(*)
2875  INTEGER ,       INTENT(IN)  :: Count
2876  INTEGER                     :: OutCount
2877  INTEGER                     :: Status
2878#endif
2879RETURN
2880END SUBROUTINE wrf_quilt_get_var_td_integer
2881
2882SUBROUTINE wrf_quilt_put_var_td_integer ( DataHandle,Element,  DateStr,Varname, Data, Count,  Status )
2883!<DESCRIPTION>
2884! Instruct the I/O quilt servers to write Count words of time dependent
2885! attribute "Element" of variable "Varname" valid at time DateStr
2886! to the open dataset described by DataHandle.
2887! Attribute of type integer is
2888! copied from array Data.
2889! This routine is called only by client (compute) tasks. 
2890!
2891! This is not yet supported.
2892!</DESCRIPTION>
2893#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2894  IMPLICIT NONE
2895  INTEGER ,       INTENT(IN)  :: DataHandle
2896  CHARACTER*(*) , INTENT(IN)  :: Element
2897  CHARACTER*(*) , INTENT(IN)  :: DateStr
2898  CHARACTER*(*) , INTENT(IN)  :: VarName
2899  integer ,       INTENT(IN)  :: Data(*)
2900  INTEGER ,       INTENT(IN)  :: Count
2901  INTEGER                     :: Status
2902#endif
2903RETURN
2904END SUBROUTINE wrf_quilt_put_var_td_integer
2905
2906SUBROUTINE wrf_quilt_get_var_td_logical ( DataHandle,Element,  DateStr,Varname, Data, Count, Outcount, Status )
2907!<DESCRIPTION>
2908! Instruct the I/O quilt servers to attempt to read Count words of time
2909! dependent attribute "Element" of variable "Varname" valid at time DateStr
2910! from the open dataset described by DataHandle.
2911! Attribute of type logical is
2912! stored in array Data.
2913! Actual number of words read is returned in OutCount.
2914! This routine is called only by client (compute) tasks. 
2915!
2916! This is not yet supported.
2917!</DESCRIPTION>
2918#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2919  IMPLICIT NONE
2920  INTEGER ,       INTENT(IN)  :: DataHandle
2921  CHARACTER*(*) , INTENT(IN)  :: Element
2922  CHARACTER*(*) , INTENT(IN)  :: DateStr
2923  CHARACTER*(*) , INTENT(IN)  :: VarName
2924  logical                          :: Data(*)
2925  INTEGER ,       INTENT(IN)  :: Count
2926  INTEGER                      :: OutCount
2927  INTEGER                     :: Status
2928#endif
2929RETURN
2930END SUBROUTINE wrf_quilt_get_var_td_logical
2931
2932SUBROUTINE wrf_quilt_put_var_td_logical ( DataHandle,Element,  DateStr,Varname, Data, Count,  Status )
2933!<DESCRIPTION>
2934! Instruct the I/O quilt servers to write Count words of time dependent
2935! attribute "Element" of variable "Varname" valid at time DateStr
2936! to the open dataset described by DataHandle.
2937! Attribute of type logical is
2938! copied from array Data.
2939! This routine is called only by client (compute) tasks. 
2940!
2941! This is not yet supported.
2942!</DESCRIPTION>
2943#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2944  IMPLICIT NONE
2945  INTEGER ,       INTENT(IN)  :: DataHandle
2946  CHARACTER*(*) , INTENT(IN)  :: Element
2947  CHARACTER*(*) , INTENT(IN)  :: DateStr
2948  CHARACTER*(*) , INTENT(IN)  :: VarName
2949  logical ,            INTENT(IN) :: Data(*)
2950  INTEGER ,       INTENT(IN)  :: Count
2951  INTEGER                     :: Status
2952#endif
2953RETURN
2954END SUBROUTINE wrf_quilt_put_var_td_logical
2955
2956SUBROUTINE wrf_quilt_get_var_td_char ( DataHandle,Element,  DateStr,Varname, Data,  Status )
2957!<DESCRIPTION>
2958! Instruct the I/O quilt servers to attempt to read time dependent
2959! attribute "Element" of variable "Varname" valid at time DateStr
2960! from the open dataset described by DataHandle.
2961! Attribute of type char is
2962! stored in string Data.
2963! This routine is called only by client (compute) tasks. 
2964!
2965! This is not yet supported.
2966!</DESCRIPTION>
2967#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2968  IMPLICIT NONE
2969  INTEGER ,       INTENT(IN)  :: DataHandle
2970  CHARACTER*(*) , INTENT(IN)  :: Element
2971  CHARACTER*(*) , INTENT(IN)  :: DateStr
2972  CHARACTER*(*) , INTENT(IN)  :: VarName
2973  CHARACTER*(*)               :: Data
2974  INTEGER                     :: Status
2975#endif
2976RETURN
2977END SUBROUTINE wrf_quilt_get_var_td_char
2978
2979SUBROUTINE wrf_quilt_put_var_td_char ( DataHandle,Element,  DateStr,Varname, Data,  Status )
2980!<DESCRIPTION>
2981! Instruct the I/O quilt servers to write time dependent
2982! attribute "Element" of variable "Varname" valid at time DateStr
2983! to the open dataset described by DataHandle.
2984! Attribute of type char is
2985! copied from string Data.
2986! This routine is called only by client (compute) tasks. 
2987!
2988! This is not yet supported.
2989!</DESCRIPTION>
2990#if defined( DM_PARALLEL ) && !defined( STUBMPI )
2991  IMPLICIT NONE
2992  INTEGER ,       INTENT(IN)  :: DataHandle
2993  CHARACTER*(*) , INTENT(IN)  :: Element
2994  CHARACTER*(*) , INTENT(IN)  :: DateStr
2995  CHARACTER*(*) , INTENT(IN)  :: VarName
2996  CHARACTER*(*) , INTENT(IN) :: Data
2997  INTEGER                    :: Status
2998#endif
2999RETURN
3000END SUBROUTINE wrf_quilt_put_var_td_char
3001
3002SUBROUTINE wrf_quilt_read_field ( DataHandle , DateStr , VarName , Field , FieldType , Comm , IOComm, &
3003                            DomainDesc , MemoryOrder , Stagger , DimNames ,              &
3004                            DomainStart , DomainEnd ,                                    &
3005                            MemoryStart , MemoryEnd ,                                    &
3006                            PatchStart , PatchEnd ,                                      &
3007                            Status )
3008!<DESCRIPTION>
3009! Instruct the I/O quilt servers to read the variable named VarName from the
3010! dataset pointed to by DataHandle.
3011! This routine is called only by client (compute) tasks. 
3012!
3013! This is not yet supported.
3014!</DESCRIPTION>
3015#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3016  IMPLICIT NONE
3017  INTEGER ,       INTENT(IN)    :: DataHandle
3018  CHARACTER*(*) , INTENT(INOUT) :: DateStr
3019  CHARACTER*(*) , INTENT(INOUT) :: VarName
3020  INTEGER ,       INTENT(INOUT) :: Field(*)
3021  integer                       ,intent(in)    :: FieldType
3022  integer                       ,intent(inout) :: Comm
3023  integer                       ,intent(inout) :: IOComm
3024  integer                       ,intent(in)    :: DomainDesc
3025  character*(*)                 ,intent(in)    :: MemoryOrder
3026  character*(*)                 ,intent(in)    :: Stagger
3027  character*(*) , dimension (*) ,intent(in)    :: DimNames
3028  integer ,dimension(*)         ,intent(in)    :: DomainStart, DomainEnd
3029  integer ,dimension(*)         ,intent(in)    :: MemoryStart, MemoryEnd
3030  integer ,dimension(*)         ,intent(in)    :: PatchStart,  PatchEnd
3031  integer                       ,intent(out)   :: Status
3032  Status = 0
3033#endif
3034RETURN
3035END SUBROUTINE wrf_quilt_read_field
3036
3037SUBROUTINE wrf_quilt_write_field ( DataHandle , DateStr , VarName , Field , FieldType , Comm , IOComm,  &
3038                             DomainDesc , MemoryOrder , Stagger , DimNames ,              &
3039                             DomainStart , DomainEnd ,                                    &
3040                             MemoryStart , MemoryEnd ,                                    &
3041                             PatchStart , PatchEnd ,                                      &
3042                             Status )
3043!<DESCRIPTION>
3044! Prepare instructions for the I/O quilt servers to write the variable named
3045! VarName to the dataset pointed to by DataHandle.
3046!
3047! During a "training" write this routine accumulates number and sizes of
3048! messages that will be sent to the I/O server associated with this compute
3049! (client) task.
3050!
3051! During a "real" write, this routine begins by allocating
3052! int_local_output_buffer if it has not already been allocated.  Sizes
3053! accumulated during "training" are used to determine how big
3054! int_local_output_buffer must be.  This routine then stores "int_field"
3055! headers and associated field data in int_local_output_buffer.  The contents
3056! of int_local_output_buffer are actually sent to the I/O quilt server in
3057! routine wrf_quilt_iosync().  This scheme allows output of multiple variables
3058! to be aggregated into a single "iosync" operation.
3059! This routine is called only by client (compute) tasks. 
3060!</DESCRIPTION>
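!
! Sketch of the two-pass protocol implemented below:
!
!   1. Training pass: okay_to_write(DataHandle) is .FALSE.; each call only adds
!      hdrbufsize + locsize*typesize to int_num_bytes_to_write(DataHandle) and
!      forwards a header tagged with the magic number 333933 to the I/O server
!      group.
!   2. Real pass: okay_to_write(DataHandle) is .TRUE.; the header and this
!      task's patch of Field are packed into int_local_output_buffer, which is
!      shipped to the I/O servers by the next call to wrf_quilt_iosync().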
3061#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3062  USE module_state_description
3063  USE module_wrf_quilt
3064  IMPLICIT NONE
3065  INCLUDE 'mpif.h'
3066#include "wrf_io_flags.h"
3067  INTEGER ,       INTENT(IN)    :: DataHandle
3068  CHARACTER*(*) , INTENT(IN)    :: DateStr
3069  CHARACTER*(*) , INTENT(IN)    :: VarName
3070!  INTEGER ,       INTENT(IN)    :: Field(*)
3071  integer                       ,intent(in)    :: FieldType
3072  integer                       ,intent(inout) :: Comm
3073  integer                       ,intent(inout) :: IOComm
3074  integer                       ,intent(in)    :: DomainDesc
3075  character*(*)                 ,intent(in)    :: MemoryOrder
3076  character*(*)                 ,intent(in)    :: Stagger
3077  character*(*) , dimension (*) ,intent(in)    :: DimNames
3078  integer ,dimension(*)         ,intent(in)    :: DomainStart, DomainEnd
3079  integer ,dimension(*)         ,intent(in)    :: MemoryStart, MemoryEnd
3080  integer ,dimension(*)         ,intent(in)    :: PatchStart,  PatchEnd
3081  integer                       ,intent(out)   :: Status
3082
3083  integer ii,jj,kk,myrank
3084
3085  REAL, DIMENSION( MemoryStart(1):MemoryEnd(1), &
3086                   MemoryStart(2):MemoryEnd(2), &
3087                   MemoryStart(3):MemoryEnd(3) ) :: Field
3088  INTEGER locsize , typesize, inttypesize
3089  INTEGER ierr, tasks_in_group, comm_io_group, dummy, i
3090  INTEGER, EXTERNAL :: use_package
3091
3092!JMTIMING  CALL start_timing
3093  CALL wrf_debug ( DEBUG_LVL, 'in wrf_quilt_write_field' )
3094
3095  IF ( .NOT. (DataHandle .GE. 1 .AND. DataHandle .LE. int_num_handles) ) THEN
3096    CALL wrf_error_fatal("frame/module_io_quilt.F: wrf_quilt_write_field: invalid data handle" )
3097  ENDIF
3098  IF ( .NOT. int_handle_in_use( DataHandle ) ) THEN
3099    CALL wrf_error_fatal("frame/module_io_quilt.F: wrf_quilt_write_field: DataHandle not opened" )
3100  ENDIF
3101
3102  locsize = (PatchEnd(1)-PatchStart(1)+1)* &
3103            (PatchEnd(2)-PatchStart(2)+1)* &
3104            (PatchEnd(3)-PatchStart(3)+1)
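  ! locsize is the number of elements in this task's patch; multiplied by
  ! typesize (bytes per element, determined below from FieldType) it gives the
  ! size in bytes of the field payload that accompanies each write header.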
3105
3106  CALL mpi_type_size( MPI_INTEGER, inttypesize, ierr )
3107  ! Note that the WRF_DOUBLE branch of this IF statement must come first since
3108  ! WRF_FLOAT is set equal to WRF_DOUBLE during autopromotion builds. 
3109  IF ( FieldType .EQ. WRF_DOUBLE ) THEN
3110    CALL mpi_type_size( MPI_DOUBLE_PRECISION, typesize, ierr )
3111  ELSE IF ( FieldType .EQ. WRF_FLOAT ) THEN
3112    CALL mpi_type_size( MPI_REAL, typesize, ierr )
3113  ELSE IF ( FieldType .EQ. WRF_INTEGER ) THEN
3114    CALL mpi_type_size( MPI_INTEGER, typesize, ierr )
3115  ELSE IF ( FieldType .EQ. WRF_LOGICAL ) THEN
3116    CALL mpi_type_size( MPI_LOGICAL, typesize, ierr )
3117  ENDIF
3118
3119  IF ( .NOT. okay_to_write( DataHandle ) ) THEN
3120
3121      ! This is a "training" write.
3122      ! It is not okay to actually write yet; here we only bookkeep: count up
3123      ! the number and size of the messages that will be sent to the I/O server
3124      ! associated with this task.
3125
3126      CALL int_gen_write_field_header ( hdrbuf, hdrbufsize, inttypesize, typesize,           &
3127                               DataHandle , DateStr , VarName , Field , FieldType , Comm , IOComm,  &
3128                               333933         , MemoryOrder , Stagger , DimNames ,              &   ! 333933 means training; magic number
3129                               DomainStart , DomainEnd ,                                    &
3130                               MemoryStart , MemoryEnd ,                                    &
3131                               PatchStart , PatchEnd )
3132
3133      int_num_bytes_to_write(DataHandle) = int_num_bytes_to_write(DataHandle) + locsize * typesize + hdrbufsize
3134
3135      ! Send the hdr for the write in case the interface is calling the I/O API in "learn" mode
3136
3137      iserver = get_server_id ( DataHandle )
3138!JMDEBUGwrite(0,*)'wrf_quilt_write_field (dryrun) ',iserver
3139      CALL get_mpi_comm_io_groups( comm_io_group , iserver )
3140      ! send the size of my local buffer to the i/o task (obufsize doesn't mean anything here)
3141
3142      CALL mpi_x_comm_size( comm_io_group , tasks_in_group , ierr )
3143
3144      IF ( .NOT. wrf_dm_on_monitor() ) THEN     ! only one task in compute grid sends this message; send noops on others
3145        CALL int_gen_noop_header( hdrbuf, hdrbufsize, inttypesize )
3146      ENDIF
3147
3148
3149!JMTIMING      CALL start_timing
3150      ! send the size of my local buffer to the i/o task (reduced_dummy doesn't mean anything on the client side)
3151      reduced = 0
3152      reduced(1) = hdrbufsize
3153      IF ( wrf_dm_on_monitor() )  reduced(2) = DataHandle
3154      CALL MPI_Reduce( reduced, reduced_dummy, 2, MPI_INTEGER,  &
3155                       MPI_SUM, tasks_in_group-1,          &   ! root = tasks_in_group-1 is the I/O server in this group
3156                       comm_io_group, ierr )
3157!JMTIMING       CALL end_timing("MPI_Reduce in wrf_quilt_write_field dryrun")
3158      ! send data to the i/o processor
3159
3160      CALL collect_on_comm( comm_io_group,                   &
3161                            onebyte,                          &
3162                            hdrbuf, hdrbufsize ,                 &
3163                            dummy, 0 )
3164
3165  ELSE
3166
3167    IF ( .NOT. associated( int_local_output_buffer ) ) THEN
3168      ALLOCATE ( int_local_output_buffer( (int_num_bytes_to_write( DataHandle )+1)/inttypesize ) )
3169      int_local_output_cursor = 1
3170    ENDIF
3171      iserver = get_server_id ( DataHandle )
3172!JMDEBUGwrite(0,*)'wrf_quilt_write_field (writing) ',iserver
3173
3174    ! This is NOT a "training" write.  It is OK to write now.
3175    CALL int_gen_write_field_header ( hdrbuf, hdrbufsize, inttypesize, typesize,           &
3176                             DataHandle , DateStr , VarName , Field , FieldType , Comm , IOComm,  &
3177                             0          , MemoryOrder , Stagger , DimNames ,              &   ! non-333933 means okay to write; magic number
3178                             DomainStart , DomainEnd ,                                    &
3179                             MemoryStart , MemoryEnd ,                                    &
3180                             PatchStart , PatchEnd )
3181
3182    ! Pack header into int_local_output_buffer.  It will be sent to the
3183    ! I/O servers during the next "iosync" operation. 
3184#ifdef DEREF_KLUDGE
3185    CALL int_pack_data ( hdrbuf , hdrbufsize , int_local_output_buffer(1), int_local_output_cursor )
3186#else
3187    CALL int_pack_data ( hdrbuf , hdrbufsize , int_local_output_buffer, int_local_output_cursor )
3188#endif
3189
3190    ! Pack field data into int_local_output_buffer.  It will be sent to the
3191    ! I/O servers during the next "iosync" operation. 
3192#ifdef DEREF_KLUDGE
3193    CALL int_pack_data ( Field(PatchStart(1):PatchEnd(1),PatchStart(2):PatchEnd(2),PatchStart(3):PatchEnd(3) ), &
3194                                  locsize * typesize , int_local_output_buffer(1), int_local_output_cursor )
3195#else
3196    CALL int_pack_data ( Field(PatchStart(1):PatchEnd(1),PatchStart(2):PatchEnd(2),PatchStart(3):PatchEnd(3) ), &
3197                                  locsize * typesize , int_local_output_buffer, int_local_output_cursor )
3198#endif
3199
3200  ENDIF
3201  Status = 0
3202!JMTIMING   CALL end_timing("wrf_quilt_write_field")
3203
3204#endif
3205  RETURN
3206END SUBROUTINE wrf_quilt_write_field
3207
3208SUBROUTINE wrf_quilt_get_var_info ( DataHandle , VarName , NDim , MemoryOrder , Stagger , &
3209                              DomainStart , DomainEnd , Status )
3210!<DESCRIPTION>
3211! This routine applies only to a dataset that is open for read.  It instructs
3212! the I/O quilt servers to return information about variable VarName.
3213! This routine is called only by client (compute) tasks. 
3214!
3215! This is not yet supported.
3216!</DESCRIPTION>
3217#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3218  IMPLICIT NONE
3219  integer               ,intent(in)     :: DataHandle
3220  character*(*)         ,intent(in)     :: VarName
3221  integer                               :: NDim
3222  character*(*)                         :: MemoryOrder
3223  character*(*)                         :: Stagger
3224  integer ,dimension(*)                 :: DomainStart, DomainEnd
3225  integer                               :: Status
3226#endif
3227RETURN
3228END SUBROUTINE wrf_quilt_get_var_info
3229
3230SUBROUTINE get_mpi_comm_io_groups( retval, isrvr )
3231!<DESCRIPTION>
3232! This routine returns the compute+io communicator to which this
3233! compute task belongs for I/O server group "isrvr".
3234! This routine is called only by client (compute) tasks. 
3235!</DESCRIPTION>
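! Used by the client-side routines above, for example in wrf_quilt_write_field:
!   CALL get_mpi_comm_io_groups( comm_io_group , iserver )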
3236#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3237      USE module_wrf_quilt
3238      IMPLICIT NONE
3239      INTEGER, INTENT(IN ) :: isrvr
3240      INTEGER, INTENT(OUT) :: retval
3241      retval = mpi_comm_io_groups(isrvr)
3242#endif
3243      RETURN
3244END SUBROUTINE get_mpi_comm_io_groups
3245
3246SUBROUTINE get_nio_tasks_in_group( retval )
3247!<DESCRIPTION>
3248! This routine returns the number of I/O server tasks in each
3249! I/O server group.  It can be called by both clients and
3250! servers. 
3251!</DESCRIPTION>
3252#if defined( DM_PARALLEL ) && !defined( STUBMPI )
3253      USE module_wrf_quilt
3254      IMPLICIT NONE
3255      INTEGER, INTENT(OUT) :: retval
3256      retval = nio_tasks_in_group
3257#endif
3258      RETURN
3259END SUBROUTINE get_nio_tasks_in_group
3260
3261