!
! This module contains WRF-specific I/O quilt routines called by both
! client (compute) and server (I/O quilt) tasks. I/O quilt servers are
! a run-time optimization that allows I/O operations, executed on the I/O
! quilt server tasks, to be overlapped with useful computation, executed on
! the compute tasks. Since I/O operations are often quite slow compared to
! computation, this performance optimization can increase parallel
! efficiency.
!
! Currently, one group of I/O servers can be specified at run-time. Namelist
! variable "nio_tasks_per_group" is used to specify the number of I/O server
! tasks in this group. In most cases, parallel efficiency is optimized when
! the minimum number of I/O server tasks is used. If the memory needed to cache
! I/O operations fits on a single processor, then set nio_tasks_per_group=1.
! If not, increase the number of I/O server tasks until I/O operations fit in
! memory. In the future, multiple groups of I/O server tasks will be
! supported. The number of groups will be specified by namelist variable
! "nio_groups". For now, nio_groups must be set to 1. Currently, I/O servers
! only support overlap of output operations with computation. Also, only I/O
! packages that do not support native parallel I/O may be used with I/O server
! tasks. This excludes PHDF5 and MCEL.
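!
! For example, a run that devotes two tasks to a single I/O server group
! might use a &namelist_quilt record like the following in namelist.input
! (illustrative values only):
!
!   &namelist_quilt
!     nio_tasks_per_group = 2,
!     nio_groups          = 1,
!   /
!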
!
! In this module, the I/O quilt server tasks call package-dependent
! WRF-specific I/O interfaces to perform I/O operations requested by the
! client (compute) tasks. All of these calls occur inside subroutine
! quilt().
!
! The client (compute) tasks call package-independent WRF-specific "quilt I/O"
! interfaces that send requests to the I/O quilt servers. All of these calls
! are made from module_io.F.
!
! These routines have the same names and (roughly) the same arguments as those
! specified in the WRF I/O API except that:
! - "Quilt I/O" routines defined in this file and called by routines in
! module_io.F have the "wrf_quilt_" prefix.
! - Package-dependent routines called from routines in this file are defined
! in the external I/O packages and have the "ext_" prefix.
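!
! For example (netCDF is chosen here purely for illustration), an "open for
! write begin" request flows roughly as follows:
!
!   compute task, module_io.F :  CALL wrf_quilt_open_for_write_begin( ... )
!   I/O server task, quilt()  :  CALL ext_ncd_open_for_write_begin( ... )
!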
!
! Both client (compute) and server tasks call routine init_module_wrf_quilt(),
! which then calls setup_quilt_servers() to determine which tasks are compute
! tasks and which are server tasks. Before the end of init_module_wrf_quilt(),
! server tasks call routine quilt() and remain there for the rest of the model
! run. Compute tasks return from init_module_wrf_quilt() to perform model
! computations.
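!
! In outline (a simplified sketch of the control flow described above, not
! the actual code):
!
!   CALL init_module_wrf_quilt            ! called by every task
!     CALL setup_quilt_servers( ... )     ! split tasks, build communicators
!     IF ( this task is an I/O server ) CALL quilt()   ! does not return
!   ! compute tasks fall through and continue with model computations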
!
! See http://www.mmm.ucar.edu/wrf/WG2/software_2.0/IOAPI.doc for the latest
! version of the WRF I/O API. This document includes detailed descriptions
! of subroutines and their arguments that are not duplicated here.
!
!
USE module_internal_header_util
USE module_timing
INTEGER, PARAMETER :: int_num_handles = 99     ! maximum number of I/O handles managed by the quilt routines
LOGICAL, DIMENSION(int_num_handles) :: okay_to_write, int_handle_in_use, okay_to_commit
INTEGER, DIMENSION(int_num_handles) :: int_num_bytes_to_write, io_form
REAL, POINTER :: int_local_output_buffer(:)
INTEGER :: int_local_output_cursor
LOGICAL :: quilting_enabled
LOGICAL :: disable_quilt = .FALSE.
INTEGER :: prev_server_for_handle = -1         ! I/O server group most recently assigned to a handle
INTEGER :: server_for_handle(int_num_handles)  ! I/O server group assigned to each handle
INTEGER :: reduced(2), reduced_dummy(2)
LOGICAL, EXTERNAL :: wrf_dm_on_monitor
INTEGER nio_groups                             ! number of I/O server groups (namelist nio_groups)
#ifdef DM_PARALLEL
INTEGER mpi_comm_local                         ! communicator for the local group of tasks (see setup below)
INTEGER mpi_comm_io_groups(100)                ! communicators linking compute tasks to their I/O servers
INTEGER nio_tasks_in_group
INTEGER nio_tasks_per_group                    ! number of I/O server tasks per group (namelist)
INTEGER ncompute_tasks                         ! number of compute (client) tasks
INTEGER ntasks                                 ! total number of MPI tasks
INTEGER mytask                                 ! rank of this task
INTEGER, PARAMETER :: onebyte = 1
INTEGER comm_io_servers, iserver, hdrbufsize, obufsize
INTEGER, DIMENSION(4096) :: hdrbuf
INTEGER, DIMENSION(int_num_handles) :: handle
#endif
CONTAINS
#if defined(DM_PARALLEL) && !defined( STUBMPI )
SUBROUTINE setup_quilt_servers ( nio_tasks_per_group, mytask, ntasks,  &
                                 n_groups_arg, nio, mpi_comm_wrld,     &
                                 mpi_comm_local, mpi_comm_io_groups )
!
! Both client (compute) and server tasks call this routine to
! determine which tasks are compute tasks and which are I/O server tasks.
!
! Module variables MPI_COMM_LOCAL and MPI_COMM_IO_GROUPS(:) are set up to
! contain MPI communicators as follows:
!
! MPI_COMM_LOCAL is the communicator for the local group of tasks. For the
! compute tasks it is the communicator of the compute tasks; for a server group
! it is the communicator of the tasks in that server group.
!
! Elements of MPI_COMM_IO_GROUPS are communicators that each contain one or
! more compute tasks and the single I/O server assigned to those compute tasks.
! The I/O server task is always the last task in these communicators.
! On a compute task, which is associated with a single I/O server task in each
! of the server groups, MPI_COMM_IO_GROUPS is treated as an array; each element
! corresponds to a different server group.
! On a server task, only the first element of MPI_COMM_IO_GROUPS is used
! because each server task belongs to only one I/O group.
!
! Compute tasks are divided among the I/O server tasks in each I/O server
! group as evenly as possible.
!
! When multiple I/O server groups are used, each group must have the same
! number of tasks. If the total number of tasks set aside for I/O does not
! divide evenly by the number of I/O server groups requested, the remaining
! tasks are not used (they are wasted).
!
! For example, communicator membership for 18 tasks with nio_groups=2 and
! nio_tasks_per_group=3 is shown below:
!
!
! Membership for MPI_COMM_LOCAL communicators:
! COMPUTE TASKS: 0 1 2 3 4 5 6 7 8 9 10 11
! 1ST I/O SERVER GROUP: 12 13 14
! 2ND I/O SERVER GROUP: 15 16 17
!
! Membership for MPI_COMM_IO_GROUPS(1):
! COMPUTE TASKS 0, 3, 6, 9: 0 3 6 9 12
! COMPUTE TASKS 1, 4, 7,10: 1 4 7 10 13
! COMPUTE TASKS 2, 5, 8,11: 2 5 8 11 14
! I/O SERVER TASK 12: 0 3 6 9 12
! I/O SERVER TASK 13: 1 4 7 10 13
! I/O SERVER TASK 14: 2 5 8 11 14
! I/O SERVER TASK 15: 0 3 6 9 15
! I/O SERVER TASK 16: 1 4 7 10 16
! I/O SERVER TASK 17: 2 5 8 11 17
!
! Membership for MPI_COMM_IO_GROUPS(2):
! COMPUTE TASKS 0, 3, 6, 9: 0 3 6 9 15
! COMPUTE TASKS 1, 4, 7,10: 1 4 7 10 16
! COMPUTE TASKS 2, 5, 8,11: 2 5 8 11 17
! I/O SERVER TASK 12: ** not used **
! I/O SERVER TASK 13: ** not used **
! I/O SERVER TASK 14: ** not used **
! I/O SERVER TASK 15: ** not used **
! I/O SERVER TASK 16: ** not used **
! I/O SERVER TASK 17: ** not used **
!
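! A minimal sketch (not the actual code in this routine; variable names such
! as icolor_local are assumptions) of how the MPI_COMM_LOCAL split above can
! be expressed with MPI_COMM_SPLIT:
!
!   IF ( mytask .LT. ncompute_tasks ) THEN   ! all compute tasks share color 0
!     icolor_local = 0
!   ELSE                                     ! each server group gets its own color
!     icolor_local = 1 + ( mytask - ncompute_tasks ) / nio_tasks_per_group
!   END IF
!   CALL MPI_COMM_SPLIT( mpi_comm_wrld, icolor_local, mytask, mpi_comm_local, ierr )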
!
USE module_configure
IMPLICIT NONE
INCLUDE 'mpif.h'
INTEGER, INTENT(IN) :: nio_tasks_per_group, mytask, ntasks, &
n_groups_arg, mpi_comm_wrld
INTEGER, INTENT(OUT) :: mpi_comm_local, nio
INTEGER, DIMENSION(100), INTENT(OUT) :: mpi_comm_io_groups
! Local
INTEGER :: i, j, ii, comdup, ierr, niotasks, n_groups, iisize
INTEGER, DIMENSION(ntasks) :: icolor
CHARACTER*128 mess
INTEGER io_form_setting
! Check the namelist and make sure there are no output forms specified
! that cannot be quilted.
CALL nl_get_io_form_history(1, io_form_setting) ; call sokay( 'history', io_form_setting )
CALL nl_get_io_form_restart(1, io_form_setting) ; call sokay( 'restart', io_form_setting )
CALL nl_get_io_form_auxhist1(1, io_form_setting) ; call sokay( 'auxhist1', io_form_setting )
CALL nl_get_io_form_auxhist2(1, io_form_setting) ; call sokay( 'auxhist2', io_form_setting )
CALL nl_get_io_form_auxhist3(1, io_form_setting) ; call sokay( 'auxhist3', io_form_setting )
CALL nl_get_io_form_auxhist4(1, io_form_setting) ; call sokay( 'auxhist4', io_form_setting )
CALL nl_get_io_form_auxhist5(1, io_form_setting) ; call sokay( 'auxhist5', io_form_setting )
CALL nl_get_io_form_auxhist6(1, io_form_setting) ; call sokay( 'auxhist6', io_form_setting )
CALL nl_get_io_form_auxhist7(1, io_form_setting) ; call sokay( 'auxhist7', io_form_setting )
CALL nl_get_io_form_auxhist8(1, io_form_setting) ; call sokay( 'auxhist8', io_form_setting )
CALL nl_get_io_form_auxhist9(1, io_form_setting) ; call sokay( 'auxhist9', io_form_setting )
CALL nl_get_io_form_auxhist10(1, io_form_setting) ; call sokay( 'auxhist10', io_form_setting )
CALL nl_get_io_form_auxhist11(1, io_form_setting) ; call sokay( 'auxhist11', io_form_setting )
n_groups = n_groups_arg
IF ( n_groups .LT. 1 ) n_groups = 1
!