!WRF:DRIVER_LAYER:DOMAIN_OBJECT ! ! Following are the routines contained within this MODULE: ! alloc_and_configure_domain 1. Allocate the space for a single domain (constants ! and null terminate pointers). ! 2. Connect the domains as a linked list. ! 3. Store all of the domain constants. ! 4. CALL alloc_space_field. ! alloc_space_field 1. Allocate space for the gridded data required for ! each domain. ! dealloc_space_domain 1. Reconnect linked list nodes since the current ! node is removed. ! 2. CALL dealloc_space_field. ! 3. Deallocate single domain. ! dealloc_space_field 1. Deallocate each of the fields for a particular ! domain. ! first_loc_integer 1. Find the first incidence of a particular ! domain identifier from an array of domain ! identifiers. MODULE module_domain USE module_driver_constants USE module_machine USE module_state_description USE module_configure USE module_wrf_error USE module_utility CHARACTER (LEN=80) program_name ! An entire domain. This contains multiple meteorological fields by having ! arrays (such as "data_3d") of pointers for each field. Also inside each ! domain is a link to a couple of other domains, one is just the ! "next" domain that is to be stored, the other is the next domain which ! happens to also be on the "same_level". TYPE domain_ptr TYPE(domain), POINTER :: ptr END TYPE domain_ptr INTEGER, PARAMETER :: HISTORY_ALARM=1, AUXHIST1_ALARM=2, AUXHIST2_ALARM=3, & AUXHIST3_ALARM=4, AUXHIST4_ALARM=5, AUXHIST5_ALARM=6, & AUXHIST6_ALARM=7, AUXHIST7_ALARM=8, AUXHIST8_ALARM=9, & AUXHIST9_ALARM=10, AUXHIST10_ALARM=11, AUXHIST11_ALARM=12, & AUXINPUT1_ALARM=13, AUXINPUT2_ALARM=14, AUXINPUT3_ALARM=15, & AUXINPUT4_ALARM=16, AUXINPUT5_ALARM=17, & AUXINPUT6_ALARM=18, AUXINPUT7_ALARM=19, AUXINPUT8_ALARM=20, & AUXINPUT9_ALARM=21, AUXINPUT10_ALARM=22, AUXINPUT11_ALARM=23, & RESTART_ALARM=24, BOUNDARY_ALARM=25, INPUTOUT_ALARM=26, & ! for outputing input (e.g. for 3dvar) ALARM_SUBTIME=27, & #ifdef MOVE_NESTS COMPUTE_VORTEX_CENTER_ALARM=28, & MAX_WRF_ALARMS=28 ! WARNING: MAX_WRF_ALARMS must be ! large enough to include all of ! the alarms declared above. #else MAX_WRF_ALARMS=27 ! WARNING: MAX_WRF_ALARMS must be ! large enough to include all of ! the alarms declared above. #endif #include TYPE domain ! SEE THE INCLUDE FILE FOR DEFINITIONS OF STATE FIELDS WITHIN THE DOMAIN DATA STRUCTURE #include INTEGER :: comms( max_comms ), shift_x, shift_y INTEGER :: id INTEGER :: domdesc INTEGER :: communicator INTEGER :: iocommunicator INTEGER,POINTER :: mapping(:,:) INTEGER,POINTER :: i_start(:),i_end(:) INTEGER,POINTER :: j_start(:),j_end(:) INTEGER :: max_tiles INTEGER :: num_tiles ! taken out of namelist 20000908 INTEGER :: num_tiles_x ! taken out of namelist 20000908 INTEGER :: num_tiles_y ! taken out of namelist 20000908 INTEGER :: num_tiles_spec ! place to store number of tiles computed from ! externally specified params TYPE(domain_ptr) , DIMENSION( : ) , POINTER :: parents TYPE(domain_ptr) , DIMENSION( : ) , POINTER :: nests TYPE(domain) , POINTER :: sibling ! overlapped domains at same lev TYPE(domain) , POINTER :: intermediate_grid INTEGER :: num_parents, num_nests, num_siblings INTEGER , DIMENSION( max_parents ) :: child_of_parent INTEGER , DIMENSION( max_nests ) :: active INTEGER , DIMENSION(0:5) :: nframes ! frames per outfile for history ! streams (0 is main history) TYPE(domain) , POINTER :: next TYPE(domain) , POINTER :: same_level LOGICAL , DIMENSION ( 4 ) :: bdy_mask ! which boundaries are on processor LOGICAL :: first_force ! 
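! The parents/nests pointer arrays above, together with next and same_level,
! are what link TYPE(domain) instances into the nest hierarchy and into the
! per-level lists.  A minimal traversal sketch is given here for orientation
! only; the routine name visit_nests is hypothetical, and show_nest_subtree
! later in this module performs the same walk with a corruption check added:
!
!   RECURSIVE SUBROUTINE visit_nests( grid )
!     TYPE(domain), POINTER :: grid
!     INTEGER :: kid
!     IF ( .NOT. ASSOCIATED( grid ) ) RETURN
!     DO kid = 1, max_nests
!       IF ( ASSOCIATED( grid%nests(kid)%ptr ) ) CALL visit_nests( grid%nests(kid)%ptr )
!     ENDDO
!   END SUBROUTINE visit_nests
!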
domain dimensions INTEGER :: sd31, ed31, sd32, ed32, sd33, ed33, & sd21, ed21, sd22, ed22, & sd11, ed11 INTEGER :: sp31, ep31, sp32, ep32, sp33, ep33, & sp21, ep21, sp22, ep22, & sp11, ep11, & sm31, em31, sm32, em32, sm33, em33, & sm21, em21, sm22, em22, & sm11, em11, & sp31x, ep31x, sp32x, ep32x, sp33x, ep33x, & sp21x, ep21x, sp22x, ep22x, & sm31x, em31x, sm32x, em32x, sm33x, em33x, & sm21x, em21x, sm22x, em22x, & sp31y, ep31y, sp32y, ep32y, sp33y, ep33y, & sp21y, ep21y, sp22y, ep22y, & sm31y, em31y, sm32y, em32y, sm33y, em33y, & sm21y, em21y, sm22y, em22y Type(WRFU_Clock), POINTER :: domain_clock Type(WRFU_Time) :: start_subtime, stop_subtime Type(WRFU_Time) :: this_bdy_time, next_bdy_time Type(WRFU_Time) :: this_emi_time, next_emi_time Type(WRFU_TimeInterval), DIMENSION(MAX_WRF_ALARMS) :: io_intervals Type(WRFU_Alarm), POINTER :: alarms(:) ! This awful hackery accounts for the fact that ESMF2.2.0 objects cannot tell ! us if they have ever been created or not. So, we have to keep track of this ! ourselves to avoid destroying an object that has never been created! Rip ! this out once ESMF has useful introspection for creation... LOGICAL :: domain_clock_created LOGICAL, POINTER :: alarms_created(:) ! Have clocks and times been initialized yet? LOGICAL :: time_set ! This flag controls first-time-step behavior for ESMF runs ! which require components to return to the top-level driver ! after initializing import and export states. In WRF, this ! initialization is done in the "training phase" of ! med_before_solve_io(). LOGICAL :: return_after_training_io END TYPE domain ! Now that a "domain" TYPE exists, we can use it to store a few pointers ! to this type. These are primarily for use in traversing the linked list. ! The "head_grid" is always the pointer to the first domain that is ! allocated. This is available and is not to be changed. The others are ! just temporary pointers. TYPE(domain) , POINTER :: head_grid , new_grid , next_grid , old_grid ! To facilitate an easy integration of each of the domains that are on the ! same level, we have an array for the head pointer for each level. This ! removed the need to search through the linked list at each time step to ! find which domains are to be active. TYPE domain_levels TYPE(domain) , POINTER :: first_domain END TYPE domain_levels TYPE(domain_levels) , DIMENSION(max_levels) :: head_for_each_level ! Use this to support debugging features, giving easy access to clock, etc. TYPE(domain), POINTER :: current_grid LOGICAL, SAVE :: current_grid_set = .FALSE. ! 
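! The sd/ed (domain), sp/ep (patch) and sm/em (memory) members of TYPE(domain)
! above are stored in the 31/32/33 dimension order selected by model_data_order,
! not directly in i/j/k order.  Rather than decoding them by hand, callers can
! use get_ijk_from_grid (defined below) to obtain i/j/k bounds.  A minimal
! sketch, with illustrative local variable names:
!
!   INTEGER :: ids,ide, jds,jde, kds,kde, ims,ime, jms,jme, kms,kme, &
!              ips,ipe, jps,jpe, kps,kpe
!   CALL get_ijk_from_grid( grid, ids, ide, jds, jde, kds, kde, &
!                                 ims, ime, jms, jme, kms, kme, &
!                                 ips, ipe, jps, jpe, kps, kpe )
!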
internal routines PRIVATE domain_time_test_print PRIVATE test_adjust_io_timestr CONTAINS SUBROUTINE adjust_domain_dims_for_move( grid , dx, dy ) IMPLICIT NONE TYPE( domain ), POINTER :: grid INTEGER, INTENT(IN) :: dx, dy data_ordering : SELECT CASE ( model_data_order ) CASE ( DATA_ORDER_XYZ ) grid%sm31 = grid%sm31 + dx grid%em31 = grid%em31 + dx grid%sm32 = grid%sm32 + dy grid%em32 = grid%em32 + dy grid%sp31 = grid%sp31 + dx grid%ep31 = grid%ep31 + dx grid%sp32 = grid%sp32 + dy grid%ep32 = grid%ep32 + dy grid%sd31 = grid%sd31 + dx grid%ed31 = grid%ed31 + dx grid%sd32 = grid%sd32 + dy grid%ed32 = grid%ed32 + dy CASE ( DATA_ORDER_YXZ ) grid%sm31 = grid%sm31 + dy grid%em31 = grid%em31 + dy grid%sm32 = grid%sm32 + dx grid%em32 = grid%em32 + dx grid%sp31 = grid%sp31 + dy grid%ep31 = grid%ep31 + dy grid%sp32 = grid%sp32 + dx grid%ep32 = grid%ep32 + dx grid%sd31 = grid%sd31 + dy grid%ed31 = grid%ed31 + dy grid%sd32 = grid%sd32 + dx grid%ed32 = grid%ed32 + dx CASE ( DATA_ORDER_ZXY ) grid%sm32 = grid%sm32 + dx grid%em32 = grid%em32 + dx grid%sm33 = grid%sm33 + dy grid%em33 = grid%em33 + dy grid%sp32 = grid%sp32 + dx grid%ep32 = grid%ep32 + dx grid%sp33 = grid%sp33 + dy grid%ep33 = grid%ep33 + dy grid%sd32 = grid%sd32 + dx grid%ed32 = grid%ed32 + dx grid%sd33 = grid%sd33 + dy grid%ed33 = grid%ed33 + dy CASE ( DATA_ORDER_ZYX ) grid%sm32 = grid%sm32 + dy grid%em32 = grid%em32 + dy grid%sm33 = grid%sm33 + dx grid%em33 = grid%em33 + dx grid%sp32 = grid%sp32 + dy grid%ep32 = grid%ep32 + dy grid%sp33 = grid%sp33 + dx grid%ep33 = grid%ep33 + dx grid%sd32 = grid%sd32 + dy grid%ed32 = grid%ed32 + dy grid%sd33 = grid%sd33 + dx grid%ed33 = grid%ed33 + dx CASE ( DATA_ORDER_XZY ) grid%sm31 = grid%sm31 + dx grid%em31 = grid%em31 + dx grid%sm33 = grid%sm33 + dy grid%em33 = grid%em33 + dy grid%sp31 = grid%sp31 + dx grid%ep31 = grid%ep31 + dx grid%sp33 = grid%sp33 + dy grid%ep33 = grid%ep33 + dy grid%sd31 = grid%sd31 + dx grid%ed31 = grid%ed31 + dx grid%sd33 = grid%sd33 + dy grid%ed33 = grid%ed33 + dy CASE ( DATA_ORDER_YZX ) grid%sm31 = grid%sm31 + dy grid%em31 = grid%em31 + dy grid%sm33 = grid%sm33 + dx grid%em33 = grid%em33 + dx grid%sp31 = grid%sp31 + dy grid%ep31 = grid%ep31 + dy grid%sp33 = grid%sp33 + dx grid%ep33 = grid%ep33 + dx grid%sd31 = grid%sd31 + dy grid%ed31 = grid%ed31 + dy grid%sd33 = grid%sd33 + dx grid%ed33 = grid%ed33 + dx END SELECT data_ordering #if 0 CALL dealloc_space_field ( grid ) CALL alloc_space_field ( grid, grid%id , 1 , 2 , .FALSE. , & grid%sd31, grid%ed31, grid%sd32, grid%ed32, grid%sd33, grid%ed33, & grid%sm31, grid%em31, grid%sm32, grid%em32, grid%sm33, grid%em33, & grid%sm31x, grid%em31x, grid%sm32x, grid%em32x, grid%sm33x, grid%em33x, & ! x-xpose grid%sm31y, grid%em31y, grid%sm32y, grid%em32y, grid%sm33y, grid%em33y & ! 
y-xpose ) #endif RETURN END SUBROUTINE adjust_domain_dims_for_move SUBROUTINE get_ijk_from_grid ( grid , & ids, ide, jds, jde, kds, kde, & ims, ime, jms, jme, kms, kme, & ips, ipe, jps, jpe, kps, kpe ) IMPLICIT NONE TYPE( domain ), INTENT (IN) :: grid INTEGER, INTENT(OUT) :: & ids, ide, jds, jde, kds, kde, & ims, ime, jms, jme, kms, kme, & ips, ipe, jps, jpe, kps, kpe data_ordering : SELECT CASE ( model_data_order ) CASE ( DATA_ORDER_XYZ ) ids = grid%sd31 ide = grid%ed31 jds = grid%sd32 jde = grid%ed32 kds = grid%sd33 kde = grid%ed33 ims = grid%sm31 ime = grid%em31 jms = grid%sm32 jme = grid%em32 kms = grid%sm33 kme = grid%em33 ips = grid%sp31 ipe = grid%ep31 jps = grid%sp32 jpe = grid%ep32 kps = grid%sp33 kpe = grid%ep33 CASE ( DATA_ORDER_YXZ ) ids = grid%sd32 ide = grid%ed32 jds = grid%sd31 jde = grid%ed31 kds = grid%sd33 kde = grid%ed33 ims = grid%sm32 ime = grid%em32 jms = grid%sm31 jme = grid%em31 kms = grid%sm33 kme = grid%em33 ips = grid%sp32 ipe = grid%ep32 jps = grid%sp31 jpe = grid%ep31 kps = grid%sp33 kpe = grid%ep33 CASE ( DATA_ORDER_ZXY ) ids = grid%sd32 ide = grid%ed32 jds = grid%sd33 jde = grid%ed33 kds = grid%sd31 kde = grid%ed31 ims = grid%sm32 ime = grid%em32 jms = grid%sm33 jme = grid%em33 kms = grid%sm31 kme = grid%em31 ips = grid%sp32 ipe = grid%ep32 jps = grid%sp33 jpe = grid%ep33 kps = grid%sp31 kpe = grid%ep31 CASE ( DATA_ORDER_ZYX ) ids = grid%sd33 ide = grid%ed33 jds = grid%sd32 jde = grid%ed32 kds = grid%sd31 kde = grid%ed31 ims = grid%sm33 ime = grid%em33 jms = grid%sm32 jme = grid%em32 kms = grid%sm31 kme = grid%em31 ips = grid%sp33 ipe = grid%ep33 jps = grid%sp32 jpe = grid%ep32 kps = grid%sp31 kpe = grid%ep31 CASE ( DATA_ORDER_XZY ) ids = grid%sd31 ide = grid%ed31 jds = grid%sd33 jde = grid%ed33 kds = grid%sd32 kde = grid%ed32 ims = grid%sm31 ime = grid%em31 jms = grid%sm33 jme = grid%em33 kms = grid%sm32 kme = grid%em32 ips = grid%sp31 ipe = grid%ep31 jps = grid%sp33 jpe = grid%ep33 kps = grid%sp32 kpe = grid%ep32 CASE ( DATA_ORDER_YZX ) ids = grid%sd33 ide = grid%ed33 jds = grid%sd31 jde = grid%ed31 kds = grid%sd32 kde = grid%ed32 ims = grid%sm33 ime = grid%em33 jms = grid%sm31 jme = grid%em31 kms = grid%sm32 kme = grid%em32 ips = grid%sp33 ipe = grid%ep33 jps = grid%sp31 jpe = grid%ep31 kps = grid%sp32 kpe = grid%ep32 END SELECT data_ordering END SUBROUTINE get_ijk_from_grid ! Default version ; Otherwise module containing interface to DM library will provide SUBROUTINE wrf_patch_domain( id , domdesc , parent, parent_id , parent_domdesc , & sd1 , ed1 , sp1 , ep1 , sm1 , em1 , & sd2 , ed2 , sp2 , ep2 , sm2 , em2 , & sd3 , ed3 , sp3 , ep3 , sm3 , em3 , & sp1x , ep1x , sm1x , em1x , & sp2x , ep2x , sm2x , em2x , & sp3x , ep3x , sm3x , em3x , & sp1y , ep1y , sm1y , em1y , & sp2y , ep2y , sm2y , em2y , & sp3y , ep3y , sm3y , em3y , & bdx , bdy , bdy_mask ) ! ! Wrf_patch_domain is called as part of the process of initiating a new ! domain. Based on the global domain dimension information that is ! passed in it computes the patch and memory dimensions on this ! distributed-memory process for parallel compilation when DM_PARALLEL is ! defined in configure.wrf. In this case, it relies on an external ! communications package-contributed routine, wrf_dm_patch_domain. For ! non-parallel compiles, it returns the patch and memory dimensions based ! on the entire domain. In either case, the memory dimensions will be ! larger than the patch dimensions, since they allow for distributed ! memory halo regions (DM_PARALLEL only) and for boundary regions around ! 
the domain (used for idealized cases only). The width of the boundary ! regions to be accommodated is passed in as bdx and bdy. ! ! The bdy_mask argument is a four-element logical array, each element ! of which is set to true for any boundaries that this process's patch ! contains (all four are true in the non-DM_PARALLEL case) and false ! otherwise. The indices into the bdy_mask are defined in ! frame/module_state_description.F. P_XSB corresponds to the boundary that ! exists at the beginning of the X-dimension; i.e. the western boundary; ! P_XEB to the boundary that corresponds to the end of the X-dimension ! (east). Likewise for Y (south and north respectively). ! ! The correspondence of the first, second, and third dimension of each ! set (domain, memory, and patch) with the coordinate axes of the model ! domain is based on the setting of the variable model_data_order, which ! comes into this routine through USE association of ! module_driver_constants in the enclosing module of this routine, ! module_domain. Model_data_order is defined by the Registry, based on ! the dimspec entries which associate dimension specifiers (e.g. 'k') in ! the Registry with a coordinate axis and specify which dimension of the ! arrays they represent. For WRF, the sd1 , ed1 , sp1 , ep1 , sm1 , and ! em1 correspond to the starts and ends of the global, patch, and memory ! dimensions in X; those with 2 specify Z (vertical); and those with 3 ! specify Y. Note that the WRF convention is to overdimension to allow ! for staggered fields so that sdn:edn are the starts ! and ends of the staggered domains in X. The non-staggered grid runs ! sdn:edn-1. The extra row or column on the north or ! east boundaries is not used for non-staggered fields. ! ! The domdesc and parent_domdesc arguments are for external communication ! packages (e.g. RSL) that establish and return to WRF integer handles ! for referring to operations on domains. These descriptors are not set ! or used otherwise and they are opaque, which means they are never ! accessed or modified in WRF; they are only passed between calls to ! the external package. ! USE module_machine IMPLICIT NONE LOGICAL, DIMENSION(4), INTENT(OUT) :: bdy_mask INTEGER, INTENT(IN) :: sd1 , ed1 , sd2 , ed2 , sd3 , ed3 , bdx , bdy INTEGER, INTENT(OUT) :: sp1 , ep1 , sp2 , ep2 , sp3 , ep3 , & ! z-xpose (std) sm1 , em1 , sm2 , em2 , sm3 , em3 INTEGER, INTENT(OUT) :: sp1x , ep1x , sp2x , ep2x , sp3x , ep3x , & ! x-xpose sm1x , em1x , sm2x , em2x , sm3x , em3x INTEGER, INTENT(OUT) :: sp1y , ep1y , sp2y , ep2y , sp3y , ep3y , & ! y-xpose sm1y , em1y , sm2y , em2y , sm3y , em3y INTEGER, INTENT(IN) :: id , parent_id , parent_domdesc INTEGER, INTENT(INOUT) :: domdesc TYPE(domain), POINTER :: parent !local data INTEGER spec_bdy_width CALL nl_get_spec_bdy_width( 1, spec_bdy_width ) #ifndef DM_PARALLEL bdy_mask = .true. ! only one processor so all 4 boundaries are there ! this is a trivial version -- 1 patch per processor; !
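! The SELECT CASE below simply widens the patch by bdx points in X and bdy
! points in Y to form the memory dimensions.  As a worked example in the
! DATA_ORDER_XZY case (the value 4 is only an assumed, typical boundary-zone
! width): with bdx = 4 and an X range sd1:ed1 = 1:425, the serial case gives
! sp1:ep1 = 1:425 and sm1:em1 = -3:429.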
use version in module_dm to compute for DM sp1 = sd1 ; sp2 = sd2 ; sp3 = sd3 ep1 = ed1 ; ep2 = ed2 ; ep3 = ed3 SELECT CASE ( model_data_order ) CASE ( DATA_ORDER_XYZ ) sm1 = sp1 - bdx ; em1 = ep1 + bdx sm2 = sp2 - bdy ; em2 = ep2 + bdy sm3 = sp3 ; em3 = ep3 CASE ( DATA_ORDER_YXZ ) sm1 = sp1 - bdy ; em1 = ep1 + bdy sm2 = sp2 - bdx ; em2 = ep2 + bdx sm3 = sp3 ; em3 = ep3 CASE ( DATA_ORDER_ZXY ) sm1 = sp1 ; em1 = ep1 sm2 = sp2 - bdx ; em2 = ep2 + bdx sm3 = sp3 - bdy ; em3 = ep3 + bdy CASE ( DATA_ORDER_ZYX ) sm1 = sp1 ; em1 = ep1 sm2 = sp2 - bdy ; em2 = ep2 + bdy sm3 = sp3 - bdx ; em3 = ep3 + bdx CASE ( DATA_ORDER_XZY ) sm1 = sp1 - bdx ; em1 = ep1 + bdx sm2 = sp2 ; em2 = ep2 sm3 = sp3 - bdy ; em3 = ep3 + bdy CASE ( DATA_ORDER_YZX ) sm1 = sp1 - bdy ; em1 = ep1 + bdy sm2 = sp2 ; em2 = ep2 sm3 = sp3 - bdx ; em3 = ep3 + bdx END SELECT sm1x = sm1 ; em1x = em1 ! just copy sm2x = sm2 ; em2x = em2 sm3x = sm3 ; em3x = em3 sm1y = sm1 ; em1y = em1 ! just copy sm2y = sm2 ; em2y = em2 sm3y = sm3 ; em3y = em3 ! assigns mostly just to suppress warning messages that INTENT OUT vars not assigned sp1x = sp1 ; ep1x = ep1 ; sp2x = sp2 ; ep2x = ep2 ; sp3x = sp3 ; ep3x = ep3 sp1y = sp1 ; ep1y = ep1 ; sp2y = sp2 ; ep2y = ep2 ; sp3y = sp3 ; ep3y = ep3 #else ! This is supplied by the package specific version of module_dm, which ! is supplied by the external package and copied into the src directory ! when the code is compiled. The cp command will be found in the externals ! target of the configure.wrf file for this architecture. Eg: for RSL ! routine is defined in external/RSL/module_dm.F . ! Note, it would be very nice to be able to pass parent to this routine; ! however, there doesn't seem to be a way to do that in F90. That is because ! to pass a pointer to a domain structure, this call requires an interface ! definition for wrf_dm_patch_domain (otherwise it will try to convert the ! pointer to something). In order to provide an interface definition, we ! would need to either USE module_dm or use an interface block. In either ! case it generates a circular USE reference, since module_dm uses ! module_domain. JM 20020416 CALL wrf_dm_patch_domain( id , domdesc , parent_id , parent_domdesc , & sd1 , ed1 , sp1 , ep1 , sm1 , em1 , & sd2 , ed2 , sp2 , ep2 , sm2 , em2 , & sd3 , ed3 , sp3 , ep3 , sm3 , em3 , & sp1x , ep1x , sm1x , em1x , & sp2x , ep2x , sm2x , em2x , & sp3x , ep3x , sm3x , em3x , & sp1y , ep1y , sm1y , em1y , & sp2y , ep2y , sm2y , em2y , & sp3y , ep3y , sm3y , em3y , & bdx , bdy ) SELECT CASE ( model_data_order ) CASE ( DATA_ORDER_XYZ ) bdy_mask( P_XSB ) = ( sp1 <= sd1 .AND. sd1 <= ep1 .AND. sp1 <= sd1+spec_bdy_width-1 .AND. sd1+spec_bdy_width-1 <= ep1 ) bdy_mask( P_YSB ) = ( sp2 <= sd2 .AND. sd2 <= ep2 .AND. sp2 <= sd2+spec_bdy_width-1 .AND. sd2+spec_bdy_width-1 <= ep2 ) bdy_mask( P_XEB ) = ( sp1 <= ed1 .AND. ed1 <= ep1 .AND. sp1 <= ed1-spec_bdy_width-1 .AND. ed1-spec_bdy_width-1 <= ep1 ) bdy_mask( P_YEB ) = ( sp2 <= ed2 .AND. ed2 <= ep2 .AND. sp2 <= ed2-spec_bdy_width-1 .AND. ed2-spec_bdy_width-1 <= ep2 ) CASE ( DATA_ORDER_YXZ ) bdy_mask( P_XSB ) = ( sp2 <= sd2 .AND. sd2 <= ep2 .AND. sp2 <= sd2+spec_bdy_width-1 .AND. sd2+spec_bdy_width-1 <= ep2 ) bdy_mask( P_YSB ) = ( sp1 <= sd1 .AND. sd1 <= ep1 .AND. sp1 <= sd1+spec_bdy_width-1 .AND. sd1+spec_bdy_width-1 <= ep1 ) bdy_mask( P_XEB ) = ( sp2 <= ed2 .AND. ed2 <= ep2 .AND. sp2 <= ed2-spec_bdy_width-1 .AND. ed2-spec_bdy_width-1 <= ep2 ) bdy_mask( P_YEB ) = ( sp1 <= ed1 .AND. ed1 <= ep1 .AND. sp1 <= ed1-spec_bdy_width-1 .AND. 
ed1-spec_bdy_width-1 <= ep1 ) CASE ( DATA_ORDER_ZXY ) bdy_mask( P_XSB ) = ( sp2 <= sd2 .AND. sd2 <= ep2 .AND. sp2 <= sd2+spec_bdy_width-1 .AND. sd2+spec_bdy_width-1 <= ep2 ) bdy_mask( P_YSB ) = ( sp3 <= sd3 .AND. sd3 <= ep3 .AND. sp3 <= sd3+spec_bdy_width-1 .AND. sd3+spec_bdy_width-1 <= ep3 ) bdy_mask( P_XEB ) = ( sp2 <= ed2 .AND. ed2 <= ep2 .AND. sp2 <= ed2-spec_bdy_width-1 .AND. ed2-spec_bdy_width-1 <= ep2 ) bdy_mask( P_YEB ) = ( sp3 <= ed3 .AND. ed3 <= ep3 .AND. sp3 <= ed3-spec_bdy_width-1 .AND. ed3-spec_bdy_width-1 <= ep3 ) CASE ( DATA_ORDER_ZYX ) bdy_mask( P_XSB ) = ( sp3 <= sd3 .AND. sd3 <= ep3 .AND. sp3 <= sd3+spec_bdy_width-1 .AND. sd3+spec_bdy_width-1 <= ep3 ) bdy_mask( P_YSB ) = ( sp2 <= sd2 .AND. sd2 <= ep2 .AND. sp2 <= sd2+spec_bdy_width-1 .AND. sd2+spec_bdy_width-1 <= ep2 ) bdy_mask( P_XEB ) = ( sp3 <= ed3 .AND. ed3 <= ep3 .AND. sp3 <= ed3-spec_bdy_width-1 .AND. ed3-spec_bdy_width-1 <= ep3 ) bdy_mask( P_YEB ) = ( sp2 <= ed2 .AND. ed2 <= ep2 .AND. sp2 <= ed2-spec_bdy_width-1 .AND. ed2-spec_bdy_width-1 <= ep2 ) CASE ( DATA_ORDER_XZY ) bdy_mask( P_XSB ) = ( sp1 <= sd1 .AND. sd1 <= ep1 .AND. sp1 <= sd1+spec_bdy_width-1 .AND. sd1+spec_bdy_width-1 <= ep1 ) bdy_mask( P_YSB ) = ( sp3 <= sd3 .AND. sd3 <= ep3 .AND. sp3 <= sd3+spec_bdy_width-1 .AND. sd3+spec_bdy_width-1 <= ep3 ) bdy_mask( P_XEB ) = ( sp1 <= ed1 .AND. ed1 <= ep1 .AND. sp1 <= ed1-spec_bdy_width-1 .AND. ed1-spec_bdy_width-1 <= ep1 ) bdy_mask( P_YEB ) = ( sp3 <= ed3 .AND. ed3 <= ep3 .AND. sp3 <= ed3-spec_bdy_width-1 .AND. ed3-spec_bdy_width-1 <= ep3 ) CASE ( DATA_ORDER_YZX ) bdy_mask( P_XSB ) = ( sp3 <= sd3 .AND. sd3 <= ep3 .AND. sp3 <= sd3+spec_bdy_width-1 .AND. sd3+spec_bdy_width-1 <= ep3 ) bdy_mask( P_YSB ) = ( sp1 <= sd1 .AND. sd1 <= ep1 .AND. sp1 <= sd1+spec_bdy_width-1 .AND. sd1+spec_bdy_width-1 <= ep1 ) bdy_mask( P_XEB ) = ( sp3 <= ed3 .AND. ed3 <= ep3 .AND. sp3 <= ed3-spec_bdy_width-1 .AND. ed3-spec_bdy_width-1 <= ep3 ) bdy_mask( P_YEB ) = ( sp1 <= ed1 .AND. ed1 <= ep1 .AND. sp1 <= ed1-spec_bdy_width-1 .AND. ed1-spec_bdy_width-1 <= ep1 ) END SELECT #endif RETURN END SUBROUTINE wrf_patch_domain ! SUBROUTINE alloc_and_configure_domain ( domain_id , grid , parent, kid ) ! ! This subroutine is used to allocate a domain data structure of ! TYPE(DOMAIN) pointed to by the argument grid, link it into the ! nested domain hierarchy, and set its configuration information from ! the appropriate settings in the WRF namelist file. Specifically, if the ! domain being allocated and configured is a nest, the parent ! argument will point to the already existing domain data structure for ! the parent domain and the kid argument will be set to an ! integer indicating which child of the parent this grid will be (child ! indices start at 1). If this is the top-level domain, the parent and ! kid arguments are ignored. WRF domains may have multiple children ! but only ever have one parent. ! ! The domain_id argument is the ! integer handle by which this new domain will be referred to; it comes from ! the grid_id setting in the namelist, and these grid ids correspond to ! the ordering of settings in the namelist, starting with 1 for the ! top-level domain. The id of 1 always corresponds to the top-level ! domain. ! ! Model_data_order is provided by USE association of ! module_driver_constants and is set from dimspec entries in the ! Registry. ! ! The allocation of the TYPE(DOMAIN) itself occurs in this routine; ! a calling sketch is shown below. !
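! A minimal calling sketch for the top-level domain follows.  The variable
! name null_parent is illustrative only (any nullified TYPE(domain) pointer
! will do), and the kid value is arbitrary because it is ignored for the
! top-level domain:
!
!   TYPE(domain), POINTER :: null_parent
!   NULLIFY( null_parent )
!   CALL alloc_and_configure_domain ( domain_id = 1 , grid = head_grid , &
!                                     parent = null_parent , kid = -1 )
!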
However, the numerous multi-dimensional arrays that make up the members ! of the domain are allocated in the call to alloc_space_field, after ! wrf_patch_domain has been called to determine the dimensions in memory ! that should be allocated. It bears noting here that arrays and code ! that indexes these arrays are always global, regardless of how the ! model is decomposed over patches. Thus, when arrays are allocated on a ! given process, the start and end of an array dimension are the global ! indices of the start and end of that process's subdomain. ! ! Configuration information for the domain (that is, information from the ! namelist) is added by the call to med_add_config_info_to_grid, defined ! in share/mediation_wrfmain.F. ! IMPLICIT NONE ! Input data. INTEGER , INTENT(IN) :: domain_id TYPE( domain ) , POINTER :: grid TYPE( domain ) , POINTER :: parent INTEGER , INTENT(IN) :: kid ! which kid of parent am I? ! Local data. INTEGER :: sd1 , ed1 , sp1 , ep1 , sm1 , em1 INTEGER :: sd2 , ed2 , sp2 , ep2 , sm2 , em2 INTEGER :: sd3 , ed3 , sp3 , ep3 , sm3 , em3 INTEGER :: sd1x , ed1x , sp1x , ep1x , sm1x , em1x INTEGER :: sd2x , ed2x , sp2x , ep2x , sm2x , em2x INTEGER :: sd3x , ed3x , sp3x , ep3x , sm3x , em3x INTEGER :: sd1y , ed1y , sp1y , ep1y , sm1y , em1y INTEGER :: sd2y , ed2y , sp2y , ep2y , sm2y , em2y INTEGER :: sd3y , ed3y , sp3y , ep3y , sm3y , em3y TYPE(domain) , POINTER :: new_grid INTEGER :: i INTEGER :: parent_id , parent_domdesc , new_domdesc INTEGER :: bdyzone_x , bdyzone_y INTEGER :: nx, ny ! This next step uses information that is listed in the registry as namelist_derived ! to properly size the domain and the patches; this in turn is stored in the new_grid ! data structure data_ordering : SELECT CASE ( model_data_order ) CASE ( DATA_ORDER_XYZ ) CALL nl_get_s_we( domain_id , sd1 ) CALL nl_get_e_we( domain_id , ed1 ) CALL nl_get_s_sn( domain_id , sd2 ) CALL nl_get_e_sn( domain_id , ed2 ) CALL nl_get_s_vert( domain_id , sd3 ) CALL nl_get_e_vert( domain_id , ed3 ) nx = ed1-sd1+1 ny = ed2-sd2+1 CASE ( DATA_ORDER_YXZ ) CALL nl_get_s_sn( domain_id , sd1 ) CALL nl_get_e_sn( domain_id , ed1 ) CALL nl_get_s_we( domain_id , sd2 ) CALL nl_get_e_we( domain_id , ed2 ) CALL nl_get_s_vert( domain_id , sd3 ) CALL nl_get_e_vert( domain_id , ed3 ) nx = ed2-sd2+1 ny = ed1-sd1+1 CASE ( DATA_ORDER_ZXY ) CALL nl_get_s_vert( domain_id , sd1 ) CALL nl_get_e_vert( domain_id , ed1 ) CALL nl_get_s_we( domain_id , sd2 ) CALL nl_get_e_we( domain_id , ed2 ) CALL nl_get_s_sn( domain_id , sd3 ) CALL nl_get_e_sn( domain_id , ed3 ) nx = ed2-sd2+1 ny = ed3-sd3+1 CASE ( DATA_ORDER_ZYX ) CALL nl_get_s_vert( domain_id , sd1 ) CALL nl_get_e_vert( domain_id , ed1 ) CALL nl_get_s_sn( domain_id , sd2 ) CALL nl_get_e_sn( domain_id , ed2 ) CALL nl_get_s_we( domain_id , sd3 ) CALL nl_get_e_we( domain_id , ed3 ) nx = ed3-sd3+1 ny = ed2-sd2+1 CASE ( DATA_ORDER_XZY ) CALL nl_get_s_we( domain_id , sd1 ) CALL nl_get_e_we( domain_id , ed1 ) CALL nl_get_s_vert( domain_id , sd2 ) CALL nl_get_e_vert( domain_id , ed2 ) CALL nl_get_s_sn( domain_id , sd3 ) CALL nl_get_e_sn( domain_id , ed3 ) nx = ed1-sd1+1 ny = ed3-sd3+1 CASE ( DATA_ORDER_YZX ) CALL nl_get_s_sn( domain_id , sd1 ) CALL nl_get_e_sn( domain_id , ed1 ) CALL nl_get_s_vert( domain_id , sd2 ) CALL nl_get_e_vert( domain_id , ed2 ) CALL nl_get_s_we( domain_id , sd3 ) CALL nl_get_e_we( domain_id , ed3 ) nx = ed3-sd3+1 ny = ed1-sd1+1 END SELECT data_ordering #ifdef RSL ! Check domain size to be sure it is within RSLs limit IF ( nx .GE. 1024 .OR. ny .GE. 
1024 ) THEN WRITE ( wrf_err_message , * ) & 'domain too large for RSL. Use RSL_LITE or other comm package.' CALL wrf_error_fatal ( TRIM ( wrf_err_message ) ) ENDIF #endif IF ( num_time_levels > 3 ) THEN WRITE ( wrf_err_message , * ) 'alloc_and_configure_domain: ', & 'Incorrect value for num_time_levels ', num_time_levels CALL wrf_error_fatal ( TRIM ( wrf_err_message ) ) ENDIF IF (ASSOCIATED(parent)) THEN parent_id = parent%id parent_domdesc = parent%domdesc ELSE parent_id = -1 parent_domdesc = -1 ENDIF ! provided by application, WRF defines in share/module_bc.F CALL get_bdyzone_x( bdyzone_x ) CALL get_bdyzone_y( bdyzone_y ) ALLOCATE ( new_grid ) ALLOCATE ( new_grid%parents( max_parents ) ) ALLOCATE ( new_grid%nests( max_nests ) ) NULLIFY( new_grid%sibling ) DO i = 1, max_nests NULLIFY( new_grid%nests(i)%ptr ) ENDDO NULLIFY (new_grid%next) NULLIFY (new_grid%same_level) NULLIFY (new_grid%i_start) NULLIFY (new_grid%j_start) NULLIFY (new_grid%i_end) NULLIFY (new_grid%j_end) ALLOCATE( new_grid%domain_clock ) new_grid%domain_clock_created = .FALSE. ALLOCATE( new_grid%alarms( MAX_WRF_ALARMS ) ) ! initialize in setup_timekeeping ALLOCATE( new_grid%alarms_created( MAX_WRF_ALARMS ) ) DO i = 1, MAX_WRF_ALARMS new_grid%alarms_created( i ) = .FALSE. ENDDO new_grid%time_set = .FALSE. new_grid%return_after_training_io = .FALSE. ! set up the pointers that represent the nest hierarchy ! set this up *prior* to calling the patching or allocation ! routines so that implementations of these routines can ! traverse the nest hierarchy (through the root head_grid) ! if they need to IF ( domain_id .NE. 1 ) THEN new_grid%parents(1)%ptr => parent new_grid%num_parents = 1 parent%nests(kid)%ptr => new_grid new_grid%child_of_parent(1) = kid ! note assumption that nest can have only 1 parent parent%num_nests = parent%num_nests + 1 END IF new_grid%id = domain_id ! this needs to be assigned prior to calling wrf_patch_domain CALL wrf_patch_domain( domain_id , new_domdesc , parent, parent_id, parent_domdesc , & sd1 , ed1 , sp1 , ep1 , sm1 , em1 , & ! z-xpose dims sd2 , ed2 , sp2 , ep2 , sm2 , em2 , & ! (standard) sd3 , ed3 , sp3 , ep3 , sm3 , em3 , & sp1x , ep1x , sm1x , em1x , & ! x-xpose dims sp2x , ep2x , sm2x , em2x , & sp3x , ep3x , sm3x , em3x , & sp1y , ep1y , sm1y , em1y , & ! y-xpose dims sp2y , ep2y , sm2y , em2y , & sp3y , ep3y , sm3y , em3y , & bdyzone_x , bdyzone_y , new_grid%bdy_mask & ) new_grid%domdesc = new_domdesc new_grid%num_nests = 0 new_grid%num_siblings = 0 new_grid%num_parents = 0 new_grid%max_tiles = 0 new_grid%num_tiles_spec = 0 new_grid%nframes = 0 ! initialize the number of frames per file (array assignment) CALL alloc_space_field ( new_grid, domain_id , 3 , 3 , .FALSE. , & sd1, ed1, sd2, ed2, sd3, ed3, & sm1, em1, sm2, em2, sm3, em3, & sm1x, em1x, sm2x, em2x, sm3x, em3x, & ! x-xpose sm1y, em1y, sm2y, em2y, sm3y, em3y & ! 
y-xpose ) #if MOVE_NESTS !set these here, after alloc_space_field, which initializes vc_i, vc_j to zero new_grid%xi = -1.0 new_grid%xj = -1.0 new_grid%vc_i = -1.0 new_grid%vc_j = -1.0 #endif new_grid%sd31 = sd1 new_grid%ed31 = ed1 new_grid%sp31 = sp1 new_grid%ep31 = ep1 new_grid%sm31 = sm1 new_grid%em31 = em1 new_grid%sd32 = sd2 new_grid%ed32 = ed2 new_grid%sp32 = sp2 new_grid%ep32 = ep2 new_grid%sm32 = sm2 new_grid%em32 = em2 new_grid%sd33 = sd3 new_grid%ed33 = ed3 new_grid%sp33 = sp3 new_grid%ep33 = ep3 new_grid%sm33 = sm3 new_grid%em33 = em3 new_grid%sp31x = sp1x new_grid%ep31x = ep1x new_grid%sm31x = sm1x new_grid%em31x = em1x new_grid%sp32x = sp2x new_grid%ep32x = ep2x new_grid%sm32x = sm2x new_grid%em32x = em2x new_grid%sp33x = sp3x new_grid%ep33x = ep3x new_grid%sm33x = sm3x new_grid%em33x = em3x new_grid%sp31y = sp1y new_grid%ep31y = ep1y new_grid%sm31y = sm1y new_grid%em31y = em1y new_grid%sp32y = sp2y new_grid%ep32y = ep2y new_grid%sm32y = sm2y new_grid%em32y = em2y new_grid%sp33y = sp3y new_grid%ep33y = ep3y new_grid%sm33y = sm3y new_grid%em33y = em3y SELECT CASE ( model_data_order ) CASE ( DATA_ORDER_XYZ ) new_grid%sd21 = sd1 ; new_grid%sd22 = sd2 ; new_grid%ed21 = ed1 ; new_grid%ed22 = ed2 ; new_grid%sp21 = sp1 ; new_grid%sp22 = sp2 ; new_grid%ep21 = ep1 ; new_grid%ep22 = ep2 ; new_grid%sm21 = sm1 ; new_grid%sm22 = sm2 ; new_grid%em21 = em1 ; new_grid%em22 = em2 ; new_grid%sd11 = sd1 new_grid%ed11 = ed1 new_grid%sp11 = sp1 new_grid%ep11 = ep1 new_grid%sm11 = sm1 new_grid%em11 = em1 CASE ( DATA_ORDER_YXZ ) new_grid%sd21 = sd1 ; new_grid%sd22 = sd2 ; new_grid%ed21 = ed1 ; new_grid%ed22 = ed2 ; new_grid%sp21 = sp1 ; new_grid%sp22 = sp2 ; new_grid%ep21 = ep1 ; new_grid%ep22 = ep2 ; new_grid%sm21 = sm1 ; new_grid%sm22 = sm2 ; new_grid%em21 = em1 ; new_grid%em22 = em2 ; new_grid%sd11 = sd1 new_grid%ed11 = ed1 new_grid%sp11 = sp1 new_grid%ep11 = ep1 new_grid%sm11 = sm1 new_grid%em11 = em1 CASE ( DATA_ORDER_ZXY ) new_grid%sd21 = sd2 ; new_grid%sd22 = sd3 ; new_grid%ed21 = ed2 ; new_grid%ed22 = ed3 ; new_grid%sp21 = sp2 ; new_grid%sp22 = sp3 ; new_grid%ep21 = ep2 ; new_grid%ep22 = ep3 ; new_grid%sm21 = sm2 ; new_grid%sm22 = sm3 ; new_grid%em21 = em2 ; new_grid%em22 = em3 ; new_grid%sd11 = sd2 new_grid%ed11 = ed2 new_grid%sp11 = sp2 new_grid%ep11 = ep2 new_grid%sm11 = sm2 new_grid%em11 = em2 CASE ( DATA_ORDER_ZYX ) new_grid%sd21 = sd2 ; new_grid%sd22 = sd3 ; new_grid%ed21 = ed2 ; new_grid%ed22 = ed3 ; new_grid%sp21 = sp2 ; new_grid%sp22 = sp3 ; new_grid%ep21 = ep2 ; new_grid%ep22 = ep3 ; new_grid%sm21 = sm2 ; new_grid%sm22 = sm3 ; new_grid%em21 = em2 ; new_grid%em22 = em3 ; new_grid%sd11 = sd2 new_grid%ed11 = ed2 new_grid%sp11 = sp2 new_grid%ep11 = ep2 new_grid%sm11 = sm2 new_grid%em11 = em2 CASE ( DATA_ORDER_XZY ) new_grid%sd21 = sd1 ; new_grid%sd22 = sd3 ; new_grid%ed21 = ed1 ; new_grid%ed22 = ed3 ; new_grid%sp21 = sp1 ; new_grid%sp22 = sp3 ; new_grid%ep21 = ep1 ; new_grid%ep22 = ep3 ; new_grid%sm21 = sm1 ; new_grid%sm22 = sm3 ; new_grid%em21 = em1 ; new_grid%em22 = em3 ; new_grid%sd11 = sd1 new_grid%ed11 = ed1 new_grid%sp11 = sp1 new_grid%ep11 = ep1 new_grid%sm11 = sm1 new_grid%em11 = em1 CASE ( DATA_ORDER_YZX ) new_grid%sd21 = sd1 ; new_grid%sd22 = sd3 ; new_grid%ed21 = ed1 ; new_grid%ed22 = ed3 ; new_grid%sp21 = sp1 ; new_grid%sp22 = sp3 ; new_grid%ep21 = ep1 ; new_grid%ep22 = ep3 ; new_grid%sm21 = sm1 ; new_grid%sm22 = sm3 ; new_grid%em21 = em1 ; new_grid%em22 = em3 ; new_grid%sd11 = sd1 new_grid%ed11 = ed1 new_grid%sp11 = sp1 new_grid%ep11 = ep1 new_grid%sm11 = sm1 new_grid%em11 
= em1 END SELECT CALL med_add_config_info_to_grid ( new_grid ) ! this is a mediation layer routine ! Some miscellaneous state that is in the Registry but not namelist data new_grid%tiled = .false. new_grid%patched = .false. NULLIFY(new_grid%mapping) ! This next set of includes causes all but the namelist_derived variables to be ! properly assigned to the new_grid record grid => new_grid #ifdef DM_PARALLEL CALL wrf_get_dm_communicator ( grid%communicator ) CALL wrf_dm_define_comms( grid ) #endif END SUBROUTINE alloc_and_configure_domain ! ! This routine ALLOCATEs the required space for the meteorological fields ! for a specific domain. The fields are simply ALLOCATEd here; they ! are referenced as wind, temperature, moisture, etc. in routines that are ! below this top level of data allocation and management (in the solve routine ! and below). SUBROUTINE alloc_space_field ( grid, id, setinitval_in , tl_in , inter_domain_in , & sd31, ed31, sd32, ed32, sd33, ed33, & sm31 , em31 , sm32 , em32 , sm33 , em33 , & sm31x, em31x, sm32x, em32x, sm33x, em33x, & sm31y, em31y, sm32y, em32y, sm33y, em33y ) USE module_configure IMPLICIT NONE ! Input data. TYPE(domain) , POINTER :: grid INTEGER , INTENT(IN) :: id INTEGER , INTENT(IN) :: setinitval_in ! 3 = everything, 1 = arrays only, 0 = none INTEGER , INTENT(IN) :: sd31, ed31, sd32, ed32, sd33, ed33 INTEGER , INTENT(IN) :: sm31, em31, sm32, em32, sm33, em33 INTEGER , INTENT(IN) :: sm31x, em31x, sm32x, em32x, sm33x, em33x INTEGER , INTENT(IN) :: sm31y, em31y, sm32y, em32y, sm33y, em33y ! this argument is a bitmask. First bit is time level 1, second is time level 2, and so on. ! e.g. to set both the 1st and 2nd time levels, use 3 ! to set only the 1st use 1 ! to set only the 2nd use 2 INTEGER , INTENT(IN) :: tl_in ! true if the allocation is for an intermediate domain (for nesting); only certain fields allocated ! false otherwise (all allocated, modulo tl above) LOGICAL , INTENT(IN) :: inter_domain_in ! Local data. INTEGER dyn_opt, idum1, idum2, spec_bdy_width INTEGER num_bytes_allocated REAL initial_data_value CHARACTER (LEN=256) message INTEGER tl LOGICAL inter_domain INTEGER setinitval !declare ierr variable for error checking ALLOCATE calls INTEGER ierr INTEGER :: loop #if 1 tl = tl_in inter_domain = inter_domain_in #else tl = 3 inter_domain = .FALSE. #endif #if ( RWORDSIZE == 8 ) initial_data_value = 0. #else CALL get_initial_data_value ( initial_data_value ) #endif #ifdef NO_INITIAL_DATA_VALUE setinitval = 0 #else setinitval = setinitval_in #endif CALL nl_get_dyn_opt( 1, dyn_opt ) CALL nl_get_spec_bdy_width( 1, spec_bdy_width ) CALL set_scalar_indices_from_config( id , idum1 , idum2 ) num_bytes_allocated = 0 IF ( dyn_opt == DYN_NODYN ) THEN IF ( grid%id .EQ. 1 ) & CALL wrf_message ( 'DYNAMICS OPTION: dynamics disabled ' ) #if ALLOW_NODYN # include #else WRITE(message,*) & "To run the NODYN option, recompile with ", & "-DALLOW_NODYN added to the ARCHFLAGS settings of configure.wrf" CALL wrf_error_fatal( message ) #endif #if (EM_CORE == 1) ELSE IF ( dyn_opt == DYN_EM ) THEN IF ( grid%id .EQ. 1 ) CALL wrf_message ( & 'DYNAMICS OPTION: Eulerian Mass Coordinate ') # include #endif #if (NMM_CORE == 1) ELSE IF ( dyn_opt == DYN_NMM ) THEN IF ( grid%id .EQ. 1 ) & CALL wrf_message ( 'DYNAMICS OPTION: nmm dyncore' ) # include #endif #if (COAMPS_CORE == 1) ELSE IF ( dyn_opt == DYN_COAMPS ) THEN IF ( grid%id .EQ. 1 ) & CALL wrf_message ( 'DYNAMICS OPTION: coamps dyncore' ) # include #endif #if (EXP_CORE==1) ELSE IF ( dyn_opt == DYN_EXP ) THEN IF ( grid%id .EQ.
1 ) & CALL wrf_message ( 'DYNAMICS OPTION: experimental dyncore' ) # include #endif ELSE WRITE( wrf_err_message , * )& 'Invalid specification of dynamics: dyn_opt = ',dyn_opt CALL wrf_error_fatal ( TRIM ( wrf_err_message ) ) ENDIF WRITE(message,*)& 'alloc_space_field: domain ',id,' ',num_bytes_allocated CALL wrf_debug( 1, message ) END SUBROUTINE alloc_space_field ! This routine is used to DEALLOCATE space for a single domain and remove ! it from the linked list. First the pointers in the linked list are fixed ! (so the one in the middle can be removed). Then the domain itself is ! DEALLOCATEd via a call to domain_destroy(). SUBROUTINE dealloc_space_domain ( id ) IMPLICIT NONE ! Input data. INTEGER , INTENT(IN) :: id ! Local data. TYPE(domain) , POINTER :: grid LOGICAL :: found ! Initializations required to start the routine. grid => head_grid old_grid => head_grid found = .FALSE. ! The identity of the domain to delete is based upon the "id". ! We search all of the possible grids. A matching domain must be found; ! otherwise it is a fatal error. find_grid : DO WHILE ( ASSOCIATED(grid) ) IF ( grid%id == id ) THEN found = .TRUE. old_grid%next => grid%next CALL domain_destroy( grid ) EXIT find_grid END IF old_grid => grid grid => grid%next END DO find_grid IF ( .NOT. found ) THEN WRITE ( wrf_err_message , * ) 'module_domain: ', & 'dealloc_space_domain: Could not de-allocate grid id ',id CALL wrf_error_fatal ( TRIM( wrf_err_message ) ) END IF END SUBROUTINE dealloc_space_domain ! This routine is used to DEALLOCATE space for a single domain type. ! First, the field data are all removed through a CALL to the ! dealloc_space_field routine. Then the pointer to the domain ! itself is DEALLOCATEd. SUBROUTINE domain_destroy ( grid ) IMPLICIT NONE ! Input data. TYPE(domain) , POINTER :: grid CALL dealloc_space_field ( grid ) DEALLOCATE( grid%parents ) DEALLOCATE( grid%nests ) ! clean up time manager bits CALL domain_clock_destroy( grid ) CALL domain_alarms_destroy( grid ) IF ( ASSOCIATED( grid%i_start ) ) THEN DEALLOCATE( grid%i_start ) ENDIF IF ( ASSOCIATED( grid%i_end ) ) THEN DEALLOCATE( grid%i_end ) ENDIF IF ( ASSOCIATED( grid%j_start ) ) THEN DEALLOCATE( grid%j_start ) ENDIF IF ( ASSOCIATED( grid%j_end ) ) THEN DEALLOCATE( grid%j_end ) ENDIF DEALLOCATE( grid ) NULLIFY( grid ) END SUBROUTINE domain_destroy RECURSIVE SUBROUTINE show_nest_subtree ( grid ) TYPE(domain), POINTER :: grid INTEGER myid INTEGER kid IF ( .NOT. ASSOCIATED( grid ) ) RETURN myid = grid%id write(0,*)'show_nest_subtree ',myid DO kid = 1, max_nests IF ( ASSOCIATED( grid%nests(kid)%ptr ) ) THEN IF ( grid%nests(kid)%ptr%id .EQ. myid ) THEN CALL wrf_error_fatal( 'show_nest_subtree: nest hierarchy corrupted' ) ENDIF CALL show_nest_subtree( grid%nests(kid)%ptr ) ENDIF ENDDO END SUBROUTINE show_nest_subtree ! ! This routine DEALLOCATEs each gridded field for this domain. For each type of ! different array (1d, 2d, 3d, etc.), the space for each pointer is DEALLOCATEd ! for every field (i.e., each different meteorological field). SUBROUTINE dealloc_space_field ( grid ) IMPLICIT NONE ! Input data. TYPE(domain) , POINTER :: grid ! Local data. INTEGER :: dyn_opt, ierr CALL nl_get_dyn_opt( 1, dyn_opt ) IF ( .FALSE.
) THEN #if (EM_CORE == 1) ELSE IF ( dyn_opt == DYN_EM ) THEN # include #endif #if (NMM_CORE == 1) ELSE IF ( dyn_opt == DYN_NMM ) THEN # include #endif #if (COAMPS_CORE == 1) ELSE IF ( dyn_opt == DYN_COAMPS ) THEN # include #endif #if (EXP_CORE==1) ELSE IF ( dyn_opt == DYN_EXP ) THEN # include #endif ELSE WRITE( wrf_err_message , * )'dealloc_space_field: ', & 'Invalid specification of dynamics: dyn_opt = ',dyn_opt CALL wrf_error_fatal ( TRIM ( wrf_err_message ) ) ENDIF END SUBROUTINE dealloc_space_field ! ! RECURSIVE SUBROUTINE find_grid_by_id ( id, in_grid, result_grid ) IMPLICIT NONE INTEGER, INTENT(IN) :: id TYPE(domain), POINTER :: in_grid TYPE(domain), POINTER :: result_grid ! ! This is a recursive subroutine that traverses the domain hierarchy rooted ! at the input argument in_grid, a pointer to TYPE(domain), and returns ! a pointer to the domain matching the integer argument id if it exists. ! ! TYPE(domain), POINTER :: grid_ptr INTEGER :: kid LOGICAL :: found found = .FALSE. IF ( ASSOCIATED( in_grid ) ) THEN IF ( in_grid%id .EQ. id ) THEN result_grid => in_grid ELSE grid_ptr => in_grid DO WHILE ( ASSOCIATED( grid_ptr ) .AND. .NOT. found ) DO kid = 1, max_nests IF ( ASSOCIATED( grid_ptr%nests(kid)%ptr ) .AND. .NOT. found ) THEN CALL find_grid_by_id ( id, grid_ptr%nests(kid)%ptr, result_grid ) IF ( ASSOCIATED( result_grid ) ) THEN IF ( result_grid%id .EQ. id ) found = .TRUE. ENDIF ENDIF ENDDO IF ( .NOT. found ) grid_ptr => grid_ptr%sibling ENDDO ENDIF ENDIF RETURN END SUBROUTINE find_grid_by_id FUNCTION first_loc_integer ( array , search ) RESULT ( loc ) IMPLICIT NONE ! Input data. INTEGER , INTENT(IN) , DIMENSION(:) :: array INTEGER , INTENT(IN) :: search ! Output data. INTEGER :: loc ! ! This routine is used to find a specific domain identifier in an array ! of domain identifiers. ! ! ! Local data. INTEGER :: loop loc = -1 find : DO loop = 1 , SIZE(array) IF ( search == array(loop) ) THEN loc = loop EXIT find END IF END DO find END FUNCTION first_loc_integer ! SUBROUTINE init_module_domain END SUBROUTINE init_module_domain ! ! ! The following routines named domain_*() are convenience routines that ! eliminate many duplicated bits of code. They provide shortcuts for the ! most common operations on the domain_clock field of TYPE(domain). ! ! FUNCTION domain_get_current_time ( grid ) RESULT ( current_time ) IMPLICIT NONE ! ! This convenience function returns the current time for domain grid. ! ! TYPE(domain), INTENT(IN) :: grid ! result TYPE(WRFU_Time) :: current_time ! locals INTEGER :: rc CALL WRFU_ClockGet( grid%domain_clock, CurrTime=current_time, & rc=rc ) IF ( rc /= WRFU_SUCCESS ) THEN CALL wrf_error_fatal ( & 'domain_get_current_time: WRFU_ClockGet failed' ) ENDIF END FUNCTION domain_get_current_time FUNCTION domain_get_start_time ( grid ) RESULT ( start_time ) IMPLICIT NONE ! ! This convenience function returns the start time for domain grid. ! ! TYPE(domain), INTENT(IN) :: grid ! result TYPE(WRFU_Time) :: start_time ! locals INTEGER :: rc CALL WRFU_ClockGet( grid%domain_clock, StartTime=start_time, & rc=rc ) IF ( rc /= WRFU_SUCCESS ) THEN CALL wrf_error_fatal ( & 'domain_get_start_time: WRFU_ClockGet failed' ) ENDIF END FUNCTION domain_get_start_time FUNCTION domain_get_stop_time ( grid ) RESULT ( stop_time ) IMPLICIT NONE ! ! This convenience function returns the stop time for domain grid. ! ! TYPE(domain), INTENT(IN) :: grid ! result TYPE(WRFU_Time) :: stop_time ! 
locals INTEGER :: rc CALL WRFU_ClockGet( grid%domain_clock, StopTime=stop_time, & rc=rc ) IF ( rc /= WRFU_SUCCESS ) THEN CALL wrf_error_fatal ( & 'domain_get_stop_time: WRFU_ClockGet failed' ) ENDIF END FUNCTION domain_get_stop_time FUNCTION domain_get_time_step ( grid ) RESULT ( time_step ) IMPLICIT NONE ! ! This convenience function returns the time step for domain grid. ! ! TYPE(domain), INTENT(IN) :: grid ! result TYPE(WRFU_TimeInterval) :: time_step ! locals INTEGER :: rc CALL WRFU_ClockGet( grid%domain_clock, timeStep=time_step, & rc=rc ) IF ( rc /= WRFU_SUCCESS ) THEN CALL wrf_error_fatal ( & 'domain_get_time_step: WRFU_ClockGet failed' ) ENDIF END FUNCTION domain_get_time_step FUNCTION domain_get_advanceCount ( grid ) RESULT ( advanceCount ) IMPLICIT NONE ! ! This convenience function returns the number of times the clock for ! domain grid has been advanced. ! Also converts from INTEGER(WRFU_KIND_I8) to INTEGER. ! ! TYPE(domain), INTENT(IN) :: grid ! result INTEGER :: advanceCount ! locals INTEGER(WRFU_KIND_I8) :: advanceCountLcl INTEGER :: rc CALL WRFU_ClockGet( grid%domain_clock, & advanceCount=advanceCountLcl, & rc=rc ) IF ( rc /= WRFU_SUCCESS ) THEN CALL wrf_error_fatal ( & 'domain_get_advanceCount: WRFU_ClockGet failed' ) ENDIF advanceCount = advanceCountLcl END FUNCTION domain_get_advanceCount SUBROUTINE domain_alarms_destroy ( grid ) IMPLICIT NONE ! ! This convenience routine destroys and deallocates all alarms associated ! with grid. ! ! TYPE(domain), INTENT(INOUT) :: grid ! Local data. INTEGER :: alarmid IF ( ASSOCIATED( grid%alarms ) .AND. & ASSOCIATED( grid%alarms_created ) ) THEN DO alarmid = 1, MAX_WRF_ALARMS IF ( grid%alarms_created( alarmid ) ) THEN CALL WRFU_AlarmDestroy( grid%alarms( alarmid ) ) grid%alarms_created( alarmid ) = .FALSE. ENDIF ENDDO DEALLOCATE( grid%alarms ) NULLIFY( grid%alarms ) DEALLOCATE( grid%alarms_created ) NULLIFY( grid%alarms_created ) ENDIF END SUBROUTINE domain_alarms_destroy SUBROUTINE domain_clock_destroy ( grid ) IMPLICIT NONE ! ! This convenience routine destroys and deallocates the domain clock. ! ! TYPE(domain), INTENT(INOUT) :: grid IF ( ASSOCIATED( grid%domain_clock ) ) THEN IF ( grid%domain_clock_created ) THEN CALL WRFU_ClockDestroy( grid%domain_clock ) grid%domain_clock_created = .FALSE. ENDIF DEALLOCATE( grid%domain_clock ) NULLIFY( grid%domain_clock ) ENDIF END SUBROUTINE domain_clock_destroy FUNCTION domain_last_time_step ( grid ) RESULT ( LAST_TIME ) IMPLICIT NONE ! ! This convenience function returns .TRUE. if this is the last time ! step for domain grid. Thanks to Tom Black. ! ! TYPE(domain), INTENT(IN) :: grid ! result LOGICAL :: LAST_TIME LAST_TIME = domain_get_stop_time( grid ) .EQ. & ( domain_get_current_time( grid ) + & domain_get_time_step( grid ) ) END FUNCTION domain_last_time_step FUNCTION domain_clockisstoptime ( grid ) RESULT ( is_stop_time ) IMPLICIT NONE ! ! This convenience function returns .TRUE. iff grid%clock has reached its ! stop time. ! ! TYPE(domain), INTENT(IN) :: grid ! result LOGICAL :: is_stop_time INTEGER :: rc is_stop_time = WRFU_ClockIsStopTime( grid%domain_clock , rc=rc ) IF ( rc /= WRFU_SUCCESS ) THEN CALL wrf_error_fatal ( & 'domain_clockisstoptime: WRFU_ClockIsStopTime() failed' ) ENDIF END FUNCTION domain_clockisstoptime FUNCTION domain_clockisstopsubtime ( grid ) RESULT ( is_stop_subtime ) IMPLICIT NONE ! ! This convenience function returns .TRUE. iff grid%clock has reached its ! grid%stop_subtime. ! ! TYPE(domain), INTENT(IN) :: grid !
result LOGICAL :: is_stop_subtime INTEGER :: rc TYPE(WRFU_TimeInterval) :: timeStep TYPE(WRFU_Time) :: currentTime LOGICAL :: positive_timestep is_stop_subtime = .FALSE. CALL domain_clock_get( grid, time_step=timeStep, & current_time=currentTime ) positive_timestep = ESMF_TimeIntervalIsPositive( timeStep ) IF ( positive_timestep ) THEN ! hack for bug in PGI 5.1-x ! IF ( currentTime .GE. grid%stop_subtime ) THEN IF ( ESMF_TimeGE( currentTime, grid%stop_subtime ) ) THEN is_stop_subtime = .TRUE. ENDIF ELSE ! hack for bug in PGI 5.1-x ! IF ( currentTime .LE. grid%stop_subtime ) THEN IF ( ESMF_TimeLE( currentTime, grid%stop_subtime ) ) THEN is_stop_subtime = .TRUE. ENDIF ENDIF END FUNCTION domain_clockisstopsubtime FUNCTION domain_get_sim_start_time ( grid ) RESULT ( simulationStartTime ) IMPLICIT NONE ! ! This convenience routine returns simulation start time for domain grid as ! a time instant. ! ! If this is not a restart run, the start_time of head_grid%clock is returned ! instead. ! ! Note that simulation start time remains constant through restarts while ! the start_time of head_grid%clock always refers to the start time of the ! current run (restart or otherwise). ! ! TYPE(domain), INTENT(IN) :: grid ! result TYPE(WRFU_Time) :: simulationStartTime ! Locals INTEGER :: rc INTEGER :: simulation_start_year, simulation_start_month, & simulation_start_day, simulation_start_hour , & simulation_start_minute, simulation_start_second CALL nl_get_simulation_start_year ( 1, simulation_start_year ) CALL nl_get_simulation_start_month ( 1, simulation_start_month ) CALL nl_get_simulation_start_day ( 1, simulation_start_day ) CALL nl_get_simulation_start_hour ( 1, simulation_start_hour ) CALL nl_get_simulation_start_minute ( 1, simulation_start_minute ) CALL nl_get_simulation_start_second ( 1, simulation_start_second ) CALL WRFU_TimeSet( simulationStartTime, & YY=simulation_start_year, & MM=simulation_start_month, & DD=simulation_start_day, & H=simulation_start_hour, & M=simulation_start_minute, & S=simulation_start_second, & rc=rc ) IF ( rc /= WRFU_SUCCESS ) THEN CALL nl_get_start_year ( 1, simulation_start_year ) CALL nl_get_start_month ( 1, simulation_start_month ) CALL nl_get_start_day ( 1, simulation_start_day ) CALL nl_get_start_hour ( 1, simulation_start_hour ) CALL nl_get_start_minute ( 1, simulation_start_minute ) CALL nl_get_start_second ( 1, simulation_start_second ) CALL wrf_debug( 150, "WARNING: domain_get_sim_start_time using head_grid start time from namelist" ) CALL WRFU_TimeSet( simulationStartTime, & YY=simulation_start_year, & MM=simulation_start_month, & DD=simulation_start_day, & H=simulation_start_hour, & M=simulation_start_minute, & S=simulation_start_second, & rc=rc ) ENDIF RETURN END FUNCTION domain_get_sim_start_time FUNCTION domain_get_time_since_sim_start ( grid ) RESULT ( time_since_sim_start ) IMPLICIT NONE ! ! This convenience function returns the time elapsed since start of ! simulation for domain grid. ! ! Note that simulation start time remains constant through restarts while ! the start_time of grid%clock always refers to the start time of the ! current run (restart or otherwise). ! ! TYPE(domain), INTENT(IN) :: grid ! result TYPE(WRFU_TimeInterval) :: time_since_sim_start ! 
locals TYPE(WRFU_Time) :: lcl_currtime, lcl_simstarttime lcl_simstarttime = domain_get_sim_start_time( grid ) lcl_currtime = domain_get_current_time ( grid ) time_since_sim_start = lcl_currtime - lcl_simstarttime END FUNCTION domain_get_time_since_sim_start SUBROUTINE domain_clock_get( grid, current_time, & current_timestr, & current_timestr_frac, & start_time, start_timestr, & stop_time, stop_timestr, & time_step, time_stepstr, & time_stepstr_frac, & advanceCount, & currentDayOfYearReal, & minutesSinceSimulationStart, & timeSinceSimulationStart, & simulationStartTime, & simulationStartTimeStr ) IMPLICIT NONE TYPE(domain), INTENT(IN) :: grid TYPE(WRFU_Time), INTENT( OUT), OPTIONAL :: current_time CHARACTER (LEN=*), INTENT( OUT), OPTIONAL :: current_timestr CHARACTER (LEN=*), INTENT( OUT), OPTIONAL :: current_timestr_frac TYPE(WRFU_Time), INTENT( OUT), OPTIONAL :: start_time CHARACTER (LEN=*), INTENT( OUT), OPTIONAL :: start_timestr TYPE(WRFU_Time), INTENT( OUT), OPTIONAL :: stop_time CHARACTER (LEN=*), INTENT( OUT), OPTIONAL :: stop_timestr TYPE(WRFU_TimeInterval), INTENT( OUT), OPTIONAL :: time_step CHARACTER (LEN=*), INTENT( OUT), OPTIONAL :: time_stepstr CHARACTER (LEN=*), INTENT( OUT), OPTIONAL :: time_stepstr_frac INTEGER, INTENT( OUT), OPTIONAL :: advanceCount ! currentDayOfYearReal = 0.0 at 0Z on 1 January, 0.5 at 12Z on ! 1 January, etc. REAL, INTENT( OUT), OPTIONAL :: currentDayOfYearReal ! Time at which simulation started. If this is not a restart run, ! start_time is returned instead. TYPE(WRFU_Time), INTENT( OUT), OPTIONAL :: simulationStartTime CHARACTER (LEN=*), INTENT( OUT), OPTIONAL :: simulationStartTimeStr ! time interval since start of simulation, includes effects of ! restarting even when restart uses a different timestep TYPE(WRFU_TimeInterval), INTENT( OUT), OPTIONAL :: timeSinceSimulationStart ! minutes since simulation start date REAL, INTENT( OUT), OPTIONAL :: minutesSinceSimulationStart ! ! This convenience routine returns clock information for domain grid in ! various forms. The caller is responsible for ensuring that character ! string actual arguments are big enough. ! ! ! Locals TYPE(WRFU_Time) :: lcl_currtime, lcl_stoptime, lcl_starttime TYPE(WRFU_Time) :: lcl_simulationStartTime TYPE(WRFU_TimeInterval) :: lcl_time_step, lcl_timeSinceSimulationStart INTEGER :: days, seconds, Sn, Sd, rc CHARACTER (LEN=256) :: tmp_str CHARACTER (LEN=256) :: frac_str REAL(WRFU_KIND_R8) :: currentDayOfYearR8 IF ( PRESENT( start_time ) ) THEN start_time = domain_get_start_time ( grid ) ENDIF IF ( PRESENT( start_timestr ) ) THEN lcl_starttime = domain_get_start_time ( grid ) CALL wrf_timetoa ( lcl_starttime, start_timestr ) ENDIF IF ( PRESENT( time_step ) ) THEN time_step = domain_get_time_step ( grid ) ENDIF IF ( PRESENT( time_stepstr ) ) THEN lcl_time_step = domain_get_time_step ( grid ) CALL WRFU_TimeIntervalGet( lcl_time_step, & timeString=time_stepstr, rc=rc ) IF ( rc /= WRFU_SUCCESS ) THEN CALL wrf_error_fatal ( & 'domain_clock_get: WRFU_TimeIntervalGet() failed' ) ENDIF ENDIF IF ( PRESENT( time_stepstr_frac ) ) THEN lcl_time_step = domain_get_time_step ( grid ) CALL WRFU_TimeIntervalGet( lcl_time_step, timeString=tmp_str, & Sn=Sn, Sd=Sd, rc=rc ) IF ( rc /= WRFU_SUCCESS ) THEN CALL wrf_error_fatal ( & 'domain_clock_get: WRFU_TimeIntervalGet() failed' ) ENDIF CALL fraction_to_string( Sn, Sd, frac_str ) time_stepstr_frac = TRIM(tmp_str)//TRIM(frac_str) ENDIF IF ( PRESENT( advanceCount ) ) THEN advanceCount = domain_get_advanceCount ( grid ) ENDIF ! 
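! For example, a caller that only needs the current time as a string can do
! the following (the buffer length shown is illustrative; the caller is
! responsible for making it large enough):
!
!   CHARACTER (LEN=80) :: timestr
!   CALL domain_clock_get( grid, current_timestr=timestr )
!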
This duplication avoids assignment of time-manager objects ! which works now in ESMF 2.2.0 but may not work in the future ! if these objects become "deep". We have already been bitten ! by this when the clock objects were changed from "shallow" to ! "deep". Once again, adherence to orthodox canonical form by ! ESMF would avoid all this crap. IF ( PRESENT( current_time ) ) THEN current_time = domain_get_current_time ( grid ) ENDIF IF ( PRESENT( current_timestr ) ) THEN lcl_currtime = domain_get_current_time ( grid ) CALL wrf_timetoa ( lcl_currtime, current_timestr ) ENDIF ! current time string including fractional part, if present IF ( PRESENT( current_timestr_frac ) ) THEN lcl_currtime = domain_get_current_time ( grid ) CALL wrf_timetoa ( lcl_currtime, tmp_str ) CALL WRFU_TimeGet( lcl_currtime, Sn=Sn, Sd=Sd, rc=rc ) IF ( rc /= WRFU_SUCCESS ) THEN CALL wrf_error_fatal ( & 'domain_clock_get: WRFU_TimeGet() failed' ) ENDIF CALL fraction_to_string( Sn, Sd, frac_str ) current_timestr_frac = TRIM(tmp_str)//TRIM(frac_str) ENDIF IF ( PRESENT( stop_time ) ) THEN stop_time = domain_get_stop_time ( grid ) ENDIF IF ( PRESENT( stop_timestr ) ) THEN lcl_stoptime = domain_get_stop_time ( grid ) CALL wrf_timetoa ( lcl_stoptime, stop_timestr ) ENDIF IF ( PRESENT( currentDayOfYearReal ) ) THEN lcl_currtime = domain_get_current_time ( grid ) CALL WRFU_TimeGet( lcl_currtime, dayOfYear_r8=currentDayOfYearR8, & rc=rc ) IF ( rc /= WRFU_SUCCESS ) THEN CALL wrf_error_fatal ( & 'domain_clock_get: WRFU_TimeGet(dayOfYear_r8) failed' ) ENDIF currentDayOfYearReal = REAL( currentDayOfYearR8 ) - 1.0 ENDIF IF ( PRESENT( simulationStartTime ) ) THEN simulationStartTime = domain_get_sim_start_time( grid ) ENDIF IF ( PRESENT( simulationStartTimeStr ) ) THEN lcl_simulationStartTime = domain_get_sim_start_time( grid ) CALL wrf_timetoa ( lcl_simulationStartTime, simulationStartTimeStr ) ENDIF IF ( PRESENT( timeSinceSimulationStart ) ) THEN timeSinceSimulationStart = domain_get_time_since_sim_start( grid ) ENDIF IF ( PRESENT( minutesSinceSimulationStart ) ) THEN lcl_timeSinceSimulationStart = domain_get_time_since_sim_start( grid ) CALL WRFU_TimeIntervalGet( lcl_timeSinceSimulationStart, & D=days, S=seconds, Sn=Sn, Sd=Sd, rc=rc ) IF ( rc /= WRFU_SUCCESS ) THEN CALL wrf_error_fatal ( & 'domain_clock_get: WRFU_TimeIntervalGet() failed' ) ENDIF ! get rid of hard-coded constants minutesSinceSimulationStart = ( REAL( days ) * 24. * 37. ) + & ( REAL( seconds ) / 100. ) IF ( Sd /= 0 ) THEN minutesSinceSimulationStart = minutesSinceSimulationStart + & ( ( REAL( Sn ) / REAL( Sd ) ) / 100. ) ENDIF ENDIF RETURN END SUBROUTINE domain_clock_get FUNCTION domain_clockisstarttime ( grid ) RESULT ( is_start_time ) IMPLICIT NONE ! ! This convenience function returns .TRUE. iff grid%clock is at its ! start time. ! ! TYPE(domain), INTENT(IN) :: grid ! result LOGICAL :: is_start_time TYPE(WRFU_Time) :: start_time, current_time CALL domain_clock_get( grid, current_time=current_time, & start_time=start_time ) is_start_time = ( current_time == start_time ) END FUNCTION domain_clockisstarttime FUNCTION domain_clockissimstarttime ( grid ) RESULT ( is_sim_start_time ) IMPLICIT NONE ! ! This convenience function returns .TRUE. iff grid%clock is at the ! simulation start time. (It returns .FALSE. during a restart run.) ! ! TYPE(domain), INTENT(IN) :: grid ! 
result LOGICAL :: is_sim_start_time TYPE(WRFU_Time) :: simulationStartTime, current_time CALL domain_clock_get( grid, current_time=current_time, & simulationStartTime=simulationStartTime ) is_sim_start_time = ( current_time == simulationStartTime ) END FUNCTION domain_clockissimstarttime SUBROUTINE domain_clock_create( grid, StartTime, & StopTime, & TimeStep ) IMPLICIT NONE TYPE(domain), INTENT(INOUT) :: grid TYPE(WRFU_Time), INTENT(IN ) :: StartTime TYPE(WRFU_Time), INTENT(IN ) :: StopTime TYPE(WRFU_TimeInterval), INTENT(IN ) :: TimeStep ! ! This convenience routine creates the domain_clock for domain grid and ! sets associated flags. ! ! ! Locals INTEGER :: rc grid%domain_clock = WRFU_ClockCreate( TimeStep= TimeStep, & StartTime=StartTime, & StopTime= StopTime, & rc=rc ) IF ( rc /= WRFU_SUCCESS ) THEN CALL wrf_error_fatal ( & 'domain_clock_create: WRFU_ClockCreate() failed' ) ENDIF grid%domain_clock_created = .TRUE. RETURN END SUBROUTINE domain_clock_create SUBROUTINE domain_alarm_create( grid, alarm_id, interval, & begin_time, end_time ) USE module_utility IMPLICIT NONE TYPE(domain), POINTER :: grid INTEGER, INTENT(IN) :: alarm_id TYPE(WRFU_TimeInterval), INTENT(IN), OPTIONAL :: interval TYPE(WRFU_TimeInterval), INTENT(IN), OPTIONAL :: begin_time TYPE(WRFU_TimeInterval), INTENT(IN), OPTIONAL :: end_time ! ! This convenience routine creates alarm alarm_id for domain grid and ! sets associated flags. ! ! ! Locals INTEGER :: rc !$$$ TBH: Ideally, this could be simplified by passing all optional actual !$$$ TBH: args into AlarmCreate. However, since operations are performed on !$$$ TBH: the actual args in-place in the calls, they must be present for the !$$$ TBH: operations themselves to be defined. Grrr... LOGICAL :: interval_only, all_args, no_args TYPE(WRFU_Time) :: startTime interval_only = .FALSE. all_args = .FALSE. no_args = .FALSE. IF ( ( .NOT. PRESENT( begin_time ) ) .AND. & ( .NOT. PRESENT( end_time ) ) .AND. & ( PRESENT( interval ) ) ) THEN interval_only = .TRUE. ELSE IF ( ( .NOT. PRESENT( begin_time ) ) .AND. & ( .NOT. PRESENT( end_time ) ) .AND. & ( .NOT. PRESENT( interval ) ) ) THEN no_args = .TRUE. ELSE IF ( ( PRESENT( begin_time ) ) .AND. & ( PRESENT( end_time ) ) .AND. & ( PRESENT( interval ) ) ) THEN all_args = .TRUE. ELSE CALL wrf_error_fatal ( & 'ERROR in domain_alarm_create: bad argument list' ) ENDIF CALL domain_clock_get( grid, start_time=startTime ) IF ( interval_only ) THEN grid%io_intervals( alarm_id ) = interval grid%alarms( alarm_id ) = & WRFU_AlarmCreate( clock=grid%domain_clock, & RingInterval=interval, & rc=rc ) ELSE IF ( no_args ) THEN grid%alarms( alarm_id ) = & WRFU_AlarmCreate( clock=grid%domain_clock, & RingTime=startTime, & rc=rc ) ELSE IF ( all_args ) THEN grid%io_intervals( alarm_id ) = interval grid%alarms( alarm_id ) = & WRFU_AlarmCreate( clock=grid%domain_clock, & RingTime=startTime + begin_time, & RingInterval=interval, & StopTime=startTime + end_time, & rc=rc ) ENDIF IF ( rc /= WRFU_SUCCESS ) THEN CALL wrf_error_fatal ( & 'domain_alarm_create: WRFU_AlarmCreate() failed' ) ENDIF grid%alarms_created( alarm_id ) = .TRUE. END SUBROUTINE domain_alarm_create SUBROUTINE domain_clock_set( grid, current_timestr, & stop_timestr, & time_step_seconds ) IMPLICIT NONE TYPE(domain), INTENT(INOUT) :: grid CHARACTER (LEN=*), INTENT(IN ), OPTIONAL :: current_timestr CHARACTER (LEN=*), INTENT(IN ), OPTIONAL :: stop_timestr INTEGER, INTENT(IN ), OPTIONAL :: time_step_seconds ! ! This convenience routine sets clock information for domain grid. ! 
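! A minimal usage sketch (the string length and values shown are illustrative
! only; wrf_atotime expects a WRF-style date string, e.g. YYYY-MM-DD_hh:mm:ss):
!
!   CHARACTER (LEN=19) :: stopstr
!   stopstr = '2000-01-25_00:00:00'
!   CALL domain_clock_set( grid, stop_timestr=stopstr, time_step_seconds=60 )
!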
      ! The caller is responsible for ensuring that character string actual
      ! arguments are big enough.
      !
      !
      ! Locals
      TYPE(WRFU_Time)         :: lcl_currtime, lcl_stoptime
      TYPE(WRFU_TimeInterval) :: tmpTimeInterval
      INTEGER :: rc
      IF ( PRESENT( current_timestr ) ) THEN
        CALL wrf_atotime( current_timestr(1:19), lcl_currtime )
        CALL WRFU_ClockSet( grid%domain_clock, currTime=lcl_currtime, &
                            rc=rc )
        IF ( rc /= WRFU_SUCCESS ) THEN
          CALL wrf_error_fatal ( &
            'domain_clock_set: WRFU_ClockSet(CurrTime) failed' )
        ENDIF
      ENDIF
      IF ( PRESENT( stop_timestr ) ) THEN
        CALL wrf_atotime( stop_timestr(1:19), lcl_stoptime )
        CALL WRFU_ClockSet( grid%domain_clock, stopTime=lcl_stoptime, &
                            rc=rc )
        IF ( rc /= WRFU_SUCCESS ) THEN
          CALL wrf_error_fatal ( &
            'domain_clock_set: WRFU_ClockSet(StopTime) failed' )
        ENDIF
      ENDIF
      IF ( PRESENT( time_step_seconds ) ) THEN
        CALL WRFU_TimeIntervalSet( tmpTimeInterval, &
                                   S=time_step_seconds, rc=rc )
        IF ( rc /= WRFU_SUCCESS ) THEN
          CALL wrf_error_fatal ( &
            'domain_clock_set: WRFU_TimeIntervalSet failed' )
        ENDIF
        CALL WRFU_ClockSet ( grid%domain_clock,        &
                             timeStep=tmpTimeInterval, &
                             rc=rc )
        IF ( rc /= WRFU_SUCCESS ) THEN
          CALL wrf_error_fatal ( &
            'domain_clock_set: WRFU_ClockSet(TimeStep) failed' )
        ENDIF
      ENDIF
      RETURN
    END SUBROUTINE domain_clock_set

    ! Debug routine to print key clock information.
    ! Printed lines include pre_str.
    SUBROUTINE domain_clockprint ( level, grid, pre_str )
      IMPLICIT NONE
      INTEGER,           INTENT( IN) :: level
      TYPE(domain),      INTENT( IN) :: grid
      CHARACTER (LEN=*), INTENT( IN) :: pre_str
      CALL wrf_clockprint ( level, grid%domain_clock, pre_str )
      RETURN
    END SUBROUTINE domain_clockprint

    ! Advance the clock associated with grid.
    ! Also updates several derived time quantities in grid state.
    SUBROUTINE domain_clockadvance ( grid )
      IMPLICIT NONE
      TYPE(domain), INTENT(INOUT) :: grid
      INTEGER :: rc
      CALL domain_clockprint ( 250, grid, &
        'DEBUG domain_clockadvance(): before WRFU_ClockAdvance,' )
      CALL WRFU_ClockAdvance( grid%domain_clock, rc=rc )
      IF ( rc /= WRFU_SUCCESS ) THEN
        CALL wrf_error_fatal ( &
          'domain_clockadvance: WRFU_ClockAdvance() failed' )
      ENDIF
      CALL domain_clockprint ( 250, grid, &
        'DEBUG domain_clockadvance(): after WRFU_ClockAdvance,' )
      ! Update derived time quantities in grid state.
      ! These are initialized in setup_timekeeping().
      CALL domain_clock_get( grid, minutesSinceSimulationStart=grid%xtime )
      CALL domain_clock_get( grid, currentDayOfYearReal=grid%julian )
      RETURN
    END SUBROUTINE domain_clockadvance

    ! Set grid%gmt, grid%julday, and grid%julyr from simulation-start-date.
    ! Set start_of_simulation to TRUE iff current_time == simulation_start_time
    SUBROUTINE domain_setgmtetc ( grid, start_of_simulation )
      IMPLICIT NONE
      TYPE (domain), INTENT(INOUT) :: grid
      LOGICAL,       INTENT( OUT)  :: start_of_simulation
      ! locals
      CHARACTER (LEN=132) :: message
      TYPE(WRFU_Time)     :: simStartTime
      INTEGER             :: hr, mn, sec, ms, rc
      CALL domain_clockprint(150, grid, &
        'DEBUG domain_setgmtetc(): get simStartTime from clock,')
      CALL domain_clock_get( grid, simulationStartTime=simStartTime, &
                                   simulationStartTimeStr=message )
      CALL WRFU_TimeGet( simStartTime, YY=grid%julyr, dayOfYear=grid%julday, &
                         H=hr, M=mn, S=sec, MS=ms, rc=rc)
      IF ( rc /= WRFU_SUCCESS ) THEN
        CALL wrf_error_fatal ( &
          'domain_setgmtetc: WRFU_TimeGet() failed' )
      ENDIF
      WRITE( wrf_err_message , * ) 'DEBUG domain_setgmtetc(): simulation start time = [',TRIM( message ),']'
      CALL wrf_debug( 150, TRIM(wrf_err_message) )
!!!   grid%gmt=hr+real(mn)/60.+real(sec)/3600.+real(ms)/(1000*3600)   !!!MARS
      grid%gmt=hr+real(mn)/37.+real(sec)/3700.+real(ms)/(1000*3700)   !!!VENUS
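      ! NOTE (editorial): the active conversion above follows the planetary
      ! time convention of this build (3700-second "hours" made of 37
      ! "minutes" of 100 s each), consistent with the
      ! minutesSinceSimulationStart conversion in domain_clock_get().
      ! The commented-out lines below are alternative formulations kept
      ! for reference.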
!     juldayv = MODULO(juldayv,2400.)
!     grid%gmt=(((juldayv)/100.))+(hr+real(mn)/13.+real(sec)/117.+real(ms)/(1000*117))/420000.
!     grid%gmt = MODULO(grid%gmt,24.)
      WRITE( wrf_err_message , * ) 'DEBUG domain_setgmtetc(): julyr,hr,mn,sec,ms,julday = ', &
                                   grid%julyr,hr,mn,sec,ms,grid%julday
      CALL wrf_debug( 150, TRIM(wrf_err_message) )
      WRITE( wrf_err_message , * ) 'DEBUG domain_setgmtetc(): gmt = ',grid%gmt
      CALL wrf_debug( 150, TRIM(wrf_err_message) )
      start_of_simulation = domain_ClockIsSimStartTime(grid)
      RETURN
    END SUBROUTINE domain_setgmtetc

    ! Set pointer to current grid.
    ! To begin with, current grid is not set.
    SUBROUTINE set_current_grid_ptr( grid_ptr )
      IMPLICIT NONE
      TYPE(domain), POINTER :: grid_ptr
!PRINT *,'DEBUG: begin set_current_grid_ptr()'
!IF ( ASSOCIATED( grid_ptr ) ) THEN
!  PRINT *,'DEBUG: set_current_grid_ptr(): current_grid is associated'
!ELSE
!  PRINT *,'DEBUG: set_current_grid_ptr(): current_grid is NOT associated'
!ENDIF
      current_grid_set = .TRUE.
      current_grid => grid_ptr
!PRINT *,'DEBUG: end set_current_grid_ptr()'
    END SUBROUTINE set_current_grid_ptr

!******************************************************************************
! BEGIN TEST SECTION
!   Code in the test section is used to test domain methods.
!   This code should probably be moved elsewhere, eventually.
!******************************************************************************

    ! Private utility routines for domain_time_test.
    SUBROUTINE domain_time_test_print ( pre_str, name_str, res_str )
      IMPLICIT NONE
      CHARACTER (LEN=*), INTENT(IN) :: pre_str
      CHARACTER (LEN=*), INTENT(IN) :: name_str
      CHARACTER (LEN=*), INTENT(IN) :: res_str
      CHARACTER (LEN=512) :: out_str
      WRITE (out_str, &
        FMT="('DOMAIN_TIME_TEST ',A,': ',A,' = ',A)") &
        TRIM(pre_str), TRIM(name_str), TRIM(res_str)
      CALL wrf_debug( 0, TRIM(out_str) )
    END SUBROUTINE domain_time_test_print

    ! Test adjust_io_timestr
    SUBROUTINE test_adjust_io_timestr( TI_h, TI_m, TI_s,                  &
                 CT_yy, CT_mm, CT_dd, CT_h, CT_m, CT_s,                   &
                 ST_yy, ST_mm, ST_dd, ST_h, ST_m, ST_s,                   &
                 res_str, testname )
      INTEGER, INTENT(IN) :: TI_H
      INTEGER, INTENT(IN) :: TI_M
      INTEGER, INTENT(IN) :: TI_S
      INTEGER, INTENT(IN) :: CT_YY
      INTEGER, INTENT(IN) :: CT_MM  ! month
      INTEGER, INTENT(IN) :: CT_DD  ! day of month
      INTEGER, INTENT(IN) :: CT_H
      INTEGER, INTENT(IN) :: CT_M
      INTEGER, INTENT(IN) :: CT_S
      INTEGER, INTENT(IN) :: ST_YY
      INTEGER, INTENT(IN) :: ST_MM  ! month
      INTEGER, INTENT(IN) :: ST_DD  ! day of month
      INTEGER, INTENT(IN) :: ST_H
      INTEGER, INTENT(IN) :: ST_M
      INTEGER, INTENT(IN) :: ST_S
      CHARACTER (LEN=*), INTENT(IN) :: res_str
      CHARACTER (LEN=*), INTENT(IN) :: testname
      ! locals
      TYPE(WRFU_TimeInterval) :: TI
      TYPE(WRFU_Time)         :: CT, ST
      LOGICAL :: test_passed
      INTEGER :: rc
      CHARACTER(LEN=WRFU_MAXSTR) :: TI_str, CT_str, ST_str, computed_str
      ! TI
      CALL WRFU_TimeIntervalSet( TI, H=TI_H, M=TI_M, S=TI_S, rc=rc )
      CALL wrf_check_error( WRFU_SUCCESS, rc, &
                            'FAIL: '//TRIM(testname)//'WRFU_TimeIntervalSet() ', &
                            __FILE__ , &
                            __LINE__  )
      CALL WRFU_TimeIntervalGet( TI, timeString=TI_str, rc=rc )
      CALL wrf_check_error( WRFU_SUCCESS, rc, &
                            'FAIL: '//TRIM(testname)//'WRFU_TimeGet() ', &
                            __FILE__ , &
                            __LINE__  )
      ! CT
      CALL WRFU_TimeSet( CT, YY=CT_YY, MM=CT_MM, DD=CT_DD , &
                             H=CT_H, M=CT_M, S=CT_S, rc=rc )
      CALL wrf_check_error( WRFU_SUCCESS, rc, &
                            'FAIL: '//TRIM(testname)//'WRFU_TimeSet() ', &
                            __FILE__ , &
                            __LINE__  )
      CALL WRFU_TimeGet( CT, timeString=CT_str, rc=rc )
      CALL wrf_check_error( WRFU_SUCCESS, rc, &
                            'FAIL: '//TRIM(testname)//'WRFU_TimeGet() ', &
                            __FILE__ , &
                            __LINE__  )
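      ! NOTE (editorial): TI, CT, and ST mimic an output interval, the current
      ! model time, and the start time.  ST is built next, then all three are
      ! passed to adjust_io_timestr() and the resulting string is compared
      ! against the caller-supplied expected value res_str.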
      ! ST
      CALL WRFU_TimeSet( ST, YY=ST_YY, MM=ST_MM, DD=ST_DD , &
                             H=ST_H, M=ST_M, S=ST_S, rc=rc )
      CALL wrf_check_error( WRFU_SUCCESS, rc, &
                            'FAIL: '//TRIM(testname)//'WRFU_TimeSet() ', &
                            __FILE__ , &
                            __LINE__  )
      CALL WRFU_TimeGet( ST, timeString=ST_str, rc=rc )
      CALL wrf_check_error( WRFU_SUCCESS, rc, &
                            'FAIL: '//TRIM(testname)//'WRFU_TimeGet() ', &
                            __FILE__ , &
                            __LINE__  )
      ! Test
      CALL adjust_io_timestr ( TI, CT, ST, computed_str )
      ! check result
      test_passed = .FALSE.
      IF ( LEN_TRIM(res_str) == LEN_TRIM(computed_str) ) THEN
        IF ( res_str(1:LEN_TRIM(res_str)) == computed_str(1:LEN_TRIM(computed_str)) ) THEN
          test_passed = .TRUE.
        ENDIF
      ENDIF
      ! print result
      IF ( test_passed ) THEN
        WRITE(*,FMT='(A)') 'PASS: '//TRIM(testname)
      ELSE
        WRITE(*,*) 'FAIL: ',TRIM(testname),': adjust_io_timestr(',        &
          TRIM(TI_str),',',TRIM(CT_str),',',TRIM(ST_str),') expected <',  &
          TRIM(res_str),'> but computed <',TRIM(computed_str),'>'
      ENDIF
    END SUBROUTINE test_adjust_io_timestr

    ! Print lots of time-related information for testing and debugging.
    ! Printed lines include pre_str and special string DOMAIN_TIME_TEST
    ! suitable for grepping by test scripts.
    ! Returns immediately unless self_test_domain has been set to .true. in
    ! namelist /time_control/ .
    SUBROUTINE domain_time_test ( grid, pre_str )
      IMPLICIT NONE
      TYPE(domain),      INTENT(IN) :: grid
      CHARACTER (LEN=*), INTENT(IN) :: pre_str
      ! locals
      LOGICAL, SAVE :: one_time_tests_done = .FALSE.
      REAL :: minutesSinceSimulationStart
      INTEGER :: advance_count, rc
      REAL :: currentDayOfYearReal
      TYPE(WRFU_TimeInterval) :: timeSinceSimulationStart
      TYPE(WRFU_Time) :: simulationStartTime
      CHARACTER (LEN=512) :: res_str
      LOGICAL :: self_test_domain
      !
      ! NOTE: test_adjust_io_timestr() (see below) is a self-test that
      !       prints PASS/FAIL/ERROR messages in a standard format.  All
      !       of the other tests should be structured the same way,
      !       someday.
      !
      CALL nl_get_self_test_domain( 1, self_test_domain )
      IF ( self_test_domain ) THEN
        CALL domain_clock_get( grid, advanceCount=advance_count )
        WRITE ( res_str, FMT="(I8.8)" ) advance_count
        CALL domain_time_test_print( pre_str, 'advanceCount', res_str )
        CALL domain_clock_get( grid, currentDayOfYearReal=currentDayOfYearReal )
        WRITE ( res_str, FMT='(F10.6)' ) currentDayOfYearReal
        CALL domain_time_test_print( pre_str, 'currentDayOfYearReal', res_str )
        CALL domain_clock_get( grid, minutesSinceSimulationStart=minutesSinceSimulationStart )
        WRITE ( res_str, FMT='(F10.6)' ) minutesSinceSimulationStart
        CALL domain_time_test_print( pre_str, 'minutesSinceSimulationStart', res_str )
        CALL domain_clock_get( grid, current_timestr=res_str )
        CALL domain_time_test_print( pre_str, 'current_timestr', res_str )
        CALL domain_clock_get( grid, current_timestr_frac=res_str )
        CALL domain_time_test_print( pre_str, 'current_timestr_frac', res_str )
        CALL domain_clock_get( grid, timeSinceSimulationStart=timeSinceSimulationStart )
        CALL WRFU_TimeIntervalGet( timeSinceSimulationStart, timeString=res_str, rc=rc )
        IF ( rc /= WRFU_SUCCESS ) THEN
          CALL wrf_error_fatal ( &
            'domain_time_test: WRFU_TimeIntervalGet() failed' )
        ENDIF
        CALL domain_time_test_print( pre_str, 'timeSinceSimulationStart', res_str )
        ! The following tests should only be done once, the first time this
        ! routine is called.
        IF ( .NOT. one_time_tests_done ) THEN
          one_time_tests_done = .TRUE.
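          ! One-time tests: print the static clock quantities (simulation
          ! start, start, stop, and time step) and run the
          ! adjust_io_timestr() self-test below.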
          CALL domain_clock_get( grid, simulationStartTimeStr=res_str )
          CALL domain_time_test_print( pre_str, 'simulationStartTime', res_str )
          CALL domain_clock_get( grid, start_timestr=res_str )
          CALL domain_time_test_print( pre_str, 'start_timestr', res_str )
          CALL domain_clock_get( grid, stop_timestr=res_str )
          CALL domain_time_test_print( pre_str, 'stop_timestr', res_str )
          CALL domain_clock_get( grid, time_stepstr=res_str )
          CALL domain_time_test_print( pre_str, 'time_stepstr', res_str )
          CALL domain_clock_get( grid, time_stepstr_frac=res_str )
          CALL domain_time_test_print( pre_str, 'time_stepstr_frac', res_str )
          ! Test adjust_io_timestr()
          !  CT = 2000-01-26_00:00:00  (current time)
          !  ST = 2000-01-24_12:00:00  (start time)
          !  TI = 00000_03:00:00       (time interval)
          ! the resulting time string should be:
          !  2000-01-26_00:00:00
          CALL test_adjust_io_timestr( TI_h=3, TI_m=0, TI_s=0,                 &
            CT_yy=2000, CT_mm=1, CT_dd=26, CT_h=0,  CT_m=0, CT_s=0,            &
            ST_yy=2000, ST_mm=1, ST_dd=24, ST_h=12, ST_m=0, ST_s=0,            &
            res_str='2000-01-26_00:00:00', testname='adjust_io_timestr_1' )
          ! this should fail (and does)
          ! CALL test_adjust_io_timestr( TI_h=3, TI_m=0, TI_s=0,               &
          !   CT_yy=2000, CT_mm=1, CT_dd=26, CT_h=0,  CT_m=0, CT_s=0,          &
          !   ST_yy=2000, ST_mm=1, ST_dd=24, ST_h=12, ST_m=0, ST_s=0,          &
          !   res_str='2000-01-26_00:00:01', testname='adjust_io_timestr_FAIL1' )
        ENDIF
      ENDIF
      RETURN
    END SUBROUTINE domain_time_test

!******************************************************************************
! END TEST SECTION
!******************************************************************************

END MODULE module_domain

! The following routines are outside this module to avoid build dependencies.

! Get current time as a string (current time from clock attached to the
! current_grid).  Includes fractional part, if present.
! Returns empty string if current_grid is not set or if timing has not yet
! been set up on current_grid.
SUBROUTINE get_current_time_string( time_str )
  USE module_domain
  IMPLICIT NONE
  CHARACTER (LEN=*), INTENT(OUT) :: time_str
  ! locals
  INTEGER :: debug_level_lcl
!PRINT *,'DEBUG: begin get_current_time_string()'
  time_str = ''
  IF ( current_grid_set ) THEN
!$$$DEBUG
!PRINT *,'DEBUG: get_current_time_string(): checking association of current_grid...'
!IF ( ASSOCIATED( current_grid ) ) THEN
!  PRINT *,'DEBUG: get_current_time_string(): current_grid is associated'
!ELSE
!  PRINT *,'DEBUG: get_current_time_string(): current_grid is NOT associated'
!ENDIF
!$$$END DEBUG
    IF ( current_grid%time_set ) THEN
!PRINT *,'DEBUG: get_current_time_string(): calling domain_clock_get()'
      ! set debug_level to zero and clear current_grid_set to avoid recursion
      CALL get_wrf_debug_level( debug_level_lcl )
      CALL set_wrf_debug_level ( 0 )
      current_grid_set = .FALSE.
      CALL domain_clock_get( current_grid, current_timestr_frac=time_str )
      ! restore debug_level and current_grid_set
      CALL set_wrf_debug_level ( debug_level_lcl )
      current_grid_set = .TRUE.
!PRINT *,'DEBUG: get_current_time_string(): back from domain_clock_get()'
    ENDIF
  ENDIF
!PRINT *,'DEBUG: end get_current_time_string()'
END SUBROUTINE get_current_time_string

! Get current domain name as a string of the form "d<NN>", where "<NN>" is
! grid%id printed in two characters, with leading zero if needed ("d01",
! "d02", etc.).
! Returns an empty string if current_grid is not set.
SUBROUTINE get_current_grid_name( grid_str )
  USE module_domain
  IMPLICIT NONE
  CHARACTER (LEN=*), INTENT(OUT) :: grid_str
  grid_str = ''
  IF ( current_grid_set ) THEN
    WRITE(grid_str,FMT="('d',I2.2)") current_grid%id
  ENDIF
END SUBROUTINE get_current_grid_name
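
! ---------------------------------------------------------------------------
! Illustrative sketch (editorial addition; not called anywhere in the model):
! shows the intended calling sequence of the clock/alarm convenience routines
! in module_domain.  It assumes grid has already been allocated by
! alloc_and_configure_domain() so that grid%alarms and grid%alarms_created are
! associated; in WRF proper this sequence is carried out by
! setup_timekeeping().  The dates, the 60 s time step, and the 3 h history
! interval are arbitrary example values.
SUBROUTINE example_domain_clock_usage( grid )
  USE module_domain
  USE module_utility
  IMPLICIT NONE
  TYPE(domain), POINTER :: grid
  ! locals
  TYPE(WRFU_Time)         :: startTime, stopTime
  TYPE(WRFU_TimeInterval) :: stepInterval, histInterval
  INTEGER :: rc
  ! Build the start/stop times and intervals with the WRFU time manager.
  CALL WRFU_TimeSet( startTime, YY=2000, MM=1, DD=24, H=12, M=0, S=0, rc=rc )
  CALL WRFU_TimeSet( stopTime,  YY=2000, MM=1, DD=26, H=0,  M=0, S=0, rc=rc )
  CALL WRFU_TimeIntervalSet( stepInterval, S=60, rc=rc )
  CALL WRFU_TimeIntervalSet( histInterval, H=3,  rc=rc )
  ! Create the domain clock, then a periodic alarm on the history stream.
  CALL domain_clock_create( grid, StartTime=startTime, &
                                  StopTime=stopTime,   &
                                  TimeStep=stepInterval )
  CALL domain_alarm_create( grid, HISTORY_ALARM, interval=histInterval )
  ! Advance the clock one step; grid%xtime and grid%julian are updated as a
  ! side effect of domain_clockadvance().
  CALL domain_clockadvance( grid )
  CALL domain_clockprint( 0, grid, 'example_domain_clock_usage:' )
END SUBROUTINE example_domain_clock_usage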