source: lmdz_wrf/trunk/WRFV3/main/module_wrf_top.F @ 2760

Last change on this file since 2760 was 1, checked in by lfita, 10 years ago
  • -- --- Opening of the WRF+LMDZ coupling repository --- -- -

WRF: version v3.3
LMDZ: version v1818

More details in:

File size: 20.9 KB
!WRF:DRIVER_LAYER:TOP
!

!TBH:  $$$  move this to ../frame?

MODULE module_wrf_top
!<DESCRIPTION>
! This module defines top-level wrf_init(), wrf_run(), and wrf_finalize()
! routines.
!</DESCRIPTION>
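!
! A minimal sketch of how these entry points are typically driven (this roughly
! mirrors the standalone driver in main/wrf.F; shown here for orientation only
! and not part of this module):
!
!    PROGRAM wrf_driver
!       USE module_wrf_top, ONLY : wrf_init, wrf_run, wrf_finalize
!       CALL wrf_init
!       CALL wrf_run
!       CALL wrf_finalize
!    END PROGRAM wrf_driver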

   USE module_machine
   USE module_domain
   USE module_integrate
   USE module_driver_constants
   USE module_configure
   USE module_check_a_mundo

   USE module_timing
   USE module_wrf_error
   USE module_nesting

#ifdef DM_PARALLEL
   USE module_dm, ONLY : wrf_dm_initialize
#endif

   IMPLICIT NONE

   REAL    :: time

   INTEGER :: loop , &
              levels_to_process

   TYPE (domain) , POINTER :: keep_grid, grid_ptr, null_domain
   TYPE (domain) , pointer :: parent_grid, new_nest
   LOGICAL                                :: a_nest_was_opened
   TYPE (grid_config_rec_type), SAVE :: config_flags
   INTEGER        :: kid, nestid
   INTEGER                 :: number_at_same_level
   INTEGER                 :: time_step_begin_restart

   INTEGER :: max_dom , domain_id , fid , oid , idum1 , idum2 , ierr
   INTEGER :: debug_level
   LOGICAL :: input_from_file

#ifdef DM_PARALLEL
   INTEGER                 :: nbytes
   INTEGER, PARAMETER      :: configbuflen = 4* CONFIG_BUF_LEN
   INTEGER                 :: configbuf( configbuflen )
   LOGICAL , EXTERNAL      :: wrf_dm_on_monitor
#endif

   CHARACTER (LEN=80)      :: rstname
   CHARACTER (LEN=80)      :: message

   INTERFACE
     SUBROUTINE Setup_Timekeeping( grid )
      USE module_domain
      TYPE(domain), POINTER :: grid
     END SUBROUTINE Setup_Timekeeping

! #if (EM_CORE == 1)
     SUBROUTINE wrf_dfi_write_initialized_state( )
     END SUBROUTINE wrf_dfi_write_initialized_state

     SUBROUTINE wrf_dfi_startfwd_init( )
     END SUBROUTINE wrf_dfi_startfwd_init

     SUBROUTINE wrf_dfi_startbck_init( )
     END SUBROUTINE wrf_dfi_startbck_init

     SUBROUTINE wrf_dfi_bck_init( )
     END SUBROUTINE wrf_dfi_bck_init

     SUBROUTINE wrf_dfi_fwd_init( )
     END SUBROUTINE wrf_dfi_fwd_init

     SUBROUTINE wrf_dfi_fst_init( )
     END SUBROUTINE wrf_dfi_fst_init

     SUBROUTINE wrf_dfi_array_reset ( )
     END SUBROUTINE wrf_dfi_array_reset
! #endif

     SUBROUTINE med_nest_initial ( parent , grid , config_flags )
       USE module_domain
       USE module_configure
       TYPE (domain), POINTER ::  grid , parent
       TYPE (grid_config_rec_type) config_flags
     END SUBROUTINE med_nest_initial

   END INTERFACE


CONTAINS


   SUBROUTINE wrf_init( no_init1 )
!<DESCRIPTION>
!     WRF initialization routine.
!</DESCRIPTION>
#ifdef _OPENMP
     use omp_lib
#endif
#ifdef _ACCEL
     use accel_lib
#endif
     LOGICAL, OPTIONAL, INTENT(IN) :: no_init1
     INTEGER i, myproc, nproc, hostid, loccomm, ierr, buddcounter, mydevice
     INTEGER, ALLOCATABLE :: hostids(:), budds(:)
     CHARACTER*512 hostname
#ifdef _ACCEL
     integer :: it, nt, in, devnum
#endif
#if defined(DM_PARALLEL) && !defined(STUBMPI) && ( defined(RUN_ON_GPU) || defined(_ACCEL))
     include "mpif.h"
#endif
#include "version_decl"


!<DESCRIPTION>
! Program_name, a global variable defined in frame/module_domain.F, is
! set, then a routine <a href=init_modules.html>init_modules</a> is
! called. This calls all the init programs that are provided by the
! modules that are linked into WRF.  These include initialization of
! external I/O packages.   Also, some key initializations for
! distributed-memory parallelism occur here if DM_PARALLEL is specified
! in the compile: setting up I/O quilt processes to act as I/O servers
! and dividing up MPI communicators among those as well as initializing
! external communication packages such as RSL or RSL_LITE.
!
!</DESCRIPTION>

   program_name = "WRF " // TRIM(release_version) // " MODEL"

   ! Initialize WRF modules:
   ! Phase 1 returns after MPI_INIT() (if it is called)
   CALL init_modules(1)
   IF ( .NOT. PRESENT( no_init1 ) ) THEN
     ! Initialize utilities (time manager, etc.)
#ifdef NO_LEAP_CALENDAR
     CALL WRFU_Initialize( defaultCalendar=WRFU_CAL_NOLEAP )
#else
     CALL WRFU_Initialize( defaultCalendar=WRFU_CAL_GREGORIAN )
#endif
   ENDIF
   ! Phase 2 resumes after MPI_INIT() (if it is called)
   CALL init_modules(2)

!<DESCRIPTION>
! The wrf namelist.input file is read and stored in the USE associated
! structure model_config_rec, defined in frame/module_configure.F, by the
! call to <a href=initial_config.html>initial_config</a>.  On distributed
! memory parallel runs this is done only on one processor, and then
! broadcast as a buffer.  For distributed-memory, the broadcast of the
! configuration information is accomplished by first putting the
! configuration information into a buffer (<a
! href=get_config_as_buffer.html>get_config_as_buffer</a>), broadcasting
! the buffer, then setting the configuration information (<a
! href=set_config_as_buffer.html>set_config_as_buffer</a>).
!
!</DESCRIPTION>

#ifdef DM_PARALLEL
   IF ( wrf_dm_on_monitor() ) THEN
     CALL initial_config
   ENDIF
   CALL get_config_as_buffer( configbuf, configbuflen, nbytes )
   CALL wrf_dm_bcast_bytes( configbuf, nbytes )
   CALL set_config_as_buffer( configbuf, configbuflen )
   CALL wrf_dm_initialize
#else
   CALL initial_config
#endif

   CALL set_derived_rconfigs
   CALL check_nml_consistency
   CALL set_physics_rconfigs

#ifdef _ACCEL
   buddcounter = 1
   mydevice = 0
# if defined(DM_PARALLEL) && !defined(STUBMPI)
   CALL wrf_get_myproc( myproc )
   CALL wrf_get_nproc( nproc )
   CALL wrf_get_hostid ( hostid )
   CALL wrf_get_dm_communicator ( loccomm )

   ALLOCATE( hostids(nproc) )
   ALLOCATE( budds(nproc) )
   CALL mpi_allgather( hostid, 1, MPI_INTEGER, hostids, 1, MPI_INTEGER, loccomm, ierr )
   if ( ierr .NE. 0 ) print * ,'error in mpi_allgather ',ierr
   budds = -1
   buddcounter = 0
   ! mark the ranks that are on the same node as this task
   DO i = 1, nproc
      IF ( hostid .EQ. hostids(i) ) THEN
         budds(i) = buddcounter
         buddcounter = buddcounter + 1
      ENDIF
   ENDDO
   mydevice = budds(myproc+1)
   DEALLOCATE( hostids )
   DEALLOCATE( budds )
# endif
   in = acc_get_num_devices(acc_device_nvidia)
   if (in .le. 0) print *, 'error:  No GPUS present: ',in
# ifdef _OPENMP
   !$OMP PARALLEL SHARED(mydevice,in) PRIVATE(it,nt,devnum)
   it = omp_get_thread_num()
   nt = omp_get_num_threads()
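   ! Descriptive note (not in the original source): map this (MPI task, OpenMP
   ! thread) pair onto one of the 'in' available NVIDIA devices in round-robin
   ! fashion, so tasks and threads sharing a node spread across its GPUs
   ! instead of all selecting device 0.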
   devnum = mod(mod(mydevice*nt,in) + it, in)
# ifdef _ACCEL_PROF
   print *, "Process, Thread, Device: ",mydevice, it, devnum
# endif
   call acc_set_device_num(devnum, acc_device_nvidia)

   !$OMP END PARALLEL
# else
   it = 0
   nt = 1
   devnum = mod(mod(mydevice*nt,in) + it, in)
#  ifdef _ACCEL_PROF
   print *, "Process, Thread, Device: ",mydevice, it, devnum
#  endif
   call acc_set_device_num(devnum, acc_device_nvidia)
# endif
#endif

#ifdef RUN_ON_GPU
   CALL wrf_get_myproc( myproc )
   CALL wrf_get_nproc( nproc )
# ifdef DM_PARALLEL
   CALL wrf_get_hostid ( hostid )
   CALL wrf_get_dm_communicator ( loccomm )
   ALLOCATE( hostids(nproc) )
   ALLOCATE( budds(nproc) )
   CALL mpi_allgather( hostid, 1, MPI_INTEGER, hostids, 1, MPI_INTEGER, loccomm, ierr )
   if ( ierr .NE. 0 ) write(0,*)__FILE__,__LINE__,'error in mpi_allgather ',ierr
   budds = -1
   buddcounter = 0
   ! mark the ranks that are on the same node as this task
   DO i = 1, nproc
      IF ( hostid .EQ. hostids(i) ) THEN
         budds(i) = buddcounter
         buddcounter = buddcounter + 1
      ENDIF
   ENDDO
   mydevice = budds(myproc+1)
   DEALLOCATE( hostids )
   DEALLOCATE( budds )
# else
   mydevice = 0
# endif
   CALL wsm5_gpu_init( myproc, nproc, mydevice )
#endif

!<DESCRIPTION>
! Among the configuration variables read from the namelist is
! debug_level. This is retrieved using nl_get_debug_level (Registry
! generated and defined in frame/module_configure.F).  The value is then
! used to set the debug-print information level for use by <a
! href=wrf_debug.html>wrf_debug</a> throughout the code. Debug_level
! of zero (the default) causes no information to be printed when the
! model runs. The higher the number (up to 1000) the more information is
! printed.
!
!</DESCRIPTION>
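!
! For reference only (assumed typical usage, not something read by this file
! directly): debug_level is normally set in the &time_control section of
! namelist.input, e.g.
!
!    &time_control
!      debug_level = 100
!    /
!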

   CALL nl_get_debug_level ( 1, debug_level )
   CALL set_wrf_debug_level ( debug_level )

   ! allocate and configure the mother domain

   NULLIFY( null_domain )

!<DESCRIPTION>
! RSL is required for WRF nesting options.
! The non-MPI build that allows nesting is only supported on machines
! with the -DSTUBMPI option.  Check to see if the WRF model is being asked
! for a multi-domain run (max_dom > 1, from the namelist).  If so,
! then we check to make sure that we are under the parallel
! run option or we are on an acceptable machine.
!</DESCRIPTION>

   CALL nl_get_max_dom( 1, max_dom )
   IF ( max_dom > 1 ) THEN
#if ( ! defined(DM_PARALLEL)  &&   ! defined(STUBMPI) )
   CALL wrf_error_fatal( &
     'nesting requires either an MPI build or use of the -DSTUBMPI option' )
#endif
   END IF

!<DESCRIPTION>
! The top-most domain in the simulation is then allocated and configured
! by calling <a href=alloc_and_configure_domain.html>alloc_and_configure_domain</a>.
! Here, in the case of this root domain, the routine is passed the
! globally accessible pointer to TYPE(domain), head_grid, defined in
! frame/module_domain.F.  The parent is null and the child index is given
! as negative, signifying none.  Afterwards, because the call to
! alloc_and_configure_domain may modify the model's configuration data
! stored in model_config_rec, the configuration information is again
! repacked into a buffer, broadcast, and unpacked on each task (for
! DM_PARALLEL compiles). The call to <a
! href=setup_timekeeping.html>setup_timekeeping</a> for head_grid relies
! on this configuration information, and it must occur after the second
! broadcast of the configuration information.
!
!</DESCRIPTION>
   CALL       wrf_message ( program_name )
   CALL       wrf_debug ( 100 , 'wrf: calling alloc_and_configure_domain ' )
   CALL alloc_and_configure_domain ( domain_id  = 1 ,                  &
                                     grid       = head_grid ,          &
                                     parent     = null_domain ,        &
                                     kid        = -1                   )

   CALL       wrf_debug ( 100 , 'wrf: calling model_to_grid_config_rec ' )
   CALL model_to_grid_config_rec ( head_grid%id , model_config_rec , config_flags )
   CALL       wrf_debug ( 100 , 'wrf: calling set_scalar_indices_from_config ' )
   CALL set_scalar_indices_from_config ( head_grid%id , idum1, idum2 )
   CALL       wrf_debug ( 100 , 'wrf: calling init_wrfio' )
   CALL init_wrfio

#ifdef DM_PARALLEL
   CALL get_config_as_buffer( configbuf, configbuflen, nbytes )
   CALL wrf_dm_bcast_bytes( configbuf, nbytes )
   CALL set_config_as_buffer( configbuf, configbuflen )
#endif

! #if (EM_CORE == 1)
   ! In case we are doing digital filter initialization, set dfi_stage = DFI_SETUP
   !   to indicate in Setup_Timekeeping that we want forecast start and
   !   end times at this point
   IF ( head_grid%dfi_opt .NE. DFI_NODFI ) head_grid%dfi_stage = DFI_SETUP
! #endif

   CALL Setup_Timekeeping (head_grid)

!<DESCRIPTION>
! The head grid is initialized with read-in data through the call to <a
! href=med_initialdata_input.html>med_initialdata_input</a>, which is
! passed the pointer head_grid and a locally declared configuration data
! structure, config_flags, that is set by a call to <a
! href=model_to_grid_config_rec.html>model_to_grid_config_rec</a>.  It is
! also necessary that the indices into the 4d tracer arrays such as
! moisture be set with a call to <a
! href=set_scalar_indices_from_config.html>set_scalar_indices_from_config</a>
! prior to the call to initialize the domain.  Both of these calls are
! told which domain they are setting up for by passing in the integer id
! of the head domain as <tt>head_grid%id</tt>, which is 1 for the
! top-most domain.
!
! In the case that write_restart_at_0h is set to true in the namelist,
! the model simply generates a restart file using the just read-in data
! and then shuts down. This is used for ensemble breeding, and is not
! typically enabled.
!
!</DESCRIPTION>

   CALL med_initialdata_input( head_grid , config_flags )

   IF ( config_flags%write_restart_at_0h ) THEN
      CALL med_restart_out ( head_grid, config_flags )
#ifndef AUTODOC_BUILD
! prevent this from showing up before the call to integrate in the autogenerated call tree
      CALL wrf_debug ( 0 , ' 0 h restart only wrf: SUCCESS COMPLETE WRF' )
! TBH:  $$$ Unscramble this later...
! TBH:  $$$ Need to add state to avoid calling wrf_finalize() twice when ESMF
! TBH:  $$$ library is used.  Maybe just set clock stop_time=start_time and
! TBH:  $$$ do not call wrf_finalize here...
      CALL wrf_finalize( )
#endif
   END IF

   ! set default values for subtimes
   head_grid%start_subtime = domain_get_start_time ( head_grid )
   head_grid%stop_subtime = domain_get_stop_time ( head_grid )

   !  For EM (but not DA), if this is a DFI run, we can allocate some space.  We are
   !  not allowing anything tricky for nested DFI.  If there are any nested domains,
   !  they all need to start at the same time.  Otherwise, why even do the DFI?  If
   !  the domains do not all start at the same time, then there will be inconsistencies,
   !  which is what DFI is supposed to address.

#if (EM_CORE == 1)
   IF ( head_grid%dfi_opt .NE. DFI_NODFI ) THEN
      CALL alloc_doms_for_dfi ( head_grid )
   END IF
#endif


   END SUBROUTINE wrf_init



   SUBROUTINE wrf_run( )
!<DESCRIPTION>
!     WRF run routine.
!</DESCRIPTION>

!<DESCRIPTION>
! Once the top-level domain has been allocated, configured, and
! initialized, the model time integration is ready to proceed.  The start
! and stop times for the domain are set to the start and stop time of the
! model run, and then <a href=integrate.html>integrate</a> is called to
! advance the domain forward through that specified time interval.  On
! return, the simulation is completed.
!
!</DESCRIPTION>
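!
! Descriptive note (not in the original source): integrate() advances head_grid
! over the interval defined by its domain clock and, through the recursion in
! module_integrate, also advances any active nested domains.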

   !  The forecast integration for the most coarse grid is now started.  The
   !  integration is from the first step (1) to the last step of the simulation.

   CALL       wrf_debug ( 100 , 'wrf: calling integrate' )
   CALL integrate ( head_grid )
   CALL       wrf_debug ( 100 , 'wrf: back from integrate' )

   END SUBROUTINE wrf_run



   SUBROUTINE wrf_finalize( no_shutdown )
!<DESCRIPTION>
!     WRF finalize routine.
!</DESCRIPTION>

!<DESCRIPTION>
! A Mediation Layer-provided
! subroutine, <a href=med_shutdown_io.html>med_shutdown_io</a> is called
! to allow the model to do any I/O specific cleanup and shutdown, and
! then the WRF Driver Layer routine <a
! href=wrf_shutdown.html>wrf_shutdown</a> (quilt servers would be
! directed to shut down here) is called to properly end the run,
! including shutting down the communications (for example, most comm
! layers would call MPI_FINALIZE at this point if they're using MPI).
!
!</DESCRIPTION>
     LOGICAL, OPTIONAL, INTENT(IN) :: no_shutdown

   ! shut down I/O
   CALL med_shutdown_io ( head_grid , config_flags )
   CALL       wrf_debug ( 100 , 'wrf: back from med_shutdown_io' )

   CALL       wrf_debug (   0 , 'wrf: SUCCESS COMPLETE WRF' )

   ! Call wrf_shutdown() (which calls MPI_FINALIZE()
   ! for DM parallel runs).
   IF ( .NOT. PRESENT( no_shutdown ) ) THEN
     ! Finalize time manager
     CALL WRFU_Finalize
     CALL wrf_shutdown
   ENDIF

   END SUBROUTINE wrf_finalize


   SUBROUTINE wrf_dfi()
!<DESCRIPTION>
! Runs a digital filter initialization procedure.
!</DESCRIPTION>
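!
! Summary of the CASE blocks below (descriptive comment, not in the original source):
!   DFI_DFL  - filter forward in time only, then launch the forecast
!   DFI_DDFI - integrate backward in time, then filter forward, then launch the forecast
!   DFI_TDFI - integrate backward in time, reset, filter forward, then launch the forecast
! Any other dfi_opt value is treated as a fatal namelist error.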
      IMPLICIT NONE

! #if (EM_CORE == 1)
      ! Initialization procedure
      IF ( config_flags%dfi_opt .NE. DFI_NODFI ) THEN

         SELECT CASE ( config_flags%dfi_opt )

            CASE (DFI_DFL)
               wrf_err_message = 'Initializing with DFL'
               CALL wrf_message(TRIM(wrf_err_message))

               wrf_err_message = '   Filtering forward in time'
               CALL wrf_message(TRIM(wrf_err_message))

               CALL wrf_dfi_fwd_init()
               CALL wrf_run()

               CALL wrf_dfi_array_reset()

               CALL wrf_dfi_fst_init()

               IF ( config_flags%dfi_write_filtered_input ) THEN
                  CALL wrf_dfi_write_initialized_state()
               END IF

            CASE (DFI_DDFI)
               wrf_err_message = 'Initializing with DDFI'
               CALL wrf_message(TRIM(wrf_err_message))

               wrf_err_message = '   Integrating backward in time'
               CALL wrf_message(TRIM(wrf_err_message))

               CALL wrf_dfi_bck_init()
               CALL wrf_run()

               wrf_err_message = '   Filtering forward in time'
               CALL wrf_message(TRIM(wrf_err_message))

               CALL wrf_dfi_fwd_init()
               CALL wrf_run()

               CALL wrf_dfi_array_reset()

               CALL wrf_dfi_fst_init()

               IF ( config_flags%dfi_write_filtered_input ) THEN
                  CALL wrf_dfi_write_initialized_state()
               END IF

            CASE (DFI_TDFI)
               wrf_err_message = 'Initializing with TDFI'
               CALL wrf_message(TRIM(wrf_err_message))

               wrf_err_message = '   Integrating backward in time'
               CALL wrf_message(TRIM(wrf_err_message))

               CALL wrf_dfi_bck_init()
               CALL wrf_run()

               CALL wrf_dfi_array_reset()

               wrf_err_message = '   Filtering forward in time'
               CALL wrf_message(TRIM(wrf_err_message))

               CALL wrf_dfi_fwd_init()
               CALL wrf_run()

               CALL wrf_dfi_array_reset()

               CALL wrf_dfi_fst_init()

               IF ( config_flags%dfi_write_filtered_input ) THEN
                  CALL wrf_dfi_write_initialized_state()
               END IF

            CASE DEFAULT
               wrf_err_message = 'Unrecognized DFI_OPT in namelist'
               CALL wrf_error_fatal(TRIM(wrf_err_message))

         END SELECT

      END IF
! #endif

   END SUBROUTINE wrf_dfi

   SUBROUTINE set_derived_rconfigs
!<DESCRIPTION>
! Some derived rconfig entries need to be set based on the value of other,
! non-derived entries before package-dependent memory allocation takes place.
! This might be employed when, for example, we want to allocate arrays in
! a package that depends on the setting of two or more namelist variables.
! In this subroutine, we do just that.
!</DESCRIPTION>
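!
! Concretely (descriptive comment, not in the original source): the blocks below
! make mp_physics_dfi mirror mp_physics whenever a DFI scheme is selected, and
! mark it as unused (-1) otherwise; an analogous rule is applied to
! mp_physics_4dvar when compiled with DA_CORE.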

      IMPLICIT NONE

      INTEGER :: i


! #if (EM_CORE == 1)
      IF ( model_config_rec % dfi_opt .EQ. DFI_NODFI ) THEN
        DO i = 1, model_config_rec % max_dom
           model_config_rec % mp_physics_dfi(i) = -1
        ENDDO
      ELSE
        DO i = 1, model_config_rec % max_dom
           model_config_rec % mp_physics_dfi(i) = model_config_rec % mp_physics(i)
        ENDDO
      END IF
! #endif

#if (DA_CORE == 1)
      IF ( model_config_rec % dyn_opt .EQ. 2 ) THEN
        DO i = 1, model_config_rec % max_dom
           model_config_rec % mp_physics_4dvar(i) = -1
        ENDDO
      ELSE
        DO i = 1, model_config_rec % max_dom
           model_config_rec % mp_physics_4dvar(i) = model_config_rec % mp_physics(i)
        ENDDO
      END IF
#endif

   END SUBROUTINE set_derived_rconfigs

   RECURSIVE SUBROUTINE alloc_doms_for_dfi ( grid )

      !  Input variables.

      TYPE (domain) , pointer :: grid

      !  Local variables.

      TYPE (domain) , pointer :: new_nest_loc
      TYPE (grid_config_rec_type) :: parent_config_flags
      INTEGER :: nestid_loc , kid_loc

         !  Are there any subdomains from this level?  The output is the nestid (the domain
         !  ID of the nest), and kid (an index to which of the parent's children this new nested
         !  domain represents).

         DO WHILE ( nests_to_open( grid , nestid_loc , kid_loc ) )

            !  If we found another child domain, we continue on: allocate, set up time keeping,
            !  initialize.

            CALL alloc_and_configure_domain ( domain_id  = nestid_loc   , &
                                              grid       = new_nest_loc , &
                                              parent     = grid         , &
                                              kid        = kid_loc        )

print *,'for parent domain id #',grid%id,', found child domain #',nestid_loc
            !  Since this is a DFI run, set the DFI switches to the same for all domains.

            new_nest_loc%dfi_opt = head_grid%dfi_opt
            new_nest_loc%dfi_stage = DFI_SETUP

            !  Set up time keeping for the fine grid space that was just allocated.

            CALL Setup_Timekeeping (new_nest_loc)

            !  With space allocated, and timers set, the fine grid can be initialized with data.

            CALL model_to_grid_config_rec ( grid%id , model_config_rec , parent_config_flags )
            CALL med_nest_initial ( grid , new_nest_loc , config_flags )

            !  Here's the recursive part.  For each of these child domains, we call this same routine.
            !  This will find all of new_nest_loc's first-generation progeny.

            CALL alloc_doms_for_dfi ( new_nest_loc )

         END DO

   END SUBROUTINE alloc_doms_for_dfi

END MODULE module_wrf_top