source: trunk/WRF.COMMON/WRFV3/main/module_wrf_top.F @ 3567

Last change on this file since 3567 was 2759, checked in by aslmd, 2 years ago

adding unmodified code from WRFV3.0.1.1, purged of useless data (+1M in size)

!WRF:DRIVER_LAYER:TOP
!

!TBH:  $$$  move this to ../frame?

MODULE module_wrf_top
!<DESCRIPTION>
! This module defines top-level wrf_init(), wrf_run(), and wrf_finalize()
! routines.
!</DESCRIPTION>
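!
! The main driver (cf. main/wrf.F) invokes these in order: wrf_init(),
! wrf_run(), wrf_finalize(); wrf_dfi() may be called between init and
! run when digital filter initialization is requested.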

   USE module_machine
   USE module_domain
   USE module_integrate
   USE module_driver_constants
   USE module_configure

   USE module_timing
   USE module_wrf_error

#ifdef DM_PARALLEL
   USE module_dm
#endif

   IMPLICIT NONE

   REAL    :: time

   INTEGER :: loop , &
              levels_to_process

   TYPE (domain) , POINTER :: keep_grid, grid_ptr, null_domain
   TYPE (grid_config_rec_type), SAVE :: config_flags
   INTEGER                 :: number_at_same_level
   INTEGER                 :: time_step_begin_restart

   INTEGER :: max_dom , domain_id , fid , oid , idum1 , idum2 , ierr
   INTEGER :: debug_level
   LOGICAL :: input_from_file

#ifdef DM_PARALLEL
   INTEGER                 :: nbytes
   INTEGER, PARAMETER      :: configbuflen = 4* CONFIG_BUF_LEN
   INTEGER                 :: configbuf( configbuflen )
   LOGICAL , EXTERNAL      :: wrf_dm_on_monitor
#endif

   CHARACTER (LEN=80)      :: rstname
   CHARACTER (LEN=80)      :: message

   INTERFACE
     SUBROUTINE Setup_Timekeeping( grid )
      USE module_domain
      TYPE(domain), POINTER :: grid
     END SUBROUTINE Setup_Timekeeping

#if (EM_CORE == 1)
     SUBROUTINE wrf_dfi_write_initialized_state( )
     END SUBROUTINE wrf_dfi_write_initialized_state

     SUBROUTINE wrf_dfi_bck_init( )
     END SUBROUTINE wrf_dfi_bck_init

     SUBROUTINE wrf_dfi_fwd_init( )
     END SUBROUTINE wrf_dfi_fwd_init

     SUBROUTINE wrf_dfi_fst_init( )
     END SUBROUTINE wrf_dfi_fst_init

     SUBROUTINE wrf_dfi_array_reset ( )
     END SUBROUTINE wrf_dfi_array_reset
#endif
   END INTERFACE


CONTAINS


   SUBROUTINE wrf_init( no_init1 )
!<DESCRIPTION>
!     WRF initialization routine.
!</DESCRIPTION>
     LOGICAL, OPTIONAL, INTENT(IN) :: no_init1
     INTEGER i, myproc, nproc, hostid, loccomm, ierr, buddcounter, mydevice
     INTEGER, ALLOCATABLE :: hostids(:), budds(:)
     CHARACTER*512 hostname
#if defined(DM_PARALLEL) && defined(RUN_ON_GPU)
     include "mpif.h"
#endif
#include "version_decl"


!<DESCRIPTION>
! Program_name, a global variable defined in frame/module_domain.F, is
! set, then a routine <a href=init_modules.html>init_modules</a> is
! called. This calls all the init programs that are provided by the
! modules that are linked into WRF.  These include initialization of
! external I/O packages.   Also, some key initializations for
! distributed-memory parallelism occur here if DM_PARALLEL is specified
! in the compile: setting up I/O quilt processes to act as I/O servers
! and dividing up MPI communicators among those as well as initializing
! external communication packages such as RSL or RSL_LITE.
!
!</DESCRIPTION>

   program_name = "WRF " // TRIM(release_version) // " MODEL"

   ! Initialize WRF modules:
   ! Phase 1 returns after MPI_INIT() (if it is called)
   CALL init_modules(1)
   IF ( .NOT. PRESENT( no_init1 ) ) THEN
     ! Initialize utilities (time manager, etc.)
     CALL WRFU_Initialize( defaultCalendar=WRFU_CAL_GREGORIAN )
   ENDIF
   ! Phase 2 resumes after MPI_INIT() (if it is called)
   CALL init_modules(2)

!<DESCRIPTION>
! The wrf namelist.input file is read and stored in the USE associated
! structure model_config_rec, defined in frame/module_configure.F, by the
! call to <a href=initial_config.html>initial_config</a>.  On distributed
! memory parallel runs this is done only on one processor, and then
! broadcast as a buffer.  For distributed-memory, the broadcast of the
! configuration information is accomplished by first putting the
! configuration information into a buffer (<a
! href=get_config_as_buffer.html>get_config_as_buffer</a>), broadcasting
! the buffer, then setting the configuration information (<a
! href=set_config_as_buffer.html>set_config_as_buffer</a>).
!
!</DESCRIPTION>

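   ! only the monitor task reads namelist.input; the configuration is then
   ! packed into a flat buffer and broadcast to all other tasks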
#ifdef DM_PARALLEL
   IF ( wrf_dm_on_monitor() ) THEN
     CALL initial_config
   ENDIF
   CALL get_config_as_buffer( configbuf, configbuflen, nbytes )
   CALL wrf_dm_bcast_bytes( configbuf, nbytes )
   CALL set_config_as_buffer( configbuf, configbuflen )
   CALL wrf_dm_initialize
#else
   CALL initial_config
#endif

   CALL set_derived_rconfigs

#ifdef RUN_ON_GPU
   CALL wrf_get_myproc( myproc )
   CALL wrf_get_nproc( nproc )
   CALL wrf_get_hostid ( hostid )
# ifdef DM_PARALLEL
   CALL wrf_get_dm_communicator ( loccomm )
   ALLOCATE( hostids(nproc) )
   ALLOCATE( budds(nproc) )
   CALL mpi_allgather( hostid, 1, MPI_INTEGER, hostids, 1, MPI_INTEGER, loccomm, ierr )
   if ( ierr .NE. 0 ) write(0,*)__FILE__,__LINE__,'error in mpi_allgather ',ierr
   budds = -1
   buddcounter = 0
   ! mark the tasks that are on the same node as this one
   DO i = 1, nproc
      IF ( hostid .EQ. hostids(i) ) THEN
         budds(i) = buddcounter
         buddcounter = buddcounter + 1
      ENDIF
   ENDDO
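   ! each task's device index is its rank among the tasks sharing its host
   ! (this assumes the node exposes at least that many GPUs)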
   mydevice = budds(myproc+1)
   DEALLOCATE( hostids )
   DEALLOCATE( budds )
# else
   mydevice = 0
# endif
   CALL wsm5_gpu_init( myproc, nproc, mydevice )
#endif

!<DESCRIPTION>
! Among the configuration variables read from the namelist is
! debug_level. This is retrieved using nl_get_debug_level (Registry
! generated and defined in frame/module_configure.F).  The value is then
! used to set the debug-print information level for use by <a
! href=wrf_debug.html>wrf_debug</a> throughout the code. Debug_level
! of zero (the default) causes no information to be printed when the
! model runs. The higher the number (up to 1000) the more information is
! printed.
!
!</DESCRIPTION>

   CALL nl_get_debug_level ( 1, debug_level )
   CALL set_wrf_debug_level ( debug_level )
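   ! e.g., with debug_level = 100 in namelist.input, the level-100
   ! wrf_debug calls below will print, while more verbose messages stay quiet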

   ! allocate and configure the mother domain

   NULLIFY( null_domain )

!<DESCRIPTION>
! RSL is required for WRF nesting options.
! The non-MPI build that allows nesting is only supported on machines
! with the -DSTUBMPI option.  Check to see if the WRF model is being asked
! for a multi-domain run (max_dom > 1, from the namelist).  If so,
! then we check to make sure that we are under the parallel
! run option or we are on an acceptable machine.
!</DESCRIPTION>

   CALL nl_get_max_dom( 1, max_dom )
   IF ( max_dom > 1 ) THEN
#if ( ! defined(DM_PARALLEL)  &&   ! defined(STUBMPI) )
   CALL wrf_error_fatal( &
     'nesting requires either an MPI build or use of the -DSTUBMPI option' )
#endif
   END IF

!<DESCRIPTION>
! The top-most domain in the simulation is then allocated and configured
! by calling <a href=alloc_and_configure_domain.html>alloc_and_configure_domain</a>.
! Here, in the case of this root domain, the routine is passed the
! globally accessible pointer to TYPE(domain), head_grid, defined in
! frame/module_domain.F.  The parent is null and the child index is given
! as negative, signifying none.  Afterwards, because the call to
! alloc_and_configure_domain may modify the model's configuration data
! stored in model_config_rec, the configuration information is again
! repacked into a buffer, broadcast, and unpacked on each task (for
! DM_PARALLEL compiles). The call to <a
! href=setup_timekeeping.html>setup_timekeeping</a> for head_grid relies
! on this configuration information, and it must occur after the second
! broadcast of the configuration information.
!
!</DESCRIPTION>
   CALL       wrf_message ( program_name )
   CALL       wrf_debug ( 100 , 'wrf: calling alloc_and_configure_domain ' )
   CALL alloc_and_configure_domain ( domain_id  = 1 ,                  &
                                     grid       = head_grid ,          &
                                     parent     = null_domain ,        &
                                     kid        = -1                   )

   CALL       wrf_debug ( 100 , 'wrf: calling model_to_grid_config_rec ' )
   CALL model_to_grid_config_rec ( head_grid%id , model_config_rec , config_flags )
   CALL       wrf_debug ( 100 , 'wrf: calling set_scalar_indices_from_config ' )
   CALL set_scalar_indices_from_config ( head_grid%id , idum1, idum2 )
   CALL       wrf_debug ( 100 , 'wrf: calling init_wrfio' )
   CALL init_wrfio

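   ! re-broadcast the configuration: alloc_and_configure_domain may have
   ! modified model_config_rec, and the Setup_Timekeeping call below
   ! depends on the updated values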
#ifdef DM_PARALLEL
   CALL get_config_as_buffer( configbuf, configbuflen, nbytes )
   CALL wrf_dm_bcast_bytes( configbuf, nbytes )
   CALL set_config_as_buffer( configbuf, configbuflen )
#endif

#if (EM_CORE == 1)
   ! In case we are doing digital filter initialization, set dfi_stage = DFI_SETUP
   !   to indicate in Setup_Timekeeping that we want forecast start and
   !   end times at this point
   IF ( head_grid%dfi_opt .NE. DFI_NODFI ) head_grid%dfi_stage = DFI_SETUP
#endif

   CALL Setup_Timekeeping (head_grid)

!<DESCRIPTION>
! The head grid is initialized with read-in data through the call to <a
! href=med_initialdata_input.html>med_initialdata_input</a>, which is
! passed the pointer head_grid and a locally declared configuration data
! structure, config_flags, that is set by a call to <a
! href=model_to_grid_config_rec.html>model_to_grid_config_rec</a>.  It is
! also necessary that the indices into the 4d tracer arrays such as
! moisture be set with a call to <a
! href=set_scalar_indices_from_config.html>set_scalar_indices_from_config</a>
! prior to the call to initialize the domain.  Both of these calls are
! told which domain they are setting up for by passing in the integer id
! of the head domain as <tt>head_grid%id</tt>, which is 1 for the
! top-most domain.
!
! In the case that write_restart_at_0h is set to true in the namelist,
! the model simply generates a restart file using the just read-in data
! and then shuts down. This is used for ensemble breeding, and is not
! typically enabled.
!
!</DESCRIPTION>

   CALL med_initialdata_input( head_grid , config_flags )

   IF ( config_flags%write_restart_at_0h ) THEN
      CALL med_restart_out ( head_grid, config_flags )
#ifndef AUTODOC_BUILD
! prevent this from showing up before the call to integrate in the autogenerated call tree
      CALL wrf_debug ( 0 , ' 0 h restart only wrf: SUCCESS COMPLETE WRF' )
! TBH:  $$$ Unscramble this later...
! TBH:  $$$ Need to add state to avoid calling wrf_finalize() twice when ESMF
! TBH:  $$$ library is used.  Maybe just set clock stop_time=start_time and
! TBH:  $$$ do not call wrf_finalize here...
      CALL wrf_finalize( )
#endif
   END IF

   ! set default values for subtimes
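   ! (integrate() advances a grid from start_subtime to stop_subtime; for
   !  the head grid these default to the full simulation interval)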
   head_grid%start_subtime = domain_get_start_time ( head_grid )
   head_grid%stop_subtime = domain_get_stop_time ( head_grid )

   END SUBROUTINE wrf_init



   SUBROUTINE wrf_run( )
!<DESCRIPTION>
!     WRF run routine.
!</DESCRIPTION>

!<DESCRIPTION>
! Once the top-level domain has been allocated, configured, and
! initialized, the model time integration is ready to proceed.  The start
! and stop times for the domain are set to the start and stop time of the
! model run, and then <a href=integrate.html>integrate</a> is called to
! advance the domain forward through that specified time interval.  On
! return, the simulation is completed.
!
!</DESCRIPTION>

   !  The forecast integration for the coarsest grid is now started.  The
   !  integration is from the first step (1) to the last step of the simulation.

   CALL       wrf_debug ( 100 , 'wrf: calling integrate' )
   CALL integrate ( head_grid )
   CALL       wrf_debug ( 100 , 'wrf: back from integrate' )

   END SUBROUTINE wrf_run



   SUBROUTINE wrf_finalize( no_shutdown )
!<DESCRIPTION>
!     WRF finalize routine.
!</DESCRIPTION>

!<DESCRIPTION>
! A Mediation Layer-provided
! subroutine, <a href=med_shutdown_io.html>med_shutdown_io</a> is called
! to allow the model to do any I/O-specific cleanup and shutdown, and
! then the WRF Driver Layer routine <a
! href=wrf_shutdown.html>wrf_shutdown</a> (quilt servers would be
! directed to shut down here) is called to properly end the run,
! including shutting down the communications (for example, most comm
! layers would call MPI_FINALIZE at this point if they're using MPI).
!
!</DESCRIPTION>
     LOGICAL, OPTIONAL, INTENT(IN) :: no_shutdown

   ! shut down I/O
   CALL med_shutdown_io ( head_grid , config_flags )
   CALL       wrf_debug ( 100 , 'wrf: back from med_shutdown_io' )

   CALL       wrf_debug (   0 , 'wrf: SUCCESS COMPLETE WRF' )
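   ! (logged at level 0 so the success message prints at any debug_level)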

   ! Call wrf_shutdown() (which calls MPI_FINALIZE()
   ! for DM parallel runs).
   IF ( .NOT. PRESENT( no_shutdown ) ) THEN
     ! Finalize time manager
     CALL WRFU_Finalize
     CALL wrf_shutdown
   ENDIF

   END SUBROUTINE wrf_finalize


   SUBROUTINE wrf_dfi()
!<DESCRIPTION>
! Runs a digital filter initialization procedure.
!</DESCRIPTION>
      IMPLICIT NONE

#if (EM_CORE == 1)
      ! Initialization procedure
      IF ( config_flags%dfi_opt .NE. DFI_NODFI ) THEN

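         ! flow of the cases below: DFL runs a single filtered forward leg;
         ! DDFI runs a backward leg and then a filtered forward leg; TDFI is
         ! like DDFI but also resets the model state after the backward leg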
         SELECT CASE ( config_flags%dfi_opt )

            CASE (DFI_DFL)
               wrf_err_message = 'Initializing with DFL'
               CALL wrf_message(TRIM(wrf_err_message))

               wrf_err_message = '   Filtering forward in time'
               CALL wrf_message(TRIM(wrf_err_message))

               CALL wrf_dfi_fwd_init()
               CALL wrf_run()

               CALL wrf_dfi_array_reset()

               CALL wrf_dfi_fst_init()

               IF ( config_flags%dfi_write_filtered_input ) THEN
                  CALL wrf_dfi_write_initialized_state()
               END IF

            CASE (DFI_DDFI)
               wrf_err_message = 'Initializing with DDFI'
               CALL wrf_message(TRIM(wrf_err_message))

               wrf_err_message = '   Integrating backward in time'
               CALL wrf_message(TRIM(wrf_err_message))

               CALL wrf_dfi_bck_init()
               CALL wrf_run()

               wrf_err_message = '   Filtering forward in time'
               CALL wrf_message(TRIM(wrf_err_message))

               CALL wrf_dfi_fwd_init()
               CALL wrf_run()

               CALL wrf_dfi_array_reset()

               CALL wrf_dfi_fst_init()

               IF ( config_flags%dfi_write_filtered_input ) THEN
                  CALL wrf_dfi_write_initialized_state()
               END IF

            CASE (DFI_TDFI)
               wrf_err_message = 'Initializing with TDFI'
               CALL wrf_message(TRIM(wrf_err_message))

               wrf_err_message = '   Integrating backward in time'
               CALL wrf_message(TRIM(wrf_err_message))

               CALL wrf_dfi_bck_init()
               CALL wrf_run()

               CALL wrf_dfi_array_reset()

               wrf_err_message = '   Filtering forward in time'
               CALL wrf_message(TRIM(wrf_err_message))

               CALL wrf_dfi_fwd_init()
               CALL wrf_run()

               CALL wrf_dfi_array_reset()

               CALL wrf_dfi_fst_init()

               IF ( config_flags%dfi_write_filtered_input ) THEN
                  CALL wrf_dfi_write_initialized_state()
               END IF

            CASE DEFAULT
               wrf_err_message = 'Unrecognized DFI_OPT in namelist'
               CALL wrf_error_fatal(TRIM(wrf_err_message))

         END SELECT

      END IF
#endif

   END SUBROUTINE wrf_dfi

   SUBROUTINE set_derived_rconfigs
!<DESCRIPTION>
! Some derived rconfig entries need to be set based on the value of other,
! non-derived entries before package-dependent memory allocation takes place.
! This might be employed when, for example, we want to allocate arrays in
! a package that depends on the setting of two or more namelist variables.
! In this subroutine, we do just that.
!</DESCRIPTION>

      IMPLICIT NONE

      INTEGER :: i


#if (EM_CORE == 1)
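      ! with DFI off, set the derived entry mp_physics_dfi to -1 so that
      ! package-dependent allocation skips the DFI-specific arrays;
      ! otherwise mirror each domain's mp_physics choice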
      IF ( model_config_rec % dfi_opt .EQ. DFI_NODFI ) THEN
        DO i = 1, model_config_rec % max_dom
           model_config_rec % mp_physics_dfi(i) = -1
        ENDDO
      ELSE
        DO i = 1, model_config_rec % max_dom
           model_config_rec % mp_physics_dfi(i) = model_config_rec % mp_physics(i)
        ENDDO
      END IF
#endif

#if (DA_CORE == 1)
      IF ( model_config_rec % dyn_opt .EQ. 2 ) THEN
        DO i = 1, model_config_rec % max_dom
           model_config_rec % mp_physics_4dvar(i) = -1
        ENDDO
      ELSE
        DO i = 1, model_config_rec % max_dom
           model_config_rec % mp_physics_4dvar(i) = model_config_rec % mp_physics(i)
        ENDDO
      END IF
#endif

   END SUBROUTINE set_derived_rconfigs

END MODULE module_wrf_top