source: trunk/WRF.COMMON/WRFV2/share/module_wrf_top.F @ 3567

Last change on this file since 3567 was 11, checked in by aslmd, 14 years ago

spiga@svn-planeto: adds the Martian mesoscale model

File size: 10.5 KB
!WRF:DRIVER_LAYER:TOP
!

!TBH:  $$$  move this to ../frame?

MODULE module_wrf_top
!<DESCRIPTION>
! This module defines top-level wrf_init(), wrf_run(), and wrf_finalize()
! routines.
!</DESCRIPTION>
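!
! Editor's note: a minimal sketch (not part of this file) of how a driver
! program such as main/wrf.F is expected to call these entry points in
! sequence; the exact driver contents may differ between builds:
!
!   PROGRAM wrf
!      USE module_wrf_top
!      CALL wrf_init        ! read namelist, allocate and initialize head_grid
!      CALL wrf_run         ! integrate head_grid over the full simulation period
!      CALL wrf_finalize    ! shut down I/O, time manager, and communications
!   END PROGRAM wrf
!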

   USE module_machine
   USE module_domain
   USE module_integrate
   USE module_driver_constants
   USE module_configure

   USE module_timing
   USE module_wrf_error

#ifdef DM_PARALLEL
   USE module_dm
#endif

   IMPLICIT NONE

   REAL    :: time

   INTEGER :: loop , &
              levels_to_process

   TYPE (domain) , POINTER :: keep_grid, grid_ptr, null_domain
   TYPE (grid_config_rec_type), SAVE :: config_flags
   INTEGER                 :: number_at_same_level
   INTEGER                 :: time_step_begin_restart

   INTEGER :: max_dom , domain_id , fid , oid , idum1 , idum2 , ierr
   INTEGER :: debug_level
   LOGICAL :: input_from_file

#ifdef DM_PARALLEL
   INTEGER                 :: nbytes
   INTEGER, PARAMETER      :: configbuflen = 4* CONFIG_BUF_LEN
   INTEGER                 :: configbuf( configbuflen )
   LOGICAL , EXTERNAL      :: wrf_dm_on_monitor
#endif

   CHARACTER (LEN=80)      :: rstname
   CHARACTER (LEN=80)      :: message

   INTERFACE
     SUBROUTINE Setup_Timekeeping( grid )
      USE module_domain
      TYPE(domain), POINTER :: grid
     END SUBROUTINE Setup_Timekeeping
   END INTERFACE


CONTAINS


   SUBROUTINE wrf_init( no_init1 )
!<DESCRIPTION>
!     WRF initialization routine.
!</DESCRIPTION>
     LOGICAL, OPTIONAL, INTENT(IN) :: no_init1
#include "version_decl"

!<DESCRIPTION>
! Program_name, a global variable defined in frame/module_domain.F, is
! set, then a routine <a href=init_modules.html>init_modules</a> is
! called. This calls all the init programs that are provided by the
! modules that are linked into WRF.  These include initialization of
! external I/O packages.   Also, some key initializations for
! distributed-memory parallelism occur here if DM_PARALLEL is specified
! in the compile: setting up I/O quilt processes to act as I/O servers
! and dividing up MPI communicators among those as well as initializing
! external communication packages such as RSL or RSL_LITE.
!
!</DESCRIPTION>

   program_name = "WRF " // TRIM(release_version) // " MODEL"

   ! Initialize WRF modules:
   ! Phase 1 returns after MPI_INIT() (if it is called)
   IF ( .NOT. PRESENT( no_init1 ) ) THEN
     CALL init_modules(1)
     ! Initialize utilities (time manager, etc.)
     CALL WRFU_Initialize( defaultCalendar=WRFU_CAL_GREGORIAN )
   ENDIF
   ! Phase 2 resumes after MPI_INIT() (if it is called)
   CALL init_modules(2)
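   ! Editor's note (illustrative, not in the original source): because the
   ! test above uses PRESENT(no_init1), a caller that has already performed
   ! phase-1 initialization elsewhere (e.g. a coupling driver that set up
   ! MPI and the time manager itself) would invoke
   !    CALL wrf_init( no_init1 = .TRUE. )
   ! so that only phase 2 of init_modules is executed here.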

!<DESCRIPTION>
! The wrf namelist.input file is read and stored in the USE associated
! structure model_config_rec, defined in frame/module_configure.F, by the
! call to <a href=initial_config.html>initial_config</a>.  On distributed
! memory parallel runs this is done only on one processor, and then
! broadcast as a buffer.  For distributed-memory, the broadcast of the
! configuration information is accomplished by first putting the
! configuration information into a buffer (<a
! href=get_config_as_buffer.html>get_config_as_buffer</a>), broadcasting
! the buffer, then setting the configuration information (<a
! href=set_config_as_buffer.html>set_config_as_buffer</a>).
!
!</DESCRIPTION>

#ifdef DM_PARALLEL
   IF ( wrf_dm_on_monitor() ) THEN
     CALL initial_config
   ENDIF
   CALL get_config_as_buffer( configbuf, configbuflen, nbytes )
   CALL wrf_dm_bcast_bytes( configbuf, nbytes )
   CALL set_config_as_buffer( configbuf, configbuflen )
   CALL wrf_dm_initialize
#else
   CALL initial_config
#endif

!<DESCRIPTION>
! Among the configuration variables read from the namelist is
! debug_level. This is retrieved using nl_get_debug_level (Registry
! generated and defined in frame/module_configure.F).  The value is then
! used to set the debug-print information level for use by <a
! href=wrf_debug.html>wrf_debug</a> throughout the code. Debug_level
! of zero (the default) causes no information to be printed when the
! model runs. The higher the number (up to 1000) the more information is
! printed.
!
!</DESCRIPTION>

   CALL nl_get_debug_level ( 1, debug_level )
   CALL set_wrf_debug_level ( debug_level )
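   ! Editor's note (illustrative, not in the original source): with the
   ! default debug_level of 0, a call such as
   !    CALL wrf_debug ( 100 , 'diagnostic message' )
   ! prints nothing; setting debug_level to 100 or more in namelist.input
   ! makes messages at that level (and lower) appear.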

   ! allocate and configure the mother domain

   NULLIFY( null_domain )

!<DESCRIPTION>
! RSL is required for WRF nesting options.
! The non-MPI build that allows nesting is only supported on machines
! with the -DSTUBMPI option.  Check to see if the WRF model is being asked
! for a multi-domain run (max_dom > 1, from the namelist).  If so,
! then we check to make sure that we are under the parallel
! run option or we are on an acceptable machine.
!</DESCRIPTION>

   CALL nl_get_max_dom( 1, max_dom )
   IF ( max_dom > 1 ) THEN
#if ( ! defined(DM_PARALLEL)  &&   ! defined(STUBMPI) )
   CALL wrf_error_fatal( &
     'nesting requires either an MPI build or use of the -DSTUBMPI option' )
#endif
   END IF

!<DESCRIPTION>
! The top-most domain in the simulation is then allocated and configured
! by calling <a href=alloc_and_configure_domain.html>alloc_and_configure_domain</a>.
! Here, in the case of this root domain, the routine is passed the
! globally accessible pointer to TYPE(domain), head_grid, defined in
! frame/module_domain.F.  The parent is null and the child index is given
! as negative, signifying none.  Afterwards, because the call to
! alloc_and_configure_domain may modify the model's configuration data
! stored in model_config_rec, the configuration information is again
! repacked into a buffer, broadcast, and unpacked on each task (for
! DM_PARALLEL compiles). The call to <a
! href=setup_timekeeping.html>setup_timekeeping</a> for head_grid relies
! on this configuration information, and it must occur after the second
! broadcast of the configuration information.
!
!</DESCRIPTION>
   CALL       wrf_message ( program_name )
   CALL       wrf_debug ( 100 , 'wrf: calling alloc_and_configure_domain ' )
   CALL alloc_and_configure_domain ( domain_id  = 1 ,                  &
                                     grid       = head_grid ,          &
                                     parent     = null_domain ,        &
                                     kid        = -1                   )

   CALL       wrf_debug ( 100 , 'wrf: calling model_to_grid_config_rec ' )
   CALL model_to_grid_config_rec ( head_grid%id , model_config_rec , config_flags )
   CALL       wrf_debug ( 100 , 'wrf: calling set_scalar_indices_from_config ' )
   CALL set_scalar_indices_from_config ( head_grid%id , idum1, idum2 )
   CALL       wrf_debug ( 100 , 'wrf: calling init_wrfio' )
   CALL init_wrfio

#ifdef DM_PARALLEL
   CALL get_config_as_buffer( configbuf, configbuflen, nbytes )
   CALL wrf_dm_bcast_bytes( configbuf, nbytes )
   CALL set_config_as_buffer( configbuf, configbuflen )
#endif

   CALL Setup_Timekeeping (head_grid)

!<DESCRIPTION>
! The head grid is initialized with read-in data through the call to <a
! href=med_initialdata_input.html>med_initialdata_input</a>, which is
! passed the pointer head_grid and a locally declared configuration data
! structure, config_flags, that is set by a call to <a
! href=model_to_grid_config_rec.html>model_to_grid_config_rec</a>.  It is
! also necessary that the indices into the 4d tracer arrays such as
! moisture be set with a call to <a
! href=set_scalar_indices_from_config.html>set_scalar_indices_from_config</a>
! prior to the call to initialize the domain.  Both of these calls are
! told which domain they are setting up for by passing in the integer id
! of the head domain as <tt>head_grid%id</tt>, which is 1 for the
! top-most domain.
!
! In the case that write_restart_at_0h is set to true in the namelist,
! the model simply generates a restart file using the just read-in data
! and then shuts down. This is used for ensemble breeding, and is not
! typically enabled.
!
!</DESCRIPTION>

   CALL med_initialdata_input( head_grid , config_flags )

   IF ( config_flags%write_restart_at_0h ) THEN
      CALL med_restart_out ( head_grid, config_flags )
#ifndef AUTODOC_BUILD
! prevent this from showing up before the call to integrate in the autogenerated call tree
      CALL wrf_debug ( 0 , ' 0 h restart only wrf: SUCCESS COMPLETE WRF' )
! TBH:  $$$ Unscramble this later...
! TBH:  $$$ Need to add state to avoid calling wrf_finalize() twice when ESMF
! TBH:  $$$ library is used.  Maybe just set clock stop_time=start_time and
! TBH:  $$$ do not call wrf_finalize here...
      CALL wrf_finalize( )
#endif
   END IF

   ! set default values for subtimes
   head_grid%start_subtime = domain_get_start_time ( head_grid )
   head_grid%stop_subtime = domain_get_stop_time ( head_grid )

   END SUBROUTINE wrf_init



   SUBROUTINE wrf_run( )
!<DESCRIPTION>
!     WRF run routine.
!</DESCRIPTION>

!<DESCRIPTION>
! Once the top-level domain has been allocated, configured, and
! initialized, the model time integration is ready to proceed.  The start
! and stop times for the domain are set to the start and stop time of the
! model run, and then <a href=integrate.html>integrate</a> is called to
! advance the domain forward through that specified time interval.  On
! return, the simulation is completed.  A Mediation Layer-provided
! subroutine, <a href=med_shutdown_io.html>med_shutdown_io</a> is called
! to allow the model to do any I/O specific cleanup and shutdown, and
! then the WRF Driver Layer routine <a
! href=wrf_shutdown.html>wrf_shutdown</a> (quilt servers would be
! directed to shut down here) is called to properly end the run,
! including shutting down the communications (for example, most comm
! layers would call MPI_FINALIZE at this point if they're using MPI).
!
!</DESCRIPTION>

   !  The forecast integration for the coarsest grid is now started.  The
   !  integration is from the first step (1) to the last step of the simulation.

   CALL       wrf_debug ( 100 , 'wrf: calling integrate' )
   CALL integrate ( head_grid )
   CALL       wrf_debug ( 100 , 'wrf: back from integrate' )

   END SUBROUTINE wrf_run



   SUBROUTINE wrf_finalize( no_shutdown )
!<DESCRIPTION>
!     WRF finalize routine.
!</DESCRIPTION>
     LOGICAL, OPTIONAL, INTENT(IN) :: no_shutdown
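
   ! Editor's note (illustrative, not in the original source): the test
   ! below uses PRESENT(no_shutdown), so a caller that manages its own
   ! shutdown (e.g. when WRF runs under an external coupler such as ESMF)
   ! would invoke
   !    CALL wrf_finalize( no_shutdown = .TRUE. )
   ! which skips WRFU_Finalize and wrf_shutdown.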

   ! shut down I/O
   CALL med_shutdown_io ( head_grid , config_flags )
   CALL       wrf_debug ( 100 , 'wrf: back from med_shutdown_io' )

   CALL       wrf_debug (   0 , 'wrf: SUCCESS COMPLETE WRF' )

   ! Call wrf_shutdown() (which calls MPI_FINALIZE()
   ! for DM parallel runs).
   IF ( .NOT. PRESENT( no_shutdown ) ) THEN
     ! Finalize time manager
     CALL WRFU_Finalize
     CALL wrf_shutdown
   ENDIF

   END SUBROUTINE wrf_finalize


END MODULE module_wrf_top
