Index: trunk/MESOSCALE_DEV/PLOT/PYTHON/map_mars_polar.py
===================================================================
--- trunk/MESOSCALE_DEV/PLOT/PYTHON/map_mars_polar.py	(revision 296)
+++ trunk/MESOSCALE_DEV/PLOT/PYTHON/map_mars_polar.py	(revision 296)
@@ -0,0 +1,83 @@
+#!/usr/bin/env python
+# Plotting script for a Mars polar mesoscale simulation (Python 2 era:
+# print statements, basemap-based local helpers `myplot`/`mymath`).
+# Produces three PNG figures in /u/aslmd/WWW/antichambre/ :
+#   figure 1 : domain topography (km) on an orthographic North Pole view
+#   figure 2 : near-surface winds + sensible heat flux (polar stereographic)
+#   figure 3 : water vapour column (MTOT) at two UTC times
+
+##########################################################################
+import  numpy                           as np
+import  myplot                          as myp
+import  mymath				as mym
+import  netCDF4
+import  matplotlib.pyplot as plt
+##########################################################################
+# Input files: hard-coded absolute paths to WRF outputs interpolated to
+# altitude above ground level ("_zabg").
+namefile  = '/home/aslmd/POLAR/POLAR_APPERE_highres/wrfout_d01_2024-03-04_06:00:00_zabg'
+namefile2 = '/home/aslmd/POLARWATERCYCLE/wrfout_d01_2024-05-03_01:00:00_zabg' 
+nc  = netCDF4.Dataset(namefile)
+[lon2d,lat2d] = myp.getcoord2d(nc)
+##########################################################################
+# Time and vertical indices used for the wind fields plotted in figure 2.
+ntime = 5
+nvert = 1
+u   = nc.variables['Um'   ][ntime,nvert,:,:]
+v   = nc.variables['Vm'   ][ntime,nvert,:,:]
+##########################################################################
+plt.figure(1)
+##########################################################################
+[lon2d,lat2d] = myp.getcoord2d(nc)
+[wlon,wlat] = myp.latinterv("North_Pole")
+#plt.title("Polar mesoscale domain")
+# Orthographic overview; HGT is converted from m to km for plotting.
+m = myp.define_proj("ortho",wlon,wlat,back="vishires")
+x, y = m(lon2d, lat2d)
+m.pcolor(x, y, nc.variables['HGT'][0,:,:] / 1000.)
+###########################################################################
+myp.makeplotpng("mars_polar_mesoscale_1",folder="/u/aslmd/WWW/antichambre/",pad_inches_value=0.15)
+###########################################################################
+plt.figure(2)
+plt.subplot(121)
+[lon2d,lat2d] = myp.getcoord2d(nc)
+[wlon,wlat] = myp.latinterv("Close_North_Pole")
+plt.title("Near-surface winds at Ls=60"+mym.deg())
+m = myp.define_proj("npstere",wlon,wlat,back="vishires")
+x, y = m(lon2d, lat2d)
+myp.vectorfield(u, v, x, y, stride=5, csmooth=6, scale=15., factor=200., color='darkblue')
+###########################################################################
+# Right panel: zoom poleward of 84N.  HFX is negated so that, per the
+# commented colorbar label below, positive contours are a downward flux.
+plt.subplot(122)
+[wlon,wlat] = [[-180.,180.],[84.,90.]]
+plt.title("+ sensible heat flux (W/m2)")
+m = myp.define_proj("npstere",wlon,wlat,back="vishires")
+x, y = m(lon2d, lat2d)
+zeplot = m.contour(x, y, -nc.variables['HFX'][ntime,:,:],[-2.,0.,2.,4.,6.,8.,10.,12.],cmap=plt.cm.Reds)
+plt.clabel(zeplot, inline=1, inline_spacing=1, fontsize=7, fmt='%0i')
+myp.vectorfield(u, v, x, y, stride=2, scale=15., factor=200., color='darkblue')
+#plt.colorbar(pad=0.1).set_label('Downward sensible heat flux [W/m2]')
+###########################################################################
+myp.makeplotpng("mars_polar_mesoscale_2",folder="/u/aslmd/WWW/antichambre/",pad_inches_value=0.35)
+###########################################################################
+# Figure 3 uses the second (water cycle) simulation.
+nc = netCDF4.Dataset(namefile2)
+[lon2d,lat2d] = myp.getcoord2d(nc)
+##########################################################################
+plt.figure(3)
+###########################################################################
+# Window: full longitude span, latitudes trimmed 7 degrees at the south edge.
+[wlon,wlat] = [[mym.min(lon2d),mym.max(lon2d)],[mym.min(lat2d)+7.,mym.max(lat2d)]]
+#plt.figure(2)
+#plt.title("Water vapor (pr.mic)")
+#field = np.array(nc.variables['MTOT'])
+#nnn = field.shape[2]
+#ye = plt.contour(	np.arange(field.shape[0]),\
+#			np.linspace(wlat[0],wlat[1],nnn),\
+#			np.transpose(mym.mean(field,axis=1)),\
+#			np.arange(0.,100.,5.))
+#plt.clabel(ye, inline=1, inline_spacing=1, fontsize=7, fmt='%0i')
+############################################################################
+# Two side-by-side panels for time indices 3 and 15 (range(3,23,12));
+# the title converts the index to UTC via i+1.  Python 2 print statements.
+sub = 121
+for i in range(3,23,12):
+	print i,sub
+	plt.subplot(sub)
+	plt.title("H2Ovap (pr.mic) UTC "+str(i+1)+":00")
+	m = myp.define_proj("npstere",wlon,wlat,back="vishires")
+	x, y = m(lon2d, lat2d)
+	yeah = m.contour(x, y, nc.variables['MTOT'][i,:,:],np.arange(30.,90.,10.),linewidths=1.0)
+	plt.clabel(yeah, inline=1, inline_spacing=1, width=1.0, fontsize=7, fmt='%0i')
+	sub += 1
+	print sub
+############################################################################
+myp.makeplotpng("mars_polar_mesoscale_3",folder="/u/aslmd/WWW/antichambre/", pad_inches_value=0.15)
+############################################################################
+
+#yeah = m.contour(x, y, mym.mean(nc.variables['MTOT'],axis=0),np.arange(0.,75.,5.),cmap=plt.cm.gist_rainbow,linewidths=1.5)
Index: trunk/MESOSCALE_DEV/PLOT/PYTHON/mylib/ecmwf.py
===================================================================
--- trunk/MESOSCALE_DEV/PLOT/PYTHON/mylib/ecmwf.py	(revision 296)
+++ trunk/MESOSCALE_DEV/PLOT/PYTHON/mylib/ecmwf.py	(revision 296)
@@ -0,0 +1,101 @@
+import urllib
+import urllib2
+import time
+import datetime
+
+class ECMWFDataServer:
+    '''Minimal Python 2 client for the legacy ECMWF data-portal batch API.
+
+    Form-encodes each request, POSTs it to *portal* with the account
+    credentials *token*/*email*, and (in retrieve) polls the server until
+    the requested dataset is ready, then downloads it.
+    '''
+    def __init__(self,portal,token,email):
+        self.version = '0.3'
+        self.portal  = portal
+        self.token   = token
+        self.email   = email
+
+    def _call(self,action,args):
+        # POST a single API *action* together with *args* and return the
+        # parsed reply.  Raises RuntimeError when the reply carries 'error';
+        # logs the reply's 'message' when present.
+
+        params = {'_token'   : self.token,
+                  '_email'   : self.email,
+                  '_action'  : action,
+                  '_version' : self.version}
+        params.update(args)
+
+        data = urllib.urlencode(params)
+        req = urllib2.Request(self.portal, data)
+        response = urllib2.urlopen(req)
+
+        json = response.read();
+
+        undef = None;
+        # SECURITY NOTE(review): the server reply is parsed with eval(), so a
+        # compromised or spoofed server can execute arbitrary code here.
+        # `undef` is presumably bound so that a bare `undef` token in the
+        # reply evaluates to None -- verify before swapping eval for a real
+        # JSON parser.
+        json = eval(json)
+
+        if json != None:
+            if 'error' in json:
+                raise RuntimeError(json['error'])
+            if 'message' in json:
+                self.put(json['message'])
+
+        return json
+
+    def put(self,*args):
+        # Timestamped console logging (Python 2 print statements).
+        print datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
+        for a in args:
+            print a,
+        print
+
+
+    def retrieve(self,args):
+        '''Submit the retrieval described by dict *args*, then download it.
+
+        Polls the request status, sleeping 1 s longer after each poll
+        (capped at 60 s).  When a 'result' is available it is downloaded to
+        args['target'] and the byte count is checked; the server-side
+        request is then deleted.  Raises RuntimeError on a size mismatch or
+        when the request ends in status 'aborted'.
+        '''
+        self.put("ECMWF data server batch tool version",self.version);
+        user = self._call("user_info",{});
+        self.put("Welcome to",user['name'], "from" , user['organisation']);
+
+        r = self._call('retrieve',args)
+        rid = r['request']
+
+        last  = ''
+        sleep = 0
+        while r['status'] != 'complete' and r['status'] != 'aborted':
+            text = r['status'] + '.'
+            if 'info' in r and r['info'] != None:
+                text = text + ' ' + r['info'] 
+
+            # Only log when the status text actually changed.
+            if text != last:
+                self.put("Request",text)
+                last = text
+
+            time.sleep(sleep)
+            r = self._call('status',{'request':rid})
+            if sleep < 60:
+                sleep = sleep + 1
+
+        if r['status'] != last:
+            self.put("Request",r['status'])
+
+        if 'reason' in r:
+            for m in r['reason']:
+                self.put(m)
+
+        if 'result' in r:
+            size = long(r['size'])
+            self.put("Downloading",self._bytename(size))
+            done = self._transfer(r['result'],args['target'])
+            self.put("Done")
+            if done != size:
+                raise RuntimeError("Size mismatch: " + str(done) + " and " + str(size))
+
+        self._call('delete',{'request':rid})
+
+        if r['status'] == 'aborted':
+            raise RuntimeError("Request aborted")
+
+    def _transfer(self,url,path):
+        # Download *url* to *path*; return the size the server reported (bytes).
+        result =  urllib.urlretrieve(url,path)
+        return long(result[1]['content-length'])
+
+    def _bytename(self,size):   
+        # Render *size* bytes human-readably, e.g. 1536 -> "1.5 Kbytes".
+        next = {'':'K','K':'M','M':'G','G':'T','T':'P'}
+        l    = ''
+        size = size*1.0
+        while 1024 < size:
+            l = next[l]
+            size = size / 1024
+        return "%g %sbyte%s" % (size,l,'s')
+
+
Index: trunk/MESOSCALE_DEV/PLOT/PYTHON/mylib/map.py
===================================================================
--- trunk/MESOSCALE_DEV/PLOT/PYTHON/mylib/map.py	(revision 296)
+++ trunk/MESOSCALE_DEV/PLOT/PYTHON/mylib/map.py	(revision 296)
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+# Render three orthographic views of the "Africa" window, one per
+# background style (blueclouds, blue, justclouds), each saved as a
+# PostScript file via the local `myplot` helpers.
+from myplot import latinterv, define_proj, makeplotres
+[wlon,wlat] = latinterv("Africa")
+define_proj("ortho",wlon,wlat,back="blueclouds")
+makeplotres("blueclouds",ext='ps')
+define_proj("ortho",wlon,wlat,back="blue")
+makeplotres("blue",ext='ps')
+define_proj("ortho",wlon,wlat,back="justclouds")
+makeplotres("justclouds",ext='ps')
+
Index: trunk/MESOSCALE_DEV/PLOT/PYTHON/mylib/map_ecmwf.py
===================================================================
--- trunk/MESOSCALE_DEV/PLOT/PYTHON/mylib/map_ecmwf.py	(revision 296)
+++ trunk/MESOSCALE_DEV/PLOT/PYTHON/mylib/map_ecmwf.py	(revision 296)
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+# Download an ECMWF (ERA-Interim) field via myecmwf.get_ecmwf and contour it
+# on a map for every (variable, level, time) combination.  Python 2 script.
+# NOTE(review): the successive re-assignments below are a poor man's config
+# switch -- only the LAST value of var / date / dataset / area etc. is used.
+##########################################################################
+var = ["151","146","167"]
+var = ["167"]
+var = ["78"]
+var = ["137"]
+#var = ["174"]   # albedo: does not work
+lev = ["700.","850."]
+tim = ["00"]
+date = ['10','08','2010','10','08','2010']
+date = ['01','09','2009','01','09','2009']
+#date = ['01','09','2010','01','09','2010']
+dataset = ["an","pl","0"]
+dataset = ["fc","sfc","3"]
+##########################################################################
+proj = "cyl"  #"moll" "ortho" "lcc"
+#proj = "ortho"
+#proj = "moll"
+area = "Europe"
+area = "Africa"
+area = "Central_America"
+#area = "Southern_Hemisphere"
+#area = "Northern_Hemisphere"
+#area = "Whole_No_High"
+area = "Whole"
+back="blue"
+#back="bw"
+#back="moc"
+##########################################################################
+
+
+##########################################################################
+import 	numpy 				as np
+import 	matplotlib.pyplot 		as plt
+import  myplot				as myp
+import  myecmwf				as mye
+##########################################################################
+# Surface datasets have no pressure levels; 9999. is a placeholder level.
+if dataset[1] == "sfc": lev = [9999.]
+[wlon,wlat] = myp.latinterv(area)
+nc = mye.get_ecmwf (var, dataset, wlat, wlon, lev, date, tim) 
+##########################################################################
+[lon2d,lat2d] = myp.getcoord2d (nc,nlat='lat',nlon='lon',is1d=True)
+step=10.
+##########################################################################
+# NOTE(review): the loop below mixes tab and space indentation (tabs on the
+# for-lines, spaces/tab+space inside).  Python 2 tolerates it assuming
+# tab == 8 columns; do not re-indent casually and do not run under python -tt.
+ntime = 0
+for i in range( np.array(var).size ):
+	for z in range( np.array(lev).size ):
+		for t in range( np.array(tim).size ):
+
+                    field, error = myp.reducefield( myp.getfield(nc,'var'+var[i]), d4=t, d3=z )
+                    if not error:
+                        ### Map projection
+                        m = myp.define_proj(proj,wlon,wlat,back=back)
+                        x, y = m(lon2d, lat2d)
+		        zeplot = m.contour(x, y, field, 10) #15
+		        plt.clabel(zeplot, inline=1, inline_spacing=1, fontsize=7, fmt='%0i')
+		        #plt.colorbar(fraction=0.05,pad=0.1)
+		        plt.title(mye.ecmwf_title_field(var[i]))
+                        myp.makeplotres(str(var[i])+str(lev[z])+str(tim[t]),res=100.,pad_inches_value=0.35)
Index: trunk/MESOSCALE_DEV/PLOT/PYTHON/mylib/myecmwf.py
===================================================================
--- trunk/MESOSCALE_DEV/PLOT/PYTHON/mylib/myecmwf.py	(revision 296)
+++ trunk/MESOSCALE_DEV/PLOT/PYTHON/mylib/myecmwf.py	(revision 296)
@@ -0,0 +1,53 @@
+def ecmwf_title_field (var):
+	### http://www.ecmwf.int/services/archive/d/table/grib_table_2_versions
+	### http://www.ecmwf.int/services/archive/d/parameters/mars=1/order=grib_parameter/table=128/
+	### http://www.atmos.albany.edu/facstaff/rmctc/ecmwfGrib/ecmwfgrib128.tbl
+	if   var == "151": name="Mean Sea Level Pressure (Pa)"          # ["151", "fc", "3", "sfc"]
+	elif var == "146": name="Sensible Heat Flux (W m^-2)"           # ["146", "fc", "3", "sfc"]
+	elif var == "130": name="Atmospheric Temperature (K)"           # ["130", "an", "0", "pl" ]
+	elif var == "167": name="2m Atmospheric Temperature (K)"        # ["167", "fc", "3", "sfc"]
+        elif var == "78": name="Total column liquid water (kg/kg)"
+        elif var == "137": name="Total column water vapour (kg/m2)"
+	return name
+
+def split_char_add (var,add):
+	import  numpy as np
+	varchar = ""
+	for i in range( np.array(var).size ):
+		varchar = varchar + str(var[i]) + add + "/"
+	return varchar
+
+def split_char (var):
+	varchar = split_char_add (var,"")
+	return varchar
+
+def get_ecmwf (var, dataset, wlat, wlon, lev, date, tim):
+	## Retrieve ERA-Interim fields from the ECMWF data portal and return
+	## them as an opened netCDF4 Dataset.
+	##   var     : list of GRIB parameter codes (strings), table 128
+	##   dataset : [type, levtype, step], e.g. ["fc","sfc","3"]
+	##   wlat    : [lat0, lat1] and wlon : [lon0, lon1] in degrees;
+	##             NOTE(review): passed to MARS as lat0/lon0/lat1/lon1 --
+	##             confirm this matches the expected North/West/South/East
+	##             ordering for the windows latinterv returns.
+	##   lev     : list of pressure levels (placeholder for sfc datasets)
+	##   date    : [day,month,year, day,month,year] start/end as strings
+	##   tim     : list of analysis times, e.g. ["00"]
+	## Side effects: writes output.grib, converts it to output.nc with the
+	## external `cdo` tool (must be on PATH) through a shell command, and
+	## leaves output.nc on disk.
+	## SECURITY NOTE(review): the account token and e-mail are hard-coded
+	## below, i.e. credentials are committed to the repository.
+	from ecmwf 	import ECMWFDataServer
+	from os		import system
+	from netCDF4	import Dataset
+	gbfile = 'output.grib'
+	ncfile = 'output.nc'
+	######
+	timchar = split_char (tim)
+	levchar = split_char (lev)
+	varchar = split_char_add (var,".128")
+	##########################################################################
+	## First register at http://data-portal.ecmwf.int/data/d/license/interim/
+	## Then get your token at http://data-portal.ecmwf.int/data/d/token/interim_daily/
+	server = ECMWFDataServer('http://data-portal.ecmwf.int/data/d/dataserver/','6948746482e9e3e29d64211e06329a2e','spiga@lmd.jussieu.fr')
+	server.retrieve({
+		'dataset'  : "interim_daily",
+		'date'     : date[2] + date[1] + date[0] + "/to/" + date[5] + date[4] + date[3],
+		'time'     : timchar,
+		'step'     : dataset[2],
+		'levtype'  : dataset[1],
+		'type'     : dataset[0],
+		'param'    : varchar,
+		'levelist' : levchar,   
+		'area'     : str(wlat[0])+"/"+str(wlon[0])+"/"+str(wlat[1])+"/"+str(wlon[1]),
+		'target'   : gbfile
+	})
+	system("cdo -f nc copy "+gbfile+" "+ncfile+" ; rm -f "+gbfile)
+	nc = Dataset(ncfile)
+	#system("rm -f "+ncfile)
+	return nc
Index: trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/borrowed_code/from_wrf_to_grads.f90
===================================================================
--- trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/borrowed_code/from_wrf_to_grads.f90	(revision 296)
+++ trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/borrowed_code/from_wrf_to_grads.f90	(revision 296)
@@ -0,0 +1,378 @@
+MODULE from_wrf_to_grads
+
+CONTAINS
+
+!--------------------------------------------------------
+
+  ! Interpolate a 3D field column by column onto fixed vertical levels.
+  !   data_in (nx_in,ny_in,nz_in)    : field on model levels
+  !   z_in    (nx_in,ny_in,nz_in)    : vertical coordinate of each input level
+  !   z_out   (nz_out)               : target levels
+  !   data_out(nx_out,ny_out,nz_out) : result; missing_value where a target
+  !                                    level lies outside the input column
+  !   vertical_type                  : 'p' = coordinate decreasing with index
+  !                                    (pressure-like), else increasing
+  ! NOTE(review): the horizontal loops run over nx_in/ny_in while writing
+  ! into data_out(nx_out,ny_out,:) -- assumes nx_out==nx_in, ny_out==ny_in.
+  ! NOTE(review): data_out is intent(out), hence undefined on entry; the
+  ! copy into data_out_z is never used because interp_1d assigns every
+  ! element of its output.  Kept as in the original.
+  subroutine interp_to_z( data_in , nx_in , ny_in , nz_in , &
+                              data_out, nx_out, ny_out, nz_out, &
+                              z_in, z_out, missing_value, &
+                              vertical_type, debug   )
+  implicit none
+  integer, intent(in)                                  :: nx_in , ny_in , nz_in 
+  integer, intent(in)                                  :: nx_out, ny_out, nz_out
+  real, intent(in)                                     :: missing_value
+  real, dimension(nx_in , ny_in , nz_in ), intent(in ) :: data_in, z_in
+  real, dimension(nx_out, ny_out, nz_out), intent(out) :: data_out
+  real, dimension(nz_out), intent(in)                  :: z_out
+  logical, intent(in)                                  :: debug
+  character (len=1)                                    :: vertical_type
+
+  real, dimension(nz_in)                               :: data_in_z, zz_in
+  real, dimension(nz_out)                              :: data_out_z
+
+  integer :: i,j,k
+
+    do i=1,nx_in
+    do j=1,ny_in
+
+      ! Gather one column into contiguous work arrays.
+      do k=1,nz_in
+        data_in_z(k) = data_in(i,j,k)
+        zz_in(k) = z_in(i,j,k)
+      enddo
+      do k=1,nz_out
+        data_out_z(k) = data_out(i,j,k)
+      enddo
+
+      call interp_1d( data_in_z, zz_in, nz_in, &
+                      data_out_z, z_out, nz_out, &
+                      vertical_type, missing_value )
+
+      ! Scatter the interpolated column back.
+      do k=1,nz_out
+        data_out(i,j,k) = data_out_z(k)
+      enddo
+
+
+    enddo
+    enddo
+
+  end subroutine interp_to_z
+
+!----------------------------------------------
+
+  ! Linearly interpolate column a (at coordinates xa) onto coordinates xb.
+  ! vertical_type 'p' assumes xa DECREASES with index (pressure-like);
+  ! any other value assumes xa INCREASES with index (height-like).
+  ! Target points not bracketed by consecutive xa values keep missing_value.
+  subroutine interp_1d( a, xa, na, &
+                        b, xb, nb, vertical_type, missing_value )
+  implicit none
+  integer, intent(in)              ::  na, nb
+  real, intent(in), dimension(na)  :: a, xa
+  real, intent(in), dimension(nb)  :: xb
+  real, intent(out), dimension(nb) :: b
+  real, intent(in)                 :: missing_value
+
+  integer                          :: n_in, n_out
+  logical                          :: interp
+  real                             :: w1, w2
+  character (len=1)                :: vertical_type
+
+
+  if ( vertical_type == 'p' ) then
+
+  ! Decreasing coordinate: scan for xa(n) >= xb >= xa(n+1).
+  do n_out = 1, nb
+
+    b(n_out) = missing_value
+    interp = .false.
+    n_in = 1
+
+    do while ( (.not.interp) .and. (n_in < na) )
+
+      if( (xa(n_in)   >= xb(n_out)) .and. &
+          (xa(n_in+1) <= xb(n_out))        ) then
+        interp = .true.
+        ! w1 weights the lower index; interpolation is linear in xa itself.
+        w1 = (xa(n_in+1)-xb(n_out))/(xa(n_in+1)-xa(n_in))
+        w2 = 1. - w1
+        b(n_out) = w1*a(n_in) + w2*a(n_in+1)
+      end if
+      n_in = n_in +1
+
+    enddo
+
+  enddo
+  
+  else
+
+  ! Increasing coordinate: scan for xa(n) <= xb <= xa(n+1).
+  do n_out = 1, nb
+
+    b(n_out) = missing_value
+    interp = .false.
+    n_in = 1
+
+    do while ( (.not.interp) .and. (n_in < na) )
+
+      if( (xa(n_in)   <= xb(n_out)) .and. &
+          (xa(n_in+1) >= xb(n_out))        ) then
+        interp = .true.
+        w1 = (xa(n_in+1)-xb(n_out))/(xa(n_in+1)-xa(n_in))
+        w2 = 1. - w1
+        b(n_out) = w1*a(n_in) + w2*a(n_in+1)
+      end if
+      n_in = n_in +1
+
+    enddo
+
+  enddo
+  
+  endif
+
+  end subroutine interp_1d
+
+!-------------------------------------------------------------------------
+!
+! This routines has been taken "as is" from wrf_user_fortran_util_0.f
+!
+! This routine assumes
+!    index order is (i,j,k)
+!    wrf staggering
+!    units: pressure (Pa), temperature(K), height (m), mixing ratio (kg kg{-1})
+!    availability of 3d p, t, and qv; 2d terrain; 1d half-level zeta string
+!    output units of SLP are Pa, but you should divide that by 100 for the
+!          weather weenies.
+!    virtual effects are included
+!
+! Dave
+
+      subroutine compute_seaprs ( nx , ny , nz  ,         &
+                                  z, t , p , q ,          &
+                                  sea_level_pressure,debug)
+!     &                            t_sea_level, t_surf, level )
+      IMPLICIT NONE
+!     Estimate sea level pressure.
+      INTEGER nx , ny , nz
+!f2py integer, optional, depend(z), check(shape(z,0)==nx) :: nx=shape(z,0)
+!f2py integer, optional, depend(z), check(shape(z,1)==ny) :: ny=shape(z,1)
+!f2py integer, optional, depend(z), check(shape(z,2)==nz) :: nz=shape(z,2)
+      REAL    z(nx,ny,nz), t(nx,ny,nz) , p(nx,ny,nz) , q(nx,ny,nz)
+!f2py real, intent(in) :: z
+!f2py real, intent(in), depend(nx, ny, nz) :: t
+!f2py real, intent(in), depend(nx, ny, nz) :: p
+!f2py real, intent(in), depend(nx, ny, nz) :: q
+!     The output is the 2d sea level pressure.
+      REAL    sea_level_pressure(nx,ny)
+!f2py real, intent(out), depend(nx,ny) :: sea_level_pressure
+      INTEGER level(nx,ny)
+      REAL t_surf(nx,ny) , t_sea_level(nx,ny)
+      LOGICAL debug
+!f2py logical, intent(in) :: debug
+
+!     Some required physical constants:
+
+      REAL R, G, GAMMA
+      PARAMETER (R=287.04, G=9.81, GAMMA=0.0065)
+
+!     Specific constants for assumptions made in this routine:
+
+      REAL    TC, PCONST
+      PARAMETER (TC=273.16+17.5, PCONST = 10000)
+      LOGICAL ridiculous_mm5_test
+      PARAMETER (ridiculous_mm5_test = .TRUE.)
+!      PARAMETER (ridiculous_mm5_test = .false.)
+
+!     Local variables:
+
+      INTEGER i , j , k
+      INTEGER klo , khi
+
+
+      REAL plo , phi , tlo, thi , zlo , zhi
+      REAL p_at_pconst , t_at_pconst , z_at_pconst
+      REAL z_half_lowest
+
+      REAL    , PARAMETER :: cp           = 7.*R/2.
+      REAL    , PARAMETER :: rcp          = R/cp
+      REAL    , PARAMETER :: p1000mb      = 100000.
+
+      LOGICAL  l1 , l2 , l3, found
+
+!     Find least zeta level that is PCONST Pa above the surface.  We later use this
+!     level to extrapolate a surface pressure and temperature, which is supposed
+!     to reduce the effect of the diurnal heating cycle in the pressure field.
+
+!     NOTE(review): t is converted IN PLACE here -- the +300. offset matches
+!     WRF's perturbation potential temperature convention -- so the Fortran
+!     argument is clobbered.  Python callers are unaffected because f2py
+!     declares t intent(in) and passes a copy.
+      t(:,:,:) = (t(:,:,:)+300.)*(p(:,:,:)/p1000mb)**rcp
+
+      DO j = 1 , ny
+         DO i = 1 , nx
+            level(i,j) = -1
+
+            k = 1
+            found = .false.
+            do while( (.not. found) .and. (k.le.nz))
+               IF ( p(i,j,k) .LT. p(i,j,1)-PCONST ) THEN
+                  level(i,j) = k
+                  found = .true.
+               END IF
+               k = k+1
+            END DO
+
+            IF ( level(i,j) .EQ. -1 ) THEN
+            PRINT '(A,I4,A)','Troubles finding level ',   &
+                        NINT(PCONST)/100,' above ground.'
+            PRINT '(A,I4,A,I4,A)',                        &
+                  'Problems first occur at (',i,',',j,')'
+            PRINT '(A,F6.1,A)',                           &
+                  'Surface pressure = ',p(i,j,1)/100,' hPa.'
+            STOP 'Error_in_finding_100_hPa_up'
+         END IF
+
+
+         END DO
+      END DO
+
+!     Get temperature PCONST Pa above surface.  Use this to extrapolate
+!     the temperature at the surface and down to sea level.
+
+      DO j = 1 , ny
+         DO i = 1 , nx
+
+            klo = MAX ( level(i,j) - 1 , 1      )
+            khi = MIN ( klo + 1        , nz - 1 )
+
+            IF ( klo .EQ. khi ) THEN
+               PRINT '(A)','Trapping levels are weird.'
+               PRINT '(A,I3,A,I3,A)','klo = ',klo,', khi = ',khi, &
+                            ': and they should not be equal.'
+               STOP 'Error_trapping_levels'
+            END IF
+
+!        Virtual temperatures at the two trapping levels.
+         plo = p(i,j,klo)
+         phi = p(i,j,khi)
+         tlo = t(i,j,klo)*(1. + 0.608 * q(i,j,klo) )
+         thi = t(i,j,khi)*(1. + 0.608 * q(i,j,khi) )
+!         zlo = zetahalf(klo)/ztop*(ztop-terrain(i,j))+terrain(i,j)
+!         zhi = zetahalf(khi)/ztop*(ztop-terrain(i,j))+terrain(i,j)
+         zlo = z(i,j,klo)
+         zhi = z(i,j,khi)
+
+!        NOTE(review): a linear-in-log(p) interpolation would normally read
+!        LOG(p_at_pconst/phi)/LOG(plo/phi); the multiplication below is
+!        inherited verbatim from wrf_user_fortran_util_0.f -- confirm
+!        against the upstream source before changing.
+         p_at_pconst = p(i,j,1) - pconst
+         t_at_pconst = thi-(thi-tlo)*LOG(p_at_pconst/phi)*LOG(plo/phi)
+         z_at_pconst = zhi-(zhi-zlo)*LOG(p_at_pconst/phi)*LOG(plo/phi)
+
+         t_surf(i,j) = t_at_pconst*(p(i,j,1)/p_at_pconst)**(gamma*R/g)
+         t_sea_level(i,j) = t_at_pconst+gamma*z_at_pconst
+
+         END DO
+      END DO
+
+!     If we follow a traditional computation, there is a correction to the sea level
+!     temperature if both the surface and sea level temnperatures are *too* hot.
+
+      IF ( ridiculous_mm5_test ) THEN
+         DO j = 1 , ny
+            DO i = 1 , nx
+               l1 = t_sea_level(i,j) .LT. TC
+               l2 = t_surf     (i,j) .LE. TC
+               l3 = .NOT. l1
+               IF ( l2 .AND. l3 ) THEN
+                  t_sea_level(i,j) = TC
+               ELSE
+                  t_sea_level(i,j) = TC - 0.005*(t_surf(i,j)-TC)**2
+               END IF
+            END DO
+         END DO
+      END IF
+
+!     The grand finale: ta da!
+
+      DO j = 1 , ny
+      DO i = 1 , nx
+!         z_half_lowest=zetahalf(1)/ztop*(ztop-terrain(i,j))+terrain(i,j)
+         z_half_lowest=z(i,j,1)
+         sea_level_pressure(i,j) = p(i,j,1) *              &
+                               EXP((2.*g*z_half_lowest)/   &
+                                   (R*(t_sea_level(i,j)+t_surf(i,j))))
+
+!        NOTE(review): this converts the result to hPa, contradicting the
+!        header comment above which states the output units are Pa.
+         sea_level_pressure(i,j) = sea_level_pressure(i,j)*0.01
+
+      END DO
+      END DO
+
+!    if (debug) then
+!      print *,'sea pres input at weird location i=20,j=1,k=1'
+!      print *,'t=',t(20,1,1),t(20,2,1),t(20,3,1)
+!      print *,'z=',z(20,1,1),z(20,2,1),z(20,3,1)
+!      print *,'p=',p(20,1,1),p(20,2,1),p(20,3,1)
+!      print *,'slp=',sea_level_pressure(20,1),     &
+!               sea_level_pressure(20,2),sea_level_pressure(20,3)
+!    endif
+!      print *,'t=',t(10:15,10:15,1),t(10:15,2,1),t(10:15,3,1)
+!      print *,'z=',z(10:15,1,1),z(10:15,2,1),z(10:15,3,1)
+!      print *,'p=',p(10:15,1,1),p(10:15,2,1),p(10:15,3,1)
+!      print *,'slp=',sea_level_pressure(10:15,10:15),     &
+!         sea_level_pressure(10:15,10:15),sea_level_pressure(20,10:15)
+
+      end subroutine compute_seaprs
+
+!------------------------------------------------------------------
+
+
+  ! Rotate grid-relative wind components to earth-relative components.
+  !   u,v (d1,d2,d3) : model-grid wind components
+  !   var            : only the first character is inspected -- "U" selects
+  !                    the rotated zonal component, "V" the meridional one
+  !   map_proj       : 1 = Lambert conformal (cone factor from true
+  !                    latitudes); any other value uses cone = 1
+  ! NOTE(review): data_out carries no intent attribute and is left
+  ! untouched when var starts with neither "U" nor "V".
+  subroutine rotate_wind (u,v,d1,d2,d3,var,                 &
+                          map_proj,cen_lon,xlat,xlon,       &
+                          truelat1,truelat2,data_out)
+
+  implicit none
+
+  integer, intent(in)            ::  d1, d2, d3
+
+  real, dimension(d1,d2,d3)      :: data_out
+  integer                        :: map_proj,i,j,k
+  real                           :: cen_lon, truelat1, truelat2, cone
+  real, dimension(d1,d2,d3)      :: u,v 
+  real, dimension(d1,d2)         :: xlat, xlon, diff, alpha
+
+  character (len=10), intent(in) :: var
+
+   REAL    , PARAMETER           :: pii = 3.14159265
+   REAL    , PARAMETER           :: radians_per_degree = pii/180.
+
+
+
+
+       cone = 1.                                          !  PS
+       if( map_proj .eq. 1) then                          !  Lambert Conformal mapping
+         IF (ABS(truelat1-truelat2) .GT. 0.1) THEN
+            cone=(ALOG(COS(truelat1*radians_per_degree))-            &
+                  ALOG(COS(truelat2*radians_per_degree))) /          &
+            (ALOG(TAN((90.-ABS(truelat1))*radians_per_degree*0.5 ))- &
+             ALOG(TAN((90.-ABS(truelat2))*radians_per_degree*0.5 )) )
+         ELSE
+            ! Degenerate case: the two true latitudes coincide.
+            cone = SIN(ABS(truelat1)*radians_per_degree )  
+         ENDIF
+       end if
+
+
+       ! Longitude offset from the projection's central meridian,
+       ! wrapped into [-180, 180].
+       diff = xlon - cen_lon
+
+       do i = 1, d1
+       do j = 1, d2
+         if(diff(i,j) .gt. 180.) then
+           diff(i,j) = diff(i,j) - 360.
+         end if
+         if(diff(i,j) .lt. -180.) then
+           diff(i,j) = diff(i,j) + 360.
+         end if
+       end do
+       end do
+
+       ! Rotation angle; the sign flips in the southern hemisphere.
+       do i = 1, d1
+       do j = 1, d2
+          if(xlat(i,j) .lt. 0.) then
+            alpha(i,j) = - diff(i,j) * cone * radians_per_degree
+          else
+            alpha(i,j) = diff(i,j) * cone * radians_per_degree
+          end if
+       end do
+       end do
+
+
+       if(var(1:1) .eq. "U") then
+         do k=1,d3
+           data_out(:,:,k) = v(:,:,k)*sin(alpha) + u(:,:,k)*cos(alpha)
+         end do
+       else if(var(1:1) .eq. "V") then    
+         do k=1,d3
+           data_out(:,:,k) = v(:,:,k)*cos(alpha) - u(:,:,k)*sin(alpha)
+         end do
+       end if
+
+
+  end subroutine rotate_wind
+
+END MODULE from_wrf_to_grads
Index: trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/borrowed_code/from_wrf_to_grads.instructions
===================================================================
--- trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/borrowed_code/from_wrf_to_grads.instructions	(revision 296)
+++ trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/borrowed_code/from_wrf_to_grads.instructions	(revision 296)
@@ -0,0 +1,17 @@
+# Author: Valerio Bisignanesi
+# Date: 26 Jan 2008
+
+# NB: these commands were executed in the borrowed code directory and the .so
+# file that was generated as a result moved to the wrf directory to be used by
+# the utils module in there.
+# Refer to document "F2PY Users Guide and Reference Manual" to understand what
+# the commands do.
+
+# the long way
+f2py from_wrf_to_grads.f90 -m from_wrf_to_grads -h from_wrf_to_grads.pyf
+vim from_wrf_to_grads.pyf 
+f2py -c --help-fcompiler
+f2py -c from_wrf_to_grads.pyf from_wrf_to_grads.f90 --fcompiler=gnu95
+
+# the short way
+f2py -c -m from_wrf_to_grads from_wrf_to_grads.f90 --fcompiler=gnu95
Index: trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/misc/constants.py
===================================================================
--- trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/misc/constants.py	(revision 296)
+++ trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/misc/constants.py	(revision 296)
@@ -0,0 +1,42 @@
+'''
+Physical and other constants in one place to achieve consistency across my
+scripts.
+'''
+
+import numpy as n
+
+# International Standard Atmosphere -> ISA -> isa_
+isa_levels = 7
+# geopotential height of each layer's base, m
+isa_lower_boundaries = n.array([0, 11, 20, 32, 47, 51, 71]) * 1e3
+# NB 86km (geometric) corresponds to 84.852km (geopotential)
+# I have not derived the above so I am not sure what formula for the height
+# dependency of g -> TODO verify 
+isa_upper_boundaries = n.array([11, 20, 32, 47, 51, 71, 84.852]) * 1e3
+# Pa = N m^-2 = kg m s^-2 m^-2 = kg m^-1 s^-2
+isa_sea_level_pressure = 101325
+# K
+isa_sea_level_temperature = 288.15
+# temperature lapse rate per layer, K/m 
+isa_temperature_gradient = n.array([-6.5, 0, 1, 2.8, 0, -2.8, -2]) * 1e-3
+
+
+# m^3 kg^-1 s^-2 and plus minus 0.001
+gravitational_constant = 6.6742e-11
+# m s^-2
+gravitational_acceleration = 9.80665
+
+# air
+# commonly quoted in g mol-1; stored here in kg mol-1
+nitrogen_molecular_weight          = 28.01 * 1e-3
+oxygen_molecular_weight            = 32 * 1e-3
+carbon_dioxide_molecular_weight    = 44.01 * 1e-3
+water_molecular_weight             = 18.02 * 1e-3
+air_molecular_weight               = 28.97 * 1e-3
+# J kg^-1 K^-1 (approximate)
+dry_air_gas_constant = 287
+# J K^-1 mol^-1
+universal_gas_constant = 8.3143
+# J Kg^-1 K^-1
+air_specific_heat_constant_pressure = 1004
+air_specific_heat_constant_volume = 717
Index: trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/misc/isa.py
===================================================================
--- trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/misc/isa.py	(revision 296)
+++ trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/misc/isa.py	(revision 296)
@@ -0,0 +1,92 @@
+'''
+Functions to calculate thermodynamic quantities for the International Standard
+Atmosphere.
+'''
+
+from __future__ import division
+from sys import path
+# NOTE(review): hard-coded, user-specific library path; breaks on any other
+# machine -- prefer PYTHONPATH or proper packaging.
+path.append('/Users/val/projects/vapylib')
+import numpy as n
+import constants 
+
+# Short module-level aliases for the ISA constants used throughout this file.
+isa_levels = constants.isa_levels
+lower_boundaries = constants.isa_lower_boundaries
+upper_boundaries = constants.isa_upper_boundaries
+sea_level_temperature = constants.isa_sea_level_temperature
+sea_level_pressure = constants.isa_sea_level_pressure
+temperature_gradient = constants.isa_temperature_gradient
+g = constants.gravitational_acceleration
+R = constants.dry_air_gas_constant
+
+def which_level(geopotential_height):
+    # basic check
+    if geopotential_height < lower_boundaries[0] \
+        or geopotential_height > upper_boundaries[-1]:
+        message = 'z must be > ' + str(lower_boundaries[0]) + ' and < ' \
+            + str(upper_boundaries[-1])
+        raise 'BadGeopHgt', message
+    for level_idx in range(isa_levels):
+        if geopotential_height <= upper_boundaries[level_idx]:
+            return level_idx
+    
+def temperature_calculator(geopotential_height):
+    level = which_level(geopotential_height)
+    temperature = sea_level_temperature
+    for level_idx in range(level):
+        temperature += temperature_gradient[level_idx] \
+            * (upper_boundaries[level_idx] - lower_boundaries[level_idx])
+    temperature += temperature_gradient[level] \
+            * (geopotential_height - lower_boundaries[level])
+    return temperature
+
+
+def pressure_calculator(geopotential_height):
+    '''ISA pressure (Pa) at *geopotential_height* (m).
+
+    Accumulates the hydrostatic integral of dz/T(z) analytically across each
+    ISA layer below the requested height -- a log term where the layer has a
+    non-zero lapse rate, thickness/T for isothermal layers (where the log
+    form would divide by zero) -- then applies
+    p = sea_level_pressure * exp(-g/R * integral).
+    '''
+    # following Richard's notes (eqn 3.3)
+    level = which_level(geopotential_height)
+    integral = 0
+    # Complete layers below the target height.
+    for level_idx in range(level):
+        lower_boundary_temperature = \
+            temperature_calculator(lower_boundaries[level_idx])
+        if temperature_gradient[level_idx] != 0.:
+            integral += n.log(  \
+                (lower_boundary_temperature \
+                + (upper_boundaries[level_idx] - lower_boundaries[level_idx]) \
+                * temperature_gradient[level_idx]) / lower_boundary_temperature \
+                ) / temperature_gradient[level_idx]
+        else:
+            integral += \
+             (upper_boundaries[level_idx] -lower_boundaries[level_idx]) \
+             / lower_boundary_temperature
+    # Partial contribution of the layer containing the target height.
+    lower_boundary_temperature = \
+        temperature_calculator(lower_boundaries[level])
+    if temperature_gradient[level] != 0.:
+        integral += n.log(  \
+            (lower_boundary_temperature \
+            + (geopotential_height - lower_boundaries[level]) \
+            * temperature_gradient[level]) / lower_boundary_temperature \
+            ) / temperature_gradient[level]
+    else:
+        integral += \
+         (geopotential_height -lower_boundaries[level]) \
+         / lower_boundary_temperature
+    return sea_level_pressure * n.exp( -g / R * integral)
+
+def find_height(pressure,tolerance=1):
+    residue = 1e6
+    top = upper_boundaries[-1]
+    bottom = lower_boundaries[0]
+    while abs(residue) > tolerance:
+        midpoint = (top + bottom) / 2
+        midpoint_pressure = pressure_calculator(midpoint)
+        residue = pressure - midpoint_pressure
+        #print midpoint, top, bottom, midpoint_pressure, residue, abs(residue), tolerance
+        # assuming p decreases with height...
+        if residue > 0:
+            top = midpoint
+        else:
+            bottom = midpoint
+    return int(midpoint)
+
+
+
+
Index: trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/misc/met_utils.py
===================================================================
--- trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/misc/met_utils.py	(revision 296)
+++ trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/misc/met_utils.py	(revision 296)
@@ -0,0 +1,31 @@
+'''
+Useful met formulas
+'''
+import numpy as n
+import constants as c
+
def goff_gratch(temperature):
    """Saturation water-vapour pressure (hPa) over liquid water.

    Goff-Gratch formulation (Smithsonian Tables 1984, after Goff and
    Gratch 1946). *temperature* is in Kelvin; scalars and numpy arrays
    both work.
    """
    steam_ratio = 373.16 / temperature  # steam-point temperature over T
    log10_es = (
        -7.90298 * (steam_ratio - 1)
        + 5.02808 * n.log10(steam_ratio)
        - 1.3816e-7 * (10 ** (11.344 * (1 - temperature / 373.16)) - 1)
        + 8.1328e-3 * (10 ** (-3.49149 * (steam_ratio - 1)) - 1)
        + n.log10(1013.246)
    )
    return 10 ** log10_es
+
def water_vapour_pressure_to_mix_ratio(pressure, water_vapour_pressure):
    """Mixing ratio from total pressure and water-vapour partial pressure.

    Any pressure units may be used as long as both arguments agree; the
    result is a mass mixing ratio (kg of vapour per kg of dry air).
    """
    # ratio of molecular weights (the usual 'epsilon' ~ 0.622 for Earth air)
    epsilon = c.water_molecular_weight / c.air_molecular_weight
    dry_air_pressure = pressure - water_vapour_pressure
    return epsilon * water_vapour_pressure / dry_air_pressure
Index: trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/viz/data_utils.py
===================================================================
--- trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/viz/data_utils.py	(revision 296)
+++ trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/viz/data_utils.py	(revision 296)
@@ -0,0 +1,66 @@
import os

# note: the original import said 'PyNGL_nupmy' -- a typo for PyNGL_numpy,
# the spelling used by the sibling modules in this package
import PyNGL_numpy.Nio as nio

def create_catalog(data_dir):
    """Index the wrfout netCDF files found in *data_dir*, grouped by grid.

    Inputs:
    - data_dir: directory containing netCDF files for indexing

    Outputs:
    - catalog: dictionary keyed by domain code ('dom01', 'dom02', ...);
      each entry holds a 'file_pointers' list with an open Nio file for
      every wrfout file belonging to that grid.

    KNOWN BUGS:
    PyNIO has a nasty habit of crashing spectacularly when it tries to
    open files other than netCDF, so data_dir should contain EXCLUSIVELY
    the WRF output files generated by a single run.

    Fixes relative to the original draft: added the missing ':' on the
    def line, opened files relative to data_dir rather than the current
    directory, skipped files lacking a GRID_ID attribute (previously an
    unbound-name error), replaced the broken membership/append loops
    ('array' was undefined and dict.keys().__len__() is not iterable),
    stored the file *pointer* as documented (not the bare file name),
    and returned the catalog (previously None).
    """
    catalog = {}
    for file_name in os.listdir(data_dir):
        # this is where PyNIO will fail if the file is not netCDF
        file_pointer = nio.open_file(os.path.join(data_dir, file_name))
        if not hasattr(file_pointer, 'GRID_ID'):
            continue
        domain_code = 'dom' + str(file_pointer.GRID_ID[0]).zfill(2)
        if domain_code not in catalog:
            catalog[domain_code] = {'file_pointers': []}
        catalog[domain_code]['file_pointers'].append(file_pointer)
    return catalog
+
+
+
+
+
+
+
Index: trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/viz/gfs.py
===================================================================
--- trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/viz/gfs.py	(revision 296)
+++ trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/viz/gfs.py	(revision 296)
@@ -0,0 +1,77 @@
+from viz.utils import peek
+from viz.utils import cardinal_2_month
+import pylab as p
+import matplotlib.numerix.ma as ma
+import numpy as n
+from string import zfill
+
def plot_soilw_from_gfs(file_name, cntr_lvl=None):
    """One filled-contour figure per GFS soil-moisture layer."""
    file, vars = peek(file_name, show_vars=False)
    lon = vars['lon_3'].get_value()
    lat = vars['lat_3'].get_value()
    soilw_var = vars['SOILW_3_DBLY_10']
    soilw = soilw_var.get_value()
    # hide points flagged with the fill value (e.g. over the oceans)
    soilw_m = ma.masked_where(soilw == soilw_var._FillValue, soilw)
    # reformat the GRIB initial_time stamp (presumably 'MM/DD/YYYY (HH:MM)'
    # -- confirm) as 'DD Mon YYYY HH:MM UTC'
    stamp = soilw_var.initial_time
    valid_when = '%s %s %s %s UTC' % (
      stamp[3:5], cardinal_2_month(int(stamp[:2])), stamp[6:10],
      stamp[12:17])
    for lvl_idx in range(len(soilw)):
        p.figure()
        if cntr_lvl is None:
            p.contourf(lon, lat, soilw_m[lvl_idx])
        else:
            p.contourf(lon, lat, soilw_m[lvl_idx], cntr_lvl)
        p.colorbar()
        # latitudes are stored north-to-south: flip the y-axis
        p.ylim((lat[-1], lat[0]))
        p.ylabel('degrees North')
        p.xlabel('degrees East')
        lvl_bounds = vars['lv_DBLY5_l' + str(lvl_idx)]
        p.title('Volumetric soil moisture (fraction) valid at '
          + '\n' + valid_when + ' '
          + str(lvl_bounds[0]) + '-' + str(lvl_bounds[1]) + ' cm from GFS')
    return
+
def plot_soilt_from_gfs(file_name, cntr_lvl=None):
    """One filled-contour figure per GFS soil-temperature layer."""
    file, vars = peek(file_name, show_vars=False)
    lon = vars['lon_3'].get_value()
    lat = vars['lat_3'].get_value()
    soilt_var = vars['TMP_3_DBLY_10']
    soilt = soilt_var.get_value()
    # hide points flagged with the fill value (e.g. over the oceans)
    soilt_m = ma.masked_where(soilt == soilt_var._FillValue, soilt)
    # reformat the GRIB initial_time stamp (presumably 'MM/DD/YYYY (HH:MM)'
    # -- confirm) as 'DD Mon YYYY HH:MM UTC'
    stamp = soilt_var.initial_time
    valid_when = '%s %s %s %s UTC' % (
      stamp[3:5], cardinal_2_month(int(stamp[:2])), stamp[6:10],
      stamp[12:17])
    for lvl_idx in range(len(soilt)):
        p.figure()
        if cntr_lvl is None:
            p.contourf(lon, lat, soilt_m[lvl_idx])
        else:
            p.contourf(lon, lat, soilt_m[lvl_idx], cntr_lvl)
        p.colorbar()
        # latitudes are stored north-to-south: flip the y-axis
        p.ylim((lat[-1], lat[0]))
        p.ylabel('degrees North')
        p.xlabel('degrees East')
        lvl_bounds = vars['lv_DBLY5_l' + str(lvl_idx)]
        p.title('Soil temperature (K) valid at '
          + '\n' + valid_when + ' '
          + str(lvl_bounds[0]) + '-' + str(lvl_bounds[1]) + ' cm from GFS')
    return
+
+# plot_temp_from_gfs
+# plot_wind_from_gfs
+# plot_mr_from_gfs
+# plot_ght_from_gfs
+# plot_sfc_temp_from_gfs
+# plot_sfc_wind_from_gfs
+# plot_sfc_mr_from_gfs
+# plot_mslp_from_gfs
+# plot_sfc_press_from_gfs
+# plot_land_from_gfs
+# plot_terrain_from_gfs
+# plot_skin_temp_from_gfs
Index: trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/viz/laps_press.py
===================================================================
--- trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/viz/laps_press.py	(revision 296)
+++ trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/viz/laps_press.py	(revision 296)
@@ -0,0 +1,340 @@
+from viz.utils import peek
+from viz.utils import cardinal_2_month
+from viz.utils import set_default_basemap
+import pylab as p
+import matplotlib.numerix.ma as ma
+import numpy as n
+from string import zfill
+
+a_small_number = 1e-8
+
def plot_soilw(file_name, cntr_lvl=None):
    """Map each LAPS soil-moisture layer (scaled by layer depth)."""
    file, vars = peek(file_name, show_vars=False)
    lon = vars['lon'].get_value()
    lat = vars['lat'].get_value()
    soilw = vars['soil_mois'].get_value()[0]
    # mask out everything that is not land, layer by layer
    mask = n.zeros(soilw.shape, dtype=int)
    for lvl_idx in range(len(soilw)):
        mask[lvl_idx] = vars['sea_lnd_ice_mask'].get_value()
    soilw_m = ma.masked_where(mask == 0, soilw)
    lvl_bounds = vars['soil_lvl'].get_value()
    valid_date = str(vars['valid_date'].get_value()[0])
    valid_time = zfill(str(vars['valid_time'].get_value()[0]), 4)
    # presumably valid_date is YYYYMMDD and valid_time HHMM -- confirm
    valid_when = '%s %s %s %s:%s UTC' % (
      valid_date[6:], cardinal_2_month(int(valid_date[4:6])),
      valid_date[0:4], valid_time[:2], valid_time[2:])
    m = set_default_basemap(lon, lat)
    LON, LAT = p.meshgrid(lon, lat)   # basemap wants 2d coordinate arrays
    for lvl_idx in range(len(soilw)):
        p.figure()
        # let's not forget the scaling by the layer depth
        scaled = soilw_m[lvl_idx] / lvl_bounds[lvl_idx]
        if cntr_lvl is None:
            m.contourf(LON, LAT, scaled)
        else:
            m.contourf(LON, LAT, scaled, cntr_lvl)
        m.drawcoastlines()
        m.drawmeridians(n.array(n.arange(lon.min(), lon.max() + a_small_number, 15.)), labels=[1,0,0,1])
        m.drawparallels(n.array(n.arange(lat.min(), lat.max() + a_small_number, 15.)), labels=[1,0,0,1])
        p.colorbar(orientation='horizontal', shrink=0.7, fraction=0.02, pad=0.07, aspect=50)
        # the layer spans from the previous boundary (surface for layer 0)
        if lvl_idx == 0:
            layer_top = '0'
        else:
            layer_top = str(int(round(lvl_bounds[lvl_idx-1]*100)))
        p.title('Volumetric soil moisture (fraction) valid at '
          + '\n' + valid_when + ' '
          + layer_top
          + '-' + str(int(round(lvl_bounds[lvl_idx]*100))) + ' cm'
          + ' from LAPS')
    return
+
def plot_soilt(file_name, cntr_lvl=None):
    """Map each LAPS soil-temperature layer, one figure per layer."""
    file, vars = peek(file_name, show_vars=False)
    lon = vars['lon'].get_value()
    lat = vars['lat'].get_value()
    soilt = vars['soil_temp'].get_value()[0]
    # mask out everything that is not land, layer by layer
    mask = n.zeros(soilt.shape, dtype=int)
    for lvl_idx in range(len(soilt)):
        mask[lvl_idx] = vars['sea_lnd_ice_mask'].get_value()
    soilt_m = ma.masked_where(mask == 0, soilt)
    lvl_bounds = vars['soil_lvl'].get_value()
    valid_date = str(vars['valid_date'].get_value()[0])
    valid_time = zfill(str(vars['valid_time'].get_value()[0]), 4)
    # presumably valid_date is YYYYMMDD and valid_time HHMM -- confirm
    valid_when = '%s %s %s %s:%s UTC' % (
      valid_date[6:], cardinal_2_month(int(valid_date[4:6])),
      valid_date[0:4], valid_time[:2], valid_time[2:])
    m = set_default_basemap(lon, lat)
    LON, LAT = p.meshgrid(lon, lat)   # basemap wants 2d coordinate arrays
    for lvl_idx in range(len(soilt)):
        p.figure()
        if cntr_lvl is None:
            m.contourf(LON, LAT, soilt_m[lvl_idx])
        else:
            m.contourf(LON, LAT, soilt_m[lvl_idx], cntr_lvl)
        m.drawcoastlines()
        m.drawmeridians(n.array(n.arange(lon.min(), lon.max() + a_small_number, 15.)), labels=[1,0,0,1])
        m.drawparallels(n.array(n.arange(lat.min(), lat.max() + a_small_number, 15.)), labels=[1,0,0,1])
        p.colorbar(orientation='horizontal', shrink=0.7, fraction=0.02, pad=0.07, aspect=50)
        # the layer spans from the previous boundary (surface for layer 0)
        if lvl_idx == 0:
            layer_top = '0'
        else:
            layer_top = str(int(round(lvl_bounds[lvl_idx-1]*100)))
        p.title('Soil temperature (K) valid at '
          + '\n' + valid_when + ' '
          + layer_top
          + '-' + str(int(round(lvl_bounds[lvl_idx]*100))) + ' cm'
          + ' from LAPS')
    return
+
def plot_temp(file_name, cntr_lvl=None, save_frames=False):
    """Map LAPS air temperature at every pressure level, one figure per
    level; optionally save numbered PNG frames under frames/."""
    file, vars = peek(file_name, show_vars=False)
    lon = vars['lon'].get_value()
    lat = vars['lat'].get_value()
    air_temp = vars['air_temp'].get_value()[0]
    lvl = vars['lvl'].get_value()
    valid_date = str(vars['valid_date'].get_value()[0])
    valid_time = zfill(str(vars['valid_time'].get_value()[0]), 4)
    # presumably valid_date is YYYYMMDD and valid_time HHMM -- confirm
    valid_when = '%s %s %s %s:%s UTC' % (
      valid_date[6:], cardinal_2_month(int(valid_date[4:6])),
      valid_date[0:4], valid_time[:2], valid_time[2:])
    frame_number = 0
    m = set_default_basemap(lon, lat)
    LON, LAT = p.meshgrid(lon, lat)   # basemap wants 2d coordinate arrays
    for lvl_idx in range(len(lvl)):
        p.figure()
        if cntr_lvl is None:
            m.contourf(LON, LAT, air_temp[lvl_idx])
        else:
            m.contourf(LON, LAT, air_temp[lvl_idx], cntr_lvl)
        m.drawcoastlines()
        m.drawmeridians(n.array(n.arange(lon.min(), lon.max() + a_small_number, 15.)), labels=[1,0,0,1])
        m.drawparallels(n.array(n.arange(lat.min(), lat.max() + a_small_number, 15.)), labels=[1,0,0,1])
        p.colorbar(orientation='horizontal', shrink=0.7, fraction=0.02, pad=0.07, aspect=50)
        p.title('Air temperature (K) valid at '
          + '\n' + valid_when + ' '
          + str(int(round(lvl[lvl_idx]))) + ' mb'
          + ' from LAPS')
        if save_frames:
            p.savefig('frames/frame_' + zfill(str(frame_number),3) +'_air_temp_' + str(int(lvl[lvl_idx])) + '_mb.png')
            frame_number += 1
    return
+
+# plot_wind
+# plot_mr
def plot_mr(file_name, cntr_lvl=None, save_frames=False):
    """Map the LAPS mixing ratio (converted to g/kg) at every pressure
    level, one figure per level.

    file_name   -- LAPS pressure-level netCDF file
    cntr_lvl    -- explicit contour levels (sequence), or None for automatic
    save_frames -- also write frames/frame_NNN_mr_<lvl>_mb.png per level
    """
    file, vars = peek(file_name, show_vars=False)
    lon = vars['lon'].get_value()
    lat = vars['lat'].get_value()
    mr_var = vars['mix_rto']
    # to change between kg/kg to g/kg
    mr = mr_var.get_value()[0]*1000.
    lvl = vars['lvl'].get_value()
    valid_date = str(vars['valid_date'].get_value()[0])
    valid_time = zfill(str(vars['valid_time'].get_value()[0]), 4)
    valid_when = valid_date[6:] + ' ' \
      + cardinal_2_month(int(valid_date[4:6])) + ' ' \
      + valid_date[0:4] \
      + ' ' + valid_time[:2] + ':' \
      + valid_time[2:] + ' UTC'
    if save_frames:
        frame_number = 0
    m = set_default_basemap(lon,lat)
    # must plot using 2d lon and lat
    LON, LAT = p.meshgrid(lon,lat)
    for lvl_idx in range(len(lvl)):
        p.figure()
        if cntr_lvl is not None:
            m.contourf(LON,LAT,mr[lvl_idx], cntr_lvl)
        else:
            m.contourf(LON,LAT,mr[lvl_idx])
        m.drawcoastlines()
        m.drawmeridians(n.array(n.arange(lon.min(), lon.max() + a_small_number, 15.)), labels=[1,0,0,1])
        m.drawparallels(n.array(n.arange(lat.min(), lat.max() + a_small_number, 15.)), labels=[1,0,0,1])
        p.colorbar(orientation='horizontal', shrink=0.7, fraction=0.02, pad=0.07, aspect=50)
        # displayed title fixed: the original said 'Mixing ration'
        title_string = 'Mixing ratio (g/kg)' \
          + '\n' + valid_when + ' ' \
          + str(int(round(lvl[lvl_idx]))) + ' mb' \
          + ' from LAPS'
        p.title(title_string)
        if save_frames:
            p.savefig('frames/frame_' + zfill(str(frame_number),3) +'_mr_' + str(int(lvl[lvl_idx])) + '_mb.png')
            frame_number += 1
    return
+
+# plot_ght
def plot_ght(file_name, cntr_lvl=None, save_frames=False):
    """Map LAPS geopotential height at every pressure level; optionally
    save numbered PNG frames under frames/."""
    file, vars = peek(file_name, show_vars=False)
    lon = vars['lon'].get_value()
    lat = vars['lat'].get_value()
    ght = vars['geop_ht'].get_value()[0]
    lvl = vars['lvl'].get_value()
    valid_date = str(vars['valid_date'].get_value()[0])
    valid_time = zfill(str(vars['valid_time'].get_value()[0]), 4)
    # presumably valid_date is YYYYMMDD and valid_time HHMM -- confirm
    valid_when = '%s %s %s %s:%s UTC' % (
      valid_date[6:], cardinal_2_month(int(valid_date[4:6])),
      valid_date[0:4], valid_time[:2], valid_time[2:])
    frame_number = 0
    m = set_default_basemap(lon, lat)
    LON, LAT = p.meshgrid(lon, lat)   # basemap wants 2d coordinate arrays
    for lvl_idx in range(len(lvl)):
        p.figure()
        if cntr_lvl is None:
            m.contourf(LON, LAT, ght[lvl_idx])
        else:
            m.contourf(LON, LAT, ght[lvl_idx], cntr_lvl)
        m.drawcoastlines()
        m.drawmeridians(n.array(n.arange(lon.min(), lon.max() + a_small_number, 15.)), labels=[1,0,0,1])
        m.drawparallels(n.array(n.arange(lat.min(), lat.max() + a_small_number, 15.)), labels=[1,0,0,1])
        p.colorbar(orientation='horizontal', shrink=0.7, fraction=0.02, pad=0.07, aspect=70)
        p.title('Geopotential height (m)'
          + '\n' + valid_when + ' '
          + str(int(round(lvl[lvl_idx]))) + ' mb'
          + ' from LAPS')
        if save_frames:
            p.savefig('frames/frame_' + zfill(str(frame_number),3) +'_ght_' + str(int(lvl[lvl_idx])) + '_mb.png')
            frame_number += 1
    return
+
def plot_sfc_temp(file_name, cntr_lvl=None, save_frames=False):
    """Map the LAPS surface temperature field (single figure).

    file_name   -- LAPS netCDF file
    cntr_lvl    -- explicit contour levels (sequence), or None for automatic
    save_frames -- also write frames/frame_000_sfc_temp.png
    """
    file, vars = peek(file_name, show_vars=False)
    lon = vars['lon'].get_value()
    lat = vars['lat'].get_value()
    sfc_temp_var = vars['sfc_temp']
    sfc_temp = sfc_temp_var.get_value()[0]
    valid_date = str(vars['valid_date'].get_value()[0])
    valid_time = zfill(str(vars['valid_time'].get_value()[0]), 4)
    valid_when = valid_date[6:] + ' ' \
      + cardinal_2_month(int(valid_date[4:6])) + ' ' \
      + valid_date[0:4] \
      + ' ' + valid_time[:2] + ':' \
      + valid_time[2:] + ' UTC'
    m = set_default_basemap(lon,lat)
    # must plot using 2d lon and lat
    LON, LAT = p.meshgrid(lon,lat)
    p.figure()
    if cntr_lvl is not None:
        m.contourf(LON,LAT,sfc_temp, cntr_lvl)
    else:
        m.contourf(LON,LAT,sfc_temp)
    m.drawcoastlines()
    m.drawmeridians(n.array(n.arange(lon.min(), lon.max() + a_small_number, 15.)), labels=[1,0,0,1])
    m.drawparallels(n.array(n.arange(lat.min(), lat.max() + a_small_number, 15.)), labels=[1,0,0,1])
    p.colorbar(orientation='horizontal', shrink=0.7, fraction=0.02, pad=0.07, aspect=70)
    title_string = 'Surface temperature (K) valid at' \
      + '\n' + valid_when + ' ' \
      + ' from LAPS'
    p.title(title_string)
    if save_frames:
        # the original referenced the undefined names frame_number, lvl and
        # lvl_idx here (NameError when save_frames=True); a single-figure
        # plot gets a fixed frame name instead
        p.savefig('frames/frame_' + zfill(str(0),3) + '_sfc_temp.png')
    return
+
def plot_sfc_pres(file_name, cntr_lvl=None, save_frames=False):
    """Map the LAPS surface pressure field, converted to hPa (single figure).

    file_name   -- LAPS netCDF file
    cntr_lvl    -- explicit contour levels (sequence), or None for automatic
    save_frames -- also write frames/frame_000_sfc_pres.png
    """
    file, vars = peek(file_name, show_vars=False)
    lon = vars['lon'].get_value()
    lat = vars['lat'].get_value()
    sfc_pres_var = vars['sfc_pres']
    # Pa -> hPa
    sfc_pres = sfc_pres_var.get_value()[0]/100.
    valid_date = str(vars['valid_date'].get_value()[0])
    valid_time = zfill(str(vars['valid_time'].get_value()[0]), 4)
    valid_when = valid_date[6:] + ' ' \
      + cardinal_2_month(int(valid_date[4:6])) + ' ' \
      + valid_date[0:4] \
      + ' ' + valid_time[:2] + ':' \
      + valid_time[2:] + ' UTC'
    m = set_default_basemap(lon,lat)
    # must plot using 2d lon and lat
    LON, LAT = p.meshgrid(lon,lat)
    p.figure()
    if cntr_lvl is not None:
        m.contourf(LON,LAT,sfc_pres, cntr_lvl)
    else:
        m.contourf(LON,LAT,sfc_pres)
    m.drawcoastlines()
    m.drawmeridians(n.array(n.arange(lon.min(), lon.max() + a_small_number, 15.)), labels=[1,0,0,1])
    m.drawparallels(n.array(n.arange(lat.min(), lat.max() + a_small_number, 15.)), labels=[1,0,0,1])
    p.colorbar(orientation='horizontal', shrink=0.7, fraction=0.02, pad=0.07, aspect=70)
    # unit fixed: the field is pressure in hPa, not '(K)' as the original
    # title claimed
    title_string = 'Surface pressure (hPa) valid at' \
      + '\n' + valid_when + ' ' \
      + ' from LAPS'
    p.title(title_string)
    if save_frames:
        p.savefig('frames/frame_' + zfill(str(0),3) +'_sfc_pres.png')
    return
+
+# plot_sfc_wind
+# plot_mslp
+# plot_land
+# plot_terrain
def plot_skin_temp(file_name, cntr_lvl=None, save_frames=False):
    """Map the LAPS skin temperature field (single figure).

    file_name   -- LAPS netCDF file
    cntr_lvl    -- explicit contour levels (sequence), or None for automatic
    save_frames -- also write frames/frame_000_skin_temp.png
    """
    file, vars = peek(file_name, show_vars=False)
    lon = vars['lon'].get_value()
    lat = vars['lat'].get_value()
    # was: skin_temp = vars['skin_temp'], leaving skin_temp_var undefined
    # on the next line (NameError)
    skin_temp_var = vars['skin_temp']
    skin_temp = skin_temp_var.get_value()[0]
    valid_date = str(vars['valid_date'].get_value()[0])
    valid_time = zfill(str(vars['valid_time'].get_value()[0]), 4)
    valid_when = valid_date[6:] + ' ' \
      + cardinal_2_month(int(valid_date[4:6])) + ' ' \
      + valid_date[0:4] \
      + ' ' + valid_time[:2] + ':' \
      + valid_time[2:] + ' UTC'
    m = set_default_basemap(lon,lat)
    # must plot using 2d lon and lat
    LON, LAT = p.meshgrid(lon,lat)
    p.figure()
    if cntr_lvl is not None:
        m.contourf(LON,LAT,skin_temp, cntr_lvl)
    else:
        m.contourf(LON,LAT,skin_temp)
    m.drawcoastlines()
    m.drawmeridians(n.array(n.arange(lon.min(), lon.max() + a_small_number, 15.)), labels=[1,0,0,1])
    m.drawparallels(n.array(n.arange(lat.min(), lat.max() + a_small_number, 15.)), labels=[1,0,0,1])
    p.colorbar(orientation='horizontal', shrink=0.7, fraction=0.02, pad=0.07, aspect=70)
    # title fixed: the original showed the copy-pasted 'Surface pressure (K)'
    title_string = 'Skin temperature (K) valid at' \
      + '\n' + valid_when + ' ' \
      + ' from LAPS'
    p.title(title_string)
    if save_frames:
        # the original referenced the undefined names frame_number, lvl and
        # lvl_idx here; a single-figure plot gets a fixed frame name instead
        p.savefig('frames/frame_' + zfill(str(0),3) + '_skin_temp.png')
    return
+
+# plot_skin_temp
+
+
Index: trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/viz/laps_sigma.py
===================================================================
--- trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/viz/laps_sigma.py	(revision 296)
+++ trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/viz/laps_sigma.py	(revision 296)
@@ -0,0 +1,64 @@
+from viz.utils import peek
+from viz.utils import cardinal_2_month
+import pylab as p
+import matplotlib.numerix.ma as ma
+import numpy as n
+from string import zfill
+from matplotlib.toolkits.basemap import Basemap
+
+a_small_number = 1e-8
+
def set_default_basemap(lon, lat):
    """Cylindrical Basemap framing the lon/lat arrays plus a 0.5 deg margin,
    centred on the middle of the data region."""
    half_lon_span = (lon.max() - lon.min()) / 2.
    half_lat_span = (lat.max() - lat.min()) / 2.
    return Basemap(
      llcrnrlon=lon.min() - 0.5,
      llcrnrlat=lat.min() - 0.5,
      urcrnrlon=lon.max() + 0.5,
      urcrnrlat=lat.max() + 0.5,
      resolution='l',
      projection='cyl',
      lon_0=lon.min() + half_lon_span,
      lat_0=lat.min() + half_lat_span,
      )
+
def plot_temp(file_name, cntr_lvl=None, save_frames=False):
    """Map LAPS air temperature on sigma levels, one figure per level;
    optionally save numbered PNG frames under frames/."""
    file, vars = peek(file_name, show_vars=False)
    lon = vars['lon'].get_value()
    lat = vars['lat'].get_value()
    air_temp = vars['air_temp'].get_value()[0]
    lvl = vars['lvl'].get_value()
    valid_date = str(vars['valid_date'].get_value()[0])
    valid_time = zfill(str(vars['valid_time'].get_value()[0]), 4)
    # presumably valid_date is YYYYMMDD and valid_time HHMM -- confirm
    valid_when = '%s %s %s %s:%s UTC' % (
      valid_date[6:], cardinal_2_month(int(valid_date[4:6])),
      valid_date[0:4], valid_time[:2], valid_time[2:])
    frame_number = 0
    m = set_default_basemap(lon, lat)
    LON, LAT = p.meshgrid(lon, lat)   # basemap wants 2d coordinate arrays
    # NOTE(review): hard-wired to the first 5 sigma levels; the full
    # range(len(lvl)) loop was commented out in the original
    for lvl_idx in range(5):
        p.figure()
        if cntr_lvl is None:
            m.contourf(LON, LAT, air_temp[lvl_idx])
        else:
            m.contourf(LON, LAT, air_temp[lvl_idx], cntr_lvl)
        m.drawcoastlines()
        m.drawmeridians(n.array(n.arange(lon.min(), lon.max() + a_small_number, 15.)), labels=[1,0,0,1])
        m.drawparallels(n.array(n.arange(lat.min(), lat.max() + a_small_number, 15.)), labels=[1,0,0,1])
        p.colorbar(orientation='horizontal', shrink=0.7, fraction=0.02, pad=0.07, aspect=50)
        p.title('Air temperature (K) valid at '
          + '\n' + valid_when + ' '
          + str(round(lvl[lvl_idx],4)) + ' sigma'
          + ' from LAPS')
        if save_frames:
            p.savefig('frames/frame_' + zfill(str(frame_number),3) +'_air_temp_' + str(lvl[lvl_idx]) + '_sigma.png')
            frame_number += 1
    return
+
+
Index: trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/viz/pyngl_utils.py
===================================================================
--- trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/viz/pyngl_utils.py	(revision 296)
+++ trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/viz/pyngl_utils.py	(revision 296)
@@ -0,0 +1,60 @@
+import PyNGL_numpy.Ngl as ngl
+
def res_init(lat, lon, 
  lat_min=None,
  lat_max=None,
  lon_min=None,
  lon_max=None):
    """Build a PyNGL Resources object for an orthographic map framing the
    given coordinates.

    lat, lon -- numpy arrays of the domain coordinates
    lat_min/lat_max/lon_min/lon_max -- optional explicit bounds; default
        to the extrema of the corresponding array

    Returns the configured ngl.Resources instance.
    """
    # compare against None explicitly: the original used 'if not lat_min',
    # which wrongly discarded a legitimate bound of 0.0 (equator or
    # Greenwich meridian)
    if lat_min is None:
        lat_min = lat.min()
    if lat_max is None:
        lat_max = lat.max()
    if lon_min is None:
        lon_min = lon.min()
    if lon_max is None:
        lon_max = lon.max()
    res = ngl.Resources()
    res.mpProjection = 'Orthographic'
    res.mpCenterLonF = lon_min + (lon_max-lon_min)/2.
    res.mpCenterLatF = lat_min + (lat_max-lat_min)/2.
    res.mpLimitMode = 'LatLon'
    # 0.2 degree margin around the data region
    res.mpMinLatF = lat_min - 0.2
    res.mpMaxLatF = lat_max + 0.2
    res.mpMinLonF = lon_min - 0.2
    res.mpMaxLonF = lon_max + 0.2
    res.mpFillOn = True
    res.mpGridSpacingF = 2.
    # viewport: 70% of the frame, anchored near the top-left corner
    res.vpXF = 0.1
    res.vpYF = 0.9
    res.vpWidthF = 0.7
    res.vpHeightF = 0.7
    return res
+
def make_land_gray(wks, res):
    """Register a 75% gray in the workstation colormap and plug it into the
    map fill-colour list on *res* (mutated in place)."""
    gray_idx = ngl.new_color(wks, 0.75, 0.75, 0.75)
    res.mpFillColors = [0, -1, gray_idx, -1]
+
def mslp_contour_levels(res):
    """Pin the MSLP contour levels on *res* to 980-1024 every 2 (hPa).

    Mutates the resources object in place. (The original also computed the
    level count 'ncn' but never used it; that dead local is removed.)
    """
    mnlvl = 980
    mxlvl = 1024
    spcng = 2
    res.cnLevelSelectionMode = 'ManualLevels'
    res.cnMinLevelValF = mnlvl
    res.cnMaxLevelValF = mxlvl
    res.cnLevelSpacingF = spcng
+
def sfc_pres_contour_levels(res):
    """Pin the surface-pressure contour levels on *res* to 840-1024 every
    2 (hPa).

    Mutates the resources object in place. (The original also computed the
    level count 'ncn' but never used it; that dead local is removed.)
    """
    mnlvl = 840
    mxlvl = 1024
    spcng = 2
    res.cnLevelSelectionMode = 'ManualLevels'
    res.cnMinLevelValF = mnlvl
    res.cnMaxLevelValF = mxlvl
    res.cnLevelSpacingF = spcng
+
+
Index: trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/viz/slabber.py
===================================================================
--- trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/viz/slabber.py	(revision 296)
+++ trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/viz/slabber.py	(revision 296)
@@ -0,0 +1,26 @@
+import sys
+sys.path.append('/Users/val/data/pylib')
+
+import viz.utils as vu
+from string import zfill
+
+data_file = \
+  '/Volumes/DATA/wrf/first_1way/coarse/wrfout_d01_2003-01-17_03-00-00.nc'
+f, fv = vu.peek(data_file, show_vars=False)
+
+u = fv['U'].get_value()
+lon = fv['XLONG_U'].get_value()
+lat = fv['XLAT_U'].get_value()
+
+lvl_idx = 10
+times = fv['Times'].get_value()
+
+for time_idx in range(46):
+    time_string = vu.set_time_string('WRF', times[time_idx])
+    lvl_string = str(lvl_idx) + 'th lvl'
+    title_string = \
+      vu.set_title_string('u wind', 'm/s', time_string, lvl_string, 'WRF:')
+    file_name = 'frames/frame_' + zfill(str(time_idx),3)
+    print file_name
+    vu.plot_slab(lon[time_idx], lat[time_idx], u[time_idx][lvl_idx], 
+      file_name=file_name, title_string=title_string)
Index: trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/viz/utils.py
===================================================================
--- trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/viz/utils.py	(revision 296)
+++ trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/viz/utils.py	(revision 296)
@@ -0,0 +1,706 @@
+"""
+repo of useful functions for visualizing stuff
+"""
+import os
+import sys
+
+# the syntax to import nio depends on its version. We try all options from the
+# newest to the oldest
+try:
+    import Nio as nio
+except:
+    try:
+        import PyNGL.Nio as nio
+    except:
+        import PyNGL_numpy.Nio as nio
+
+# The following is only needed for hn3.its.monash.edu.au
+# Unfortunately we must get rid of spurious entries in the sys.path that can
+# lead to the loading of conflicting packages
+for dir_idx in range(len(sys.path)-1,-1,-1):
+    if 'python2.4' in sys.path[dir_idx]:
+        sys.path.pop(dir_idx)
+
+import time
+import pylab as p
+import matplotlib.numerix.ma as ma
+import numpy as n
+from matplotlib.toolkits.basemap import Basemap
+import gc
+
+a_small_number = 1e-8
+
+def get_long_name(var):
+    try: 
+        long_name = var.long_name
+    except AttributeError:
+        try:
+            long_name = var.description
+        except AttributeError:
+            long_name = 'N/A'
+    if long_name == '':
+        long_name = 'N/A'
+    return long_name
+
+def peek(file_name, return_pointers=False, show_vars=True):
+    #print file_name
+    file = nio.open_file(file_name)
+    vars = file.variables
+    if show_vars:
+        keys = vars.keys()
+        keys.sort()
+        page_idx = 0
+        max = 0
+        lines_in_a_page = 50
+        if len(keys) > lines_in_a_page:
+            while max < len(keys):
+                min = page_idx * lines_in_a_page
+                max = (page_idx + 1) * lines_in_a_page
+                if max > len(keys):
+                    max = len(keys)
+                print min, max, type(min), type(max)
+                for key in keys[min:max]:
+                    long_name = get_long_name(vars[key])
+                    if len(key) < 7:
+                        print '\t', key, '\t\t->\t', long_name
+                    else:
+                        print '\t', key, '\t->\t', long_name
+                page_idx += 1
+                raw_input('press enter to continue...')
+        else:
+            for key in keys:
+                long_name = get_long_name(vars[key])
+                if len(key) < 7:
+                    print '\t', key, '\t\t->\t', long_name
+                else:
+                    print '\t', key, '\t->\t', long_name
+    if return_pointers:
+        return file, vars
+    else:
+        return
+
+def cardinal_2_month(cardinal):
+    if cardinal == 1:
+        return 'Jan'
+    elif cardinal == 2:
+        return 'Feb'
+    elif cardinal == 3:
+        return 'Mar'
+    elif cardinal == 4:
+        return 'Apr'
+    elif cardinal == 5:
+        return 'May'
+    elif cardinal == 6:
+        return 'Jun'
+    elif cardinal == 7:
+        return 'Jul'
+    elif cardinal == 8:
+        return 'Aug'
+    elif cardinal == 9:
+        return 'Sep'
+    elif cardinal == 10:
+        return 'Oct'
+    elif cardinal == 11:
+        return 'Nov'
+    elif cardinal == 12:
+        return 'Dec'
+    else:
+        return 'gibberish'
+
+def set_default_basemap(lon, lat, frame_width=5.):
+    test = lon < 0.
+    if True in test:
+        # matplotlib expects 0-360 while WRF for example uses -180-180
+        delta = n.ones(lon.shape)
+        delta *= 360
+        delta = ma.masked_where(lon > 0., delta)
+        lon += delta.filled(fill_value=0)
+    llcrnrlon=lon.min() - frame_width
+    urcrnrlon=lon.max() + frame_width
+    llcrnrlat=lat.min() - frame_width
+    urcrnrlat=lat.max() + frame_width
+    lon_0 = llcrnrlon + (urcrnrlon - llcrnrlon) / 2.
+    lat_0 = llcrnrlat + (urcrnrlat - llcrnrlat) / 2.
+        
+    map = Basemap(
+      llcrnrlon=llcrnrlon,
+      llcrnrlat=llcrnrlat,
+      urcrnrlon=urcrnrlon,
+      urcrnrlat=urcrnrlat,
+      resolution='l',
+      projection='cyl',
+      lon_0=lon_0,
+      lat_0=lat_0
+      )
+    return map
+
+def set_lvl_string(lvl, lvl_type):
+    if lvl_type == 'sigma':
+        pass
+    if lvl_type == 'press':
+        pass
+    if lvl_type == 'soil':
+        pass
+    return lvl_string
+
+def set_title_string(
+  long_name, 
+  units,
+  time_string, 
+  lvl_string, 
+  prefix='', 
+  postfix=''):
+    title_string = \
+      prefix + ' '\
+      + lvl_string + ' ' \
+      + long_name + ' (' + units + ')\n' \
+      + ' valid at ' \
+      + time_string \
+      + postfix
+    return title_string
+
+def set_time_string(model, time):
+    if model == 'WRF':
+        time_string = time.tostring()
+        year = time_string[:4]
+        month = cardinal_2_month(int(time_string[5:7]))
+        day = time_string[8:10]
+        hour = time_string[11:13]
+        minute = time_string[14:16]
+        second = time_string[17:]
+        time_string = day + ' ' \
+          + month + ' ' \
+          + year + ' ' \
+          + hour + ':' + minute + ' UTC'
+        return time_string
+    elif model == 'LAPS':
+        valid_date = str(time[0])
+        valid_time = str(time[1]).zfill(4)
+        time_string = valid_date[6:] + ' ' \
+          + cardinal_2_month(int(valid_date[4:6])) + ' ' \
+          + valid_date[0:4] \
+          + ' ' + valid_time[:2] + ':' \
+          + valid_time[2:] + ' UTC'
+        return time_string
+    elif model == 'manual':
+        # expects list of the form
+        # [year, month, day, hour, minute]
+        time_string = (str(time[2]) + ' ')
+        time_string += (cardinal_2_month(time[1]) + ' ')
+        time_string += (str(time[0]) + ' ')
+        time_string += (str(time[3]).zfill(2) + ':')
+        time_string += (str(time[4]).zfill(2) + ' ')
+        time_string += 'UTC'
+        return time_string
+    else:
+        print 'set_time_string has done nothing'
+        return
+
+def plot_grid(lon,lat,
+  title_string = 'N/A',
+  meridians_delta = 15,
+  parallels_delta = 15,
+  same_figure = False,
+  figsize = None,
+  file_name = None,
+  dpi = 80,
+  skip = 5,
+  return_map = False,
+  marker = '+'
+  ):
+    """Function to plot grids similar to those generated by WPS
+    
+    Modified 27/01/08: Minor mod to plot the boundaries of the domain
+    Comments: There is a chunk of code that gets repeated in a lot of places... the 
+    stuff about plotting meridians and parallels. Maybe we could put this in a separate 
+    function?
+    
+    """
+    map = set_default_basemap(lon,lat)
+    # let's make sure the lat and lon arrays are 2D
+    if len(lon.shape) < 2:
+        lon, lat = p.meshgrid(lon,lat)
+    if not same_figure:
+        if not figsize:
+            p.figure()
+        else:
+            p.figure(figsize=figsize)
+    map.plot(lon[::skip,::skip], lat[::skip,::skip], marker=marker, linestyle='None')
+
+    corners_lon = n.array([lon[0,0], lon[0,-1], lon[-1,-1], lon[-1,0]])
+    corners_lat = n.array([lat[0,0], lat[0,-1], lat[-1,-1], lat[-1,0]])
+    
+    map.plot(corners_lon,corners_lat, 'ro')
+
+    # Here it is =)
+    map.plot(lon[0,:],lat[0,:],'k-')
+    map.plot(lon[:,-1],lat[:,-1],'k-')
+    map.plot(lon[-1,:],lat[-1,:],'k-')
+    map.plot(lon[:,0],lat[:,0],'k-')
+
+#    Thom: I've taken out the plot canberra option for generality
+#    canberra_lon = [149 + 8./60]
+#    canberra_lat = [-35 - 17./60]
+#    map.plot(canberra_lon,canberra_lat, 'gs')
+
+    if not same_figure:
+        map.drawcoastlines()
+	# These blocks seem to get repeated...
+        meridians = n.arange(int(round(lon.min(),0)), 
+          lon.max() + a_small_number, meridians_delta)
+        meridians = n.array(meridians)
+        map.drawmeridians(meridians, labels=[1,0,0,1])
+    
+        parallels = n.arange(int(round(lat.min(),0)), 
+          lat.max() + a_small_number, parallels_delta)
+        parallels = n.array(parallels)
+        map.drawparallels(parallels, labels=[1,0,0,1])
+
+        p.title(title_string)
+    if file_name:
+        p.savefig(file_name,dpi=dpi)
+    if return_map:
+        return map
+    else:
+        return
+
+def plot_slab(lon, lat, slab,
+  map = None,
+  figsize = None,
+  cntr_lvl = None,
+  title_string = 'N/A',
+  meridians_delta = 15,
+  parallels_delta = 15,
+  file_name = None,
+  dpi = 80,
+  wind_vector =None,
+  quiv_skip = 5,
+  frame_width = 5,
+  significant_digits = 0,
+  colorbar = False,
+  contour_labels = True,
+  monochrome = False,
+  quiverkey_length = 10,
+  return_map = False
+  ):
+    from matplotlib.cm import gist_ncar as cmap
+    # let's make sure the lat and lon arrays are 2D
+    if len(lon.shape) < 2:
+        lon, lat = p.meshgrid(lon,lat)
+    if map is None:
+        map = set_default_basemap(lon,lat,frame_width)
+    if not figsize:
+        fig = p.figure()
+    else:
+        fig = p.figure(figsize=figsize)
+    if cntr_lvl is not None:
+        if monochrome:
+            cset = map.contour(lon, lat, slab, cntr_lvl, colors='black')
+        else:
+            csetf = map.contourf(lon, lat, slab, 
+              cntr_lvl, 
+              cmap=cmap)
+            if wind_vector is None:
+                cset = map.contour(lon, lat, slab, cntr_lvl, colors='lightslategray')
+    else:
+        if monochrome:
+            cset = map.contour(lon, lat, slab, colors='black')
+        else:
+            csetf = map.contourf(lon, lat, slab, cmap=cmap)
+            if wind_vector is None:
+                cset = map.contour(lon, lat, slab, colors='lightslategray')
+    if wind_vector is not None:
+        quiv = map.quiver(lon[::quiv_skip,::quiv_skip], 
+          lat[::quiv_skip,::quiv_skip],
+          wind_vector[0][::quiv_skip,::quiv_skip],
+          wind_vector[1][::quiv_skip,::quiv_skip])
+        from scipy.stats import median
+        p.quiverkey(quiv, 
+          0.855, 
+          0.115, 
+          quiverkey_length,
+          str(int(quiverkey_length)) + ' m/s',
+          labelpos='S',
+          coordinates='figure')
+    # plot grid outline
+    map.plot(lon[0,:],lat[0,:],color='lightslategray')
+    map.plot(lon[-1,:],lat[-1,:],color='lightslategray')
+    map.plot(lon[:,0],lat[:,0],color='lightslategray')
+    map.plot(lon[:,-1],lat[:,-1],color='lightslategray')
+    if monochrome:
+        map.fillcontinents(color='0.95')
+
+    map.drawcoastlines()
+
+    # todo
+    # the +5 is a hack in case one uses the default map; it should be made
+    # an explicit parameter that is passed around...
+    meridians = n.arange(int(round(lon.min(),0)), 
+      #int(round(lon.max(),0)) + a_small_number, meridians_delta)
+      int(round(lon.max(),0)) + 5 + a_small_number, meridians_delta)
+    meridians = n.array(meridians)
+    map.drawmeridians(meridians, labels=[1,0,0,1])
+
+    parallels = n.arange(int(round(lat.min(),0)), 
+      #int(round(lat.max(),0)) + a_small_number, parallels_delta)
+      int(round(lat.max(),0)) + 5 + a_small_number, parallels_delta)
+    parallels = n.array(parallels)
+    map.drawparallels(parallels, labels=[1,0,0,1])
+
+    #plot canberra
+    if 1:
+        map.plot([149.133],[-35.283],'bo', ms=3)
+
+    if contour_labels:
+        p.clabel(cset, cset.levels[::2], 
+          colors='k', fontsize=8, fmt='%i')
+
+    if colorbar:
+        format = '%.'+ str(significant_digits) + 'f'
+        p.colorbar(csetf, orientation='horizontal', shrink=0.7, 
+          fraction=0.02, pad=0.07, aspect=70, 
+          format = format)
+
+    p.title(title_string)
+    if file_name:
+        p.savefig(file_name,dpi=dpi)
+        p.close(fig)
+        del fig
+    if not monochrome:
+        del csetf
+    if wind_vector is None:
+        del cset
+    if wind_vector is not None:
+        del quiv
+    gc.collect()
+    if return_map:
+        return map
+    else:
+        del map
+        gc.collect()
+        return
+
+def flip_yaxis(slab):
+    shape = slab.shape
+    # assuming (lat, lon) ordering
+    lat_idx_max = shape[0] - 1
+    flipped_slab = n.zeros(shape)
+    for lat_idx in range(lat_idx_max, -1, -1):
+        flipped_idx = abs(lat_idx - lat_idx_max)
+        flipped_slab[flipped_idx] = slab[lat_idx]
+    return flipped_slab
+
+ 
+def set_cntr_lvl(data, 
+      intervals=12, 
+      only_positive=False, 
+      significant_digits=0,
+      forced_max=None,
+      forced_min=None):
+
+    import math as m
+    dummy = data * 10**significant_digits
+    min = dummy.min()
+    max = dummy.max()
+    floor = m.floor(min)
+    ceil = m.ceil(max)
+    if forced_max:
+        ceil = forced_max * 10**significant_digits
+    if forced_min:
+        floor = forced_min * 10**significant_digits
+    if only_positive:
+        floor = 0
+    extent = float(ceil - floor)
+    delta = extent / intervals
+    #cntr_lvl = n.arange(floor, ceil + a_small_number, delta)
+    cntr_lvl = n.arange(floor - a_small_number, ceil + a_small_number, delta)
+    return cntr_lvl / 10**significant_digits
+
+def lin_interp(xa, xb, ya, yb, x):
+    """linear interpolation in two dimensions
+    
+    USAGE
+    xa = original x-grid
+    ya = original y-grid
+    xb = new x-grid
+    yb = new y-grid
+    x  = data in xa, ya
+
+    """
+    slope = (yb - ya) / (xb - xa)
+    y = ya + slope * (x - xa)
+    return y
+    
+def plot_slice(
+  ordinate,
+  data,
+  abscissa = None,
+  abscissa_label = None,
+  land_mask = None,
+  cntr_lvl = None,
+  wind_vector = None,
+  log_scale = False,
+  pressure = True,
+  ordinate_quiv_skip = 1,
+  abscissa_quiv_skip = 3,
+  xmin = None,
+  xmax = None,
+  ymin = None,
+  ymax = None,
+  significant_digits = 0,
+  title_string = 'N/A',
+  file_name = None,
+  dpi = 100
+  ):
+    '''
+    plot a slice
+    Usage:
+    >>> plot_vertical_slice(ordinate, data, abscissa, wind_vector)
+    where
+      ordinate is either pressure or height. By default pressure is assumed
+      data is a vertical slice
+      abscissa is either 1D or 2D
+    '''
+    if log_scale:
+        ordinate = n.log10(ordinate)
+    # if the abscissa is not supplied then simply use the record numbers
+    if abscissa is None:
+        x = n.arange(1,data.shape[1]+1)
+        abscissa = n.zeros(data.shape)
+        for y_idx in range(data.shape[0]):
+            abscissa[y_idx] = x
+        if cntr_lvl is not None:
+            cset = p.contourf(abscissa, ordinate, data, cntr_lvl)
+        else:
+            cset = p.contourf(abscissa, ordinate, data)
+        p.xlabel('record number')
+    else:
+        # let's handle 1D abscissa arrays
+        if len(abscissa.shape) == 1:
+            dummy = n.zeros(data.shape)
+            for lvl_idx in range(data.shape[0]):
+                dummy[lvl_idx] = abscissa
+            abscissa = dummy
+            del dummy
+        if cntr_lvl is not None:
+            cset = p.contourf(abscissa, ordinate, data, cntr_lvl)
+        else:
+            cset = p.contourf(abscissa, ordinate, data)
+        if abscissa_label:
+            p.xlabel(abscissa_label)
+        else:
+            p.xlabel('N/A')
+    if wind_vector:
+        p.quiver(abscissa[::ordinate_quiv_skip,::abscissa_quiv_skip], 
+          ordinate[::ordinate_quiv_skip,::abscissa_quiv_skip], 
+          wind_vector[0][::ordinate_quiv_skip,::abscissa_quiv_skip], 
+          wind_vector[1][::ordinate_quiv_skip,::abscissa_quiv_skip])
+    if land_mask is not None:
+        land = ma.masked_where(land_mask == 2,ordinate[0])
+        p.plot(abscissa[0], land, color=(0.59,0.29,0.0), linewidth=2.)
+        # if you also want to plot the ocean uncomment the following lines
+        #if log_scale:
+        #    ocean = ma.masked_where(land_mask == 1, n.log10(1013.25))
+        #else:
+        #    ocean = ma.masked_where(land_mask == 1, 1013.25)
+        #p.plot(abscissa[0], ocean, color=(0.0,0.0,1.0), linewidth=2.)
+    if pressure:
+        # I am assuming pressure will be expressed in hPa
+        yticks_location = n.arange(1000.,99.,-100.)
+        if log_scale:
+            p.yticks(n.log10(yticks_location), 
+              [str(int(e)) for e in yticks_location])
+            p.ylabel('log10 of pressure')
+            for e in n.log10(yticks_location):
+                p.axhline(e,linestyle='--',color=(0.7,0.7,0.7))
+            # the -5. is there to create a bit of a top margin
+            if ordinate.max() > n.log10(1013.25):
+                cheat_ymin = ordinate.max()
+            else:
+                cheat_ymin = n.log10(1013.25 + 5.)
+            p.ylim(ymin=cheat_ymin,
+              ymax=n.log10(10**ordinate.min() - 5.))
+        else:
+            p.yticks( yticks_location, 
+              [str(int(e)) for e in yticks_location])
+            p.ylabel('pressure (hPa)')
+            for e in yticks_location:
+                p.axhline(e,linestyle='--',color=(0.7,0.7,0.7))
+            # the -25. is there to create a bit of a top margin
+            if ordinate.max() > 1013.25:
+                cheat_ymin = ordinate.max()
+            else:
+                cheat_ymin = 1013.25 + 10.
+            p.ylim(ymin=cheat_ymin, ymax=ordinate.min() - 25.)
+            # p.ylim(ymin=ordinate.max(), ymax=ordinate.min() - 25.)
+            # if any of the axes boundaries have been given explicitly, we'll
+            # use them
+            if log_scale:
+                if xmin is not None:
+                    p.xlim(xmin=n.log10(xmin))
+                if xmax is not None:
+                    p.xlim(xmax=n.log10(xmax))
+                if ymin is not None:
+                    p.ylim(ymin=n.log10(ymin))
+                if ymax is not None:
+                    p.ylim(ymax=n.log10(ymax))
+            else:
+                if xmin is not None:
+                    p.xlim(xmin=xmin)
+                if xmax is not None:
+                    p.xlim(xmax=xmax)
+                if ymin is not None:
+                    p.ylim(ymin=ymin)
+                if ymax is not None:
+                    p.ylim(ymax=ymax)
+
+    else:
+        print 'I assume you are trying to plot in z coordinate: ' \
+          + 'sorry not implemented yet'
+    format = '%.'+ str(significant_digits) + 'f'
+    p.colorbar(cset, orientation='horizontal', shrink=0.7, 
+      #fraction=0.02, pad=0.095, aspect=70, 
+      fraction=0.02, pad=0.1, aspect=70, 
+      format = format)
+    p.title(title_string)
+    if file_name:
+        p.savefig(file_name,dpi=dpi)
+        p.close()
+
+def write_to_log_file(log_file, message):
+    '''
+    This function opens the log file, writes to it,
+    and closes it so that tools like tail -f
+    will work as intended.
+    It also prepends a time stamp to each entry.
+    It is assumed that the output_dir and log_file are defined in the 
+    namespace from which the function is called.
+    '''
+    log_file = open(log_file, 'a')
+    log_file.write(time.ctime(time.time()) + ' -> ' + message + '\n')
+    log_file.close()
+
+def generate_output_file_name(output_dir, prefix, timetuple):
+    """Returns the output file name built by joining the output directory
+    path with the supplied prefix and the timetuple which is used to
+    construct the suffix/timestamp
+    """
+    output_file_name = prefix
+    output_file_name += str(timetuple[0])
+    output_file_name += str(timetuple[1]).zfill(2)
+    output_file_name += str(timetuple[2]).zfill(2)
+    output_file_name += str(timetuple[3]).zfill(2)
+    output_file_name += str(timetuple[4]).zfill(2)
+    output_file_name += str(timetuple[5]).zfill(2)
+    output_file_name = os.path.join(output_dir, output_file_name)
+    return output_file_name
+    
+def process_command_line_arguments(argv):
+    """
+    Process command line arguments in a consistent fashion for the
+    plot_wrfout elementary scripts.
+    """
+
+    def process_time_window_argument(argument):
+        """
+        Process the time window argument and return a tuple of datetime objects
+        (start_time, end_time)
+        """
+        import datetime
+        try:
+            dummy = (argument).split('-')
+            start_time = dummy[0].split('/')
+            start_time = [int(e) for e in start_time]
+        except:
+            sys.exit('\nERROR:\tI cannot make sense of the time window '
+              + argument + '\n\tI need something like '
+              + 'yyyy/mm/dd/hh/mm/ss-yyyy/mm/dd/hh/mm/ss but will also '
+              + 'cope without hours, minutes or seconds.')
+        # this is only intended to work if we have missing hour, minute, or
+        # seconds. At least the date ought to be complete!
+        while len(start_time) < 6:
+            start_time.append(0)
+        year, month, day, hour, minute, second = start_time
+        start_time = \
+          datetime.datetime(year, month, day, hour, minute, second)
+        end_time = dummy[1].split('/')
+        end_time = [int(e) for e in end_time]
+        # this is only intended to work if we have missing hour, minute, or
+        # seconds. At least the date ought to be complete!
+        while len(end_time) < 6:
+            end_time.append(0)
+        year, month, day, hour, minute, second = end_time
+        end_time = \
+          datetime.datetime(year, month, day, hour, minute, second)
+        #print start_time.ctime(), end_time.ctime()
+        return (start_time, end_time)
+
+    if len(argv) < 2:
+        sys.exit( '\nERROR:\tI need (at least) one file to extract the data from...'
+          + '\n\tPlease try something like:\n\tpython ' + argv[0] \
+          + ' wrfout_dpp_yyyy-mm-dd_hh:mm:ss')
+    else:
+        # Let's store the name of the calling script and considered it
+        # processed
+        caller = argv.pop(0)
+        # let's start with checking that the first argument is a legit wrfout
+        # file name and that it exists
+        input_file = argv.pop(0)
+        # this should be more stringent, but I don't have time
+        if 'wrfout' not in input_file:
+            sys.exit(
+              '\nERROR:\t' + input_file + ' is not a standard wrf output file'
+              + 'name.\n\tI need something like ' 
+              + 'wrfout_dpp_yyyy-mm-dd_hh:mm:ss')
+        if not os.path.isfile(input_file):
+            sys.exit( '\nERROR:\tfile ' + input_file + ' does not exist!')
+        output_dir = None
+        time_window = None
+        # let's have a look at the arguments left, if any.
+        # every command line argument that is not the input file will come as a
+        # pair flag/value to avoid confusion. If the program cannot make sense of
+        # the input arguments it will bail out with a (possibly helpful) error 
+        # message
+        if '--output_dir' in argv:
+            flag_idx = argv.index('--output_dir')
+            value_idx = flag_idx + 1
+            # the order of the following two statements is important not to
+            # break the way we use pop
+            output_dir = argv.pop(value_idx)
+            flag = argv.pop(flag_idx)
+            if not os.path.isdir(output_dir):
+                sys.exit('\nERROR:\t' + output_dir + 'is not a directory!')
+        if '-o' in argv:
+            flag_idx = argv.index('-o')
+            value_idx = flag_idx + 1
+            # the order of the following two statements is important not to
+            # break the way we use pop
+            output_dir = argv.pop(value_idx)
+            flag = argv.pop(flag_idx)
+            if not os.path.isdir(output_dir):
+                sys.exit('\nERROR:\t' + output_dir + 'is not a directory!')
+        if '--time_window' in argv:
+            flag_idx = argv.index('--time_window')
+            value_idx = flag_idx + 1
+            # the order of the following two statements is important not to
+            # break the way we use pop
+            time_argument = argv.pop(value_idx)
+            flag = argv.pop(flag_idx)
+            time_window = process_time_window_argument(time_argument)
+        if '-t' in argv:
+            flag_idx = argv.index('-t')
+            value_idx = flag_idx + 1
+            # the order of the following two statements is important not to
+            # break the way we use pop
+            time_argument = argv.pop(value_idx)
+            flag = argv.pop(flag_idx)
+            time_window = process_time_window_argument(time_argument)
+        if len(argv) != 0:
+            unknown_arguments = ''
+            for argument in argv:
+                unknown_arguments += argument + '\n\t'
+            sys.exit('\nERROR:\tI do not know how to process the following '
+              + 'arguments:\n\t' + unknown_arguments)
+
+        return input_file, output_dir, time_window
+
Index: trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/wrf/plot_wrfout/plot_mslp.py
===================================================================
--- trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/wrf/plot_wrfout/plot_mslp.py	(revision 296)
+++ trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/wrf/plot_wrfout/plot_mslp.py	(revision 296)
@@ -0,0 +1,117 @@
+"""
+This script plots the mslp from a single wrfout file. It can be run from the
+command line as a standalone script or imported into another script and
+executed from there.
+"""
+
+import sys
+import os
+#import gc
+import pywrf.viz.utils as vu
+import pywrf.wrf.utils as wu
+  
+def generate_frames(input_file_name, output_dir=None, time_window=None):
+    """
+    Generate the mslp frames from a single wrfout file and save them in 
+    the chosen directory. When the script is called as a standalone program
+    this function is called under the hood.
+    NB if no output directory is supplied the default will be the directory
+    from which the script is invoked.
+
+    Usage
+    >>> import plot_mslp
+    >>> plot_mslp(input_file_name, output_dir, time_window)
+    """
+    # if the output directory is not specified or invalid
+    if output_dir is None \
+      or not os.path.isdir(output_dir):
+        output_dir = os.getcwd()
+
+    log_file = os.path.join(output_dir, 'plot_mslp.log')
+    if os.path.isfile(log_file):
+        os.remove(log_file)
+    vu.write_to_log_file(log_file, 'Started execution of plot_mslp.py')
+
+    # let's check for a local plot_wrfout_config or use default
+    if os.path.isfile(os.path.join(os.getcwd(),'plot_wrfout_config.py')):
+        sys.path.insert(0, os.getcwd())
+        vu.write_to_log_file(log_file, 
+          'Using plot_wrfout_config from present working directory')
+        import plot_wrfout_config as pwc
+    else:
+        vu.write_to_log_file(log_file,
+          'Using default plot_wrfout_config')
+        import plot_wrfout_config as pwc
+
+    # We are assuming the standard wrfout names are used!
+    # let's first work out the domain flag so that we can then use it to select
+    # the right set of contours from the dictionary contained in
+    # plot_wrfout_config.py
+    # we index from the end to cope with absolute paths in the input_file_name
+    domain = input_file_name[-23:-20]
+    # we will assume the normal wrfout file name structure without a file
+    # extension. Furthermore, we assume that the file is in netCDF format. The
+    # above is reasonable but potentially fragile.
+    input_file, vars_dict = vu.peek(input_file_name + '.nc', 
+      return_pointers=True, show_vars=False)
+    times = wu.time_string_to_datetime(vars_dict['Times'].get_value())
+    # calculate the indices for the minimum and maximum times to be plotted
+    if time_window is not None:
+        start_time = time_window[0]
+        end_time = time_window[1]
+        time_min_idx = 0
+        time_max_idx = len(times)
+        for time_idx in range(len(times)):
+            if times[time_min_idx] < start_time:
+                time_min_idx = time_idx
+        for time_idx in range(len(times) - 1, -1, -1):
+            if times[time_idx] >= end_time:
+                time_max_idx = time_idx
+    else: 
+        time_min_idx = 0
+        time_max_idx = len(times)
+    # assuming the grid does not move, we are entitled to take
+    # the values of latitude and longitude at the first time step
+    lon = vars_dict['XLONG'].get_value()[0]
+    lat = vars_dict['XLAT'].get_value()[0]
+
+    for time_idx in range(time_min_idx, time_max_idx):
+        vu.write_to_log_file(log_file, 
+          '\tprocessing time ' + times[time_idx].ctime())
+        time_string = vu.set_time_string('manual', times[time_idx].timetuple())
+        title_string = vu.set_title_string('pressure', 'hPa', 
+          time_string, 'Sea-level') 
+        mslp = wu.calculate_mslp_wrapper(vars_dict, time_idx)
+        wind_vector = None
+        if pwc.plot_wind_vectors:
+            zonal_wind = vars_dict['U10'].get_value()[time_idx].copy()
+            meridional_wind = vars_dict['V10'].get_value()[time_idx].copy()
+            wind_vector = (zonal_wind, meridional_wind)
+        output_file_name = vu.generate_output_file_name(output_dir, 'mslp_', 
+          times[time_idx].timetuple())
+        vu.plot_slab(lon, lat, mslp,
+          cntr_lvl=pwc.mslp_cntr_lvl[domain],
+          file_name=output_file_name,
+          colorbar=pwc.plot_colorbar,
+          contour_labels=pwc.plot_contour_labels,
+          meridians_delta=pwc.meridians_delta[domain],
+          parallels_delta=pwc.parallels_delta[domain],
+          quiv_skip=pwc.quiv_skip[domain],
+          frame_width=pwc.frame_width[domain],
+          wind_vector=wind_vector,
+          monochrome=pwc.monochrome,
+          quiverkey_length=pwc.quiverkey_length[domain],
+          title_string=title_string
+          )
+        #del mslp
+
+    input_file.close()
+    #del lon, lat, input_file, vars_dict
+    #gc.collect()
+
+    vu.write_to_log_file(log_file, 'Completed execution of plot_mslp.py')
+
+if __name__ == '__main__':
+    input_file, output_dir, time_window = \
+      vu.process_command_line_arguments_enhanced(sys.argv)
+    generate_frames(input_file, output_dir, time_window)
Index: trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/wrf/plot_wrfout/plot_sfc_mixing_ratio.py
===================================================================
--- trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/wrf/plot_wrfout/plot_sfc_mixing_ratio.py	(revision 296)
+++ trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/wrf/plot_wrfout/plot_sfc_mixing_ratio.py	(revision 296)
@@ -0,0 +1,120 @@
+"""
+This script plots the 2m water vapour mixing ratio from a single wrfout file. 
+It can be run from the command line as a standalone script or imported into 
+another script and executed from there.
+"""
+
+import sys
+import os
+#import gc
+import pywrf.viz.utils as vu
+import pywrf.wrf.utils as wu
+  
def generate_frames(input_file_name, output_dir=None, time_window=None):
    """
    Generate the sfc water vapour mixing ratio frames from a single wrfout
    file and save them in the chosen directory. When the script is called as
    a standalone program this function is called under the hood.
    NB if no output directory is supplied the default will be the directory
    from which the script is invoked.

    input_file_name -- path of the wrfout file WITHOUT the '.nc' extension
    output_dir      -- directory the frames and the log file are written to
    time_window     -- optional (start_time, end_time) pair of datetimes;
                       only frames with start_time <= t < end_time are drawn

    Usage
    >>> import plot_sfc_mixing_ratio
    >>> plot_sfc_mixing_ratio(input_file_name, output_dir, time_window)
    """
    # if the output directory is not specified or invalid fall back to the
    # present working directory
    if output_dir is None \
      or not os.path.isdir(output_dir):
        output_dir = os.getcwd()

    # start a fresh log file for this invocation
    log_file = os.path.join(output_dir, 'plot_sfc_mixing_ratio.log')
    if os.path.isfile(log_file):
        os.remove(log_file)
    vu.write_to_log_file(log_file, 'Started execution of plot_sfc_mixing_ratio.py')

    # prefer a plot_wrfout_config.py from the present working directory over
    # the default one: the sys.path insertion decides which module the import
    # statement resolves, hence the apparently duplicated import
    if os.path.isfile(os.path.join(os.getcwd(), 'plot_wrfout_config.py')):
        sys.path.insert(0, os.getcwd())
        vu.write_to_log_file(log_file,
          'Using plot_wrfout_config from present working directory')
        import plot_wrfout_config as pwc
    else:
        vu.write_to_log_file(log_file,
          'Using default plot_wrfout_config')
        import plot_wrfout_config as pwc

    # We are assuming the standard wrfout names are used!
    # The domain flag (e.g. 'd01') is sliced out of the file name and used to
    # select the right set of contours from plot_wrfout_config.py;
    # we index from the end to cope with absolute paths in the input_file_name
    domain = input_file_name[-23:-20]
    # we will assume the normal wrfout file name structure without a file
    # extension. Furthermore, we assume that the file is in netcdf format.
    # The above is reasonable but potentially fragile.
    input_file, vars_dict = vu.peek(input_file_name + '.nc',
      return_pointers=True, show_vars=False)
    times = wu.time_string_to_datetime(vars_dict['Times'].get_value())
    # calculate the index of the first frame to plot and of one past the last
    # frame to plot (times is assumed to be chronologically sorted)
    time_min_idx = 0
    time_max_idx = len(times)
    if time_window is not None:
        start_time = time_window[0]
        end_time = time_window[1]
        # BUGFIX: the previous version tested times[time_min_idx] inside a
        # forward scan over time_idx, which spuriously selected the last
        # frame (instead of none) when start_time was later than every
        # available time
        while time_min_idx < len(times) and times[time_min_idx] < start_time:
            time_min_idx += 1
        # one past the last index whose time is < end_time
        while time_max_idx > 0 and times[time_max_idx - 1] >= end_time:
            time_max_idx -= 1
    # assuming the grid does not move we are entitled to take the values of
    # latitude and longitude at the first time step
    lon = vars_dict['XLONG'].get_value()[0]
    lat = vars_dict['XLAT'].get_value()[0]

    for time_idx in range(time_min_idx, time_max_idx):
        vu.write_to_log_file(log_file,
          '\tprocessing time ' + times[time_idx].ctime())
        time_string = vu.set_time_string('manual', times[time_idx].timetuple())
        title_string = vu.set_title_string('water vapour mixing ratio', 'g/kg', 
          time_string, 'sfc',) 
        mixing_ratio = vars_dict['Q2'].get_value()[time_idx].copy()
        # kg/kg -> g/kg
        mixing_ratio = mixing_ratio * 1000.
        # optionally overlay the 10m wind field
        wind_vector = None
        if pwc.plot_wind_vectors:
            zonal_wind = vars_dict['U10'].get_value()[time_idx].copy()
            meridional_wind = vars_dict['V10'].get_value()[time_idx].copy()
            wind_vector = (zonal_wind, meridional_wind)
        output_file_name = vu.generate_output_file_name(output_dir,
          'sfc_mix_ratio_', times[time_idx].timetuple())
        vu.plot_slab(lon, lat, mixing_ratio,
          cntr_lvl=pwc.sfc_mixing_ratio_cntr_lvl[domain],
          file_name=output_file_name,
          colorbar=pwc.plot_colorbar,
          contour_labels=pwc.plot_contour_labels,
          meridians_delta=pwc.meridians_delta[domain],
          parallels_delta=pwc.parallels_delta[domain],
          quiv_skip=pwc.quiv_skip[domain],
          frame_width=pwc.frame_width[domain],
          wind_vector=wind_vector,
          monochrome=pwc.monochrome,
          quiverkey_length=pwc.quiverkey_length[domain],
          title_string=title_string
          )

    input_file.close()

    vu.write_to_log_file(log_file,
      'Completed execution of plot_sfc_mixing_ratio.py')
+
if __name__ == '__main__':
    # Parse the wrfout file name, the output directory and the optional time
    # window from the command line, then hand off to generate_frames.
    parsed = vu.process_command_line_arguments_enhanced(sys.argv)
    wrfout_name, frames_dir, window = parsed
    generate_frames(wrfout_name, frames_dir, window)
Index: trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/wrf/plot_wrfout/plot_wrfout_config.py
===================================================================
--- trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/wrf/plot_wrfout/plot_wrfout_config.py	(revision 296)
+++ trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/wrf/plot_wrfout/plot_wrfout_config.py	(revision 296)
@@ -0,0 +1,57 @@
'''
Central repository of the contour levels and map-decoration settings used by
the plot_wrfout scripts. Each script first looks in the directory it is run
from for a personalized copy of this file and otherwise falls back to the
default one in pywrf/wrf/plot_wrfout/plot_wrfout_config.py.
'''

import numpy as n

# A single plotting style is adopted across all the plots.
plot_colorbar = True
plot_contour_labels = False
plot_wind_vectors = True
monochrome = False

# Per-domain settings, keyed by the wrfout domain flag ('d01', 'd02', 'd03').
frame_width = {'d01': 5.0, 'd02': 5.0, 'd03': 0.5}
quiv_skip = {'d01': 5, 'd02': 12, 'd03': 10}
meridians_delta = {'d01': 15, 'd02': 15, 'd03': 3}
parallels_delta = {'d01': 15, 'd02': 15, 'd03': 3}
quiverkey_length = {'d01': 10, 'd02': 10, 'd03': 10}

# Sea-level pressure contour levels.
# The d01/d02 values looked pretty with the gist_ncar colormap over Australia
# at 36km resolution and grid corners (following wrfout metadata notation):
#   LAT_LL_T = -50.81659, LON_LL_T = 100.8601
#   LAT_UR_T = -13.36167, LON_UR_T = 165.0748
mslp_cntr_lvl = {
    'd01': n.arange(976, 1041, 4),
    'd02': n.arange(976, 1041, 4),
    'd03': n.arange(1004, 1028, 1),
}

# 2m water vapour mixing ratio contour levels.
sfc_mixing_ratio_cntr_lvl = {
    'd01': n.arange(0, 32, 2),
    'd02': n.arange(0, 32, 2),
    'd03': n.arange(0, 28, 1),
}
Index: trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/wrf/pydown/pydown.py
===================================================================
--- trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/wrf/pydown/pydown.py	(revision 296)
+++ trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/wrf/pydown/pydown.py	(revision 296)
@@ -0,0 +1,635 @@
+#!/usr/bin/python
+
+import os
+import sys
+import cPickle
+import subprocess as sp
+
+import pywrf.wrf.utils as wu
+import pywrf.wrf.pydown.utils as pdu
+
+
def phase_description(phase_idx, pp):
    '''
    Return a string describing the phase the run is at, or None for a phase
    index that does not map onto a known phase.

    The 'phases' are:
    phase 0: from wps to d01 real
    phase 1: from d01 real to d01 wrf
    phase 2: from d01 wrf to ppcheck
    phase 3 ie (2 + 4*(pp-2) + 1): from ppcheck to d02 (dpp) real
    phase 4 ie (2 + 4*(pp-2) + 2): from d02 (dpp) real to d02 (dpp) ndown
    phase 5 ie (2 + 4*(pp-2) + 3): from d02 (dpp) ndown to d02 (dpp) wrf
    phase 6 ie (2 + 4*(pp-2) + 4): from d02 (dpp) wrf to dppcheck
    ...
    for pp <= max_dom
    '''
    domain = 'd' + str(pp).zfill(2)
    if phase_idx < 3:
        # the first three phases deal with the outermost domain
        key = phase_idx
        descriptions = {
            0: 'We start by running the wps binaries and doing the housekeeping '
               + 'needed to run real.exe for ' + domain + '.',
            1: 'real.exe has completed for ' + domain + '. '
               + 'we can now move on to prepare for wrf.exe.',
            2: 'wrf.exe completed for ' + domain + '. '
               + 'We are now going to archive the results and check if there '
               + 'are more nests to run.',
        }
    else:
        # the nests repeat a four-step cycle; reduce the index to 1-4
        key = phase_idx - 4*(pp-2) - 2
        descriptions = {
            1: 'Yup, we will now work on ' + domain + '. '
               + 'We start by running real.exe for this grid by itself.',
            2: 'real.exe is done for ' + domain + '. '
               + 'We now prepare to run ndown.exe for this grid and its parent.',
            3: 'ndown.exe is done for ' + domain + '. '
               + 'We now prepare to run wrf.exe for this grid.',
            4: 'wrf.exe completed for ' + domain + '. '
               + 'We are now going to archive the results and check if there '
               + 'are more nests to run.',
        }
    if key in descriptions:
        return 'Phase ' + str(phase_idx) + ': ' + descriptions[key]
    return None
+        
def this_is_what_you_should_do_next(phase_idx, pp=None):
    '''
    Return a string describing the mpi job that should be submitted and
    checked after the previous phase.

    phase_idx -- index of the phase just completed (see phase_description)
    pp        -- domain number the nested phases refer to; defaults to the
                 script-level global ``pp`` for backward compatibility (the
                 original implementation read the global directly, unlike
                 its sibling phase_description which takes pp explicitly)

    The 'phases' are:
    phase 0: from wps to d01 real
    phase 1: from d01 real to d01 wrf
    phase 2: from d01 wrf to ppcheck
    phase 3 ie (2 + 4*(pp-2) + 1): from ppcheck to d02 (dpp) real
    phase 4 ie (2 + 4*(pp-2) + 2): from d02 (dpp) real to d02 (dpp) ndown
    phase 5 ie (2 + 4*(pp-2) + 3): from d02 (dpp) ndown to d02 (dpp) wrf
    phase 6 ie (2 + 4*(pp-2) + 4): from d02 (dpp) wrf to dppcheck
    ...
    for pp <= max_dom
    '''
    # if we are dealing with the outermost domain (pp is not needed)...
    if phase_idx < 3:
        if phase_idx == 0:
            return '\nCompleted phase ' + str(phase_idx) + '! ' \
              + 'Now submit real.exe as an mpi job. Do not forget to check ' \
              + 'the results ;)'
        elif phase_idx == 1:
            return 'Completed phase ' + str(phase_idx) + '! ' \
              + 'Now submit wrf.exe as an mpi job. Do not forget to check ' \
              + 'the results ;)'
        elif phase_idx == 2:
            return 'Completed phase ' + str(phase_idx) + '! ' \
              + 'Just keep going to see if there is any more to do ;]'
    # if we are dealing with one of the nests
    else:
        if pp is None:
            # legacy behaviour: fall back to the script-level pp
            pp = globals()['pp']
        # reduce to 1-4
        loop_idx = phase_idx - 4*(pp-2) - 2
        if loop_idx == 1:
            return 'Completed phase ' + str(phase_idx) + '! ' \
              + 'Now submit real.exe as an mpi job. Do not forget to check ' \
              + 'the results ;)'
        elif loop_idx == 2:
            return 'Completed phase ' + str(phase_idx) + '! ' \
              + 'Now submit ndown.exe as an mpi job. Do not forget to check ' \
              + 'the results ;)'
        elif loop_idx == 3:
            return 'Completed phase ' + str(phase_idx) + '! ' \
              + 'Now submit wrf.exe as an mpi job. Do not forget to check ' \
              + 'the results ;)'
        elif loop_idx == 4:
            return 'Completed phase ' + str(phase_idx) + '! ' \
              + 'Just keep going to see if there is any more to do ;]'
+
def check_with_user():
    # Ask the user whether to proceed: on 'n' persist the current status and
    # exit, on 'y' fall through (printing a blank separator line).
    # NOTE(review): on an invalid answer this recurses, but the recursive
    # call's validated answer is not returned, so the outer frame re-tests
    # its own (invalid) answer afterwards -- harmless here only because 'n'
    # exits from inside the recursive call; confirm before relying on it.
    answer = raw_input('\nSowhaddoyouwonnadonow? Should we go ahead? [y/n]: ')
    if answer not in ['y','n']:
        check_with_user()
    if answer == 'n':
        dump_status()
        sys.exit('Status saved: see yaa!')
    else:
        print
+
def dump_status():
    '''
    Pickle the script-level phase_completed list to the pydown_status file.

    The file handle is now closed explicitly: the original passed an
    anonymous open(...) to cPickle.dump and leaked the handle.
    '''
    status_file = open(pydown_status,'w')
    try:
        cPickle.dump(phase_completed,status_file)
    finally:
        status_file.close()
    return
+
def load_status():
    '''
    Unpickle and return the phase_completed list from the pydown_status file.

    The file handle is now closed explicitly: the original passed an
    anonymous open(...) to cPickle.load and leaked the handle.
    '''
    status_file = open(pydown_status,'r')
    try:
        return cPickle.load(status_file)
    finally:
        status_file.close()
+    
+
+
# Script-level configuration: all paths are resolved relative to the
# directory pydown is launched from.
# TODO make the pydown_dir a command_line argument (?)
pydown_dir = os.getcwd()
# pickle file used to persist/restore progress between invocations
pydown_status = os.path.join(pydown_dir,'pydown.status')
wps_dir = os.path.join(pydown_dir,'WPS')
run_dir = os.path.join(pydown_dir,'WRFV2','run')
archive_dir = os.path.join(pydown_dir,'archive')
namelist_wps = os.path.join(wps_dir,'namelist.wps')
namelist_input_master = os.path.join(run_dir,'namelist.input.master')

#TODO consistency checks between namelist.wps and namelist.input
# for now let's take it from namelist.input and assume the two namelists are
# compatible
namelist_input_master_dict = wu.read_namelist(namelist_input_master)
max_dom = namelist_input_master_dict['&domains']['max_dom'][0]
# 3 phases for d01 plus 4 per nest (see phase_description), plus one spare
no_of_phases = (2 + 4*(max_dom-2) + 4) + 1
phase_idx = 0
pp = 1
+
# control tower
# is this a brand new run or are we continuing a previous one?
if os.path.isfile(pydown_status):
    print '\nThis is what has been already done:\n'
    phase_completed = load_status()
    ready_to_roll = False
else:
    # Initial checks to protect fragile operations: every problem found is
    # appended to fix_before_we_start and reported in one go at the end.
    fix_before_we_start = ''

    # the WPS directory must exist and contain the user-linked grib files
    # and Vtable (pydown cannot infer them from the namelists)
    if os.path.isdir(wps_dir):
        wps_files = os.listdir(wps_dir)
        if not ('GRIBFILE.AAA' in wps_files and 'GRIBFILE.AAB' in wps_files):
            fix_before_we_start += "\t- There is no way for me to know which grib " \
              + "files you want linked to generate the WRF's ICs and BCs so " \
              + " please go back and link the ones you want.\n"
        if 'Vtable' not in wps_files:
            fix_before_we_start += "\t- There is no way for me to know which " \
              + "Vtable goes with your grib files of choice so please go back " \
              + "and link the one you want.\n"
    else:
        fix_before_we_start += "I couldn't find the WPS directory " \
          + wps_dir + '\n'

    # the run directory must have a namelist.input.master and be free of
    # leftovers (namelist.input, met_em files) from previous runs
    if os.path.isdir(run_dir):
        run_files = os.listdir(run_dir)
        if 'namelist.input' in run_files:
            fix_before_we_start += '\t- namelist.input in your ' + run_dir \
            + ' will bother me so please move/delete it.\n'
        if 'namelist.input.master' not in run_files:
            fix_before_we_start += '\t- I expected to find a namelist.input.master ' \
              + 'in your ' + run_dir + ' please put one (possibly sensible) ' \
              + 'there for me to know what to do.\n'
        if True in ['met_em' in file for file in run_files]:
            fix_before_we_start += "\t- It looks like you've met_em files (or dir) " \
              + "in your " + run_dir + ', please move/delete them.\n'
    else:
        fix_before_we_start += "\t- I couldn't find the run directory\n" \
          + run_dir

    # the archive directory must be empty apart from a README
    if os.path.isdir(archive_dir):
        archive_files = os.listdir(archive_dir)
        if not (archive_files == ['README'] 
          or archive_files == ['README','README~']):
            # TODO this request might just be me being lazy so make it more general
            # if are so inclined
            fix_before_we_start += "\t- All I would like to see in the archive " \
              + "directory is a README file.\n" \
              + " \t\tDon't be lazy: jot down a couple of sentences in" \
              + " archive/README" \
              + " describing what you set out to achieve with this run... " \
              + " Yeah, I know it's boring, but you will thank me later ;]\n" \
              + " \t\tIf you've done this already, then get rid of any other" \
              + " file in the archive folder."
    else:
        fix_before_we_start += "\t- I couldn't find the archive directory " \
          + archive_dir + '\n'

    if fix_before_we_start != '':
        # We've got a problem
        print 'Please address the following and restart the script:'
        print fix_before_we_start
        sys.exit()
    else:
        # All is good (or the error is semantic).
        # NOTE(review): the two concatenated literals below produce
        # "...start,so let's..." with no space -- presumably a typo; confirm
        print "Welcome to pydown. I have found all I need in order to start," \
         + "so let's begin with:"
        print phase_description(phase_idx,pp)

    phase_completed = [False for idx in range(no_of_phases)]
    check_with_user()
    ready_to_roll = True
+
# ---- Phase 0: run the WPS binaries and prepare to run real.exe on d01 ----
# when resuming, skip ahead until the first incomplete phase is found
if not ready_to_roll and not phase_completed[phase_idx]:
    print '\nThis is what has to be done next:\n'
    print phase_description(phase_idx,pp)
    check_with_user()
    ready_to_roll = True
print phase_description(phase_idx,pp)
if ready_to_roll:
    os.chdir(wps_dir)
    print '\tMoved to ' + wps_dir + '.'
    # TODO I am assuming the user has already linked the correct Vtable and
    # grib files as I cannot extract that information from the namelists...

    # Let's run the wps components; success is detected by grepping each
    # binary's stdout for its completion banner
    print '\tRunning geogrid.exe:'
    if 'Successful completion of geogrid' not in \
      sp.Popen('./geogrid.exe', stdout=sp.PIPE).communicate()[0]:
        # TODO handle the error in a more useful way.
        print "Houston we've got a problem... with geogrid.exe!"
    else:
        print '\t\tDone!'
    print '\tRunning ungrib.exe:'
    if 'Successful completion of ungrib' not in \
      sp.Popen('./ungrib.exe', stdout=sp.PIPE).communicate()[0]:
        # TODO handle the error in a more useful way.
        print "Houston we've got a problem... with ungrib.exe!"
    else:
        print '\t\tDone!'
    print '\tRunning metgrid.exe:'
    if 'Successful completion of metgrid' not in \
      sp.Popen('./metgrid.exe', stdout=sp.PIPE).communicate()[0]:
        # TODO handle the error in a more useful way.
        print "Houston we've got a problem... with metgrid.exe!"
    else:
        print '\t\tDone!'

    # By this stage the wps components should have executed successfully so we
    # move on to archive the appropriate metadata for future reference
    print '\tZipping metadata archive:'
    wps_metadata = 'wps_metadata.zip'
    cmd = r'zip ' + wps_metadata + ' configure.wps Vtable namelist.wps '
    # TODO I am assuming standard name for the ungrib intermediate files ->
    # this should be made more general by reading it from namelist.wps
    for file in os.listdir(os.getcwd()):
        if 'met_em' in file \
          or '.log' in file \
          or 'geo_em' in file \
          or 'GRIBFILE' in file \
          or 'FILE' in file :
            cmd += file + ' '
    # The following is to ignore both stdout and stderr from the zip command
    # TODO check if the following syntax represents a portability issue
    if sp.call(cmd.split(), stdout=open('/dev/null','w'), stderr = sp.STDOUT) == 0:
        print '\t\tDone!'

    # let's archive the metadata and move the met_em files to a met_em dir in
    # the run_dir; moving them out of the wps directory will free it up for
    # other uses
    # TODO none of the possible exceptions are caught... beware!
    print '\tArchiving metadata:'
    os.rename(wps_metadata,os.path.join(archive_dir,wps_metadata))
    print '\t\tDone!'

    print '\tMoving met_em* files to the run directory:'
    os.mkdir(os.path.join(run_dir,'met_em'))
    for file in os.listdir(os.getcwd()):
        if 'met_em' in file:
            os.rename(file,os.path.join(run_dir,'met_em',file))
    print '\t\tDone!'

    # let's now clean the wps_dir
    # TODO I am just being lazy here but the user 
    # should be forced to regenerate Vtable and GRIBFILE.* links every time to
    # make sure they are the right thing... beware!
          #or 'GRIBFILE' in file \
          #or 'Vtable' == file \
    print '\tCleaning the wps directory:'
    for file in os.listdir(os.getcwd()):
        if 'geo_em' in file \
          or 'FILE:' in file \
          or 'PFILE:' in file:
            os.remove(file)
    print '\t\tDone!'

    print '\tGenerating the namelist.input.d01.real:'
    current_namelist_input = \
      pdu.generate_namelist_input_d01_real(namelist_input_master,run_dir)
    print '\t\tDone!'

    print '\tLinking the just generated namelist.input:'
    os.symlink(current_namelist_input,os.path.join(run_dir,'namelist.input'))
    print '\t\tDone!'

    # the slice rewrites 'met_em.dNN...' links as d01 names for real.exe
    print '\tLinking the previously generated met_em files:'
    for file in os.listdir(os.path.join(run_dir,'met_em')):
        if 'd' + str(pp).zfill(2) in file:
            os.symlink(os.path.join(run_dir,'met_em',file),
              os.path.join(run_dir,file[:9] + '1' + file[10:]))
    print '\t\tDone!'

    phase_completed[phase_idx] = True
    # let's be patronizing...
    print this_is_what_you_should_do_next(phase_idx)
    check_with_user()
# we are finished let's move on to the next phase
phase_idx += 1
+
# ---- Phase 1: clean up after d01 real.exe and prepare for d01 wrf.exe ----
if not ready_to_roll and not phase_completed[phase_idx]:
    print '\nThis is what has to be done next:\n'
    print phase_description(phase_idx,pp)
    check_with_user()
    ready_to_roll = True
print phase_description(phase_idx,pp)
if ready_to_roll:
    # let's start with a little housekeeping to clean after the real.exe
    os.chdir(run_dir)
    print '\tMoved to ' + run_dir + '.'
    print '\tMoving real log files out of the way:'
    os.mkdir(os.path.join(run_dir,'real_logs'))
    for file in os.listdir(os.getcwd()):
        if 'rsl' in file \
          or 'std' in file:
            os.rename(file,os.path.join(run_dir,'real_logs',file))
    print '\t\tDone!'

    print '\tGenerating the namelist.input.d01.wrf:'
    current_namelist_input = \
      pdu.generate_namelist_input_d01_wrf(namelist_input_master,run_dir)
    print '\t\tDone!'

    # replace the real.exe namelist link with the wrf.exe one
    print '\tLinking the just generated namelist.input:'
    os.remove(os.path.join(run_dir,'namelist.input'))
    os.symlink(current_namelist_input,os.path.join(run_dir,'namelist.input'))
    print '\t\tDone!'

    phase_completed[phase_idx] = True
    print this_is_what_you_should_do_next(phase_idx)
    check_with_user()
    # we are finished let's move on to the next phase
phase_idx += 1
+
# ---- Phase 2: clean up after d01 wrf.exe and archive its results ----
if not ready_to_roll and not phase_completed[phase_idx]:
    print '\nThis is what has to be done next:\n'
    print phase_description(phase_idx,pp)
    check_with_user()
    ready_to_roll = True
print phase_description(phase_idx,pp)
if ready_to_roll:
    # let's start with a little housekeeping to clean after the wrf.exe
    os.chdir(run_dir)
    print '\tMoved to ' + run_dir + '.'
    print '\tMoving wrf log files out of the way:'
    os.mkdir(os.path.join(run_dir,'wrf_logs'))
    for file in os.listdir(os.getcwd()):
        if 'rsl' in file \
          or 'std' in file:
            os.rename(file,os.path.join(run_dir,'wrf_logs',file))
    print '\t\tDone!'

    # then move on to generating and archiving the metadata and wrfout_d01*
    print '\tZipping metadata archive:'
    wrf_metadata = 'wrf_metadata_d' + str(pp).zfill(2) + '.zip'
    cmd = r'zip -r ' + wrf_metadata + ' wrfndi_d02 ../compile.log ' \
      + '../configure.wrf wrfinput_d01 wrfbdy_d01 submit_wrf.sh ' \
      + 'submit_real.sh submit_ndown.sh namelist.input ' \
      + 'wrf_logs real_logs ndown_logs'
    # The following is to ignore both stdout and stderr from the zip command
    # TODO check if the following syntax represents a portability issue
    if sp.call(cmd.split(), stdout=open('/dev/null','w'), stderr = sp.STDOUT) == 0:
        print '\t\tDone!'

    # TODO check that the appropriate directory structure exist for the archive
    # and create it if it doesn't
    # TODO none of the possible exceptions are caught... beware!
    print '\tArchiving metadata:'
    os.rename(wrf_metadata,os.path.join(archive_dir,wrf_metadata))
    print '\t\tDone!'

    print '\tMoving wrfout_d01* files to the archive directory:'
    for file in os.listdir(os.getcwd()):
        if 'wrfout' in file:
            os.rename(file,os.path.join(archive_dir,file))
    print '\t\tDone!'

    # let's now clean the wrf_dir
    print '\tCleaning the wrf directory:'
    for file_or_dir in os.listdir(os.getcwd()):
        if 'met_em.' in file_or_dir \
          or 'wrfinput' in file_or_dir \
          or 'wrfndi' in file_or_dir \
          or 'namelist.input' == file_or_dir \
          or 'wrfbdy' in file_or_dir:
            os.remove(file_or_dir)
        # only my logs directories will match 'logs'
        # TODO make this more general/robust or warn the user they should not
        # have anything that matches this check in their run_dir
        if 'logs' in file_or_dir:
            for nested_file in os.listdir(file_or_dir):
                os.remove(os.path.join(file_or_dir,nested_file))
            os.rmdir(file_or_dir)
    print '\t\tDone!'

    phase_completed[phase_idx] = True
    print this_is_what_you_should_do_next(phase_idx)
    check_with_user()
# we are finished let's move on to the next phase
phase_idx += 1
+
# ---- Nested domains: repeat the real -> ndown -> wrf -> archive cycle ----
# each pass handles one nest (pp = 2 .. max_dom) across four phases
while pp < max_dom:
    pp += 1

    # phase (2 + 4*(pp-2) + 1): run real.exe for this nest by itself
    if not ready_to_roll and not phase_completed[phase_idx]:
        print '\nThis is what has to be done next:\n'
        print phase_description(phase_idx,pp)
        check_with_user()
        ready_to_roll = True
    print phase_description(phase_idx,pp)
    if ready_to_roll:
        print '\tGenerating the namelist.input.d' + str(pp).zfill(2) + '.real:'
        current_namelist_input = \
          pdu.generate_namelist_input_dpp_real(pp,namelist_input_master,run_dir)
        print '\t\tDone!'

        print '\tLinking the just generated namelist.input:'
        os.symlink(current_namelist_input,os.path.join(run_dir,'namelist.input'))
        print '\t\tDone!'

        # the slice rewrites the met_em.dNN links with a d01 domain flag
        print '\tLinking the previously generated met_em files:'
        for file in os.listdir(os.path.join(run_dir,'met_em')):
            if 'd' + str(pp).zfill(2) in file:
                os.symlink(os.path.join(run_dir,'met_em',file),
                  os.path.join(run_dir,file[:9] + '1' + file[10:]))
        print '\t\tDone!'

        phase_completed[phase_idx] = True
        print this_is_what_you_should_do_next(phase_idx)
        check_with_user()
    # we are finished let's move on to the next phase
    phase_idx += 1

    # phase (2 + 4*(pp-2) + 2): clean after real.exe and prepare ndown.exe
    if not ready_to_roll and not phase_completed[phase_idx]:
        print '\nThis is what has to be done next:\n'
        print phase_description(phase_idx,pp)
        check_with_user()
        ready_to_roll = True
    print phase_description(phase_idx,pp)
    if ready_to_roll:
        # let's start with a little housekeeping to clean after the real.exe
        os.chdir(run_dir)
        print '\tMoved to ' + run_dir + '.'

        print '\tMoving real log files out of the way:'
        os.mkdir(os.path.join(run_dir,'real_logs'))
        for file in os.listdir(os.getcwd()):
            if 'rsl' in file \
              or 'std' in file:
                os.rename(file,os.path.join(run_dir,'real_logs',file))
        print '\t\tDone!'

        #let's prepare for ndown
        print '\tRename wrfinput_d01 wrfndi_02 and get rid of wrfbdy_d01:'
        os.rename('wrfinput_d01','wrfndi_d02')
        os.remove('wrfbdy_d01')
        print '\t\tDone!'

        print '\tGenerating the namelist.input.d' + str(pp).zfill(2) + '.ndown:'
        current_namelist_input = \
          pdu.generate_namelist_input_dpp_ndown(pp,namelist_input_master,run_dir)
        print '\t\tDone!'

        print '\tLinking the just generated namelist.input:'
        if os.path.isfile(os.path.join(run_dir,'namelist.input')):
            os.remove(os.path.join(run_dir,'namelist.input'))
        os.symlink(current_namelist_input,os.path.join(run_dir,'namelist.input'))
        print '\t\tDone!'

        # link the parent's archived wrfout files back in, renamed as d01
        # TODO this statement is untested
        print '\tLinking the (previously calculated) wrfout_d' \
          + str(pp-1).zfill(2)
        for file in os.listdir(archive_dir):
            if 'wrfout_d' + str(pp-1).zfill(2) in file:
                os.symlink(os.path.join(archive_dir,file),
                file[:9] + '1' + file[10:])
        print '\t\tDone!'

        phase_completed[phase_idx] = True
        print this_is_what_you_should_do_next(phase_idx)
        check_with_user()
    # we are finished let's move on to the next phase
    phase_idx += 1

    # phase (2 + 4*(pp-2) + 3): clean after ndown.exe and prepare wrf.exe
    if not ready_to_roll and not phase_completed[phase_idx]:
        print '\nThis is what has to be done next:\n'
        print phase_description(phase_idx,pp)
        check_with_user()
        ready_to_roll = True
    print phase_description(phase_idx,pp)
    if ready_to_roll:
        # let's start with a little housekeeping to clean after the ndown.exe
        os.chdir(run_dir)
        print '\tMoved to ' + run_dir + '.'

        print '\tMoving ndown log files out of the way:'
        os.mkdir(os.path.join(run_dir,'ndown_logs'))
        for file in os.listdir(os.getcwd()):
            if 'rsl' in file \
              or 'std' in file:
                os.rename(file,os.path.join(run_dir,'ndown_logs',file))
        print '\t\tDone!'

        #let's prepare for wrf
        print '\tRename wrfinput_d02 and wrfbdy_02 to wrfinput_d01 and wrfbdy_d01:'
        os.rename('wrfinput_d02','wrfinput_d01')
        os.rename('wrfbdy_d02','wrfbdy_d01')
        print '\t\tDone!'

        print '\tGenerating the namelist.input.d' + str(pp).zfill(2) + '.wrf:'
        current_namelist_input = \
          pdu.generate_namelist_input_dpp_wrf(pp,namelist_input_master,run_dir)
        print '\t\tDone!'

        print '\tLinking the just generated namelist.input:'
        os.remove(os.path.join(run_dir,'namelist.input'))
        os.symlink(current_namelist_input,os.path.join(run_dir,'namelist.input'))
        print '\t\tDone!'

        print '\tRemove the links to  wrfout_d' + str(pp-1).zfill(2) + '*'
        for file in os.listdir(run_dir):
            #if 'wrfout_d' + str(pp-1).zfill(2) in file:
            if 'wrfout_d01' in file:
                os.remove(file)
        print '\t\tDone!'

        phase_completed[phase_idx] = True
        print this_is_what_you_should_do_next(phase_idx)
        check_with_user()
    # we are finished let's move on to the next phase
    phase_idx += 1

    # phase (2 + 4*(pp-2) + 4): clean after wrf.exe and archive this nest
    if not ready_to_roll and not phase_completed[phase_idx]:
        print '\nThis is what has to be done next:\n'
        print phase_description(phase_idx,pp)
        check_with_user()
        ready_to_roll = True
    print phase_description(phase_idx,pp)
    if ready_to_roll:
        # let's start with a little housekeeping to clean after the wrf.exe
        os.chdir(run_dir)
        print '\tMoved to ' + run_dir + '.'
        print '\tMoving wrf log files out of the way:'
        os.mkdir(os.path.join(run_dir,'wrf_logs'))
        for file in os.listdir(os.getcwd()):
            if 'rsl' in file \
              or 'std' in file:
                os.rename(file,os.path.join(run_dir,'wrf_logs',file))
        print '\t\tDone!'

        # then move on to generating and archiving the metadata and wrfout_d01*
        print '\tZipping metadata archive:'
        wrf_metadata = 'wrf_metadata_d' + str(pp).zfill(2) + '.zip'
        cmd = r'zip -r ' + wrf_metadata + ' wrfndi_d02 ../compile.log ' \
          + '../configure.wrf wrfinput_d01 wrfbdy_d01 submit_wrf.sh ' \
          + 'submit_real.sh submit_ndown.sh namelist.input ' \
          + 'wrf_logs real_logs ndown_logs'
        # The following is to ignore both stdout and stderr from the zip command
        # TODO check if the following syntax represents a portability issue
        if sp.call(cmd.split(), stdout=open('/dev/null','w'), stderr = sp.STDOUT) == 0:
            print '\t\tDone!'

        # TODO check that the appropriate directory structure exist for the archive
        # and create it if it doesn't
        # TODO none of the possible exceptions are caught... beware!
        print '\tArchiving metadata:'
        os.rename(wrf_metadata,os.path.join(archive_dir,wrf_metadata))
        print '\t\tDone!'

        # the slice restores this nest's domain flag in the archived names
        print '\tMoving wrfout_d01* files to the archive directory:'
        for file in os.listdir(os.getcwd()):
            if 'wrfout' in file:
                os.rename(file,os.path.join(archive_dir,file[:8] \
                  + str(pp).zfill(2) + file[10:]))
        print '\t\tDone!'

        # let's now clean the wrf_dir
        print '\tCleaning the wrf directory:'
        for file_or_dir in os.listdir(os.getcwd()):
            if 'met_em.' in file_or_dir \
              or 'wrfinput' in file_or_dir \
              or 'wrfndi' in file_or_dir \
              or 'wrfbdy' in file_or_dir:
                os.remove(file_or_dir)
            # NOTE(review): this namelist.input removal runs once per listed
            # entry rather than once overall -- presumably harmless since the
            # isfile guard makes later passes no-ops, but confirm
            if os.path.isfile(os.path.join(run_dir,'namelist.input')):
                os.remove(os.path.join(run_dir,'namelist.input'))
            # only my logs directories will match 'logs'
            # TODO make this more general/robust or warn the user they should not
            # have anything that matches this check in their run_dir
            if 'logs' in file_or_dir:
                for nested_file in os.listdir(file_or_dir):
                    os.remove(os.path.join(file_or_dir,nested_file))
                os.rmdir(file_or_dir)
        print '\t\tDone!'

        phase_completed[phase_idx] = True
        print this_is_what_you_should_do_next(phase_idx)
        check_with_user()
    # we are finished let's move on to the next phase
    phase_idx += 1
+
+# the presence of a pydown.status file will be assumed to mean that we want to
+# continue a partially executed run. We are now finished so we need to clean up
+# the directory not to confuse the program next time it is run.
+# TODO check that this exists before erasing it
+os.remove(pydown_status)
+print 'No more nests to run... good night and good luck.'
Index: trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/wrf/pydown/utils.py
===================================================================
--- trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/wrf/pydown/utils.py	(revision 296)
+++ trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/wrf/pydown/utils.py	(revision 296)
@@ -0,0 +1,247 @@
+import sys
+import os
+from socket import gethostname
+
+# VB the following is both host and user specific hence
+hostname = gethostname()
+user = os.getlogin()
+if hostname == 'hn3.its.monash.edu.au':
+    if user == 'vbisign':
+        sys.path.append('/nfs/1/home/vbisign/wrf/pywrf')   
+        import wrf.utils as wu
+    elif user == 'tchubb':
+        print 'Hey Thom where do you keep pywrf on this computer?'
+        sys.exit()
+        sys.path.append('/somewhere/pylib')
+        import pywrf.wrf.utils as wu
+elif hostname == 'linux450':
+    # VB Sorry Thom if this is not correct ;)
+    print 'Hey Thom where do you keep pywrf on this computer?'
+    sys.exit()
+    sys.path.append('/somewhere/pylib')
+    import pywrf.wrf.utils as wu
+elif hostname == 'val.maths.monash.edu.au' \
+  or hostname == 'valerio-bisignanesis-computer.local':
+    sys.path.append('/Users/val/Desktop/workspace/pywrf')
+    import wrf.utils as wu
+else:
+    print 'Warning: since I do not know this user/'\
+      + 'hostname combination, I am not sure of ' \
+      + ' where to find pywrf.wrf.util, I will try\n' \
+      + ' import pywrf.wrf.utils as wu'
+    import pywrf.wrf.utils as wu
+
+
+
+def generate_namelist_input_d01_real(namelist_input_master,run_dir='.'):
+    """Generate a namelist file for real.exe for the outermost domain
+    
+    The namelist file for real.exe for the outermost domain differs from the 
+    'master' namelist file by the entry in max_dom (max_dom=1)
+    """
+    namelist_dict=wu.read_namelist(namelist_input_master)
+    namelist_dict['&domains']['max_dom'][0]=1
+    output_file = os.path.join(run_dir,'namelist.input.d01.real')
+    wu.write_namelist(namelist_dict,output_file)
+    return output_file
+
+def generate_namelist_input_d01_wrf(namelist_input_master,run_dir='.'):
+    """Generate a namelist file for wrf.exe for the outermost domain
+    
+    The namelist file for wrf.exe for the outermost domain differs from the 
+    'master' namelist file by the entry in max_dom (max_dom=1)
+    """
+    namelist_dict=wu.read_namelist(namelist_input_master)
+    namelist_dict['&domains']['max_dom'][0]=1
+    output_file = os.path.join(run_dir,'namelist.input.d01.wrf')
+    wu.write_namelist(namelist_dict,output_file)
+    return output_file
+
+def generate_namelist_input_dpp_real(pp,namelist_input_master,run_dir='.'):
+    """Generate a namelist for real.exe for the pp'th domain
+    
+    This namelist will contain only one column, with max_dom = 1 and 
+    interval_seconds = grib interval
+    """
+
+    namelist_dict=wu.read_namelist(namelist_input_master)
+    idx_discard=range(int(namelist_dict['&domains']['max_dom'][0]))
+    
+    # We'll be using this list as the list of entries to 'pop' from each variable 
+      # in the namelist, so we need to reverse this list to preserve only the 'pp'th
+    # datum
+    idx_discard.pop(idx_discard.index(pp-1))
+    idx_discard.reverse()
+
+    for group in namelist_dict.keys():
+	for variable in namelist_dict[group].keys():
+	    dummy=namelist_dict[group][variable]
+	    if len(dummy)==1:
+		pass
+	    else:
+		for idx in idx_discard:
+		    dummy.pop(idx)
+
+
+    # &domains
+    namelist_dict['&domains']['max_dom'][0]=1
+    # grid id
+    namelist_dict['&domains']['grid_id'][0]=1
+    namelist_dict['&domains']['parent_id'][0]=0
+    namelist_dict['&domains']['parent_grid_ratio'][0]=1
+    # j_parent_start of parent is 1
+    namelist_dict['&domains']['j_parent_start'][0]=0
+    # same for i_parent_start
+    namelist_dict['&domains']['i_parent_start'][0]=0
+
+    # &time_control
+    namelist_dict['&time_control']['input_from_file'][0]='.true.'
+
+    # &bdy_control
+    # parent grid is not nested as far as WRF is concerned...
+    namelist_dict['&bdy_control']['nested'][0]='.false.'
+    # parent grid will have specified boundary conditions
+    namelist_dict['&bdy_control']['specified'][0]='.true.'
+
+   # TODO
+    # Shorten the time extent to save on CPU resources (ref. step 15 in dia)
+    # Include explicit definition of eta_levels
+
+    output_file = os.path.join(run_dir,'namelist.input.d'+str(pp).zfill(2)+'.real')
+    wu.write_namelist(namelist_dict,output_file)
+    return output_file
+    
+
+def generate_namelist_input_dpp_ndown(pp,namelist_input_master,run_dir='.'):
+    """Generate a namelist for ndown.exe for the pp'th domain
+    
+    This namelist will contain two columns (parent and child), with max_dom = 2
+    and interval_seconds set from the parent domain's history_interval
+    """
+
+    namelist_dict=wu.read_namelist(namelist_input_master)
+    
+    # Correcting to give BC's at time of history_interval for parent grid. 
+    # history_interval given in minutes, interval_seconds, in seconds (duh??)
+    dom_idx = pp-1 
+    history_interval = namelist_dict['&time_control']['history_interval'][dom_idx-1]
+    interval_seconds = history_interval*60
+    namelist_dict['&time_control']['interval_seconds'][0]=interval_seconds
+    
+    idx_discard=range(int(namelist_dict['&domains']['max_dom'][0]))
+    
+    # We'll be using this list as the list of entries to 'pop' from each variable 
+    # in the namelist, so we need to reverse this list to preserve only the
+    # parent (pp-1) and child (pp) domains' data
+    idx_discard.pop(idx_discard.index(dom_idx))
+    idx_discard.pop(idx_discard.index(dom_idx-1))
+    idx_discard.reverse()
+
+    for group in namelist_dict.keys():
+	for variable in namelist_dict[group].keys():
+	    dummy=namelist_dict[group][variable]
+	    if len(dummy)==1:
+		pass
+	    else:
+		for idx in idx_discard:
+		    dummy.pop(idx)
+
+    # &domain corrections
+    namelist_dict['&domains']['max_dom'][0]=2
+    # grid id
+    namelist_dict['&domains']['grid_id']=[1,2]
+    # ndown reads 1 as parent and 2 as child hence
+    namelist_dict['&domains']['parent_id']=[0,1]
+    # parent grid ratio of 'parent' is 1:
+    namelist_dict['&domains']['parent_grid_ratio'][0]=1
+    # j_parent_start of parent is 1
+    namelist_dict['&domains']['j_parent_start'][0]=0
+    # same for i_parent_start
+    namelist_dict['&domains']['i_parent_start'][0]=0
+    # currently not changing parent_time_step_ratio, but watch this space
+
+    # &time control corrections
+    namelist_dict['&time_control']['input_from_file'][0]='.true.'
+
+    # &bdy_control
+    # parent grid is not nested as far as WRF is concerned...
+    namelist_dict['&bdy_control']['nested'][0]='.false.'
+    # parent grid will have specified boundary conditions
+    namelist_dict['&bdy_control']['specified'][0]='.true.'
+
+
+    output_file = \
+      os.path.join(run_dir,'namelist.input.d'+str(pp).zfill(2)+'.ndown')
+    wu.write_namelist(namelist_dict,output_file)
+    return output_file
+
+def generate_namelist_input_dpp_wrf(pp,namelist_input_master,run_dir='.'):
+    """Generate a namelist for wrf.exe for the pp'th domain
+    
+    This namelist will contain only one column, with max_dom = 1 and 
+    interval_seconds = grib interval
+    """
+
+    namelist_dict=wu.read_namelist(namelist_input_master)
+    idx_discard=range(int(namelist_dict['&domains']['max_dom'][0]))
+    
+    # We'll be using this list as the list of entries to 'pop' from each variable 
+    # in the namelist, so we need to reverse this list to preserve only the 'pp'th
+    # datum
+    idx_discard.pop(idx_discard.index(pp-1))
+    idx_discard.reverse()
+
+    # multiplicative time step ratio
+    mult_time_step_ratio=1
+    # Must include CURRENT parent_time_step ratio as well, i.e. up to pp rather than pp-1.
+    # time_step_ratio_1*time_step_ratio_2*...*time_step_ratio_pp
+
+    for idx in range(pp):
+	mult_time_step_ratio*=namelist_dict['&domains']['parent_time_step_ratio'][idx]
+
+    for group in namelist_dict.keys():
+	for variable in namelist_dict[group].keys():
+	    dummy=namelist_dict[group][variable]
+	    if len(dummy)==1:
+		pass
+	    else:
+		for idx in idx_discard:
+		    dummy.pop(idx)
+
+    # &domains
+    # take care of time step stuff
+    # be smart... make sure that your INITIAL time step is an integer multiple of the 
+    # product of the set of parent_time_step_ratio OR come here and modify this yourself...
+    # TODO: modify this myself to handle integer arithmetics. Need to consider 
+	# time_step_fract_num and time_step_fract_den
+	
+    namelist_dict['&domains']['parent_time_step_ratio'][0]=1
+    namelist_dict['&domains']['time_step'][0]/=mult_time_step_ratio
+    
+    namelist_dict['&domains']['max_dom'][0]=1
+    # grid id
+    namelist_dict['&domains']['grid_id'][0]=1
+    namelist_dict['&domains']['parent_id'][0]=0
+    namelist_dict['&domains']['parent_grid_ratio'][0]=1
+    # j_parent_start of parent is 1
+    namelist_dict['&domains']['j_parent_start'][0]=0
+    # same for i_parent_start
+    namelist_dict['&domains']['i_parent_start'][0]=0
+
+    # &time_control
+    namelist_dict['&time_control']['input_from_file'][0]='.true.'
+
+    # &bdy_control
+    # parent grid is not nested as far as WRF is concerned...
+    namelist_dict['&bdy_control']['nested'][0]='.false.'
+    # parent grid will have specified boundary conditions
+    namelist_dict['&bdy_control']['specified'][0]='.true.'
+
+   # TODO
+    # Shorten the time extent to save on CPU resources (ref. step 15 in dia)
+    # Include explicit definition of eta_levels
+
+    output_file = os.path.join(run_dir,'namelist.input.d'+str(pp).zfill(2)+'.wrf')
+    wu.write_namelist(namelist_dict,output_file)
+    return output_file
+
Index: trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/wrf/utils.py
===================================================================
--- trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/wrf/utils.py	(revision 296)
+++ trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/wrf/utils.py	(revision 296)
@@ -0,0 +1,686 @@
+"""
+repository of wrf utilities
+"""
+import os
+import sys
+import numpy as n
+import pylab as p
+from matplotlib.numerix import ma
+from matplotlib.toolkits.basemap import Basemap
+import pywrf.viz.utils as vu
+
+
+# the syntax to import nio depends on its version. We try all options from the
+# newest to the oldest
+try:
+    import Nio as nio
+except:
+    try:
+        import PyNGL.Nio as nio
+    except:
+        import PyNGL_numpy.Nio as nio
+
+# The following is only needed for hn3.its.monash.edu.au
+# Unfortunately we must get rid of spurious entries in the sys.path that can
+# lead to the loading of conflicting packages
+for dir_idx in range(len(sys.path)-1,-1,-1):
+    if 'python2.4' in sys.path[dir_idx]:
+        sys.path.pop(dir_idx)
+
+a_small_number = 1e-8
+
+def colons_to_underscores(test=False):
+    files = os.listdir('.')
+    print 'I plan to do the following:'
+    for file in files:
+        command = 'mv ' + file + ' ' + file[:-6] + '_00_00'
+        print command
+    print 'Happy? [y/n]'
+    answer = raw_input()
+    if answer == 'y':
+        for file in files:
+            command = 'mv ' + file + ' ' + file[:-6] + '_00_00'
+            os.system(command)
+    else:
+        print 'OK, maybe next time ;]' 
+    return
+
+def wrf_to_pressure(var, pressure, press_lvl, fill_value=1e35, positive_only=False):
+    """
+    this functions vertically interpolates to pressure levels WRF fields
+    usage 
+    >>> wrf_to_pressure(var, pressure, press_lvl, fill_value=1e35, 
+    ...    positive_only=False)
+    where 
+        var -> field to be interpolated
+        pressure -> total pressure field (p + pb)
+        press_lvl -> list or numpy array of desired levels
+        fill_value -> this is assigned to a cell if the requested pressure level
+          lies below or above the column of values supplied for that cell in 
+          the pressure array.
+        positive_only -> set this flag to true to prevent the interpolation to 
+          generate negative values for fields for which this does not make 
+          sense.
+        interpolated_var -> the interpolated field which will have shape 
+          (len(press_lvl), var.shape[1], var.shape[2])
+    NB in this current implementation of the function arrays need to be 
+      destaggered before they are operated upon. Furthermore, it is assumed the
+      function is invoked to process one frame at a time i.e. it works on 3D
+      arrays. As usual COARDS ordering of dimensions i.e. (time,lvl,lat,lon) 
+      is assumed.
+    """
+    # these are the pressure levels we want as output
+    k_max, j_max, i_max = pressure.shape
+    output = n.zeros((len(press_lvl), j_max, i_max))
+    for lvl_idx in range(len(press_lvl)):
+        for j in range(j_max):
+            for i in range(i_max):
+                # make it ascending for searchsorted
+                pressure_column = pressure[::-1,j,i]
+                if press_lvl[lvl_idx] > pressure_column.max() \
+                  or press_lvl[lvl_idx] < pressure_column.min():
+                    output[lvl_idx,j,i] = fill_value
+                else:
+                    var_column = var[::-1,j,i]
+                    press_idx = pressure_column.searchsorted(press_lvl[lvl_idx])
+                    xa = pressure_column[press_idx]
+                    xb = pressure_column[press_idx - 1]
+                    ya = var_column[press_idx]
+                    yb = var_column[press_idx - 1]
+                    x = press_lvl[lvl_idx]
+                    y = vu.lin_interp(xa, xb, ya, yb, x)
+                    if positive_only and y < 0.:
+                        y = 0.
+                    output[lvl_idx,j,i] = y
+    return output
+
+def wrf2latlon(projection, 
+  lat_0, lon_0,
+  lat_1,
+  lat_2,
+  grid_centre_lon,
+  grid_centre_lat,
+  nx,
+  ny,
+  delta,
+  staggered = False,
+  return_extra = False
+  ):
+    from pyproj import Proj
+    proj = Proj(proj=projection, lat_0=lat_0, lon_0=lon_0, lat_1=lat_1, lat_2=lat_2)
+    grid_centre_x, grid_centre_y = proj(grid_centre_lon, grid_centre_lat)
+    grid_x_extent = nx * delta
+    grid_y_extent = ny * delta
+    min_x = grid_centre_x - grid_x_extent/2.
+    min_y = grid_centre_y - grid_y_extent/2.
+    max_x = min_x + grid_x_extent
+    max_y = min_y + grid_y_extent
+    x = n.arange(min_x, max_x + a_small_number, delta)
+    y = n.arange(min_y, max_y + a_small_number, delta)  
+    X, Y = p.meshgrid(x,y)
+    lon, lat = proj(X, Y, inverse=True)
+
+    if staggered:
+        x_u = n.arange(min_x, max_x + delta + a_small_number, delta)
+        x_u -= (delta /2.)
+        X_u, Y_u = p.meshgrid(x_u,y)
+        y_v = n.arange(min_y, max_y + delta + a_small_number, delta)  
+        y_v -= (delta /2.)
+        X_v, Y_v = p.meshgrid(x,y_v)
+        lon_u, lat_u = proj(X_u, Y_u, inverse=True)
+        lon_v, lat_v = proj(X_v, Y_v, inverse=True)
+
+    llcrnrlon, llcrnrlat = proj(min_x, min_y, inverse=True)
+    urcrnrlon, urcrnrlat = proj(max_x, max_y, inverse=True)
+    map = Basemap(
+      projection = projection,
+      lon_0 = lon_0,
+      lat_0 = lat_0,
+      lat_1 = lat_1,
+      lat_2 = lat_2,
+      llcrnrlon = llcrnrlon,
+      llcrnrlat = llcrnrlat,
+      urcrnrlon = urcrnrlon,
+      urcrnrlat = urcrnrlat
+      )
+
+    if return_extra:
+        # it seems that the basemap automatically sets the origin the native
+        # coordinate system at its llcrnr (or close to it...)
+        offset = map(lon_0, lat_0)
+        if staggered:
+            X += offset[0]
+            Y += offset[1]
+            X_u += offset[0]
+            Y_u += offset[1]
+            X_v += offset[0]
+            Y_v += offset[1]
+            return lon, lat, lon_u, lat_u, lon_v, lat_v, \
+              X, Y, X_u, Y_u, X_v, Y_v, map
+        else:
+            X += offset[0]
+            Y += offset[1]
+            return lon, lat, X, Y, map
+    else: 
+        if staggered:
+            return lon, lat, lon_u, lat_u, lon_v, lat_v
+        else:
+            return lon, lat
+  
+def wrf_grid(
+  # WPS -> map_proj
+  projection,
+  # WPS -> truelat1
+  lat_1,
+  # WPS -> truelat2
+  lat_2,
+  # WPS -> stand_lon
+  lon_0,
+  # WPS -> ref_lat
+  grid_centre_lat,
+  # WPS -> ref_lon
+  grid_centre_lon,
+  delta_x,
+  delta_y,
+  # WPS -> e_we
+  nx,
+  # WPS -> e_sn
+  ny,
+  show_mass_grid = False,
+  show_stag_grids = False,
+  ):
+    if lon_0 != grid_centre_lon:
+        print 'not implemented yet -> see the source'
+        print "\tbut let's try it anyways..."
+        #return
+
+    width   = nx * delta_x
+    height  = ny * delta_y
+    frame_x = 10 * delta_x
+    frame_y = 10 * delta_y
+    m = Basemap(
+      lat_0 = grid_centre_lat,
+      # this could be a bad assumption... because lon_0 and grid_centre_lon
+      # need not be aligned, but at the same time I need to give this to
+      # basemap for the grid to be centred... I could probably fix it
+      # assigning lon_0 and then imposing a grid shift in native coordinates
+      # if ref_lon and lon_0 were not the same
+      lon_0 = lon_0,
+      lat_1 = lat_1,
+      lat_2 = lat_2,
+      width = width + 2*frame_x,
+      height = height + 2*frame_y,
+      resolution = 'l',
+      area_thresh=1000.
+      )
+    grid_centre_x, grid_centre_y = m(grid_centre_lon, grid_centre_lat)
+    min_x = grid_centre_x - width/2.
+    min_y = grid_centre_y - height/2.
+    max_x = min_x + width
+    max_y = min_y + height
+    x = n.arange(min_x, max_x + a_small_number, delta_x)
+    y = n.arange(min_y, max_y + a_small_number, delta_y)  
+    x = x[1:-1]
+    y = y[1:-1]
+    x_u = n.arange(min_x, max_x + delta_x + a_small_number, delta_x)
+    x_u -= delta_x/2.
+    x_u = x_u[1:-1]
+    y_v = n.arange(min_y, max_y + delta_y + a_small_number, delta_y)
+    y_v -= delta_y/2.
+    y_v = y_v[1:-1]
+    X, Y = p.meshgrid(x,y)
+    lon, lat = m(X, Y, inverse=True)
+    X_u, Y_u = p.meshgrid(x_u,y)
+    lon_u, lat_u = m(X_u, Y_u, inverse=True)
+    X_v, Y_v = p.meshgrid(x,y_v)
+    lon_v, lat_v = m(X_v, Y_v, inverse=True)
+    if show_mass_grid:
+        m.plot(X, Y, 'b+')
+        m.plot([grid_centre_x], [grid_centre_y], 'r+')
+        if show_stag_grids:
+            m.plot(X_u, Y_u, 'g+')
+            m.plot(X_v, Y_v, 'r+')
+        m.drawcoastlines()
+	p.show()
+    output = {
+      'map' : m,
+      'mass_stag': {
+        'lon_2d' : lon,
+        'lat_2d' : lat,
+        'x'      : x,
+        'y'      : y,
+        'x_2d'   : X,
+        'y_2d'   : Y,
+        }, 
+      'u_stag': {
+        'lon_2d' : lon_u,
+        'lat_2d' : lat_u,
+        'x'      : x_u,
+        'y'      : y,
+        'x_2d'   : X_u,
+        'y_2d'   : Y_u,
+        }, 
+      'v_stag': {
+        'lon_2d' : lon_v,
+        'lat_2d' : lat_v,
+        'x'      : x,
+        'y'      : y_v,
+        'x_2d'   : X_v,
+        'y_2d'   : Y_v,
+        } 
+      }
+
+    return output
+      
+
+def find_parent_ij(outer_grid, inner_grid):
+    projection = outer_grid[0]
+    lat_0 = outer_grid[1]
+    lon_0 = outer_grid[2]
+    lat_1 = outer_grid[3]
+    lat_2 = outer_grid[4]
+    grid_centre_lon = outer_grid[5]
+    grid_centre_lat = outer_grid[6]
+    nx = outer_grid[7]
+    ny = outer_grid[8]
+    delta = outer_grid[9]
+
+    from pyproj import Proj
+    proj = Proj(proj=projection, lat_0=lat_0, lon_0=lon_0, lat_1=lat_1, lat_2=lat_2)
+    grid_centre_x, grid_centre_y = proj(grid_centre_lon, grid_centre_lat)
+    grid_x_extent = nx * delta
+    grid_y_extent = ny * delta
+    min_x = grid_centre_x - grid_x_extent/2.
+    min_y = grid_centre_y - grid_y_extent/2.
+    max_x = min_x + grid_x_extent
+    max_y = min_y + grid_y_extent
+    outer_x = n.arange(min_x, max_x + a_small_number, delta)
+    outer_y = n.arange(min_y, max_y + a_small_number, delta)  
+
+    projection = inner_grid[0]
+    lat_0 = inner_grid[1]
+    lon_0 = inner_grid[2]
+    lat_1 = inner_grid[3]
+    lat_2 = inner_grid[4]
+    grid_centre_lon = inner_grid[5]
+    grid_centre_lat = inner_grid[6]
+    nx = inner_grid[7]
+    ny = inner_grid[8]
+    delta = inner_grid[9]
+ 
+    grid_centre_x, grid_centre_y = proj(grid_centre_lon, grid_centre_lat)
+    grid_x_extent = nx * delta
+    grid_y_extent = ny * delta
+    min_x = grid_centre_x - grid_x_extent/2.
+    min_y = grid_centre_y - grid_y_extent/2.
+    max_x = min_x + grid_x_extent
+    max_y = min_y + grid_y_extent
+    inner_x = n.arange(min_x, max_x + a_small_number, delta)
+    inner_y = n.arange(min_y, max_y + a_small_number, delta)
+
+    return outer_x.searchsorted(inner_x[0]), outer_y.searchsorted(inner_y[0])
+
+def write_namelist(namelist_dict,outfile='outfile'):
+
+    out_string=''
+    for group in namelist_dict.keys():
+	out_string += group + '\n'
+	for variable in namelist_dict[group].keys():
+	    out_string += variable + ' = ' 
+	    for element in namelist_dict[group][variable]:
+		out_string += repr(element).strip("'")+', '
+	    out_string+='\n'
+	out_string+= '/\n\n'
+
+    fid=open(outfile,'w')
+    fid.write(out_string)
+
+    return None
+
+
+
+def read_namelist(namelist_file):
+    """read contents of namelist file and return dictionary containing all options
+    
+    Created 20/01/08 by Thom Chubb.
+    Modified 20/01/08 by Thom Chubb and Valerio Bisignesi
+
+    TODO: mod_levs have a slightly different format in the namelist file, but as 
+    they come last in namelist.wps I have conveniently dropped them (with a warning
+    of course =) ). Whoever needs them first can come up with a fix for this.
+    Untested as yet with the namelist.input file. It should work fine and may be useful 
+    as a consistency check between the two files. This has been bugging me for a while.
+    """
+
+    fid=open(namelist_file)
+
+    out_dict={}
+    data = fid.readlines()
+    num_lines = len(data)
+
+    for line in data:
+	if '&' in line:
+	    # Then this line is a namelist title
+	    is_comment=False
+	    current_label = line.rstrip('\n').lstrip(' ')
+	    out_dict[current_label] ={}
+	elif '/' in line:
+	    # Then lines following this are comments until the next '&'
+	    is_comment=True
+	elif '=' in line:
+	    # Then this line contains variable information to be stored
+	    if not is_comment:
+		variable,values = line.split('=')
+		values = values.rstrip('\n').rstrip(',')
+		try:
+		    values=[int(element) for element in values.split(',')]
+		except ValueError:
+		    try:
+			values=[float(element) for element in values.split(',')]
+		    except ValueError:
+			values=[value.strip() for value in values.split(',')]
+
+		out_dict[current_label][variable.strip()]=values
+
+    return out_dict
+
+def check_namelist_consistency():
+    # TODO
+	# run time vs. run date 
+	# strict consistency between dates in namelist.wps and namelist.input not 
+	# necessary as long as dates in namelist.input are a subset of those in namelist.wps
+	# and the interval_seconds is correct
+    pass
+
+#def read_namelist_old(namelist_file):
+#    """read contents of namelist file and return dictionary containing all options
+#    
+#    Created 20/01/08 by Thom Chubb.
+#
+#    TODO: mod_levs have a slightly different format in the namelist file, but as 
+#    they come last in namelist.wps I have conveniently dropped them (with a warning
+#    of course =) ). Whoever needs them first can come up with a fix for this.
+#    Untested as yet with the namelist.input file. It should work fine and may be useful 
+#    as a consistency check between the two files. This has been buggine me for a while.
+#    """
+#
+#    fid=open(namelist_file)
+#
+#    out_dict={}
+#    data = fid.readlines()
+#    num_lines = len(data)
+#
+#    # for k in range(0,num_lines):
+#    for line in data:
+#	# str = data[k].rstrip('\n').rstrip(',').split()
+#	str = line.rstrip('\n').rstrip(',').split()
+#
+#	if str == []:
+#	    pass
+#	elif str[0] == '':  
+#	    pass
+#	elif str[0][0] == '':
+#	    pass
+#	elif str[0][0] == '/' :
+#	    is_comment=True
+#	elif str[0][0] == '&':
+#	    # Then this line is a namelist title
+#	    is_comment=False
+#	    label = str[0]
+#
+#	    if label == '&mod_levs':
+#		print ">> WARNING: mod levels don't work yet"
+#		break
+#
+#	    out_dict[label] ={}
+#
+#	else: 
+#	    if not is_comment:
+#		field = str[0]
+#		out_dict[label][field] = [] 
+#
+#
+#		for k in range(2,str.__len__()):
+#		    dat = str[k].rstrip(',')
+#		    # dat = str[k].split(',')
+#		    print str, dat
+#		    try:
+#			dat=float(dat)
+#		    except ValueError:
+#			pass
+#		    except TypeError:
+#			pass
+#
+#		    out_dict[label][field].extend(dat) 
+#	    
+#	    # out_dict[label][field] = [] 
+#	    # out_dict[label][field].append(str[2:])
+#
+#    return out_dict
+
+
+def wrf_grid_wrapper(namelist_file='namelist.wps',nest_level=0):
+    """
+    wrf_grid_wrapper(namelist_file='namelist.wps',nest_level=0):
+    Basic wrapper to easily visualise grids specified in namelist.wps
+    
+    Uses wrf.utils.read_namelist() to read the appropriate variables 
+    in a specified namelist file and then calls wrf.utils.wrf_grid() to define 
+    the Basemap projection and show the grid over a map.
+
+    Created 20/01/08 by Thom Chubb.
+    Modified 27/01/08 - implemented viz.utils.plot_grid() to handle plotting and 
+    capability for viewing as many grids as desired.
+
+    TODO: Could use some more error checking, I think it will all fail if nest_levels
+    are not consecutive!! Interpolation implemented is awkward but seems to work.
+	
+	""" 
+
+    # Create namelist dictionary
+    nd = read_namelist(namelist_file)
+
+    # Field editing to make python happy
+    if nd['&geogrid']['map_proj'][0]=="'lambert'":
+	print 'debug: modify input field lambert -> lcc' 
+	nd['&geogrid']['map_proj'][0]='lcc'
+    
+    grid = []
+
+    outer_grid = wrf2latlon(nd['&geogrid']['map_proj'][0],
+		    nd['&geogrid']['ref_lat'][0],
+		    nd['&geogrid']['ref_lon'][0],
+		    nd['&geogrid']['truelat1'][0], 
+		    nd['&geogrid']['truelat2'][0],
+		    nd['&geogrid']['ref_lon'][0],
+		    nd['&geogrid']['ref_lat'][0],
+#		    nd['&geogrid']['e_we'][nest_level[0]],
+#		    nd['&geogrid']['e_sn'][nest_level[0]],
+		    nd['&geogrid']['e_we'][0],
+		    nd['&geogrid']['e_sn'][0],
+		    nd['&geogrid']['dx'][0],
+		    staggered = False,
+		    return_extra = True
+		    )
+    # print "outer_grid.shape =", outer_grid[0].shape
+
+    grid.append(outer_grid)
+    nest_level.sort()
+    # nest_level=p.sort(nest_level)
+
+    # for k in nest_level[1:]:
+    for k in range(1,max(nest_level)+1):
+	this_grid = []
+	try:
+	    e_we = nd['&geogrid']['e_we'][k]
+	except IndexError:
+	    print "Out of range. Not enough grids specified within namelist file"
+	    return 1
+	e_sn = nd['&geogrid']['e_sn'][k]
+	pgr  = nd['&geogrid']['parent_grid_ratio'][k]
+	ips  = nd['&geogrid']['i_parent_start'][k]
+	jps  = nd['&geogrid']['j_parent_start'][k]
+	print 'processing grid: ',k
+	# print e_we,e_sn,pgr,ips,jps
+	# print ips,':',(ips+(e_we/pgr)),',', jps,':',(jps+(e_sn/pgr))
+	
+	# Interpolate in grid space to estimate inner gridpoints - 
+	# care to find a more elegant approach???
+	x1=grid[-1][2][jps, ips:ips+e_we/pgr]
+	y1=grid[-1][3][jps:jps+e_sn/pgr, ips]
+	
+	a1=n.arange(0,x1.__len__(),1) 
+	a2=n.arange(0,x1.__len__(),1./pgr)
+	b1=n.arange(0,y1.__len__(),1)
+	b2=n.arange(0,y1.__len__(),1./pgr)
+
+	x2=n.interp(a2,a1,x1)
+	y2=n.interp(b2,b1,y1)
+
+	[X,Y]=n.meshgrid(x2,y2)
+
+	# convert back to navigational coordinates
+	lon,lat=grid[-1][4](X,Y,nd['&geogrid']['map_proj'])
+
+	for j in [lon,lat,X,Y,grid[-1][4]]:
+	    this_grid.append(j)
+	if (k in nest_level):	
+	    map=vu.plot_grid(this_grid[0],this_grid[1],skip=10,same_figure=True,return_map=True) 
+	grid.append(this_grid)	
+	# print grid[-1][0].shape 
+
+
+    if 0 in nest_level:
+	map=vu.plot_grid(outer_grid[0],outer_grid[1],skip=10,same_figure=True,return_map=True) 
+    map.drawmeridians(n.arange(130,180,15),labels=[1,0,0,1])
+    map.drawparallels(n.arange(0,-90,-15),labels=[1,0,0,1])
+    map.drawcoastlines()
+    map.drawrivers()
+    plot_custom_points(map)
+    p.show()
+    
+    return grid, map
+
+def calculate_mslp(p,pb,ph,phb,t,qvapor):
+    '''
+    calculate sea level pressure starting from 'raw' wrf output fields
+    usage:
+    >>> calculate_mslp(p,pb,ph,phb,t,qvapor)
+    where the arguments names correspond to the variable names in the 
+    wrfout files e.g. p(lvl,lat,lon) or p(time,lvl,lat,lon)
+    '''
+    import  from_wrf_to_grads as fw2g
+    cs = fw2g.from_wrf_to_grads.compute_seaprs
+
+    if len(p.shape) == 3:
+       # recover the full pressure field by adding perturbation and base
+       p = p + pb
+       p_t = p.transpose()
+       # same geopotential height
+       ph = (ph + phb) / 9.81
+       ph_t = ph.transpose()
+       qvapor_t = qvapor.transpose()
+       # do not add the wrf specified 300 factor as the wrapped fortran code
+       # does that for us
+       t_t = t.transpose()
+       nz = ph_t.shape[2]
+       # populate the geopotential_height at mid_levels array with
+       # averages between layers below and above
+       z = (ph_t[:,:,:nz-1] + ph_t[:,:,1:nz]) / 2.0
+       # finally "in one fell sweep"
+       # the zero is for debug purposes
+       return cs(z,t_t,p_t,qvapor_t,0).transpose()
+    elif len(p.shape) == 4:
+       mslp_shape = (p.shape[0], p.shape[2], p.shape[3])
+       mslp = n.zeros(mslp_shape)
+       for time_idx in range(p.shape[0]):
+           # recover the full pressure field by adding perturbation and base
+           dummy_p = p[time_idx] + pb[time_idx]
+           dummy_p_t = dummy_p.transpose()
+           # same geopotential height
+           dummy_ph = (ph[time_idx] + phb[time_idx]) / 9.81
+           dummy_ph_t = dummy_ph.transpose()
+           dummy_qvapor_t = qvapor[time_idx].transpose()
+           # do not add the wrf specified 300 factor as the wrapped fortran code
+           # does that for us
+           dummy_t_t = t[time_idx].transpose()
+           nz = dummy_ph_t.shape[2]
+           # populate the geopotential_height at mid_levels array with
+           # averages between layers below and above
+           z = (dummy_ph_t[:,:,:nz-1] + dummy_ph_t[:,:,1:nz]) / 2.0
+           # finally "in one fell sweep"
+           # the zero is for debug purposes
+           mslp[time_idx] = cs(z,dummy_t_t,dummy_p_t,dummy_qvapor_t,0).transpose()
+       return mslp
+    else:
+       print 'Wrong shape of the array'
+       return
+
+def calculate_mslp_wrapper(vars_dict, time_idx):
+    """Utility function to 
+    pull out the necessary variables from the wrfout file and call
+    calculate_mslp to generate the mslp field.
+    """
+    # accessing the times in the nc_file one at a time and
+    # using the .copy() method reduce the memory footprint
+    perturbation_pressure = vars_dict['P'].get_value()[time_idx].copy()
+    base_pressure = vars_dict['PB'].get_value()[time_idx].copy()
+    perturbation_geopotential = vars_dict['PH'].get_value()[time_idx].copy()
+    base_geopotential = vars_dict['PHB'].get_value()[time_idx].copy()
+    temperature = vars_dict['T'].get_value()[time_idx].copy()
+    mixing_ratio = vars_dict['QVAPOR'].get_value()[time_idx].copy()
+    mslp = calculate_mslp(
+      perturbation_pressure, 
+      base_pressure,
+      perturbation_geopotential,
+      base_geopotential,
+      temperature,
+      mixing_ratio)
+    #del perturbation_pressure, base_pressure
+    #del perturbation_geopotential, base_geopotential
+    #del temperature, mixing_ratio
+    return mslp
+
+def plot_custom_points(map):
+    """back by popular demand"""
+
+#    canberra_lon = [149 + 8./60]
+#    canberra_lat = [-35 - 17./60]
+#    map.plot(canberra_lon,canberra_lat, 'gs')
+
+    blue_calf_lon = [148.3944] 
+    blue_calf_lat = [-36.3869]  
+    map.plot(blue_calf_lon,blue_calf_lat, 'gs')
+    return
+
+def time_string_to_datetime(times):
+    '''
+    This function takes as input a numpy array of numpy string-arrays and
+    returns a list of corresponding datetime objects.
+    The array will be typically generated by a pynio get_value() call for 
+    the 'Times' variable of a wrfout file.
+    
+    Usage:
+    >>> import wrf.utils as wu
+    >>> import viz.utils as vu
+    >>> f,fv = vu.peek('some_wrfout_file' + '.nc', return_pointers=True)
+    >>> times = fv['Times'].get_value()
+    >>> times = wu.time_string_to_datetime(times)
+    '''
+    from datetime import datetime
+    # VB we know that using pynio to access the 'Times' variable in a wrfout 
+    # file we are returned a numpy array of string arrays, hence
+    result = []
+    for time in times:
+        time_string = time.tostring()
+        year = int(time_string[:4])
+        month = int(time_string[5:7])
+        day = int(time_string[8:10])
+        hour = int(time_string[11:13])
+        minute = int(time_string[14:16])
+        second = int(time_string[17:])
+        # VB We assume the time is UTC so we will not bother 
+        # specifying a time zone
+        result.append(datetime(year,month,day,hour,minute,second))
+    return result
+
Index: trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/wrf/wrf_grib_encoder.py
===================================================================
--- trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/wrf/wrf_grib_encoder.py	(revision 296)
+++ trunk/MESOSCALE_DEV/PLOT/PYTHON/pywrf.example.r39/wrf/wrf_grib_encoder.py	(revision 296)
@@ -0,0 +1,2051 @@
"""
This module encodes the fields needed by WRF -- more specifically, the
fields listed in the GFS Vtable -- into GRIB2 files suitable for WPS.

Assumptions:
 - all the 3D fields need to be on isobaric levels when they are passed to the
   encoding function
 - every 3D field must be supplied with a corresponding surface field
 - the COARDS ordering of dimensions is assumed i.e. time, level, lat, lon

Usage:
    edit data_directory / sfc_data_directory below to point at your netCDF
    files, then run this module as a script.
"""
+
+#############################################################################
+# TODO
+# - change the 'data' structure to a dictionary creating appropriate labels 
+# from the dates
+#############################################################################
+import os
+import sys
+
+# the syntax to import nio depends on its version. We try all options from the
+# newest to the oldest
+try:
+    import Nio as nio
+except:
+    try:
+        import PyNGL.Nio as nio
+    except:
+        import PyNGL_numpy.Nio as nio
+
# The following is only needed for hn3.its.monash.edu.au.
# Scrub spurious python2.4 entries from the module search path, which would
# otherwise lead to the loading of conflicting packages.  Slice assignment
# keeps the filtering in place (same list object).
sys.path[:] = [entry for entry in sys.path if 'python2.4' not in entry]
+
+import numpy as n
+import pylab as p
+from matplotlib.numerix import ma
+import grib2
+from string import zfill
+import pywrf.viz.utils as vu
+from pywrf.misc.met_utils import *
+#data_directory = '/Users/val/data/laps_nc_files/press/first_try/'
+#sfc_data_directory = '/Users/val/data/laps_surface_data/'
+#data_directory = '/mnt/mac/Users/val/data/laps_nc_files/press/first_try/'
+#sfc_data_directory = '/mnt/mac/Users/val/data/laps_surface_data/'
+#data_directory = '/media/DATA/wrf/canberra_nc_files/press/'
+#sfc_data_directory = '/media/DATA/wrf/canberra_nc_files/sfc/'
+#data_directory = '/media/DATA/wrf/canberra/press/'
+#sfc_data_directory = '/media/DATA/wrf/canberra/sfc/'
+#data_directory = '/Volumes/COMMON/wrf/canberra/press/'
+#sfc_data_directory = '/Volumes/COMMON/wrf/canberra/sfc/'
+#data_directory = '/Volumes/DATA/wrf/canberra/press/'
+#sfc_data_directory = '/Volumes/DATA/wrf/canberra/sfc/'
+#data_directory = '/Users/val/Desktop/workspace/plotting/canberra/press/'
+#sfc_data_directory = '/Users/val/Desktop/workspace/plotting/canberra/sfc/'
+#data_directory = \
+#  '/nfs/1/home/vbisign/wrf/wps_data/pygrib2_test/canberra/press/analyses/'
+#sfc_data_directory = \
+#  '/nfs/1/home/vbisign/wrf/wps_data/pygrib2_test/canberra/sfc/'
data_directory = \
  '/nfs/1/home/vbisign/wrf/wps_data/run_003/press/'
# BUG FIX: the original read `sfc_data_directory = \` immediately followed by
# a comment line -- a backslash continuation cannot be terminated by a
# comment, so the module raised SyntaxError and sfc_data_directory was never
# bound.
# NOTE(review): the run_003 sfc path below is inferred from the press path
# and the commented press/sfc directory pairs above -- confirm it against the
# actual data layout before running.
sfc_data_directory = \
  '/nfs/1/home/vbisign/wrf/wps_data/run_003/sfc/'
+# It is assumed that most people will use a single nc file as the source
+# of their fields. Neverthless, the field nc_file_name is available for those
+# that need to use multiple data sources.
+
+# the code is split in two parts... the first part gathers all the
+# information that is specific to your data source... stuff like the location
+# of the netcdf files, the name of the variables of interest in your files,
+# and any special treatment of the fields like changing units, scaling the
+# data or anything that is needed to get the data in the form required by the
+# encoding function.
+# This translates to simply having to generate a fairly pedantic dictionary
+# that spells out what needs to be done with the fields during the grib
+# encoding
+# the second part is the encoding function itself in which it is assumed the
+# data is passed in a dictionary containing the data as numpy arrays in the 
+# right units
+
+# Unfortunately it proves hard to both generalize the data structure and
+# maintain flexibility... hence at the price of making the code longer I am
+# going to process one field at the time so that it is clear to anyone what
+# the steps that are (may be) involved are...
+# I still believe it is a good idea to collect all the information needed in
+# one big dictionary for ease of retrieval
+
+
#############################################################################
# Preliminaries
# This is the data structure that I will pass to the encoder:
# one dictionary is appended per processed time, each holding the idsect /
# gdsinfo / gdtmpl metadata plus the per-discipline '2d'/'3d' field dicts.
data = []
+
+# to generate a grib2 file using Jeffrey Whitaker's grib2 class, it is necessary
+# to provide the constructor with two pieces of information:
+# - the discipline of the data to be encoded
+# - the identification section
+# this means the class will need to be reconstructed every time the discipline
+# changes
+
+
def missing(octects):
    """Return the GRIB2 'missing' sentinel for a field `octects` octets wide.

    GRIB marks an unavailable value by setting every bit of the space
    allocated to it; read back as an unsigned integer that is the largest
    number representable in octects*8 bits.  (Refer to the GRIB documentation
    for how many octets any particular field is allocated.)
    """
    n_bits = 8 * octects
    return (1 << n_bits) - 1
+
def prepare_sfc_data(f, fv):
    """Locate, open and index the surface-diagnostics file matching f.

    f  -- pynio file handle of a pressure-level analysis file
    fv -- its .variables dictionary

    Returns (sfc_file, sfc_f, sfc_fv, time_idx): the surface file name, its
    open pynio handle and variables dict, and the time index whose
    valid_time equals the first valid_time of the pressure-level file.
    Relies on the module globals nio and sfc_data_directory.
    """
    # work out which file to pick based on the base date of the first file
    # this is based on the (fragile) assumption that nobody changed the file
    # names from the standard laps...
    # this was needed for the first batch of data...
    valid_date = fv['valid_date'].get_value()[0]
    valid_time = fv['valid_time'].get_value()[0]
    # the following is old stuff
    if 0:
        # the data in each file 'starts' at 12:00pm (UTC)
        if valid_time < 1200:
            valid_date -= 1
        #sfc_file = 'laps' + str(valid_date)[4:] + '_surface.nc'
        # NOTE(review): dead branch (if 0) -- if ever re-enabled, sfc_file is
        # referenced on the next line before being assigned (its assignment
        # above is commented out) and would raise NameError.
        print sfc_data_directory + sfc_file
    # this is new and (hopefully) better
    if 1:
        # there is one sfc data file for each of the analysis containing the
        # full 73 hours of diagnostic fields
        # in the simplified case that we do not span analysis -> we start again
        # with a different file at every analysis:
        base_date = fv['base_date'].get_value()
        base_time = fv['base_time'].get_value()
        # file name pattern: regprog.laps_pt375.MMDD.HH.slvfld.n3.nc
        sfc_file = 'regprog.laps_pt375.' \
          + str(base_date)[4:] + '.' + str(base_time).zfill(4)[:2] \
          + '.slvfld.n3.nc'
    # if we use data from a single forecast we just name the appropriate file
    # sfc_file = 'laps0117_surface.nc'
    sfc_f = nio.open_file(sfc_data_directory + sfc_file)
    sfc_fv = sfc_f.variables
    # I need this to work out which time to pick in the file...
    sfc_valid_time = sfc_fv['valid_time'].get_value()
    time_idx = sfc_valid_time.tolist().index(valid_time)
    return sfc_file, sfc_f, sfc_fv, time_idx
+
+#############################################################################
+
+#############################################################################
+# this variables remain unchanged across the different times
+# BTW these (with the exception of the significance_ref_time) should not
+# affect WRF
+
# Identification-section values that remain unchanged across the different
# times.  BTW these (with the exception of significance_ref_time) should not
# affect WRF.
centre = 7                 # originating centre (Common Code Table C-1): 7 = NCEP (1 would be Melbourne)
subcentre = 0              # originating sub-centre (local table): 0 = no subcentre (3 = NCEP Central Operations)
master_table = 1           # GRIB Master Tables Version Number (Code Table 1.0)
local_table = 1            # GRIB Local Tables Version Number (Code Table 1.1): 1 = "let's see what happens" (0 = not used)
significance_ref_time = 1  # significance of reference time (Code Table 1.2): 1 = start of forecast
production_status = 2      # production status of data (Code Table 1.3): 2 = research product
type_processed_data = 1    # idsect[12], type of processed data (Code Table 1.4): 1 = forecast product
+
+#############################################################################
+
+# the first dimension of data will be the times to be processed
+# you will have to modify this to suit your data
+# this could mean just having a time index to get different times from the
+# same file or just pick different files for different times.
# one netCDF analysis file per time to be processed; each iteration of the
# loop below appends one entry to `data`
files = os.listdir(data_directory)
#for file in files[:2]:
+for file in files:
+    f = nio.open_file(data_directory + file)
+    fv = f.variables
+    base_date = str(fv['base_date'].get_value())
+    year = int(base_date[0:4])
+    month = int(base_date[4:6])
+    day = int(base_date[6:8])
+    # padding needed to use a string method on an integer
+    base_time = zfill(str(fv['base_time'].get_value()),4)
+    hour = int(base_time[0:2])
+    minute =  int(base_time[2:])
+    second = 0
+    # I did not generate this in the preamble above because I needed to wait
+    # for the base date and time info from the netcdf files...
+    # NB the order of the variables in the list MATTERS!
+    idsect = [
+      centre,
+      subcentre,
+      master_table,
+      local_table,
+      significance_ref_time,
+      year,
+      month,
+      day,
+      hour,
+      minute,
+      second,
+      production_status,
+      type_processed_data
+    ]
+
+    print file
+    # gdsinfo - Sequence containing information needed for the grid definition 
+    #     section.
+    gdsinfo = n.zeros((5,), dtype=int)
+    # gdsinfo[0] = Source of grid definition (see Code Table 3.0) -> 0 -> will be
+    #     specified in octects 13-14
+    gdsinfo[0] = 0
+    # gdsinfo[1] = Number of grid points in the defined grid.
+    #     TODO make it more general
+    #lat = fv['lat'].get_value()
+    # the idx was chosen to stop the data at 55S and avoid the rubbish far south
+    lat_cheat_idx = 30
+    # 1 for the full range
+    lon_cheat_idx = 1
+    lat = fv['lat'].get_value()[lat_cheat_idx:]
+    lon = fv['lon'].get_value()[:-lon_cheat_idx]
+    gdsinfo[1] = len(lat)*len(lon)
+    # gdsinfo[2] = Number of octets needed for each additional grid points defn. 
+    #     Used to define number of points in each row for non-reg grids 
+    #     (=0 for regular grid).
+    gdsinfo[2] = 0
+    # gdsinfo[3] = Interp. of list for optional points defn (Code Table 3.11)
+    #     0 -> There is no appended list
+    gdsinfo[3] = 0
+    # gdsinfo[4] = Grid Definition Template Number (Code Table 3.1)
+    #     0 -> Latitude/Longitude (or equidistant cylindrical, or Plate Carree)
+    gdsinfo[4] = 0
+    
+    # gdtmpl - Contains the data values for the specified Grid Definition 
+    #     Template ( NN=gds[4] ). Each element of this integer array contains 
+    #     an entry (in the order specified) of Grid Definition Template 3.NN
+    gdtmpl = n.zeros((19,), dtype=n.int64)
+    # In the following we refer to the lat/lon template (grid template 3.0)
+    # Oct: 15      Shape of the Earth (See Table 3.2)
+    #     0 -> spherical with radius = 6,367,470.0 m
+    #     TODO double check the radius they used in Tan's code
+    gdtmpl[0] = 0
+    # Oct: 16      Scale Factor of radius of spherical Earth
+    #     TODO hopefully (given the last option) the next 5 are ignored...
+    #     if 1 octet is the basic unit then a value of 255 should be equivalent
+    #     to all bits set to 1 -> this could be broken if more than one octect
+    #     is assigned -> maybe I should use the maximum represantable number
+    #     tailored to the number of octects?
+    #     are there any rules that make the encoder ignore automatically certain
+    #     options e.g. the oblate spheroid one when a spherical shape is assumed
+    gdtmpl[1] = missing(1)
+    # Oct: 17-20   Scale value of radius of spherical Earth
+    # NB they (grib2 std) interprets all bits set to 1 as a missing value
+    #     hence the value is base*(word_width*words_assigned) - 1
+    #     base is 2 since we are working in binary
+    #     word_width is 8 since they use octects
+    #     words_assigned is the number of words reserved to represent the value
+    #     -1 gives us the greatest representable number since we start from 0
+    gdtmpl[2] = missing(1)
+    # Oct: 21      Scale factor of major axis of oblate spheroid Earth
+    gdtmpl[3] = missing(1)
+    # Oct: 22-25   Scaled value of major axis of oblate spheroid Earth
+    gdtmpl[4] = missing(4)
+    # Oct: 26      Scale factor of minor axis of oblate spheroid Earth
+    gdtmpl[5] = missing(1)
+    # Oct: 27-30   Scaled value of minor axis of oblate spheroid Earth
+    gdtmpl[6] = missing(4)
+    # Oct: 31-34   Number of points along a parallel     
+    gdtmpl[7] = len(lon)
+    # Oct: 35-38   Number of points along a meridian
+    gdtmpl[8] = len(lat)
+    # Oct: 39-42   Basic angle of the initial production domain (see Note 1)
+    gdtmpl[9] = missing(4)
+    # Oct: 43-46   Subdivisions of basic angle used to define extreme longitudes 
+    #             and latitudes, and direction increments (see Note 1)
+    gdtmpl[10] = missing(4)
+    # Oct: 47-50   La1 - Latitude of first grid point (see Note 1)        
+    #gdtmpl.append(in_vars['lat'].get_value()[-1]*1e6)
+    gdtmpl[11] = lat[-1]*1e6
+    #gdtmpl[11] = lat[0]*1e6
+    # Oct: 51-54   Lo1 - Longitude of first grid point (see Note 1)
+    #gdtmpl.append(in_vars['lon'].get_value()[0]*1e6)
+    gdtmpl[12] = lon[0]*1e6
+    # Oct: 55      Resolution and component flags (see Table 3.3)
+    #    This should set all the bits to 0 meaning 
+    #    bit        value   meaning
+    #    1-2        ?       Reserved
+    #    3          0       i direction increments not given
+    #    4	        0       j direction increments not given
+    #    5          0       Resolved u and v components of vector quantities 
+    #                       relative to easterly and northerly directions
+    #    6-8        0       Reserved - set to zero
+    #gdtmpl[13] = 0 
+    #gdtmpl[13] = 12 
+    gdtmpl[13] = 48 
+    # Oct: 56-59   La2 - Latitude of last grid point (see Note1)     
+    #gdtmpl.append(in_vars['lat'].get_value()[0]*1e6)
+    gdtmpl[14] = lat[0]*1e6
+    #gdtmpl[14] = lat[-1]*1e6
+    # Oct: 60-63   Lo2 - Longitude of last grid point (see Note 1)
+    #gdtmpl.append(in_vars['lon'].get_value()[-1]*1e6)
+    gdtmpl[15] = lon[-1]*1e6
+    # Oct: 64-67   i (west-east) Direction increment (see Note1)     
+    #gdtmpl[16] = missing(4)
+    gdtmpl[16] = 0.375*1e6
+    # Oct: 68-71   j (south-north) Direction increment (see Note1)
+    #gdtmpl[17] = missing(4)
+    gdtmpl[17] = 0.375*1e6
+    #gdtmpl[17] = -0.375*1e6
+    #gdtmpl[17] = 33143
+    # Oct: 72      Scanning mode (flags see Table 3.4)
+    #    TODO double check if this is legit considering the flags 
+    #    This should set all the bits to 0 meaning 
+    #    bit        value   meaning
+    #    1          0       Points in the first row or column scan in +i (+x) 
+    #                       direction
+    #    2          0       Points in the first row or column scan in -i (-x) 
+    #                       direction
+    #    3          0       Adjacent pints in i(x) direction are consecutive
+    #    4	        0       All rows scan in the same direction
+    #    5-8        0       Reserved (set to zero)
+    #    NB I suspect the 3 bit might be important for the 'majoring' of the array?
+    #           	# 2^7	2^6	2^5	2^4	2^3	2^2	2^1	2^0
+    #           	# 1	2	3	4	5	6	7	8
+    gdtmpl[18] = 0 	# 0	0	0	0	0	0	0	0
+    #gdtmpl[18] = 16 	# 0	0	0	1	0	0	0	0
+    #gdtmpl[18] = 32 	# 0	0	1	0	0	0	0	0
+    #gdtmpl[18] = 64 	# 0	1	0	0	0	0	0	0
+    #gdtmpl[18] = 128 	# 1	0	0	0	0	0	0	0
+
+    #gdtmpl[18] = 192 	# 1	1	0	0	0	0	0	0
+    #gdtmpl[18] = 160 	# 1	0	1	0	0	0	0	0
+    #gdtmpl[18] = 144 	# 1	0	0	1	0	0	0	0
+    #gdtmpl[18] = 96 	# 0	1	1	0	0	0	0	0
+    #gdtmpl[18] = 80 	# 0	1	0	1	0	0	0	0
+    #gdtmpl[18] = 48  	# 0	0	1	1	0	0	0	0
+
+    #gdtmpl[18] = 224  	# 1	1	1	0	0	0	0	0
+    #gdtmpl[18] = 208  	# 1	1	0	1	0	0	0	0
+    #gdtmpl[18] = 176  	# 1	0	1	1	0	0	0	0
+    #gdtmpl[18] = 112  	# 0	1	1	1	0	0	0	0
+
+    #gdtmpl[18] = 240  	# 1	1	1	1	0	0	0	0
+
+    data.append(
+      {
+        'idsect' : idsect,
+        'gdsinfo' : gdsinfo,
+        'gdtmpl' : gdtmpl,  
+        'discipline':
+          {
+            # the first number in the list is the code for the discipline
+            'atmos': 
+               {
+                 'code':0,
+                 'fields':{'2d':{}, '3d':{}}
+               },
+            'ocean': 
+               {
+                 'code':10,
+                 'fields':{'2d':{}, '3d':{}}
+               },
+            'land': 
+               {
+                 'code':2,
+                 'fields':{'2d':{}, '3d':{}}
+               }
+          }
+      }
+    )
+    # this is the part where we extract the data from the nc files and put
+    # into the structure we will pass later to the encoder
+    # let's start with the atmospheric products specifically the surface
+    # fields
+    # each variables comes with all the metadata necessary to do its grib
+    # encoding using the addfield method of the encoder class i.e. pdtnum,
+    # pdtmpl, drtnum, drtmpl, field
+
+    # -1 stands for the last one (time) added
+    dummy = data[-1]['discipline']['atmos']['fields']['2d']
+    
+    # Let's prepare the surface pressure data and metadata
+    # Product Definition Template Number (see Code Table 4.0)
+    #     0 -> Analysis or forecast at a horizontal level or in a 
+    #          horizontal layer at a point in time.  (see Template 4.0)
+    pdtnum = 0
+    # array of zeros (int) of size 15
+    pdtmpl = n.zeros((15,),dtype=int)
+    # Follows the definition of template 4.0 
+    # Oct 10: Parameter category (see Code table 4.1). 3 -> mass
+    pdtmpl[0] = 3
+    # Oct 11: Parameter number (see Code table 4.2). 0 -> pressure
+    pdtmpl[1] = 0
+    # Oct 12: Type of generating process (see Code table 4.3). 2 -> forecast
+    pdtmpl[2] = 2
+    # Oct 13: Background generating process identifier 
+    # (defined by originating centre)
+    pdtmpl[3] = missing(1)
+    # Oct 14: Analysis or forecast generating process identified 
+    # (defined by originating centre)
+    pdtmpl[4] = missing(1)
+    # Oct 15-16: Hours of observational data cutoff after reference time 
+    pdtmpl[5] = missing(2)
+    # Oct 17: Minutes of observational data cutoff after reference time 
+    pdtmpl[6] = missing(1)
+    # Oct 18: Indicator of unit of time range (see Code table 4.4)
+    # 1 -> hours
+    # NB WPS expects this to be hours!
+    pdtmpl[7] = 1
+    # Oct 19-22: Forecast time in units defined by octet 18
+    pdtmpl[8] = fv['forc_hrs'].get_value()[0]
+    # Oct 23: Type of first fixed surface (see Code table 4.5)
+    # 1 -> ground or water surface
+    pdtmpl[9] = 1
+    # Oct 24: Scale factor of first fixed surface
+    pdtmpl[10] = 0
+    # Oct 25-28: Scaled value of first fixed surface
+    pdtmpl[11] = 0
+    # Oct 29: Type of second fixed surface (see Code table 4.5)
+    pdtmpl[12] = missing(1)
+    # Oct 30: Scale factor of second fixed surface
+    pdtmpl[13] = missing(1)
+    # Oct 31-34: Scaled value of second fixed surfaces
+    pdtmpl[14] = missing(3)
+
+    # drtnum - Data Representation Template Number (see Code Table 5.0).
+    #          0 -> grid point data - simple packing
+    drtnum = 0
+    # drtmpl - Sequence with the data values for the specified Data Representation 
+    #     Template (N=drtnum). Each element of this integer array contains an 
+    #     entry (in the order specified) of Data Representation Template 5.N Note 
+    #     that some values in this template (eg. reference values, number of 
+    #     bits, etc...) may be changed by the data packing algorithms. Use this to 
+    #     specify scaling factors and order of spatial differencing, if desired.
+    drtmpl = n.zeros((5,),dtype=int)
+    # NB to make sense of the following remember (from the wmo std):
+    # The original data value Y (in the units of code table 4.2) can be recovered 
+    # with the formula:
+    # Y * 10^D= R + (X1+X2) * 2^E
+    # For simple packing and all spectral data
+    #     E = Binary scale factor,
+    #     D = Decimal scale factor
+    #     R = Reference value of the whole field,
+    #     X1 = 0,
+    #     X2 = Scaled (encoded) value.
+    
+    # Oct: 12-15. Reference value (R) (IEEE 32-bit floating-point value)
+    drtmpl[0] = 0
+    # Oct: 16-17. Binary scale factor (E)
+    drtmpl[1] = 0
+    # Oct: 18-19. Decimal scale factor (D)
+    drtmpl[2] = 0
+    # Oct: 20. Number of bits used for each packed value for simple packing, or 
+    drtmpl[3] = 24
+    # for each group reference value for complex packing or spatial differencing
+    # Oct: 21. Type of original field values (see Code Table 5.1)
+    # 0 -> floating point
+    drtmpl[4] = 0
+
+    cheat = fv['sfc_pres'].get_value()[0,lat_cheat_idx:,:-lon_cheat_idx]
+    field = n.zeros(cheat.shape)
+    for lat_idx in range(cheat.shape[0]):
+        field[-(lat_idx+1)] = cheat[lat_idx]
+
+    if 0:
+        dummy['sfc_pres'] = {
+            'long_name' : 'surface pressure',
+            'pdtnum' : pdtnum, 
+            'pdtmpl' : pdtmpl, 
+            'drtnum' : drtnum,
+            'drtmpl' : drtmpl,
+            #'field' : fv['sfc_pres'].get_value()[0]
+            #'field' : fv['sfc_pres'].get_value()[0,lat_cheat_idx:,:-lon_cheat_idx]
+            'field' : field
+          }
+
+    # Let's prepare the mean sea level pressure data and metadata
+    # Product Definition Template Number (see Code Table 4.0)
+    #     0 -> Analysis or forecast at a horizontal level or in a 
+    #          horizontal layer at a point in time.  (see Template 4.0)
+    pdtnum = 0
+    # array of zeros (int) of size 15
+    pdtmpl = n.zeros((15,),dtype=int)
+    # Follows the definition of template 4.0 
+    # Oct 10: Parameter category (see Code table 4.1). 3 -> mass
+    pdtmpl[0] = 3
+    # Oct 11: Parameter number (see Code table 4.2). 1 -> MSLP
+    pdtmpl[1] = 1
+    # Oct 12: Type of generating process (see Code table 4.3). 2 -> forecast
+    pdtmpl[2] = 2
+    # Oct 13: Background generating process identifier 
+    # (defined by originating centre)
+    pdtmpl[3] = missing(1)
+    # Oct 14: Analysis or forecast generating process identified 
+    # (defined by originating centre)
+    pdtmpl[4] = missing(1)
+    # Oct 15-16: Hours of observational data cutoff after reference time 
+    pdtmpl[5] = missing(2)
+    # Oct 17: Minutes of observational data cutoff after reference time 
+    pdtmpl[6] = missing(1)
+    # Oct 18: Indicator of unit of time range (see Code table 4.4)
+    # 1 -> hours
+    # NB WPS expects this to be hours!
+    pdtmpl[7] = 1
+    # Oct 19-22: Forecast time in units defined by octet 18
+    pdtmpl[8] = fv['forc_hrs'].get_value()[0]
+    # Oct 23: Type of first fixed surface (see Code table 4.5)
+    # 101 -> sea level
+    pdtmpl[9] = 101
+    # Oct 24: Scale factor of first fixed surface
+    pdtmpl[10] = 0
+    # Oct 25-28: Scaled value of first fixed surface
+    pdtmpl[11] = 0
+    # Oct 29: Type of second fixed surface (see Code table 4.5)
+    pdtmpl[12] = missing(1)
+    # Oct 30: Scale factor of second fixed surface
+    pdtmpl[13] = missing(1)
+    # Oct 31-34: Scaled value of second fixed surfaces
+    pdtmpl[14] = missing(3)
+ 
+    # NB I am using the same drtnum and drtmpl for all data so I want
+    # replicate it in the following fields if you need to change this on a
+    # per-variable basis this is the place to redefine it. If you choose to do
+    # so then make sure you define one for each variable that follows or be
+    # aware that the last one defined will apply to all that follows.
+
+    cheat = fv['mslp'].get_value()[0,lat_cheat_idx:,:-lon_cheat_idx]
+    # to go from mb to Pa
+    #cheat *= 100.
+    cheat = cheat * 100.
+    field = n.zeros(cheat.shape)
+    for lat_idx in range(cheat.shape[0]):
+        field[-(lat_idx+1)] = cheat[lat_idx]
+
+    dummy['mslp'] = {
+        'long_name' : 'mean sea level pressure',
+        # in my data it comes in mb and I want in  Pascals
+        'pdtnum' : pdtnum, 
+        'pdtmpl' : pdtmpl, 
+        'drtnum' : drtnum,
+        'drtmpl' : drtmpl,
+        #'field' : fv['mslp'].get_value()[0]*100.
+        #'field' : fv['mslp'].get_value()[0,lat_cheat_idx:,:-lon_cheat_idx]*100.
+        'field' : field
+      }
+
+    # Let's prepare the 2m temperature data and metadata
+    # Product Definition Template Number (see Code Table 4.0)
+    #     0 -> Analysis or forecast at a horizontal level or in a 
+    #          horizontal layer at a point in time.  (see Template 4.0)
+    pdtnum = 0
+    # array of zeros (int) of size 15
+    pdtmpl = n.zeros((15,),dtype=int)
+    # Follows the definition of template 4.0 
+    # Oct 10: Parameter category (see Code table 4.1). 0 -> temperature
+    pdtmpl[0] = 0
+    # Oct 11: Parameter number (see Code table 4.2). 0 -> temperature
+    pdtmpl[1] = 0
+    # Oct 12: Type of generating process (see Code table 4.3). 2 -> forecast
+    pdtmpl[2] = 2
+    # Oct 13: Background generating process identifier 
+    # (defined by originating centre)
+    pdtmpl[3] = missing(1)
+    # Oct 14: Analysis or forecast generating process identified 
+    # (defined by originating centre)
+    pdtmpl[4] = missing(1)
+    # Oct 15-16: Hours of observational data cutoff after reference time 
+    pdtmpl[5] = missing(2)
+    # Oct 17: Minutes of observational data cutoff after reference time 
+    pdtmpl[6] = missing(1)
+    # Oct 18: Indicator of unit of time range (see Code table 4.4)
+    # 1 -> hours
+    # NB WPS expects this to be hours!
+    pdtmpl[7] = 1
+    # Oct 19-22: Forecast time in units defined by octet 18
+    pdtmpl[8] = fv['forc_hrs'].get_value()[0]
+    # Oct 23: Type of first fixed surface (see Code table 4.5)
+    # 1 -> ground or water surface
+    pdtmpl[9] = 103
+    # Oct 24: Scale factor of first fixed surface
+    pdtmpl[10] = 0
+    # Oct 25-28: Scaled value of first fixed surface
+    # -> 2m above ground
+    pdtmpl[11] = 2
+    # Oct 29: Type of second fixed surface (see Code table 4.5)
+    pdtmpl[12] = missing(1)
+    # Oct 30: Scale factor of second fixed surface
+    pdtmpl[13] = missing(1)
+    # Oct 31-34: Scaled value of second fixed surfaces
+    pdtmpl[14] = missing(3)
+ 
+    # NB I am using the same drtnum and drtmpl for all data so I want
+    # replicate it in the following fields if you need to change this on a
+    # per-variable basis this is the place to redefine it. If you choose to do
+    # so then make sure you define one for each variable that follows or be
+    # aware that the last one defined will apply to all that follows.
+
+    cheat = fv['sfc_temp'].get_value()[0,lat_cheat_idx:,:-lon_cheat_idx]
+    field = n.zeros(cheat.shape)
+    for lat_idx in range(cheat.shape[0]):
+        field[-(lat_idx+1)] = cheat[lat_idx]
+
+    dummy['sfc_temp'] = {
+        'long_name' : 'surface temperature',
+        'pdtnum' : pdtnum, 
+        'pdtmpl' : pdtmpl, 
+        'drtnum' : drtnum,
+        'drtmpl' : drtmpl,
+        #'field' : fv['sfc_temp'].get_value()[0]
+        #'field' : fv['sfc_temp'].get_value()[0,lat_cheat_idx:,:-lon_cheat_idx]
+        'field' : field
+      }
+
+    # Let's prepare the 2m temperature data and metadata
+    # Product Definition Template Number (see Code Table 4.0)
+    #     0 -> Analysis or forecast at a horizontal level or in a 
+    #          horizontal layer at a point in time.  (see Template 4.0)
+    pdtnum = 0
+    # array of zeros (int) of size 15
+    pdtmpl = n.zeros((15,),dtype=int)
+    # Follows the definition of template 4.0 
+    # Oct 10: Parameter category (see Code table 4.1). 0 -> temperature
+    pdtmpl[0] = 0
+    # Oct 11: Parameter number (see Code table 4.2). 0 -> temperature
+    pdtmpl[1] = 0
+    # Oct 12: Type of generating process (see Code table 4.3). 2 -> forecast
+    pdtmpl[2] = 2
+    # Oct 13: Background generating process identifier 
+    # (defined by originating centre)
+    pdtmpl[3] = missing(1)
+    # Oct 14: Analysis or forecast generating process identified 
+    # (defined by originating centre)
+    pdtmpl[4] = missing(1)
+    # Oct 15-16: Hours of observational data cutoff after reference time 
+    pdtmpl[5] = missing(2)
+    # Oct 17: Minutes of observational data cutoff after reference time 
+    pdtmpl[6] = missing(1)
+    # Oct 18: Indicator of unit of time range (see Code table 4.4)
+    # 1 -> hours
+    # NB WPS expects this to be hours!
+    pdtmpl[7] = 1
+    # Oct 19-22: Forecast time in units defined by octet 18
+    pdtmpl[8] = fv['forc_hrs'].get_value()[0]
+    # Oct 23: Type of first fixed surface (see Code table 4.5)
+    # 1 -> surface
+    pdtmpl[9] = 1
+    # Oct 24: Scale factor of first fixed surface
+    pdtmpl[10] = 0
+    # Oct 25-28: Scaled value of first fixed surface
+    # -> 2m above ground
+    pdtmpl[11] = 0
+    # Oct 29: Type of second fixed surface (see Code table 4.5)
+    pdtmpl[12] = missing(1)
+    # Oct 30: Scale factor of second fixed surface
+    pdtmpl[13] = missing(1)
+    # Oct 31-34: Scaled value of second fixed surfaces
+    pdtmpl[14] = missing(3)
+ 
+    # NB I am using the same drtnum and drtmpl for all data so I want
+    # replicate it in the following fields if you need to change this on a
+    # per-variable basis this is the place to redefine it. If you choose to do
+    # so then make sure you define one for each variable that follows or be
+    # aware that the last one defined will apply to all that follows.
+
+    cheat = fv['skin_temp'].get_value()[0,lat_cheat_idx:,:-lon_cheat_idx]
+    field = n.zeros(cheat.shape)
+    for lat_idx in range(cheat.shape[0]):
+        field[-(lat_idx+1)] = cheat[lat_idx]
+
+    dummy['skin_temp'] = {
+        'long_name' : 'skin temperature',
+        'pdtnum' : pdtnum, 
+        'pdtmpl' : pdtmpl, 
+        'drtnum' : drtnum,
+        'drtmpl' : drtmpl,
+        #'field' : fv['skin_temp'].get_value()[0]
+        #'field' : fv['skin_temp'].get_value()[0,lat_cheat_idx:,:-lon_cheat_idx]
+        'field' : field
+      }
+
+    # the next fields will come from a different file...
+    sfc_file, sfc_f, sfc_fv, time_idx = prepare_sfc_data(f,fv)
+    # DEBUG
+    #p.figure()
+    #p.contour(dummy['skin_temp']['field'])
+    #p.figure()
+    #p.contour(sfc_fv['skin_temp'].get_value()[time_idx])
+
+    # Let's prepare the 2m RH data and metadata
+    # Product Definition Template Number (see Code Table 4.0)
+    #     0 -> Analysis or forecast at a horizontal level or in a 
+    #          horizontal layer at a point in time.  (see Template 4.0)
+    pdtnum = 0
+    # array of zeros (int) of size 15
+    pdtmpl = n.zeros((15,),dtype=int)
+    # Follows the definition of template 4.0 
+    # Oct 10: Parameter category (see Code table 4.1). 1 -> moisture
+    pdtmpl[0] = 1
+    # Oct 11: Parameter number (see Code table 4.2). 1 -> RH
+    pdtmpl[1] = 1
+    # Oct 12: Type of generating process (see Code table 4.3). 2 -> forecast
+    pdtmpl[2] = 2
+    # Oct 13: Background generating process identifier 
+    # (defined by originating centre)
+    pdtmpl[3] = missing(1)
+    # Oct 14: Analysis or forecast generating process identified 
+    # (defined by originating centre)
+    pdtmpl[4] = missing(1)
+    # Oct 15-16: Hours of observational data cutoff after reference time 
+    pdtmpl[5] = missing(2)
+    # Oct 17: Minutes of observational data cutoff after reference time 
+    pdtmpl[6] = missing(1)
+    # Oct 18: Indicator of unit of time range (see Code table 4.4)
+    # 1 -> hours
+    # NB WPS expects this to be hours!
+    pdtmpl[7] = 1
+    # Oct 19-22: Forecast time in units defined by octet 18
+    pdtmpl[8] = fv['forc_hrs'].get_value()[0]
+    # Oct 23: Type of first fixed surface (see Code table 4.5)
+    # 103 -> fixed height above ground
+    pdtmpl[9] = 103
+    # Oct 24: Scale factor of first fixed surface
+    pdtmpl[10] = 0
+    # Oct 25-28: Scaled value of first fixed surface
+    # -> 2m above ground
+    pdtmpl[11] = 2
+    # Oct 29: Type of second fixed surface (see Code table 4.5)
+    pdtmpl[12] = missing(1)
+    # Oct 30: Scale factor of second fixed surface
+    pdtmpl[13] = missing(1)
+    # Oct 31-34: Scaled value of second fixed surfaces
+    pdtmpl[14] = missing(3)
+ 
    # NB: I am using the same drtnum and drtmpl for all data, so I won't
    # replicate them in the following fields. If you need to change this on a
    # per-variable basis, this is the place to redefine it. If you choose to do
    # so, then make sure you define one for each variable that follows, or be
    # aware that the last one defined will apply to all that follow.
+
+    # The surface files do not come with mixing ratio, but we can calculate
+    # that from the dew point temperature...
+    #dew_point = sfc_fv['scrn_dewpt'].get_value()[time_idx]
+    dew_point = sfc_fv['scrn_dewpt'].get_value()[time_idx,lat_cheat_idx:,:-lon_cheat_idx]
+    vapour_pressure = goff_gratch(dew_point)
+    #sfc_temp = sfc_fv['scrn_temp'].get_value()[time_idx]
+    sfc_temp = sfc_fv['scrn_temp'].get_value()[time_idx,lat_cheat_idx:,:-lon_cheat_idx]
+    sat_vapour_pressure = goff_gratch(sfc_temp)
+    #sat_vapour_pressure = ma.masked_where(sat_vapour_pressure == 0., 
+    #  sat_vapour_pressure)
+    sfc_rh = vapour_pressure / sat_vapour_pressure * 100
+    del sfc_temp, vapour_pressure, dew_point, sat_vapour_pressure
+
+    cheat = sfc_rh
+    field = n.zeros(cheat.shape)
+    for lat_idx in range(cheat.shape[0]):
+        field[-(lat_idx+1)] = cheat[lat_idx]
+
+    dummy['sfc_rh'] = {
+        'long_name' : '2m relative humidity',
+        'pdtnum' : pdtnum, 
+        'pdtmpl' : pdtmpl, 
+        'drtnum' : drtnum,
+        'drtmpl' : drtmpl,
+        #'field' : sfc_rh.filled(fill_value=0.)
+        #'field' : sfc_rh
+        'field' : field
+      }
+
+    # Let's prepare the zonal 10m wind data and metadata
+    # Product Definition Template Number (see Code Table 4.0)
+    #     0 -> Analysis or forecast at a horizontal level or in a 
+    #          horizontal layer at a point in time.  (see Template 4.0)
+    pdtnum = 0
+    # array of zeros (int) of size 15
+    pdtmpl = n.zeros((15,),dtype=int)
+    # Follows the definition of template 4.0 
+    # Oct 10: Parameter category (see Code table 4.1). 2 -> momentum
+    pdtmpl[0] = 2
+    # Oct 11: Parameter number (see Code table 4.2). 2 -> u-component of wind
+    pdtmpl[1] = 2
+    # Oct 12: Type of generating process (see Code table 4.3). 2 -> forecast
+    pdtmpl[2] = 2
+    # Oct 13: Background generating process identifier 
+    # (defined by originating centre)
+    pdtmpl[3] = missing(1)
+    # Oct 14: Analysis or forecast generating process identified 
+    # (defined by originating centre)
+    pdtmpl[4] = missing(1)
+    # Oct 15-16: Hours of observational data cutoff after reference time 
+    pdtmpl[5] = missing(2)
+    # Oct 17: Minutes of observational data cutoff after reference time 
+    pdtmpl[6] = missing(1)
+    # Oct 18: Indicator of unit of time range (see Code table 4.4)
+    # 1 -> hours
+    # NB WPS expects this to be hours!
+    pdtmpl[7] = 1
+    # Oct 19-22: Forecast time in units defined by octet 18
+    pdtmpl[8] = fv['forc_hrs'].get_value()[0]
+    # Oct 23: Type of first fixed surface (see Code table 4.5)
+    # 103 -> fixed height above ground
+    pdtmpl[9] = 103
+    # Oct 24: Scale factor of first fixed surface
+    pdtmpl[10] = 0
+    # Oct 25-28: Scaled value of first fixed surface
+    # -> 10m above ground
+    pdtmpl[11] = 10
+    # Oct 29: Type of second fixed surface (see Code table 4.5)
+    pdtmpl[12] = missing(1)
+    # Oct 30: Scale factor of second fixed surface
+    pdtmpl[13] = missing(1)
+    # Oct 31-34: Scaled value of second fixed surfaces
+    pdtmpl[14] = missing(3)
+ 
    # NB: I am using the same drtnum and drtmpl for all data, so I won't
    # replicate them in the following fields. If you need to change this on a
    # per-variable basis, this is the place to redefine it. If you choose to do
    # so, then make sure you define one for each variable that follows, or be
    # aware that the last one defined will apply to all that follow.
+
+    cheat = sfc_fv['q10m_zonal_wnd'].get_value()[time_idx,lat_cheat_idx:,:-lon_cheat_idx]
+    field = n.zeros(cheat.shape)
+    for lat_idx in range(cheat.shape[0]):
+        field[-(lat_idx+1)] = cheat[lat_idx]
+
+    dummy['sfc_zonal_wnd'] = {
+        'long_name' : '10m zonal wind',
+        'pdtnum' : pdtnum, 
+        'pdtmpl' : pdtmpl, 
+        'drtnum' : drtnum,
+        'drtmpl' : drtmpl,
+        #'field' : sfc_fv['q10m_zonal_wnd'].get_value()[time_idx]
+        #'field' : sfc_fv['q10m_zonal_wnd'].get_value()[time_idx,lat_cheat_idx:,:-lon_cheat_idx]
+        'field' : field
+      }
+
+    # Let's prepare the meridional 10m wind data and metadata
+    # Product Definition Template Number (see Code Table 4.0)
+    #     0 -> Analysis or forecast at a horizontal level or in a 
+    #          horizontal layer at a point in time.  (see Template 4.0)
+    pdtnum = 0
+    # array of zeros (int) of size 15
+    pdtmpl = n.zeros((15,),dtype=int)
+    # Follows the definition of template 4.0 
+    # Oct 10: Parameter category (see Code table 4.1). 2 -> momentum
+    pdtmpl[0] = 2
+    # Oct 11: Parameter number (see Code table 4.2). 3 -> v-component of wind
+    pdtmpl[1] = 3
+    # Oct 12: Type of generating process (see Code table 4.3). 2 -> forecast
+    pdtmpl[2] = 2
+    # Oct 13: Background generating process identifier 
+    # (defined by originating centre)
+    pdtmpl[3] = missing(1)
+    # Oct 14: Analysis or forecast generating process identified 
+    # (defined by originating centre)
+    pdtmpl[4] = missing(1)
+    # Oct 15-16: Hours of observational data cutoff after reference time 
+    pdtmpl[5] = missing(2)
+    # Oct 17: Minutes of observational data cutoff after reference time 
+    pdtmpl[6] = missing(1)
+    # Oct 18: Indicator of unit of time range (see Code table 4.4)
+    # 1 -> hours
+    # NB WPS expects this to be hours!
+    pdtmpl[7] = 1
+    # Oct 19-22: Forecast time in units defined by octet 18
+    pdtmpl[8] = fv['forc_hrs'].get_value()[0]
+    # Oct 23: Type of first fixed surface (see Code table 4.5)
+    # 103 -> fixed height above ground
+    pdtmpl[9] = 103
+    # Oct 24: Scale factor of first fixed surface
+    pdtmpl[10] = 0
+    # Oct 25-28: Scaled value of first fixed surface
+    # -> 10m above ground
+    pdtmpl[11] = 10
+    # Oct 29: Type of second fixed surface (see Code table 4.5)
+    pdtmpl[12] = missing(1)
+    # Oct 30: Scale factor of second fixed surface
+    pdtmpl[13] = missing(1)
+    # Oct 31-34: Scaled value of second fixed surfaces
+    pdtmpl[14] = missing(3)
+ 
    # NB: I am using the same drtnum and drtmpl for all data, so I won't
    # replicate them in the following fields. If you need to change this on a
    # per-variable basis, this is the place to redefine it. If you choose to do
    # so, then make sure you define one for each variable that follows, or be
    # aware that the last one defined will apply to all that follow.
+
+    cheat = sfc_fv['q10m_merid_wnd'].get_value()[time_idx,lat_cheat_idx:,:-lon_cheat_idx]
+    field = n.zeros(cheat.shape)
+    for lat_idx in range(cheat.shape[0]):
+        field[-(lat_idx+1)] = cheat[lat_idx]
+
+    dummy['sfc_merid_wnd'] = {
+        'long_name' : '10m meridional wind',
+        'pdtnum' : pdtnum, 
+        'pdtmpl' : pdtmpl, 
+        'drtnum' : drtnum,
+        'drtmpl' : drtmpl,
+        #'field' : sfc_fv['q10m_merid_wnd'].get_value()[time_idx]
+        #'field' : sfc_fv['q10m_merid_wnd'].get_value()[time_idx,lat_cheat_idx:,:-lon_cheat_idx]
+        'field' : field
+      }
+  
+    if 1:
+        # Let's prepare the topography data and metadata
+        # Product Definition Template Number (see Code Table 4.0)
+        #     0 -> Analysis or forecast at a horizontal level or in a 
+        #          horizontal layer at a point in time.  (see Template 4.0)
+        pdtnum = 0
+        # array of zeros (int) of size 15
+        pdtmpl = n.zeros((15,),dtype=int)
+        # Follows the definition of template 4.0 
+        # Oct 10: Parameter category (see Code table 4.1). 3 -> mass
+        pdtmpl[0] = 3
+        # Oct 11: Parameter number (see Code table 4.2). 5 -> geopotential height
+        pdtmpl[1] = 5
+        # Oct 12: Type of generating process (see Code table 4.3). 2 -> forecast
+        pdtmpl[2] = 2
+        # Oct 13: Background generating process identifier 
+        # (defined by originating centre)
+        pdtmpl[3] = missing(1)
+        # Oct 14: Analysis or forecast generating process identified 
+        # (defined by originating centre)
+        pdtmpl[4] = missing(1)
+        # Oct 15-16: Hours of observational data cutoff after reference time 
+        pdtmpl[5] = missing(2)
+        # Oct 17: Minutes of observational data cutoff after reference time 
+        pdtmpl[6] = missing(1)
+        # Oct 18: Indicator of unit of time range (see Code table 4.4)
+        # 1 -> hours
+        # NB WPS expects this to be hours!
+        pdtmpl[7] = 1
+        # Oct 19-22: Forecast time in units defined by octet 18
+        pdtmpl[8] = fv['forc_hrs'].get_value()[0]
+        # Oct 23: Type of first fixed surface (see Code table 4.5)
+        # 1 -> surface
+        pdtmpl[9] = 1
+        # Oct 24: Scale factor of first fixed surface
+        pdtmpl[10] = 0
+        # Oct 25-28: Scaled value of first fixed surface
+        pdtmpl[11] = 0
+        # Oct 29: Type of second fixed surface (see Code table 4.5)
+        pdtmpl[12] = missing(1)
+        # Oct 30: Scale factor of second fixed surface
+        pdtmpl[13] = missing(1)
+        # Oct 31-34: Scaled value of second fixed surfaces
+        pdtmpl[14] = missing(3)
+     
        # NB: I am using the same drtnum and drtmpl for all data, so I won't
        # replicate them in the following fields. If you need to change this on a
        # per-variable basis, this is the place to redefine it. If you choose to do
        # so, then make sure you define one for each variable that follows, or be
        # aware that the last one defined will apply to all that follow.
+    
+        cheat = fv['topog'].get_value()[lat_cheat_idx:,:-lon_cheat_idx]
+        field = n.zeros(cheat.shape)
+        for lat_idx in range(cheat.shape[0]):
+            field[-(lat_idx+1)] = cheat[lat_idx]
+
+        dummy['topog'] = {
+            'long_name' : 'topography',
+            'pdtnum' : pdtnum, 
+            'pdtmpl' : pdtmpl, 
+            'drtnum' : drtnum,
+            'drtmpl' : drtmpl,
+            #'field' : fv['topog'].get_value()
+            #'field' : fv['topog'].get_value()[lat_cheat_idx:,:-lon_cheat_idx]
+            'field' : field
+          }
+
+    if 1:
+        # Let's prepare the water equivalent accumulated snow depth
+        # data and metadata
+        # Product Definition Template Number (see Code Table 4.0)
+        #     0 -> Analysis or forecast at a horizontal level or in a 
+        #          horizontal layer at a point in time.  (see Template 4.0)
+        pdtnum = 0
+        # array of zeros (int) of size 15
+        pdtmpl = n.zeros((15,),dtype=int)
+        # Follows the definition of template 4.0 
+        # Oct 10: Parameter category (see Code table 4.1). 1 -> moisture
+        pdtmpl[0] = 1
+        # Oct 11: Parameter number (see Code table 4.2). 13 -> 
+        #   water equivalent of accumulated snow depth
+        pdtmpl[1] = 13
+        # Oct 12: Type of generating process (see Code table 4.3). 2 -> forecast
+        pdtmpl[2] = 2
+        # Oct 13: Background generating process identifier 
+        # (defined by originating centre)
+        pdtmpl[3] = missing(1)
+        # Oct 14: Analysis or forecast generating process identified 
+        # (defined by originating centre)
+        pdtmpl[4] = missing(1)
+        # Oct 15-16: Hours of observational data cutoff after reference time 
+        pdtmpl[5] = missing(2)
+        # Oct 17: Minutes of observational data cutoff after reference time 
+        pdtmpl[6] = missing(1)
+        # Oct 18: Indicator of unit of time range (see Code table 4.4)
+        # 1 -> hours
+        # NB WPS expects this to be hours!
+        pdtmpl[7] = 1
+        # Oct 19-22: Forecast time in units defined by octet 18
+        pdtmpl[8] = fv['forc_hrs'].get_value()[0]
+        # Oct 23: Type of first fixed surface (see Code table 4.5)
+        # 1 -> surface
+        pdtmpl[9] = 1
+        # Oct 24: Scale factor of first fixed surface
+        pdtmpl[10] = 0
+        # Oct 25-28: Scaled value of first fixed surface
+        pdtmpl[11] = 0
+        # Oct 29: Type of second fixed surface (see Code table 4.5)
+        pdtmpl[12] = missing(1)
+        # Oct 30: Scale factor of second fixed surface
+        pdtmpl[13] = missing(1)
+        # Oct 31-34: Scaled value of second fixed surfaces
+        pdtmpl[14] = missing(3)
+     
        # NB: I am using the same drtnum and drtmpl for all data, so I won't
        # replicate them in the following fields. If you need to change this on a
        # per-variable basis, this is the place to redefine it. If you choose to do
        # so, then make sure you define one for each variable that follows, or be
        # aware that the last one defined will apply to all that follow.
+    
+        #dummy_field = n.zeros((len(lon),len(lat)),dtype=float)
+        dummy_field = n.zeros((len(lat),len(lon)),dtype=float)
+        dummy['weasd'] = {
+            'long_name' : 'water equivalent of accumulated snow depth',
+            'pdtnum' : pdtnum, 
+            'pdtmpl' : pdtmpl, 
+            'drtnum' : drtnum,
+            'drtmpl' : drtmpl,
+            'field' : dummy_field
+          }
+       
+    #######################################################################
+    # Finished with the 2d fields
+    # Let's move on to the 3d ones
+    #######################################################################
+    
+    dummy = data[-1]['discipline']['atmos']['fields']['3d']
+    
+    # Let's prepare the air temperature data and metadata
+    # Product Definition Template Number (see Code Table 4.0)
+    #     0 -> Analysis or forecast at a horizontal level or in a 
+    #          horizontal layer at a point in time.  (see Template 4.0)
+    pdtnum = 0
+    # array of zeros (int) of size 15
+    pdtmpl = n.zeros((15,),dtype=int)
+    # Follows the definition of template 4.0 
+    # Oct 10: Parameter category (see Code table 4.1). 0 -> temperature
+    pdtmpl[0] = 0
+    # Oct 11: Parameter number (see Code table 4.2). 0 -> temperature
+    pdtmpl[1] = 0
+    # Oct 12: Type of generating process (see Code table 4.3). 2 -> forecast
+    pdtmpl[2] = 2
+    # Oct 13: Background generating process identifier 
+    # (defined by originating centre)
+    pdtmpl[3] = missing(1)
+    # Oct 14: Analysis or forecast generating process identified 
+    # (defined by originating centre)
+    pdtmpl[4] = missing(1)
+    # Oct 15-16: Hours of observational data cutoff after reference time 
+    pdtmpl[5] = missing(2)
+    # Oct 17: Minutes of observational data cutoff after reference time 
+    pdtmpl[6] = missing(1)
+    # Oct 18: Indicator of unit of time range (see Code table 4.4)
+    # 1 -> hours
+    # NB WPS expects this to be hours!
+    pdtmpl[7] = 1
+    # Oct 19-22: Forecast time in units defined by octet 18
+    pdtmpl[8] = fv['forc_hrs'].get_value()[0]
+    # Oct 23: Type of first fixed surface (see Code table 4.5)
    # 100 -> isobaric surface
+    pdtmpl[9] = 100
+    # Oct 24: Scale factor of first fixed surface
+    pdtmpl[10] = 0
+    # Oct 25-28: Scaled value of first fixed surface
    # -> missing, as it will be defined later when the data is sliced into
    # horizontal slabs for its encoding
+    pdtmpl[11] = missing(3)
+    # Oct 29: Type of second fixed surface (see Code table 4.5)
+    pdtmpl[12] = missing(1)
+    # Oct 30: Scale factor of second fixed surface
+    pdtmpl[13] = missing(1)
+    # Oct 31-34: Scaled value of second fixed surfaces
+    pdtmpl[14] = missing(3)
+ 
    # NB: I am using the same drtnum and drtmpl for all data, so I won't
    # replicate them in the following fields. If you need to change this on a
    # per-variable basis, this is the place to redefine it. If you choose to do
    # so, then make sure you define one for each variable that follows, or be
    # aware that the last one defined will apply to all that follow.
+
+    cheat = fv['air_temp'].get_value()[0,:,lat_cheat_idx:,:-lon_cheat_idx]
+    field = n.zeros(cheat.shape)
+    for lvl_idx in range(cheat.shape[0]):
+        for lat_idx in range(cheat.shape[1]):
+            field[lvl_idx,-(lat_idx+1)] = cheat[lvl_idx,lat_idx]
+
+    dummy['air_temp'] = {
+        'long_name' : '3D air temperature',
+        'pdtnum' : pdtnum, 
+        'pdtmpl' : pdtmpl, 
+        'drtnum' : drtnum,
+        'drtmpl' : drtmpl,
+        #'field' : fv['air_temp'].get_value()[0],
+        #'field' : fv['air_temp'].get_value()[0,:,lat_cheat_idx:,:-lon_cheat_idx],
+        'field' : field,
+        #'field' : '',
+        'lvl' : fv['lvl'].get_value()
+      }
+
+    # Let's prepare the 3d relative humidity data and metadata
+    # Product Definition Template Number (see Code Table 4.0)
+    #     0 -> Analysis or forecast at a horizontal level or in a 
+    #          horizontal layer at a point in time.  (see Template 4.0)
+    pdtnum = 0
+    # array of zeros (int) of size 15
+    pdtmpl = n.zeros((15,),dtype=int)
+    # Follows the definition of template 4.0 
+    # Oct 10: Parameter category (see Code table 4.1). 1 -> moisture
+    pdtmpl[0] = 1
+    # Oct 11: Parameter number (see Code table 4.2). 1 -> RH
+    pdtmpl[1] = 1
+    # Oct 12: Type of generating process (see Code table 4.3). 2 -> forecast
+    pdtmpl[2] = 2
+    # Oct 13: Background generating process identifier 
+    # (defined by originating centre)
+    pdtmpl[3] = missing(1)
+    # Oct 14: Analysis or forecast generating process identified 
+    # (defined by originating centre)
+    pdtmpl[4] = missing(1)
+    # Oct 15-16: Hours of observational data cutoff after reference time 
+    pdtmpl[5] = missing(2)
+    # Oct 17: Minutes of observational data cutoff after reference time 
+    pdtmpl[6] = missing(1)
+    # Oct 18: Indicator of unit of time range (see Code table 4.4)
+    # 1 -> hours
+    # NB WPS expects this to be hours!
+    pdtmpl[7] = 1
+    # Oct 19-22: Forecast time in units defined by octet 18
+    pdtmpl[8] = fv['forc_hrs'].get_value()[0]
+    # Oct 23: Type of first fixed surface (see Code table 4.5)
    # 100 -> isobaric surface
+    pdtmpl[9] = 100
+    # Oct 24: Scale factor of first fixed surface
+    pdtmpl[10] = 0
+    # Oct 25-28: Scaled value of first fixed surface
    # -> missing, as it will be defined later when the data is sliced into
    # horizontal slabs for its encoding
+    pdtmpl[11] = missing(3)
+    # Oct 29: Type of second fixed surface (see Code table 4.5)
+    pdtmpl[12] = missing(1)
+    # Oct 30: Scale factor of second fixed surface
+    pdtmpl[13] = missing(1)
+    # Oct 31-34: Scaled value of second fixed surfaces
+    pdtmpl[14] = missing(3)
+ 
    # NB: I am using the same drtnum and drtmpl for all data, so I won't
    # replicate them in the following fields. If you need to change this on a
    # per-variable basis, this is the place to redefine it. If you choose to do
    # so, then make sure you define one for each variable that follows, or be
    # aware that the last one defined will apply to all that follow.
+
+    #mix_rto = fv['mix_rto'].get_value()[0]
+    mix_rto = fv['mix_rto'].get_value()[0,:,lat_cheat_idx:,:-lon_cheat_idx]
+    spec_hum = n.zeros(mix_rto.shape,dtype=float)
+    spec_hum = mix_rto / (mix_rto + 1)
+    #temp = fv['air_temp'].get_value()[0]
+    temp = fv['air_temp'].get_value()[0,:,lat_cheat_idx:,:-lon_cheat_idx]
+    #print temp.min(), temp.max()
+    #print temp.min()-273.16, temp.max()-273.16
+    sat_vapour_pres = goff_gratch(temp)
+    #print sat_vapour_pres.min(), sat_vapour_pres.max()
+    pres = fv['lvl'].get_value()
+    sat_spec_hum = n.zeros(mix_rto.shape,dtype=float)
+    for lvl_idx in range(len(pres)):
+        sat_spec_hum[lvl_idx] = 0.622 * sat_vapour_pres[lvl_idx] \
+          / pres[lvl_idx]
+    #sat_spec_hum = ma.masked_where(sat_spec_hum <= 1.e-9,
+    #sat_spec_hum = ma.masked_where(sat_spec_hum <= 1.e-6,
+      #sat_spec_hum)
+    rh = n.zeros(mix_rto.shape,dtype=float)
+    rh = spec_hum / sat_spec_hum * 100
+    #sys.exit()
+    del mix_rto, spec_hum, temp, sat_vapour_pres, pres, sat_spec_hum
+    
+    #cheat = rh
+    #field = n.zeros(cheat.shape)
+    #for lat_idx in range(cheat.shape[0]):
+    #    field[-(lat_idx+1)] = cheat[lat_idx]
+
+    cheat = rh
+    field = n.zeros(cheat.shape)
+    for lvl_idx in range(cheat.shape[0]):
+        for lat_idx in range(cheat.shape[1]):
+            field[lvl_idx,-(lat_idx+1)] = cheat[lvl_idx,lat_idx]
+
+    dummy['rh'] = {
+        'long_name' : '3D relative humidity',
+        'pdtnum' : pdtnum, 
+        'pdtmpl' : pdtmpl, 
+        'drtnum' : drtnum,
+        'drtmpl' : drtmpl,
+        #'field' : rh.filled(fill_value=0.),
+        #'field' : rh,
+        'field' : field,
+        'lvl' : fv['lvl'].get_value()
+      }
+    #sys.exit()
+
+    # Let's prepare the 3d zonal wind data and metadata
+    # Product Definition Template Number (see Code Table 4.0)
+    #     0 -> Analysis or forecast at a horizontal level or in a 
+    #          horizontal layer at a point in time.  (see Template 4.0)
+    pdtnum = 0
+    # array of zeros (int) of size 15
+    pdtmpl = n.zeros((15,),dtype=int)
+    # Follows the definition of template 4.0 
+    # Oct 10: Parameter category (see Code table 4.1). 2 -> momentum
+    pdtmpl[0] = 2
+    # Oct 11: Parameter number (see Code table 4.2). 2 -> u-component of the
+    # wind
+    pdtmpl[1] = 2
+    # Oct 12: Type of generating process (see Code table 4.3). 2 -> forecast
+    pdtmpl[2] = 2
+    # Oct 13: Background generating process identifier 
+    # (defined by originating centre)
+    pdtmpl[3] = missing(1)
+    # Oct 14: Analysis or forecast generating process identified 
+    # (defined by originating centre)
+    pdtmpl[4] = missing(1)
+    # Oct 15-16: Hours of observational data cutoff after reference time 
+    pdtmpl[5] = missing(2)
+    # Oct 17: Minutes of observational data cutoff after reference time 
+    pdtmpl[6] = missing(1)
+    # Oct 18: Indicator of unit of time range (see Code table 4.4)
+    # 1 -> hours
+    # NB WPS expects this to be hours!
+    pdtmpl[7] = 1
+    # Oct 19-22: Forecast time in units defined by octet 18
+    pdtmpl[8] = fv['forc_hrs'].get_value()[0]
+    # Oct 23: Type of first fixed surface (see Code table 4.5)
    # 100 -> isobaric surface
+    pdtmpl[9] = 100
+    # Oct 24: Scale factor of first fixed surface
+    pdtmpl[10] = 0
+    # Oct 25-28: Scaled value of first fixed surface
    # -> missing, as it will be defined later when the data is sliced into
    # horizontal slabs for its encoding
+    pdtmpl[11] = missing(3)
+    # Oct 29: Type of second fixed surface (see Code table 4.5)
+    pdtmpl[12] = missing(1)
+    # Oct 30: Scale factor of second fixed surface
+    pdtmpl[13] = missing(1)
+    # Oct 31-34: Scaled value of second fixed surfaces
+    pdtmpl[14] = missing(3)
+ 
    # NB: I am using the same drtnum and drtmpl for all data, so I won't
    # replicate them in the following fields. If you need to change this on a
    # per-variable basis, this is the place to redefine it. If you choose to do
    # so, then make sure you define one for each variable that follows, or be
    # aware that the last one defined will apply to all that follow.
+
+    cheat = fv['zonal_wnd'].get_value()[0,:,lat_cheat_idx:,:-lon_cheat_idx]
+    field = n.zeros(cheat.shape)
+    for lvl_idx in range(cheat.shape[0]):
+        for lat_idx in range(cheat.shape[1]):
+            field[lvl_idx,-(lat_idx+1)] = cheat[lvl_idx,lat_idx]
+
+    dummy['zonal_wnd'] = {
+        'long_name' : '3D zonal wind',
+        'pdtnum' : pdtnum, 
+        'pdtmpl' : pdtmpl, 
+        'drtnum' : drtnum,
+        'drtmpl' : drtmpl,
+        #'field' : fv['zonal_wnd'].get_value()[0],
+        #'field' : fv['zonal_wnd'].get_value()[0,:,lat_cheat_idx:,:-lon_cheat_idx],
+        'field' : field,
+        #'field' : '',
+        'lvl' : fv['lvl'].get_value()
+      }
+
    # Let's prepare the 3d meridional wind data and metadata
+    # Product Definition Template Number (see Code Table 4.0)
+    #     0 -> Analysis or forecast at a horizontal level or in a 
+    #          horizontal layer at a point in time.  (see Template 4.0)
+    pdtnum = 0
+    # array of zeros (int) of size 15
+    pdtmpl = n.zeros((15,),dtype=int)
+    # Follows the definition of template 4.0 
+    # Oct 10: Parameter category (see Code table 4.1). 2 -> momentum
+    pdtmpl[0] = 2
    # Oct 11: Parameter number (see Code table 4.2). 3 -> v-component of the
    # wind
+    pdtmpl[1] = 3
+    # Oct 12: Type of generating process (see Code table 4.3). 2 -> forecast
+    pdtmpl[2] = 2
+    # Oct 13: Background generating process identifier 
+    # (defined by originating centre)
+    pdtmpl[3] = missing(1)
+    # Oct 14: Analysis or forecast generating process identified 
+    # (defined by originating centre)
+    pdtmpl[4] = missing(1)
+    # Oct 15-16: Hours of observational data cutoff after reference time 
+    pdtmpl[5] = missing(2)
+    # Oct 17: Minutes of observational data cutoff after reference time 
+    pdtmpl[6] = missing(1)
+    # Oct 18: Indicator of unit of time range (see Code table 4.4)
+    # 1 -> hours
+    # NB WPS expects this to be hours!
+    pdtmpl[7] = 1
+    # Oct 19-22: Forecast time in units defined by octet 18
+    pdtmpl[8] = fv['forc_hrs'].get_value()[0]
+    # Oct 23: Type of first fixed surface (see Code table 4.5)
    # 100 -> isobaric surface
+    pdtmpl[9] = 100
+    # Oct 24: Scale factor of first fixed surface
+    pdtmpl[10] = 0
+    # Oct 25-28: Scaled value of first fixed surface
    # -> missing, as it will be defined later when the data is sliced into
    # horizontal slabs for its encoding
+    pdtmpl[11] = missing(3)
+    # Oct 29: Type of second fixed surface (see Code table 4.5)
+    pdtmpl[12] = missing(1)
+    # Oct 30: Scale factor of second fixed surface
+    pdtmpl[13] = missing(1)
+    # Oct 31-34: Scaled value of second fixed surfaces
+    pdtmpl[14] = missing(3)
+ 
    # NB: I am using the same drtnum and drtmpl for all data, so I won't
    # replicate them in the following fields. If you need to change this on a
    # per-variable basis, this is the place to redefine it. If you choose to do
    # so, then make sure you define one for each variable that follows, or be
    # aware that the last one defined will apply to all that follow.
+
+    cheat =fv['merid_wnd'].get_value()[0,:,lat_cheat_idx:,:-lon_cheat_idx] 
+    field = n.zeros(cheat.shape)
+    for lvl_idx in range(cheat.shape[0]):
+        for lat_idx in range(cheat.shape[1]):
+            field[lvl_idx,-(lat_idx+1)] = cheat[lvl_idx,lat_idx]
+
+    dummy['merid_wnd'] = {
+        'long_name' : '3D meridional wind',
+        'pdtnum' : pdtnum, 
+        'pdtmpl' : pdtmpl, 
+        'drtnum' : drtnum,
+        'drtmpl' : drtmpl,
+        #'field' : fv['merid_wnd'].get_value()[0],
+        #'field' : fv['merid_wnd'].get_value()[0,:,lat_cheat_idx:,:-lon_cheat_idx],
+        'field' : field,
+        #'field' : '',
+        'lvl' : fv['lvl'].get_value()
+      }
+
+    # Let's prepare the 3d geopotential height data and metadata
+    # Product Definition Template Number (see Code Table 4.0)
+    #     0 -> Analysis or forecast at a horizontal level or in a 
+    #          horizontal layer at a point in time.  (see Template 4.0)
+    pdtnum = 0
+    # array of zeros (int) of size 15
+    pdtmpl = n.zeros((15,),dtype=int)
+    # Follows the definition of template 4.0 
+    # Oct 10: Parameter category (see Code table 4.1). 3 -> mass
+    pdtmpl[0] = 3
+    # Oct 11: Parameter number (see Code table 4.2). 5 -> geopotential height
+    pdtmpl[1] = 5
+    # Oct 12: Type of generating process (see Code table 4.3). 2 -> forecast
+    pdtmpl[2] = 2
+    # Oct 13: Background generating process identifier 
+    # (defined by originating centre)
+    pdtmpl[3] = missing(1)
+    # Oct 14: Analysis or forecast generating process identified 
+    # (defined by originating centre)
+    pdtmpl[4] = missing(1)
+    # Oct 15-16: Hours of observational data cutoff after reference time 
+    pdtmpl[5] = missing(2)
+    # Oct 17: Minutes of observational data cutoff after reference time 
+    pdtmpl[6] = missing(1)
+    # Oct 18: Indicator of unit of time range (see Code table 4.4)
+    # 1 -> hours
+    # NB WPS expects this to be hours!
+    pdtmpl[7] = 1
+    # Oct 19-22: Forecast time in units defined by octet 18
+    pdtmpl[8] = fv['forc_hrs'].get_value()[0]
+    # Oct 23: Type of first fixed surface (see Code table 4.5)
    # 100 -> isobaric surface
+    pdtmpl[9] = 100
+    # Oct 24: Scale factor of first fixed surface
+    pdtmpl[10] = 0
+    # Oct 25-28: Scaled value of first fixed surface
    # -> missing, as it will be defined later when the data is sliced into
    # horizontal slabs for its encoding
+    pdtmpl[11] = missing(3)
+    # Oct 29: Type of second fixed surface (see Code table 4.5)
+    pdtmpl[12] = missing(1)
+    # Oct 30: Scale factor of second fixed surface
+    pdtmpl[13] = missing(1)
+    # Oct 31-34: Scaled value of second fixed surfaces
+    pdtmpl[14] = missing(3)
+ 
    # NB: I am using the same drtnum and drtmpl for all data, so I won't
    # replicate them in the following fields. If you need to change this on a
    # per-variable basis, this is the place to redefine it. If you choose to do
    # so, then make sure you define one for each variable that follows, or be
    # aware that the last one defined will apply to all that follow.
+
+    cheat = fv['geop_ht'].get_value()[0,:,lat_cheat_idx:,:-lon_cheat_idx]
+    field = n.zeros(cheat.shape)
+    for lvl_idx in range(cheat.shape[0]):
+        for lat_idx in range(cheat.shape[1]):
+            field[lvl_idx,-(lat_idx+1)] = cheat[lvl_idx,lat_idx]
+
+    dummy['geop_ht'] = {
+        'long_name' : 'geopotential height',
+        'pdtnum' : pdtnum, 
+        'pdtmpl' : pdtmpl, 
+        'drtnum' : drtnum,
+        'drtmpl' : drtmpl,
+        #'field' : fv['geop_ht'].get_value()[0],
+        #'field' : fv['geop_ht'].get_value()[0,:,lat_cheat_idx:,:-lon_cheat_idx],
+        'field' : field,
+        #'field' : '',
+        'lvl' : fv['lvl'].get_value()
+      }
+
+    #######################################################################
+    # Finished with the atmospheric discipline
+    # Let's move on to the oceanographic one
+    #######################################################################
+
+    if 0:
+        # Preliminaries: we need to unpack the land_sea_ice mask from the laps
+        # files.
+        #sea_lnd_ice_mask = fv['sea_lnd_ice_mask'].get_value()[0]
+        sea_lnd_ice_mask = fv['sea_lnd_ice_mask'].get_value()[0,lat_cheat_idx:,:-lon_cheat_idx]
+        shape = sea_lnd_ice_mask.shape
+        land_cover = n.zeros(shape, 'i')
+        ice_cover = n.zeros(shape, 'i')
+        # I assume (know) that the mask has 3 dimensions of which the first is time
+        # which I am going to ignore for now
+        # I am sure this could be done more elegantly with a numpy in-built
+        # function, but I am not sure about which one would it be
+        for i in range(shape[0]):
+            for j in range(shape[1]):
+                if sea_lnd_ice_mask[i,j] == 0:
+                    land_cover[i,j] = 0
+                    ice_cover[i,j] = 0
+                elif sea_lnd_ice_mask[i,j] == 1:
+                    land_cover[i,j] = 1
+                    ice_cover[i,j] = 0
+                elif sea_lnd_ice_mask[i,j] == 2:
+                    # the BoM only considers ice over water
+                    land_cover[i,j] = 0
+                    ice_cover[i,j] = 1
+                else:
+                    print 'Illegal value in the sea_land_ice mask!'
+                    sys.exit()
+    
+        dummy = data[-1]['discipline']['ocean']['fields']['2d']
+    
+        # Let's prepare the ice cover data and metadata
+        # Product Definition Template Number (see Code Table 4.0)
+        #     0 -> Analysis or forecast at a horizontal level or in a 
+        #          horizontal layer at a point in time.  (see Template 4.0)
+        pdtnum = 0
+        # array of zeros (int) of size 15
+        pdtmpl = n.zeros((15,),dtype=int)
+        # Follows the definition of template 4.0 
+        # Oct 10: Parameter category (see Code table 4.1). 2 -> ice
+        pdtmpl[0] = 2
+        # Oct 11: Parameter number (see Code table 4.2). 0 -> ice-cover 
+        # 1=ice, 0=no-ice (over water)
+        pdtmpl[1] = 0
+        # Oct 12: Type of generating process (see Code table 4.3). 2 -> forecast
+        pdtmpl[2] = 2
+        # Oct 13: Background generating process identifier 
+        # (defined by originating centre)
+        pdtmpl[3] = missing(1)
+        # Oct 14: Analysis or forecast generating process identifier
+        # (defined by originating centre)
+        pdtmpl[4] = missing(1)
+        # Oct 15-16: Hours of observational data cutoff after reference time 
+        pdtmpl[5] = missing(2)
+        # Oct 17: Minutes of observational data cutoff after reference time 
+        pdtmpl[6] = missing(1)
+        # Oct 18: Indicator of unit of time range (see Code table 4.4)
+        # 1 -> hours
+        # NB WPS expects this to be hours!
+        pdtmpl[7] = 1
+        # Oct 19-22: Forecast time in units defined by octet 18
+        pdtmpl[8] = fv['forc_hrs'].get_value()[0]
+        # Oct 23: Type of first fixed surface (see Code table 4.5)
+        # 1 -> surface
+        pdtmpl[9] = 1
+        # Oct 24: Scale factor of first fixed surface
+        pdtmpl[10] = 0
+        # Oct 25-28: Scaled value of first fixed surface
+        pdtmpl[11] = 0
+        # Oct 29: Type of second fixed surface (see Code table 4.5)
+        pdtmpl[12] = missing(1)
+        # Oct 30: Scale factor of second fixed surface
+        pdtmpl[13] = missing(1)
+        # Oct 31-34: Scaled value of second fixed surface
+        pdtmpl[14] = missing(3)
+     
+        # NB I am using the same drtnum and drtmpl for all data so I won't
+        # replicate it in the following fields. If you need to change this on a
+        # per-variable basis this is the place to redefine it. If you choose to do
+        # so then make sure you define one for each variable that follows or be
+        # aware that the last one defined will apply to all that follows.
+    
+        cheat = ice_cover
+        field = n.zeros(cheat.shape)
+        for lat_idx in range(cheat.shape[0]):
+            field[-(lat_idx+1)] = cheat[lat_idx]
+
+        dummy['ice_cover'] = {
+            'long_name' : 'ice cover',
+            'pdtnum' : pdtnum, 
+            'pdtmpl' : pdtmpl, 
+            'drtnum' : drtnum,
+            'drtmpl' : drtmpl,
+            #'field' : ice_cover
+            'field' : field
+          }
+
+    #######################################################################
+    # Finished with the oceanographic discipline
+    # Let's move on to the land one
+    #######################################################################
+
+    dummy = data[-1]['discipline']['land']['fields']['2d']
+
+    if 0:
+        # Let's prepare the land cover data and metadata
+        # Product Definition Template Number (see Code Table 4.0)
+        #     0 -> Analysis or forecast at a horizontal level or in a 
+        #          horizontal layer at a point in time.  (see Template 4.0)
+        pdtnum = 0
+        # array of zeros (int) of size 15
+        pdtmpl = n.zeros((15,),dtype=int)
+        # Follows the definition of template 4.0 
+        # Oct 10: Parameter category (see Code table 4.1). 0 -> vegetation/biomass
+        pdtmpl[0] = 0
+        # Oct 11: Parameter number (see Code table 4.2). 0 -> land-cover 
+        # (1=land, 0=ocean)
+        pdtmpl[1] = 0
+        # Oct 12: Type of generating process (see Code table 4.3). 2 -> forecast
+        pdtmpl[2] = 2
+        # Oct 13: Background generating process identifier 
+        # (defined by originating centre)
+        pdtmpl[3] = missing(1)
+        # Oct 14: Analysis or forecast generating process identifier
+        # (defined by originating centre)
+        pdtmpl[4] = missing(1)
+        # Oct 15-16: Hours of observational data cutoff after reference time 
+        pdtmpl[5] = missing(2)
+        # Oct 17: Minutes of observational data cutoff after reference time 
+        pdtmpl[6] = missing(1)
+        # Oct 18: Indicator of unit of time range (see Code table 4.4)
+        # 1 -> hours
+        # NB WPS expects this to be hours!
+        pdtmpl[7] = 1
+        # Oct 19-22: Forecast time in units defined by octet 18
+        pdtmpl[8] = fv['forc_hrs'].get_value()[0]
+        # Oct 23: Type of first fixed surface (see Code table 4.5)
+        # 1 -> surface
+        pdtmpl[9] = 1
+        # Oct 24: Scale factor of first fixed surface
+        pdtmpl[10] = 0
+        # Oct 25-28: Scaled value of first fixed surface
+        pdtmpl[11] = 0
+        # Oct 29: Type of second fixed surface (see Code table 4.5)
+        pdtmpl[12] = missing(1)
+        # Oct 30: Scale factor of second fixed surface
+        pdtmpl[13] = missing(1)
+        # Oct 31-34: Scaled value of second fixed surface
+        pdtmpl[14] = missing(3)
+     
+        # NB I am using the same drtnum and drtmpl for all data so I won't
+        # replicate it in the following fields. If you need to change this on a
+        # per-variable basis this is the place to redefine it. If you choose to do
+        # so then make sure you define one for each variable that follows or be
+        # aware that the last one defined will apply to all that follows.
+        
+        # I seem to remember there was a problem with the land cover that I could
+        # fix using the decimal scale factor... let's try
+        land_cover_decimal_factor = 1
+        # Oct: 18-19. Decimal scale factor (D)
+        #drtmpl[2] = land_cover_decimal_factor
+        #print 'land_cover drtmpl', drtmpl
+    
+        cheat = land_cover * 10**land_cover_decimal_factor
+        field = n.zeros(cheat.shape)
+        for lat_idx in range(cheat.shape[0]):
+            field[-(lat_idx+1)] = cheat[lat_idx]
+
+        dummy['land_cover'] = {
+            'long_name' : 'land cover',
+            'pdtnum' : pdtnum, 
+            'pdtmpl' : pdtmpl, 
+            'drtnum' : drtnum,
+            'drtmpl' : n.array([0,0,land_cover_decimal_factor,24,0]),
+            #'drtmpl' : drtmpl,
+            #'field' : land_cover * 10**land_cover_decimal_factor
+            'field' : field
+          }
+    
+        # Now put it back to what it was
+        # Oct: 18-19. Decimal scale factor (D)
+        #drtmpl[2] = 0
+        
+    if 1:
+        # Let's prepare the topography data and metadata
+        # Product Definition Template Number (see Code Table 4.0)
+        #     0 -> Analysis or forecast at a horizontal level or in a 
+        #          horizontal layer at a point in time.  (see Template 4.0)
+        pdtnum = 0
+        # array of zeros (int) of size 15
+        pdtmpl = n.zeros((15,),dtype=int)
+        # Follows the definition of template 4.0 
+        # Oct 10: Parameter category (see Code table 4.1). 0 -> vegetation/biomass
+        pdtmpl[0] = 0
+        # Oct 11: Parameter number (see Code table 4.2). 7 -> model terrain height
+        pdtmpl[1] = 7
+        # Oct 12: Type of generating process (see Code table 4.3). 2 -> forecast
+        pdtmpl[2] = 2
+        # Oct 13: Background generating process identifier 
+        # (defined by originating centre)
+        pdtmpl[3] = missing(1)
+        # Oct 14: Analysis or forecast generating process identifier
+        # (defined by originating centre)
+        pdtmpl[4] = missing(1)
+        # Oct 15-16: Hours of observational data cutoff after reference time 
+        pdtmpl[5] = missing(2)
+        # Oct 17: Minutes of observational data cutoff after reference time 
+        pdtmpl[6] = missing(1)
+        # Oct 18: Indicator of unit of time range (see Code table 4.4)
+        # 1 -> hours
+        # NB WPS expects this to be hours!
+        pdtmpl[7] = 1
+        # Oct 19-22: Forecast time in units defined by octet 18
+        pdtmpl[8] = fv['forc_hrs'].get_value()[0]
+        # Oct 23: Type of first fixed surface (see Code table 4.5)
+        # 1 -> surface
+        pdtmpl[9] = 1
+        # Oct 24: Scale factor of first fixed surface
+        pdtmpl[10] = 0
+        # Oct 25-28: Scaled value of first fixed surface
+        pdtmpl[11] = 0
+        # Oct 29: Type of second fixed surface (see Code table 4.5)
+        pdtmpl[12] = missing(1)
+        # Oct 30: Scale factor of second fixed surface
+        pdtmpl[13] = missing(1)
+        # Oct 31-34: Scaled value of second fixed surface
+        pdtmpl[14] = missing(3)
+     
+        # NB I am using the same drtnum and drtmpl for all data so I won't
+        # replicate it in the following fields. If you need to change this on a
+        # per-variable basis this is the place to redefine it. If you choose to do
+        # so then make sure you define one for each variable that follows or be
+        # aware that the last one defined will apply to all that follows.
+        cheat = fv['topog'].get_value()[lat_cheat_idx:,:-lon_cheat_idx]
+        field = n.zeros(cheat.shape)
+        for lat_idx in range(cheat.shape[0]):
+            field[-(lat_idx+1)] = cheat[lat_idx]
+    
+        dummy['topog'] = {
+            'long_name' : 'topography',
+            'pdtnum' : pdtnum, 
+            'pdtmpl' : pdtmpl, 
+            'drtnum' : drtnum,
+            'drtmpl' : drtmpl,
+            #'field' : fv['topog'].get_value()
+            #'field' : fv['topog'].get_value()[lat_cheat_idx:,:-lon_cheat_idx]
+            'field' : field
+          }
+    
+    dummy = data[-1]['discipline']['land']['fields']['3d']
+
+    if 1:
+        # Let's prepare the 3d soil temperature data and metadata
+        # Product Definition Template Number (see Code Table 4.0)
+        #     0 -> Analysis or forecast at a horizontal level or in a 
+        #          horizontal layer at a point in time.  (see Template 4.0)
+        pdtnum = 0
+        # array of zeros (int) of size 15
+        pdtmpl = n.zeros((15,),dtype=int)
+        # Follows the definition of template 4.0 
+        # Oct 10: Parameter category (see Code table 4.1). 0 -> vegetation/biomass
+        pdtmpl[0] = 0
+        # Oct 11: Parameter number (see Code table 4.2). 2 -> soil temperature
+        pdtmpl[1] = 2
+        # Oct 12: Type of generating process (see Code table 4.3). 2 -> forecast
+        pdtmpl[2] = 2
+        # Oct 13: Background generating process identifier 
+        # (defined by originating centre)
+        pdtmpl[3] = missing(1)
+        # Oct 14: Analysis or forecast generating process identifier
+        # (defined by originating centre)
+        pdtmpl[4] = missing(1)
+        # Oct 15-16: Hours of observational data cutoff after reference time 
+        pdtmpl[5] = missing(2)
+        # Oct 17: Minutes of observational data cutoff after reference time 
+        pdtmpl[6] = missing(1)
+        # Oct 18: Indicator of unit of time range (see Code table 4.4)
+        # 1 -> hours
+        # NB WPS expects this to be hours!
+        pdtmpl[7] = 1
+        # Oct 19-22: Forecast time in units defined by octet 18
+        pdtmpl[8] = fv['forc_hrs'].get_value()[0]
+        # Oct 23: Type of first fixed surface (see Code table 4.5)
+        # 106 -> depth below land surface
+        pdtmpl[9] = 106
+        # Oct 24: Scale factor of first fixed surface
+        # 3 -> preserve mm accuracy
+        # 2 -> preserve cm accuracy
+        soil_lvl_scaling = 2
+        pdtmpl[10] = soil_lvl_scaling
+        # Oct 25-28: Scaled value of first fixed surface
+        # -> missing as this will be defined later when the data is sliced in
+        # horizontal slabs for its encoding
+        pdtmpl[11] = missing(3)
+        # Oct 29: Type of second fixed surface (see Code table 4.5)
+        # 106 -> depth below land surface
+        pdtmpl[12] = 106
+        # Oct 30: Scale factor of second fixed surface
+        # same scaling as the first fixed surface (soil_lvl_scaling)
+        pdtmpl[13] = soil_lvl_scaling
+        # Oct 31-34: Scaled value of second fixed surface
+        pdtmpl[14] = missing(3)
+        # -> missing as this will be defined later when the data is sliced in
+        # horizontal slabs for its encoding
+     
+        # NB I am using the same drtnum and drtmpl for all data so I won't
+        # replicate it in the following fields. If you need to change this on a
+        # per-variable basis this is the place to redefine it. If you choose to do
+        # so then make sure you define one for each variable that follows or be
+        # aware that the last one defined will apply to all that follows.
+
+        # Let's interpolate linearly to the Noah levels
+        # NB for the deepest level we actually have an extrapolation
+        laps_soil_lvl = fv['soil_lvl'].get_value()
+        noah_soil_lvl = [0.10, 0.40, 1.0, 2.0]
+        #laps_soil_temp = fv['soil_temp'].get_value()[0]
+        laps_soil_temp = fv['soil_temp'].get_value()[0,:,lat_cheat_idx:,:-lon_cheat_idx]
+        shape = laps_soil_temp.shape
+        noah_soil_temp = n.zeros(shape, dtype=float)
+        max_soil_lvl = 4
+        for soil_idx in range(max_soil_lvl):
+            if soil_idx < max_soil_lvl - 1:
+                x_a = n.ones((shape[1],shape[2])) * laps_soil_lvl[soil_idx]
+                x_b = n.ones((shape[1],shape[2])) * laps_soil_lvl[soil_idx + 1]
+                y_a = laps_soil_temp[soil_idx]
+                y_b = laps_soil_temp[soil_idx + 1]
+            else:
+                # this accomplishes the extrapolation
+                x_a = n.ones((shape[1],shape[2])) * laps_soil_lvl[soil_idx - 1]
+                x_b = n.ones((shape[1],shape[2])) * laps_soil_lvl[soil_idx]
+                y_a = laps_soil_temp[soil_idx - 1]
+                y_b = laps_soil_temp[soil_idx]
+                #print x_a, '\n\n', x_b, '\n\n', y_a, '\n\n', y_b, '\n\n' 
+                #print soil_idx, x_a.shape, x_b.shape, y_a.shape, y_b.shape
+            x = n.ones((shape[1],shape[2])) * noah_soil_lvl[soil_idx]
+            noah_soil_temp[soil_idx] = vu.lin_interp(x_a, x_b, y_a, y_b, x)
+            
+            
+        cheat = noah_soil_temp
+        field = n.zeros(cheat.shape)
+        for lvl_idx in range(cheat.shape[0]):
+            for lat_idx in range(cheat.shape[1]):
+                field[lvl_idx,-(lat_idx+1)] = cheat[lvl_idx,lat_idx]
+
+        dummy['soil_temp'] = {
+            'long_name' : '3D volumetric soil temperature',
+            'pdtnum' : pdtnum, 
+            'pdtmpl' : pdtmpl, 
+            'drtnum' : drtnum,
+            'drtmpl' : drtmpl,
+            #TODO: make sure the scaling is right
+            #'field' : noah_soil_temp,
+            'field' : field,
+            'lvl' : noah_soil_lvl
+          }
+    
+   
+    if 1:
+        # Let's prepare the 3d volumetric soil moisture
+        # Product Definition Template Number (see Code Table 4.0)
+        #     0 -> Analysis or forecast at a horizontal level or in a 
+        #          horizontal layer at a point in time.  (see Template 4.0)
+        pdtnum = 0
+        # array of zeros (int) of size 15
+        pdtmpl = n.zeros((15,),dtype=int)
+        # Follows the definition of template 4.0 
+        # Oct 10: Parameter category (see Code table 4.1). 0 -> vegetation/biomass
+        pdtmpl[0] = 0
+        # Oct 11: Parameter number (see Code table 4.2). 9 -> volumetric soil
+        # moisture
+        # pdtmpl[1] = 9
+        # I don't like this choice but maybe it is going to work more easily with
+        # WRF and other NCAR utilities like cnvgrib
+        pdtmpl[1] = 192
+        # Oct 12: Type of generating process (see Code table 4.3). 2 -> forecast
+        pdtmpl[2] = 2
+        # Oct 13: Background generating process identifier 
+        # (defined by originating centre)
+        pdtmpl[3] = missing(1)
+        # Oct 14: Analysis or forecast generating process identifier
+        # (defined by originating centre)
+        pdtmpl[4] = missing(1)
+        # Oct 15-16: Hours of observational data cutoff after reference time 
+        pdtmpl[5] = missing(2)
+        # Oct 17: Minutes of observational data cutoff after reference time 
+        pdtmpl[6] = missing(1)
+        # Oct 18: Indicator of unit of time range (see Code table 4.4)
+        # 1 -> hours
+        # NB WPS expects this to be hours!
+        pdtmpl[7] = 1
+        # Oct 19-22: Forecast time in units defined by octet 18
+        pdtmpl[8] = fv['forc_hrs'].get_value()[0]
+        # Oct 23: Type of first fixed surface (see Code table 4.5)
+        # 106 -> depth below land surface
+        pdtmpl[9] = 106
+        # Oct 24: Scale factor of first fixed surface
+        # 2 -> preserve cm accuracy
+        pdtmpl[10] = 2
+        # Oct 25-28: Scaled value of first fixed surface
+        # -> missing as this will be defined later when the data is sliced in
+        # horizontal slabs for its encoding
+        pdtmpl[11] = missing(3)
+        # Oct 29: Type of second fixed surface (see Code table 4.5)
+        # 106 -> depth below land surface
+        pdtmpl[12] = 106
+        # Oct 30: Scale factor of second fixed surface
+        # 2 -> preserve cm accuracy
+        pdtmpl[13] = 2
+        # Oct 31-34: Scaled value of second fixed surface
+        pdtmpl[14] = missing(3)
+        # -> missing as this will be defined later when the data is sliced in
+        # horizontal slabs for its encoding
+     
+        # NB I am using the same drtnum and drtmpl for all data so I won't
+        # replicate it in the following fields. If you need to change this on a
+        # per-variable basis this is the place to redefine it. If you choose to do
+        # so then make sure you define one for each variable that follows or be
+        # aware that the last one defined will apply to all that follows.
+    
+        # in the ECMWF soil scheme the data is saved scaled by the depth of the
+        # soil layer (or so I understand...)
+    
+        # TODO work out this 'not writable' rubbish
+        #print laps_soil_mois.flags['WRITEABLE'] 
+        #laps_soil_mois.flags['WRITEABLE'] = True
+        #laps_soil_mois = fv['soil_mois'].get_value()[0]
+        #laps_soil_mois = fv['soil_mois'].get_value()[0,:,lat_cheat_idx:,:-lon_cheat_idx]
+        tmp = fv['soil_mois'].get_value()[0,:,lat_cheat_idx:,:-lon_cheat_idx]
+        shape = tmp.shape
+        laps_soil_mois = n.zeros(shape)
+        laps_soil_lvl = fv['soil_lvl'].get_value()
+        for soil_idx in range(len(laps_soil_lvl)):
+            #laps_soil_mois[soil_idx] /= laps_soil_lvl[soil_idx]
+            laps_soil_mois[soil_idx] = tmp[soil_idx] / laps_soil_lvl[soil_idx]
+   
+        # Let's interpolate linearly to the Noah levels
+        # NB for the deepest level we actually have an extrapolation
+        noah_soil_lvl = [0.10, 0.40, 1.0, 2.0]
+        noah_soil_mois = n.zeros(laps_soil_mois.shape, dtype=float)
+        max_soil_lvl = 4
+        for soil_idx in range(max_soil_lvl):
+            if soil_idx < max_soil_lvl - 1:
+                x_a = n.ones((shape[1],shape[2])) * laps_soil_lvl[soil_idx]
+                x_b = n.ones((shape[1],shape[2])) * laps_soil_lvl[soil_idx + 1]
+                y_a = laps_soil_mois[soil_idx]
+                y_b = laps_soil_mois[soil_idx + 1]
+            else:
+                # this accomplishes the extrapolation
+                x_a = n.ones((shape[1],shape[2])) * laps_soil_lvl[soil_idx - 1]
+                x_b = n.ones((shape[1],shape[2])) * laps_soil_lvl[soil_idx]
+                y_a = laps_soil_mois[soil_idx - 1]
+                y_b = laps_soil_mois[soil_idx]
+            x = n.ones((shape[1],shape[2])) * noah_soil_lvl[soil_idx]
+            noah_soil_mois[soil_idx] = vu.lin_interp(x_a, x_b, y_a, y_b, x)
+            
+ 
+        cheat = noah_soil_mois
+        field = n.zeros(cheat.shape)
+        for lvl_idx in range(cheat.shape[0]):
+            for lat_idx in range(cheat.shape[1]):
+                field[lvl_idx,-(lat_idx+1)] = cheat[lvl_idx,lat_idx]
+
+        dummy['soil_mois'] = {
+            'long_name' : '3D volumetric soil moisture',
+            'pdtnum' : pdtnum, 
+            'pdtmpl' : pdtmpl, 
+            'drtnum' : drtnum,
+            'drtmpl' : drtmpl,
+            #TODO: make sure the scaling is right
+            #'field' : noah_soil_mois,
+            'field' : field,
+            'lvl' : noah_soil_lvl
+          }
+
+    f.close()
+    sfc_f.close()
+
+#sys.exit()
+#p.show()
+
+
+
+# Multiple times can be included in a grib files as sequential messages so the
+# outermost 'dimension' of the data structure should be time
+# As I am going to use the GFS grib files from ncep as a template for my own
+# files, I will include only one time in each of the files.
+# It should be easy to wrap to modify the following code to concatenate
+# multiple times
+
+def do_the_encoding(data):
+     dummy_idx = 0
+     for time_slice in data:
+         g2e = grib2.grib2.Grib2Encode(
+           time_slice['discipline']['atmos']['code'],
+           time_slice['idsect'])
+         g2e.addgrid(time_slice['gdsinfo'],time_slice['gdtmpl'])
+         dummy = time_slice['discipline']['atmos']['fields']['2d']
+         for var_key in dummy.keys():
+             print 'Processing ' + dummy[var_key]['long_name']
+             g2e.addfield(
+               dummy[var_key]['pdtnum'],
+               dummy[var_key]['pdtmpl'],
+               dummy[var_key]['drtnum'],
+               dummy[var_key]['drtmpl'],
+               dummy[var_key]['field']
+               )
+         dummy = time_slice['discipline']['atmos']['fields']['3d']
+         for var_key in dummy.keys():
+             print 'Processing ' + dummy[var_key]['long_name']
+             for lvl_idx in range(len(dummy[var_key]['lvl'])):
+                 print '\tlevel ' + str(lvl_idx)
+                 # '*100' because it must be in Pa
+                 dummy[var_key]['pdtmpl'][11] = \
+                   dummy[var_key]['lvl'][lvl_idx]*100
+                 g2e.addfield(
+                   dummy[var_key]['pdtnum'],
+                   dummy[var_key]['pdtmpl'],
+                   dummy[var_key]['drtnum'],
+                   dummy[var_key]['drtmpl'],
+                   dummy[var_key]['field'][lvl_idx]
+                   )
+         g2e.end()
+         file_name = 'laps_' + str(time_slice['idsect'][5]) + '_' \
+           + str(zfill(time_slice['idsect'][6],2)) + '_' \
+           + str(zfill(time_slice['idsect'][7],2)) + '_' \
+           + str(zfill(time_slice['idsect'][8],2)) + 'Z_' \
+           + str(zfill(dummy[var_key]['pdtmpl'][8],2)) + '_hrs_fcst.grb2'
+         #f = open('dummy' + str(dummy_idx) + '.grb2', 'w')
+         f = open(file_name, 'w')
+         f.write(g2e.msg)
+         f.close()
+
+         g2e = grib2.grib2.Grib2Encode(
+           time_slice['discipline']['ocean']['code'],
+           time_slice['idsect'])
+         g2e.addgrid(time_slice['gdsinfo'],time_slice['gdtmpl'])
+         dummy = time_slice['discipline']['ocean']['fields']['2d']
+         for var_key in dummy.keys():
+             print 'Processing ' + dummy[var_key]['long_name']
+             g2e.addfield(
+               dummy[var_key]['pdtnum'],
+               dummy[var_key]['pdtmpl'],
+               dummy[var_key]['drtnum'],
+               dummy[var_key]['drtmpl'],
+               dummy[var_key]['field']
+               )
+         if 0:
+             dummy = time_slice['discipline']['ocean']['fields']['3d']
+             for var_key in dummy.keys():
+                 print 'Processing ' + dummy[var_key]['long_name']
+                 for lvl_idx in range(len(dummy[var_key]['lvl'])):
+                     print '\tlevel ' + str(lvl_idx)
+                     # '*100' because it must be in Pa
+                     dummy[var_key]['pdtmpl'][11] = \
+                       dummy[var_key]['lvl'][lvl_idx]*100
+                     g2e.addfield(
+                       dummy[var_key]['pdtnum'],
+                       dummy[var_key]['pdtmpl'],
+                       dummy[var_key]['drtnum'],
+                       dummy[var_key]['drtmpl'],
+                       dummy[var_key]['field'][lvl_idx]
+                       )
+             g2e.end()
+         #f = open('dummy' + str(dummy_idx) + '.grb2', 'a')
+         f = open(file_name, 'a')
+         f.write(g2e.msg)
+         f.close()
+
+         g2e = grib2.grib2.Grib2Encode(
+           time_slice['discipline']['land']['code'],
+           time_slice['idsect'])
+         g2e.addgrid(time_slice['gdsinfo'],time_slice['gdtmpl'])
+         dummy = time_slice['discipline']['land']['fields']['2d']
+         for var_key in dummy.keys():
+             print 'Processing ' + dummy[var_key]['long_name']
+             g2e.addfield(
+               dummy[var_key]['pdtnum'],
+               dummy[var_key]['pdtmpl'],
+               dummy[var_key]['drtnum'],
+               dummy[var_key]['drtmpl'],
+               dummy[var_key]['field']
+               )
+         dummy = time_slice['discipline']['land']['fields']['3d']
+         for var_key in dummy.keys():
+             print 'Processing ' + dummy[var_key]['long_name']
+             for lvl_idx in range(len(dummy[var_key]['lvl'])):
+                 print '\tlevel ' + str(lvl_idx)
+                 if lvl_idx == 0:
+                     dummy[var_key]['pdtmpl'][11] = 0
+                 else:
+                     dummy[var_key]['pdtmpl'][11] = \
+                       round(dummy[var_key]['lvl'][lvl_idx-1]* 10**soil_lvl_scaling)
+                 dummy[var_key]['pdtmpl'][14] = \
+                   round(dummy[var_key]['lvl'][lvl_idx]* 10**soil_lvl_scaling)
+                 g2e.addfield(
+                   dummy[var_key]['pdtnum'],
+                   dummy[var_key]['pdtmpl'],
+                   dummy[var_key]['drtnum'],
+                   dummy[var_key]['drtmpl'],
+                   dummy[var_key]['field'][lvl_idx]
+                   )
+         g2e.end()
+         #f = open('dummy' + str(dummy_idx) + '.grb2', 'a')
+         f = open(file_name, 'a')
+         f.write(g2e.msg)
+         f.close()
+
+         #os.system('cnvgrib -g21 ' + file_name + ' ' + file_name[:-4] + 'grb')
+         #os.system('/Users/val/src/cnvgrib-1.1.3/cnvgrib -g21 ' + file_name + ' ' + file_name[:-4] + 'grb')
+         os.system('cnvgrib -g21 ' + file_name + ' ' + file_name[:-4] + 'grb')
+         os.system('rm ' + file_name)
+ 
+         dummy_idx += 1
+         
Index: trunk/MESOSCALE_DEV/PLOT/PYTHON/scripts/find_devils.py
===================================================================
--- trunk/MESOSCALE_DEV/PLOT/PYTHON/scripts/find_devils.py	(revision 296)
+++ trunk/MESOSCALE_DEV/PLOT/PYTHON/scripts/find_devils.py	(revision 296)
@@ -0,0 +1,137 @@
+#! /usr/bin/env python
+
def detsize( xx, res=1, thres=3, loga=False ):
    """Return the sizes of runs of contiguous indices in xx.

    xx is expected to be a sorted sequence of grid indices. A run ends
    wherever two successive elements differ by more than 1 (np.roll makes
    the comparison wrap around, so the last element is diffed against the
    first, which normally closes the final run). Runs shorter than
    `thres` are discarded; each kept run contributes run_length*res
    (or its natural log when loga is True).
    """
    import numpy as np
    import math
    gaps = np.asarray( np.roll(xx, -1) - xx )
    sizes = []
    run = 1
    for step in gaps:
        if abs(step) > 1:
            # gap found: close the current run, keep it if long enough
            if run >= thres:
                sizes.append( math.log(run*res) if loga else run*res )
            run = 1
        else:
            run += 1
    return sizes
+
+def getsize(filename):
+
+    import numpy as np
+    from scipy.ndimage.measurements import minimum_position
+    from netCDF4 import Dataset
+    
+    ### LOAD NETCDF DATA
+    filename = "psfc.nc"
+    nc = Dataset(filename) 
+    psfc = nc.variables["PSFC"]
+    
+    ### LOOP on TIME
+    ### NB: a same event could be counted several times...
+    shape = np.array(psfc).shape
+    allsizesx = []
+    allsizesy = []
+    for i in range(0,shape[0]):
+    #for i in range(0,2):
+    
+        print i, ' on ', shape[0]
+        psfc2d = np.array ( psfc [ i, : , : ] )
+    
+        ############### CRITERION
+        ave = np.mean(psfc2d,dtype=np.float64) ## dtype otherwise inaccuracy
+        where = np.where(psfc2d - ave < -0.3)  ## comme le papier Phoenix
+    
+        std = np.std(psfc2d,dtype=np.float64) ## dtype otherwise inaccuracy
+        fac = 2.5  ## not too low, otherwise vortices are not caught. 4 good choice.
+        lim = ave - fac*std
+        print lim, ave, std
+        where = np.where(psfc2d < lim)
+        ############### END CRITERION
+    
+        lab = np.zeros(np.array(psfc2d).shape) ## points to be treated by the minimum_position routine
+        lab[where] = 1.  ## do not treat points close to 'mean' (background) pressure
+    
+        xx = []
+        yy = []
+        while 1 in lab:
+            p = minimum_position(psfc2d,labels=lab)
+            lab[p] = 0 ## once a minimum has been found in a grid point, do not search here again.
+            if p[0] not in xx: xx.append(p[0]) ## if x coordinate not yet in the list add it
+            if p[1] not in yy: yy.append(p[1]) ## if y coordinate not yet in the list add it
+        xx.sort()
+        yy.sort()
+        ### now xx and yy are sorted arrays containing grid points with pressure minimum
+       
+        ######## DETERMINE SIZE OF STRUCTURES
+        ######## this is rather brute-force...
+        sizex = detsize( xx, res = 10, loga=False, thres=2 )
+        sizey = detsize( yy, res = 10, loga=False, thres=2 )
+        ###
+        allsizesx = np.append(allsizesx,sizex)
+        allsizesy = np.append(allsizesy,sizey)
+        ########
+    
+    allsizesx.sort()
+    allsizesy.sort()
+    
+    return allsizesx, allsizesy
+
def writeascii ( tab, filename ):
    """Dump each element of `tab` as one text line into `filename`."""
    with open(filename, 'w') as out:
        out.writelines( str(item) + '\n' for item in tab )
+
+import matplotlib.pyplot as plt
+import pickle
+import numpy as np
+import matplotlib.mlab as mlab
+
+save = True
+#save = False
+if save:
+    allsizesx, allsizesy = getsize("psfc.nc")
+    writeascii(allsizesx,'allsizex.txt')
+    writeascii(allsizesy,'allsizey.txt')
+    
+    myfile = open('allsizex.bin', 'wb')
+    pickle.dump(allsizesx, myfile)
+    myfile.close()
+    
+    myfile = open('allsizey.bin', 'wb')
+    pickle.dump(allsizesy, myfile)
+    myfile.close()
+
+
+myfile = open('allsizex.bin', 'r')
+allsizesx = pickle.load(myfile)
+myfile = open('allsizey.bin', 'r')
+allsizesy = pickle.load(myfile)
+
+plothist = np.append(allsizesx,allsizesy)
+plothist.sort()
+
+nbins=100
+zebins = [2.0]
+for i in range(0,nbins):  zebins.append(zebins[i]*np.sqrt(2))
+zebins = np.array(zebins)
+print zebins
+
+zebins = zebins [ zebins > 10. ] 
+zebins = zebins [ zebins < 1000. ]
+print zebins
+
+#### HISTOGRAM
+plt.hist(    plothist,\
+             log=True,\
+             bins=zebins,\
+             #cumulative=-1,\
+        )
+plt.xscale('log')
+
+plt.show()
+
Index: trunk/MESOSCALE_DEV/PLOT/PYTHON/scripts/makefig.sh
===================================================================
--- trunk/MESOSCALE_DEV/PLOT/PYTHON/scripts/makefig.sh	(revision 296)
+++ trunk/MESOSCALE_DEV/PLOT/PYTHON/scripts/makefig.sh	(revision 296)
@@ -0,0 +1,16 @@
#! /bin/bash

## Generate figures for the THOMAS polar mesoscale simulations and drop
## them in the web staging folder. Relies on the project plotting tools
## domain.py and winds.py being on the PATH.

## d03 outputs at three seasonal dates (Ls=50, 20, 10).
## NOTE(review): dates such as 2024-02-45 look invalid but are presumably
## the Martian calendar used by the mesoscale model -- confirm.
 fold="/home/aslmd/POLAR/"
 file1="$fold/POLAR_THOMAS_ls50/wrfout_d03_2024-02-45_06:00:00"
 file2="$fold/POLAR_THOMAS_ls20/wrfout_d03_2024-01-42_06:00:00"
 #file3="$fold/POLAR_THOMAS_ls0/wrfout_d03_2024-01-06_06:00:00"
 file3="$fold/POLAR_THOMAS_ls10/wrfout_d03_2024-01-19_06:00:00"
 ## web-visible destination for the png output
 dest="/u/aslmd/WWW/antichambre/thomas/"

 ## domain overview map (north-polar stereographic projection)
 domain.py -t $dest -f $file1 -p npstere
 ## wind plots for the three files; flag semantics are defined by the
 ## project-local winds.py (presumably -m/-M are min/max bounds per
 ## variable -v, -p the projection, -n/-z/-i/-s sampling controls --
 ## verify against winds.py's option parser).
 winds.py  -t $dest -p lcc -n 2 -z 25 -i 4 -s 2 \
           -v HFX  -m -15. -M 0.0 \
           -v USTM -m   0. -M 0.8 \
           -v HGT  -m   0  -M 0 \
           -f $file1 -f $file2 -f $file3 \
           -d
