source: trunk/LMDZ.PLUTO/util/script_figures/movie_vmr_ch4/FV3_utils.py @ 3833

Last change on this file since 3833 was 3833, checked in by afalco, 40 hours ago

Pluto: updated plots scripts.
Fixed some issues with reading XIOS, etc.
Included display_netcdf.py tool from Mars PCM.
AF

  • Property svn:executable set to *
File size: 43.8 KB
1import numpy as np
2import math
3pi=math.pi
4import matplotlib 
5matplotlib.use('Agg')
6
7def fmtsci(x, pos):
8    a, b = '{:.1e}'.format(x).split('e')
9    b = int(b)
10    return r'${} \times 10^{{{}}}$'.format(a, b)
11
12def calcaire(lat,lon):
13 # Calculate the area of each grid point assuming regular grid
14 nblat=np.size(lat)
15 nblon=np.size(lon)
16 radius=3390.
17 # Sup and Inf boundary of latitudes
18 latsup=np.zeros(nblat,dtype='f')
19 latinf=np.zeros(nblat,dtype='f')
20 # Area of each latitudinal band
21 peri=np.zeros(nblat,dtype='f')
22 alpha=np.zeros(nblat,dtype='f')
23
24 for i in range(nblat-1):
25    latsup[i+1]=(lat[i]+lat[i+1])/2.
26    latinf[i]=(lat[i+1]+lat[i])/2.
27 latsup[0]=-90 #lat[0]
28 latinf[nblat-1]=90 #lat[nblat-1]
29
30 print(latsup)
31 print(latinf)
32 # Area of a latitudinal band:
33 for i in range(nblat):
34    alpha[i]=abs(np.sin(latsup[i]*pi/180.)-np.sin(latinf[i]*pi/180.))*2.*radius**2*pi
35 airetot=sum(alpha)
36 return alpha,airetot
37
38def calcairepluto(lat,lon):
39 # Calculate the area of each grid point assuming regular grid
40 nblat=np.size(lat)
41 nblon=np.size(lon)
42 radius=1187.
43 # Sup and Inf boundary of latitudes
44 latsup=np.zeros(nblat,dtype='f')
45 latinf=np.zeros(nblat,dtype='f')
46 # Area of each latitudinal band
47 alpha=np.zeros(nblat,dtype='f')
48
49 for i in range(nblat-1):
50    latsup[i+1]=(lat[i]+lat[i+1])/2.
51    latinf[i]=(lat[i+1]+lat[i])/2.
52 latsup[0]=-90 #lat[0]
53 latinf[nblat-1]=90 #lat[nblat-1]
54 # Area of a latitudinal band:
55 for i in range(nblat):
56    alpha[i]=abs(np.sin(latsup[i]*pi/180.)-np.sin(latinf[i]*pi/180.))*2.*radius**2*pi
57 airetot=sum(alpha)
58 return alpha,airetot
59
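# Illustrative usage sketch (not part of the original file): with a hypothetical
# regular 5-degree Pluto grid, the summed band areas returned by calcairepluto()
# should recover the full sphere area 4*pi*R^2 with R=1187 km.
def _example_calcairepluto():
    lat = np.arange(-87.5, 90., 5.)   # assumed regular latitude centers
    lon = np.arange(0., 360., 5.)     # assumed regular longitudes
    alpha, airetot = calcairepluto(lat, lon)
    print('total area [km2]:', airetot, ' 4*pi*R^2:', 4.*pi*1187.**2)
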
60def switchlon(arr):
61# shift the longitudes to put TR at the center
62 vec=np.shape(arr)
63 myvar=np.zeros(vec,dtype='f')
64 # i lat : no change
65 # j lon :
66 for i in range(vec[0]):
67    for j in range(vec[1]):
68        if j < int(vec[1]/2.) :
69           myvar[i,j]=arr[i,j+int(vec[1]/2)]
70        else:
71           myvar[i,j]=arr[i,j-int(vec[1]/2)]
72 return myvar
73
74def switchlon3D(arr):
75# shift the longitudes to put TR at the center
76 vec=np.shape(arr)
77 myvar=np.zeros(vec,dtype='f')
78 # i lat : no change
79 # j lon :
80 for i in range(vec[1]):
81    for j in range(vec[2]):
82        if j < int(vec[2]/2.) :
83           myvar[:,i,j]=arr[:,i,j+int(vec[2]/2)]
84        else:
85           myvar[:,i,j]=arr[:,i,j-int(vec[2]/2)]
86 return myvar
87
88def extractpal(pal,lev):
89    import matplotlib as mpl
90    cmap = mpl.cm.get_cmap(pal)
91    #print '\nlevpal=',lev
92    #rgb is a vector with nbl colors
93    nbl=np.size(lev)
94    rgb=[cmap(0)[0:3]]
95    for i in range(nbl-1):
96     #aaa=((lev[i+1]+lev[i])/2.-lev[0])/(lev[-1]-lev[0])
97     aaa=lev[i+1]
98     #print 'index weight / color =',aaa,cmap(aaa)[0:3]
99     rgb.append(cmap(aaa)[0:3])
100    #print '\nsize rgb=',np.shape(rgb)
101    return rgb
102
103def make_colormap(seq):
104    """Return a LinearSegmentedColormap
105    seq: a sequence of floats and RGB-tuples. The floats should be increasing
106    and in the interval (0,1).
107    """
108    import matplotlib.colors as colors
109
110    seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
111    print('\nNew sequence=',seq)
112    cdict = {'red': [], 'green': [], 'blue': []}
113    for i, item in enumerate(seq):
114        if isinstance(item, float):
115            r1, g1, b1 = seq[i - 1]
116            r2, g2, b2 = seq[i + 1]
117            #print "i,item=",i,item,r1, g1, b1,r2, g2, b2
118            cdict['red'].append([item, r1, r2])
119            cdict['green'].append([item, g1, g2])
120            cdict['blue'].append([item, b1, b2])
121    #print 'cdict=',cdict
122    return colors.LinearSegmentedColormap('CustomMap', cdict)
123
124def getcol(lev,myc):
125  ## myc : palette color, dimension n
126  ## Lev and myc must be of dimension n and n
127  nbl=np.size(lev)
128  # Compute the normalized position of each tick
129  onticks=[0] #[lev[0]]
130  #onticks2=np.linspace(0,1,14)
131  for i in range(nbl-1):
132    aaa=(lev[i+1]-lev[0])/(lev[-1]-lev[0])
133    onticks.append(aaa)
134  print('\nlevels asked=',lev)
135  print('\nlevels ticks=',onticks)
136  # compute the seq of color
137  gg=[myc[0]]
138  for tt in range(nbl-2):
139    gg.append(onticks[tt+1])
140    gg.append(myc[tt+1])
141  #gg.append(onticks[nbl])
142  #print '\nsequence=',gg
143  # cmap
144  rvb=make_colormap(gg)
145  return rvb
146
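# Illustrative usage sketch (not part of the original file): build a discrete
# blue-white-red colormap from hypothetical contour levels with getcol().
# The levels and colors below are arbitrary placeholder values.
def _example_getcol():
    lev = [0., 1., 2., 4.]                            # assumed contour levels
    myc = [(0., 0., 1.), (1., 1., 1.), (1., 0., 0.)]  # one RGB triplet per interval
    cmap = getcol(lev, myc)                           # usable as contourf(..., cmap=cmap)
    return cmap
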
147def name_regions():
148    # lon,lat,name,rotation,font
149    font=18
150    i=2
151    j=4
152    mylist=[]
153    mylist.append([72,-43,'Hellas',0,font,'center'])
154    mylist.append([90,-15,'Tyrrhena\nTerra',0,font,'center'])
155    mylist.append([110,-28,'Hesperia\nPlanum',0,font,'center'])
156    mylist.append([115,-50,'Promethei\nTerra',0,font,'center'])
157    mylist.append([155,-45,'Cimmeria\nTerra',0,font,'center'])
158    mylist.append([195,-40,'Sirenum\nTerra',0,font,'center'])
159    mylist.append([63,8,'Syrtis\nMajor',0,font-j,'center'])
160    #mylist.append([220,-10,'Arsia\nMons',0,font-j,'center'])
161    mylist.append([242,40,'Alba\nPatera',0,font-j,'center'])
162    mylist.append([285,35,'Tempe\nTerra',0,font-i,'center'])
163    mylist.append([41,-3,'Sabaea\nTerra',0,font,'center'])
164    mylist.append([150,12,'Elysium Planitia',0,font,'center'])
165    mylist.append([120,50,'Utopia Planitia',0,font,'center'])
166    #mylist.append([280,-8,'Valles Marineris',-10,font-3,'center'])
167    mylist.append([220,-25,'Daedalia\nPlanum',0,font,'center'])
168    mylist.append([232,-40,'Icaria\nPlanum',0,font-j,'center'])
169    mylist.append([330,45,'Acidalia\nPlanitia',0,font,'center'])
170    mylist.append([315,25,'Chryse\nPlanitia',0,font-i,'center'])
171    mylist.append([90,12,'Isidis\nPlanitia',0,font-j,'center'])
172    mylist.append([185,25,'Amazonis\nPlanitia',0,font,'center'])
173    mylist.append([190,50,'Arcadia\nPlanitia',0,font,'center'])
174    mylist.append([20,30,'Arabia\nTerra',0,font,'center'])
175    mylist.append([17,-45,'Noachis\nTerra',0,font,'center'])
176    mylist.append([260,5,'Tharsis',0,font-i,'center'])
177    mylist.append([315,0,'Xanthe\nTerra',0,font-i,'center'])
178    mylist.append([270,-25,'Solis',0,font-j,'center'])
179    mylist.append([255,-10,'Syria',0,font-j,'center'])
180    mylist.append([270,-15,'Sinai',0,font-j,'center'])
181    mylist.append([315,-50,'Argyre',0,font,'center'])
182    mylist.append([270,-55,'Aonia\nTerra',0,font,'center'])
183    #mylist.append([270,-45,'Thaumasia\nHighlands',0,font-i,'center'])
184    #mylist.append([250,-15,'Claritas Fossae',-80,font-3,'center'])
185
186    return mylist
187
188
189def getwinds(lon,lat,vecx,vecy,svx,svy,scale,width,val):
190          import matplotlib.pyplot as mpl
191          angle='uv'       # 'xy'
192          color='black'    # arrow color
193          pivot='mid'      # arrow around middle of box. Alternative : tip
194          linewidths=0.5   # thickness of the arrow outline
195          edgecolors='k'   # color of the arrow outline
196
197    #  *scale*: [ *None* | float ]
198    #  Data units per arrow length unit, e.g., m/s per plot width; a smaller
199    #  scale parameter makes the arrow longer.  If *None*, a simple
200    #  autoscaling algorithm is used, based on the average vector length
201    #  and the number of vectors.  The arrow length unit is given by
202    #  the *scale_units* parameter
203
204    #  *scale_units*: *None*, or any of the *units* options.
205    #  For example, if *scale_units* is 'inches', *scale* is 2.0, and
206    #  ``(u,v) = (1,0)``, then the vector will be 0.5 inches long.
207    #  If *scale_units* is 'width', then the vector will be half the width
208    #  of the axes.
209
210    #  If *scale_units* is 'x' then the vector will be 0.5 x-axis
211    #  units.  To plot vectors in the x-y plane, with u and v having
212    #  the same units as x and y, use
213    #  "angles='xy', scale_units='xy', scale=1".
214
215          x, y = np.meshgrid(lon,lat)
216          q = mpl.quiver( x[::svy,::svx],y[::svy,::svx],vecx[::svy,::svx],vecy[::svy,::svx],angles=angle,color=color,pivot=pivot,scale=scale,width=width,linewidths=linewidths,edgecolors=edgecolors)
217
218          # make vector key.
219          #keyh = 1.025 ; keyv = 1.05 # upper right corner over colorbar
220          keyh = 0.95 ; keyv = 1.03
221        #   keyh = 0.03 ; keyv = 1.07
222          #keyh = -0.03 ; keyv = 1.08 # upper left corner
223          labelpos='E'    # position label compared to arrow : N S E W
224          p = mpl.quiverkey(q,keyh,keyv,val,str(val)+' m/s',fontproperties={'size': 28,'weight': 'bold'},color='black',labelpos=labelpos,labelsep = 0.07)
225
226###############################################################################
227###############################################################################
228
229DEFAULT = object()
230## function : get local times array
231def tshift2(array, lon=DEFAULT, timex=DEFAULT, nsteps_out=DEFAULT):
232    #================================================================
233    #
234    #    Conversion to uniform local time
235    #   Assume longitude in the first dimension and time in the last dimension
236    #
237    #   Interpolate onto a new time grid with nsteps_out samples per sol
238    #   New time:   [ 0 ... nn-1/nsteps_out ]*24
239    #   Default:   nsteps_out = length(timex)
240    #
241    #    timex should be in units of hours  (only timex(1) is actually relevant)
242
243        if len(np.shape(array)) < 2:
244                print('Need longitude and time dimensions')
245                return
246
247        dims=np.shape(array)  #get dimensions of array
248        end=len(dims)-1
249        id=dims[0]   #number of longitudes in file
250        if lon is DEFAULT:
251                lon = np.linspace(0.,360.,num=id,endpoint=False)
252        if timex is DEFAULT:
253                nsteps=dims[end]
254                timex = np.linspace(0.,24.,num=nsteps,endpoint=False)
255        else:
256                nsteps=len(timex)
257
258
259        nsf = float(nsteps)
260
261        timex = np.squeeze(timex)
262
263        if timex.max() <= 1.:   #if timex is in fractions of day
264                timex = 24.*timex
265
266        if nsteps_out is DEFAULT:
267                nsteps_out = nsteps
268
269        #Assuming time is last dimension, check if it is local time timex
270        #If not, reshape the array into (stuff, days, local time)
271        print('dims,nsteps=',dims[end],nsteps)
272        if dims[end] != nsteps:
273                ndays = dims[end] // nsteps   # integer number of days
274                print('ndays=',ndays,dims[end])
275                if ndays*nsteps != dims[end]:
276                        print('Time dimensions do not conform')
277                        return
278                print('dims=',dims,end)
279                array = np.reshape(array,dims[0:end]+(nsteps, ndays))
280                newdims=np.arange(len(dims)+1,dtype=np.int32)
281                newdims[len(dims)-1]=len(dims)
282                newdims[len(dims)]=len(dims)-1
283                array = np.transpose(array,newdims)
284
285        dims=np.shape(array) #get new dims of array if reshaped
286
287
288        if len(dims) > 2:
289                recl = np.prod(dims[1:len(dims)-1])
290        else:
291                recl=1
292
293
294        array=np.reshape(array,(id,recl,nsteps))
295        #create output array
296        narray=np.zeros((id,recl,nsteps_out))
297
298        dt_samp = 24.0/nsteps      #   Time increment of input data (in hours)
299        dt_save = 24.0/nsteps_out  #   Time increment of output data (in hours)
300
301        #             calculate interpolation indices
302        # convert east longitude to equivalent hours
303        xshif = 24.0*lon/360.
304        kk=np.where(xshif < 0)
305        xshif[kk]=xshif[kk]+24.
306
307        fraction = np.zeros((id,nsteps_out))
308        imm = np.zeros((id,nsteps_out))
309        ipp = np.zeros((id,nsteps_out))
310
311        for nd in range(nsteps_out):
312                dtt = nd*dt_save - xshif - timex[0] + dt_samp
313                #      ensure that data local time is bounded by [0,24] hours
314                kk = np.where(dtt < 0.)
315                dtt[kk] = dtt[kk] + 24.
316
317                im = np.floor(dtt/dt_samp)    #  this is the index into the data array
318                fraction[:,nd] = dtt-im*dt_samp
319                kk = np.where(im < 0.)
320                im[kk] = im[kk] + nsf
321
322                ipa = im + 1.
323                kk = np.where(ipa >= nsf)
324                ipa[kk] = ipa[kk] - nsf
325
326                imm[:,nd] = im[:]
327                ipp[:,nd] = ipa[:]
328        fraction = fraction / dt_samp # assume uniform tinc between input data samples
329        #           Now carry out the interpolation
330        for nd in range(nsteps_out):    #   Number of output time levels
331                for i in range(id):         #   Number of longitudes
332                        im = int(imm[i,nd])
333                        ipa= int(ipp[i,nd])
334                        frac = fraction[i,nd]
335
336                        narray[i,:,nd] = (1.-frac)*array[i,:,im] + frac*array[i,:,ipa]
337
338        narray = np.squeeze(narray)
339        ndimsfinal=np.zeros(len(dims),dtype=int)
340        for nd in range(end):
341                ndimsfinal[nd]=dims[nd]
342        ndimsfinal[end]=nsteps_out
343        narray = np.reshape(narray,ndimsfinal)
344
345        return narray
346
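# Illustrative usage sketch (not part of the original file): shift a hypothetical
# (lon, time-of-day) field to uniform local time with tshift2(). 24 longitudes and
# 12 universal-time samples per sol are assumed; the output keeps the same shape,
# but each time index then corresponds to the same local time at every longitude.
def _example_tshift2():
    lon = np.linspace(0., 360., num=24, endpoint=False)
    timex = np.linspace(1., 25., num=12, endpoint=False)   # sample hours: 1, 3, ..., 23
    field = np.random.rand(24, 12)                          # assumed (lon, time) data
    return tshift2(field, lon=lon, timex=timex)
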
347####################################################################
348####################################################################
349def getareaff2(nc,latabs):
350  """
351  Compute area over latitude
352  """
353
354  lat0=nc.variables['lat'][:]
355  lon0=nc.variables['lon'][:]
356  nblat=np.size(lat0)
357  nblon=np.size(lon0)
358
359  # Lat sup / Lat inf
360  latsup=np.zeros(nblat,dtype='f')
361  latinf=np.zeros(nblat,dtype='f')
362  for i in range(nblat-1):
363    latsup[i+1]=(lat0[i]+lat0[i+1])/2.
364    latinf[i]=(lat0[i+1]+lat0[i])/2.
365  latsup[0]=lat0[0]
366  latinf[nblat-1]=lat0[nblat-1]
367
368  # Area
369  rad=3390.
370  area=np.zeros(nblat,dtype='f')
371  alpha=np.zeros(nblat,dtype='f')
372  for i in range(nblat):
373    if abs(lat0[i])<latabs:
374     alpha[i]=abs(np.sin(latsup[i]*pi/180.)-np.sin(latinf[i]*pi/180.))*rad
375     area[i]=alpha[i]*rad*2.*pi
376  areatot=sum(area)
377  return area,areatot,nblon,nblat
378
379def getareaff(nc):
380  """
381  Compute area over latitude
382  """
383
384  lat0=nc.variables['lat'][:]
385  lon0=nc.variables['lon']
386  nblat=np.size(lat0)
387  nblon=np.size(lon0)
388
389  # Lat sup / Lat inf
390  latsup=np.zeros(nblat,dtype='f')
391  latinf=np.zeros(nblat,dtype='f')
392  for i in range(nblat-1):
393    latsup[i+1]=(lat0[i]+lat0[i+1])/2.
394    latinf[i]=(lat0[i+1]+lat0[i])/2.
395  latsup[0]=lat0[0]
396  latinf[nblat-1]=lat0[nblat-1]
397
398  # Area
399  rad=3390.
400  area=np.zeros(nblat,dtype='f')
401  alpha=np.zeros(nblat,dtype='f')
402  for i in range(nblat):
403    alpha[i]=abs(np.sin(latsup[i]*pi/180.)-np.sin(latinf[i]*pi/180.))*rad
404    area[i]=alpha[i]*rad*2.*pi
405  areatot=sum(area)
406  return area,areatot,nblon,nblat
407
408def getarea_r(lat0,lon0,rad):
409  """
410  Compute area over latitude
411  """
412
413  nblat=np.size(lat0)
414  nblon=np.size(lon0)
415
416  # Lat sup / Lat inf
417  latsup=np.zeros(nblat,dtype='f')
418  latinf=np.zeros(nblat,dtype='f')
419  for i in range(nblat-1):
420    latsup[i+1]=(lat0[i]+lat0[i+1])/2.
421    latinf[i]=(lat0[i+1]+lat0[i])/2.
422  latsup[0]=lat0[0]
423  latinf[nblat-1]=lat0[nblat-1]
424
425  # Area
426  area=np.zeros(nblat,dtype='f')
427  alpha=np.zeros(nblat,dtype='f')
428  for i in range(nblat):
429    alpha[i]=abs(np.sin(latsup[i]*pi/180.)-np.sin(latinf[i]*pi/180.))*rad
430    area[i]=alpha[i]*rad*2.*pi
431  areatot=sum(area)
432  return area,areatot,nblon,nblat
433
434
435def fms_press_calc(psfc,ak,bk,lev_type='full'):
436    """
437    Return the 3d pressure field from the surface pressure and the ak/bk coefficients.
438
439    Args:
440        psfc: the surface pressure in [Pa] or array of surface pressures 1D or 2D, or 3D (if time dimension)
441        ak: 1st vertical coordinate parameter
442        bk: 2nd vertical coordinate parameter
443        lev_type: "full" (centers of the levels) or "half" (layer interfaces)
444                  Default is "full"
445    Returns:
446        The 3D pressure field at the full PRESS_f(:,:,Nk-1) or half levels PRESS_h(:,:,Nk) in [Pa]
447    --- 0 --- TOP        ========  p_half
448    --- 1 ---
449                         --------  p_full
450
451                         ========  p_half
452    ---Nk-1---           --------  p_full
453    --- Nk --- SFC       ========  p_half
454                        / / / / /
455
456    *NOTE*
457        Some literature uses pk (pressure) instead of ak.
458        With p3d=  ps*bk +pref*ak  vs the current  p3d= ps*bk +ak
459
460
461    """
462
463    Nk=len(ak)
464    # If psfc is a float (e.g. psfc=7.) make it a one element array (e.g. psfc=[7.])
465    if len(np.atleast_1d(psfc))==1: psfc=np.array([np.squeeze(psfc)])
466
467    #Flatten the pressure array to generalize to N dimensions
468    psfc_flat=psfc.flatten()
469
470    # Expands the dimensions vectorized calculations:
471    psfc_v=np.repeat(psfc_flat[:,np.newaxis],Nk, axis=1)    #(Np) ->(Np,Nk)
472    ak_v=np.repeat(ak[np.newaxis,:],len(psfc_flat), axis=0) #(Nk) ->(Np,Nk)
473    bk_v=np.repeat(bk[np.newaxis,:],1, axis=0)              #(Nk) ->(1, Nk)
474
475    #Pressure at half level = layers interfaces. The size of z axis is Nk
476    PRESS_h=psfc_v*bk_v+ak_v
477
478    #Pressure at full levels = centers of the levels. The size of z axis is Nk-1
479    PRESS_f=np.zeros((len(psfc_flat),Nk-1))
480
481    #Top layer (1st element is i=0 in Python)
482    if ak[0]==0 and bk[0]==0:
483        PRESS_f[:,0]= 0.5*(PRESS_h[:,0]+PRESS_h[:,1])
484    else:
485        PRESS_f[:,0] = (PRESS_h[:,1]-PRESS_h[:,0])/np.log(PRESS_h[:,1]/PRESS_h[:,0])
486
487    #Rest of the column (i=1..Nk).
488    #[2:] goes from the 3rd element to Nk and [1:-1] goes from the 2nd element to Nk-1
489    PRESS_f[:,1:]= (PRESS_h[:,2:]-PRESS_h[:,1:-1])/np.log(PRESS_h[:,2:]/PRESS_h[:,1:-1])
490
491    # Reshape PRESS(:,Nk) to the original pressure shape PRESS(:,:,Nk) (resp. Nk-1)
492
493    if lev_type=="full":
494        new_dim_f=np.append(psfc.shape,Nk-1)
495        return np.squeeze(PRESS_f.reshape(new_dim_f))
496    elif lev_type=="half" :
497        new_dim_h=np.append(psfc.shape,Nk)
498        return np.squeeze(PRESS_h.reshape(new_dim_h))
499    else:
500        raise Exception("""Pressure levels type not recognized in press_lev(): use 'full' or 'half' """)
501
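# Illustrative usage sketch (not part of the original file): a hypothetical 3-level
# ak/bk set and a single surface pressure, showing the shapes returned by
# fms_press_calc() for both level types.
def _example_fms_press_calc():
    ak = np.array([0., 1., 2.])       # assumed [Pa]
    bk = np.array([0., 0.5, 1.])      # assumed [none]
    psfc = 610.                       # assumed surface pressure [Pa]
    p_half = fms_press_calc(psfc, ak, bk, lev_type='half')  # Nk   = 3 layer interfaces
    p_full = fms_press_calc(psfc, ak, bk, lev_type='full')  # Nk-1 = 2 layer centers
    return p_half, p_full
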
502def fms_Z_calc(psfc,ak,bk,T,topo=0.,lev_type='full'):
503    """
504    Return the 3d altitude field in [m]
505
506    Args:
507        psfc: the surface pressure in [Pa] or array of surface pressures 1D or 2D, or 3D (if time dimension)
508        ak: 1st vertical coordinate parameter
509        bk: 2nd vertical coordinate parameter
510        T : the air temperature profile, 1D array (for a single grid point) or 2D, 3D 4D
511        topo: the surface elevation, same dimension as psfc
512        lev_type: "full" (centers of the levels) or "half" (layer interfaces)
513                  Default is "full"
514    Returns:
515        The layers' altitude  at the full Z_f(:,:,Nk-1) or half levels Z_h(:,:,Nk) in [m]
516
517    --- 0 --- TOP        ========  z_half
518    --- 1 ---
519                         --------  z_full
520
521                         ========  z_half
522    ---Nk-1---           --------  z_full
523    --- Nk --- SFC       ========  z_half
524                        / / / / /
525
526
527    *NOTE*
528        Calculation is derived from ./atmos_cubed_sphere_mars/Mars_phys.F90:
529        We have dp/dz = -rho g => dz= dp/(-rho g) and rho= p/(r T)  => dz=rT/g *(-dp/p)
530        Let's define the log-pressure u as u = ln(p). We have du = du/dp *dp = (1/p)*dp = dp/p
531
532        Finally, we have dz for the half layers:  dz=rT/g *(-du) => dz=rT/g *(+dp/p),  with the N layers defined from top to bottom.
533    """
534    g=3.72 # gravitational acceleration [m/s2]
535    r_co2= 191.00 # specific gas constant for CO2 [J/(kg K)]
536    Nk=len(ak)
537    #===get the half and full pressure levels from fms_press_calc==
538
539    PRESS_f=fms_press_calc(psfc,ak,bk,'full')
540    PRESS_h=fms_press_calc(psfc,ak,bk,'half')
541
542    # If psfc is a float, turn it into a one-element array:
543    if len(np.atleast_1d(psfc))==1:
544        psfc=np.array([np.squeeze(psfc)])
545        topo=np.array([np.squeeze(topo)])
546
547    psfc_flat=psfc.flatten()
548    topo_flat=topo.flatten()
549
550    #  reshape arrays for vector calculations and compute the log pressure====
551
552    PRESS_h=PRESS_h.reshape((len(psfc_flat),Nk))
553    PRESS_f=PRESS_f.reshape((len(psfc_flat),Nk-1))
554    T=T.reshape((len(psfc_flat),Nk-1))
555
556    logPPRESS_h=np.log(PRESS_h)
557
558    #===Initialize the output arrays===
559    Z_f=np.zeros((len(psfc_flat),Nk-1))
560    Z_h=np.zeros((len(psfc_flat),Nk))
561
562    #First half level is equal to the surface elevation
563
564    Z_h[:,-1] = topo_flat
565
566    # Other layers, from the bottom up:
567    for k in range(Nk-2,-1,-1):
568        Z_h[:,k] = Z_h[:,k+1]+(r_co2*T[:,k]/g)*(logPPRESS_h[:,k+1]-logPPRESS_h[:,k])
569        Z_f[:,k] = Z_h[:,k+1]+(r_co2*T[:,k]/g)*(1-PRESS_h[:,k]/PRESS_f[:,k])
570
571    #return the arrays
572    if lev_type=="full":
573        new_dim_f=np.append(psfc.shape,Nk-1)
574        return np.squeeze(Z_f.reshape(new_dim_f))
575    elif lev_type=="half" :
576        new_dim_h=np.append(psfc.shape,Nk)
577        return  np.squeeze(Z_h.reshape(new_dim_h))
578    #=====return the levels in Z coordinates [m]====
579    else:
580        raise Exception("""Altitudes levels type not recognized: use 'full' or 'half' """)
581
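# Illustrative usage sketch (not part of the original file): altitudes of the two
# full levels of a hypothetical 3-level grid, assuming an isothermal 150 K column,
# flat topography and the constants hard-coded in fms_Z_calc().
def _example_fms_Z_calc():
    ak = np.array([0.3, 1., 2.])      # assumed [Pa], non-zero model top
    bk = np.array([0., 0.5, 1.])      # assumed [none]
    psfc = 610.                       # assumed surface pressure [Pa]
    T = np.full(len(ak)-1, 150.)      # one temperature per full level [K]
    return fms_Z_calc(psfc, ak, bk, T, topo=0., lev_type='full')  # altitudes in [m]
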
582
583def akbk_loader(NLAY,data_dir='/u/mkahre/MCMC/data_files'):
584    """
585    Return the ak and bk values given a number of layers for standards resolutions
586    Default directory is /lou/s2n/mkahre/MCMC/data_files/
587    Args:
588        NLAY: the number of layers (float or integer)
589    Returns:
590        ak: 1st vertical coordinate parameter [Pa]
591        bk: 2nd vertical coordinate parameter [none]
592
593    *NOTE*    ak,bk have a size NLAY+1 since they define the position of the layer interfaces (half layers):
594              p_half = ak + bk*p_sfc
595    """
596
597    from netCDF4 import Dataset
598    NLAY=int(NLAY)
599    file=Dataset(data_dir+'/akbk_L%i.nc'%(NLAY), 'r', format='NETCDF4')
600    ak=file.variables['pk'][:]
601    bk=file.variables['bk'][:]
602    file.close()
603    return ak,bk
604
605
606def zonal_avg_P_lat(Ls,var,Ls_target,Ls_angle,symmetric=True):
607    """
608    Return the zonally averaged mean value of a pressure interpolated 4D variable.
609
610    Args:
611        Ls: 1D array of solar longitude of the input variable in degree (0->360)
612        var: a 4D variable var [time,levels,lat,lon] interpolated on the pressure levels (f_average_plevs file)
613        Ls_target: central solar longitude of interest.
614        Ls_angle:  requested window angle centered around Ls_target.  Expl: Ls_target = 90. and Ls_angle = 10. (the window will go from Ls 85 to Ls 95)
615        symmetric: a boolean (default =True) If True, and if the requested window is out of range, Ls_angle is reduced
616                                             If False, the time average is done on the data available
617    Returns:
618        The zonally and time-averaged field zpvar[level,lat]
619
620    Expl:  Ls_target= 90.
621           Ls_angle = 10.
622
623           ---> Nominally, the time average is done over solar longitudes      85 < Ls < 95 (10 degrees)
624
625           ---> If  symmetric =True  and the input data ranges from Ls 88 to 100,     88 < Ls < 92 (4  degrees, symmetric)
626                If  symmetric =False and the input data ranges from Ls 88 to 100,     88 < Ls < 95 (7  degrees, asymmetric)
627    *NOTE*
628
629    [Alex] as of 6/8/18, the routine will bin data from multiple Mars years if provided
630
631    """
632    #compute bounds from Ls_target and Ls_angle
633    Ls_min= Ls_target-Ls_angle/2.
634    Ls_max= Ls_target+Ls_angle/2.
635
636    if (Ls_min<0.):Ls_min+=360.
637    if (Ls_max>360.):Ls_max-=360.
638
639    #Initialize output array
640    zpvar=np.zeros((var.shape[1],var.shape[2])) #nlev, nlat
641
642    #check if the Ls of interest is within the data provided, raise an exception otherwise
643    if Ls_target <= Ls.min() or Ls_target >=Ls.max() :
644        raise Exception("Error \nNo data found, requested  data :       Ls %.2f <-- (%.2f)--> %.2f\nHowever, data in file only ranges      Ls %.2f <-- (%.2f)--> %.2f"%(Ls_min,Ls_target,Ls_max,Ls.min(),(Ls.min()+Ls.max())/2.,Ls.max()))
645
646
647    else : #If only some of the requested data is outside the ranges, process this data
648        if Ls_min <Ls.min() or Ls_max >Ls.max():
649            print(("In zonal_avg_P_lat() Warning: \nRequested  data ranging    Ls %.2f <-- (%.2f)--> %.2f"%(Ls_min,Ls_target,Ls_max)))
650            if symmetric: #Case 1: reduce the window
651                if Ls_min <Ls.min():
652                    Ls_min =Ls.min()
653                    Ls_angle=2*(Ls_target-Ls_min)
654                    Ls_max= Ls_target+Ls_angle/2.
655
656                if Ls_max >Ls.max():
657                    Ls_max =Ls.max()
658                    Ls_angle=2*(Ls_max-Ls_target)
659                    Ls_min= Ls_target-Ls_angle/2.
660
661                print(("Reshaping data ranging     Ls %.2f <-- (%.2f)--> %.2f"%(Ls_min,Ls_target,Ls_max)))
662            else: #Case 2: Use all data available
663                print(("I am only using            Ls %.2f <-- (%.2f)--> %.2f \n"%(max(Ls.min(),Ls_min),Ls_target,min(Ls.max(),Ls_max))))
664    count=0
665    #perform longitude average on the field
666    zvar= np.mean(var,axis=3)
667
668    for t in range(len(Ls)):
669    #special case Ls around Ls =0 (wrap around)
670        if (Ls_min<=Ls[t] <= Ls_max):
671            zpvar[:,:]=zpvar[:,:]+zvar[t,:,:]
672            count+=1
673
674    if  count>0:
675        zpvar/=count
676    return zpvar
677
678
679
680def alt_KM(press,scale_height_KM=8.,reference_press=610.):
681    """
682    Gives the approximate altitude in km for a given pressure
683    Args:
684        press: the pressure in [Pa]
685        scale_height_KM: a scale height in [km], (default is 8 km)
686        reference_press: reference surface pressure in [Pa], (default is 610 Pa)
687    Returns:
688        z_KM: the equivalent altitude for that pressure level in [km]
689
690    """
691    return -scale_height_KM*np.log(press/reference_press) # p to altitude in km
692
693def press_pa(alt_KM,scale_height_KM=8.,reference_press=610.):
694    """
695    Gives the approximate pressure in Pa for a given altitude
696    Args:
697        alt_KM: the altitude in  [km]
698        scale_height_KM: a scale height in [km], (default is 8 km)
699        reference_press: reference surface pressure in [Pa], (default is 610 Pa)
700    Returns:
701         press_pa: the equivalent pressure at that altitude in [Pa]
702
703    """
704    return reference_press*np.exp(-alt_KM/scale_height_KM) # altitude in km to pressure in Pa
705
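# Illustrative usage sketch (not part of the original file): alt_KM() and press_pa()
# are inverses of each other for the same scale height and reference pressure.
def _example_scale_height_roundtrip():
    p = 61.0             # assumed pressure [Pa]
    z = alt_KM(p)        # ~18.4 km with the default 8 km scale height
    return press_pa(z)   # recovers ~61 Pa
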
706def lon180_to_360(lon):
707    """
708    Transform a float or an array from the -180/+180 coordinate system to 0-360
709    Args:
710        lon: a float, 1D or 2D array of longitudes in the -180/+180 coordinate system
711    Returns:
712        lon: the equivalent longitudes in the 0-360 coordinate system
713
714    """
715    lon=np.array(lon)
716    if len(np.atleast_1d(lon))==1: #lon180 is a float
717        if lon<0:lon+=360
718    else:                            #lon180 is an array
719        lon[lon<0]+=360
720    return lon
721
722def lon360_to_180(lon):
723    """
724    Transform a float or an array from the 0-360 coordinate system to -180/+180
725    Args:
726        lon: a float, 1D or 2D array of longitudes in the 0-360 coordinate system
727    Returns:
728        lon: the equivalent longitudes in the -180/+180 coordinate system
729
730    """
731    lon=np.array(lon)
732    if len(np.atleast_1d(lon))==1:   #lon is a float
733        if lon>180:lon-=360
734    else:                            #lon is an array
735        lon[lon>180]-=360
736    return lon
737
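# Illustrative usage sketch (not part of the original file): converting a few
# sample longitudes between the two conventions.
def _example_lon_conversions():
    print(lon180_to_360(np.array([-90., 0., 90.])))   # -> [270.   0.  90.]
    print(lon360_to_180(np.array([0., 180., 270.])))  # -> [  0. 180. -90.]
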
738
739def second_hhmmss(seconds,lon_180=0.,show_mmss=True):
740    """
741    Given a time in seconds, return the Local True Solar Time at a given longitude
742    Args:
743        seconds: a float, the time in seconds
744        lon_180: a float, the longitude in a -/+180 coordinate
745        show_mmss: returns min and second if true
746    Returns:
747        hours: float, the local time or  (hours,minutes, seconds)
748
749    """
750    hours = seconds // (60*60)
751    seconds %= (60*60)
752    minutes = seconds // 60
753    seconds %= 60
754    #Add timezone offset (1hr/15 degree)
755    hours=np.mod(hours+lon_180/15.,24)
756
757    if show_mmss:
758        return int(hours), int(minutes), int(seconds)
759    else:
760        return int(hours)
761
762
763def sol2LTST(time_sol,lon_180=0.,show_minute=False):
764    """
765    Given the time in days, return the Local true Solar Time at a certain longitude
766    Args:
767        time_sol: a float, the time, eg. sols 2350.24
768        lon_180: a float, the longitude in a -/+180 coordinate
769        show_minute: show minutes if true, otherwise show whole hours
770    Returns:
771        hours: float, the local time or  (hours,minutes, seconds)
772
773    """
774    return second_hhmmss(time_sol*86400.,lon_180,show_minute)
775
776def space_time(lon,timex, varIN,kmx,tmx):
777    """
778    Obtain west and east propagating waves. This is a Python implementation of John Wilson's  space_time routine by Alex
779    Args:
780        lon:   longitude array in [degrees]   0->360
781        timex: 1D time array in units of [day]. Expl 1.5 days sampled every hour is  [0/24,1/24, 2/24,.. 1,.. 1.5]
782        varIN: input array for the Fourier analysis.
783               First axis must be longitude and last axis must be time.  Expl: varIN[lon,time] varIN[lon,lat,time],varIN[lon,lev,lat,time]
784        kmx: an integer for the number of longitudinal wavenumber to extract   (max allowable number of wavenumbers is nlon/2)
785        tmx: an integer for the number of tidal harmonics to extract           (max allowable number of harmonics  is nsamples/2)
786
787    Returns:
788        ampe:   East propagating wave amplitude [same unit as varIN]
789        ampw:   West propagating wave amplitude [same unit as varIN]
790        phasee: East propagating phase [degree]
791        phasew: West propagating phase [degree]
792
793
794
795    *NOTE*  1. ampe,ampw,phasee,phasew have dimensions [kmx,tmx] or [kmx,tmx,lat] [kmx,tmx,lev,lat] etc...
796            2. The x and y axis may be constructed as follows to display the eastward and westward modes:
797
798                klon=np.arange(0,kmx)  [wavenumber]  [cycle/sol]
799                ktime=np.append(-np.arange(tmx,0,-1),np.arange(0,tmx))
800                KTIME,KLON=np.meshgrid(ktime,klon)
801
802                amplitude=np.concatenate((ampw[:,::-1], ampe), axis=1)
803                phase=    np.concatenate((phasew[:,::-1], phasee), axis=1)
804
805    """
806
807    dims= varIN.shape             #get input variable dimensions
808
809    lon_id= dims[0]    # lon
810    time_id= dims[-1]  # time
811    dim_sup_id=dims[1:-1] #additional dimensions stacked in the middle
812    jd= int(np.prod( dim_sup_id))     #jd is the total number of variables in the middle dimensions if varIN is more than 3D
813
814    varIN= np.reshape(varIN, (lon_id, jd, time_id) )   #flatten the middle dimensions if any
815
816    #Initialize 4 empty arrays
817    ampw, ampe,phasew,phasee =[np.zeros((kmx,tmx,jd)) for _x in range(0,4)]
818
819    #TODO not implemented yet: zamp,zphas=[np.zeros((jd,tmx)) for _x in range(0,2)]
820
821    tpi= 2*np.pi
822    argx= lon * 2*np.pi/360  #normalize longitude array
823    rnorm= 2./len(argx)
824
825    arg= timex * 2* np.pi
826    rnormt= 2./len(arg)
827
828    #
829    for kk in range(0,kmx):
830        progress(kk,kmx)
831        cosx= np.cos( kk*argx )*rnorm
832        sinx= np.sin( kk*argx )*rnorm
833
834    #   Inner product to calculate the Fourier coefficients of the cosine
835    #   and sine contributions of the spatial variation
836        acoef = np.dot(varIN.T,cosx)
837        bcoef = np.dot(varIN.T,sinx)
838
839    # Now get the cos/sine series expansions of the temporal
840    #variations of the acoef and bcoef spatial terms.
841        for nn in range(0,tmx):
842            cosray= rnormt*np.cos(nn*arg )
843            sinray= rnormt*np.sin(nn*arg )
844
845            cosA=  np.dot(acoef.T,cosray)
846            sinA=  np.dot(acoef.T,sinray)
847            cosB=  np.dot(bcoef.T,cosray)
848            sinB=  np.dot(bcoef.T,sinray)
849
850
851            wr= 0.5*(  cosA - sinB )
852            wi= 0.5*( -sinA - cosB )
853            er= 0.5*(  cosA + sinB )
854            ei= 0.5*(  sinA - cosB )
855
856            aw= np.sqrt( wr**2 + wi**2 )
857            ae= np.sqrt( er**2 + ei**2)
858            pe= np.arctan2(ei,er) * 180/np.pi
859            pw= np.arctan2(wi,wr) * 180/np.pi
860
861            pe= np.mod( -np.arctan2(ei,er) + tpi, tpi ) * 180/np.pi
862            pw= np.mod( -np.arctan2(wi,wr) + tpi, tpi ) * 180/np.pi
863
864            ampw[kk,nn,:]= aw.T
865            ampe[kk,nn,:]= ae.T
866            phasew[kk,nn,:]= pw.T
867            phasee[kk,nn,:]= pe.T
868    #End loop
869
870
871    ampw=   np.reshape( ampw,    (kmx,tmx)+dim_sup_id )
872    ampe=   np.reshape( ampe,    (kmx,tmx)+dim_sup_id )
873    phasew= np.reshape( phasew,  (kmx,tmx)+dim_sup_id )
874    phasee= np.reshape( phasee,  (kmx,tmx)+dim_sup_id )
875
876    #TODO implement zonal mean: zamp,zphas,stamp,stphs
877    '''
878    #  varIN= reshape( varIN, dims );
879
880    #if nargout < 5;  return;  end ---> only  ampe,ampw,phasee,phasew are requested
881
882
883    #   Now calculate the axisymmetric tides  zamp,zphas
884
885    zvarIN= np.mean(varIN,axis=0)
886    zvarIN= np.reshape( zvarIN, (jd, time_id) )
887
888    arg= timex * 2* np.pi
889    arg= np.reshape( arg, (len(arg), 1 ))
890    rnorm= 2/time_id
891
892    for nn in range(0,tmx):
893        cosray= rnorm*np.cos( nn*arg )
894        sinray= rnorm*np.sin( nn*arg )
895
896        cosser=  np.dot(zvarIN,cosray)
897        sinser=  np.dot(zvarIN,sinray)
898
899        zamp[:,nn]= np.sqrt( cosser[:]**2 + sinser[:]**2 ).T
900        zphas[:,nn]= np.mod( -np.arctan2( sinser, cosser )+tpi, tpi ).T * 180/np.pi
901
902
903    zamp=  zamp.T #np.permute( zamp,  (2 1) )
904    zphas= zphas.T #np.permute( zphas, (2,1) )
905
906    if len(dims)> 2:
907        zamp=  np.reshape( zamp,  (tmx,)+dim_sup_id )
908        zamp=  np.reshape( zphas, (tmx,)+dim_sup_id )
909
910
911
912    #if nargout < 7;  return;  end
913
914    sxx= np.mean(varIN,ndims(varIN));
915    [stamp,stphs]= amp_phase( sxx, lon, kmx );
916
917    if len(dims)> 2;
918        stamp= reshape( stamp, [kmx dims(2:end-1)] );
919        stphs= reshape( stphs, [kmx dims(2:end-1)] );
920    end
921
922    '''
923
924    return ampe,ampw,phasee,phasew
925
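# Illustrative usage sketch (not part of the original file): feed space_time() a
# synthetic eastward-propagating, wavenumber-1 diurnal wave on a hypothetical
# 24-longitude grid sampled hourly over two sols; the signal should appear in
# ampe at wavenumber 1 and frequency 1 cycle/sol with amplitude ~1.
def _example_space_time():
    lon = np.linspace(0., 360., num=24, endpoint=False)
    timex = np.arange(0., 2., 1./24.)                  # 2 sols sampled hourly [day]
    LON, T = np.meshgrid(lon, timex, indexing='ij')    # shapes (24, 48)
    varIN = np.cos(2.*np.pi*(T - LON/360.))            # cos(omega*t - k*x), eastward
    ampe, ampw, phasee, phasew = space_time(lon, timex, varIN, kmx=3, tmx=3)
    return ampe[1, 1], ampw[1, 1]                      # ~1 and ~0 respectively
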
926
927def give_permission(filename):
928    '''
929    # NAS system only: set group permission to the file
930    '''
931    import subprocess
932    import os
933
934    try:
935        subprocess.check_call(['setfacl -v'],shell=True,stdout=open(os.devnull, "w"),stderr=open(os.devnull, "w")) #catch error and standard output
936        cmd_txt='setfacl -R -m g:s0846:r '+filename
937        subprocess.call(cmd_txt,shell=True)
938    except subprocess.CalledProcessError:
939        pass
940
941
942def progress(k,Nmax):
943    """
944    Display a progress bar to monitor heavy calculations.
945    Args:
946        k: current iteration of the outer loop
947        Nmax: max iteration of the outer loop
948    Returns:
949        Running... [#---------] 10.64 %
950    """
951    import sys
952    from math import ceil #round to the 2nd digit
953    progress=float(k)/Nmax
954    barLength = 10 # Modify this to change the length of the progress bar
955    status = ""
956    if isinstance(progress, int):
957        progress = float(progress)
958    if not isinstance(progress, float):
959        progress = 0
960        status = "error: progress var must be float\r\n"
961    if progress < 0:
962        progress = 0
963        status = "Halt...\r\n"
964    if progress >= 1:
965        progress = 1
966        status = "Done...\r\n"
967    block = int(round(barLength*progress))
968    text = "\rRunning... [{0}] {1} {2}%".format( "#"*block + "-"*(barLength-block), ceil(progress*100*100)/100, status)
969    sys.stdout.write(text)
970    sys.stdout.flush()
971
972
973def dvar_dh(arr, h):
974    '''
975    Differentiate an array A(dim1,dim2,dim3...) with respect to h.  h and dim1 must have the same length and be the first dimension.
976    Args:
977        arr:   an array of dimension n
978        h:     the dimensions, eg Z, P, lat, lon
979
980    Returns:
981        d_arr: the array differentiated with respect to h
982
983    *Example*
984     #Compute dT/dz where T[time,lev,lat,lon] is the temperature and Zkm are the level heights in Km:
985     #First we transpose t so the vertical dimension comes first as T[LEV,time,lat,lon] and then we transpose back to get dTdz[time,LEV,lat,lon].
986     dTdz=dvar_dh(t.transpose([1,0,2,3]),Zkm).transpose([1,0,2,3])
987
988    '''
989
990
991    d_arr = np.copy(arr)
992    reshape_shape=np.append([arr.shape[0]-2],[1 for i in range(0,arr.ndim -1)]) #arr.shape[i]
993    d_arr[0,...] = (arr[1,...]-arr[0,...])/(h[1]-h[0])
994    d_arr[-1,...] = (arr[-1,...]-arr[-2,...])/(h[-1]-h[-2])
995    d_arr[1:-1,...] = (arr[2:,...]-arr[0:-2,...])/(np.reshape(h[2:]-h[0:-2],reshape_shape))
996    return d_arr
997
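# Illustrative usage sketch (not part of the original file): differentiate a simple
# synthetic 2D profile T(z,column) with dvar_dh(); the two columns are linear in z,
# so the derivative should be ~2 and ~3 everywhere.
def _example_dvar_dh():
    z = np.array([0., 1., 2., 4.])         # assumed heights [km], first dimension
    T = np.stack([2.*z, 3.*z], axis=1)     # two columns with dT/dz = 2 and 3
    return dvar_dh(T, z)
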
998#=========================================================================
999#=============Wrapper for creation of netcdf files========================
1000#=========================================================================
1001
1002class Ncdf(object):
1003    '''
1004    Alex K.
1005    NetCdf wrapper for quick archiving of data into netcdf format
1006
1007    USAGE:
1008
1009    from netcdf_wrapper import Ncdf
1010
1011    Fgeo= 0.03 #W/m2, a constant
1012    TG=np.ones((24,8)) #ground temperature
1013
1014    #---create file---
1015    filename="/lou/s2n/mkahre/MCMC/analysis/working/myfile.nc"
1016    description="results from new simulation, Alex 01-01-19"
1017    Log=Ncdf(filename,description)
1018
1019    #---Save the constant to the file---
1020    Log.add_constant('Fgeo',Fgeo,"geothermal flux","W/m2")
1021
1022    #---Save the TG array to the file---
1023    Log.add_dimension('Nx',8)
1024    Log.add_dimension('time',24)
1025
1026    Log.log_variable('TG',TG,('time','Nx'),'soil temperature','K')
1027
1028    Log.close()
1029
1030
1031    '''
1032    def __init__(self,filename=None,description_txt="",action='w'):
1033        if filename:
1034            if filename[-3:]!=".nc":
1035            #assume that only path is provided so make a name for the file
1036                import datetime;now = datetime.datetime.now()
1037                filename=filename+\
1038                '/run_%02d-%02d-%04d_%i-%i-%i.nc'%(now.day,now.month,now.year,now.hour,now.minute,now.second)
1039        else:   #create a default file name  if path and filename are not provided
1040            import os #use a default path if not provided
1041            pathname=os.getcwd()+'/'
1042            import datetime;now = datetime.datetime.now()
1043            filename=pathname+\
1044            'run_%02d-%02d-%04d_%i-%i-%i.nc'%(now.day,now.month,now.year,now.hour,now.minute,now.second)
1045        self.filename=filename
1046        from netCDF4 import Dataset
1047        if action=='w':
1048            self.f_Ncdf = Dataset(filename, 'w', format='NETCDF4')
1049            self.f_Ncdf.description = description_txt
1050        elif action=='a': #append to file
1051            self.f_Ncdf = Dataset(filename, 'a', format='NETCDF4')
1052        #create dictionaries to hold dimensions and variables
1053        self.dim_dict=dict()
1054        self.var_dict=dict()
1055        print((filename+ " was created"))
1056
1057    def close(self):
1058        self.f_Ncdf.close()
1059        print((self.filename+" was closed"))
1060
1061    def add_dimension(self,dimension_name,length):
1062        self.dim_dict[dimension_name]= self.f_Ncdf.createDimension(dimension_name,length)
1063
1064    def print_dimension(self):
1065        print((list(self.dim_dict.items())))
1066    def print_variable(self):
1067        print((list(self.var_dict.keys())))
1068
1069    def add_constant(self,variable_name,value,longname_txt="",unit_txt=""):
1070        if not any('constant' in s for s in list(self.dim_dict.keys())):
1071            self.add_dimension('constant',1)
1072        longname_txt =longname_txt+' (%g)'%(value)   #add the value to the longname
1073        self.def_variable(variable_name,('constant'),longname_txt,unit_txt)
1074        self.var_dict[variable_name][:]=value
1075
1076    def def_variable(self,variable_name,dim_array,longname_txt="",unit_txt=""):
1077        self.var_dict[variable_name]= self.f_Ncdf.createVariable(variable_name,'f4',dim_array)
1078        self.var_dict[variable_name].units=unit_txt
1079        self.var_dict[variable_name].long_name=longname_txt
1080        self.var_dict[variable_name].dim_name=dim_array
1081
1082    def log_variable(self,variable_name,DATAin,dim_array,longname_txt="",unit_txt=""):
1083        if not any(variable_name in s for s in list(self.var_dict.keys())):
1084            self.def_variable(variable_name,dim_array,longname_txt,unit_txt)
1085        self.var_dict[variable_name].long_name=longname_txt
1086        self.var_dict[variable_name].dim_name=dim_array
1087        self.var_dict[variable_name].units=unit_txt
1088        self.var_dict[variable_name][:]=DATAin
1089
1090#=========================================================================
1091#=======================vertical grid utilities===========================
1092#=========================================================================
1093def gauss_profile(x, alpha,x0=0.):
1094    """ Return the Gaussian line shape at x. This can be used to generate a bell-shaped mountain."""
1095    return np.sqrt(np.log(2) / np.pi) / alpha\
1096                             * np.exp(-((x-x0) / alpha)**2 * np.log(2))
1097
1098def compute_uneven_sigma(num_levels, N_scale_heights, surf_res, exponent, zero_top ):
1099    """
1100    Construct an initial array of sigma based on the number of levels, an exponent
1101    Args:
1102        num_levels: the number of levels
1103        N_scale_heights: the number of scale heights to the top of the model (e.g scale_heights =12.5 ~102km assuming 8km scale height)
1104        surf_res: the resolution at the surface
1105        exponent: an exponent to increase the thickness of the levels
1106        zero_top: if True, force the top pressure boundary (in N=0) to 0 Pa
1107    Returns:
1108        b: an array of sigma layers
1109
1110    """
1111    b=np.zeros(int(num_levels)+1)
1112    for k in range(0,num_levels):
1113        zeta = 1.-k/float(num_levels) #zeta decreases with k
1114        z  = surf_res*zeta + (1.0 - surf_res)*(zeta**exponent)
1115        b[k] = np.exp(-z*N_scale_heights)
1116    b[-1] = 1.0
1117    if(zero_top):  b[0] = 0.0
1118    return b
1119
1120
1121def transition( pfull, p_sigma=0.1, p_press=0.05):
1122    """
1123    Return the transition factor to construct the ak and bk
1124    Args:
1125        pfull: the pressure in Pa
1126        p_sigma: the pressure level where the vertical grid starts transitioning from sigma to pressure
1127        p_press: the pressure level above which the vertical grid is pure (constant) pressure
1128    Returns:
1129        t: the transition factor =1 for pure sigma, 0 for pure pressure and 0<t<1 for the transition
1130
1131    NOTE:
1132    In the FV code, full pressures are computed from:
1133                       del(phalf)
1134         pfull = -----------------------------
1135                 log(phalf(k+1/2)/phalf(k-1/2))
1136    """
1137    t=np.zeros_like(pfull)
1138    for k in range(0,len(pfull)):
1139        if( pfull[k] <= p_press):
1140            t[k] = 0.0
1141        elif ( pfull[k] >= p_sigma) :
1142            t[k] = 1.0
1143        else:
1144            x  = pfull[k]    - p_press
1145            xx = p_sigma - p_press
1146            t[k] = (np.sin(0.5*np.pi*x/xx))**2
1147
1148    return t
1149
1150
1151def swinbank(plev, psfc, ptrans=1.):
1152    """
1153    Compute ak and bk values with a transition based on Swinbank
1154    Args:
1155        plev: the pressure levels in Pa
1156        psfc: the surface pressure in Pa
1157        ptrans:the transition pressure in Pa
1158    Returns:
1159         aknew, bknew,ks: the coefficients for the new layers
1160    """
1161
1162    ktrans= np.argmin(np.abs( plev- ptrans) ) # ks= number of pure pressure levels
1163    km= len(plev)-1
1164
1165    aknew=np.zeros(len(plev))
1166    bknew=np.zeros(len(plev))
1167
1168    #   pnorm= 1.e5;
1169    pnorm= psfc
1170    eta= plev / pnorm
1171
1172    ep= eta[ktrans+1]       #  ks= number of pure pressure levels
1173    es= eta[-1]
1174    rnorm= 1. / (es-ep)**2
1175
1176    #   Compute alpha, beta, and gamma using Swinbank's formula
1177    alpha = (ep**2 - 2.*ep*es) / (es-ep)**2
1178    beta  =        2.*ep*es**2 / (es-ep)**2
1179    gamma =        -(ep*es)**2 / (es-ep)**2
1180
1181    #   Pure Pressure levels
1182    aknew= eta * pnorm
1183
1184    #  Hybrid pressure-sigma levels
1185    kdex= list(range(ktrans+1,km))
1186    aknew[kdex] = alpha*eta[kdex] + beta + gamma/eta[kdex]
1187    aknew[kdex]= aknew[kdex] * pnorm
1188    aknew[-1]= 0.0
1189
1190    bknew[kdex] = (plev[kdex] - aknew[kdex])/psfc
1191    bknew[-1] = 1.0
1192
1193    #find the transition level ks where (bk[ks]>0)
1194    ks=0
1195    while bknew[ks]==0. :
1196        ks+=1
1197    #ks is the value that would be used in Fortran indexing in fv_eta.f90
1198    return  aknew, bknew,ks
1199
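# Illustrative usage sketch (not part of the original file): build a hypothetical
# 20-level hybrid grid by chaining compute_uneven_sigma() and swinbank().
# All parameter values below are arbitrary placeholders.
def _example_vertical_grid():
    nlev = 20
    sigma = compute_uneven_sigma(nlev, N_scale_heights=7., surf_res=0.1,
                                 exponent=2.5, zero_top=True)
    psfc = 610.                                    # assumed reference surface pressure [Pa]
    plev = sigma*psfc                              # target half-level pressures [Pa]
    ak, bk, ks = swinbank(plev, psfc, ptrans=1.)   # ak [Pa], bk [none], ks pure-pressure levels
    return ak, bk, ks
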
1200def printvar(infile):
1201    # Get all variable names for a netCDF file ---
1202    print("Variables:")
1203    variableNames = list(infile.variables.keys());
1204    print(variableNames)
1205    print("\n")
1206
1207def getind(myloc,field):
1208    # get a specific index in the lat, lon, time or pfull 1D field
1209    res=[]
1210    for loc in np.atleast_1d(myloc):
1211        myind=np.where(abs(field[:]-loc)==min(abs(field[:]-loc)))[0][0]
1212        res.append(myind)
1213    res=np.atleast_1d(res)
1214    if len(res)== 1: return res[0]
1215    return np.atleast_1d(res)
1216
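# Illustrative usage sketch (not part of the original file): find the indices of
# the grid points closest to requested values in a hypothetical 10-degree
# latitude axis with getind().
def _example_getind():
    lat = np.arange(-90., 91., 10.)
    print(getind(42., lat))          # single value -> single index (13, i.e. lat=40)
    print(getind([-45., 45.], lat))  # several values -> array of indices
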
1217
1218# getinds=np.vectorize(getind, excluded="field")
1219
1220def getvar(nc1,var,times=None,tim=None,longitudes=None,lon=None,t_mean=False,l_mean=False):
1221    if var == "latitude" and var not in nc1.variables: var="lat"
1222    if var == "longitude" and var not in nc1.variables: var="lon"
1223    if var == "Time" and var not in nc1.variables: var="time_counter"
1224    if var == "aire" and var not in nc1.variables: var="area"
1225    if var == "phisinit" and var not in nc1.variables: var="phisfi"
1226
1227    myvar = nc1.variables[var][:]
1228    try:
1229        t=getind(times,tim)
1230        if len(t)==2:
1231            myvar = myvar[t[0]:t[1]+1]
1232        else:
1233            myvar = myvar[t]
1234    except:
1235        # print("no time selected")
1236        pass
1237    try:
1238        l=getind(longitudes,lon)
1239        if len(l)==2:
1240            myvar = myvar[...,l[0]:l[1]+1]
1241        else:
1242            myvar = myvar[...,l]
1243    except:
1244        # print("no longitude selected")
1245        pass
1246    if t_mean: myvar=np.mean(myvar,axis=0) # temporal mean
1247    if l_mean: myvar=np.mean(myvar,axis=-1) # longitudinal mean
1248    print(np.shape(myvar))
1249    try:
1250        if var == "altitude":
1251            units = nc1.variables[var].units
1252            if units == "m":
1253                myvar/=1000 # plot in km
1254    except:
1255        pass
1256    return myvar
1257
1258def getarea(filename2,var,tint):
1259  from netCDF4 import Dataset
1260  nc2 = Dataset(filename2, 'r')
1261  myvar = nc2.variables["aire"][:]
1262  print(np.shape(myvar))
1263  return myvar