import numpy as np
import math
pi=math.pi
import matplotlib
matplotlib.use('TkAgg')

name="../Xhistins2015_short"

def fmtsci(x, pos):
    a, b = '{:.1e}'.format(x).split('e')
    b = int(b)
    return r'${} \times 10^{{{}}}$'.format(a, b)

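
# fmtsci follows the (value, position) signature expected by matplotlib's
# FuncFormatter, so it can be attached directly to an axis. A minimal usage
# sketch (the data below are synthetic, purely for illustration):
def _example_fmtsci_usage():
    import matplotlib.pyplot as plt
    from matplotlib.ticker import FuncFormatter
    x = np.linspace(0., 10., 100)
    y = 1.e4*np.exp(x)                                    # synthetic, rapidly growing signal
    fig, ax = plt.subplots()
    ax.plot(x, y)
    ax.yaxis.set_major_formatter(FuncFormatter(fmtsci))   # ticks rendered as $a \times 10^{b}$
    plt.show()
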
def calcaire(lat,lon):
    # Calculate the area of each grid point assuming a regular grid
    nblat=np.size(lat)
    nblon=np.size(lon)
    radius=3390.
    # Upper (sup) and lower (inf) boundaries of the latitude bands
    latsup=np.zeros(nblat,dtype='f')
    latinf=np.zeros(nblat,dtype='f')
    # Area of each latitudinal band
    peri=np.zeros(nblat,dtype='f')   # (declared but unused)
    alpha=np.zeros(nblat,dtype='f')

    for i in range(nblat-1):
        latsup[i+1]=(lat[i]+lat[i+1])/2.
        latinf[i]=(lat[i+1]+lat[i])/2.
    latsup[0]=-90 #lat[0]
    latinf[nblat-1]=90 #lat[nblat-1]

    print(latsup)
    print(latinf)
    # Area of a latitudinal band:
    for i in range(nblat):
        alpha[i]=abs(np.sin(latsup[i]*pi/180.)-np.sin(latinf[i]*pi/180.))*2.*radius**2*pi
    airetot=sum(alpha)
    return alpha,airetot

def calcairepluto(lat,lon):
    # Calculate the area of each grid point assuming a regular grid
    nblat=np.size(lat)
    nblon=np.size(lon)
    radius=1187.
    # Upper (sup) and lower (inf) boundaries of the latitude bands
    latsup=np.zeros(nblat,dtype='f')
    latinf=np.zeros(nblat,dtype='f')
    # Area of each latitudinal band
    alpha=np.zeros(nblat,dtype='f')

    for i in range(nblat-1):
        latsup[i+1]=(lat[i]+lat[i+1])/2.
        latinf[i]=(lat[i+1]+lat[i])/2.
    latsup[0]=-90 #lat[0]
    latinf[nblat-1]=90 #lat[nblat-1]
    # Area of a latitudinal band:
    for i in range(nblat):
        alpha[i]=abs(np.sin(latsup[i]*pi/180.)-np.sin(latinf[i]*pi/180.))*2.*radius**2*pi
    airetot=sum(alpha)
    return alpha,airetot

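
# Both routines return the per-band areas and their total; a quick sanity check
# is that the total matches the analytic sphere area 4*pi*R**2. A minimal sketch
# with an arbitrary regular grid:
def _example_calcaire_usage():
    lat=np.arange(-87.5,90.,5.)      # band centers
    lon=np.arange(0.,360.,5.625)
    alpha,airetot=calcaire(lat,lon)
    print(airetot, 4.*pi*3390.**2)   # the two numbers should agree closely
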
def switchlon(arr):
    # Shift the longitudes to put TR at the center of the map
    vec=np.shape(arr)
    myvar=np.zeros(vec,dtype='f')
    # i lat : unchanged
    # j lon : shifted by half the number of longitudes
    for i in range(vec[0]):
        for j in range(vec[1]):
            if j < int(vec[1]/2.) :
                myvar[i,j]=arr[i,j+int(vec[1]/2)]
            else:
                myvar[i,j]=arr[i,j-int(vec[1]/2)]
    return myvar

def switchlon3D(arr):
    # Shift the longitudes to put TR at the center of the map (3D version)
    vec=np.shape(arr)
    myvar=np.zeros(vec,dtype='f')
    # i lat : unchanged
    # j lon : shifted by half the number of longitudes
    for i in range(vec[1]):
        for j in range(vec[2]):
            if j < int(vec[2]/2.) :
                myvar[:,i,j]=arr[:,i,j+int(vec[2]/2)]
            else:
                myvar[:,i,j]=arr[:,i,j-int(vec[2]/2)]
    return myvar

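
# The same longitude shift can be written without explicit loops. A minimal
# loop-free sketch using np.roll (for an even number of longitudes this matches
# switchlon/switchlon3D; the function name here is just illustrative):
def switchlon_roll(arr,lon_axis=-1):
    nlon=arr.shape[lon_axis]
    return np.roll(arr,nlon//2,axis=lon_axis)
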
def extractpal(pal,lev):
    import matplotlib as mpl
    cmap = mpl.cm.get_cmap(pal)
    #print '\nlevpal=',lev
    #rgb is a vector with nbl colors
    nbl=np.size(lev)
    rgb=[cmap(0)[0:3]]
    for i in range(nbl-1):
        #aaa=((lev[i+1]+lev[i])/2.-lev[0])/(lev[-1]-lev[0])
        aaa=lev[i+1]
        #print 'index weight / color =',aaa,cmap(aaa)[0:3]
        rgb.append(cmap(aaa)[0:3])
    #print '\nsize rgb=',np.shape(rgb)
    return rgb

def make_colormap(seq):
    """Return a LinearSegmentedColormap
    seq: a sequence of floats and RGB-tuples. The floats should be increasing
    and in the interval (0,1).
    """
    import matplotlib.colors as colors

    seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
    print('\nNew sequence=',seq)
    cdict = {'red': [], 'green': [], 'blue': []}
    for i, item in enumerate(seq):
        if isinstance(item, float):
            r1, g1, b1 = seq[i - 1]
            r2, g2, b2 = seq[i + 1]
            #print "i,item=",i,item,r1, g1, b1,r2, g2, b2
            cdict['red'].append([item, r1, r2])
            cdict['green'].append([item, g1, g2])
            cdict['blue'].append([item, b1, b2])
    #print 'cdict=',cdict
    return colors.LinearSegmentedColormap('CustomMap', cdict)

def getcol(lev,myc):
    ## myc : palette color, dimension n
    ## lev and myc must both be of dimension n
    nbl=np.size(lev)
    # Compute the normalized position of each tick
    onticks=[0] #[lev[0]]
    #onticks2=np.linspace(0,1,14)
    for i in range(nbl-1):
        aaa=(lev[i+1]-lev[0])/(lev[-1]-lev[0])
        onticks.append(aaa)
    print('\nlevels asked=',lev)
    print('\nlevels ticks=',onticks)
    # compute the sequence of colors
    gg=[myc[0]]
    for tt in range(nbl-2):
        gg.append(onticks[tt+1])
        gg.append(myc[tt+1])
    #gg.append(onticks[nbl])
    #print '\nsequence=',gg
    # cmap
    rvb=make_colormap(gg)
    return rvb

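
# A minimal sketch of how extractpal, getcol and make_colormap might be chained
# into a colormap whose color breaks follow an uneven set of contour levels.
# The palette name is arbitrary, and passing normalized levels to extractpal is
# an assumption based on the commented-out line in that routine:
def _example_getcol_usage():
    lev=np.array([0.,1.,2.,5.,10.,20.,50.])     # illustrative contour levels
    levnorm=(lev-lev[0])/(lev[-1]-lev[0])       # fractions in [0,1]
    myc=extractpal('viridis',levnorm)           # one RGB tuple per level
    rvb=getcol(lev,myc)                         # LinearSegmentedColormap with breaks at lev
    return rvb
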
def name_regions():
    # lon,lat,name,rotation,font,alignment
    font=18
    i=2
    j=4
    mylist=[]
    mylist.append([72,-43,'Hellas',0,font,'center'])
    mylist.append([90,-15,'Tyrrhena\nTerra',0,font,'center'])
    mylist.append([110,-28,'Hesperia\nPlanum',0,font,'center'])
    mylist.append([115,-50,'Promethei\nTerra',0,font,'center'])
    mylist.append([155,-45,'Cimmeria\nTerra',0,font,'center'])
    mylist.append([195,-40,'Sirenum\nTerra',0,font,'center'])
    mylist.append([63,8,'Syrtis\nMajor',0,font-j,'center'])
    #mylist.append([220,-10,'Arsia\nMons',0,font-j,'center'])
    mylist.append([242,40,'Alba\nPatera',0,font-j,'center'])
    mylist.append([285,35,'Tempe\nTerra',0,font-i,'center'])
    mylist.append([41,-3,'Sabaea\nTerra',0,font,'center'])
    mylist.append([150,12,'Elysium Planitia',0,font,'center'])
    mylist.append([120,50,'Utopia Planitia',0,font,'center'])
    #mylist.append([280,-8,'Valles Marineris',-10,font-3,'center'])
    mylist.append([220,-25,'Daedalia\nPlanum',0,font,'center'])
    mylist.append([232,-40,'Icaria\nPlanum',0,font-j,'center'])
    mylist.append([330,45,'Acidalia\nPlanitia',0,font,'center'])
    mylist.append([315,25,'Chryse\nPlanitia',0,font-i,'center'])
    mylist.append([90,12,'Isidis\nPlanitia',0,font-j,'center'])
    mylist.append([185,25,'Amazonis\nPlanitia',0,font,'center'])
    mylist.append([190,50,'Arcadia\nPlanitia',0,font,'center'])
    mylist.append([20,30,'Arabia\nTerra',0,font,'center'])
    mylist.append([17,-45,'Noachis\nTerra',0,font,'center'])
    mylist.append([260,5,'Tharsis',0,font-i,'center'])
    mylist.append([315,0,'Xanthe\nTerra',0,font-i,'center'])
    mylist.append([270,-25,'Solis',0,font-j,'center'])
    mylist.append([255,-10,'Syria',0,font-j,'center'])
    mylist.append([270,-15,'Sinai',0,font-j,'center'])
    mylist.append([315,-50,'Argyre',0,font,'center'])
    mylist.append([270,-55,'Aonia\nTerra',0,font,'center'])
    #mylist.append([270,-45,'Thaumasia\nHighlands',0,font-i,'center'])
    #mylist.append([250,-15,'Claritas Fossae',-80,font-3,'center'])

    return mylist

def getwinds(lon,lat,vecx,vecy,svx,svy,scale,width,val):
    import matplotlib.pyplot as mpl
    angle='uv' # 'xy'
    color='black' # arrow color
    pivot='mid' # arrow pivots about the middle of the box. Alternative : 'tip'
    linewidths=0.5 # thickness of the arrow outline
    edgecolors='k' # color of the arrow outline

    # *scale*: [ *None* | float ]
    # Data units per arrow length unit, e.g., m/s per plot width; a smaller
    # scale parameter makes the arrow longer. If *None*, a simple
    # autoscaling algorithm is used, based on the average vector length
    # and the number of vectors. The arrow length unit is given by
    # the *scale_units* parameter

    # *scale_units*: *None*, or any of the *units* options.
    # For example, if *scale_units* is 'inches', *scale* is 2.0, and
    # ``(u,v) = (1,0)``, then the vector will be 0.5 inches long.
    # If *scale_units* is 'width', then the vector will be half the width
    # of the axes.

    # If *scale_units* is 'x' then the vector will be 0.5 x-axis
    # units. To plot vectors in the x-y plane, with u and v having
    # the same units as x and y, use
    # "angles='xy', scale_units='xy', scale=1".

    x, y = np.meshgrid(lon,lat)
    q = mpl.quiver( x[::svy,::svx],y[::svy,::svx],vecx[::svy,::svx],vecy[::svy,::svx],angles=angle,color=color,pivot=pivot,scale=scale,width=width,linewidths=linewidths,edgecolors=edgecolors)

    # make the vector key
    #keyh = 1.025 ; keyv = 1.05 # upper right corner over colorbar
    keyh = 0.95 ; keyv = 1.03
    # keyh = 0.03 ; keyv = 1.07
    #keyh = -0.03 ; keyv = 1.08 # upper left corner
    labelpos='E' # position of the label relative to the arrow : N S E W
    p = mpl.quiverkey(q,keyh,keyv,val,str(val)+' m/s',fontproperties={'size': 28,'weight': 'bold'},color='black',labelpos=labelpos,labelsep = 0.07)

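
# getwinds draws a quiver on the current axes and adds a reference arrow.
# A minimal usage sketch with synthetic wind fields (every 2nd grid point is
# plotted; the scale, width and reference value are arbitrary choices):
def _example_getwinds_usage():
    import matplotlib.pyplot as plt
    lat=np.linspace(-90.,90.,37)
    lon=np.linspace(0.,360.,73)
    u=10.*np.ones((lat.size,lon.size))   # synthetic zonal wind [m/s]
    v=5.*np.ones((lat.size,lon.size))    # synthetic meridional wind [m/s]
    plt.figure()
    getwinds(lon,lat,u,v,svx=2,svy=2,scale=500.,width=0.002,val=20)
    plt.show()
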
###############################################################################
###############################################################################

DEFAULT = object()
## function : get local times array
def tshift2(array, lon=DEFAULT, timex=DEFAULT, nsteps_out=DEFAULT):
    #================================================================
    #
    # Conversion to uniform local time
    # Assume longitude in the first dimension and time in the last dimension
    #
    # Interpolate onto a new time grid with nsteps_out samples per sol
    # New time: [ 0 ... nn-1/nsteps_out ]*24
    # Default: nsteps_out = length(timex)
    #
    # timex should be in units of hours (only timex(1) is actually relevant)

    if len(np.shape(array)) < 2:
        print('Need longitude and time dimensions')
        return

    dims=np.shape(array)  #get dimensions of array
    end=len(dims)-1
    id=dims[0]            #number of longitudes in file
    if lon is DEFAULT:
        lon = np.linspace(0.,360.,num=id,endpoint=False)
    if timex is DEFAULT:
        nsteps=dims[end]
        timex = np.linspace(0.,24.,num=nsteps,endpoint=False)
    else:
        nsteps=len(timex)


    nsf = float(nsteps)

    timex = np.squeeze(timex)

    if timex.max() <= 1.:   #if timex is in fractions of day
        timex = 24.*timex

    if nsteps_out is DEFAULT:
        nsteps_out = nsteps

    #Assuming time is the last dimension, check if it is local time timex
    #If not, reshape the array into (stuff, days, local time)
    print('dims,nsteps=',dims[end],nsteps)
    if dims[end] != nsteps:
        ndays = dims[end] // nsteps
        print('ndays=',ndays,dims[end])
        if ndays*nsteps != dims[end]:
            print('Time dimensions do not conform')
            return
        print('dims=',dims,end)
        array = np.reshape(array, dims[0:end] + (nsteps, ndays))
        # permutation of the axes that swaps the last two (days <-> local time)
        newdims=np.arange(len(dims)+1,dtype=np.int32)
        newdims[len(dims)-1]=len(dims)
        newdims[len(dims)]=len(dims)-1
        array = np.transpose(array,newdims)

    dims=np.shape(array)  #get the new dims of array if reshaped


    if len(dims) > 2:
        recl = np.prod(dims[1:len(dims)-1])
    else:
        recl=1


    array=np.reshape(array,(id,recl,nsteps))
    #create output array
    narray=np.zeros((id,recl,nsteps_out))

    dt_samp = 24.0/nsteps      # Time increment of input data (in hours)
    dt_save = 24.0/nsteps_out  # Time increment of output data (in hours)

    # calculate interpolation indices
    # convert east longitude to equivalent hours
    xshif = 24.0*lon/360.
    kk=np.where(xshif < 0)
    xshif[kk]=xshif[kk]+24.

    fraction = np.zeros((id,nsteps_out))
    imm = np.zeros((id,nsteps_out))
    ipp = np.zeros((id,nsteps_out))

    for nd in range(nsteps_out):
        dtt = nd*dt_save - xshif - timex[0] + dt_samp
        # ensure that the data local time is bounded by [0,24] hours
        kk = np.where(dtt < 0.)
        dtt[kk] = dtt[kk] + 24.

        im = np.floor(dtt/dt_samp)  # this is the index into the data array
        fraction[:,nd] = dtt-im*dt_samp
        kk = np.where(im < 0.)
        im[kk] = im[kk] + nsf

        ipa = im + 1.
        kk = np.where(ipa >= nsf)
        ipa[kk] = ipa[kk] - nsf

        imm[:,nd] = im[:]
        ipp[:,nd] = ipa[:]
    fraction = fraction / dt_samp # assume a uniform time increment between input data samples
    # Now carry out the interpolation
    for nd in range(nsteps_out):  # Number of output time levels
        for i in range(id):       # Number of longitudes
            im = int(imm[i,nd])
            ipa= int(ipp[i,nd])
            frac = fraction[i,nd]

            narray[i,:,nd] = (1.-frac)*array[i,:,im] + frac*array[i,:,ipa]

    narray = np.squeeze(narray)
    ndimsfinal=np.zeros(len(dims),dtype=int)
    for nd in range(end):
        ndimsfinal[nd]=dims[nd]
    ndimsfinal[end]=nsteps_out
    narray = np.reshape(narray,ndimsfinal)

    return narray

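
# A minimal usage sketch for tshift2, assuming a diagnostic stored as
# [longitude, latitude, time-of-day] with 12 universal-time samples per sol
# (the field below is synthetic noise, purely for illustration):
def _example_tshift2_usage():
    nlon,nlat,ntod=60,48,12
    lon=np.linspace(0.,360.,num=nlon,endpoint=False)
    timex=np.linspace(0.,24.,num=ntod,endpoint=False)        # universal times [hours]
    var=np.random.rand(nlon,nlat,ntod)
    var_lt=tshift2(var,lon=lon,timex=timex,nsteps_out=ntod)   # now uniform in local time
    print(var_lt.shape)                                       # (60, 48, 12)
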
####################################################################
####################################################################
def getareaff2(nc,latabs):
    """
    Compute the area of each latitude band; only bands with |lat| < latabs contribute
    """

    lat0=nc.variables['lat'][:]
    lon0=nc.variables['lon'][:]
    nblat=np.size(lat0)
    nblon=np.size(lon0)

    # Lat sup / Lat inf
    latsup=np.zeros(nblat,dtype='f')
    latinf=np.zeros(nblat,dtype='f')
    for i in range(nblat-1):
        latsup[i+1]=(lat0[i]+lat0[i+1])/2.
        latinf[i]=(lat0[i+1]+lat0[i])/2.
    latsup[0]=lat0[0]
    latinf[nblat-1]=lat0[nblat-1]

    # Area
    rad=3390.
    area=np.zeros(nblat,dtype='f')
    alpha=np.zeros(nblat,dtype='f')
    for i in range(nblat):
        if abs(lat0[i])<latabs:
            alpha[i]=abs(np.sin(latsup[i]*pi/180.)-np.sin(latinf[i]*pi/180.))*rad
            area[i]=alpha[i]*rad*2.*pi
    areatot=sum(area)
    return area,areatot,nblon,nblat

def getareaff(nc):
    """
    Compute area over latitude
    """

    lat0=nc.variables['lat'][:]
    lon0=nc.variables['lon']
    nblat=np.size(lat0)
    nblon=np.size(lon0)

    # Lat sup / Lat inf
    latsup=np.zeros(nblat,dtype='f')
    latinf=np.zeros(nblat,dtype='f')
    for i in range(nblat-1):
        latsup[i+1]=(lat0[i]+lat0[i+1])/2.
        latinf[i]=(lat0[i+1]+lat0[i])/2.
    latsup[0]=lat0[0]
    latinf[nblat-1]=lat0[nblat-1]

    # Area
    rad=3390.
    area=np.zeros(nblat,dtype='f')
    alpha=np.zeros(nblat,dtype='f')
    for i in range(nblat):
        alpha[i]=abs(np.sin(latsup[i]*pi/180.)-np.sin(latinf[i]*pi/180.))*rad
        area[i]=alpha[i]*rad*2.*pi
    areatot=sum(area)
    return area,areatot,nblon,nblat

def getarea_r(lat0,lon0,rad):
    """
    Compute area over latitude
    """

    nblat=np.size(lat0)
    nblon=np.size(lon0)

    # Lat sup / Lat inf
    latsup=np.zeros(nblat,dtype='f')
    latinf=np.zeros(nblat,dtype='f')
    for i in range(nblat-1):
        latsup[i+1]=(lat0[i]+lat0[i+1])/2.
        latinf[i]=(lat0[i+1]+lat0[i])/2.
    latsup[0]=lat0[0]
    latinf[nblat-1]=lat0[nblat-1]

    # Area
    area=np.zeros(nblat,dtype='f')
    alpha=np.zeros(nblat,dtype='f')
    for i in range(nblat):
        alpha[i]=abs(np.sin(latsup[i]*pi/180.)-np.sin(latinf[i]*pi/180.))*rad
        area[i]=alpha[i]*rad*2.*pi
    areatot=sum(area)
    return area,areatot,nblon,nblat

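
# The per-band areas are typically used as weights, e.g. for an area-weighted
# mean of a zonally averaged field. A minimal sketch with synthetic inputs
# (radius in km, as in the routines above):
def _example_getarea_usage():
    lat=np.linspace(-90.,90.,49)
    lon=np.linspace(0.,360.,65)
    area,areatot,nblon,nblat=getarea_r(lat,lon,3390.)
    tsurf_zm=200.+40.*np.cos(lat*pi/180.)     # synthetic zonal-mean field [K]
    print(np.sum(tsurf_zm*area)/areatot)      # area-weighted global mean
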
def fms_press_calc(psfc,ak,bk,lev_type='full'):
    """
    Return the 3d pressure field from the surface pressure and the ak/bk coefficients.

    Args:
        psfc: the surface pressure in [Pa] or array of surface pressures 1D or 2D, or 3D (if time dimension)
        ak: 1st vertical coordinate parameter
        bk: 2nd vertical coordinate parameter
        lev_type: "full" (centers of the levels) or "half" (layer interfaces)
                  Default is "full"
    Returns:
        The 3D pressure field at the full PRESS_f(:,:,Nk-1) or half levels PRESS_h(:,:,Nk) in [Pa]

        --- 0 --- TOP        ========  p_half
        --- 1 ---
                             --------  p_full

                             ========  p_half
        ---Nk-1---           --------  p_full
        --- Nk --- SFC       ========  p_half
                            / / / / /

    *NOTE*
        Some literature uses pk (pressure) instead of ak.
        With p3d= ps*bk +pref*ak vs the current p3d= ps*bk +ak
    """

    Nk=len(ak)
    # If psfc is a float (e.g. psfc=7.) make it a one-element array (e.g. psfc=[7.])
    if len(np.atleast_1d(psfc))==1: psfc=np.array([np.squeeze(psfc)])

    #Flatten the pressure array to generalize to N dimensions
    psfc_flat=psfc.flatten()

    # Expand the dimensions for vectorized calculations:
    psfc_v=np.repeat(psfc_flat[:,np.newaxis],Nk, axis=1)    #(Np) ->(Np,Nk)
    ak_v=np.repeat(ak[np.newaxis,:],len(psfc_flat), axis=0) #(Nk) ->(Np,Nk)
    bk_v=np.repeat(bk[np.newaxis,:],1, axis=0)              #(Nk) ->(1, Nk)

    #Pressure at half levels = layer interfaces. The size of the z axis is Nk
    PRESS_h=psfc_v*bk_v+ak_v

    #Pressure at full levels = centers of the levels. The size of the z axis is Nk-1
    PRESS_f=np.zeros((len(psfc_flat),Nk-1))

    #Top layer (1st element is i=0 in Python)
    if ak[0]==0 and bk[0]==0:
        PRESS_f[:,0]= 0.5*(PRESS_h[:,0]+PRESS_h[:,1])
    else:
        PRESS_f[:,0] = (PRESS_h[:,1]-PRESS_h[:,0])/np.log(PRESS_h[:,1]/PRESS_h[:,0])

    #Rest of the column (i=1..Nk).
    #[2:] goes from the 3rd element to Nk and [1:-1] goes from the 2nd element to Nk-1
    PRESS_f[:,1:]= (PRESS_h[:,2:]-PRESS_h[:,1:-1])/np.log(PRESS_h[:,2:]/PRESS_h[:,1:-1])

    # Reshape PRESS(:,Nk) to the original pressure shape PRESS(:,:,Nk) (resp. Nk-1)

    if lev_type=="full":
        new_dim_f=np.append(psfc.shape,Nk-1)
        return np.squeeze(PRESS_f.reshape(new_dim_f))
    elif lev_type=="half" :
        new_dim_h=np.append(psfc.shape,Nk)
        return np.squeeze(PRESS_h.reshape(new_dim_h))
    else:
        raise Exception("""Pressure levels type not recognized in press_lev(): use 'full' or 'half' """)

def fms_Z_calc(psfc,ak,bk,T,topo=0.,lev_type='full'):
    """
    Return the 3d altitude field in [m]

    Args:
        psfc: the surface pressure in [Pa] or array of surface pressures 1D or 2D, or 3D (if time dimension)
        ak: 1st vertical coordinate parameter
        bk: 2nd vertical coordinate parameter
        T : the air temperature profile, 1D array (for a single grid point) or 2D, 3D 4D
        topo: the surface elevation, same dimension as psfc
        lev_type: "full" (centers of the levels) or "half" (layer interfaces)
                  Default is "full"
    Returns:
        The layers' altitude at the full Z_f(:,:,Nk-1) or half levels Z_h(:,:,Nk) in [m]

        --- 0 --- TOP        ========  z_half
        --- 1 ---
                             --------  z_full

                             ========  z_half
        ---Nk-1---           --------  z_full
        --- Nk --- SFC       ========  z_half
                            / / / / /

    *NOTE*
        Calculation is derived from ./atmos_cubed_sphere_mars/Mars_phys.F90:
        We have dp/dz = -rho g  =>  dz = dp/(-rho g), and rho = p/(r T)  =>  dz = rT/g * (-dp/p)
        Let's define the log-pressure u as u = ln(p). We have du = du/dp * dp = (1/p)*dp = dp/p

        Finally, we have dz for the half layers: dz = rT/g * (-du) => dz = rT/g * (+dp/p), with N the layers defined from top to bottom.
    """
    g=3.72         # acceleration of gravity [m/s2]
    r_co2= 191.00  # specific gas constant for CO2 [J/(kg K)]
    Nk=len(ak)
    #===get the half and full pressure levels from fms_press_calc==

    PRESS_f=fms_press_calc(psfc,ak,bk,'full')
    PRESS_h=fms_press_calc(psfc,ak,bk,'half')

    # If psfc is a float, turn it into a one-element array:
    if len(np.atleast_1d(psfc))==1:
        psfc=np.array([np.squeeze(psfc)])
        topo=np.array([np.squeeze(topo)])

    psfc_flat=psfc.flatten()
    topo_flat=topo.flatten()

    #== reshape arrays for vector calculations and compute the log pressure ====

    PRESS_h=PRESS_h.reshape((len(psfc_flat),Nk))
    PRESS_f=PRESS_f.reshape((len(psfc_flat),Nk-1))
    T=T.reshape((len(psfc_flat),Nk-1))

    logPPRESS_h=np.log(PRESS_h)

    #===Initialize the output arrays===
    Z_f=np.zeros((len(psfc_flat),Nk-1))
    Z_h=np.zeros((len(psfc_flat),Nk))

    #The lowest half level equals the surface elevation

    Z_h[:,-1] = topo_flat

    # Other levels, from the bottom up:
    for k in range(Nk-2,-1,-1):
        Z_h[:,k] = Z_h[:,k+1]+(r_co2*T[:,k]/g)*(logPPRESS_h[:,k+1]-logPPRESS_h[:,k])
        Z_f[:,k] = Z_h[:,k+1]+(r_co2*T[:,k]/g)*(1-PRESS_h[:,k]/PRESS_f[:,k])

    #=====return the levels in Z coordinates [m]====
    if lev_type=="full":
        new_dim_f=np.append(psfc.shape,Nk-1)
        return np.squeeze(Z_f.reshape(new_dim_f))
    elif lev_type=="half" :
        new_dim_h=np.append(psfc.shape,Nk)
        return np.squeeze(Z_h.reshape(new_dim_h))
    else:
        raise Exception("""Altitude levels type not recognized: use 'full' or 'half' """)

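
# A minimal sketch of how the two routines chain together for a single column.
# The hybrid ak/bk coefficients, surface pressure and temperatures below are
# synthetic values, not a real model grid:
def _example_fms_column():
    Nk=11                                     # number of layer interfaces
    bk=np.linspace(0.,1.,Nk)**2
    ak=np.linspace(0.05,0.,Nk)                # [Pa], small non-zero pressure at the top
    psfc=610.                                 # surface pressure [Pa]
    T=200.*np.ones(Nk-1)                      # temperature at the Nk-1 full levels [K]
    p_full=fms_press_calc(psfc,ak,bk,lev_type='full')        # (Nk-1,) pressures [Pa]
    z_full=fms_Z_calc(psfc,ak,bk,T,topo=0.,lev_type='full')  # (Nk-1,) altitudes [m]
    print(p_full)
    print(z_full)
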
def akbk_loader(NLAY,data_dir='/u/mkahre/MCMC/data_files'):
    """
    Return the ak and bk values given a number of layers for standard resolutions
    Default directory is /lou/s2n/mkahre/MCMC/data_files/
    Args:
        NLAY: the number of layers (float or integer)
    Returns:
        ak: 1st vertical coordinate parameter [Pa]
        bk: 2nd vertical coordinate parameter [none]

    *NOTE* ak,bk have a size NLAY+1 since they define the position of the layer interfaces (half layers):
           p_half = ak + bk*p_sfc
    """

    from netCDF4 import Dataset
    NLAY=int(NLAY)
    file=Dataset(data_dir+'/akbk_L%i.nc'%(NLAY), 'r', format='NETCDF4')
    ak=file.variables['pk'][:]
    bk=file.variables['bk'][:]
    file.close()
    return ak,bk

def zonal_avg_P_lat(Ls,var,Ls_target,Ls_angle,symmetric=True):
    """
    Return the zonally averaged mean value of a pressure-interpolated 4D variable.

    Args:
        Ls: 1D array of solar longitude of the input variable in degree (0->360)
        var: a 4D variable var [time,levels,lat,lon] interpolated on the pressure levels (f_average_plevs file)
        Ls_target: central solar longitude of interest.
        Ls_angle: requested window angle centered on Ls_target. Expl: Ls_target=90. and Ls_angle=10. (the window goes from Ls 85 to Ls 95)
        symmetric: a boolean (default =True). If True, and if the requested window is out of range, Ls_angle is reduced.
                   If False, the time average is done on the data available
    Returns:
        The zonally and temporally averaged field zpvar[level,lat]

    Expl:  Ls_target= 90.
           Ls_angle = 10.

           ---> Nominally, the time average is done over solar longitudes 85 < Ls_target < 95 (10 degrees)

           ---> If symmetric =True and the input data ranges from Ls 88 to 100:   88 < Ls_target < 92 (4 degrees, symmetric)
                If symmetric =False and the input data ranges from Ls 88 to 100:  88 < Ls_target < 95 (7 degrees, asymmetric)
    *NOTE*

    [Alex] as of 6/8/18, the routine will bin data from multiple Mars years if provided

    """
    #compute bounds from Ls_target and Ls_angle
    Ls_min= Ls_target-Ls_angle/2.
    Ls_max= Ls_target+Ls_angle/2.

    if (Ls_min<0.):Ls_min+=360.
    if (Ls_max>360.):Ls_max-=360.

    #Initialize output array
    zpvar=np.zeros((var.shape[1],var.shape[2])) #nlev, nlat

    #check if the Ls of interest is within the data provided, raise an exception otherwise
    if Ls_target <= Ls.min() or Ls_target >=Ls.max() :
        raise Exception("Error \nNo data found, requested data : Ls %.2f <-- (%.2f)--> %.2f\nHowever, data in file only ranges Ls %.2f <-- (%.2f)--> %.2f"%(Ls_min,Ls_target,Ls_max,Ls.min(),(Ls.min()+Ls.max())/2.,Ls.max()))

    else : #If only some of the requested data is outside the range, process the data available
        if Ls_min <Ls.min() or Ls_max >Ls.max():
            print(("In zonal_avg_P_lat() Warning: \nRequested data ranging Ls %.2f <-- (%.2f)--> %.2f"%(Ls_min,Ls_target,Ls_max)))
            if symmetric: #Case 1: reduce the window
                if Ls_min <Ls.min():
                    Ls_min =Ls.min()
                    Ls_angle=2*(Ls_target-Ls_min)
                    Ls_max= Ls_target+Ls_angle/2.

                if Ls_max >Ls.max():
                    Ls_max =Ls.max()
                    Ls_angle=2*(Ls_max-Ls_target)
                    Ls_min= Ls_target-Ls_angle/2.

                print(("Reshaping data ranging Ls %.2f <-- (%.2f)--> %.2f"%(Ls_min,Ls_target,Ls_max)))
            else: #Case 2: Use all data available
                print(("I am only using Ls %.2f <-- (%.2f)--> %.2f \n"%(max(Ls.min(),Ls_min),Ls_target,min(Ls.max(),Ls_max))))
    count=0
    #perform the longitude average on the field
    zvar= np.mean(var,axis=3)

    for t in range(len(Ls)):
        #special case Ls around Ls =0 (wrap around)
        if (Ls_min<=Ls[t] <= Ls_max):
            zpvar[:,:]=zpvar[:,:]+zvar[t,:,:]
            count+=1

    if count>0:
        zpvar/=count
    return zpvar

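
# A minimal usage sketch with synthetic inputs: one Mars year of 5-degree Ls
# sampling and a random [time,lev,lat,lon] field (values are illustrative only):
def _example_zonal_avg_usage():
    Ls=np.arange(0.,360.,5.)
    var=np.random.rand(len(Ls),20,36,72)
    zpvar=zonal_avg_P_lat(Ls,var,Ls_target=270.,Ls_angle=30.)
    print(zpvar.shape)      # (20, 36): [level, lat]
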

def alt_KM(press,scale_height_KM=8.,reference_press=610.):
    """
    Gives the approximate altitude in km for a given pressure
    Args:
        press: the pressure in [Pa]
        scale_height_KM: a scale height in [km], (default is 8 km)
        reference_press: reference surface pressure in [Pa], (default is 610 Pa)
    Returns:
        z_KM: the equivalent altitude for that pressure level in [km]

    """
    return -scale_height_KM*np.log(press/reference_press) # pressure to altitude in km

def press_pa(alt_KM,scale_height_KM=8.,reference_press=610.):
    """
    Gives the approximate pressure in Pa for a given altitude
    Args:
        alt_KM: the altitude in [km]
        scale_height_KM: a scale height in [km], (default is 8 km)
        reference_press: reference surface pressure in [Pa], (default is 610 Pa)
    Returns:
        press_pa: the equivalent pressure at that altitude in [Pa]

    """
    return reference_press*np.exp(-alt_KM/scale_height_KM) # altitude in km to pressure in Pa

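
# Quick round trip between the two approximations:
def _example_alt_press_roundtrip():
    print(alt_KM(610.))      # -> 0.0 km at the reference pressure
    print(press_pa(16.))     # -> 610*exp(-2), about 82.6 Pa at 16 km (two scale heights)
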
def lon180_to_360(lon):
    """
    Transform a float or an array from the -180/+180 coordinate system to 0-360
    Args:
        lon: a float, 1D or 2D array of longitudes in the -180/+180 coordinate system
    Returns:
        lon: the equivalent longitudes in the 0-360 coordinate system

    """
    lon=np.array(lon)
    if len(np.atleast_1d(lon))==1: #lon180 is a float
        if lon<0:lon+=360
    else:                          #lon180 is an array
        lon[lon<0]+=360
    return lon

def lon360_to_180(lon):
    """
    Transform a float or an array from the 0-360 coordinate system to -180/+180
    Args:
        lon: a float, 1D or 2D array of longitudes in the 0-360 coordinate system
    Returns:
        lon: the equivalent longitudes in the -180/+180 coordinate system

    """
    lon=np.array(lon)
    if len(np.atleast_1d(lon))==1: #lon is a float
        if lon>180:lon-=360
    else:                          #lon is an array
        lon[lon>180]-=360
    return lon


def second_hhmmss(seconds,lon_180=0.,show_mmss=True):
    """
    Given a time in seconds, return the Local True Solar Time at a certain longitude
    Args:
        seconds: a float, the time in seconds
        lon_180: a float, the longitude in -/+180 coordinates
        show_mmss: also return minutes and seconds if True
    Returns:
        hours: float, the local time, or the tuple (hours,minutes,seconds)

    """
    hours = seconds // (60*60)
    seconds %= (60*60)
    minutes = seconds // 60
    seconds %= 60
    #Add the timezone offset (1hr/15 degrees)
    hours=np.mod(hours+lon_180/15.,24)

    if show_mmss:
        return int(hours), int(minutes), int(seconds)
    else:
        return int(hours)


def sol2LTST(time_sol,lon_180=0.,show_minute=False):
    """
    Given the time in days, return the Local True Solar Time at a certain longitude
    Args:
        time_sol: a float, the time, e.g. sols 2350.24
        lon_180: a float, the longitude in -/+180 coordinates
        show_minute: show minutes and seconds if True, otherwise show whole hours
    Returns:
        hours: float, the local time, or the tuple (hours,minutes,seconds)

    """
    return second_hhmmss(time_sol*86400.,lon_180,show_minute)

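
# For example, sol 2350.25 at longitude 0 corresponds to 06:00:00 local true solar time:
def _example_sol2LTST():
    print(sol2LTST(2350.25,lon_180=0.,show_minute=True))   # -> (6, 0, 0)
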
def space_time(lon,timex, varIN,kmx,tmx):
    """
    Obtain west and east propagating waves. This is a Python implementation of John Wilson's space_time routine by Alex
    Args:
        lon:   longitude array in [degrees] 0->360
        timex: 1D time array in units of [day]. Expl: 1.5 days sampled every hour is [0/24,1/24, 2/24,.. 1,.. 1.5]
        varIN: input array for the Fourier analysis.
               First axis must be longitude and last axis must be time. Expl: varIN[lon,time], varIN[lon,lat,time], varIN[lon,lev,lat,time]
        kmx: an integer, the number of longitudinal wavenumbers to extract (max allowable number of wavenumbers is nlon/2)
        tmx: an integer, the number of tidal harmonics to extract          (max allowable number of harmonics is nsamples/2)

    Returns:
        ampe:   East propagating wave amplitude [same unit as varIN]
        ampw:   West propagating wave amplitude [same unit as varIN]
        phasee: East propagating phase [degree]
        phasew: West propagating phase [degree]


    *NOTE*  1. ampe,ampw,phasee,phasew have dimensions [kmx,tmx] or [kmx,tmx,lat] [kmx,tmx,lev,lat] etc...
            2. The x and y axes may be constructed as follows to display the eastward and westward modes:

                klon=np.arange(0,kmx)                                   # [wavenumber]
                ktime=np.append(-np.arange(tmx,0,-1),np.arange(0,tmx))  # [cycle/sol]
                KTIME,KLON=np.meshgrid(ktime,klon)

                amplitude=np.concatenate((ampw[:,::-1], ampe), axis=1)
                phase=    np.concatenate((phasew[:,::-1], phasee), axis=1)

    """

    dims= varIN.shape #get input variable dimensions

    lon_id= dims[0]    # lon
    time_id= dims[-1]  # time
    dim_sup_id=dims[1:-1] #additional dimensions stacked in the middle
    jd= int(np.prod( dim_sup_id)) #jd is the total number of elements in the middle dimensions if varIN > 3D

    varIN= np.reshape(varIN, (lon_id, jd, time_id) ) #flatten the middle dimensions, if any

    #Initialize 4 empty arrays
    ampw, ampe,phasew,phasee =[np.zeros((kmx,tmx,jd)) for _x in range(0,4)]

    #TODO not implemented yet: zamp,zphas=[np.zeros((jd,tmx)) for _x in range(0,2)]

    tpi= 2*np.pi
    argx= lon * 2*np.pi/360 #normalize the longitude array
    rnorm= 2./len(argx)

    arg= timex * 2* np.pi
    rnormt= 2./len(arg)

    #
    for kk in range(0,kmx):
        progress(kk,kmx)
        cosx= np.cos( kk*argx )*rnorm
        sinx= np.sin( kk*argx )*rnorm

        # Inner product to calculate the Fourier coefficients of the cosine
        # and sine contributions of the spatial variation
        acoef = np.dot(varIN.T,cosx)
        bcoef = np.dot(varIN.T,sinx)

        # Now get the cos/sine series expansions of the temporal
        # variations of the acoef and bcoef spatial terms.
        for nn in range(0,tmx):
            cosray= rnormt*np.cos(nn*arg )
            sinray= rnormt*np.sin(nn*arg )

            cosA= np.dot(acoef.T,cosray)
            sinA= np.dot(acoef.T,sinray)
            cosB= np.dot(bcoef.T,cosray)
            sinB= np.dot(bcoef.T,sinray)


            wr= 0.5*( cosA - sinB )
            wi= 0.5*( -sinA - cosB )
            er= 0.5*( cosA + sinB )
            ei= 0.5*( sinA - cosB )

            aw= np.sqrt( wr**2 + wi**2 )
            ae= np.sqrt( er**2 + ei**2)

            # (unwrapped phases, superseded by the wrapped values below)
            #pe= np.arctan2(ei,er) * 180/np.pi
            #pw= np.arctan2(wi,wr) * 180/np.pi

            pe= np.mod( -np.arctan2(ei,er) + tpi, tpi ) * 180/np.pi
            pw= np.mod( -np.arctan2(wi,wr) + tpi, tpi ) * 180/np.pi

            ampw[kk,nn,:]= aw.T
            ampe[kk,nn,:]= ae.T
            phasew[kk,nn,:]= pw.T
            phasee[kk,nn,:]= pe.T
    #End loop


    ampw= np.reshape( ampw, (kmx,tmx)+dim_sup_id )
    ampe= np.reshape( ampe, (kmx,tmx)+dim_sup_id )
    phasew= np.reshape( phasew, (kmx,tmx)+dim_sup_id )
    phasee= np.reshape( phasee, (kmx,tmx)+dim_sup_id )

    #TODO implement zonal mean: zamp,zphas,stamp,stphs
    '''
    # varIN= reshape( varIN, dims );

    #if nargout < 5; return; end ---> only ampe,ampw,phasee,phasew are requested


    # Now calculate the axisymmetric tides zamp,zphas

    zvarIN= np.mean(varIN,axis=0)
    zvarIN= np.reshape( zvarIN, (jd, time_id) )

    arg= timex * 2* np.pi
    arg= np.reshape( arg, (len(arg), 1 ))
    rnorm= 2/time_id

    for nn in range(0,tmx):
        cosray= rnorm*np.cos( nn*arg )
        sinray= rnorm*np.sin( nn*arg )

        cosser= np.dot(zvarIN,cosray)
        sinser= np.dot(zvarIN,sinray)

        zamp[:,nn]= np.sqrt( cosser[:]**2 + sinser[:]**2 ).T
        zphas[:,nn]= np.mod( -np.arctan2( sinser, cosser )+tpi, tpi ).T * 180/np.pi


    zamp= zamp.T   #np.permute( zamp, (2 1) )
    zphas= zphas.T #np.permute( zphas, (2,1) )

    if len(dims)> 2:
        zamp= np.reshape( zamp, (tmx,)+dim_sup_id )
        zamp= np.reshape( zphas, (tmx,)+dim_sup_id )



    #if nargout < 7; return; end

    sxx= np.mean(varIN,ndims(varIN));
    [stamp,stphs]= amp_phase( sxx, lon, kmx );

    if len(dims)> 2;
        stamp= reshape( stamp, [kmx dims(2:end-1)] );
        stphs= reshape( stphs, [kmx dims(2:end-1)] );
    end

    '''

    return ampe,ampw,phasee,phasew

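
# A minimal sketch of how space_time might be driven: a synthetic wavenumber-1,
# diurnal-period travelling wave sampled 16 times per sol over 3 sols. With this
# sign convention the signal shows up in the eastward k=1, n=1 amplitude (~1):
def _example_space_time_usage():
    nlon,ntime=36,48
    lon=np.linspace(0.,360.,num=nlon,endpoint=False)
    timex=np.arange(ntime)/16.                      # time in sols
    LON,T=np.meshgrid(lon,timex,indexing='ij')
    varIN=np.cos(2*np.pi*(T-LON/360.))              # synthetic travelling wave
    ampe,ampw,phasee,phasew=space_time(lon,timex,varIN,kmx=4,tmx=3)
    print(ampe.shape)                               # (4, 3): [wavenumber, harmonic]
    print(ampe[1,1],ampw[1,1])                      # ~1 and ~0
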

def give_permission(filename):
    '''
    # NAS system only: set group permission to the file
    '''
    import subprocess
    import os

    try:
        subprocess.check_call(['setfacl -v'],shell=True,stdout=open(os.devnull, "w"),stderr=open(os.devnull, "w")) #catch error and standard output
        cmd_txt='setfacl -R -m g:s0846:r '+filename
        subprocess.call(cmd_txt,shell=True)
    except subprocess.CalledProcessError:
        pass

def progress(k,Nmax):
    """
    Display a progress bar to monitor heavy calculations.
    Args:
        k: current iteration of the outer loop
        Nmax: max iteration of the outer loop
    Returns:
        Running... [#---------] 10.64 %
    """
    import sys
    from math import ceil #round to the 2nd digit
    progress=float(k)/Nmax
    barLength = 10 # Modify this to change the length of the progress bar
    status = ""
    if isinstance(progress, int):
        progress = float(progress)
    if not isinstance(progress, float):
        progress = 0
        status = "error: progress var must be float\r\n"
    if progress < 0:
        progress = 0
        status = "Halt...\r\n"
    if progress >= 1:
        progress = 1
        status = "Done...\r\n"
    block = int(round(barLength*progress))
    text = "\rRunning... [{0}] {1} {2}%".format( "#"*block + "-"*(barLength-block), ceil(progress*100*100)/100, status)
    sys.stdout.write(text)
    sys.stdout.flush()

def dvar_dh(arr, h):
    '''
    Differentiate an array A(dim1,dim2,dim3...) with respect to h. h and dim1 must have the same length and be the first dimension.
    Args:
        arr: an array of dimension n
        h: the dimensions, eg Z, P, lat, lon

    Returns:
        d_arr: the array differentiated with respect to h

    *Example*
    #Compute dT/dz where T[time,lev,lat,lon] is the temperature and Zkm are the level heights in km:
    #First we transpose t so the vertical dimension comes first as T[LEV,time,lat,lon] and then we transpose back to get dTdz[time,LEV,lat,lon].
    dTdz=dvar_dh(t.transpose([1,0,2,3]),Zkm).transpose([1,0,2,3])

    '''

    d_arr = np.copy(arr)
    reshape_shape=np.append([arr.shape[0]-2],[1 for i in range(0,arr.ndim -1)]) #arr.shape[i]
    d_arr[0,...] = (arr[1,...]-arr[0,...])/(h[1]-h[0])
    d_arr[-1,...] = (arr[-1,...]-arr[-2,...])/(h[-1]-h[-2])
    d_arr[1:-1,...] = (arr[2:,...]-arr[0:-2,...])/(np.reshape(h[2:]-h[0:-2],reshape_shape))
    return d_arr

#=========================================================================
#=============Wrapper for creation of netcdf files========================
#=========================================================================

class Ncdf(object):
    '''
    Alex K.
    NetCdf wrapper for quick archiving of data into netcdf format

    USAGE:

    from netcdf_wrapper import Ncdf

    Fgeo= 0.03 #W/m2, a constant
    TG=np.ones((24,8)) #ground temperature

    #---create file---
    filename="/lou/s2n/mkahre/MCMC/analysis/working/myfile.nc"
    description="results from new simulation, Alex 01-01-19"
    Log=Ncdf(filename,description)

    #---Save the constant to the file---
    Log.add_constant('Fgeo',Fgeo,"geothermal flux","W/m2")

    #---Save the TG array to the file---
    Log.add_dimension('Nx',8)
    Log.add_dimension('time',24)

    Log.log_variable('TG',TG,('time','Nx'),'soil temperature','K')

    Log.close()


    '''
    def __init__(self,filename=None,description_txt="",action='w'):
        if filename:
            if filename[-3:]!=".nc":
                #assume that only a path is provided so make a name for the file
                import datetime;now = datetime.datetime.now()
                filename=filename+\
                    '/run_%02d-%02d-%04d_%i-%i-%i.nc'%(now.day,now.month,now.year,now.hour,now.minute,now.second)
        else:   #create a default file name if path and filename are not provided
            import os #use a default path if not provided
            pathname=os.getcwd()+'/'
            import datetime;now = datetime.datetime.now()
            filename=pathname+\
                'run_%02d-%02d-%04d_%i-%i-%i.nc'%(now.day,now.month,now.year,now.hour,now.minute,now.second)
        self.filename=filename
        from netCDF4 import Dataset
        if action=='w':
            self.f_Ncdf = Dataset(filename, 'w', format='NETCDF4')
            self.f_Ncdf.description = description_txt
        elif action=='a': #append to file
            self.f_Ncdf = Dataset(filename, 'a', format='NETCDF4')
        #create dictionaries to hold dimensions and variables
        self.dim_dict=dict()
        self.var_dict=dict()
        print((filename+ " was created"))

    def close(self):
        self.f_Ncdf.close()
        print((self.filename+" was closed"))

    def add_dimension(self,dimension_name,length):
        self.dim_dict[dimension_name]= self.f_Ncdf.createDimension(dimension_name,length)

    def print_dimension(self):
        print((list(self.dim_dict.items())))
    def print_variable(self):
        print((list(self.var_dict.keys())))

    def add_constant(self,variable_name,value,longname_txt="",unit_txt=""):
        if not any('constant' in s for s in list(self.dim_dict.keys())):
            self.add_dimension('constant',1)
        longname_txt =longname_txt+' (%g)'%(value) #add the value to the longname
        self.def_variable(variable_name,('constant'),longname_txt,unit_txt)
        self.var_dict[variable_name][:]=value

    def def_variable(self,variable_name,dim_array,longname_txt="",unit_txt=""):
        self.var_dict[variable_name]= self.f_Ncdf.createVariable(variable_name,'f4',dim_array)
        self.var_dict[variable_name].units=unit_txt
        self.var_dict[variable_name].long_name=longname_txt
        self.var_dict[variable_name].dim_name=dim_array

    def log_variable(self,variable_name,DATAin,dim_array,longname_txt="",unit_txt=""):
        if not any(variable_name in s for s in list(self.var_dict.keys())):
            self.def_variable(variable_name,dim_array,longname_txt,unit_txt)
        self.var_dict[variable_name].long_name=longname_txt
        self.var_dict[variable_name].dim_name=dim_array
        self.var_dict[variable_name].units=unit_txt
        self.var_dict[variable_name][:]=DATAin

#=========================================================================
#=======================vertical grid utilities===========================
#=========================================================================
def gauss_profile(x, alpha,x0=0.):
    """ Return a Gaussian line shape at x. This can be used to generate a bell-shaped mountain"""
    return np.sqrt(np.log(2) / np.pi) / alpha\
            * np.exp(-((x-x0) / alpha)**2 * np.log(2))

def compute_uneven_sigma(num_levels, N_scale_heights, surf_res, exponent, zero_top ):
    """
    Construct an initial array of sigma based on the number of levels and an exponent
    Args:
        num_levels: the number of levels
        N_scale_heights: the number of scale heights to the top of the model (e.g. N_scale_heights=12.5 is ~102 km assuming an 8 km scale height)
        surf_res: the resolution at the surface
        exponent: an exponent to increase the thickness of the levels
        zero_top: if True, force the top pressure boundary (in N=0) to 0 Pa
    Returns:
        b: an array of sigma layers

    """
    b=np.zeros(int(num_levels)+1)
    for k in range(0,num_levels):
        zeta = 1.-k/float(num_levels) #zeta decreases with k
        z = surf_res*zeta + (1.0 - surf_res)*(zeta**exponent)
        b[k] = np.exp(-z*N_scale_heights)
    b[-1] = 1.0
    if(zero_top): b[0] = 0.0
    return b


def transition( pfull, p_sigma=0.1, p_press=0.05):
    """
    Return the transition factor to construct the ak and bk
    Args:
        pfull: the pressure in Pa
        p_sigma: the pressure level where the vertical grid starts transitioning from sigma to pressure
        p_press: the pressure level above which the vertical grid is pure (constant) pressure
    Returns:
        t: the transition factor =1 for pure sigma, 0 for pure pressure and 0<t<1 for the transition

    NOTE:
    In the FV code the full pressures are computed from:
                       del(phalf)
         pfull = -----------------------------
                 log(phalf(k+1/2)/phalf(k-1/2))
    """
    t=np.zeros_like(pfull)
    for k in range(0,len(pfull)):
        if( pfull[k] <= p_press):
            t[k] = 0.0
        elif ( pfull[k] >= p_sigma) :
            t[k] = 1.0
        else:
            x  = pfull[k]  - p_press
            xx = p_sigma   - p_press
            t[k] = (np.sin(0.5*np.pi*x/xx))**2

    return t


def swinbank(plev, psfc, ptrans=1.):
    """
    Compute ak and bk values with a transition based on Swinbank
    Args:
        plev: the pressure levels in Pa
        psfc: the surface pressure in Pa
        ptrans: the transition pressure in Pa
    Returns:
        aknew, bknew, ks: the coefficients for the new layers
    """

    ktrans= np.argmin(np.abs( plev- ptrans) ) # ks= number of pure pressure levels
    km= len(plev)-1

    aknew=np.zeros(len(plev))
    bknew=np.zeros(len(plev))

    # pnorm= 1.e5;
    pnorm= psfc
    eta= plev / pnorm

    ep= eta[ktrans+1] # ks= number of pure pressure levels
    es= eta[-1]
    rnorm= 1. / (es-ep)**2

    # Compute alpha, beta, and gamma using Swinbank's formula
    alpha = (ep**2 - 2.*ep*es) / (es-ep)**2
    beta  =        2.*ep*es**2 / (es-ep)**2
    gamma =        -(ep*es)**2 / (es-ep)**2

    # Pure pressure levels
    aknew= eta * pnorm

    # Hybrid pressure-sigma levels
    kdex= list(range(ktrans+1,km))
    aknew[kdex] = alpha*eta[kdex] + beta + gamma/eta[kdex]
    aknew[kdex]= aknew[kdex] * pnorm
    aknew[-1]= 0.0

    bknew[kdex] = (plev[kdex] - aknew[kdex])/psfc
    bknew[-1] = 1.0

    #find the transition level ks where (bk[ks]>0)
    ks=0
    while bknew[ks]==0. :
        ks+=1
    #ks is the index that would be used with Fortran indexing in fv_eta.f90
    return aknew, bknew,ks

def printvar(infile):
    # Get all variable names for a netCDF file ---
    print("Variables:")
    variableNames = list(infile.variables.keys())
    print(variableNames)
    print("\n")

def getind(myloc,field):
    # get a specific index in the lat, lon, time or pfull 1D field
    res=[]
    for loc in np.atleast_1d(myloc):
        myind=np.where(abs(field[:]-loc)==min(abs(field[:]-loc)))[0][0]
        res.append(myind)
    res=np.atleast_1d(res)
    if len(res)== 1: return res[0]
    return np.atleast_1d(res)


# getinds=np.vectorize(getind, excluded="field")

def getvar(nc1,var,times=None,tim=None,longitudes=None,lon=None,t_mean=False,l_mean=False):
    if var == "latitude" and var not in nc1.variables: var="lat"
    if var == "longitude" and var not in nc1.variables: var="lon"
    if var == "Time" and var not in nc1.variables: var="time_counter"
    if var == "aire" and var not in nc1.variables: var="area"
    if var == "phisinit" and var not in nc1.variables: var="phisfi"

    myvar = nc1.variables[var][:]
    try:
        t=getind(times,tim)
        if len(t)==2:
            myvar = myvar[t[0]:t[1]+1]
        else:
            myvar = myvar[t]
    except:
        # print("no time selected")
        pass
    try:
        l=getind(longitudes,lon)
        if len(l)==2:
            myvar = myvar[...,l[0]:l[1]+1]
        else:
            myvar = myvar[...,l]
    except:
        # print("no longitude selected")
        pass
    if t_mean: myvar=np.mean(myvar,axis=0)  # temporal mean
    if l_mean: myvar=np.mean(myvar,axis=-1) # longitudinal mean
    print(np.shape(myvar))
    try:
        if var == "altitude":
            units = nc1.variables[var].units
            if units == "m":
                myvar/=1000 # plot in km
    except:
        pass
    return myvar

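
# A minimal usage sketch for getvar, assuming a GCM output file that contains
# 'temp', 'Time', 'latitude' and 'longitude' (the file name below is hypothetical):
def _example_getvar_usage():
    from netCDF4 import Dataset
    nc1=Dataset("diagfi.nc","r")
    time=getvar(nc1,"Time")
    lat=getvar(nc1,"latitude")
    # time- and zonal-mean temperature between sols 10 and 20
    temp=getvar(nc1,"temp",times=[10.,20.],tim=time,t_mean=True,l_mean=True)
    return lat,temp
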

def getarea(filename2,var,tint):
    # Read the grid-cell area ("aire") from an open netCDF dataset
    myvar = filename2.variables["aire"][:]
    print((np.shape(myvar)))
    return myvar
---|