source: trunk/UTIL/PYTHON/planetoplot_v2/ppcompute.py @ 1242

Last change on this file since 1242 was 1219, checked in by aslmd, 11 years ago

PLANETOPLOT. improve flexibility of code for computations. added computation of perturbations and diff

File size: 11.2 KB
###############################################
## PLANETOPLOT                               ##
## --> PPCOMPUTE                             ##
###############################################
# python built-in libraries
import os
# added libraries
import numpy as np
import math  as m
import scipy.signal as sp_signal
###############################################

## first a useful function to find settings in a folder in PYTHONPATH
def findset(whereset,string="planetoplot_v2"):
    # ... set a default whereset if it was set to None
    # ... default is in the planetoplot_v2 folder
    if whereset is None:
        for path in os.environ['PYTHONPATH'].split(os.pathsep):
            if string in path: whereset = path
        if whereset is None:
            print "!! ERROR !! "+string+" not in $PYTHONPATH"
            print "--> either put it in $PYTHONPATH or change whereset"
            exit()
    # ... if the last / is missing put it
    if whereset[-1] != "/": whereset = whereset + "/"
    return whereset

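## usage sketch for findset (illustrative, not executed; the explicit path below is hypothetical)
# >>> folder = findset(None)                          # pick the planetoplot_v2 entry of $PYTHONPATH
# >>> folder = findset("/home/user/planetoplot_v2")   # or give a folder; a trailing / is appended if missing
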
##########################
#### MAX MEAN MIN SUM ####
#####################################
#### WITH SUPPORT FOR NaN VALUES ####
#####################################

## compute min
## author AS + AC
def min(field,axis=None):
        if field is None: return None
        if type(field).__name__=='MaskedArray':
              field.set_fill_value(np.NaN)
              return np.ma.array(field).min(axis=axis)
        elif (np.isnan(np.sum(field)) and (type(field).__name__ not in 'MaskedArray')):
              return np.ma.masked_invalid(field).min(axis=axis)
        else: return np.array(field).min(axis=axis)

## compute max
## author AS + AC
def max(field,axis=None):
        if field is None: return None
        if type(field).__name__=='MaskedArray':
              field.set_fill_value(np.NaN)
              return np.ma.array(field).max(axis=axis)
        elif (np.isnan(np.sum(field)) and (type(field).__name__ not in 'MaskedArray')):
              return np.ma.masked_invalid(field).max(axis=axis)
        else: return np.array(field).max(axis=axis)

## compute mean
## author AS + AC
def mean(field,axis=None):
        if field is None: return None
        else:
           if type(field).__name__=='MaskedArray':
              field.set_fill_value(np.NaN)
              zout=np.ma.array(field).mean(axis=axis)
              if axis is not None:
                 zout.set_fill_value(np.NaN)
                 return zout.filled()
              else: return zout
           elif (np.isnan(np.sum(field)) and (type(field).__name__ not in 'MaskedArray')):
              zout=np.ma.masked_invalid(field).mean(axis=axis)
              if axis is not None:
                 zout.set_fill_value(np.NaN)
                 return zout.filled()
              else: return zout
           else:
              return np.array(field).mean(axis=axis)

## compute sum
## author AS + AC
def sum(field,axis=None):
        if field is None: return None
        else:
           if type(field).__name__=='MaskedArray':
              field.set_fill_value(np.NaN)
              zout=np.ma.array(field).sum(axis=axis)
              if axis is not None:
                 zout.set_fill_value(np.NaN)
                 return zout.filled()
              else: return zout
           elif (np.isnan(np.sum(field)) and (type(field).__name__ not in 'MaskedArray')):
              zout=np.ma.masked_invalid(field).sum(axis=axis)
              if axis is not None:
                 zout.set_fill_value(np.NaN)
                 return zout.filled()
              else: return zout
           else:
              return np.array(field).sum(axis=axis)

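## usage sketch for the NaN-aware reductions above (illustrative, not executed)
# >>> a = np.array([[1.,2.],[np.NaN,4.]])
# >>> mean(a,axis=0)     # NaN handled through masking: array([ 1.,  3.])
# >>> sum(a,axis=1)      # array([ 3.,  4.])
# >>> max(a)             # 4.0
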
## compute mean over bins
## author AS
def meanbin(y,x,bins):
    bins.sort() # sorting is needed for binning
    meanvalues = []
    for iii in range(len(bins)):
        ## GET VALUES OVER RELEVANT INTERVALS
        if iii == 0:
          ind = x<bins[iii+1] ; ys = y[ind]
        elif iii == len(bins)-1:
          ind = x>=bins[iii-1] ; ys = y[ind]
        else:
          ind = x>=bins[iii-1] ; interm = x[ind] ; intermf = y[ind]
          ind = interm<bins[iii+1] ; xs = interm[ind] ; ys = intermf[ind]
        ## COMPUTE MEAN and TREAT NAN CASE
        meanvalues.append(mean(ys))
    ## RETURN A NUMPY ARRAY
    meanvalues = np.array(meanvalues)
    return meanvalues

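## usage sketch for meanbin (illustrative, not executed): one mean of y per interval defined by bins
## -- bins must be a list (it is sorted in place)
# >>> x = np.linspace(0.,10.,101) ; y = 2.*x
# >>> meanbin(y,x,[2.,5.,8.])
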
## compute perturbation to mean
## -- the whole dimension must exist!
## author AS
def perturbation(field,axis=None):
    # calculate mean (averaged dim is reduced)
    mm = mean(field,axis=axis)
    # include back the reduced dim
    if field.ndim == 4: mm = np.tile(mm,(field.shape[axis],1,1,1))
    elif field.ndim == 3: mm = np.tile(mm,(field.shape[axis],1,1))
    elif field.ndim == 2: mm = np.tile(mm,(field.shape[axis],1))
    # tiling puts the restored dim first: move it back to position axis
    # (a plain reshape would scramble values whenever axis is not 0)
    mm = np.rollaxis(mm,0,axis+1)
    # compute perturbations
    field = field - mm
    return field

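## usage sketch for perturbation (illustrative, not executed)
## -- hypothetical 3D field ordered (time,lat,lon): remove the time mean to get the eddy part
# >>> field = np.random.randn(10,48,64)
# >>> prime = perturbation(field,axis=0)
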
################
#### SMOOTH ####
################
### TBD: make it work with missing values

## iterated 2D smoothing: apply smooth2d n times
def smooth2diter(field,n=1):
        count = 0
        result = field
        while count < n:
            result = smooth2d(result, window=2)
            count = count + 1
        return result

## Author: AS. uses gauss_kern and blur_image.
def smooth2d(field, window=10):
        ## actually blur_image could work with different coeff on x and y
        if True in np.isnan(field):
            print "!! ERROR !! Smooth is a disaster with missing values. This will be fixed."
            exit()
        if window > 1:  result = blur_image(field,int(window))
        else:           result = field
        return result

## FROM COOKBOOK http://www.scipy.org/Cookbook/SignalSmooth
def gauss_kern(size, sizey=None):
        # Returns a normalized 2D gauss kernel array for convolutions
        size = int(size)
        if not sizey:
                sizey = size
        else:
                sizey = int(sizey)
        x, y = np.mgrid[-size:size+1, -sizey:sizey+1]
        g = np.exp(-(x**2/float(size)+y**2/float(sizey)))
        return g / g.sum()

## FROM COOKBOOK http://www.scipy.org/Cookbook/SignalSmooth
def blur_image(im, n, ny=None):
        # blurs the image by convolving with a gaussian kernel of typical size n.
        # The optional keyword argument ny allows for a different size in the y direction.
        g = gauss_kern(n, sizey=ny)
        improc = sp_signal.convolve(im, g, mode='same')
        return improc

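## usage sketch for the smoothing helpers above (illustrative, not executed; fields must be free of NaN)
# >>> field = np.random.randn(50,50)
# >>> once   = smooth2d(field,window=5)    # single gaussian blur of typical size 5
# >>> thrice = smooth2diter(field,n=3)     # three successive blurs of size 2
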
## FROM COOKBOOK http://www.scipy.org/Cookbook/SignalSmooth
def smooth1d(x,window=11,window_type='hanning'):
    """smooth the data using a window with requested size.
    This method is based on the convolution of a scaled window with the signal.
    The signal is prepared by introducing reflected copies of the signal
    (with the window size) at both ends so that transient parts are minimized
    in the beginning and end part of the output signal.
    input:
        x: the input signal
        window: the dimension of the smoothing window; should be an odd integer
        window_type: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
            a flat window will produce a moving average smoothing.
    output:
        the smoothed signal
    example:
    t=np.arange(-2,2,0.1)
    x=np.sin(t)+np.random.randn(len(t))*0.1
    y=smooth1d(x)
    see also:
    numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
    scipy.signal.lfilter
    TODO: the window_type parameter could be the window itself (an array) instead of a string
    """
    if True in np.isnan(x):
        print "!! ERROR !! Smooth is a disaster with missing values. This will be fixed."
        exit()
    x = np.array(x)
    if x.ndim != 1:
        raise ValueError, "smooth1d only accepts 1-dimensional arrays."
    if x.size < window:
        raise ValueError, "Input vector needs to be bigger than window size."
    if window<3:
        return x
    if not window_type in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError, "window_type must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
    s=np.r_[x[window-1:0:-1],x,x[-1:-window:-1]]
    #print(len(s))
    if window_type == 'flat': #moving average
        w=np.ones(window,'d')
    else:
        w=eval('np.'+window_type+'(window)')
    y=np.convolve(w/w.sum(),s,mode='valid')
    return y

########################
#### TIME CONVERTER ####
########################

#  mars_sol2ls
#  author T. Navarro
#  -----------------
#  convert a given martian day number (sol)
#  into the corresponding solar longitude, Ls (in deg),
#  where sol=0 corresponds to Ls=0, the
#  northern hemisphere spring equinox.
#  -----------------
def mars_sol2ls(soltabin,forcecontinuity=True):
      year_day  = 668.6
      peri_day  = 485.35
      e_elips   = 0.09340
      radtodeg  = 57.2957795130823
      timeperi  = 1.90258341759902
      if type(soltabin).__name__ in ['int','float','float32','float64']:
        soltab=[soltabin]
        solout=np.zeros([1])
      else:
        soltab=soltabin
        solout=np.zeros([len(soltab)])
      i=0
      for sol in soltab:
         zz=(sol-peri_day)/year_day
         zanom=2.*np.pi*(zz-np.floor(zz))
         xref=np.abs(zanom)
#  The equation zx0 - e * sin (zx0) = xref is solved by Newton's method
         zx0=xref+e_elips*m.sin(xref)
         iter=0
         while iter <= 10:
            iter=iter+1
            zdx=-(zx0-e_elips*m.sin(zx0)-xref)/(1.-e_elips*m.cos(zx0))
            if(np.abs(zdx) <= (1.e-7)):
              continue
            zx0=zx0+zdx
         zx0=zx0+zdx
         if(zanom < 0.): zx0=-zx0
# compute true anomaly zteta, now that eccentric anomaly zx0 is known
         zteta=2.*m.atan(m.sqrt((1.+e_elips)/(1.-e_elips))*m.tan(zx0/2.))
# compute Ls
         ls=zteta-timeperi
         if(ls < 0.): ls=ls+2.*np.pi
         if(ls > 2.*np.pi): ls=ls-2.*np.pi
# convert Ls to deg.
         ls=radtodeg*ls
         solout[i]=ls
         i=i+1
      if forcecontinuity:
         for iii in range(len(soltab)-1):
           while solout[iii+1] - solout[iii] > 180. :  solout[iii+1] = solout[iii+1] - 360.
           while solout[iii] - solout[iii+1] > 180. :  solout[iii+1] = solout[iii+1] + 360.
      return solout

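## usage sketch for mars_sol2ls (illustrative, not executed)
# >>> mars_sol2ls(100.)                      # Ls (deg) for one sol, returned as a 1-element array
# >>> mars_sol2ls(np.arange(0.,669.,50.))    # Ls for an array of sols (forcecontinuity avoids 360 deg jumps)
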
# mars_date
# author A. Spiga
# ------------------------------
# get Ls, sol, utc from a string with format yyyy-mm-dd_hh:00:00
# -- argument timechar is a vector of such strings indicating dates
# -- example: timechar = nc.variables['Times'][:] in mesoscale files
# NB: uses mars_sol2ls function above
# ------------------------------
def mars_date(timechar):
    # some preliminary information
    days_in_month = [61, 66, 66, 65, 60, 54, 50, 46, 47, 47, 51, 56]
    plus_in_month = [ 0, 61,127,193,258,318,372,422,468,515,562,613]
    # get utc and sol from strings
    utc = [] ; sol = []
    for zetime in timechar:
        dautc = float(zetime[11]+zetime[12]) + float(zetime[14]+zetime[15])/37.
        dasol = dautc / 24.
        dasol = dasol + plus_in_month[ int(zetime[5]+zetime[6])-1 ] + int(zetime[8]+zetime[9])
        dasol = dasol - 1 ## GCM sols start at 0
        utc.append(dautc)
        sol.append(dasol)
    sol = np.array(sol)
    utc = np.array(utc)
    # get ls from sol
    ls = mars_sol2ls(sol)
    return ls, sol, utc

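## usage sketch for mars_date (illustrative, not executed)
## -- hypothetical date strings following the yyyy-mm-dd_hh:00:00 format above (the year itself is not used)
# >>> ls, sol, utc = mars_date(["2024-01-01_00:00:00","2024-01-01_06:00:00"])
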
# timecorrect
# author A. Spiga
# -----------------------------
# ensure time axis is monotonic
# correct negative values
# -----------------------------
def timecorrect(time):
    for ind in range(len(time)-1):
        if time[ind] < 0.: time[ind] = time[ind] + 24.
        if time[ind+1] < time[ind]: time[ind+1] = time[ind+1] + 24.
    return time

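## usage sketch for timecorrect (illustrative, not executed)
# >>> timecorrect(np.array([22.,23.,0.,1.]))   # --> array([ 22.,  23.,  24.,  25.])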