# -*- coding: iso-8859-15 -*-
# Generic program to transform ASCII observational data in columns into a netCDF file
# L. Fita, LMD February 2015
## e.g. # create_OBSnetcdf.py -c '#' -d ACAR/description.dat -e space -f ACAR/2012/10/ACAR_121018.asc -g true -t 19491201000000,seconds -k trajectory

import numpy as np
from netCDF4 import Dataset as NetCDFFile
import os
import re
from optparse import OptionParser

# version
version=1.2

# Filling values for floats, integers, strings and booleans
fillValueF = 1.e20
fillValueI = -99999
fillValueS = '---'
# Assumed filling value for booleans (needed by getting_fixedline)
fillValueB = False

# Length of the string variables
StringLength = 200

# Size of the map for the complementary variables/maps
Ndim2D = 100

main = 'create_OBSnetcdf.py'
errormsg = 'ERROR -- error -- ERROR -- error'
warnmsg = 'WARNING -- warning -- WARNING -- warning'

fillValue = 1.e20

def searchInlist(listname, nameFind):
    """ Function to search a value within a list
    listname = list
    nameFind = value to find
    >>> searchInlist(['1', '2', '3', '5'], '5')
    True
    """
    for x in listname:
      if x == nameFind:
        return True
    return False


def typemod(value, typeval):
    """ Function to give back a value in a given dtype
    >>> print(type(typemod(8.2223, 'np.float64')))
    <type 'numpy.float64'>
    >>> print(type(typemod(8.2223, 'tuple')))
    <type 'tuple'>
    """
    fname='typemod'

    if typeval == 'int' or typeval == 'I':
        return int(value)
    elif typeval == 'long':
        return long(value)
    elif typeval == 'float' or typeval == 'F' or typeval == 'R':
        return float(value)
    elif typeval == 'complex':
        return complex(value)
    elif typeval == 'str' or typeval == 'S':
        return str(value)
    elif typeval == 'bool':
        return bool(value)
    elif typeval == 'B':
        return Str_Bool(value)
    elif typeval == 'list':
        newval = []
        newval.append(value)
        return newval
    elif typeval == 'dic':
        newval = {}
        newval[value] = value
        return newval
    elif typeval == 'tuple':
        newv = []
        newv.append(value)
        newval = tuple(newv)
        return newval
    elif typeval == 'np.int8':
        return np.int8(value)
    elif typeval == 'np.int16':
        return np.int16(value)
    elif typeval == 'np.int32':
        return np.int32(value)
    elif typeval == 'np.int64':
        return np.int64(value)
    elif typeval == 'np.uint8':
        return np.uint8(value)
    elif typeval == 'np.uint16':
        return np.uint16(value)
    elif typeval == 'np.uint32':
        return np.uint32(value)
    elif typeval == 'np.uint64':
        return np.uint64(value)
    elif typeval == 'np.float':
        return np.float(value)
    elif typeval == 'np.float16':
        return np.float16(value)
    elif typeval == 'np.float32':
        return np.float32(value)
    elif typeval == 'float32':
        return np.float32(value)
    elif typeval == 'np.float64' or typeval == 'D':
        return np.float64(value)
    elif typeval == 'float64':
        return np.float64(value)
    elif typeval == 'np.complex':
        return np.complex(value)
    elif typeval == 'np.complex64':
        return np.complex64(value)
    elif typeval == 'np.complex128':
        return np.complex128(value)
    else:
        print errormsg
        print fname + ':  data type "' + typeval + '" is not ready !'
        print errormsg
        quit(-1)

    return

def str_list_k(string, cdiv, kind):
    """ Function to obtain a list of types of values from a string giving a split character
      string= String from which to obtain a list ('None' for None)
      cdiv= character to use to split the string
      kind= kind of desired value (as string like: 'np.float', 'int', 'np.float64', ....)
    >>> str_list_k('1:@#:$:56', ':', 'S')
    ['1', '@#', '$', '56']
    >>> str_list_k('1:3.4:12.3', ':', 'np.float64')
    [1.0, 3.3999999999999999, 12.300000000000001]
    """
    fname = 'str_list_k'

    if string.find(cdiv) != -1:
        listv = string.split(cdiv)
    else:
        if string == 'None':
            listv = None
        else:
            listv = [string]

    if listv is not None:
        finalist = []
        for lv in listv:
            finalist.append(typemod(lv, kind))
    else:
        finalist = None

    return finalist


def set_attribute(ncvar, attrname, attrvalue):
    """ Sets a value of an attribute of a netCDF variable. Removes previous attribute value if exists
    ncvar = object netcdf variable
    attrname = name of the attribute
    attrvalue = value of the attribute
    """
    import numpy as np
    from netCDF4 import Dataset as NetCDFFile

    attvar = ncvar.ncattrs()
    if searchInlist(attvar, attrname):
        attr = ncvar.delncattr(attrname)

    attr = ncvar.setncattr(attrname, attrvalue)

    return ncvar
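
## e.g. (illustrative only; 'onewnc' is assumed to be an open writable netCDF file
##   with a variable 'tas'; not executed as part of the script)
# set_attribute(onewnc.variables['tas'], 'cell_methods', 'time: mean')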

def set_attributek(ncv, attrname, attrval, attrkind):
    """ Sets a value of an attribute of a netCDF variable with a kind. Removes previous attribute value if exists
    ncvar = object netcdf variable
    attrname = name of the attribute
    attrvalue = value of the attribute
    atrtrkind = kind of attribute: 'S', string ('!' as spaces); 'U', unicode ('!' as spaces); 'I', integer; 
      'Inp32', numpy integer 32; 'R', ot 'F' real; ' npfloat', np.float; 'D', np.float64
    """
    fname = 'set_attributek'
    validk = {'S': 'string', 'U': 'unicode', 'I': 'integer',                         \
      'Inp32': 'integer long (np.int32)', 'F': 'float', 'R': 'float',                \
      'npfloat': 'np.float', 'D': 'float long (np.float64)'}

    if type(attrkind) == type('s'):
        if attrkind == 'S':
            attrvalue = str(attrval.replace('!', ' '))
        elif attrkind == 'U':
            attrvalue = unicode(attrval.replace('!',' '))
        elif attrkind == 'I':
            attrvalue = np.int(attrval)
        elif attrkind == 'R' or attrkind == 'F' :
            attrvalue = float(attrval)
        elif attrkind == 'npfloat':
            attrvalue = np.float(attrval)
        elif attrkind == 'D':
            attrvalue = np.float64(attrval)
        else:
            print errormsg
            print '  ' + fname + ": '" + attrkind + "' kind of attribute is not ready!"
            print '  valid values: _______'
            for key in validk.keys():
                print '    ', key,':', validk[key]
            quit(-1)
    else:
        if attrkind == type(str('a')):
            attrvalue = str(attrval.replace('!', ' '))
        elif attrkind == type(unicode('a')):
            attrvalue = unicode(attrval.replace('!',' '))
        elif attrkind == type(np.int(1)):
            attrvalue = np.int(attrval)
        elif attrkind == np.dtype('i'):
            attrvalue = np.int32(attrval)
        elif attrkind == type(float(1.)):
            attrvalue = float(attrval)
        elif attrkind == type(np.float(1.)):
            attrvalue = np.float(attrval)
        elif attrkind == np.dtype('float32'):
            attrvalue = np.array(attrval, dtype='f')
        elif attrkind == type(np.float32(1.)):
            attrvalue = np.float32(attrval)
        elif attrkind == type(np.float64(1.)):
            attrvalue = np.float64(attrval)
        elif attrkind == type(np.array([1.,2.])):
            attrvalue = attrval
        else:
            print errormsg
            print '  ' + fname + ": '" + attrkind + "' kind of attribute is not ready!"
            print '  valid values: _______'
            for key in validk.keys():
                print '    ', key,':', validk[key]
            quit(-1)

    attvar = ncv.ncattrs()
    if searchInlist(attvar, attrname):
        attr = ncv.delncattr(attrname)

    if attrname == 'original_subroutines_author': 
        attrvalue = 'Cindy Bruyere'
    attr = ncv.setncattr(attrname, attrvalue)
        
    return attr
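
## e.g. (illustrative only; 'onewnc' is assumed to be an open writable netCDF file;
##   not executed as part of the script)
# set_attributek(onewnc, 'Conventions', 'CF-1.6', 'S')
# set_attributek(onewnc, 'data_version', '1.2', 'F')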


def basicvardef(varobj, vstname, vlname, vunits):
    """ Function to give the basic attributes to a variable
    varobj= netCDF variable object
    vstname= standard name of the variable
    vlname= long name of the variable
    vunits= units of the variable
    """
    attr = varobj.setncattr('standard_name', vstname)
    attr = varobj.setncattr('long_name', vlname)
    attr = varobj.setncattr('units', vunits)

    return
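
## e.g. (illustrative only; 'onewnc' is assumed to be an open writable netCDF file
##   with a 'time' dimension; not executed as part of the script)
# newvar = onewnc.createVariable('tas', 'f4', ('time',))
# basicvardef(newvar, 'tas', 'air temperature', 'K')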

def remove_NONascii(string):
    """ Function to remove that characters which are not in the standard 127 ASCII
      string= string to transform
    >>> remove_NONascii('Lluís')
    Lluis
    """
    fname = 'remove_NONascii'

    newstring = string

    RTFchar= ['á', 'é', 'í', 'ó', 'ú', 'à', 'è', 'ì', 'ò', 'ù', 'â', 'ê', 'î', 'ô',  \
      'û', 'ä', 'ë', 'ï', 'ö', 'ü', 'ç', 'ñ','æ', 'œ', 'Á', 'É', 'Í', 'Ó', 'Ú', 'À', \
      'È', 'Ì', 'Ò', 'Ù', 'Â', 'Ê', 'Î', 'Ô', 'Û', 'Ä', 'Ë', 'Ï', 'Ö', 'Ü', 'Ç', 'Ñ',\
      'Æ', 'Œ', '\n', '\t']
    ASCchar= ['a', 'e', 'i', 'o', 'u', 'a', 'e', 'i', 'o', 'u', 'a', 'e', 'i', 'o',  \
      'u', 'a', 'e', 'i', 'o', 'u', 'c', 'n','ae', 'oe', 'A', 'E', 'I', 'O', 'U', 'A', \
      'E', 'I', 'O', 'U', 'A', 'E', 'I', 'O', 'U', 'A', 'E', 'I', 'O', 'U', 'C', 'N',\
      'AE', 'OE', '', ' ']

    Nchars = len(RTFchar)
    for ichar in range(Nchars):
        foundchar = string.find(RTFchar[ichar])
        if foundchar != -1:
            newstring = newstring.replace(RTFchar[ichar], ASCchar[ichar])

    return newstring

def read_description(fdobs, dbg):
    """ reads the description file of the observational data-set 
    fdobs= descriptive observational data-set
    dbg= boolean argument for debugging
    * Each station should have a 'description.dat' file with:
      institution=Institution who creates the data
      department=Department within the institution
      scientists=names of the data producers
      contact=contact of the data producers
      description=description of the observations
      acknowledgement=sentence of acknowledgement
      comment=comment for the measurements

      MissingValue='|' list of ASCII values for missing values within the data 
        (as they appear!, 'empty' for no value at all)
      comment=comments

      varN='|' list of variable names
      varLN='|' list of long variable names
      varU='|' list units of the variables
      varBUFR='|' list BUFR code of the variables
      varTYPE='|' list of variable types ('D', 'F', 'I', 'I64', 'S')
      varOPER='|' list of operations to do to the variables to meet their units ([oper],[val])
        [oper]:
          -, nothing
          sumc, add [val]
          subc, subtract [val]
          mulc, multiply by [val]
          divc, divide by [val]
          rmchar,[val],[pos], remove [val] characters from [pos]='B', beginning, 'E', end
      NAMElon=name of the variable with the longitude (x position)
      NAMElat=name of the variable with the latitude (y position)
      NAMEheight=name of the variable with the height (e.g. ind_alt)
      NAMEtime=name of the variable with the time
      FMTtime=format of the time (as in C/python 'strftime' codes, or 'CFtime' for already CF-like time)

    """
    fname = 'read_description'

    descobj = open(fdobs, 'r')
    desc = {}

    namevalues = []

    for line in descobj:
        if line[0:1] != '#' and len(line) > 1 :
            descn = remove_NONascii(line.split('=')[0])
            descv = remove_NONascii(line.split('=')[1])
            namevalues.append(descn)
            if descn[0:3] == 'var':
                desc[descn] = descv.split('|')
            elif descn == 'MissingValue':
                desc[descn] = []
                for dn in descv.split('|'): desc[descn].append(dn)
                print '  ' + fname + ': missing values found:',desc[descn]
            else:
                desc[descn] = descv
    if not desc.has_key('varOPER'): desc['varOPER'] = None

    if dbg:
        Nvars = len(desc['varN'])
        print '  ' + fname + ": description content of '" + fdobs + "'______________"
        for varn in namevalues:
            if varn[0:3] != 'var':
                print '    ' + varn + ':',desc[varn]
            elif varn == 'varN':
                print '    * Variables:'
                for ivar in range(Nvars):
                    varname = desc['varN'][ivar]
                    varLname = desc['varLN'][ivar]
                    varunits = desc['varU'][ivar]
                    if desc.has_key('varBUFR'): 
                        varbufr = desc['varBUFR'][ivar]
                    else:
                        varbufr = None
                    if desc['varOPER'] is not None:
                        opv = desc['varOPER'][ivar]
                    else:
                        opv = None
                    print '      ', ivar, varname+':',varLname,'[',varunits, \
                       ']','bufr code:',varbufr,'oper:',opv

    descobj.close()

    return desc
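
## e.g. a minimal description file (illustrative only; all values are made up):
##   institution=My Institution
##   department=My Department
##   scientists=A. Nobody
##   contact=someone@somewhere
##   description=surface station measurements
##   acknowledgement=data kindly provided by ...
##   comment=none
##   MissingValue=-9999|empty
##   varN=date|lon|lat|ind_alt|tas
##   varLN=date of measurement|longitude|latitude|altitude|air temperature
##   varU=-|degrees_East|degrees_North|m|K
##   varTYPE=S|F|F|F|F
##   varOPER=-|-|-|-|sumc,273.15
##   NAMElon=lon
##   NAMElat=lat
##   NAMEheight=ind_alt
##   NAMEtime=date
##   FMTtime=%Y%m%d%H%M%S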

def value_fmt(val, miss, op, fmt):
    """ Function to transform an ASCII value to a given format
      val= value to transform
      miss= list of possible missing values
      op= operation to perform to the value
      fmt= format to take:
        'D': float double precision
        'F': float
        'I': integer
        'I64': 64-bits integer
        'S': string
    >>> value_fmt('9876.12', ['-999'], '-', 'F')
    9876.12
    """

    fname = 'value_fmt'

    aopers = ['sumc','subc','mulc','divc', 'rmchar']

    fmts = ['D', 'F', 'I', 'I64', 'S']
    Nfmts = len(fmts)

    if not searchInlist(miss,val):
        if searchInlist(miss,'empty') and len(val) == 0: 
            newval = None
        else:
            if op != '-':
                opern = op.split(',')[0]
                operv = op.split(',')[1]

                if not searchInlist(aopers,opern):
                    print errormsg
                    print '  ' + fname + ": operation '" + opern + "' not ready!!"
                    print '    availables:',aopers
                    quit(-1)
            else:
                opern = 'sumc'
                operv = '0'

            if not searchInlist(fmts, fmt):
                print errormsg
                print '  ' + fname + ": format '" + fmt + "' not ready !!"
                quit(-1)
            else:
                if fmt == 'D':
                    opv = np.float64(operv)
                    if opern == 'sumc': newval = np.float64(val) + opv
                    elif opern == 'subc': newval = np.float64(val) - opv
                    elif opern == 'mulc': newval = np.float64(val) * opv
                    elif opern == 'divc': newval = np.float64(val) / opv
                    elif opern == 'rmchar': 
                        opv = int(operv)
                        Lval = len(val)
                        if op.split(',')[2] == 'B':
                            newval = np.float64(val[opv:Lval])
                        elif op.split(',')[2] == 'E':
                            newval = np.float64(val[0:Lval-opv])
                        else:
                            print errormsg
                            print '  ' + fname + ": operation '" + opern + "' does" +\
                              " not work with '" + op.split(',')[2] + "' !!"
                            quit(-1)
                elif fmt == 'F':
                    opv = np.float(operv)
                    if opern == 'sumc': newval = np.float(val) + opv
                    elif opern == 'subc': newval = np.float(val) - opv
                    elif opern == 'mulc': newval = np.float(val) * opv
                    elif opern == 'divc': newval = np.float(val) / opv
                    elif opern == 'rmchar': 
                        opv = int(operv)
                        Lval = len(val)
                        if op.split(',')[2] == 'B':
                            newval = np.float(val[opv:Lval+1])
                        elif op.split(',')[2] == 'E':
                            newval = np.float(val[0:Lval-opv])
                        else:
                            print errormsg
                            print '  ' + fname + ": operation '" + opern + "' not " +\
                              " work with '" + op.split(',')[2] + "' !!"
                            quit(-1)

                elif fmt == 'I':
                    opv = int(operv)
                    if opern == 'sumc': newval = int(val) + opv
                    elif opern == 'subc': newval = int(val) - opv
                    elif opern == 'mulc': newval = int(val) * opv
                    elif opern == 'divc': newval = int(val) / opv
                    elif opern == 'rmchar': 
                        opv = int(operv)
                        Lval = len(val)
                        if op.split(',')[2] == 'B':
                            newval = int(val[opv:Lval+1])
                        elif op.split(',')[2] == 'E':
                            newval = int(val[0:Lval-opv])
                        else:
                            print errormsg
                            print '  ' + fname + ": operation '" + opern + "' not " +\
                              " work with '" + op.split(',')[2] + "' !!"
                            quit(-1)
                elif fmt == 'I64':
                    opv = np.int64(operv)
                    if opern == 'sumc': newval = np.int64(val) + opv
                    elif opern == 'subc': newval = np.int64(val) - opv
                    elif opern == 'mulc': newval = np.int64(val) * opv
                    elif opern == 'divc': newval = np.int64(val) / opv
                    elif opern == 'rmchar': 
                        opv = int(operv)
                        Lval = len(val)
                        if op.split(',')[2] == 'B':
                            newval = np.int64(val[opv:Lval+1])
                        elif op.split(',')[2] == 'E':
                            newval = np.int64(val[0:Lval-opv])
                        else:
                            print errormsg
                            print '  ' + fname + ": operation '" + opern + "' not " +\
                              " work with '" + op.split(',')[2] + "' !!"
                            quit(-1)
                elif fmt == 'S':
                    if opern == 'rmchar': 
                        opv = int(operv)
                        Lval = len(val)
                        if op.split(',')[2] == 'B':
                            newval = val[opv:Lval+1]
                        elif op.split(',')[2] == 'E':
                            newval = val[0:Lval-opv]
                        else:
                            print errormsg
                            print '  ' + fname + ": operation '" + opern + "' not " +\
                              " work with '" + op.split(',')[2] + "' !!"
                            quit(-1)
                    else:
                        newval = val

    else:
        newval = None

    return newval
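
## e.g. (illustrative only) removing 1 trailing character before converting to float:
# value_fmt('23.4;', ['-999'], 'rmchar,1,E', 'F')   -->  23.4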

def getting_fixedline(line, cuts, types, dbg=False):
    """ Function to get the values from a line of text with fixed lenght of different values
      line: line with values
      cuts: character number where a value ends
      types: consecutive type of values
        'I': integer
        'R': real
        'D': float64
        'S': string
        'B': boolean
      dbg: debug mode (default False)
    >>> Sline='   87007 03012015  25.6   6.4             9.4    5   15'
    >>> getting_fixedline(Sline, [8, 17, 23, 29, 36, 40, 45, 50], ['I', 'R', 'R', 'R', 'I', 'I', 'R', 'R', 'R'])
    [87007, 3012015.0, 25.6, 6.4, -99999, -99999, 9.4, 5.0, 15.0]
    """
    fname = 'getting_fixedline'

    if len(cuts) + 1 != len(types):
        print errormsg
        print '  ' + fname + ': The number of types:', len(types), 'must be the',    \
          'number of cuts + 1:', len(cuts)
        quit(-1)

    values = []
    val = line[0:cuts[0]]
    if len(val.replace(' ','')) >= 1:
        values.append(typemod(val, types[0]))
    else:
        if types[0] == 'I': values.append(fillValueI)
        elif types[0] == 'R': values.append(fillValueF)
        elif types[0] == 'D': values.append(fillValueF)
        elif types[0] == 'S': values.append(fillValueS)
        elif types[0] == 'B': values.append(fillValueB)

    Ncuts = len(cuts)
    for ic in range(1,Ncuts):
        val = line[cuts[ic-1]:cuts[ic]]
        if dbg: print ic, ':', val, '-->', types[ic]
        if len(val.replace(' ','')) >= 1:
            values.append(typemod(val, types[ic]))
        else:
            if types[ic] == 'I': values.append(fillValueI)
            elif types[ic] == 'R': values.append(fillValueF)
            elif types[ic] == 'D': values.append(fillValueF)
            elif types[ic] == 'S': values.append(fillValueS)
            elif types[ic] == 'B': values.append(fillValueB)

    # Last value
    Lline = len(line)
    val = line[cuts[Ncuts-1]:Lline]
    if len(val.replace(' ','')) >= 1:
        values.append(typemod(val, types[Ncuts]))
    else:
        if types[Ncuts] == 'I': values.append(fillValueI)
        elif types[Ncuts] == 'R': values.append(fillValueF)
        elif types[Ncuts] == 'D': values.append(fillValueF)
        elif types[Ncuts] == 'S': values.append(fillValueS)
        elif types[Ncuts] == 'B': values.append(fillValueB)

    return values


def getting_fixedline_NOk(line, cuts, miss, dbg=False):
    """ Function to get the values from a line of text with fixed lenght of 
        different values without types
      line: line with values
      cuts: character number where a value ends
      miss: character form issed values
      dbg: debug mode (default False)
    >>> Sline='   87007 03012015  25.6   6.4             9.4    5   15'
    >>> getting_fixedline_NOk(Sline, [8, 17, 23, 29, 36, 40, 45, 50], '#')
    ['87007', '03012015', '25.6', '6.4', '#', '#', '9.4', '5', '15']
    """
    fname = 'getting_fixedline_NOk'

    values = []
    val = line[0:cuts[0]]
    if len(val.replace(' ','')) >= 1:
        values.append(val.replace(' ',''))
    else:
        values.append(miss)

    Ncuts = len(cuts)
    for ic in range(1,Ncuts):
        val = line[cuts[ic-1]:cuts[ic]]
        if dbg: print ic, ':', val
        if len(val.replace(' ','')) >= 1:
            values.append(val.replace(' ',''))
        else:
            values.append(miss)

    # Last value
    Lline = len(line)
    val = line[cuts[Ncuts-1]:Lline]
    if len(val.replace(' ','')) >= 1:
        values.append(val.replace(' ',''))
    else:
        values.append(miss)

    return values

def read_datavalues(dataf, comchar, colchar, fmt, oper, miss, varns, dbg):
    """ Function to read from an ASCII file values in column
      dataf= data file
      comchar= list of the characters indicating comment in the file
      colchar= character which indicate end of value in the column
      dbg= debug mode or not
      fmt= list of kind of values to be found
      oper= list of operations to perform
      miss= missing value
      varns= list of name of the variables to find
    """

    fname = 'read_datavalues'

    ofile = open(dataf, 'r')
    Nvals = len(fmt)

    if oper is None:
        opers = []
        for ioper in range(Nvals):
            opers.append('-')
    else:
        opers = oper

    finalvalues = {}

    iline = 0
    for line in ofile:
        line = line.replace('\n','').replace(chr(13),'')
        if not searchInlist(comchar,line[0:1]) and len(line) > 1:
            # Getting values
            if colchar[0:4] != 'None': 
                values0 = line.split(colchar)
            else:
                ltypes=str_list_k(colchar[5:len(colchar)+1],',','I')
                values0 = getting_fixedline_NOk(line, ltypes, miss[0], dbg=dbg)
# Removing no-value columns
            values = []
            for iv in values0:
                if len(iv) > 0: values.append(iv)

            Nvalues = len(values)
# Checking for weird characters at the end of lines (use it to check)
#            if values[Nvalues-1][0:4] == '-999':
#                print line,'last v:',values[Nvalues-1],'len:',len(values[Nvalues-1])
#                for ic in range(len(values[Nvalues-1])):
#                    print ic,ord(values[Nvalues-1][ic:ic+1])
#                quit()

            if len(values[Nvalues-1]) == 0:
                Nvalues = Nvalues - 1
                
            if Nvalues != Nvals:
                print warnmsg
                print '  ' + fname + ': number of formats:',Nvals,' and number of ', \
                  'values:',Nvalues,' with split character *' + colchar +            \
                  '* does not coincide!!'
                print '    * what is found is ________'
                if Nvalues > Nvals:
                    Nshow = Nvals
                    for ivar in range(Nshow):
                        print '    ',varns[ivar],'fmt:',fmt[ivar],'value:',values[ivar]
                    print '      missing formats for:',values[Nshow:Nvalues+1]
                    print '      values not considered, continue'
                else:
                    Nshow = Nvalues
                    for ivar in range(Nshow):
                        print '   ',varns[ivar],'fmt:',fmt[ivar],'value:',values[ivar]
                    print '      excess of formats:',fmt[Nshow:Nvals+1]
                    quit(-1)

# Reading and transforming values
            if dbg: print '  ' + fname + ': values found _________'

            for ivar in range(Nvals):
                if dbg: 
                    print iline, varns[ivar],'value:',values[ivar],miss,opers[ivar], \
                      fmt[ivar]

                if iline == 0:
                    listvals = []
                    listvals.append(value_fmt(values[ivar], miss, opers[ivar],       \
                      fmt[ivar]))
                    finalvalues[varns[ivar]] = listvals
                else:
                    listvals = finalvalues[varns[ivar]]
                    listvals.append(value_fmt(values[ivar], miss, opers[ivar],       \
                      fmt[ivar]))
                    finalvalues[varns[ivar]] = listvals
        else:
# First line without values
            if iline == 0: iline = -1
    
        iline = iline + 1

    ofile.close()

    return finalvalues
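
## e.g. (illustrative only; the arguments would normally come from the description file)
# datavalues = read_datavalues('ACAR_121018.asc', ['#'], ' ', ['S','F','F','F','F'],
#   None, ['-9999'], ['date','lon','lat','ind_alt','tas'], False)
# datavalues['tas']  -->  list of float values, with None where the value was missing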

def writing_str_nc(varo, values, Lchar):
    """ Function to write string values in a netCDF variable as a chain of 1char values
    varo= netCDF variable object
    values = list of values to introduce
    Lchar = length of the string in the netCDF file
    """

    Nvals = len(values)

    for iv in range(Nvals):    
        stringv=values[iv]  
        charvals = np.chararray(Lchar)
        Lstr = len(stringv)
        charvals[Lstr:Lchar] = ''

        for ich in range(Lstr):
            charvals[ich] = stringv[ich:ich+1]

        varo[iv,:] = charvals

    return
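
## e.g. (illustrative only; 'onewnc' is assumed to be an open writable netCDF file
##   which already has the 'nst' and 'StrLength' dimensions)
# newvar = onewnc.createVariable('stname', 'c', ('nst','StrLength'))
# writing_str_nc(newvar, ['Paris', 'Buenos Aires'], StringLength)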

def Stringtimes_CF(tvals, fmt, Srefdate, tunits, dbg):
    """ Function to transform a given data in String formats to a CF date
      tvals= string temporal values
      fmt= format of the the time values
      Srefdate= reference date in [YYYY][MM][DD][HH][MI][SS] format
      tunits= units to use ('weeks', 'days', 'hours', 'minutes', 'seconds') 
      dbg= debug
    >>> Stringtimes_CF(['19760217082712','20150213101837'], '%Y%m%d%H%M%S', 
      '19491201000000', 'hours', False)
    [229784.45333333  571570.31027778]
    """
    import datetime as dt

    fname = 'Stringtimes_CF'

    dimt = len(tvals)

    yrref = int(Srefdate[0:4])
    monref = int(Srefdate[4:6])
    dayref = int(Srefdate[6:8])
    horref = int(Srefdate[8:10])
    minref = int(Srefdate[10:12])
    secref = int(Srefdate[12:14])
    refdate = dt.datetime( yrref, monref, dayref, horref, minref, secref)

    cftimes = np.zeros((dimt), dtype=np.float)

    Nfmt=len(fmt.split('%'))

    if dbg: print '  ' + fname + ': fmt=',fmt,'refdate:',Srefdate,'units:',tunits,   \
      'date dt_days dt_time deltaseconds CFtime _______'
    for it in range(dimt):

# Removing excess of sub-second decimals (keep up to 6)
        if fmt.split('%')[Nfmt-1] == 'f':
            tpoints = tvals[it].split('.')
            if len(tpoints[len(tpoints)-1]) > 6:
                milisec = '{0:.6f}'.format(np.float('0.'+tpoints[len(tpoints)-1]))[0:7]
                newtval = ''
                for ipt in range(len(tpoints)-1):
                    newtval = newtval + tpoints[ipt] + '.'
                newtval = newtval + str(milisec)[2:len(milisec)+1]
            else:
                newtval = tvals[it]
            tval = dt.datetime.strptime(newtval, fmt)
        else:
            tval = dt.datetime.strptime(tvals[it], fmt)

        deltatime = tval - refdate
        deltaseconds = deltatime.days*24.*3600. + deltatime.seconds +                \
          deltatime.microseconds/1000000.
        if tunits == 'weeks':
            deltat = 7.*24.*3600.
        elif tunits == 'days':
            deltat = 24.*3600.
        elif tunits == 'hours':
            deltat = 3600.
        elif tunits == 'minutes':
            deltat = 60.
        elif tunits == 'seconds':
            deltat = 1.
        else:
            print errormsg
            print '  ' + fname + ": time units '" + tunits + "' not ready !!"
            quit(-1)

        cftimes[it] = deltaseconds / deltat
        if dbg:
            print '  ' + tvals[it], deltatime, deltaseconds, cftimes[it]

    return cftimes

def adding_complementary(onc, dscn, okind, dvalues, tvals, refCFt, CFtu, Nd, dbg):
    """ Function to add complementary variables as function of the observational type
      onc= netCDF object file to add the variables
      dscn= description dictionary
      okind= observational kind
      dvalues= values
      tvals= CF time values
      refCFt= reference time of CF time (in [YYYY][MM][DD][HH][MI][SS])
      CFtu= CF time units
      Nd= size of the domain
      dbg= debugging flag
    """
    import numpy.ma as ma

    fname = 'adding_complementary'
  
# Kind of observations which require the integer lon/lat (for the 2D map)
    map2D=['multi-points', 'trajectory']

    SrefCFt = refCFt[0:4] +'-'+ refCFt[4:6] +'-'+ refCFt[6:8] + ' ' + refCFt[8:10] + \
      ':'+ refCFt[10:12] +':'+ refCFt[12:14]

    if dscn['NAMElon'] == '-' or dscn['NAMElat'] == '-':
        print errormsg
        print '  ' + fname + ": to complement a '" + okind + "' observation kind " + \
          " a given longitude ('NAMElon':",dscn['NAMElon'],") and latitude ('" +     \
          "'NAMElat:'", dscn['NAMElat'],') from the data has to be provided and ' +  \
          'any are given !!'
        quit(-1)

    if not dvalues.has_key(dscn['NAMElon']) or not dvalues.has_key(dscn['NAMElat']):
        print errormsg
        print '  ' + fname + ": observations do not have 'NAMElon':",                \
          dscn['NAMElon'],"and/or 'NAMElat:'", dscn['NAMElat'],' !!'
        print '    available data:',dvalues.keys()
        quit(-1)

    if okind == 'trajectory':
        if dscn['NAMEheight'] == '-':
            print warnmsg
            print '  ' + fname + ": to complement a '" + okind + "' observation " +  \
              "kind a given height ('NAMEheight':",dscn['NAMEheight'],"') might " +  \
              'be provided and any is given !!'
            quit(-1)

        if not dvalues.has_key(dscn['NAMEheight']):
            print errormsg
            print '  ' + fname + ": observations do not have 'NAMEtime':",           \
              dscn['NAMEtime'],' !!'
            print '    available data:',dvalues.keys()
            quit(-1)

    if searchInlist(map2D, okind):
# A new 2D map with the number of observations will be added, for which 'NAMElon' 
#   and 'NAMElat' are necessary. A NdxNd domain space size will be used. 
        onc.createDimension('lon2D',Nd)
        onc.createDimension('lat2D',Nd)
        lonvals = ma.masked_equal(dvalues[dscn['NAMElon']], None)
        latvals = ma.masked_equal(dvalues[dscn['NAMElat']], None)

        minlon = min(lonvals)
        maxlon = max(lonvals)
        minlat = min(latvals)
        maxlat = max(latvals)

        blon = (maxlon - minlon)/(Nd-1)
        blat = (maxlat - minlat)/(Nd-1)

        newvar = onc.createVariable( 'lon2D', 'f8', ('lon2D'))
        basicvardef(newvar, 'longitude', 'longitude map observations','degrees_East')
        newvar[:] = minlon + np.arange(Nd)*blon
        newattr = set_attribute(newvar, 'axis', 'X')

        newvar = onc.createVariable( 'lat2D', 'f8', ('lat2D'))
        basicvardef(newvar, 'latitude', 'latitude map observations', 'degrees_North')
        newvar[:] = minlat + np.arange(Nd)*blat
        newattr = set_attribute(newvar, 'axis', 'Y')

        if dbg: 
            print '  ' + fname + ': minlon=',minlon,'maxlon=',maxlon
            print '  ' + fname + ': minlat=',minlat,'maxlat=',maxlat
            print '  ' + fname + ': precision on x-axis=', blon*(Nd-1), 'y-axis=',   \
              blat*(Nd-1)
        
    if okind == 'multi-points':
        map2D = np.ones((Nd, Nd), dtype=np.float)*fillValueI

        dimt = len(tvals)
        Nlost = 0
        for it in range(dimt):
            lon = dvalues[dscn['NAMElon']][it]
            lat = dvalues[dscn['NAMElat']][it]
            if lon is not None and lat is not None:
                ilon = int((Nd-1)*(lon - minlon)/(maxlon - minlon))
                ilat = int((Nd-1)*(lat - minlat)/(maxlat - minlat))

                if map2D[ilat,ilon] == fillValueI: 
                    map2D[ilat,ilon] = 1
                else:
                    map2D[ilat,ilon] = map2D[ilat,ilon] + 1
                if dbg: print it, lon, lat, ilon, ilat, map2D[ilat,ilon]

        newvar = onc.createVariable( 'mapobs', 'f4', ('lat2D', 'lon2D'),             \
          fill_value = fillValueI)
        basicvardef(newvar, 'map_observations', 'number of observations', '-')
        newvar[:] = map2D
        newattr = set_attribute(newvar, 'coordinates', 'lon2D lat2D')

    elif okind == 'trajectory':
# A new 2D map with the trajectory 'NAMElon' and 'NAMElat' and maybe 'NAMEheight'
#   are necessary. A NdxNdxNd domain space size will be used. Using time as 
#   reference variable
        if dscn['NAMEheight'] == '-':
# No height
            map2D = np.ones((Nd, Nd), dtype=np.float)*fillValueI

            dimt = len(tvals)
            Nlost = 0
            if dbg: print ' time-step lon lat ix iy passes _______'
            for it in range(dimt):
                lon = dvalues[dscn['NAMElon']][it]
                lat = dvalues[dscn['NAMElat']][it]
                if lon is  not None and lat is not None:
                    ilon = int((Nd-1)*(lon - minlon)/(maxlon - minlon))
                    ilat = int((Nd-1)*(lat - minlat)/(maxlat - minlat))

                    if map2D[ilat,ilon] == fillValueI: 
                        map2D[ilat,ilon] = 1
                    else:
                        map2D[ilat,ilon] = map2D[ilat,ilon] + 1
                    if dbg: print it, lon, lat, ilon, ilat, map2D[ilat,ilon]

            newvar = onc.createVariable( 'trjobs', 'i', ('lat2D', 'lon2D'),          \
              fill_value = fillValueI)
            basicvardef(newvar, 'trajectory_observations', 'number of passes', '-' )
            newvar[:] = map2D
            newattr = set_attribute(newvar, 'coordinates', 'lon2D lat2D')

        else:
            ivn = 0
            for vn in dscn['varN']:
                if vn == dscn['NAMEheight']: 
                    zu = dscn['varU'][ivn]
                    break
                ivn = ivn + 1
                    
            onc.createDimension('z2D',Nd)
            zvals = ma.masked_equal(dvalues[dscn['NAMEheight']], None)
            minz = min(zvals)
            maxz = max(zvals)

            bz = (maxz - minz)/(Nd-1)

            newvar = onc.createVariable( 'z2D', 'f8', ('z2D'))
            basicvardef(newvar, 'z2D', 'z-coordinate map observations', zu)
            newvar[:] = minz + np.arange(Nd)*bz
            newattr = set_attribute(newvar, 'axis', 'Z')

            if dbg:
                print '  ' + fname + ': zmin=',minz,zu,'zmax=',maxz,zu
                print '  ' + fname + ': precision on z-axis=', bz*(Nd-1), zu

            map3D = np.ones((Nd, Nd, Nd), dtype=int)*fillValueI
            dimt = len(tvals)
            Nlost = 0
            if dbg: print ' time-step lon lat z ix iy iz passes _______'
            for it in range(dimt):
                lon = dvalues[dscn['NAMElon']][it]
                lat = dvalues[dscn['NAMElat']][it]
                z = dvalues[dscn['NAMEheight']][it]
                if lon is  not None and lat is not None and z is not None:
                    ilon = int((Nd-1)*(lon - minlon)/(maxlon - minlon))
                    ilat = int((Nd-1)*(lat - minlat)/(maxlat - minlat))
                    iz = int((Nd-1)*(z - minz)/(maxz - minz))

                    if map3D[iz,ilat,ilon] == fillValueI: 
                        map3D[iz,ilat,ilon] = 1
                    else:
                        map3D[iz,ilat,ilon] = map3D[iz,ilat,ilon] + 1
                    if dbg: print it, lon, lat, z, ilon, ilat, iz, map3D[iz,ilat,ilon]

            newvar = onc.createVariable( 'trjobs', 'i', ('z2D', 'lat2D', 'lon2D'),   \
              fill_value = fillValueI)
            basicvardef(newvar, 'trajectory_observations', 'number of passes', '-')
            newvar[:] = map3D
            newattr = set_attribute(newvar, 'coordinates', 'lon2D lat2D z2D')

    onc.sync()
    return
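
## e.g. (illustrative only) how an observation is mapped onto the Nd x Nd grid above:
##   with minlon=0., maxlon=10. and Nd=100, an observation at lon=2.5 falls in
##   ilon = int((100-1)*(2.5-0.)/(10.-0.)) = int(24.75) = 24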

def adding_station_desc(onc,stdesc):
    """ Function to add a station description in a netCDF file
      onc= netCDF object
      stdesc= station description name, lon, lat, height
    """
    fname = 'adding_station_desc'

    newdim = onc.createDimension('nst',1)

    newvar = onc.createVariable( 'station', 'c', ('nst','StrLength'))
    writing_str_nc(newvar, [stdesc[0].replace('!', ' ')], StringLength)

    newvar = onc.createVariable( 'lonstGDM', 'c', ('nst','StrLength'))
    Gv = int(stdesc[1])
    Dv = int((stdesc[1] - Gv)*60.)
    Mv = int((stdesc[1] - Gv - Dv/60.)*3600.)
    writing_str_nc(newvar, [str(Gv)+"d" + str(Dv)+"m" + str(Mv)+'s'], StringLength)

    if onc.variables.has_key('lon'):
        print warnmsg
        print '  ' + fname + ": variable 'lon' already exist !!"
        print "    renaming it as 'lonst'"
        lonname = 'lonst'
    else:
        lonname = 'lon'

    newvar = onc.createVariable( lonname, 'f4', ('nst'))
    basicvardef(newvar, lonname, 'longitude', 'degrees_East' )
    newvar[:] = stdesc[1]

    newvar = onc.createVariable( 'latstGDM', 'c', ('nst','StrLength'))
    Gv = int(stdesc[2])
    Dv = int((stdesc[2] - Gv)*60.)
    Mv = int((stdesc[2] - Gv - Dv/60.)*3600.)
    writing_str_nc(newvar, [str(Gv)+"d" + str(Dv)+"m" + str(Mv)+'s'], StringLength)

    if onc.variables.has_key('lat'):
        print warnmsg
        print '  ' + fname + ": variable 'lat' already exist !!"
        print "    renaming it as 'latst'"
        latname = 'latst'
    else:
        latname = 'lat'

    newvar = onc.createVariable( latname, 'f4', ('nst'))
    basicvardef(newvar, latname, 'latitude', 'degrees_North' )
    newvar[:] = stdesc[2]

    if onc.variables.has_key('height'):
        print warnmsg
        print '  ' + fname + ": variable 'height' already exist !!"
        print "    renaming it as 'heightst'"
        heightname = 'heightst'
    else:
        heightname = 'height'

    newvar = onc.createVariable( heightname, 'f4', ('nst'))
    basicvardef(newvar, heightname, 'height above sea level', 'm' )
    newvar[:] = stdesc[3]

    return
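
## e.g. (illustrative only) the degrees-minutes-seconds decomposition used above:
##   for a longitude of 2.5 degrees: Gv = 2, Dv = int(0.5*60.) = 30,
##   Mv = int((2.5 - 2 - 30/60.)*3600.) = 0, giving the string '2d30m0s'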

def oper_values(dvals, opers):
    """ Function to operate the values according to given parameters
      dvals= datavalues
      opers= operations
    """
    fname = 'oper_values'

    newdvals = {}
    varnames = dvals.keys()

    aopers = ['sumc','subc','mulc','divc']

    Nopers = len(opers)
    for iop in range(Nopers):
        vn = varnames[iop]
        print vn,'oper:',opers[iop]
        if opers[iop] != '-':
            opern = opers[iop].split(',')[0]
            operv = np.float(opers[iop].split(',')[1])

            vvals = dvals[vn]

            # keep missing values (None) as None when applying the operation
            if opern == 'sumc':
                newvals = [vv + operv if vv is not None else None for vv in vvals]
            elif opern == 'subc':
                newvals = [vv - operv if vv is not None else None for vv in vvals]
            elif opern == 'mulc':
                newvals = [vv * operv if vv is not None else None for vv in vvals]
            elif opern == 'divc':
                newvals = [vv / operv if vv is not None else None for vv in vvals]
            else:
                print errormsg
                print '  ' + fname + ": operation '" + opern + "' not ready!!"
                print '    availables:',aopers
                quit(-1)

            newdvals[vn] = newvals
        else:
            newdvals[vn] = dvals[vn]

    return newdvals
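
## e.g. (illustrative only) converting a list of temperatures from degC to K:
# oper_values({'tas': [12.5, 20.1]}, ['sumc,273.15'])
#   -->  {'tas': [285.65, 293.25]}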

def WMOcodevar(unitsn, onc, Lstrcode=1024):
    """ Function to add a variabe providing description of a units based on WMO code
      unitsn= name of the units (all WMO codes derived units must be labelled as
        'wmo_code_[num]' associated to a file valled wmo_[num].code)
      onc= netCDF object file to add the description
      Lstrcode= length of description of codes

      wmo_[num].code must have the structure: ('#' for comments)
        reference|web page, document reference with the code
        short_description|main description of the code
        long_description|long description of the code
        codeTYPE|type of the code (any of 'D', 'F', 'I', 'S')
        @| Values line giving the start of the values
        [val1]|[meaning of first value]
        (...)
        [valN]|[meaning of last value]
    """
    fname = 'WMOcodevar'

# From http://stackoverflow.com/questions/4934806/how-can-i-find-scripts-directory-with-python
    folder = os.path.dirname(os.path.realpath(__file__))

    code = unitsn.split('_')[2]

    infile = folder + '/wmo_' + code +'.code'

    if not os.path.isfile(infile):
        print warnmsg
        print '  ' + fname + ": WMO code file '" + infile + "' does not exist !!"
        return
    # main expected values
    descvals = ['wmo_code', 'reference', 'short_description', 'long_description',    \
      'codeTYPE', '@']

    availcodetype = ['D', 'F', 'I', 'S']

    codevalues = {}
    ocode = open(infile, 'r')
    inivals = False
    codvals = []
    codmeanings = []
    for line in ocode:
        if len(line) > 1 and line[0:1] != '#':
            linev = line.replace('\n','').replace('\t','    ').replace('\r','')
            if not inivals:
                Sv = linev.split('|')[0]
                Vv = linev.split('|')[1]
                if searchInlist(descvals, Sv): 
                    if Sv != '@': codevalues[Sv] = Vv
                    else: inivals = True
            else:
                Svv = linev.split('|')[0]
                Vvv = linev.split('|')[1]
                codvals.append(Svv)
                codmeanings.append(Vvv)
                        
    # Creating variable
    if not searchInlist(onc.dimensions, 'Lstringcode'):
        print '  ' + fname + ": Adding string length dimension 'Lstringcode' for " + \
          " code descriptions"
        newdim = onc.createDimension('Lstringcode', Lstrcode)
    Ncodes = len(codvals)
    codedimn = 'wmo_code_' + str(code)
    if not searchInlist(onc.dimensions, codedimn):
        print '  ' + fname + ": Adding '" + codedimn + "' dimension for code " +     \
          "description"
        newdim = onc.createDimension(codedimn, Ncodes)
    onc.sync()

    # Variable with the value of the code
    if not onc.variables.has_key(codedimn):
        if codevalues['codeTYPE'] == 'D':
            newvar = onc.createVariable(codedimn, 'f8', (codedimn))
            for iv in range(Ncodes): newvar[iv] = np.float64(codvals[iv])
        elif codevalues['codeTYPE'] == 'F':
            newvar = onc.createVariable(codedimn, 'f', (codedimn))
            for iv in range(Ncodes): newvar[iv] = np.float(codvals[iv])
        elif codevalues['codeTYPE'] == 'I':
            newvar = onc.createVariable(codedimn, 'i', (codedimn))
            for iv in range(Ncodes): newvar[iv] = int(codvals[iv])
        elif codevalues['codeTYPE'] == 'S':
            newvar = onc.createVariable(codedimn, 'c', (codedimn, 'Lstringcode'))
            writing_str_nc(newvar, codvals, Lstrcode)
        else:
            print errormsg
            print '  ' + fname + ": codeTYPE '" + codevalues['codeTYPE'] + "' not" + \
              " ready !!"
            print '   available ones:', availcodetype
            quit(-1)

        for descv in descvals:
            if descv != '@' and descv != 'codeTYPE':
                newvar.setncattr(descv, codevalues[descv])
    # Variable with the meaning of the code
    if not onc.variables.has_key(codedimn+'_meaning'):
        print '  '+fname+": Adding '" + codedimn + "_meaning' variable for code " +  \
          "description"
        newvar = onc.createVariable(codedimn+'_meaning','c',(codedimn,'Lstringcode'))
        writing_str_nc(newvar, codmeanings, Lstrcode)
        newvar.setncattr('description', 'meaning of WMO code ' + str(code))

    onc.sync()

    return
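
## e.g. a minimal 'wmo_001.code' file (illustrative only; number and entries are made up):
##   reference|WMO manual on codes (table reference)
##   short_description|present weather
##   long_description|present weather reported from a manned weather station
##   codeTYPE|I
##   @|
##   0|cloud development not observed or not observable
##   1|clouds generally dissolving or becoming less developed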

def add_global_PyNCplot(ObjFile, pyscript, funcname, version):
    """ Function to add global attributes from 'PyNCplot' to a given netCDF
      ObjFile= netCDF file object to which add the global attributes
      funcname= name of the function from which file comes from
      version= version of the function
    """
    fname = 'add_global_PyNCplot'

    # Global values
    ObjFile.setncattr('author', 'L. Fita')
    newattr = set_attributek(ObjFile, 'institution', unicode('Centro de ' +          \
     'Investigaciones del Mar y la Atm' + unichr(243) + 'sfera (CIMA)'), 'U')
    newattr = set_attributek(ObjFile, 'institution2', unicode('Instituto Franco-' +  \
      'Argentino sobre Estudios de Clima y sus Impactos (CNRS, UMI-3351-IFAECI'), 'U')
    newattr = set_attributek(ObjFile, 'center', unicode('Consejo Nacional de ' +     \
      'Investigaciones Cient' + unichr(237) + 'ficas y T' + unichr(233) +            \
      'cnicas (CONICET)'), 'U')
    ObjFile.setncattr('university', 'Universidad de Buenos Aires (UBA)')
    ObjFile.setncattr('city', 'Buenos Aires')
    ObjFile.setncattr('country', 'Argentina')
    ObjFile.setncattr('tool', 'PyNCplot')
    ObjFile.setncattr('url', 'http://www.xn--llusfb-5va.cat/python/PyNCplot')
    ObjFile.setncattr('script', pyscript)
    if funcname is not None:
        ObjFile.setncattr('function', funcname)
    ObjFile.setncattr('version', version)

    ObjFile.sync()  

    return

####### ###### ##### #### ### ## #

strCFt="Refdate,tunits (CF reference date [YYYY][MM][DD][HH][MI][SS] format and " +  \
  " and time units: 'weeks', 'days', 'hours', 'miuntes', 'seconds')"

kindobs=['stations-map','multi-points', 'single-station', 'trajectory']
strkObs="kind of observations; 'multi-points': multiple individual punctual obs " +  \
  "(e.g., lightning strikes), 'single-station': single station on a fixed position,"+\
  "'trajectory': following a trajectory"

parser = OptionParser()
parser.add_option("-c", "--comments", dest="charcom",
  help="':', list of characters used for comments", metavar="VALUES")
parser.add_option("-d", "--descriptionfile", dest="fdesc",
  help="description file to use" + read_description.__doc__, metavar="FILE")
parser.add_option("-e", "--end_column", dest="endcol",
  help="character to indicate end of the column ('space', for ' ', or 'None,[fixcoljumps]' for fixed size columns with [fixcoljumps]: ',' separated list of positions of column ends within line)", metavar="VALUE")
parser.add_option("-f", "--file", dest="obsfile",
  help="observational file to use", metavar="FILE")
parser.add_option("-g", "--debug", dest="debug",
  help="whther debug is required ('false', 'true')", metavar="VALUE")
parser.add_option("-k", "--kindObs", dest="obskind", type='choice', choices=kindobs, 
  help=strkObs, metavar="VALUE")
parser.add_option("-s", "--stationLocation", dest="stloc",  
  help="name ('!' for spaces), longitude, latitude and height of the station (only for 'single-station')", 
  metavar="FILE")
parser.add_option("-t", "--CFtime", dest="CFtime", help=strCFt, metavar="VALUE")

(opts, args) = parser.parse_args()

#######    #######
## MAIN
    #######

ofile='OBSnetcdf.nc'

if opts.charcom is None:
    print warnmsg
    print '  ' + main + ': No list of comment characters provided!!'
    print '    assuming no need!'
    charcomments = []
else:
    charcomments = opts.charcom.split(':')    

if opts.fdesc is None:
    print errormsg
    print '  ' + main + ': No description file for the observational data provided!!'
    quit(-1)

if opts.endcol is None:
    print warnmsg
    print '  ' + main + ': No end-of-column character provided!!'
    print "    assuming 'space'"
    endcol = ' '
else:
    if opts.endcol == 'space':
        endcol = ' '
    else:
        endcol = opts.endcol

if opts.obsfile is None:
    print errormsg
    print '  ' + main + ': No observations file provided!!'
    quit(-1)

if opts.debug is None:
    print warnmsg
    print '  ' + main + ': No debug provided!!'
    print "    assuming 'False'"
    debug = False
else:
    if opts.debug == 'true':
        debug = True
    else:
        debug = False

if not os.path.isfile(opts.fdesc):
    print errormsg
    print '   ' + main + ": description file '" + opts.fdesc + "' does not exist !!"
    quit(-1)

if not os.path.isfile(opts.obsfile):
    print errormsg
    print '   ' + main + ": observational file '" + opts.obsfile + "' does not exist !!"
    quit(-1)

if opts.CFtime is None:
    print warnmsg
    print '  ' + main + ': No CFtime criteria are provided !!'
    print "    either time is already in CF-format ('timeFMT=CFtime') in '" +        \
      opts.fdesc + "'"
    print "    or assuming refdate: '19491201000000' and time units: 'hours'"
    referencedate = '19491201000000'
    timeunits = 'hours'
else:
    referencedate = opts.CFtime.split(',')[0]
    timeunits = opts.CFtime.split(',')[1]

if opts.obskind is None:
    print warnmsg
    print '  ' + main + ': No kind of observations provided !!'
    print "    assuming 'single-station': single station on a fixed position at 0,0,0"
    obskind = 'single-station'
    stationdesc = ['unknown', 0.0, 0.0, 0.0]
else:
    obskind = opts.obskind
    if obskind == 'single-station':
        if opts.stloc is None:
            print errormsg
            print '  ' + main + ': No station location provided !!'
            quit(-1)
        else:
            stvals = opts.stloc.split(',')
            stationdesc = [stvals[0], np.float(stvals[1]), np.float(stvals[2]),      \
              np.float(stvals[3])]
    else:
        obskind = opts.obskind

# Reading description file
##
description = read_description(opts.fdesc, debug)

Nvariables=len(description['varN'])
formats = description['varTYPE']

if len(formats) != Nvariables: 
    print errormsg
    print '  ' + main + ': number of formats:',len(formats),' and number of ' +     \
      'variables', Nvariables,' does not coincide!!'
    print '    * what is found is _______'
    if Nvariables > len(formats):
        Nshow = len(formats)
        for ivar in range(Nshow):
            print '      ',description['varN'][ivar],':', description['varLN'][ivar],\
               '[', description['varU'][ivar], '] fmt:', formats[ivar]
        print '      missing values for:', description['varN'][Nshow:Nvariables+1]
    else:
        Nshow = Nvariables
        for ivar in range(Nshow):
            print '      ',description['varN'][ivar],':', description['varLN'][ivar],\
               '[', description['varU'][ivar], '] fmt:', formats[ivar]
        print '      excess of formats for:', formats[Nshow:len(formats)+1]

#    quit(-1)

# Reading values
##
datavalues = read_datavalues(opts.obsfile, charcomments, endcol, formats, 
  description['varOPER'], description['MissingValue'], description['varN'], debug)

# Total number of values
Ntvalues = len(datavalues[description['varN'][0]])
if obskind == 'stations-map':
    print main + ': total values found:',Ntvalues
else:
    print main + ': total temporal values found:',Ntvalues

objfile = NetCDFFile(ofile, 'w')
  
# Creation of dimensions
##
if obskind == 'stations-map':
    rowsdim = 'Npoints'
    dimlength = Ntvalues
else:
    rowsdim = 'time'
    dimlength = None

objfile.createDimension(rowsdim,dimlength)
objfile.createDimension('StrLength',StringLength)

# Creation of variables
##
for ivar in range(Nvariables):
    varn = description['varN'][ivar]
    print "  including: '" + varn + "' ... .. ."

    if formats[ivar] == 'D':
        newvar = objfile.createVariable(varn, 'f8', (rowsdim), fill_value=fillValueF)
        basicvardef(newvar, varn, description['varLN'][ivar],                        \
          description['varU'][ivar])
        vals = np.array(datavalues[varn])
        for iv in range(Ntvalues):
            if vals[iv] is None: vals[iv] = fillValueF
        newvar[:] = np.asarray(vals, dtype=np.float64)
    elif formats[ivar] == 'F':
        newvar = objfile.createVariable(varn, 'f', (rowsdim), fill_value=fillValueF)
        basicvardef(newvar, varn, description['varLN'][ivar],                        \
          description['varU'][ivar])
        vals = np.array(datavalues[varn])
        for iv in range(Ntvalues):
            if vals[iv] is None: vals[iv] = fillValueF
        newvar[:] = np.asarray(vals, dtype=np.float32)
    elif formats[ivar] == 'I':
        newvar = objfile.createVariable(varn, 'i', (rowsdim), fill_value=fillValueI)
        basicvardef(newvar, varn, description['varLN'][ivar],                        \
          description['varU'][ivar])
# Why is it not working with integers?
        vals = np.array(datavalues[varn])
        for iv in range(Ntvalues):
            if vals[iv] is None: vals[iv] = fillValueI
        newvar[:] = vals
    elif formats[ivar] == 'S':
        newvar = objfile.createVariable(varn, 'c', (rowsdim,'StrLength'))
        basicvardef(newvar, varn, description['varLN'][ivar],                        \
          description['varU'][ivar])
        vals = datavalues[varn]
        for iv in range(Ntvalues):
            if vals[iv] is None: vals[iv] = fillValueS
        writing_str_nc(newvar, vals, StringLength)

    # Getting new variables to describe certain units as codeWMO_[num] from an 
    #   external file
    if description['varU'][ivar][0:8] == 'wmo_code':
        WMOcodevar(description['varU'][ivar], objfile)

# Extra variable descriptions/attributes
    if description.has_key('varBUFR'): 
        set_attribute(newvar,'bufr_code',description['varBUFR'][ivar])

    objfile.sync()

# Time variable in CF format
##
if description['FMTtime'] == 'CFtime':
    timevals = datavalues[description['NAMEtime']]
    iv = 0
    for ivn in description['varN']:
        if ivn == description['NAMEtime']:
            tunits = description['varU'][iv]
            break
        iv = iv + 1
else:
    # Time as a composition of different columns
    tcomposite = description['NAMEtime'].find('@')
    if tcomposite != -1:
        timevars = description['NAMEtime'].split('@')

        print warnmsg
        print '  ' + main + ': time values as combination of different columns!'
        print '    combining:',timevars,' with a final format: ',description['FMTtime']

        timeSvals = []
        if debug: print '  ' + main + ': creating times _______'
        for it in range(Ntvalues):
            tSvals = ''
            for tvar in timevars:
                tSvals = tSvals + datavalues[tvar][it] + ' '

            timeSvals.append(tSvals[0:len(tSvals)-1])
            if debug: print it, '*' + timeSvals[it] + '*'

        timevals = Stringtimes_CF(timeSvals, description['FMTtime'].replace('@',' '),\
          referencedate, timeunits, debug)
    else:
        timevals = Stringtimes_CF(datavalues[description['NAMEtime']],                   \
          description['FMTtime'], referencedate, timeunits, debug)

    CFtimeRef = referencedate[0:4] +'-'+ referencedate[4:6] +'-'+ referencedate[6:8] +   \
      ' ' + referencedate[8:10] +':'+ referencedate[10:12] +':'+ referencedate[12:14]
    tunits = timeunits + ' since ' + CFtimeRef

if objfile.variables.has_key('time'):
    print warnmsg
    print '  ' + main + ": variable 'time' already exist !!"
    print "    renaming it as 'CFtime'"
    timeCFname = 'CFtime'
    newdim = objfile.renameDimension('time','CFtime')
    newvar = objfile.createVariable( timeCFname, 'f8', ('CFtime'))
    basicvardef(newvar, timeCFname, 'time', tunits )
else:
    if not searchInlist(objfile.dimensions, 'time'):
        newdim = objfile.createDimension('time',None)
    timeCFname = 'time'
    newvar = objfile.createVariable( timeCFname, 'f8', ('time'))
    newvar[:] = np.zeros(timevals.shape[0])
    basicvardef(newvar, timeCFname, 'time', tunits )

set_attribute(newvar, 'calendar', 'standard')
if obskind == 'stations-map':
    newvar[:] = timevals[0]
else:
    newvar[:] = timevals

# Global attributes
##
for descn in description.keys():
    if descn[0:3] != 'var' and descn[0:4] != 'NAME' and descn[0:3] != 'FMT':
        set_attribute(objfile, descn, description[descn])

add_global_PyNCplot(objfile, main, 'main', version)

objfile.sync()

# Adding new variables as function of the observational type
## 'multi-points', 'single-station', 'trajectory'

if obskind != 'single-station':
    adding_complementary(objfile, description, obskind, datavalues, timevals,        \
      referencedate, timeunits, Ndim2D, debug)
else:
# Adding three variables with the station location, longitude, latitude and height
    adding_station_desc(objfile,stationdesc)

objfile.sync()
objfile.close()

print main + ": Successfull generation of netcdf observational file '" + ofile + "' !!"
