Example #1
File: Filler.py Project: NCPP/uvcdat-devel
 def __init__(self,template=None):
     cdat_info.pingPCMDIdb("cdat","genutil.StringConstructor")
     self.template=template
     ## OK, we need to generate the keys and set them to empty; it seems like a better idea
     keys = self.keys()
     for k in keys:
         setattr(self,k,"")
Example #2
def generateLandSeaMask(target,
                        source=None,
                        threshold_1=.2,
                        threshold_2=.3,
                        regridTool='regrid2'):
    """ Generates a best guess mask on any rectilinear grid, using the method described in PCMDI's report #58
    see: http://www-pcmdi.llnl.gov/publications/ab58.html
    Input:
       target: either a MV2 object with a grid, or a cdms2 grid (rectilinear grid only)
       source: A fractional (0 to 1.) land sea mask, where 1 means all land
       threshold_1 (optional): criterion 1 for detecting cells with possible increment (see report for details):
                               difference threshold
       threshold_2 (optional): criterion 2 for detecting cells with possible increment (see report for details):
                               water/land content threshold
       regridTool: which cdms2 regridder tool to use, default is regrid2
    Output:
       land/sea mask on the target grid
    """
    cdat_info.pingPCMDIdb("cdat", "cdutil.generateLandSeaMask")
    if cdms2.isVariable(target):
        target = target.getGrid()
        if target is None:
            raise Exception, "Error target data passed do not have  a grid"
    if not isinstance(target, cdms2.grid.TransientRectGrid):
        raise Exception, "Error: target grid must be rectilinear"

    if source is None:
        source = cdms2.open(
            os.path.join(sys.prefix, 'share', 'cdutil',
                         'navy_land.nc'))('sftlf')

    try:
        navy_frac_t = source.regrid(target, regridTool='regrid2')
    except Exception, err:
        raise "error, cannot regrid source data to target, got error message: %s" % err
Example #3
 def __init__(self, template=None):
     cdat_info.pingPCMDIdb("cdat", "genutil.StringConstructor")
     self.template = template
     ## OK, we need to generate the keys and set them to empty; it seems like a better idea
     keys = self.keys()
     for k in keys:
         setattr(self, k, "")
Example #4
def picker(*args, **kargs):
    """
    Let the user pick non-contiguous values along an axis
    Usage:
    picker(dim1=list1,dim2=list2)
    keyword 'match' is reserved for handling of nonexistent values
    match=1 (default): raise an exception if one of the select-values does not exist
    match=0 : replace nonexistent select-values with missing values
    match=-1: skip nonexistent select-values

    Example:
    f=cdms.open('/pcmdi/obs/mo/ta/rnl_ncep/ta.rnl_ncep.ctl')
    # f's first levels are 1000.00, 925.00, 850.00, 700.00
    s=f('ta',picker(level=[1000,850,700]))
    # or
    s=f('ta',picker(level=[1000,700,850]))   # different order from first example
    # or
    s=f('ta',picker(level=[1000,700,800]))   # raises an exception since 800 doesn't exist
    # or
    s=f('ta',picker(level=[1000,700,800],match=0))   # replace the 800 level with missing values
    # or
    s=f('ta',picker(level=[1000,700,800],match=-1))  # skip the 800 level
    # or
    s=f('ta',genutil.picker(time=['1987-7','1988-3',cdtime.comptime(1989,3)],level=[1000,700,850]))

    """
    cdat_info.pingPCMDIdb("cdat","genutil.picker")
    import cdms2 as cdms
    a=cdms.selectors.Selector(PickComponent(*args,**kargs))
    return a
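
A short sketch of the selector in use (added for illustration), assuming an open cdms2 file whose 'ta' variable has levels 1000, 925, 850 and 700 as in the docstring; the file name is hypothetical.

import cdms2
from genutil import picker

f = cdms2.open('ta.nc')                                   # hypothetical file
ta_sub = f('ta', picker(level=[1000, 850, 700]))          # non-contiguous level pick
ta_skip = f('ta', picker(level=[1000, 800], match=-1))    # silently skip the missing 800 level
f.close()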
Example #5
def generateLandSeaMask(target,source=None,threshold_1 = .2, threshold_2 = .3,regridTool='regrid2'):
    """ Generates a best guess mask on any rectilinear grid, using the method described in PCMDI's report #58
    see: http://www-pcmdi.llnl.gov/publications/ab58.html
    Input:
       target: either a MV2 object with a grid, or a cdms2 grid (rectilinear grid only)
       source: A fractional (0 to 1.) land sea mask, where 1 means all land
       threshold_1 (optional): criterion 1 for detecting cells with possible increment (see report for details):
                               difference threshold
       threshold_2 (optional): criterion 2 for detecting cells with possible increment (see report for details):
                               water/land content threshold
       regridTool: which cdms2 regridder tool to use, default is regrid2
    Output:
       land/sea mask on the target grid
    """
    cdat_info.pingPCMDIdb("cdat","cdutil.generateLandSeaMask")
    if cdms2.isVariable(target):
        target = target.getGrid()
        if target is None:
            raise Exception,"Error target data passed do not have  a grid"
    if not isinstance(target,cdms2.grid.TransientRectGrid):
        raise Exception, "Error: target grid must be rectilinear"

    if source is None:
        source = cdms2.open(os.path.join(sys.prefix,'share','cdutil','navy_land.nc'))('sftlf')
        
    try:
        navy_frac_t = source.regrid(target,regridTool='regrid2')
    except Exception,err:
        raise "error, cannot regrid source data to target, got error message: %s" % err
Example #6
File: selval.py Project: arulalant/uvcdat
def picker(*args, **kargs):
    """
    Let the user pick non-contiguous values along an axis
    Usage:
    picker(dim1=list1,dim2=list2)
    keyword 'match' is reserved for handling of nonexistent values
    match=1 (default): raise an exception if one of the select-values does not exist
    match=0 : replace nonexistent select-values with missing values
    match=-1: skip nonexistent select-values

    Example:
    f=cdms.open('/pcmdi/obs/mo/ta/rnl_ncep/ta.rnl_ncep.ctl')
    # f's first levels are 1000.00, 925.00, 850.00, 700.00
    s=f('ta',picker(level=[1000,850,700]))
    # or
    s=f('ta',picker(level=[1000,700,850]))   # different order from first example
    # or
    s=f('ta',picker(level=[1000,700,800]))   # raises an exception since 800 doesn't exist
    # or
    s=f('ta',picker(level=[1000,700,800],match=0))   # replace the 800 level with missing values
    # or
    s=f('ta',picker(level=[1000,700,800],match=-1))  # skip the 800 level
    # or
    s=f('ta',genutil.picker(time=['1987-7','1988-3',cdtime.comptime(1989,3)],level=[1000,700,850]))

    """
    cdat_info.pingPCMDIdb("cdat", "genutil.picker")
    import cdms2 as cdms

    a = cdms.selectors.Selector(PickComponent(*args, **kargs))
    return a
Example #7
def smooth121(x,axis=0):
    """
    Function smooth121(x,axis=0)
     
    Description of function:
        Apply a 121 filter to an array over a specified axis 
    Usage:
        filtered = smooth121(unfiltered)
    Options:
        axisoptions: 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n 
            default value = 0. You can pass the name of the dimension or index
            (integer value 0...n) over which you want to compute the statistic.
    """
    cdat_info.pingPCMDIdb("cdat","genutil.filters.smooth121")
    return custom1D(x,[1.,2.,1.],axis=axis)
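
A brief usage sketch (not from the original source), assuming a hypothetical file 'ts.nc' with a monthly time series 'ts' whose first axis is time.

import cdms2
from genutil import filters

f = cdms2.open('ts.nc')                    # hypothetical monthly series
ts = f('ts')
smoothed = filters.smooth121(ts)           # 1-2-1 filter along axis 0 (time here)
zonal = filters.smooth121(ts, axis='x')    # or smooth along the longitude axis
f.close()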
Example #8
def smooth121(x, axis=0):
    """
    Function smooth121(x,axis=0)
     
    Description of function:
        Apply a 121 filter to an array over a specified axis 
    Usage:
        filtered = smooth121(unfiltered)
    Options:
        axisoptions: 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n 
            default value = 0. You can pass the name of the dimension or index
            (integer value 0...n) over which you want to compute the statistic.
    """
    cdat_info.pingPCMDIdb("cdat", "genutil.filters.smooth121")
    return custom1D(x, [1., 2., 1.], axis=axis)
Example #9
def runningaverage(x, N, axis=0):
    """
    Function runningaverage(x,N,axis=0)
     
    Description of function:
        Apply a running average of length N to an array over a specified axis 
    Usage:
        smooth = runningaverage(x,12)
    Options:
        N: length of the running average
        axisoptions: 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n 
            default value = 0. You can pass the name of the dimension or index
            (integer value 0...n) over which you want to compute the statistic.
    """
    filter = numpy.ma.ones((N, ), dtype='f')
    cdat_info.pingPCMDIdb("cdat", "genutil.filters.runningaverage(%i)" % N)
    return custom1D(x, filter, axis=axis)
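
A brief usage sketch (added here, not part of the source), assuming the same hypothetical monthly series as above; a 12-point window gives a simple annual smoothing.

import cdms2
from genutil import filters

f = cdms2.open('ts.nc')                    # hypothetical monthly series
ts = f('ts')
annual = filters.runningaverage(ts, 12)    # 12-point running mean along axis 0
f.close()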
Example #10
def runningaverage(x,N,axis=0):
    """
    Function runningaverage(x,N,axis=0)
     
    Description of function:
        Apply a running average of length N to an array over a specified axis 
    Usage:
        smooth = runningaverage(x,12)
    Options:
        N: length of the running average
        axisoptions: 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n 
            default value = 0. You can pass the name of the dimension or index
            (integer value 0...n) over which you want to compute the statistic.
    """
    filter=numpy.ma.ones((N,),dtype='f')
    cdat_info.pingPCMDIdb("cdat","genutil.filters.runningaverage(%i)" % N)
    return custom1D(x,filter,axis=axis)
Example #11
 def testTooManyThreads(self):
     pid = os.getpid()
     print("PID:", pid)
     n = 0
     maximum_num_threads = 0
     if sys.platform == "darwin":
         thread_option = "-M"
     else:
         thread_option = "-T"
     while n < 100:
         n += 1
         cdat_info.pingPCMDIdb("cdat", "cdms2")
         p = Popen("ps {} -p {}".format(thread_option, pid).split(),
                   stdin=PIPE,
                   stdout=PIPE,
                   stderr=PIPE)
         o, e = p.communicate()
         maximum_num_threads = max(len(o.decode().split("\n")),
                                   maximum_num_threads)
         self.assertLess(maximum_num_threads, 15)
Example #12
def reconstructPressureFromHybrid(ps, A, B, Po):
    """
    Reconstruct the Pressure field on sigma levels, from the surface pressure

    Input
    Ps     : Surface pressure
    A,B,Po : Hybrid Conversion Coefficients, such that: P=B*Ps+A*Po
    A,B are 1D : sigma levels
    Po and Ps must have the same units

    Output
    Pressure field
    Such as P=B*Ps+A*Po

    Example
    P=reconstructPressureFromHybrid(ps,A,B,Po)
    """
    # Compute the pressure for the sigma levels
    cdat_info.pingPCMDIdb(
        "cdat",
        "cdutil.vertical.reconstructPressureFromHybrid")
    ps, B = genutil.grower(ps, B)
    ps, A = genutil.grower(ps, A)
    p = ps * B
    p = p + A * Po
    p.setAxisList(ps.getAxisList())
    p.id = 'P'
    try:
        p.units = ps.units
    except:
        pass
    t = p.getTime()
    if not t is None:
        p = p(order='tz...')
    else:
        p = p(order='z...')
    return p
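
A usage sketch for the function above (illustrative only), assuming a hypothetical model file where surface pressure and the hybrid coefficients are stored as 'ps', 'hyam' and 'hybm', with a reference pressure of 100000 Pa (same units as ps).

import cdms2
from cdutil import vertical

f = cdms2.open('model_output.nc')          # hypothetical file
ps = f('ps')                               # surface pressure
A = f('hyam')                              # hybrid A coefficients (1D, sigma levels)
B = f('hybm')                              # hybrid B coefficients (1D, sigma levels)
Po = 100000.                               # reference pressure, same units as ps
P = vertical.reconstructPressureFromHybrid(ps, A, B, Po)
f.close()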
Example #13
def minmax(*data):
    """
  Function : minmax
  
  Description of Function:
    Returns the minimum and maximum of a series of arrays/lists/tuples (or a combination of these)
    You can combine list/tuples/... pretty much any combination is allowed.
    
  Examples of Use
  >>> import genutil
  >>> s = range(7)
  >>> genutil.minmax(s)
  (0.0, 6.0)
  >>> genutil.minmax([s,s])
  (0.0, 6.0)
  >>> genutil.minmax([[s,s*2],4.,[6.,7.,s]],[5.,-7.,8,(6.,1.)])
  (-7.0, 8.0)
  """
    cdat_info.pingPCMDIdb("cdat", "genutil.minmax")
    mx = numpy.finfo(numpy.float).min
    mn = numpy.finfo(numpy.float).max
    if len(data) == 1: data = data[0]
    global myfunction

    def myfunction(d, mx, mn):
        from numpy.ma import maximum, minimum, absolute, greater, count
        try:
            if count(d) == 0: return mx, mn
            mx = float(maximum(mx, float(maximum(d))))
            mn = float(minimum(mn, float(minimum(d))))
        except:
            for i in d:
                mx, mn = myfunction(i, mx, mn)
        return mx, mn

    mx, mn = myfunction(data, mx, mn)
    if mn == 1.E500 and mx == -1.E500: mn = mx = 1.E500
    return mn, mx
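
A small supplementary sketch (not in the original source) showing the behaviour with a masked array: numpy.ma.count/maximum/minimum skip masked entries, so a masked value should not affect the result.

import numpy
import genutil

a = numpy.ma.masked_greater([1., 5., 9., 3.], 8.)   # the 9. becomes masked
lo, hi = genutil.minmax(a, [-2., 4.])
# expected: lo == -2.0 and hi == 5.0, the masked 9. being ignored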
Example #14
def reconstructPressureFromHybrid(ps, A, B, Po):
    """
    Reconstruct the Pressure field on sigma levels, from the surface pressure
    
    Input
    Ps     : Surface pressure
    A,B,Po : Hybrid Conversion Coefficients, such that: P=B*Ps+A*Po
    A,B are 1D : sigma levels
    Po and Ps must have the same units
    
    Output
    Pressure field
    Such as P=B*Ps+A*Po

    Example
    P=reconstructPressureFromHybrid(ps,A,B,Po)
    """
    # Compute the pressure for the sigma levels
    cdat_info.pingPCMDIdb("cdat",
                          "cdutil.vertical.reconstructPressureFromHybrid")
    ps, B = genutil.grower(ps, B)
    ps, A = genutil.grower(ps, A)
    p = ps * B
    p = p + A * Po
    p.setAxisList(ps.getAxisList())
    p.id = 'P'
    try:
        p.units = ps.units
    except:
        pass
    t = p.getTime()
    if not t is None:
        p = p(order='tz...')
    else:
        p = p(order='z...')
    return p
Example #15
File: minmax.py Project: NCPP/uvcdat-devel
def minmax(*data) :
  """
  Function : minmax
  
  Description of Function:
    Returns the minimum and maximum of a series of arrays/lists/tuples (or a combination of these)
    You can combine list/tuples/... pretty much any combination is allowed.
    
  Examples of Use
  >>> import genutil
  >>> s = range(7)
  >>> genutil.minmax(s)
  (0.0, 6.0)
  >>> genutil.minmax([s,s])
  (0.0, 6.0)
  >>> genutil.minmax([[s,s*2],4.,[6.,7.,s]],[5.,-7.,8,(6.,1.)])
  (-7.0, 8.0)
  """
  cdat_info.pingPCMDIdb("cdat","genutil.minmax")
  mx=numpy.finfo(numpy.float).min
  mn=numpy.finfo(numpy.float).max
  if len(data)==1 : data=data[0]
  global myfunction
  def myfunction(d,mx,mn):
    from numpy.ma import maximum,minimum,absolute,greater,count
    try:
      if count(d)==0 : return mx,mn
      mx=float(maximum(mx,float(maximum(d))))
      mn=float(minimum(mn,float(minimum(d))))
    except:
      for i in d:
        mx,mn=myfunction(i,mx,mn)
    return mx,mn
  mx,mn=myfunction(data,mx,mn)
  if mn==1.E500 and mx==-1.E500 :mn=mx=1.E500
  return mn,mx
Example #16
"""
CDMS module-level API
"""
import cdat_info
cdat_info.pingPCMDIdb("cdat", "cdms2")

__all__ = ["cdmsobj", "axis", "coord", "grid", "hgrid", "avariable", \
"sliceut", "error", "variable", "fvariable", "tvariable", "dataset", \
"database", "cache", "selectors", "MV2", "convention", "bindex", \
"auxcoord", "gengrid", "gsHost", "gsStaticVariable", "gsTimeVariable", \
"mvBaseWriter", "mvSphereMesh", "mvVsWriter", "mvCdmsRegrid"]

# Errors
from error import CDMSError

# CDMS datatypes
from cdmsobj import CdArray, CdChar, CdByte, CdDouble, CdFloat, CdFromObject, CdInt, CdLong, CdScalar, CdShort, CdString

# Functions which operate on all objects or groups of objects
from cdmsobj import Unlimited, getPathFromTemplate, matchPattern, matchingFiles, searchPattern, searchPredicate, setDebugMode

# Axis functions and classes
from axis import AbstractAxis, axisMatches, axisMatchAxis, axisMatchIndex
from axis import createAxis, createEqualAreaAxis, createGaussianAxis, createUniformLatitudeAxis, createUniformLongitudeAxis, setAutoBounds, getAutoBounds

# Grid functions
from grid import createGenericGrid, createGlobalMeanGrid, createRectGrid, createUniformGrid, createZonalGrid, setClassifyGrids, createGaussianGrid, writeScripGrid, isGrid

# Dataset functions
from dataset import createDataset, openDataset, useNetcdf3, \
        getNetcdfClassicFlag, getNetcdfShuffleFlag, getNetcdfDeflateFlag, getNetcdfDeflateLevelFlag,\
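
As a quick orientation (added, not part of the module source), a minimal sketch of the usual cdms2 workflow built on the API imported above; the file and variable names are hypothetical.

import cdms2

f = cdms2.open('sample.nc')    # open an existing dataset
ta = f('ta')                   # read a variable as a TransientVariable
lat = ta.getLatitude()         # axes are reachable through the axis API
f.close()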
Example #17
def area_weights(ds, axisoptions=None):
    '''
    Calculates masked area weights.

    Author: Charles Doutriaux: [email protected]

    Modified version using CDAT 3.0 by Paul Dubois
    Further modified by Krishna AchutaRao to return weights in all axes.

    Returns a masked array of the same dimensions as ds containing area weights
    but masked where ds is masked.
    '''
    cdat_info.pingPCMDIdb("cdat", "genutil.area_weights")
    #
    __DEBUG__ = 0
    #
    if __DEBUG__: print 'Incoming axisoptions = ', axisoptions
    if __DEBUG__: print 'Shape of Incoming data = ', ds.shape
    seenlon = 0
    seenlat = 0
    if 'x' in list(ds.getOrder()): seenlon = 1
    if 'y' in list(ds.getOrder()): seenlat = 1
    #
    if seenlat and seenlon:
        if __DEBUG__: print 'Found both latitude and longitude'
        initial_order = ds.getOrder()
        if __DEBUG__: print 'initial_order= ', initial_order
        initial_order_list = list(initial_order)
        if '-' in initial_order_list:
            loc = initial_order_list.index('-')
            axisid = '(' + ds.getAxis(loc).id + ')'
            initial_order_list[loc] = axisid
            initial_order = string.joinfields(initial_order_list, '')
            if __DEBUG__: print 'Changed initial_order = ', initial_order
        # end of if '-' in initial_order_list:
        ds = ds(order='...yx')
        dsorder = ds.getOrder()
        if __DEBUG__: print 'Reordered ds ', dsorder
        Lataxisindex = list(dsorder).index('y')
        Lonaxisindex = list(dsorder).index('x')
        if __DEBUG__:
            print 'Lataxisindex = ', Lataxisindex, ' Lonaxisindex = ', Lonaxisindex
        #wt = numpy.outer(__myGetAxisWeights(ds,Lataxisindex), __myGetAxisWeights(ds,Lonaxisindex))
        dsgr = ds.getGrid()
        latwts, lonwts = dsgr.getWeights()
        wt = numpy.outer(numpy.array(latwts), numpy.array(lonwts))
        # At this point wt is an nlat by nlong matrix
        # Now the problem is to propagate this two-dimensional weight mask
        # through the other dimensions. To do this we shuffle these two dimensions
        # to the front of the shape, resize wt, and then permute it back to
        # the order of the dimensions in ds.
        s = ds.shape
        for i in range(len(s) - 1, -1, -1):
            if (i != Lataxisindex) and (i != Lonaxisindex):
                newaxiswt = __myGetAxisWeights(ds, i, axisoptions)
                wtlist = list(wt.shape)
                if __DEBUG__: print 'Before Inserting newdim', wtlist
                wtlist.insert(0, newaxiswt.shape[0])
                if __DEBUG__: print 'After inserting newdim ', wtlist
                new_wtshape = tuple(wtlist)
                wt = numpy.resize(wt, new_wtshape)
                if __DEBUG__:
                    print 'After inserting dimension ', i, ' shape of wt = ', wt.shape
                new_newaxiswt_shape = list(newaxiswt.shape)
                for nn in range(1, len(wt.shape), 1):
                    new_newaxiswt_shape.append(1)
                newaxiswt = numpy.resize(newaxiswt, tuple(new_newaxiswt_shape))
                wt = wt * newaxiswt
            # end of if (i != Lataxisindex) and (i != Lonaxisindex):
        # end of for i in range(len(s)):
        wt = cdms2.createVariable(numpy.ma.masked_array(
            wt, numpy.ma.getmask(ds)),
                                  axes=ds.getAxisList())
        result = wt(order=initial_order)
        if __DEBUG__: print 'Returning something of order', result.getOrder()
        return result
    else:
        wt = __myGetAxisWeights(ds, 0, axisoptions)
        if __DEBUG__: print 'Initial', wt.shape
        for i in range(1, len(ds.shape)):
            wt_newshape = tuple(list(ds.shape)[:i + 1])
            if __DEBUG__: print 'wt_newshape = ', wt_newshape
            wt = numpy.resize(wt, wt_newshape)
            if __DEBUG__: print 'After wt resize wt.shape = ', wt.shape
            newaxiswt = __myGetAxisWeights(ds, i)
            newaxiswt = numpy.resize(newaxiswt, wt.shape)
            wt = wt * newaxiswt
            if __DEBUG__: print 'After axis ', i, ' wt has shape ', wt.shape
        # end of for i in range(2, len(ds.shape)):
        if __DEBUG__: print 'Final Shape of Weight = ', wt.shape
        return cdms2.createVariable(numpy.ma.masked_array(
            wt, numpy.ma.getmask(ds)),
                                    axes=ds.getAxisList())
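
A minimal usage sketch (illustrative), assuming a hypothetical file 'tas.nc' with a variable on latitude/longitude axes; area_weights is referenced as cdutil.area_weights in the averager docstring further down, so it is imported from cdutil here.

import cdms2
import cdutil

f = cdms2.open('tas.nc')              # hypothetical file
tas = f('tas')                        # variable carrying latitude/longitude axes
wts = cdutil.area_weights(tas)        # masked weights, same shape and mask as tas
f.close()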
Example #18
def linearInterpolation(A,
                        I,
                        levels=[
                            100000, 92500, 85000, 70000, 60000, 50000, 40000,
                            30000, 25000, 20000, 15000, 10000, 7000, 5000,
                            3000, 2000, 1000
                        ],
                        status=None):
    """
    Linear interpolation
    to interpolate a field from some levels to another set of levels
    Value below "surface" are masked
    
    Input
    A :      array to interpolate
    I :      interpolation field (usually Pressure or depth) from TOP (level 0) to BOTTOM (last level), i.e P value going up with each level
    levels : levels to interpolate to (same units as I), default levels are:[100000, 92500, 85000, 70000, 60000, 50000, 40000, 30000, 25000, 20000, 15000, 10000, 7000, 5000, 3000, 2000, 1000]

    I and levels must have same units

    Output
    array on new levels (levels)
    
    Examples:
    A=linearInterpolation(A,I,levels=[100000, 92500, 85000, 70000, 60000, 50000, 40000, 30000, 25000, 20000, 15000, 10000, 7000, 5000, 3000, 2000, 1000])
    """

    cdat_info.pingPCMDIdb("cdat", "cdutil.vertical.linearInterpolation")
    try:
        nlev = len(levels)  # Number of pressure levels
    except:
        nlev = 1  # if only one level, len(levels) would break
        levels = [
            levels,
        ]
    order = A.getOrder()
    A = A(order='z...')
    I = I(order='z...')
    sh = list(I.shape)
    nsigma = sh[0]  #number of sigma levels
    sh[0] = nlev
    t = MV2.zeros(sh, typecode=MV2.float32)
    sh2 = I[0].shape
    prev = -1
    for ilev in range(nlev):  # loop through pressure levels
        if status is not None:
            prev = genutil.statusbar(ilev, nlev - 1., prev)
        lev = levels[ilev]  # get value for the level
        Iabv = MV2.ones(sh2, MV2.float)
        Aabv = -1 * Iabv  # Array on sigma level Above
        Abel = -1 * Iabv  # Array on sigma level Below
        Ibel = -1 * Iabv  # Pressure on sigma level Below
        Iabv = -1 * Iabv  # Pressure on sigma level Above
        Ieq = MV2.masked_equal(Iabv, -1)  # Area where Pressure == levels
        for i in range(1, nsigma):  # loop from second sigma level to last one
            a = MV2.greater_equal(
                I[i], lev)  # Where is the pressure greater than lev
            b = MV2.less_equal(I[i - 1],
                               lev)  # Where is the pressure less than lev
            # Now looks if the pressure level is in between the 2 sigma levels
            # If yes, sets Iabv, Ibel and Aabv, Abel
            a = MV2.logical_and(a, b)
            Iabv = MV2.where(a, I[i], Iabv)  # Pressure on sigma level Above
            Aabv = MV2.where(a, A[i], Aabv)  # Array on sigma level Above
            Ibel = MV2.where(a, I[i - 1],
                             Ibel)  # Pressure on sigma level Below
            Abel = MV2.where(a, A[i - 1], Abel)  # Array on sigma level Below
            Ieq = MV2.where(MV2.equal(I[i], lev), A[i], Ieq)

        val = MV2.masked_where(
            MV2.equal(Ibel, -1.),
            numpy.ones(Ibel.shape) *
            lev)  # set to missing value if no data below lev if there is

        tl = (val - Ibel) / (Iabv - Ibel) * (Aabv -
                                             Abel) + Abel  # Interpolation
        if ((Ieq.mask is None) or (Ieq.mask is MV2.nomask)):
            tl = Ieq
        else:
            tl = MV2.where(1 - Ieq.mask, Ieq, tl)
        t[ilev] = tl.astype(MV2.float32)

    ax = A.getAxisList()
    autobnds = cdms2.getAutoBounds()
    cdms2.setAutoBounds('off')
    lvl = cdms2.createAxis(MV2.array(levels).filled())
    cdms2.setAutoBounds(autobnds)
    try:
        lvl.units = I.units
    except:
        pass
    lvl.id = 'plev'

    try:
        t.units = I.units
    except:
        pass

    ax[0] = lvl
    t.setAxisList(ax)
    t.id = A.id
    for att in A.listattributes():
        setattr(t, att, getattr(A, att))
    return t(order=order)
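
A usage sketch chaining this with reconstructPressureFromHybrid (illustrative only; file and variable names 'ta', 'ps', 'hyam', 'hybm' are hypothetical): temperature on hybrid model levels is interpolated to a few pressure levels in Pa.

import cdms2
from cdutil import vertical

f = cdms2.open('model_output.nc')                                     # hypothetical file
ta = f('ta')                                                          # field on model levels
P = vertical.reconstructPressureFromHybrid(f('ps'), f('hyam'), f('hybm'), 100000.)
ta_p = vertical.linearInterpolation(ta, P, levels=[85000, 50000, 25000])
f.close()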
Example #19
def generateSurfaceTypeByRegionMask(mask,sftbyrgn=None,sftbyrgnmask=215,regions=range(201,223),maximum_regions_per_cell=4,extend_up_to=3,verbose=True):
    """
    Maps a "regions" dataset onto a user provided land/sea mask or grid
    
    Usage:
    -----
    mapped,found = generateSurfaceTypeByRegionMask(mask,sftbyrgn=None,sftbyrgnmask=None,regions=None,maximum_regions_per_cell=4,extend_up_to=3,verbose=True)

    Input:
    -----
    mask                        User provided land/sea mask (100/0) or grid (the land/sea mask will be generated automagically) which will be mapped using the "sftbyrgn" internal dataset (will generate a land/sea mask for you)
    sftbyrgn                    Mask you wish to map onto your grid (if None uses internal "sftbyrgn" dataset (old ezget type))
    sftbyrgnmask                Land/sea mask for sftbyrgn (or a number specifying value limits for sftbyrgn which indicates land/sea threshold (greater values are land) - see URL below for integer region map)
    regions                     Numbers from sftbyrgn array that you want to map onto mask (integers from 201-222)
    maximum_regions_per_cell    Maximum number of regions considered for a single cell
    extend_up_to                How many grid cells around a cell can we extend to identify a guess
    verbose                     Prints to the screen what's going on (default is True)

    Output:
    -----
    mapped                      Mapped input grid/mask using provided (or default) regions - sftbyrgn -> user provided grid/mask
    found                       Matrix containing number of regions matched for each output cell
    
    Notes:
    -----
    - More detailed information, including a region map and tabulated region numbers are available from http://www-pcmdi.llnl.gov/publications/pdf/34.pdf
    """
    
    cdat_info.pingPCMDIdb("cdat","cdutil.generateSurfaceTypeByRegionMask")
    ## OK first determine which regions are available
    ## Must be integer values
    if isinstance(mask,cdms2.grid.TransientRectGrid):
        mask = cdutil.generateLandSeaMask(mask)*100.

    if sftbyrgn is None:
        sftbyrgn = cdms2.open(os.path.join(cdat_info.get_prefix(),'share','cdutil','sftbyrgn.nc'))('sftbyrgn')
        
    if regions is None:
        if verbose: print 'Preparing regions'
        #regions = range(201,223)

        regions = []
        for i in range(0,10000):
            genutil.statusbar(i,9999)
            c = float(MV2.sum(MV2.ravel(MV2.equal(sftbyrgn,i)),0))
            if c != 0: regions.append(i)

    if verbose: print 'Regions:',regions
    ## If no mask is passed for sftbyrgn, assume everything greater than 5000 is land
    if isinstance(sftbyrgnmask,int):
        split           = sftbyrgnmask
        n               = MV2.maximum(mask)
        sftbyrgnmask    = MV2.greater_equal(sftbyrgn,sftbyrgnmask)*n
    else:
        split           = MV2.maximum(sftbyrgnmask)/2.
    ## Now guess the type for each regions
    keys = {}
    ## ## Nice way to do it
    ##     for r in regions:
    ##         c=MV2.not_equal(sftbyrgn,r)
    ##         c=MV2.masked_where(c,sftbyrgnmask)
    ##         n=MV2.count(c)
    ##         c=float(MV2.sum(MV2.ravel(c),0)/n)
    ##         print r,c,n
    ##         keys[r]=c
    ## Fast but not so "general" way to do it
    for r in regions:
        if r< split:
            keys[r] = 0.
        else:
            keys[r] = 100.
    sh              = list(mask.shape)
    sh.insert(0,maximum_regions_per_cell)
    potential       = MV2.ones(sh,dtype='d')*-999
    potential_reg   = MV2.ones(sh,dtype='d')*-999

    g1  = sftbyrgn.getGrid()
    g2  = mask.getGrid()
    r1  = regrid2.Horizontal(g1,g2)
    w   = cdutil.area_weights(sftbyrgn)

    if verbose: print 'First pass'
    itmp = 0.
    for ireg in keys.keys():
        genutil.statusbar(itmp,len(keys.keys())-1)
        itmp += 1.
        c       = MV2.equal(sftbyrgn,ireg)
        w2      = 1.-c*w
        s2,w3   = r1(sftbyrgn,mask=w2.filled(),returnTuple=1)
        c2      = MV2.equal(mask,keys[ireg])
        loop(potential,potential_reg,c2,w3,ireg)

    found = MV2.zeros(sh[1:],typecode='f')
    for i in range(maximum_regions_per_cell):
        found = found+MV2.not_equal(potential[i],-999)
    sh2 = list(sh)
    for k in range(extend_up_to):
        sh2[1] = sh[1]+2*(k+1)
        sh2[2] = sh[2]+2*(k+1)
        ## Form the possible i/j couples !
        s = MV2.sum(MV2.ravel(MV2.equal(potential[0],-999)),0)
        if verbose: print 'Expanding up to',k+1,'cells while trying to fix',s,'cells'
            #if dump:
                #f=cdms2.open('tmp_'+str(k)+'.nc','w')
                #f.write(sumregions(potential_reg,potential).astype('f'),id='sftbyrgn',axes=mask.getAxisList())
                #f.close()
                #g=sumregions(potential_reg,potential).astype('d')
                #g=MV2.masked_equal(g,-999)
                #g=MV2.greater(g,4999)*100.
                #g=MV2.absolute(mask-g)
                #g=MV2.masked_equal(g,0.)
                #print 'Number of differences:',MV2.count(g)

        if float(s) != 0:
            c0 = MV2.equal(potential[0],-999)
            couples = []
            sft2 = MV2.zeros(sh2[1:],dtype='d')-888.
            sft2[k+1:-k-1,k+1:-k-1] = mask
            for i in range(-k-1,k+2):
                for j in range(-k-1,k+2):
                    if abs(i)>k or abs(j)>k: couples.append([i,j])
            ntot = len(keys.keys())*len(couples)-1
            itmp = 0
            for ireg in keys.keys():
                c = MV2.equal(sftbyrgn,ireg)
                w2 = 1.-c*w
                s2,w3 = r1(sftbyrgn,mask=w2.filled(),returnTuple=1)
                w4 = MV2.zeros(sh2[1:],typecode='d')
                w4[k+1:-k-1,k+1:-k-1] = w3
                for i,j in couples:
                    if verbose: genutil.statusbar(itmp,ntot)
                    itmp += 1.
                    c2 = MV2.equal(sft2[j+k+1:j+k+1+sh[1],i+k+1:i+k+1+sh[2]],keys[ireg])
                    c3 = MV2.equal(sft2[j+k+1:j+k+1+sh[1],i+k+1:i+k+1+sh[2]],mask)
                    c2 = MV2.logical_and(c2,c3)
                    c2 = MV2.logical_and(c2,c0)
                    loop(potential,potential_reg,c2,w4[j+k+1:j+k+1+sh[1],i+k+1:i+k+1+sh[2]],ireg)
           
        found = MV2.where(MV2.equal(potential[0],-999),found-1,found)

    out = sumregions(potential_reg,potential)
    out.setAxisList(mask.getAxisList())
    out.id = 'sftbyrgn'
    out = out.astype('i')
    out.missing_value = -999
    found.setAxisList(mask.getAxisList())
    found.id = 'found'
    found = found.astype('i')
    found.missing_value = -999

    del(out.name)
    del(found.name)
    return out,found
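
A minimal usage sketch (added for illustration), assuming a hypothetical file whose variable sits on a rectilinear grid; passing the grid lets the function build the land/sea mask itself, as described in the docstring.

import cdms2
import cdutil

f = cdms2.open('tas.nc')                       # hypothetical file on the target grid
grid = f('tas').getGrid()                      # a rectilinear target grid
mapped, found = cdutil.generateSurfaceTypeByRegionMask(grid, verbose=False)
f.close()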
Example #20
def linearInterpolation(
    A, I, levels=[100000, 92500, 85000, 70000, 60000, 50000, 40000,
                  30000, 25000, 20000, 15000, 10000, 7000, 5000, 3000, 2000, 1000], status=None, axis='z'):
    """
    Linear interpolation
    to interpolate a field from some levels to another set of levels
    Value below "surface" are masked

    Input
    A :      array to interpolate
    I :      interpolation field (usually Pressure or depth) from TOP (level 0) to BOTTOM (last level), i.e P value going up with each level
    levels : levels to interpolate to (same units as I), default levels are:[100000, 92500, 85000, 70000, 60000, 50000, 40000, 30000, 25000, 20000, 15000, 10000, 7000, 5000, 3000, 2000, 1000]
    axis:    axis over which to do the linear interpolation, default is 'z', accepted: '1' '(myaxis)'

    I and levels must have same units

    Output
    array on new levels (levels)

    Examples:
    A=linearInterpolation(A,I,levels=[100000, 92500, 85000, 70000, 60000, 50000, 40000, 30000, 25000, 20000, 15000, 10000, 7000, 5000, 3000, 2000, 1000])
    """

    cdat_info.pingPCMDIdb("cdat", "cdutil.vertical.linearInterpolation")
    try:
        nlev = len(levels)  # Number of pressure levels
    except:
        nlev = 1  # if only one level, len(levels) would break
        levels = [levels, ]
    order = A.getOrder()
    A = A(order='%s...' % axis)
    I = I(order='%s...' % axis)
    sh = list(I.shape)
    nsigma = sh[0]  # number of sigma levels
    sh[0] = nlev
    t = MV2.zeros(sh, typecode=MV2.float32)
    sh2 = I[0].shape
    prev = -1
    for ilev in range(nlev):  # loop through pressure levels
        if status is not None:
            prev = genutil.statusbar(ilev, nlev - 1., prev)
        lev = levels[ilev]  # get value for the level
        Iabv = MV2.ones(sh2, MV2.float)
        Aabv = -1 * Iabv  # Array on sigma level Above
        Abel = -1 * Iabv  # Array on sigma level Below
        Ibel = -1 * Iabv  # Pressure on sigma level Below
        Iabv = -1 * Iabv  # Pressure on sigma level Above
        Ieq = MV2.masked_equal(Iabv, -1)  # Area where Pressure == levels
        for i in range(1, nsigma):  # loop from second sigma level to last one
            a = MV2.greater_equal(
                I[i],
                lev)  # Where is the pressure greater than lev
            b = MV2.less_equal(
                I[i - 1],
                lev)  # Where is the pressure less than lev
            # Now looks if the pressure level is in between the 2 sigma levels
            # If yes, sets Iabv, Ibel and Aabv, Abel
            a = MV2.logical_and(a, b)
            Iabv = MV2.where(a, I[i], Iabv)  # Pressure on sigma level Above
            Aabv = MV2.where(a, A[i], Aabv)  # Array on sigma level Above
            Ibel = MV2.where(
                a,
                I[i - 1],
                Ibel)  # Pressure on sigma level Below
            Abel = MV2.where(a, A[i - 1], Abel)  # Array on sigma level Below
            Ieq = MV2.where(MV2.equal(I[i], lev), A[i], Ieq)

        val = MV2.masked_where(
            MV2.equal(Ibel, -1.), numpy.ones(Ibel.shape) * lev)
                               # set to missing value if no data below lev if
                               # there is

        tl = (val - Ibel) / (Iabv - Ibel) * \
            (Aabv - Abel) + Abel  # Interpolation
        if ((Ieq.mask is None) or (Ieq.mask is MV2.nomask)):
            tl = Ieq
        else:
            tl = MV2.where(1 - Ieq.mask, Ieq, tl)
        t[ilev] = tl.astype(MV2.float32)

    ax = A.getAxisList()
    autobnds = cdms2.getAutoBounds()
    cdms2.setAutoBounds('off')
    lvl = cdms2.createAxis(MV2.array(levels).filled())
    cdms2.setAutoBounds(autobnds)
    try:
        lvl.units = I.units
    except:
        pass
    lvl.id = 'plev'

    try:
        t.units = I.units
    except:
        pass

    ax[0] = lvl
    t.setAxisList(ax)
    t.id = A.id
    for att in A.listattributes():
        setattr(t, att, getattr(A, att))
    return t(order=order)
Example #21
def custom1D(x,filter,axis=0):
    """
    Function: custom1D(x,filter,axis=0)
     
    Description of function:
        Apply a custom 1 dimensional filter to an array over a specified axis
        filter can be a list of numbers or a 1D array
    Usage:
        filtered = custom1D(x,filter)
    Options:
        axisoptions: 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n 
            default value = 0. You can pass the name of the dimension or index
            (integer value 0...n) over which you want to compute the statistic.
    """
    cdat_info.pingPCMDIdb("cdat","genutil.filters.custom1D")
    isMV2=cdms2.isVariable(x)
    if isMV2: xatt=x.attributes
    filter=MV2.array(filter)
    newx=MV2.array(x)
    initialorder=newx.getOrder(ids=1)
    n=len(filter)
    newx=newx(order=str(axis)+'...')
    sh=list(newx.shape)
    sh[0]=sh[0]-n+1
    out=numpy.ma.zeros(sh,dtype=newx.dtype.char)
    ax=[]
    bnds=[]
    nax=newx.getAxis(0)
    for i in range(sh[0]):
        sub=newx[i:i+n]
        if i==0:
            filter.setAxis(0,sub.getAxis(0))
            filter,sub=genutil.grower(filter,sub)
        out[i]=numpy.ma.average(sub,weights=filter, axis=0)
        if isMV2:
            a=nax.subAxis(i,i+n)
            try:
                b=a.getBounds()
                b1=b[0][0]
                b2=b[-1][1]
                ax.append((b1+b2)/2.)
                bnds.append([b1,b2])
            except: # No bounds on this axis
                bnds=None
                ax.append(float(numpy.ma.average(a[:], axis=0)))
    out=MV2.array(out,id=newx.id)
    if isMV2:
        for k in xatt.keys():
            setattr(out,k,xatt[k])
        for i in range(1,len(sh)):
            out.setAxis(i,newx.getAxis(i))
        if not bnds is None: bnds=numpy.ma.array(bnds)
        ax=cdms2.createAxis(ax,bounds=bnds)
        a=newx.getAxis(0)
        attr=a.attributes
        ax.id=a.id
        for k in attr.keys():
            setattr(ax,k,attr[k])
        out.setAxis(0,ax)
        
    out=out(order=initialorder)
    if not isMV2:
        out=numpy.ma.array(out)
    return out
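
A brief usage sketch (not from the source), assuming the same hypothetical monthly series as in the filter examples above; the weights form a 5-point binomial (1-4-6-4-1) smoother applied along the time axis.

import cdms2
from genutil import filters

f = cdms2.open('ts.nc')                                          # hypothetical monthly series
ts = f('ts')
smoothed = filters.custom1D(ts, [1., 4., 6., 4., 1.], axis='t')
f.close()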
Example #22
#               software for the selection, manipulation, and display of        #
#               scientific data. By specification of the desired data, the      #
#               graphics method, and the display template, the VCS user gains   #
#               virtually complete control of the appearance of the data        #
#               display and associated text and animation.                      #
#                                                                               #
# Upgrade to VTK:                                                               #
# Author: Charles Doutriaux                                                     #
# Description: Took out all C code and used VTK's python bindings instead       #
#                                                                               #
#################################################################################
"""
_doValidation = True
next_canvas_id = 1
import cdat_info
cdat_info.pingPCMDIdb("cdat","vcs")
import thread
import time
from utils import *
import Canvas
from vcshelp import *
from queries import *
from pauser import pause
import install_vcs
from install_vcs import list_printers, add_printer, remove_printer
from Canvas import dictionarytovcslist
import os
from manageElements import *

_default_time_units='days since 2000'
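
A minimal plotting sketch for the module above (illustrative; it assumes a display/VTK-capable environment and a hypothetical file 'tas.nc').

import cdms2
import vcs

f = cdms2.open('tas.nc')       # hypothetical file
tas = f('tas')
canvas = vcs.init()            # create a VCS canvas
canvas.plot(tas)               # default plot of the variable
canvas.png('tas.png')          # save the canvas to a PNG file
f.close()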
Example #23
File: region.py Project: NCPP/uvcdat-devel
def domain(*args, **kargs):
    '''construct the selector'''
    import cdms2 as cdms
    cdat_info.pingPCMDIdb("cdat","cdutil.region.domain")
    a=cdms.selectors.Selector(DomainComponent(*args,**kargs))
    return a
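
A short sketch of the selector in use (added for illustration), assuming a hypothetical file 'tas.nc'; the box roughly corresponds to the Nino 3.4 region.

import cdms2
from cdutil.region import domain

f = cdms2.open('tas.nc')                                        # hypothetical file
nino34 = domain(latitude=(-5., 5.), longitude=(190., 240.))     # lat/lon box selector
tas_box = f('tas', nino34)                                      # read only that region
f.close()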
Example #24
def averager(V, axis=None, weights=None, action='average', returned=0, weight=None, combinewts=None):
    """
                        Documentation for averager():
                        -----------------------------
The averager() function provides a convenient way of averaging your data giving
you control over the order of operations (i.e which dimensions are averaged
over first) and also the weighting for the different axes. You can pass your
own array of weights for each dimension or use the default (grid) weights or
specify equal weighting. 


Author: Krishna AchutaRao : [email protected]

Returns:
-------
       The average over the specified dimensions.
Usage: 
------
from cdutil import averager
averager( V, axis='axisoptions', weights=weightoptions, action='average',
          returned='0')

Where V is an array. It can be an array of numpy, numpy.ma or MV2 type. In each case
the function returns an array (except when it results in a scalar) of the same
type as V. See examples for more details.

Optional Arguments:
-------------------
axis=axisoptions   
        Restrictions: axisoptions has to be a string
        Default : first dimension in the data you pass to the function.

        You can pass axis='tyx', or '123', or 'x (plev)' etc.  the same way as
        in order= options for variable operations EXCEPT that
        '...'(i.e Ellipses) are not allowed. In the case that V is a numpy or
        numpy.ma array, axis names have no meaning and only axis indexes are valid.


weights=weightoptions
        Default :
                 'weighted' for Transient Variables (MV2s)
                 'unweighted' for numpy.ma or numpy arrays.

        Note that depending on the array being operated on by averager, the
        default weights change!
        
        Weight options are one of 'weighted', 'unweighted',  an array of weights for
        each dimension or a MaskedVariable of the same shape as the data x.

        - 'weighted' means use the grid information to generate weights for
          that dimension.

        - 'unweighted' means use equal weights for all the grid points in that axis. 

        - Also an array of weights (of the same shape as the dimension being
          averaged over or same shape as V) can be passed.

          Additional Notes on 'weighted' option: The weights are generated
          using the bounds for the specified axis. For latitude and Longitude,
          the weights are calculated using the area (see the cdms2 manual
          grid.getWeights() for more details) whereas for the other axes
          weights are the difference between the bounds (when the bounds are
          available). If the bounds are stored in the file being read in, then
          those values are used. Otherwise, bounds are generated as long as
          cdms2.setAutoBounds('on') is set. If cdms2.setAutoBounds() is set to
          'off', then an Error is raised.

action='average' or 'sum'
        Default : 'average'

        You can either return the weighted average or the weighted sum of the
        data by specifying the keyword argument action=
        
returned = 0 or 1
        Default: 0
        
        - 0 implies sum of weights are not returned after averaging operation.
        - 1 implies the sum of weights after the average operation is returned.

combinewts = None, 0 or 1
        Default: None - same as 0
        - 0 implies weights passed for individual axes are not combined into one
          weight array for the full variable V before performing operation.
       - 1 implies weights passed for individual axes are combined into one
         weight array for the full variable before performing average or sum
         operations. One-dimensional weight arrays or key words of 'weighted' or
         'unweighted' must be passed for the axes over which the operation is
         to be performed. Additionally, weights for axes that are not being
         averaged or summed may also be passed in the order in which they appear.
         If the weights for the other axes are not passed, they are assumed to
         be equally weighted.

Examples:
---------
        >>> f = cdms2.open('data_file_name')
        >>> averager(f('variable_name'), axis='1')
        # extracts the variable 'variable_name' from f and averages over the 
        # dimension whose position is 1. Since no other options are specified, 
        # defaults kick in i.e weight='weighted' and returned=0

        >>> averager(V, axis='xy', weights=['weighted','unweighted'])
        or
        >>> averager(V, axis='t', weights='unweighted')
        or
        >>> averager(V, axis='x')
        # Default weights option of 'weighted' is implemented
        or
        >>> averager(V, axis='x', weights=mywts) 
        # where mywts is an array of shape (len(xaxis)) or shape(V)
        or
        >>> averager(V, axis='(lon)y', weights=[myxwts, myywts])
        # where myxwts is of shape len(xaxis) and myywts is of shape len(yaxis)
        or
        >>> averager(V, axis='xy', weights=V_wts)
        # where V_wts is a Masked Variable of shape V
        or
        >>> averager(V, axis='x', weights='unweighted', action='sum')
        # will return the equally weighted sum over the x dimension
        or
        >>> ywt = area_weights(y)
        >>> fractional_area = averager(ywt, axis='xy',
                                weights=['unweighted', 'unweighted'], action='sum')
        # is a good way to compute the area fraction of the
        # data y that is non-missing
        
Note:
-----
        When averaging data with missing values, extra care needs to be taken.
        It is recommended that you use the default weights='weighted' option.
        This uses cdutil.area_weights(V) to get the correct weights to
        pass to the averager.
        >>> averager(V, axis='xy', weights='weighted')
        
        The above is equivalent to:
        >>> V_wts = cdutil.area_weights(V)
        >>> result = averager(V, axis='xy', weights=V_wts)
        or
        >>> result = averager(V, axis='xy', weights=cdutil.area_weights(V))

        However, the area_weights function requires that the axis bounds are
        stored or can be calculated (see documentation of area_weights for more
        details). In the case that such weights are not stored with the axis
        specifications (or the user desires to specify weights from another
        source), the use of combinewts option can produce the same results.
        In short, the following two are equivalent:
        >>> xavg_1 = averager(X, axis = 'xy', weights = area_weights(X))
        >>> xavg_2 = averager(X, axis = 'xy', weights = ['weighted', 'weighted', 'weighted'], combinewts=1)

        Where X is a function of x, y and a third dimension such as time or level.
        
        In general, the above can be substituted with arrays of weights where
        the 'weighted' keyword appears.
        """
    __DEBUG__ = 0
    cdat_info.pingPCMDIdb("cdat","genutil.averager")
    #
    # Check the weight = option. This is done for backward compatibility since
    # weights= is the current default syntax.
    #
    if not weight is None:
        if not weights is None:
            raise AveragerError, \
                  'Error: You cannot set both weight and weights!. weight is obsolete please use weights only !!!'
        else:
            weights = weight
        # end of if not weights in ['generate','weighted'] :
    # end of if not weight is None:
    #
    # Note: Further checking on weights is done later - in the numpy.ma & MV2 sections also.
    #
    # Check the returned option
    #
    if returned not in [0,1]:
        raise AveragerError, \
              'Error: Invalid option for returned. Pass 0 or 1.'
    # end of if returned not in [0,1]:
    #
    # Check the action = options
    #
    if string.upper(action) in ['AVERAGE', 'AVE', 'AVG']:
        action = 'average'
    elif string.upper(action) in ['SUM', 'ADD']:
        action = 'sum'
    else:
        raise AveragerError, 'Error: Invalid option for action. Pass \'average\' or \'sum\''
    # end of if string.upper(action) in ['AVERAGE', 'AVE', 'AVG']:
    #
    # Check the combinewts option
    #
    if not combinewts :
        combinewts = 0
    elif combinewts not in [0, 1]:
         raise AveragerError, \
               "Error: combinewts must be set to 0 or 1"
    # end of if not combinewts :
    # ************************* End of option checking ************************* 
    #
    # Account for MV2, numpy.ma or numpy arrays sent in by users. Return result of same type.
    #
    #
    # Case 1. numpy array
    #         Convert numpy array to numpy.ma and remember it using _NUM_FLAG so you
    #         can convert the answer to numpy array before returning
    #
    if isinstance(V, numpy.ndarray):
        if __DEBUG__: print 'Converting to numpy.ma so I can do an numpy.ma.average or sum'
        V = numpy.ma.array(V)
        _NUM_FLAG = 1
    else:
        _NUM_FLAG = 0
    # end of if isinstance(V, numpy.ndarray):
    #
    #
    # Case 2. Masked Array (numpy.ma)
    #
    if numpy.ma.isMaskedArray(V) and not MV2.isMaskedVariable(V):     
        #
        # The passed array is an numpy.ma
        #
        if __DEBUG__: print 'Entered numpy.ma only....'
        if __DEBUG__: print '!!!!!!Checking weights for numpy.ma', weights
        #
        #
        if isinstance(weights, types.StringType) and weights in ['weighted', 'generate']:
            if __DEBUG__: print 'VOILA!'
            print 'cdutil.averager Warning: \n\tNot operating on a TransientVariable.'
            print '\tChanging default weights to \'unweighted\' (equally weighted)'
            weights = None
        # end of if weights == 'weighted':
        #
        # Check the axis options.
        #
        axis = _check_MA_axisoptions(axis, V.ndim)
        #
        # Now reorder the original MA to the order in which operations need to be done
        #
        newaxorder = []
        for i in axis:
            newaxorder.append(i)
        # end of for i in axis:
        for i in range(len(V.shape)):
            if not i in newaxorder:
                newaxorder.append(i)
            # if not i in newaxorder:
        # end of for i in range(len(numpy.ma.shape(V))):
        #doloop = False
        if newaxorder != range(len(V.shape)):
            x = numpy.ma.transpose(V, newaxorder)
            if __DEBUG__: print 'Reordered shape = ', x.shape
            #osh=list(x.shape)
            #na=len(axis)
            #if n!=x.rank():
            #    nsh=osh[:na] # the axes of operations....
            #    n=1
            #    for m in osh[na:]:
            #        n*=m
            #    nsh.append(n)
            #    x = numpy.ma.reshape(x,nsh)
            #    if n>35000000:
            #        doloop=
        else:
            x = V

        
        # end of if newaxorder != range(len(V.shape)):
        #
        # Check the weight options
        #
        weights = _check_MA_weight_options(weights, x.shape, len(axis))
        #
        #
        if __DEBUG__: print 'Length of axis = ', len(axis)
        if __DEBUG__: print 'Length of weights = ', len(weights)
        #
        # If the user has passed combinewts = 1, then do the combining of weights here
        #
        if combinewts == 1:
            weights = _combine_weights(x, weights)
        # end of if combinewts == 1:
        #
        # Now decide if we need to average or sum
        #
        if action == 'average':
            #
            # The actual averaging.........
            #
            for i in range(len(axis)):
                #
                if __DEBUG__: print 'Averaging axis # = ', i,
                #
                if  isinstance(weights[i] , types.StringType) or (weights[i] is None):
                    pass
                else:
                    if __DEBUG__: print weights[i].shape
                # end of if not isinstance(weights[i] , types.StringType):
                if i > len(weights)-1:
                    if not retwts:
                        raise AveragerError, 'An unknown error occurred (retwts). Report this bug.'
                    else:
                        weights.append(retwts)
                    # end of if not retwts:
                # end of if i > len(weights)-1:
                try:
                    x, retwts = numpy.ma.average(x, weights=weights[i], returned=1, axis=0)
                except:
                    raise AveragerError, 'Some problem with averaging MA'
                #
            # end of for i in range(len(axis)):
        elif action == 'sum':
            #
            # Come to sum function here
            #
            for i in range(len(axis)):
                if __DEBUG__: print 'Summing axis #', i
                if i > len(weights)-1:
                    try:
                        x = numpy.ma.sum(x, returned=0, axis=0)
                        retwts = numpy.ma.sum(retwts, axis=0)
                    except:
                        raise AveragerError, 'Some problem with summing numpy.ma'
                    # end of try:
                else:
                    try:
                        x, retwts = numpy.ma.average(x, weights=weights[i], returned=1, axis=0)
                        x = x * retwts
                    except:
                        raise AveragerError, 'Some problem with summing numpy.ma'
                    # end of try:
                # end of if i > len(filled_wtoptions):            
                if __DEBUG__: print 'Finished Summing axis #', i
            # end of for i in range(N_axes):
        # end of if action == 'sum':
        #
        # If we started out with a numpy array, convert the numpy.ma to numpy
        #
        if _NUM_FLAG:
            if numpy.ma.isMaskedArray(x):
                x = x.filled()
            # end of if numpy.ma.isMaskedArray(x):
            #
            if numpy.ma.isMaskedArray(retwts):
                retwts = retwts.filled()
            # end of if numpy.ma.isMaskedArray(retwts):
        # end of if _NUM_FLAG:
        #
        if returned:
            return x, retwts
        else:
            return x
        # end of if returned:
        #
        return None
    # end of if numpy.ma.isMaskedArray(V):
    #
    #******************************************************************************************
    #
    # Case 3: Masked Variable.
    #
    if weights is None: weights = 'weighted'
    #
    axis_order = []
    if __DEBUG__: print 'Inside averager axis = ', axis
    if axis == None:
        if __DEBUG__: print 'Default axis is the first axis.........'
        axis = V.getOrder()[0]
        axis_order.append(axis)
    else:
        if type(axis)==type(0): axis=str(axis)
        axis_order = _check_axisoptions(V, axis)
        if __DEBUG__: print 'Axes to be addressed in the order ', axis_order
        for an in range(len(axis_order)):
            item = axis_order[an]
            if isinstance(item, types.IntType):
                loc = string.find(axis, str(item))
                if loc != -1:
                    xlist = list(axis)
                    xlist[loc] = V.getOrder()[item]
                    if xlist[loc] == '-': xlist[loc] = '(' + V.getAxis(item).id + ')'
                    if __DEBUG__: print '*** the axisoption is about to be modified. Before mod  = ', axis 
                    axis = string.joinfields(xlist, '')
                    if __DEBUG__: print '*** the axisoption has been modified. It is = ', axis
            # end of if type(item) = type(1):
        # end of for an in range(len(axis_order)):
        if __DEBUG__: print 'NEW! Axes to be addressed in the order ', axis_order
        if axis_order != None:
            if __DEBUG__: print 'axis = ', axis
            V= V(order=axis)
            if __DEBUG__: print '********** I have reordered V= V(order=axis) **********'
        else:
            return None
        # end of if axis_order != None:
    # end of if axis == None:
    #
    if __DEBUG__: print 'Passed axis checks......'
    if __DEBUG__: print 'Axes to be addressed in the order ', axis_order
    #
    # Number of axes to average/sum over = len(axis_order)
    #
    N_axes = len(axis_order)    
    #
    # Parse the weights = options
    #
    if __DEBUG__: print 'Checking weights= options:',weights
    #
    filled_wtoptions = __check_weightoptions(V, axis, weights)
    if __DEBUG__: print 'The weights options are ', filled_wtoptions
    #
    if not isinstance(filled_wtoptions, types.ListType):
        filled_wtoptions = [filled_wtoptions]
    # end of if not isinstance(filled_wtoptions, types.ListType):
    #
    # 
    if __DEBUG__: print 'Length of axis_order = ', N_axes
    if __DEBUG__: print 'Length of filled_wtoptions = ', len(filled_wtoptions)
    #
    # If the user has passed combinewts = 1, then do the combining of weights here
    #
    if combinewts == 1:
        filled_wtoptions = _combine_weights(V, weights)
    # end of if combinewts == 1:
    #
    # Now decide if we need to average or sum
    #
    if __DEBUG__: print 'type(weights) = ', type(weights)
    try:
        if __DEBUG__: print 'Are they equal?', MV2.allclose(weights, area_weights(V,axisoptions))
    except:
        pass
    #
    if action == 'average':
        #
        # Come to averaging function here....
        #
        for i in range(N_axes):
            #
            if __DEBUG__: print 'Averaging axis #', i
            #
            if i > len(filled_wtoptions)-1:
                if sumwts is None:
                    raise AveragerError, 'An unknown error occurred (sumwts). Report this bug.'
                else:
                    filled_wtoptions.append(sumwts)
                # end of if not sumwts:
            # end of if i > len(filled_wtoptions):
            V, sumwts = average_engine(V, filled_wtoptions[i])
            if __DEBUG__: print 'Finished Averaging axis #', i
        # end of for i in range(N_axes):
        if returned == 1:
            return V, sumwts
        else:
            return V
        # end of if returned == 1:
    elif action == 'sum':
        #
        # Come to sum function here
        #
        for i in range(N_axes):
            if __DEBUG__: print 'Summing axis #', i
            if i > len(filled_wtoptions)-1:
                V, dummy_wts = sum_engine(V, 'unweighted')
                sumwts = MV2.sum(sumwts, axis=0)
            else:
                V, sumwts = sum_engine(V, filled_wtoptions[i])
            # end of if i > len(filled_wtoptions):            
            if __DEBUG__: print 'Finished Summing axis #', i
        # end of for i in range(N_axes):
        y = V
        # end of if len(filled_wtoptions) == 1:
        if returned == 1:
            return y, sumwts
        else:
            return y
Example #25
0
def area_weights(ds,axisoptions=None):
    '''
    Calculates masked area weights.

    Author: Charles Doutriaux: [email protected]

    Modified version using CDAT 3.0 by Paul Dubois
    Further modified by Krishna AchutaRao to return weights in all axes.

    Returns a masked array of the same dimensions as ds containing area weights
    but masked where ds is masked.
    '''
    cdat_info.pingPCMDIdb("cdat","genutil.area_weights")
    #
    __DEBUG__ = 0
    #
    if __DEBUG__: print 'Incoming axisoptions = ', axisoptions
    if __DEBUG__: print 'Shape of Incoming data = ', ds.shape
    seenlon = 0
    seenlat = 0
    if 'x' in list(ds.getOrder()): seenlon = 1
    if 'y' in list(ds.getOrder()): seenlat = 1
    #
    if seenlat and seenlon:
        if __DEBUG__: print 'Found both latitude and longitude'
        initial_order = ds.getOrder()
        if __DEBUG__: print 'initial_order= ', initial_order
        initial_order_list = list(initial_order)
        if '-' in initial_order_list:
            loc = initial_order_list.index('-')
            axisid = '(' + ds.getAxis(loc).id + ')'
            initial_order_list[loc] = axisid
            initial_order = string.joinfields(initial_order_list, '')
            if __DEBUG__: print 'Changed initial_order = ', initial_order
        # end of if '-' in initial_order_list:
        ds = ds(order='...yx')
        dsorder = ds.getOrder()
        if __DEBUG__: print 'Reordered ds ', dsorder        
        Lataxisindex = list(dsorder).index('y')
        Lonaxisindex = list(dsorder).index('x')
        if __DEBUG__: print 'Lataxisindex = ', Lataxisindex, ' Lonaxisindex = ', Lonaxisindex
        #wt = numpy.outer(__myGetAxisWeights(ds,Lataxisindex), __myGetAxisWeights(ds,Lonaxisindex))
        dsgr = ds.getGrid()
        latwts, lonwts = dsgr.getWeights()
        wt = numpy.outer(numpy.array(latwts), numpy.array(lonwts))
        # At this point wt is an nlat by nlong matrix  
        # Now the problem is to propagate this two-dimensional weight mask 
        # through the other dimensions. To do this we shuffle these two dimensions
        # to the front of the shape, resize wt, and then permute it back to 
        # the order of the dimensions in ds.
        s = ds.shape
        for i in range(len(s)-1, -1, -1):
            if (i != Lataxisindex) and (i != Lonaxisindex):
                newaxiswt = __myGetAxisWeights(ds, i,axisoptions)
                wtlist = list(wt.shape)
                if __DEBUG__: print 'Before Inserting newdim', wtlist
                wtlist.insert(0, newaxiswt.shape[0])
                if __DEBUG__: print 'After inserting newdim ', wtlist
                new_wtshape = tuple(wtlist)
                wt = numpy.resize(wt, new_wtshape)
                if __DEBUG__: print 'After inserting dimension ', i, ' shape of wt = ', wt.shape
                new_newaxiswt_shape = list(newaxiswt.shape)
                for nn in range(1, len(wt.shape), 1): new_newaxiswt_shape.append(1)
                newaxiswt = numpy.resize(newaxiswt, tuple(new_newaxiswt_shape))
                wt =  wt * newaxiswt
            # end of if (i != Lataxisindex) and (i != Lonaxisindex):
        # end of for i in range(len(s)):
        wt = cdms2.createVariable(numpy.ma.masked_array(wt, numpy.ma.getmask(ds)), axes=ds.getAxisList())
        result = wt(order=initial_order)
        if __DEBUG__: print 'Returning something of order', result.getOrder()
        return result
    else:
        wt = __myGetAxisWeights(ds, 0, axisoptions)
        if __DEBUG__: print 'Initial', wt.shape
        for i in range(1, len(ds.shape)):
            wt_newshape = tuple(list(ds.shape)[:i+1])
            if __DEBUG__: print 'wt_newshape = ', wt_newshape
            wt = numpy.resize(wt, wt_newshape)
            if __DEBUG__: print 'After wt resize wt.shape = ', wt.shape
            newaxiswt = __myGetAxisWeights(ds, i)
            newaxiswt = numpy.resize(newaxiswt, wt.shape)
            wt = wt * newaxiswt
            if __DEBUG__: print 'After axis ', i, ' wt has shape ', wt.shape
        # end of for i in range(2, len(ds.shape)):
        if __DEBUG__: print 'Final Shape of Weight = ', wt.shape
        return cdms2.createVariable(numpy.ma.masked_array(wt, numpy.ma.getmask(ds)), axes=ds.getAxisList())
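A minimal usage sketch for the area_weights routine above; the file name ('ta.nc') and variable id ('ta') are hypothetical, and the call assumes the variable carries a rectilinear latitude/longitude grid with bounds so that grid weights can be generated.

import cdms2
import cdutil

f = cdms2.open('ta.nc')        # hypothetical input file
ta = f('ta')                   # any MV2 with lat/lon axes and bounds

# 2-D lat/lon area weights propagated through the remaining dimensions,
# masked wherever ta is masked
wts = cdutil.area_weights(ta)

# the weights can then be handed straight to averager
ta_mean = cdutil.averager(ta, axis='xy', weights=wts)
f.close()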
Example #26
0
"""Module cdutil contains miscellaneous routines for manipulating variables.
"""
import region
#import continent_fill
from genutil.averager import averager, AveragerError, area_weights, getAxisWeight, getAxisWeightByName, __check_weightoptions
from times import *
from retrieve import WeightsMaker, WeightedGridMaker, VariableConditioner, VariablesMatcher
from vertical import sigma2Pressure, reconstructPressureFromHybrid, logLinearInterpolation, linearInterpolation
from create_landsea_mask import generateLandSeaMask
from sftbyrgn import generateSurfaceTypeByRegionMask
import cdat_info
cdat_info.pingPCMDIdb("cdat", "cdutil")
Example #27
0
#                                                                               #
# Description:  Python command wrapper for VCS's functionality. VCS is computer #
#               software for the selection, manipulation, and display of        #
#               scientific data. By specification of the desired data, the      #
#               graphics method, and the display template, the VCS user gains   #
#               virtually complete control of the appearance of the data        #
#               display and associated text and animation.                      #
#                                                                               #
#################################################################################
"""
import vcs_legacy.info
import sys
if sys.executable[-4:]!='cdat' and sys.platform in ['darwin'] and (vcs_legacy.info.WM=='QT' or vcs_legacy.info.EM=='QT'):
    raise ImportError,"if you are going to use vcs_legacy you need to run this as 'cdat' not %s" % sys.executable
import cdat_info
cdat_info.pingPCMDIdb("cdat","vcs_legacy_legacy")
import slabapi # to make sure it is initialized
import _vcs_legacy
import thread
import time
import Canvas
from vcs_legacyhelp import *
from queries import *
from pauser import pause
from utils import *
import install_vcs_legacy
from install_vcs_legacy import list_printers, add_printer, remove_printer
from Canvas import dictionarytovcs_legacylist

_default_time_units='days since 2000'
Example #28
0
import pcmdi_metrics
import sys
import argparse
import os
import json
import genutil
import warnings
import cdms2
import MV2
import cdutil
import collections
import cdat_info
import unidata

# Statistical tracker
cdat_info.pingPCMDIdb("pcmdi_metrics", "pcmdi_metrics_driver")

# Before we do anything else we need to create some units
# Salinity Units
unidata.udunits_wrap.init()

# Create a dimensionless units named dimless
unidata.addDimensionlessUnit("dimless")

# Created scaled units for dimless
unidata.addScaledUnit("psu", .001, "dimless")
unidata.addScaledUnit("PSS-78", .001, "dimless")
unidata.addScaledUnit("Practical Salinity Scale 78", .001, "dimless")

# The following are actually created in the execfile bit; this is to make flake8 happy
regions_specs = {}
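Once the scaled salinity units above are registered, they can be exercised through the udunits wrapper; the snippet below is a minimal sketch assuming the standard unidata.udunits interface, and the numeric value is arbitrary.

from unidata import udunits

s = udunits(35.0, "psu")        # 35 practical salinity units
s_dimless = s.to("dimless")     # expected: 0.035 in the dimensionless base unit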
Example #29
0
#               scientific data. By specification of the desired data, the      #
#               graphics method, and the display template, the VCS user gains   #
#               virtually complete control of the appearance of the data        #
#               display and associated text and animation.                      #
#                                                                               #
#################################################################################
"""
import vcs.info
import sys
if sys.executable[-4:] != 'cdat' and sys.platform in [
        'darwin'
] and (vcs.info.WM == 'QT' or vcs.info.EM == 'QT'):
    raise ImportError, "if you are going to use vcs you need to run this as 'cdat' not %s" % sys.executable
import cdat_info

cdat_info.pingPCMDIdb("cdat", "vcs")
import slabapi  # to make sure it is initialized
import _vcs
import thread
import time
import Canvas
from vcshelp import *
from queries import *
from pauser import pause
from utils import *
import install_vcs
from install_vcs import list_printers, add_printer, remove_printer
from Canvas import dictionarytovcslist

_default_time_units = 'days since 2000'
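The module above only wires up the VCS package; a minimal, hypothetical plotting sketch with the public API follows (the file name 'clt.nc' and variable id 'clt' are placeholders).

import cdms2
import vcs

f = cdms2.open('clt.nc')   # hypothetical input file
clt = f('clt')

x = vcs.init()             # create a VCS canvas
x.plot(clt)                # plot with the default template and graphics method
x.png('clt')               # save the canvas as clt.png
f.close()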
Example #30
0
#               software for the selection, manipulation, and display of        #
#               scientific data. By specification of the desired data, the      #
#               graphics method, and the display template, the VCS user gains   #
#               virtually complete control of the appearance of the data        #
#               display and associated text and animation.                      #
#                                                                               #
#################################################################################
"""
import vcs_legacy.info
import sys
if sys.executable[-4:] != 'cdat' and sys.platform in [
        'darwin'
] and (vcs_legacy.info.WM == 'QT' or vcs_legacy.info.EM == 'QT'):
    raise ImportError, "if you are going to use vcs_legacy you need to run this as 'cdat' not %s" % sys.executable
import cdat_info
cdat_info.pingPCMDIdb("cdat", "vcs_legacy_legacy")
import slabapi  # to make sure it is initialized
import _vcs_legacy
import thread
import time
import Canvas
from vcs_legacyhelp import *
from queries import *
from pauser import pause
from utils import *
import install_vcs_legacy
from install_vcs_legacy import list_printers, add_printer, remove_printer
from Canvas import dictionarytovcs_legacylist

_default_time_units = 'days since 2000'
Example #31
0
import pcmdi_metrics
import sys
import argparse
import os
import json
import genutil
import warnings
import cdms2
import MV2
import cdutil
import collections
import cdat_info
import unidata

# Statistical tracker
cdat_info.pingPCMDIdb("pcmdi_metrics", "pcmdi_metrics_driver")

# Before we do anything else we need to create some units
# Salinity Units
unidata.udunits_wrap.init()

# Create a dimensionless units named dimless
unidata.addDimensionlessUnit("dimless")

# Created scaled units for dimless
unidata.addScaledUnit("psu", .001, "dimless")
unidata.addScaledUnit("PSS-78", .001, "dimless")
unidata.addScaledUnit("Practical Salinity Scale 78", .001, "dimless")

# The following are actually created in the execfile bit; this is to make flake8 happy
regions_specs = {}
Example #32
0
def logLinearInterpolation(A,
                           P,
                           levels=[
                               100000, 92500, 85000, 70000, 60000, 50000,
                               40000, 30000, 25000, 20000, 15000, 10000, 7000,
                               5000, 3000, 2000, 1000
                           ],
                           status=None):
    """
    Log-linear interpolation
    to convert a field from sigma levels to pressure levels
    Values below the surface are masked
    
    Input
    A :    array on sigma levels
    P :    pressure field from TOP (level 0) to BOTTOM (last level)
    levels : pressure levels to interpolate to (same units as P), default levels are: [100000, 92500, 85000, 70000, 60000, 50000, 40000, 30000, 25000, 20000, 15000, 10000, 7000, 5000, 3000, 2000, 1000]

    P and levels must have same units

    Output
    array on pressure levels (levels)
    
    Examples:
    A=logLinearInterpolation(A,P,levels=[100000, 92500, 85000, 70000, 60000, 50000, 40000, 30000, 25000, 20000, 15000, 10000, 7000, 5000, 3000, 2000, 1000])
    """

    cdat_info.pingPCMDIdb("cdat", "cdutil.vertical.logLinearInterpolation")
    try:
        nlev = len(levels)  # Number of pressure levels
    except:
        nlev = 1  # if only one level len(levels) would breaks
        levels = [
            levels,
        ]
    order = A.getOrder()
    A = A(order='z...')
    P = P(order='z...')
    sh = list(P.shape)
    nsigma = sh[0]  #number of sigma levels
    sh[0] = nlev
    t = MV2.zeros(sh, typecode=MV2.float32)
    sh2 = P[0].shape
    prev = -1
    for ilev in range(nlev):  # loop through pressure levels
        if status is not None:
            prev = genutil.statusbar(ilev, nlev - 1., prev)
        lev = levels[ilev]  # get value for the level
        Pabv = MV2.ones(sh2, MV2.float)
        Aabv = -1 * Pabv  # Array on sigma level Above
        Abel = -1 * Pabv  # Array on sigma level Below
        Pbel = -1 * Pabv  # Pressure on sigma level Below
        Pabv = -1 * Pabv  # Pressure on sigma level Above
        Peq = MV2.masked_equal(Pabv, -1)  # Area where Pressure == levels
        for i in range(1, nsigma):  # loop from second sigma level to last one
            a = MV2.greater_equal(
                P[i], lev)  # Where is the pressure greater than lev
            b = MV2.less_equal(P[i - 1],
                               lev)  # Where is the pressure less than lev
            # Now looks if the pressure level is in between the 2 sigma levels
            # If yes, sets Pabv, Pbel and Aabv, Abel
            a = MV2.logical_and(a, b)
            Pabv = MV2.where(a, P[i], Pabv)  # Pressure on sigma level Above
            Aabv = MV2.where(a, A[i], Aabv)  # Array on sigma level Above
            Pbel = MV2.where(a, P[i - 1],
                             Pbel)  # Pressure on sigma level Below
            Abel = MV2.where(a, A[i - 1], Abel)  # Array on sigma level Below
            Peq = MV2.where(MV2.equal(P[i], lev), A[i], Peq)

        val = MV2.masked_where(
            MV2.equal(Pbel, -1),
            numpy.ones(Pbel.shape) *
            lev)  # set to missing value where there is no data below lev

        tl = MV2.log(val / Pbel) / MV2.log(
            Pabv / Pbel) * (Aabv - Abel) + Abel  # Interpolation
        if ((Peq.mask is None) or (Peq.mask is MV2.nomask)):
            tl = Peq
        else:
            tl = MV2.where(1 - Peq.mask, Peq, tl)
        t[ilev] = tl.astype(MV2.float32)

    ax = A.getAxisList()
    autobnds = cdms2.getAutoBounds()
    cdms2.setAutoBounds('off')
    lvl = cdms2.createAxis(MV2.array(levels).filled())
    cdms2.setAutoBounds(autobnds)
    try:
        lvl.units = P.units
    except:
        pass
    lvl.id = 'plev'

    try:
        t.units = P.units
    except:
        pass

    ax[0] = lvl
    t.setAxisList(ax)
    t.id = A.id
    for att in A.listattributes():
        setattr(t, att, getattr(A, att))
    return t(order=order)
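A minimal usage sketch for the interpolator above; the file name and variable ids are hypothetical, and it assumes temperature and pressure are stored on the same model levels, ordered top to bottom, with pressure in Pa.

import cdms2
import cdutil

f = cdms2.open('model_levels.nc')   # hypothetical file
ta = f('ta')                        # field on model (sigma/hybrid) levels
p = f('pfull')                      # full-level pressure, same shape and order as ta

# interpolate to three standard pressure levels (same units as p)
ta_plev = cdutil.logLinearInterpolation(ta, p, levels=[85000, 50000, 25000])
f.close()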
Example #33
0
"""
CDMS module-level API
"""
import cdat_info
cdat_info.pingPCMDIdb("cdat","cdms2")

__all__ = ["cdmsobj", "axis", "coord", "grid", "hgrid", "avariable", \
"sliceut", "error", "variable", "fvariable", "tvariable", "dataset", \
"database", "cache", "selectors", "MV2", "convention", "bindex", \
"auxcoord", "gengrid", "gsHost", "gsStaticVariable", "gsTimeVariable", \
"mvBaseWriter", "mvSphereMesh", "mvVsWriter", "mvCdmsRegrid"]

# Errors
from error import CDMSError

# CDMS datatypes
from cdmsobj import CdArray, CdChar, CdByte, CdDouble, CdFloat, CdFromObject, CdInt, CdLong, CdScalar, CdShort, CdString

# Functions which operate on all objects or groups of objects
from cdmsobj import Unlimited, getPathFromTemplate, matchPattern, matchingFiles, searchPattern, searchPredicate, setDebugMode

# Axis functions and classes
from axis import AbstractAxis, axisMatches, axisMatchAxis, axisMatchIndex
from axis import createAxis, createEqualAreaAxis, createGaussianAxis, createUniformLatitudeAxis, createUniformLongitudeAxis, setAutoBounds, getAutoBounds

# Grid functions
from grid import createGenericGrid, createGlobalMeanGrid, createRectGrid, createUniformGrid, createZonalGrid, setClassifyGrids, createGaussianGrid, writeScripGrid, isGrid

# Dataset functions
from dataset import createDataset, openDataset, useNetcdf3, getNetcdfShuffleFlag, getNetcdfDeflateFlag, getNetcdfDeflateLevelFlag, setNetcdfShuffleFlag, setNetcdfDeflateFlag, setNetcdfDeflateLevelFlag, setCompressionWarnings
open = openDataset
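A short, hypothetical round trip through the module-level API exported above (the file name, variable id and time range are placeholders).

import numpy
import cdms2

f = cdms2.open('sample.nc')                        # cdms2.open is an alias for openDataset
tas = f('tas', time=('2000-1-1', '2000-12-31'))    # read a variable with a time selection
plev = cdms2.createAxis(numpy.array([1000., 850., 500.]))  # build a new axis from values
f.close()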
Example #34
0
"""Module cdutil contains miscellaneous routines for manipulating variables.
"""
import region
#import continent_fill
from genutil.averager import averager, AveragerError, area_weights, getAxisWeight, getAxisWeightByName, __check_weightoptions
from times import *
from retrieve import WeightsMaker, WeightedGridMaker, VariableConditioner, VariablesMatcher
from vertical import sigma2Pressure, reconstructPressureFromHybrid, logLinearInterpolation, linearInterpolation
from create_landsea_mask import generateLandSeaMask
from sftbyrgn import generateSurfaceTypeByRegionMask
import cdat_info
cdat_info.pingPCMDIdb("cdat", "start")
Example #35
0
"""
CDMS module-level API
"""
import cdat_info
cdat_info.pingPCMDIdb("cdat","start")

__all__ = ["cdmsobj", "axis", "coord", "grid", "hgrid", "avariable", \
"sliceut", "error", "variable", "fvariable", "tvariable", "dataset", \
"database", "cache", "selectors", "MV2", "convention", "bindex", \
"auxcoord", "gengrid", "gsHost", "gsStaticVariable", "gsTimeVariable", \
"mvBaseWriter", "mvSphereMesh", "mvVsWriter", "mvCdmsRegrid"]

# Errors
from error import CDMSError

# CDMS datatypes
from cdmsobj import CdArray, CdChar, CdByte, CdDouble, CdFloat, CdFromObject, CdInt, CdLong, CdScalar, CdShort, CdString

# Functions which operate on all objects or groups of objects
from cdmsobj import Unlimited, getPathFromTemplate, matchPattern, matchingFiles, searchPattern, searchPredicate, setDebugMode

# Axis functions and classes
from axis import AbstractAxis, axisMatches, axisMatchAxis, axisMatchIndex
from axis import createAxis, createEqualAreaAxis, createGaussianAxis, createUniformLatitudeAxis, createUniformLongitudeAxis, setAutoBounds, getAutoBounds

# Grid functions
from grid import createGenericGrid, createGlobalMeanGrid, createRectGrid, createUniformGrid, createZonalGrid, setClassifyGrids, createGaussianGrid, writeScripGrid, isGrid

# Dataset functions
from dataset import createDataset, openDataset, useNetcdf3, getNetcdfShuffleFlag, getNetcdfDeflateFlag, getNetcdfDeflateLevelFlag, setNetcdfShuffleFlag, setNetcdfDeflateFlag, setNetcdfDeflateLevelFlag, setCompressionWarnings
open = openDataset
Example #36
0
def averager(V,
             axis=None,
             weights=None,
             action='average',
             returned=0,
             weight=None,
             combinewts=None):
    """
                        Documentation for averager():
                        -----------------------------
The averager() function provides a convenient way of averaging your data giving
you control over the order of operations (i.e which dimensions are averaged
over first) and also the weighting for the different axes. You can pass your
own array of weights for each dimension or use the default (grid) weights or
specify equal weighting. 


Author: Krishna AchutaRao : [email protected]

Returns:
-------
       The average over the specified dimensions.
Usage: 
------
from cdutil import averager
averager( V, axis='axisoptions', weights=weightoptions, action='average',
          returned='0')

Where V is an array. It can be an array of numpy, numpy.ma or MV2 type. In each case
the function returns an array (except when it results in a scalar) of the same
type as V. See examples for more details.

Optional Arguments:
-------------------
axis=axisoptions   
        Restrictions: axisoptions has to be a string
        Default : first dimension in the data you pass to the function.

        You can pass axis='tyx', or '123', or 'x (plev)' etc.  the same way as
        in order= options for variable operations EXCEPT that
        '...'(i.e Ellipses) are not allowed. In the case that V is a numpy or
        numpy.ma array, axis names have no meaning and only axis indexes are valid.


weights=weightoptions
        Default :
                 'weighted' for Transient Variables (MV2s)
                 'unweighted' for numpy.ma or numpy arrays.

        Note that depending on the array being operated on by averager, the
        default weights change!
        
        Weight options are one of 'weighted', 'unweighted',  an array of weights for
        each dimension or a MaskedVariable of the same shape as the data x.

        - 'weighted' means use the grid information to generate weights for
          that dimension.

        - 'unweighted' means use equal weights for all the grid points in that axis. 

        - Also an array of weights (of the same shape as the dimension being
          averaged over or same shape as V) can be passed.

          Additional Notes on 'weighted' option: The weights are generated
          using the bounds for the specified axis. For latitude and Longitude,
          the weights are calculated using the area (see the cdms2 manual
          grid.getWeights() for more details) whereas for the other axes
          weights are the difference between the bounds (when the bounds are
          available). If the bounds are stored in the file being read in, then
          those values are used. Otherwise, bounds are generated as long as
          cdms2.setAutoBounds('on') is set. If cdms2.setAutoBounds() is set to
          'off', then an Error is raised.

action='average' or 'sum'
        Default : 'average'

        You can either return the weighted average or the weighted sum of the
        data by specifying the keyword argument action=
        
returned = 0 or 1
        Default: 0
        
        - 0 implies sum of weights are not returned after averaging operation.
        - 1 implies the sum of weights after the average operation is returned.

combinewts = None, 0 or 1
        Default: None - same as 0
        - 0 implies weights passed for individual axes are not combined into one
          weight array for the full variable V before performing operation.
        - 1 implies weights passed for individual axes are combined into one
          weight array for the full variable before performing average or sum
          operations. One-dimensional weight arrays or key words of 'weighted' or
          'unweighted' must be passed for the axes over which the operation is
          to be performed. Additionally, weights for axes that are not being
          averaged or summed may also be passed in the order in which they appear.
          If the weights for the other axes are not passed, they are assumed to
          be equally weighted.

Examples:
---------
        >>> f = cdms2.open('data_file_name')
        >>> averager(f('variable_name'), axis='1')
        # extracts the variable 'variable_name' from f and averages over the 
        # dimension whose position is 1. Since no other options are specified, 
        # defaults kick in i.e weight='weighted' and returned=0

        >>> averager(V, axis='xy', weights=['weighted','unweighted'])
        or
        >>> averager(V, axis='t', weights='unweighted')
        or
        >>> averager(V, axis='x')
        # Default weights option of 'weighted' is implemented
        or
        >>> averager(V, axis='x', weights=mywts) 
        # where mywts is an array of shape (len(xaxis)) or shape(V)
        or
        >>> averager(V, axis='(lon)y', weights=[myxwts, myywts])
        # where myxwts is of shape len(xaxis) and myywts is of shape len(yaxis)
        or
        >>> averager(V, axis='xy', weights=V_wts)
        # where V_wts is a Masked Variable of shape V
        or
        >>> averager(V, axis='x', weights='unweighted', action='sum')
        # will return the equally weighted sum over the x dimension
        or
        >>> ywt = area_weights(y)
        >>> fractional_area = averager(ywt, axis='xy',
                                weights=['unweighted', 'unweighted'], action='sum')
        # is a good way to compute the area fraction of the
        # data y that is non-missing
        
Note:
-----
        When averaging data with missing values, extra care needs to be taken.
        It is recommended that you use the default weights='weighted' option.
        This uses cdutil.area_weights(V) to get the correct weights to
        pass to the averager.
        >>> averager(V, axis='xy', weights='weighted')
        
        The above is equivalent to:
        >>> V_wts = cdutil.area_weights(V)
        >>> result = averager(V, axis='xy', weights=V_wts)
        or
        >>> result = averager(V, axis='xy', weights=cdutil.area_weights(V))

        However, the area_weights function requires that the axis bounds are
        stored or can be calculated (see documentation of area_weights for more
        details). In the case that such weights are not stored with the axis
        specifications (or the user desires to specify weights from another
        source), the use of combinewts option can produce the same results.
        In short, the following two are equivalent:
        >>> xavg_1 = averager(X, axis = 'xy', weights = area_weights(X))
        >>> xavg_2 = averager(X, axis = 'xy', weights = ['weighted', 'weighted', 'weighted'], combinewts=1)

        Where X is a function of x, y and a third dimension such as time or level.
        
        In general, the above can be substituted with arrays of weights where
        the 'weighted' keyword appears.
        """
    __DEBUG__ = 0
    cdat_info.pingPCMDIdb("cdat", "genutil.averager")
    #
    # Check the weight = option. This is done for backward compatibility since
    # weights= is the current default syntax.
    #
    if not weight is None:
        if not weights is None:
            raise AveragerError, \
              'Error: You cannot set both weight and weights. weight is obsolete; please use weights only.'
        else:
            weights = weight
        # end of if not weights in ['generate','weighted'] :
    # end of if not weight is None:
    #
    # Note: Further checking on weights is done later - in the numpy.ma & MV2 sections also.
    #
    # Check the returned option
    #
    if returned not in [0, 1]:
        raise AveragerError, \
              'Error: Invalid option for returned. Pass 0 or 1.'
    # end of if returned not in [0,1]:
    #
    # Check the action = options
    #
    if string.upper(action) in ['AVERAGE', 'AVE', 'AVG']:
        action = 'average'
    elif string.upper(action) in ['SUM', 'ADD']:
        action = 'sum'
    else:
        raise AveragerError, 'Error: Invalid option for action. Pass \'average\' or \'sum\''
    # end of if string.upper(action) in ['AVERAGE', 'AVE', 'AVG']:
    #
    # Check the combinewts option
    #
    if not combinewts:
        combinewts = 0
    elif combinewts not in [0, 1]:
        raise AveragerError, \
              "Error: combinewts must be set to 0 or 1"
    # end of if not combinewts :
    # ************************* End of option checking *************************
    #
    # Account for MV2, numpy.ma or numpy arrays sent in by users. Return result of same type.
    #
    #
    # Case 1. numpy array
    #         Convert numpy array to numpy.ma and remember it using _NUM_FLAG so you
    #         can convert the answer to numpy array before returning
    #
    if isinstance(V, numpy.ndarray):
        if __DEBUG__:
            print 'Converting to numpy.ma so I can do a numpy.ma.average or sum'
        V = numpy.ma.array(V)
        _NUM_FLAG = 1
    else:
        _NUM_FLAG = 0
    # end of if isinstance(V, numpy.ndarray):
    #
    #
    # Case 2. Masked Array (numpy.ma)
    #
    if numpy.ma.isMaskedArray(V) and not MV2.isMaskedVariable(V):
        #
        # The passed array is an numpy.ma
        #
        if __DEBUG__: print 'Entered numpy.ma only....'
        if __DEBUG__: print '!!!!!!Checking weights for numpy.ma', weights
        #
        #
        if isinstance(weights, types.StringType) and weights in [
                'weighted', 'generate'
        ]:
            if __DEBUG__: print 'VOILA!'
            print 'cdutil.averager Warning: \n\tNot operating on a TransientVariable.'
            print '\tChanging default weights to \'unweighted\' (equally weighted)'
            weights = None
        # end of if weights == 'weighted':
        #
        # Check the axis options.
        #
        axis = _check_MA_axisoptions(axis, V.ndim)
        #
        # Now reorder the original MA to the order in which operations need to be done
        #
        newaxorder = []
        for i in axis:
            newaxorder.append(i)
        # end of for i in axis:
        for i in range(len(V.shape)):
            if not i in newaxorder:
                newaxorder.append(i)
            # if not i in newaxorder:
        # end of for i in range(len(numpy.ma.shape(V))):
        #doloop = False
        if newaxorder != range(len(V.shape)):
            x = numpy.ma.transpose(V, newaxorder)
            if __DEBUG__: print 'Reordered shape = ', x.shape
            #osh=list(x.shape)
            #na=len(axis)
            #if n!=x.rank():
            #    nsh=osh[:na] # the axes of operations....
            #    n=1
            #    for m in osh[na:]:
            #        n*=m
            #    nsh.append(n)
            #    x = numpy.ma.reshape(x,nsh)
            #    if n>35000000:
            #        doloop=
        else:
            x = V

        # end of if newaxorder != range(len(V.shape)):
        #
        # Check the weight options
        #
        weights = _check_MA_weight_options(weights, x.shape, len(axis))
        #
        #
        if __DEBUG__: print 'Length of axis = ', len(axis)
        if __DEBUG__: print 'Length of weights = ', len(weights)
        #
        # If the user has passed combinewts = 1, then do the combining of weights here
        #
        if combinewts == 1:
            weights = _combine_weights(x, weights)
        # end of if combinewts == 1:
        #
        # Now decide if we need to average or sum
        #
        if action == 'average':
            #
            # The actual averaging.........
            #
            for i in range(len(axis)):
                #
                if __DEBUG__: print 'Averaging axis # = ', i,
                #
                if isinstance(weights[i],
                              types.StringType) or (weights[i] is None):
                    pass
                else:
                    if __DEBUG__: print weights[i].shape
                # end of if not isinstance(weights[i] , types.StringType):
                if i > len(weights) - 1:
                    if not retwts:
                        raise AveragerError, 'An unknown error occurred (retwts). Report this bug.'
                    else:
                        weights.append(retwts)
                    # end of if not retwts:
                # end of if i > len(weights)-1:
                try:
                    x, retwts = numpy.ma.average(x,
                                                 weights=weights[i],
                                                 returned=1,
                                                 axis=0)
                except:
                    raise AveragerError, 'Some problem with averaging MA'
                #
            # end of for i in range(len(axis)):
        elif action == 'sum':
            #
            # Come to sum function here
            #
            for i in range(len(axis)):
                if __DEBUG__: print 'Summing axis #', i
                if i > len(weights) - 1:
                    try:
                        x = numpy.ma.sum(x, returned=0, axis=0)
                        retwts = numpy.ma.sum(retwts, axis=0)
                    except:
                        raise AveragerError, 'Some problem with summing numpy.ma'
                    # end of try:
                else:
                    try:
                        x, retwts = numpy.ma.average(x,
                                                     weights=weights[i],
                                                     returned=1,
                                                     axis=0)
                        x = x * retwts
                    except:
                        raise AveragerError, 'Some problem with summing numpy.ma'
                    # end of try:
                # end of if i > len(filled_wtoptions):
                if __DEBUG__: print 'Finished Summing axis #', i
            # end of for i in range(N_axes):
        # end of if action == 'sum':
        #
        # If we started out with a numpy array, convert the numpy.ma to numpy
        #
        if _NUM_FLAG:
            if numpy.ma.isMaskedArray(x):
                x = x.filled()
            # end of if numpy.ma.isMaskedArray(x):
            #
            if numpy.ma.isMaskedArray(retwts):
                retwts = retwts.filled()
            # end of if numpy.ma.isMaskedArray(retwts):
        # end of if _NUM_FLAG:
        #
        if returned:
            return x, retwts
        else:
            return x
        # end of if returned:
        #
        return None
    # end of if numpy.ma.isMaskedArray(V):
    #
    #******************************************************************************************
    #
    # Case 3: Masked Variable.
    #
    if weights is None: weights = 'weighted'
    #
    axis_order = []
    if __DEBUG__: print 'Inside averager axis = ', axis
    if axis == None:
        if __DEBUG__: print 'Default axis is the first axis.........'
        axis = V.getOrder()[0]
        axis_order.append(axis)
    else:
        if type(axis) == type(0): axis = str(axis)
        axis_order = _check_axisoptions(V, axis)
        if __DEBUG__: print 'Axes to be addressed in the order ', axis_order
        for an in range(len(axis_order)):
            item = axis_order[an]
            if isinstance(item, types.IntType):
                loc = string.find(axis, str(item))
                if loc != -1:
                    xlist = list(axis)
                    xlist[loc] = V.getOrder()[item]
                    if xlist[loc] == '-':
                        xlist[loc] = '(' + V.getAxis(item).id + ')'
                    if __DEBUG__:
                        print '*** the axisoption is about to be modified. Before mod  = ', axis
                    axis = string.joinfields(xlist, '')
                    if __DEBUG__:
                        print '*** the axisoption has been modified. It is = ', axis
            # end of if type(item) = type(1):
        # end of for an in range(len(axis_order)):
        if __DEBUG__:
            print 'NEW! Axes to be addressed in the order ', axis_order
        if axis_order != None:
            if __DEBUG__: print 'axis = ', axis
            V = V(order=axis)
            if __DEBUG__:
                print '********** I have reordered V= V(order=axis) **********'
        else:
            return None
        # end of if axis_order != None:
    # end of if axis == None:
    #
    if __DEBUG__: print 'Passed axis checks......'
    if __DEBUG__: print 'Axes to be addressed in the order ', axis_order
    #
    # Number of axes to average/sum over = len(axis_order)
    #
    N_axes = len(axis_order)
    #
    # Parse the weights = options
    #
    if __DEBUG__: print 'Checking weights= options:', weights
    #
    filled_wtoptions = __check_weightoptions(V, axis, weights)
    if __DEBUG__: print 'The weights options are ', filled_wtoptions
    #
    if not isinstance(filled_wtoptions, types.ListType):
        filled_wtoptions = [filled_wtoptions]
    # end of if not isinstance(filled_wtoptions, types.ListType):
    #
    #
    if __DEBUG__: print 'Length of axis_order = ', N_axes
    if __DEBUG__: print 'Length of filled_wtoptions = ', len(filled_wtoptions)
    #
    # If the user has passed combinewts = 1, then do the combining of weights here
    #
    if combinewts == 1:
        filled_wtoptions = _combine_weights(V, weights)
    # end of if combinewts == 1:
    #
    # Now decide if we need to average or sum
    #
    if __DEBUG__: print 'type(weights) = ', type(weights)
    try:
        if __DEBUG__:
            print 'Are they equal?', MV2.allclose(weights,
                                                  area_weights(V, axis))
    except:
        pass
    #
    if action == 'average':
        #
        # Come to averaging function here....
        #
        for i in range(N_axes):
            #
            if __DEBUG__: print 'Averaging axis #', i
            #
            if i > len(filled_wtoptions) - 1:
                if sumwts is None:
                    raise AveragerError, 'An unknown error occurred (sumwts). Report this bug.'
                else:
                    filled_wtoptions.append(sumwts)
                # end of if not sumwts:
            # end of if i > len(filled_wtoptions):
            V, sumwts = average_engine(V, filled_wtoptions[i])
            if __DEBUG__: print 'Finished Averaging axis #', i
        # end of for i in range(N_axes):
        if returned == 1:
            return V, sumwts
        else:
            return V
        # end of if returned == 1:
    elif action == 'sum':
        #
        # Come to sum function here
        #
        for i in range(N_axes):
            if __DEBUG__: print 'Summing axis #', i
            if i > len(filled_wtoptions) - 1:
                V, dummy_wts = sum_engine(V, 'unweighted')
                sumwts = MV2.sum(sumwts, axis=0)
            else:
                V, sumwts = sum_engine(V, filled_wtoptions[i])
            # end of if i > len(filled_wtoptions):
            if __DEBUG__: print 'Finished Summing axis #', i
        # end of for i in range(N_axes):
        y = V
        # end of if len(filled_wtoptions) == 1:
        if returned == 1:
            return y, sumwts
        else:
            return y
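A short sketch exercising the returned= and action= options documented above; the input is a synthetic MV2 array so the calls are self-contained, and equal weighting is used to avoid any dependence on axis bounds.

import MV2
from cdutil import averager

v = MV2.ones((12, 10))   # synthetic 2-D variable

# equally weighted average over the first axis, also returning the summed weights
avg, wts = averager(v, axis='0', weights='unweighted', returned=1)

# equally weighted sum over both axes
total = averager(v, axis='01', weights=['unweighted', 'unweighted'], action='sum')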
Example #37
0
def custom1D(x, filter, axis=0):
    """
    Function: custom1D(x,filter,axis=0)
     
    Description of function:
        Apply a custom 1 dimensional filter to an array over a specified axis
        filter can be a list of numbers or a 1D array
    Usage:
        filtered = custom1D(x,filter)
    Options:
        axisoptions: 'x' | 'y' | 'z' | 't' | '(dimension_name)' | 0 | 1 ... | n 
            default value = 0. You can pass the name of the dimension or index
            (integer value 0...n) over which you want to compute the statistic.
    """
    cdat_info.pingPCMDIdb("cdat", "genutil.filters.custom1D")
    isMV2 = cdms2.isVariable(x)
    if isMV2: xatt = x.attributes
    filter = MV2.array(filter)
    newx = MV2.array(x)
    initialorder = newx.getOrder(ids=1)
    n = len(filter)
    newx = newx(order=str(axis) + '...')
    sh = list(newx.shape)
    sh[0] = sh[0] - n + 1
    out = numpy.ma.zeros(sh, dtype=newx.dtype.char)
    ax = []
    bnds = []
    nax = newx.getAxis(0)
    for i in range(sh[0]):
        sub = newx[i:i + n]
        if i == 0:
            filter.setAxis(0, sub.getAxis(0))
            filter, sub = genutil.grower(filter, sub)
        out[i] = numpy.ma.average(sub, weights=filter, axis=0)
        if isMV2:
            a = nax.subAxis(i, i + n)
            try:
                b = a.getBounds()
                b1 = b[0][0]
                b2 = b[-1][1]
                ax.append((b1 + b2) / 2.)
                bnds.append([b1, b2])
            except:  # No bounds on this axis
                bnds = None
                ax.append(float(numpy.ma.average(a[:], axis=0)))
    out = MV2.array(out, id=newx.id)
    if isMV2:
        for k in xatt.keys():
            setattr(out, k, xatt[k])
        for i in range(1, len(sh)):
            out.setAxis(i, newx.getAxis(i))
        if not bnds is None: bnds = numpy.ma.array(bnds)
        ax = cdms2.createAxis(ax, bounds=bnds)
        a = newx.getAxis(0)
        attr = a.attributes
        ax.id = a.id
        for k in attr.keys():
            setattr(ax, k, attr[k])
        out.setAxis(0, ax)

    out = out(order=initialorder)
    if not isMV2:
        out = numpy.ma.array(out)
    return out
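A minimal sketch of the filter above, applying a 1-2-1 binomial smoother along the leading axis of a synthetic series; the output is len(filter)-1 points shorter than the input along that axis.

import MV2
from genutil import filters

x = MV2.sin(MV2.arange(120) / 10.0)                        # synthetic 1-D float series
smoothed = filters.custom1D(x, [0.25, 0.5, 0.25], axis=0)  # 1-2-1 running mean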
Example #38
0
"""
genutil -- General utility modules for scientific computing
"""
from grower import grower
import xmgrace
import statistics
from minmax import minmax
from statusbar import statusbar
from selval import picker
import filters
import salstat
import arrayindexing
import ASCII
from unidata import udunits
from Filler import Filler,StringConstructor
from averager import averager, AveragerError, area_weights, getAxisWeight, getAxisWeightByName,__check_weightoptions
#from Statusbar_Pmw import Statusbar
import cdat_info
from ASCII import get_parenthesis_content
cdat_info.pingPCMDIdb("cdat","genutil")

Example #39
0
File: __init__.py Project: zshaheen/cdms
"""
CDMS module-level API
"""

import cdat_info
cdat_info.pingPCMDIdb("cdat", "cdms2")  # noqa
from . import git  # noqa
from . import myproxy_logon  # noqa

__all__ = [
    "cdmsobj", "axis", "coord", "grid", "hgrid", "avariable", "sliceut",
    "error", "variable", "fvariable", "tvariable", "dataset", "database",
    "cache", "selectors", "MV2", "convention", "bindex", "auxcoord", "gengrid",
    "gsHost", "gsStaticVariable", "gsTimeVariable", "mvBaseWriter",
    "mvSphereMesh", "mvVsWriter", "mvCdmsRegrid"
]

# Errors
from .error import CDMSError  # noqa

# CDMS datatypes
from .cdmsobj import CdArray, CdChar, CdByte, CdDouble, CdFloat, CdFromObject, CdInt, CdLong, CdScalar, CdShort, CdString  # noqa

# Functions which operate on all objects or groups of objects
from .cdmsobj import Unlimited, getPathFromTemplate, matchPattern, matchingFiles, searchPattern, searchPredicate, setDebugMode  # noqa

# Axis functions and classes
from .axis import AbstractAxis, axisMatches, axisMatchAxis, axisMatchIndex  # noqa
from .axis import createAxis, createEqualAreaAxis, createGaussianAxis, createUniformLatitudeAxis, createUniformLongitudeAxis, setAutoBounds, getAutoBounds  # noqa

# Grid functions
Example #40
0
def logLinearInterpolation(A,P,levels=[100000, 92500, 85000, 70000, 60000, 50000, 40000, 30000, 25000, 20000, 15000, 10000, 7000, 5000, 3000, 2000, 1000],status=None):
    """
    Log-linear interpolation
    to convert a field from sigma levels to pressure levels
    Values below the surface are masked
    
    Input
    A :    array on sigma levels
    P :    pressure field from TOP (level 0) to BOTTOM (last level)
    levels : pressure levels to interpolate to (same units as P), default levels are: [100000, 92500, 85000, 70000, 60000, 50000, 40000, 30000, 25000, 20000, 15000, 10000, 7000, 5000, 3000, 2000, 1000]

    P and levels must have same units

    Output
    array on pressure levels (levels)
    
    Examples:
    A=logLinearInterpolation(A,P,levels=[100000, 92500, 85000, 70000, 60000, 50000, 40000, 30000, 25000, 20000, 15000, 10000, 7000, 5000, 3000, 2000, 1000])
    """
    
    cdat_info.pingPCMDIdb("cdat","cdutil.vertical.logLinearInterpolation")
    try:
        nlev=len(levels)  # Number of pressure levels
    except:
        nlev=1  # if only one level len(levels) would breaks
        levels=[levels,]
    order=A.getOrder()
    A=A(order='z...')
    P=P(order='z...')
    sh=list(P.shape)
    nsigma=sh[0] #number of sigma levels
    sh[0]=nlev
    t=MV2.zeros(sh,typecode=MV2.float32)
    sh2=P[0].shape
    prev=-1
    for ilev in range(nlev): # loop through pressure levels
        if status is not None:
            prev=genutil.statusbar(ilev,nlev-1.,prev)
        lev=levels[ilev] # get value for the level
        Pabv=MV2.ones(sh2,MV2.float)
        Aabv=-1*Pabv # Array on sigma level Above
        Abel=-1*Pabv # Array on sigma level Below
        Pbel=-1*Pabv # Pressure on sigma level Below
        Pabv=-1*Pabv # Pressure on sigma level Above
        Peq=MV2.masked_equal(Pabv,-1) # Area where Pressure == levels
        for i in range(1,nsigma): # loop from second sigma level to last one
            a=MV2.greater_equal(P[i],  lev) # Where is the pressure greater than lev
            b=   MV2.less_equal(P[i-1],lev) # Where is the pressure less than lev
            # Now looks if the pressure level is in between the 2 sigma levels
            # If yes, sets Pabv, Pbel and Aabv, Abel
            a=MV2.logical_and(a,b)
            Pabv=MV2.where(a,P[i],Pabv) # Pressure on sigma level Above
            Aabv=MV2.where(a,A[i],Aabv) # Array on sigma level Above
            Pbel=MV2.where(a,P[i-1],Pbel) # Pressure on sigma level Below
            Abel=MV2.where(a,A[i-1],Abel) # Array on sigma level Below
            Peq= MV2.where(MV2.equal(P[i],lev),A[i],Peq)

        val=MV2.masked_where(MV2.equal(Pbel,-1),numpy.ones(Pbel.shape)*lev) # set to missing value where there is no data below lev
        
        tl=MV2.log(val/Pbel)/MV2.log(Pabv/Pbel)*(Aabv-Abel)+Abel # Interpolation
        if ((Peq.mask is None) or (Peq.mask is MV2.nomask)):
            tl=Peq
        else:
            tl=MV2.where(1-Peq.mask,Peq,tl)
        t[ilev]=tl.astype(MV2.float32)
        
    ax=A.getAxisList()
    autobnds=cdms2.getAutoBounds()
    cdms2.setAutoBounds('off')
    lvl=cdms2.createAxis(MV2.array(levels).filled())
    cdms2.setAutoBounds(autobnds)
    try:
        lvl.units=P.units
    except:
        pass
    lvl.id='plev'
    
    try:
      t.units=P.units
    except:
      pass
  
    ax[0]=lvl
    t.setAxisList(ax)
    t.id=A.id
    for att in A.listattributes():
        setattr(t,att,getattr(A,att))
    return t(order=order)