Example #1
def calc_subregion_area_mean_and_std(dataset_array, subregions):
    ''' Calculate area mean and standard deviation values for given subregions using datasets on common grid points
    :param dataset_array: An array of OCW Dataset objects
    :type dataset_array: list
    :param subregions: list of subregions, each a (name, (lat_min, lat_max, lon_min, lon_max)) pair
    :type subregions: list
    :returns: area averaged time series for the dataset of shape (ntime, nsubregion)
    '''

    ndata = len(dataset_array)
    dataset0 = dataset_array[0]
    if dataset0.lons.ndim == 1:
        lons, lats = np.meshgrid(dataset0.lons, dataset0.lats)
    else:
        lons = dataset0.lons
        lats = dataset0.lats
    subregion_array = np.zeros(lons.shape)
    mask_array = dataset_array[0].values[0,:].mask
    # dataset0.values.shape[0]: length of the time dimension
    # spatial average
    t_series = ma.zeros([ndata, dataset0.values.shape[0], len(subregions)])
    # spatial standard deviation
    spatial_std = ma.zeros([ndata, dataset0.values.shape[0], len(subregions)])

    for iregion, subregion in enumerate(subregions):
        lat_min, lat_max, lon_min, lon_max = subregion[1]
        y_index,x_index = np.where((lats >= lat_min) & (lats <= lat_max) & (lons >= lon_min) & (lons <= lon_max))
        subregion_array[y_index,x_index] = iregion+1
        for idata in np.arange(ndata):
            t_series[idata, :, iregion] = ma.mean(dataset_array[idata].values[:,y_index, x_index], axis=1)
            spatial_std[idata, :, iregion] = ma.std(dataset_array[idata].values[:,y_index, x_index], axis=1)
    subregion_array = ma.array(subregion_array, mask=mask_array) 
    return t_series, spatial_std, subregion_array
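A minimal usage sketch (not part of the original source). It assumes the ocw.dataset.Dataset constructor shown in Example #19, and it infers from the unpacking of subregion[1] above that each subregion is a (name, bounds) pair.

# Hypothetical usage sketch; `ds.Dataset` and its import path are assumptions based on Example #19.
import datetime
import numpy as np
import numpy.ma as ma
from ocw import dataset as ds  # assumed import path for the OCW Dataset class

lats = np.arange(-10.0, 11.0)
lons = np.arange(0.0, 21.0)
times = np.array([datetime.datetime(2000, month, 1) for month in range(1, 13)])
values = ma.array(np.random.rand(len(times), len(lats), len(lons)))

dataset = ds.Dataset(lats, lons, times, values, variable='tas')
subregions = [('R1', (-5.0, 5.0, 2.0, 8.0)),    # (lat_min, lat_max, lon_min, lon_max)
              ('R2', (0.0, 10.0, 10.0, 18.0))]

t_series, spatial_std, subregion_map = calc_subregion_area_mean_and_std([dataset], subregions)
print(t_series.shape)  # (1, 12, 2): (ndata, ntime, nsubregion)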
Example #2
File: som.py Project: Zekom/orange
 def __call__(self, data, weight_id=0, progress_callback=None):
     array, classes, w = data.toNumpyMA()
     domain = data.domain
     if isinstance(domain.class_var, Orange.feature.Discrete):
         # Discrete class (extend the data with class indicator matrix)
         nval = len(data.domain.class_var.values)
         ext = ma.zeros((len(array), nval))
         ext[([i for i, m in enumerate(classes.mask) if m],
              [int(c) for c, m in zip(classes, classes.mask) if m])] = 1.0
     elif isinstance(domain.class_var, Orange.feature.Continuous):
         # Continuous class, just add the one column (what about multitarget)
         nval = 1
         ext = ma.zeros((len(array), nval))
         ext[:,0] = classes
     elif domain.class_var is None:
         # No class var
         nval = 0
         ext = ma.zeros((len(array), nval))
     else:
         raise TypeError("Unsuported `class_var` %r" % domain.class_var) 
     array = ma.hstack((array, ext))
     
     map = Map(self.map_shape, topology=self.topology)
     if self.initialize == Map.InitializeLinear:
         map.initialize_map_linear(array)
     else:
         map.initialize_map_random(array)
     map = Solver(batch_train=self.batch_train, eps=self.eps, neighbourhood=self.neighbourhood,
                  radius_ini=self.radius_ini, radius_fin=self.radius_fin, learning_rate=self.learning_rate,
                  epoch=self.epochs)(array, map, progress_callback=progress_callback)
     # Remove class columns from the vectors (skip when no class columns were added)
     if nval:
         for node in map:
             node.vector = node.vector[:-nval]
     return SOMMap(map, data)
Example #3
def calcTemporalCorrelation(evaluationData, referenceData):
    '''
    Purpose ::
        Calculate the temporal correlation.
    
    Assumption(s) ::
        The first dimension of two datasets is the time axis.
    
    Input ::
        evaluationData - model data array of any shape
        referenceData - observation data array of any shape

    Output::
        temporalCorrelation - A 1-D array of temporal correlation coefficients, one for each subregion
        sigLev - A 1-D array of confidence levels related to temporalCorrelation
    
    REF: 277-281 in Stat methods in atmos sci by Wilks, 1995, Academic Press, 467pp.
    sigLev: the correlation between model and observation is significant at sigLev * 100 %
    '''
    evaluationDataMask = process.create_mask_using_threshold(evaluationData, threshold = 0.75)
    referenceDataMask = process.create_mask_using_threshold(referenceData, threshold = 0.75)
    
    nregion = evaluationData.shape[0]
    temporalCorrelation = ma.zeros([nregion])-100.
    sigLev = ma.zeros([nregion])-100.
    for iregion in np.arange(nregion):
        temporalCorrelation[iregion], sigLev[iregion] = stats.pearsonr(evaluationData[iregion,:], referenceData[iregion,:])
        sigLev[iregion] = 1 - sigLev[iregion]
                    
    temporalCorrelation = ma.masked_equal(temporalCorrelation.data, -100.)
    sigLev = ma.masked_equal(sigLev.data, -100.)
    
    return temporalCorrelation, sigLev
Example #4
File: trend.py Project: bnordgren/pylsce
def grid_linefit(grid, timevals=None, timeslice=slice(None, None, None)):
    """A compressed spatiotemporal grid is provided. A line fit is performed 
    along the time axis for each spatial cell. Two grids are returned,
    each of which is 2d, with the same spatial shape as the input.
    The pixels of one grid contain the slope, the other contains the
    r squared value of the line fit for that spatial cell.
    A vector of time values may be provided. If not supplied, one 
    will be generated."""
    if timevals is None:
        timevals = ma.arange(grid.shape[0])
    X = sm.add_constant(timevals, prepend=True)

    outshape = (grid.shape[1],)

    rsq_map = ma.zeros(outshape)
    slope_map = ma.zeros(outshape)

    for i in range(outshape[0]):
        if (i % 1000) == 0:
            print "%d of %d (%f)" % (i, outshape[0], (i * 100.0) / outshape[0])
        if isinstance(grid, ma.MaskedArray) and grid[0, :].mask[i]:
            rsq_map[i] = ma.masked
            slope_map[i] = ma.masked
        else:
            m, rsq = linefit(grid, i, X, timeslice)
            rsq_map[i] = rsq
            slope_map[i] = m

    return (slope_map, rsq_map)
Example #5
def convert_unary_evaluation_result(evaluation_result, subregion = False):
    if not subregion:
        nmetric = len(evaluation_result)
        nmodel = len(evaluation_result[0])
        results = []
        for imetric in range(nmetric):
            result_shape = list(evaluation_result[imetric][0].shape)
            result_shape.insert(0, nmodel)
            result = ma.zeros(result_shape)
            for imodel in range(nmodel):
                result[imodel,:] = evaluation_result[imetric][imodel]
            results.append(result)
        return results
    else:
        nmetric = len(evaluation_result)
        nsubregion = len(evaluation_result[0])
        nmodel = len(evaluation_result[0][0])

        results = []
        for isubregion in range(nsubregion):
            subregion_results = []
            for imetric in range(nmetric):
                result_shape = list(evaluation_result[imetric][isubregion][0].shape)
                result_shape.insert(0, nmodel)
                result = ma.zeros(result_shape)
                for imodel in range(nmodel):
                    result[imodel,:] = evaluation_result[imetric][isubregion][imodel]
                subregion_results.append(result)
            results.append(subregion_results)
        return results
Example #6
    def calculate(self, tracks):

        dataMean = ma.zeros((len(self.lon_range) - 1,
                             len(self.lat_range) - 1))
        dataMin = ma.zeros((len(self.lon_range) - 1,
                            len(self.lat_range) - 1))
        dataMax = ma.zeros((len(self.lon_range) - 1,
                            len(self.lat_range) - 1))
        dataMed = ma.zeros((len(self.lon_range) - 1,
                            len(self.lat_range) - 1))
        log.debug("Processing %d tracks" % (len(tracks)))
        for cell in self.gridCells:
            vcell = np.array([])
            for t in tracks:
                ii = np.where(((t.Latitude >= cell.ymin) &
                               (t.Latitude < cell.ymax)) &
                              ((t.Longitude >= cell.xmin) &
                               (t.Longitude < cell.xmax)))[0]
                if len(ii) > 0:
                    vv = t.CentralPressure[ii].compress(t.CentralPressure[ii] < sys.maxint)
                    vcell = np.append(vcell, vv.compress(vv > 0.0))

            if len(vcell) > 0:
                dataMean[cell.index[0], cell.index[1]] = np.mean(vcell)
                dataMin[cell.index[0], cell.index[1]] = np.min(vcell)
                dataMax[cell.index[0], cell.index[1]] = np.max(vcell)
                dataMed[cell.index[0], cell.index[1]] = np.median(vcell)

        dataMean = ma.masked_equal(dataMean, 0)
        dataMin = ma.masked_equal(dataMin, 0)
        dataMax = ma.masked_equal(dataMax, 0)
        dataMed = ma.masked_equal(dataMed, 0)
        return dataMean, dataMin, dataMax, dataMed
Example #7
File: averager.py Project: RDCEP/ggcmi
    def sum(self, var, agg, lats, weights = None, calcarea = False, mask = None, numchunks = 1):
        nt, nlats, nlons = var.shape

        if weights is None: # weights
            weights = ones((nt, nlats, nlons))
        elif len(weights.shape) == 2:
            weights = ma.resize(weights, (nt, nlats, nlons))

        if calcarea: # area
            area = self.area(lats, nlats, nlons)
        else:
            area = ones((nlats, nlons))

        aggvals = self.__uniquevals(agg)
        sz = len(aggvals)

        if mask is None:
            varmask = ones((nt, nlats, nlons)) # no additional mask
        else:
            varmask = mask

        chunksize = sz / numchunks # chunk data to reduce memory usage

        sumv = ma.masked_array(zeros((sz, nt)), mask = ones((sz, nt)))

        maxchunksize = max(chunksize, chunksize + sz - chunksize * numchunks)

        aselect = ma.zeros((maxchunksize, nlats, nlons), dtype = bool) # preallocate
        vartmp  = ma.zeros((maxchunksize, nlats, nlons))

        cnt = 0
        for i in range(numchunks):
            startidx = cnt
            if i != numchunks - 1:
                endidx = cnt + chunksize
            else:
                endidx = sz

            aggvalsc = aggvals[startidx : endidx] # work on subset of aggregation values
            szc = len(aggvalsc)

            aselect[:] = 0 # clear
            for j in range(szc): aselect[j] = (agg == aggvalsc[j])
            ridx, latidx, lonidx = where(aselect)

            vartmp[:] = 0 # clear
            vartmp.mask = ones(vartmp.shape)
            for t in range(nt):
                vartmp[ridx, latidx, lonidx] = var[t, latidx, lonidx]        * \
                                               varmask[t, latidx, lonidx]    * \
                                               weights[t, latidx, lonidx]    * \
                                               area[latidx, lonidx]          * \
                                               aselect[ridx, latidx, lonidx]
                sumv[startidx : endidx, t] = vartmp.sum(axis = 2).sum(axis = 1)[: szc]

            cnt += chunksize

        return sumv
Example #8
File: files.py Project: cgoodale/climate
def writeBN_lola(fileName, lons, lats):
    # write a binary data file that includes longitude (1-d) and latitude (1-d) values
    
    F = fortranfile.FortranFile(fileName, mode='w')
    ngrdY = lons.shape[0]; ngrdX = lons.shape[1]
    tmpDat = ma.zeros(ngrdX); tmpDat[:] = lons[0, :]; F.writeReals(tmpDat)
    tmpDat = ma.zeros(ngrdY); tmpDat[:] = lats[:, 0]; F.writeReals(tmpDat)
    # release temporary arrays
    tmpDat = 0
    F.close()
Example #9
 def GetVariableData(self, col, vtype = None):
     """
     This is an important method. It returns the data for a single variable.
     Empty variable returns None
     Blanks after data are not returned
     Variable type can be specified as string (or None), int or float
     For string or None, all raw values are returned.
     For int, all figures are rounded to the nearest integer and values that
     cannot be converted are masked.
     For float, all values that cannot be converted to float are masked.
     All values matching the variable's 'missingvalue' are masked.
     It's inefficient and requires 2 passes but it should be reliable.
     """
     maxRow = self.GetNumberRows()
     meta = self.meta[col]
     missing = meta["missingvalues"]
     if col > self.GetNumberCols():
         return None
     maxIdx = -1
     for idx in range(maxRow):
         if not self.CheckBlank(self.GetCellValue(idx, col)):
             maxIdx = idx
     if maxIdx < 0:
         return None
     maxIdx = maxIdx + 1
     data = []
     if (vtype is None) or (vtype.lower() == "str"):
         for row in range(maxIdx):
             val = self.GetCellValue(row, col)
             if self.CheckBlank(val):
                 val = ""
             data.append(val)
     elif vtype.lower() == "int":
         data = ma.zeros(maxIdx,dtype='int')
         for row in range(maxIdx):
             val = self.GetCellValue(row, col)
             if (val == missing) or (self.CheckBlank(val)):
                 data[row] = ma.masked
             else:
                 try:
                     data[row] = int(round(float(val)))
                 except ValueError:
                     data[row] = ma.masked
     elif vtype.lower() == "float":
         data = ma.zeros(maxIdx,dtype='float')
         for row in range(maxIdx):
             val = self.GetCellValue(row, col)
             if (val == missing) or (self.CheckBlank(val)):
                 data[row] = ma.masked
             else:
                 try:
                     data[row] = float(val)
                 except ValueError:
                     data[row] = ma.masked
     return data
Example #10
def blockify(x, y):
    """Convert x and y vectors so that they can be used for line plots where
    the y values are piecewise constant

    x needs to be y.size + 1"""
    assert x.size == y.size + 1, "x should be one value longer than y"

    x_repeat, y_repeat = ma.zeros(2*y.size), ma.zeros(2*y.size)
    x_repeat[::2], x_repeat[1::2] = x[:-1], x[1:]
    y_repeat[::2], y_repeat[1::2] = y, y
    return x_repeat, y_repeat
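A short usage sketch (added for illustration, not from the original source): turning bin edges plus per-bin values into a piecewise-constant outline for plotting.

import numpy as np
import numpy.ma as ma
import matplotlib.pyplot as plt

edges = np.linspace(0.0, 5.0, 6)                                     # 6 bin edges
counts = ma.masked_equal(np.array([3.0, 1.0, 0.0, 4.0, 2.0]), 0.0)   # 5 bin values, zeros masked

x_step, y_step = blockify(edges, counts)   # x_step.size == y_step.size == 10
plt.plot(x_step, y_step)                   # masked bins leave gaps in the line
plt.show()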
Example #11
def test_discarded_mask():
    data_in = ma.zeros((10,), dtype=[('wlen', float), ('flux', float)])
    data_out = np.zeros((10,), dtype=[('wlen', float), ('flux', float)])
    with pytest.raises(ValueError):
        redshift(z_in=0, z_out=0, data_in=data_in, data_out=data_out)
    wlen = np.arange(10)
    flux = ma.zeros((10,))
    with pytest.raises(ValueError):
        redshift(z_in=0, z_out=0, data_out=data_out, rules=[
            {'name': 'wlen', 'exponent': +1, 'array_in': wlen},
            {'name': 'flux', 'exponent': -1, 'array_in': flux}])
Example #12
def overlapping_chars(ann1, annotations):
    """Returns the number of chars in ann1 that overlap with the annotations."""
    annotations = [ann2 for ann2 in annotations if is_overlapping(ann1, ann2)]
    if len(annotations) == 0 or not isinstance(ann1, Annotation):
        return 0
    this_overlaps = zeros(ann1[TLEN], dtype=bool)
    source_overlaps = zeros(ann1[SLEN], dtype=bool)
    for ann2 in annotations:
        mark_overlapping_chars(this_overlaps, ann1, ann2, TOFF, TLEN)
        mark_overlapping_chars(source_overlaps, ann1, ann2, SOFF, SLEN)
    return npsum(this_overlaps) + npsum(source_overlaps)
Example #13
File: edges.py Project: yaukwankiu/armor
def find(a):
    """
    use straightforward summing of mask criteria
    """
    m1 = ma.zeros(a.matrix.shape)
    m2 = ma.zeros(a.matrix.shape)
    # look around its neighbourhood
    for i in [-1,0,1]:
        for j in [-1,0,1]:
            m1 += (a.shiftMatrix(i,j).matrix.mask==False)   # finding a point not masked
            m2 += (a.shiftMatrix(i,j).matrix.mask==True )   # finding a point masked
    return m1*m2
Example #14
def comp_p(dert__, map, rng):   # compare rng-distant pixels within blob

    p__ = dert__[:, :, 0]
    mask = ~map     # the complement of blob.map is used as the array mask
    dy__ = ma.zeros(map.shape, dtype=int)   # initialize dy__ as array masked for selective computation
    dx__ = ma.zeros(map.shape, dtype=int)
    dy__.mask = dx__.mask = mask    # all operations on masked arrays ignore elements at mask == True.

    # vertical comp:
    d__ = p__[rng:] - p__[:-rng]    # comparison between p at coordinates (x, y) and p at coordinates (x, y+ rng)
    dy__[rng:] += d__               # bilateral accumulation on dy (x, y+ rng)
    dy__[:-rng] += d__              # bilateral accumulation on dy (x, y)

    # horizontal comp:
    d__ = p__[:, rng:] - p__[:, :-rng]  # comparison between p (x, y) and p (x + rng, y)
    dx__[:, rng:] += d__                # bilateral accumulation on dx (x + rng, y)
    dx__[:, :-rng] += d__               # bilateral accumulation on dx (x, y)

    # diagonal comparison:

    for xd in range(1, rng):
        yd = rng - xd           # y and x distance between comparands
        hyp = hypot(xd, yd)
        y_coef = yd / hyp       # to decompose d into dy
        x_coef = xd / hyp       # to decompose d into dx

        # top-left and bottom-right quadrants:

        d__ = p__[yd:, xd:] - p__[:-yd, :-xd]   # comparison between p (x, y) and p (x + xd, y + yd)
        # decompose d to dy, dx:
        temp_dy__ = d__ * y_coef                # buffer for dy accumulation
        temp_dx__ = d__ * x_coef                # buffer for dx accumulation
        # accumulate dy, dx:
        dy__[yd:, xd:] += temp_dy__.astype(int)     # bilateral accumulation on dy (x + xd, y + yd)
        dy__[:-yd, :-xd] += temp_dy__.astype(int)   # bilateral accumulation on dy (x, y)
        dx__[yd:, xd:] += temp_dx__.astype(int)     # bilateral accumulation on dx (x + xd, y + yd)
        dx__[:-yd, :-xd] += temp_dx__.astype(int)   # bilateral accumulation on dx (x, y)

        # top-right and bottom-left quadrants:

        d__ = p__[yd:, :-xd] - p__[:-yd, xd:]   # comparison between p (x + xd, y) and p (x, y + yd)
        # decompose d to dy, dx:
        temp_dy__ = d__ * y_coef                # buffer for dy accumulation
        temp_dx__ = -(d__ * x_coef)             # buffer for dx accumulation, sign inverted with comp direction
        # accumulate dy, dx:
        dy__[yd:, :-xd] += temp_dy__.astype(int)    # bilateral accumulation on dy (x, y + yd)
        dy__[:-yd, xd:] += temp_dy__.astype(int)    # bilateral accumulation on dy (x + xd, y)
        dx__[yd:, :-xd] += temp_dx__.astype(int)    # bilateral accumulation on dx (x, y + yd)
        dx__[:-yd, xd:] += temp_dx__.astype(int)    # bilateral accumulation on dx (x + xd, y)

    dert__[:, :, 2] += dx__  # add dx to shorter-rng-accumulated dx
    dert__[:, :, 3] += dy__  # add dy to shorter-rng-accumulated dy
    # ---------- comp_p() end -------------------------------------------------------------------------------------------
Example #15
File: files.py Project: cgoodale/climate
def writeBNdata(fileName, numOBSs, numMDLs, nT, ngrdX, ngrdY, numSubRgn, obsData, mdlData, obsRgnAvg, mdlRgnAvg):
    #(fileName,maskOption,numOBSs,numMDLs,nT,ngrdX,ngrdY,numSubRgn,obsData,mdlData,obsRgnAvg,mdlRgnAvg):
    # write spatially- and regionally regridded data into a binary data file
    missing = -1.e26
    F = fortranfile.FortranFile(fileName, mode='w')
    # construct a data array to replace mask flag with a missing value (missing = -1.e26) for printing
    data = ma.zeros((nT, ngrdY, ngrdX))
    for m in np.arange(numOBSs):
        data[:, :, :] = obsData[m, :, :, :]; msk = data.mask
        for n in np.arange(nT):
            for j in np.arange(ngrdY):
                for i in np.arange(ngrdX):
                    if msk[n, j, i]: data[n, j, i] = missing

        # write observed data. allowed to write only one row at a time
        tmpDat = ma.zeros(ngrdX)
        for n in np.arange(nT):
            for j in np.arange(ngrdY):
                tmpDat[:] = data[n, j, :]
                F.writeReals(tmpDat)

    # write model data (dep. on the number of models).
    for m in np.arange(numMDLs):
        data[:, :, :] = mdlData[m, :, :, :]
        msk = data.mask
        for n in np.arange(nT):
            for j in np.arange(ngrdY):
                for i in np.arange(ngrdX):
                    if msk[n, j, i]:
                        data[n, j, i] = missing

        for n in np.arange(nT):
            for j in np.arange(ngrdY):
                tmpDat[:] = data[n, j, :]
                F.writeReals(tmpDat)

    data = 0     # release the array allocated for data
    # write data in subregions
    if numSubRgn > 0:
        print 'Also included are the time series of the means over ', numSubRgn, ' areas from obs and model data'
        tmpDat = ma.zeros(nT); print numSubRgn
        for m in np.arange(numOBSs):
            for n in np.arange(numSubRgn):
                tmpDat[:] = obsRgnAvg[m, n, :]
                F.writeReals(tmpDat)
        for m in np.arange(numMDLs):
            for n in np.arange(numSubRgn):
                tmpDat[:] = mdlRgnAvg[m, n, :]
                F.writeReals(tmpDat)
    tmpDat = 0     # release the array allocated for tmpDat
    F.close()
Example #16
def shiibaLocal(a, b, windowSize=100, iRange=range(000, 881, 100),\
                    jRange=range(000, 921, 100), searchWindowHeight=11,\
                    searchWindowWidth=11, useRecursion=True, plotting=True ):
    # imports
    from shiiba import regression2 as regression
    from shiiba import regressionCFLfree as cflfree
    import numpy.ma as ma
    #  results =dictionary with
    #         {'mn': mn, 'C':C, 'Rsquared':Rsquared, 'CR2':CR2, 'timeSpent':timeSpent}
    results=cflfree.regressLocalAll(a, b, windowSize, iRange, jRange, searchWindowHeight,\
                    searchWindowWidth, useRecursion, plotting)
    mn  = results['mn']
    C   = results['C']
    Rsquared    = results['Rsquared']
    # constructing the prediction
    a1 = dbz(name  = 'shiiba prediction for %s and %s' % (a.name, b.name),
            matrix = ma.zeros(a.matrix.shape))
    a1.matrix.fill_value = a.matrix.fill_value
    a1.mask = True
    # constructing the vector field
    U = ma.zeros(a.matrix.shape)
    U.fill_value = a.matrix.fill_value
    U.mask = True
    V = ma.zeros(a.matrix.shape)
    V.fill_value = a.matrix.fill_value
    V.mask = True
    vect = pattern.VectorField(U=U, V=V, \
                            title='shiiba prediction for %s and %s' % (a.name, b.name))
    # filling in the local values
    for i, j in mn.keys():
        aa  = a.getWindow(i, j, windowSize)
        a1.matrix[i:i+windowSize, j:j+windowSize] = regression.getPrediction(C[(i,j)], aa)

        vectLocal = regression.convert(C[(i,j)], aa)
        vect.U[i:i+windowSize, j:j+windowSize] = vectLocal.U
        vect.V[i:i+windowSize, j:j+windowSize] = vectLocal.V
        #########
        # added 15 july 2013 ; doesn't make sense to have a global vector map without adding in the mn[(i,j)]'s
        # adding the shift back to the regression result
        # (see pattern.py and regression2.py)
        vect.U[i:i+windowSize, j:j+windowSize] += mn[(i,j)][0]  # U = first (i-) component ; V = j-component 
        vect.V[i:i+windowSize, j:j+windowSize] += mn[(i,j)][1]
        #
        ##########

    results['prediction']   = a1
    results['vect']         = vect
    return results
Example #17
File: files.py Project: cgoodale/climate
def shiftgrid(lon0, datain, lonsin, start= True, cyclic=360.0):
    """
    Purpose::
        Shift global lat/lon grid east or west. This function is taken directly
        from the (unreleased) basemap 1.0.7 source code as version 1.0.6 does not
        currently support arrays with more than two dimensions.
        https://github.com/matplotlib/basemap
        
    Input::
        lon0 - starting longitude for shifted grid (ending longitude if start=False). 
               lon0 must be on input grid (within the range of lonsin).
        datain - original data with longitude the right-most dimension.
        lonsin - original longitudes.
        start  - if True, lon0 represents the starting longitude of the new grid. 
                 if False, lon0 is the ending longitude. Default True.
        cyclic - width of periodic domain (default 360)

    Output:: 
        dataout - data on shifted grid
        lonsout - lons on shifted grid
    """
    if np.fabs(lonsin[-1]-lonsin[0]-cyclic) > 1.e-4:
        # Use all data instead of raise ValueError, 'cyclic point not included'
        start_idx = 0
    else:
        # If cyclic, remove the duplicate point
        start_idx = 1
    if lon0 < lonsin[0] or lon0 > lonsin[-1]:
        raise ValueError('lon0 outside of range of lonsin')
    i0 = np.argmin(np.fabs(lonsin-lon0))
    i0_shift = len(lonsin)-i0
    if ma.isMA(datain):
        dataout  = ma.zeros(datain.shape,datain.dtype)
    else:
        dataout  = np.zeros(datain.shape,datain.dtype)
    if ma.isMA(lonsin):
        lonsout = ma.zeros(lonsin.shape,lonsin.dtype)
    else:
        lonsout = np.zeros(lonsin.shape,lonsin.dtype)
    if start:
        lonsout[0:i0_shift] = lonsin[i0:]
    else:
        lonsout[0:i0_shift] = lonsin[i0:]-cyclic
    dataout[...,0:i0_shift] = datain[...,i0:]
    if start:
        lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]+cyclic
    else:
        lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]
    dataout[...,i0_shift:] = datain[...,start_idx:i0+start_idx]
    return dataout,lonsout
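A brief usage sketch (not from the original file): shifting a 0-360 degree grid so that longitudes run from -180 to 180.

import numpy as np

lons_in = np.arange(0.0, 360.0, 2.5)                                      # 144 longitudes, no cyclic point
data_in = np.cos(np.deg2rad(lons_in))[np.newaxis, :].repeat(73, axis=0)   # (lat, lon)

data_out, lons_out = shiftgrid(180.0, data_in, lons_in, start=False)
print(lons_out[0], lons_out[-1])   # -180.0 ... 177.5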
Example #18
def handWritingClassTest(trainingDirName, testDirName, width, height, fileNameSplit, fileStrSplit):
    from os import listdir
    from numpy.ma import zeros

    hwLabels = []
    trainingFileList = listdir(trainingDirName)
    m = len(trainingFileList)
    trainingMat = zeros((m, width * height))
    for i in range(m):
        fileNameStr = trainingFileList[i]
        fileStr = fileNameStr.split(fileNameSplit)[0]
        classNumStr = int(fileStr.split(fileStrSplit)[0])
        hwLabels.append(classNumStr)
        trainingMat[i, :] = KNN.img2vector(filename=trainingDirName + "/%s" % fileNameStr, width=width, height=height)

    testFileList = listdir(testDirName)
    errorCount = 0.0
    mTest = len(testFileList)
    for i in range(mTest):
        fileNameStr = testFileList[i]
        fileStr = fileNameStr.split(fileNameSplit)[0]
        classNumStr = int(fileStr.split(fileStrSplit)[0])
        vectorUnderTest = KNN.img2vector(filename=testDirName + "/%s" % fileNameStr, width=width, height=height)
        classifierResult = KNN.classify0(inx=vectorUnderTest, dataSet=trainingMat, labels=hwLabels, k=3)

        print("the classifier came back with: %d, the real answer is: %d" % (classifierResult, classNumStr))
        if(classifierResult != classNumStr): errorCount += 1.0

    print("\nthe total number of errors is: %d" % errorCount)
    print("\nthe total error rate is: %f" % (errorCount/float(mTest)))
Example #19
def temporal_rebin_with_time_index(target_dataset, nt_average):
    """ Rebin a Dataset to a new temporal resolution

    :param target_dataset: Dataset object that needs temporal rebinned
    :type target_dataset: :class:`dataset.Dataset`

    :param nt_average: Time resolution for the output datasets. 
     It is the same as the number of time indices to be averaged. (length of time dimension in the rebinned dataset) = (original time dimension length / nt_average)
    :type nt_average: integer

    :returns: A new temporally rebinned Dataset
    :rtype: :class:`dataset.Dataset`
    """
    nt = target_dataset.times.size
    if nt % nt_average !=0:
        print 'Warning: length of time dimension must be a multiple of nt_average'
    # nt2 is the length of time dimension in the rebinned dataset
    nt2 = nt/nt_average
    binned_dates = target_dataset.times[np.arange(nt2)*nt_average]
    binned_values = ma.zeros(np.insert(target_dataset.values.shape[1:],0,nt2))
    for it in np.arange(nt2):
        binned_values[it,:] = ma.average(target_dataset.values[nt_average*it:nt_average*it+nt_average,:], axis=0)
    new_dataset = ds.Dataset(target_dataset.lats,
                             target_dataset.lons,
                             binned_dates,
                             binned_values,
                             variable=target_dataset.variable,
                             units=target_dataset.units,
                             name=target_dataset.name,
                             origin=target_dataset.origin)
    return new_dataset
Example #20
    def __init__(self, data=None, copy=True) :
        """Can either be initialized with a raw data array or with None"""
        
        # Dictionary that holds all data other than .data.  This is safe to 
        # be accessed and updated by the user.
        self.field = {}
        # Dictionary with the same keys as field but holds the axes over which
        # a parameter varies.  For instance, the LST variable varies over the
        # 'time' axis.  axes['LST'] should thus be ('time') and
        # shape(field['LST']) should be (ntimes, ).
        self.field_axes = {}
        # To write data to fits you need a fits format for each field.
        self.field_formats = {}
        # Dictionary that holds the history of this data.  Its keys are
        # history entries for the data.  They must be strings starting with a
        # three digit integer enumerating the histories.  The corresponding
        # values give additional details, held in a tuple of strings.  The
        # intention is that when merging data, histories must be identical, but
        # details can be merged.
        self.history = History()

        if data is None :
            self.data = ma.zeros(tuple(sp.zeros(len(self.axes))), float)
            self.data_set = False
        else :
            self.set_data(data, copy=copy)
Example #21
def mask_seafloor(input, kmt, axis=0):
    """ def mask_seafloor(input, kmt):
              default is that input has depth (zt) as first dimension.
              axis is axis of depth dimension.
              Change axis=1 if time is first and depth is second.

              returns input as masked array 
    """

    tmp = ma.zeros(input.shape)
    ndepth = input.shape[axis]

    if axis==1:
        # need to tile kmt
        kmtt = np.tile(kmt,(input.shape[0],1,1))

    for lii in np.arange(0,ndepth): # loop through each depth layer

        # mask out levels below sea floor
        if axis==0:
            tmp[lii,...] = ma.masked_where(kmt <= lii, input[lii,...])
        elif axis==1:
            tmp[:,lii,...] = ma.masked_where(kmtt <= lii, input[:,lii,...])
        else:
            print 'axis must = 1 or 0'

    return tmp
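A minimal usage sketch (added for illustration): kmt holds the number of valid depth levels in each column, and every level at or below the sea floor gets masked.

import numpy as np
import numpy.ma as ma

ndepth, nlat, nlon = 5, 4, 6
field = np.random.rand(ndepth, nlat, nlon)                  # (depth, lat, lon)
kmt = np.random.randint(0, ndepth + 1, size=(nlat, nlon))   # deepest valid level per column

masked_field = mask_seafloor(field, kmt, axis=0)
print(ma.count_masked(masked_field), 'cells masked below the sea floor')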
Example #22
def getKmeans(a, k, threshold=1, iter=40, thresh=1e-05, minit="random", missing="warn"):
    """input : a, k threshold
        output : atk
        """
    if minit == "matrix":
        seeds, k = k, len(k)
    a.k = k  # initialise (could move it to __init__ but not bothered for the moment)
    height, width = a.matrix.shape
    pixels = a.matrix > threshold
    print "width, height:", width, height  # debug
    print "sum of relevant pixels:", sum(sum(pixels))  # debug
    dataPoints = [[(i, j) for i in range(width) if pixels[j, i]] for j in range(height)]
    dataPoints = sum(dataPoints, [])
    dataPoints = np.array(dataPoints)
    print dataPoints[:20]
    if minit == "matrix":
        a.centroids = kmeans2(data=dataPoints, k=seeds, iter=iter, thresh=thresh, minit=minit, missing=missing)
    else:
        a.centroids = kmeans2(data=dataPoints, k=k, iter=iter, thresh=thresh, minit=minit, missing=missing)
    a.data = dataPoints

    resultPattern = ma.zeros((height, width))
    resultPattern.mask = True
    resultPattern.fill_value = -999
    for i in range(len(dataPoints)):
        resultPattern[dataPoints[i][1], dataPoints[i][0]] = a.centroids[1][i]
    resultPattern = dbz(
        name="Clustering for %s with %d clusters" % (a.name, k + 1), matrix=resultPattern, vmin=0, vmax=k
    )

    atk = {"centroids": a.centroids, "data": a.data, "pattern": resultPattern}
    return atk
Example #23
    def _calculate(self, tracks):
        """
        Calculate a histogram of TC genesis counts given a set of tracks.

        :param tracks: Collection of :class:`Track` objects.
        """
        
        hist = ma.zeros((len(self.lon_range) - 1,
                         len(self.lat_range) - 1))

        positions = np.vstack([self.X.ravel(), self.Y.ravel()])
        
        x = []
        y = []

        for track in tracks: 
            if len(track.Longitude) == 0:
                pass
            elif len(track.Longitude) == 1:
                x.append(track.Longitude)
                y.append(track.Latitude)
            else:
                x.append(track.Longitude[0])
                y.append(track.Latitude[0])

        values = np.vstack([np.array(x), np.array(y)])
        kernel = gaussian_kde(values, bw_method=.01)
        Z = np.reshape(kernel(positions).T, self.X.shape)
        return Z.T
Example #24
def average(tsl):
    # find fastest dt:
    dt_common = 1e12
    for ts in tsl:
        if ts.dt < dt_common:
            newtime = ts.timearray()
            dt_common = ts.dt
            
    # interpolate all series to new dt:
    tslip = [ts.interpolate(newtime, dt_common) for ts in tsl]
    if len(tslip[0].data.shape)==1:
        ave = np.empty((len(tslip), len(tslip[0].data)))
    else:
        ave = np.empty((len(tslip), tslip[0].data.shape[0], tslip[0].data.shape[1]))
        
    for its, ts in enumerate(tslip):
        if len(ts.data.shape)==1:
            ave[its] = ts.data
        else:
            ave[its,:,:] = ts.data[:,:]

    if len(ts.data.shape)==1:
        return Timeseries(ma.mean(ave, axis=0), dt_common)
    else:
        avef = ma.zeros((tslip[0].data.shape[0], tslip[0].data.shape[1]))
        for nrow, row in enumerate(avef):
            avef[nrow,:] = ma.mean(ave[:,nrow,:], axis=0)
        return Timeseries(avef, dt_common)
Example #25
    def any_of(self, *states, **kwargs):
        '''
        Return a boolean array containing True where the value of the
        MappedArray equals any state in states.

        :param states: List of states.
        :type states: [str]
        :param ignore_missing: If this is False, raise an exception if any of
            the states are not in the values mapping.
        :type ignore_missing: bool
        :returns: Boolean array.
        :rtype: np.ma.array(bool)
        '''
        ignore_missing = kwargs.get('ignore_missing', False)
        valid_states = self.values_mapping.values()
        array = zeros(len(self), dtype=bool_)
        for state in states:
            if state not in valid_states:
                if ignore_missing:
                    # do not check array as invalid states cause
                    # exception level log messages.
                    continue
                else:
                    raise ValueError(
                        "State '%s' is not valid. Valid states: '%s'." %
                        (state, valid_states))
            array |= self == state
        return array
Example #26
    def __init__(self, MetricTable):

        # Create empty ratio table
        nprobs = MetricTable.nprobs
        nsolvs = MetricTable.nsolvs
        self.ratios = ma.masked_array(1.0 * ma.zeros((nprobs+1, nsolvs)))

        # Compute best relative performance ratios across
        # solvers for each problem
        for prob in range(nprobs):
            metrics  = MetricTable.prob_mets(prob)
            best_met = ma.minimum(metrics)
            if (ma.count(metrics) == nsolvs and
                    ma.maximum(metrics) <= opts.minlimit):
                self.ratios[prob+1,:] = 1.0
            else:
                self.ratios[prob+1,:] = metrics * (1.0 / best_met)

        # Sort each solvers performance ratios
        for solv in range(nsolvs):
            self.ratios[:,solv] = ma.sort(self.ratios[:,solv])

        # Compute largest ratio and use to replace failures entries
        self.maxrat = ma.maximum(self.ratios)
        self.ratios = ma.filled(self.ratios, 1.01 * self.maxrat)
Example #27
File: pprof.py Project: joeywen/nlpy
    def __init__(self, MetricTable, opts):

        epsilon = 0.0
        if opts.cpu:
            epsilon = 0.01

        # Create empty ratio table
        nprobs = MetricTable.nprobs
        nsolvs = MetricTable.nsolvs
        self.ratios = ma.zeros((nprobs, nsolvs), dtype=numpy.float)

        # Compute best relative performance ratios across
        # solvers for each problem
        for prob in range(nprobs):
            metrics  = MetricTable.prob_mets(prob) + epsilon
            best_met = ma.minimum(metrics)
            self.ratios[prob,:] = metrics * (1.0 / best_met)

        # Sort each solvers performance ratios
        for solv in range(nsolvs):
            self.ratios[:,solv] = ma.sort(self.ratios[:,solv])

        # Compute largest ratio and use to replace failure entries
        self.maxrat = ma.maximum(self.ratios)
        self.ratios = ma.filled(self.ratios, 10 * self.maxrat)
Example #28
File: metrics.py Project: CWSL/climate
    def run(self, reference_dataset, target_dataset):
        """Calculate the temporal correlation coefficients and associated
           confidence levels between two datasets, using Pearson's correlation.

        .. note::
           Overrides BinaryMetric.run()

        :param reference_dataset: The reference dataset to use in this metric
            run
        :type reference_dataset: :class:`dataset.Dataset`

        :param target_dataset: The target dataset to evaluate against the
            reference dataset in this metric run
        :type target_dataset: :class:`dataset.Dataset`

        :returns: A 2D array of temporal correlation coefficients and a 2D
            array of confidence levels associated with the temporal correlation
            coefficients
        """
        num_times, num_lats, num_lons = reference_dataset.values.shape
        coefficients = ma.zeros([num_lats, num_lons])
        for i in numpy.arange(num_lats):
            for j in numpy.arange(num_lons):
                coefficients[i, j] = calc_correlation(target_dataset.values[:, i, j], reference_dataset.values[:, i, j])
        return coefficients
Example #29
 def get_data(self, file_middle):
     params = self.params
     cal_weights = params['cal_weights']
     pol_weights = params['pol_weights']
     n_time = self.n_time
     window = params['window']
     subtract_slope = params['subtract_slope']
     input_fname = (params['input_root'] + file_middle +
                        params['input_end'])
     # Read in the data.
     Reader = core.fitsGBT.Reader(input_fname)
     Blocks = Reader.read(params['scans'], params['IFs'],
                          force_tuple=True)
     # On the first pass, set the channel width.
     if not hasattr(self, "chan_width"):
         self.chan_width = Blocks[0].field['CDELT1']
     # Loop over the Blocks to select the channel polarizations and cal
     # state that we want to process.
     for Data in Blocks:
         data = Data.data
         data_selected = ma.zeros((Data.dims[0], 1, 1, Data.dims[3]),
                                   dtype=float)
         data_selected.mask = ma.getmaskarray(data_selected)
         for ii in range(len(pol_weights)) :
             for jj in range(len(cal_weights)) :
                 data_selected[:,0,0,:] += (data[:,ii,jj,:]
                                            * pol_weights[ii]
                                            * cal_weights[jj])
         Data.set_data(data_selected)
     # Convert the data to the proper format and return it.
     return make_masked_time_stream(Blocks, n_time, window=window, 
                                    return_means=True, 
                                    subtract_slope=subtract_slope)
Example #30
def calc_area_weighted_spatial_average(dataset, area_weight=False):
    '''Calculate the area weighted average of the values in an OCW dataset

    :param dataset: Dataset object
    :type dataset: :class:`dataset.Dataset`

    :param area_weight: If True, weight the spatial average by the cosine of latitude
    :type area_weight: bool

    :returns: time series for the dataset of shape (nT)
    '''

    if dataset.lats.ndim ==1:
        lons, lats = np.meshgrid(dataset.lons, dataset.lats)
    else:
        lons = dataset.lons
        lats = dataset.lats
    weights = np.cos(lats*np.pi/180.) 

    nt, ny, nx = dataset.values.shape
    spatial_average = ma.zeros(nt)
    for it in np.arange(nt):
        if area_weight:
            spatial_average[it] = ma.average(dataset.values[it,:], weights = weights)
        else:
            spatial_average[it] = ma.average(dataset.values[it,:])

    return spatial_average
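A minimal usage sketch (not part of the original source), again assuming the ocw.dataset.Dataset constructor used in Example #19.

import datetime
import numpy as np
import numpy.ma as ma
from ocw import dataset as ds  # assumed import path

lats = np.arange(-60.0, 61.0, 5.0)
lons = np.arange(0.0, 360.0, 5.0)
times = np.array([datetime.datetime(2000, month, 1) for month in range(1, 13)])
values = ma.array(np.random.rand(len(times), len(lats), len(lons)))

dataset = ds.Dataset(lats, lons, times, values, variable='pr')
simple_mean = calc_area_weighted_spatial_average(dataset)
weighted_mean = calc_area_weighted_spatial_average(dataset, area_weight=True)
print(simple_mean.shape, weighted_mean.shape)  # both (12,)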
Example #31
def composite_diurnal_cycle(var, season, fft=True):
    """
    Compute the composite diurnal cycle for var for the given season.
    Return mean + amplitudes and times-of-maximum of the first Fourier harmonic component as three transient variables.
    """
    season_idx = {
        "01": [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        "02": [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        "03": [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        "04": [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
        "05": [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
        "06": [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
        "07": [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
        "08": [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
        "09": [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
        "10": [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
        "11": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
        "12": [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
        "DJF": [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
        "MAM": [0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
        "JJA": [0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0],
        "SON": [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0],
        "ANN": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
    }

    site = False
    if var.getLatitude() is None and var.getLongitude() is None:
        site = True
        lat = var.lat
        lon = var.lon
    # Redefine time to be in the middle of the time interval
    var_time = var.getTime()
    if var_time is None:
        # Climo cannot be run on this variable.
        return var

    #    tbounds = var_time.getBounds()
    #    var_time[:] = 0.5*(tbounds[:,0]+tbounds[:,1]) #time bounds for h1-h4 are problematic
    var_time_absolute = var_time.asComponentTime()
    time_freq = int(24 /
                    (var_time_absolute[1].hour - var_time_absolute[0].hour)
                    )  # This only valid for time interval >= 1hour
    start_time = var_time_absolute[0].hour
    print("start_time", var_time_absolute[0], var_time_absolute[0].hour)
    print("var_time_freq={}".format(time_freq))

    # Convert to masked array
    v = var.asma()

    # Select specified seasons:
    if season == "ANNUALCYCLE":  # Not supported yet!
        cycle = [
            "01",
            "02",
            "03",
            "04",
            "05",
            "06",
            "07",
            "08",
            "09",
            "10",
            "11",
            "12",
        ]
    elif season == "SEASONALCYCLE":  # Not supported yet!
        cycle = ["DJF", "MAM", "JJA", "SON"]
    else:
        cycle = [season]

    ncycle = len(cycle)
    # var_diurnal has shape i.e. (ncycle, ntimesteps, [lat,lon]) for lat lon data
    var_diurnal = ma.zeros([ncycle] + [time_freq] + list(numpy.shape(v))[1:])
    for n in range(ncycle):
        # Get time index for each month/season.
        idx = numpy.array(
            [
                season_idx[cycle[n]][var_time_absolute[i].month - 1]
                for i in range(len(var_time_absolute))
            ],
            dtype=numpy.int,
        ).nonzero()
        var_diurnal[n, ] = ma.average(  # noqa
            numpy.reshape(
                v[idx],
                (int(v[idx].shape[0] / time_freq), time_freq) +
                v[idx].shape[1:],
            ),
            axis=0,
        )

    # Convert GMT to local time
    if site:
        nlat = 1
        nlon = 1
        # lat = [36.6]
        # lon = [262.5]
        lat = [
            lat,
        ]
        lon = [
            lon,
        ]
    else:
        nlat = var.shape[1]
        nlon = var.shape[2]
        lat = var.getLatitude()
        lon = var.getLongitude()
        var_diurnal = numpy.squeeze(var_diurnal)

    nt = time_freq
    lst = numpy.zeros((nt, nlat, nlon))
    for it, itime in enumerate(numpy.arange(0, 24, int(24 / nt))):
        for ilon in range(nlon):
            lst[it, :,
                ilon] = (itime + start_time +
                         lon[ilon] / 360 * 24) % 24  # convert GMT to LST

    # Compute mean, amplitude and max time of the first three Fourier components.
    if not fft:
        return var_diurnal, lst

    else:
        cycmean, maxvalue, tmax = fastAllGridFT(var_diurnal, lst)

        # Save phase, amplitude, and mean for the first harmonic.
        amplitude = MV2.zeros((nlat, nlon))
        amplitude[:, :] = maxvalue[0]
        amplitude.id = "PRECT_diurnal_amplitude"
        amplitude.longname = "Amplitude of diurnal cycle of PRECT"
        amplitude.units = var.units
        amplitude.setAxis(0, lat)
        amplitude.setAxis(1, lon)

        maxtime = MV2.zeros((nlat, nlon))
        maxtime[:, :] = tmax[0]
        maxtime.id = "PRECT_diurnal_phase"
        maxtime.longname = "Phase of diurnal cycle of PRECT"
        maxtime.units = "hour"
        maxtime.setAxis(0, lat)
        maxtime.setAxis(1, lon)

        cmean = MV2.zeros((nlat, nlon))
        cmean[:, :] = cycmean
        cmean.id = "PRECT_diurnal_cycmean"
        cmean.longname = "Mean of diurnal cycle of PRECT"
        cmean.units = var.units
        cmean.setAxis(0, lat)
        cmean.setAxis(1, lon)

        return cmean, amplitude, maxtime
Example #32
    def set_event_properties(self, image, time, pixel_x, pixel_y, type_tel,
                             tel_x, tel_y, array_direction, hillas):
        """The setter class is used to set the event properties within this
        class before minimisation can take place. This simply copies a
        bunch of useful properties to class members, so that we can
        use them later without passing all this information around.

        Parameters
        ----------
        image: dict
            Amplitude of pixels in camera images
        time: dict
            Time information per each pixel in camera images
        pixel_x: dict
            X position of pixels in nominal system
        pixel_y: dict
            Y position of pixels in nominal system
        type_tel: dict
            Type of telescope
        tel_x: dict
            X position of telescope in TiltedGroundFrame
        tel_y: dict
            Y position of telescope in TiltedGroundFrame
        array_direction: SkyCoord[AltAz]
            Array pointing direction in the AltAz Frame
        hillas: dict
            dictionary with telescope IDs as key and
            HillasParametersContainer instances as values

        Returns
        -------
        None

        """
        # First store these parameters in the class so we can use them
        # in minimisation For most values this is simply copying
        self.image = image

        self.tel_pos_x, self.tel_pos_y, self.ped = \
            np.zeros(len(tel_x)), np.zeros(len(tel_x)), np.zeros(len(tel_x))
        self.tel_types, self.tel_id = list(), list()

        max_pix_x, max_pix_y = 0, 0
        px, py, pa, pt = list(), list(), list(), list()
        self.hillas_parameters = list()

        # So here we must loop over the telescopes
        for x, i in zip(tel_x, range(len(tel_x))):

            px.append(pixel_x[x].to(u.rad).value)
            if len(px[i]) > max_pix_x:
                max_pix_x = len(px[i])
            py.append(pixel_y[x].to(u.rad).value)
            pa.append(image[x])
            pt.append(time[x])

            self.tel_pos_x[i] = tel_x[x].to(u.m).value
            self.tel_pos_y[i] = tel_y[x].to(u.m).value

            self.ped[i] = self.ped_table[type_tel[x]]
            self.tel_types.append(type_tel[x])
            self.tel_id.append(x)
            self.hillas_parameters.append(hillas[x])

        # Most interesting stuff is now copied to the class, but to remove our requirement
        # for loops we must copy the pixel positions to an array with the length of the
        # largest image

        # First allocate everything
        shape = (len(tel_x), max_pix_x)
        self.pixel_x, self.pixel_y = ma.zeros(shape), ma.zeros(shape)
        self.image, self.time, self.ped = ma.zeros(shape), ma.zeros(shape),\
                                          ma.zeros(shape)
        self.tel_types = np.array(self.tel_types)

        # Copy everything into our masked arrays
        for i in range(len(tel_x)):
            array_len = len(px[i])
            self.pixel_x[i][:array_len] = px[i]
            self.pixel_y[i][:array_len] = py[i]
            self.image[i][:array_len] = pa[i]
            self.time[i][:array_len] = pt[i]
            self.ped[i][:array_len] = self.ped_table[self.tel_types[i]]

        # Set the image mask
        mask = self.image == 0.0
        self.pixel_x[mask], self.pixel_y[mask] = ma.masked, ma.masked
        self.image[mask] = ma.masked
        self.time[mask] = ma.masked

        self.array_direction = array_direction
        self.nominal_frame = NominalFrame(origin=self.array_direction)

        # Finally run some functions to get ready for the event
        self.get_hillas_mean()
        self.initialise_templates(type_tel)
Example #33
def comp_r_old(dert__, fig, root_fcr):

    i__ = dert__[0]  # i is ig if fig else pixel
    '''
    sparse aligned i__center and i__rim arrays:
    '''
    i__center = i__[1:-1:2, 1:-1:2].copy()
    i__topleft = i__[:-2:2, :-2:2].copy()
    i__top = i__[:-2:2, 1:-1:2].copy()
    i__topright = i__[:-2:2, 2::2].copy()
    i__right = i__[1:-1:2, 2::2].copy()
    i__bottomright = i__[2::2, 2::2].copy()
    i__bottom = i__[2::2, 1:-1:2].copy()
    i__bottomleft = i__[2::2, :-2:2].copy()
    i__left = i__[1:-1:2, :-2:2].copy()
    ''' 
    remove mask from kernels with only one masked dert 
    '''
    mask_i = mask_SUM([
        i__center.mask, i__topleft.mask, i__top.mask, i__topright.mask,
        i__right.mask, i__bottomright.mask, i__bottom.mask, i__bottomleft.mask,
        i__left.mask
    ])

    i__center.mask = i__topleft.mask = i__top.mask = i__topright.mask = i__right.mask = i__bottomright.mask = \
    i__bottom.mask = i__bottomleft.mask = i__left.mask = mask_i

    idy__, idx__ = dert__[[1, 2]]

    if root_fcr:  # root fork is comp_r, accumulate derivatives:

        dy__, dx__, m__ = dert__[[4, 5, 6]]
        dy__ = dy__[1:-1:2, 1:-1:2].copy()  # sparse to align with i__center
        dx__ = dx__[1:-1:2, 1:-1:2].copy()
        m__ = m__[1:-1:2, 1:-1:2].copy()
        dy__.mask = dx__.mask = m__.mask = mask_i

    else:  # root fork is comp_g or comp_pixel, initialize sparse derivatives:

        dy__ = ma.zeros(
            (i__center.shape[0], i__center.shape[1]))  # row, column
        dx__ = ma.zeros((i__center.shape[0], i__center.shape[1]))
        m__ = ma.zeros((i__center.shape[0], i__center.shape[1]))

    if not fig:  # compare four diametrically opposed pairs of rim pixels:

        dt__ = np.stack((i__topleft - i__bottomright, i__top - i__bottom,
                         i__topright - i__bottomleft, i__right - i__left))
        dt__.mask = mask_i  # not needed?

        for d__, YCOEF, XCOEF in zip(dt__, YCOEFs[:4], XCOEFs[:4]):

            dy__ += d__ * YCOEF  # decompose differences into dy and dx,
            dx__ += d__ * XCOEF  # accumulate with prior-rng dy, dx

        g__ = np.hypot(dy__, dx__)  # gradient
        '''
        inverse match = SAD, more precise measure of variation than g, direction-invariant
        (all diagonal derivatives can be imported from prior 2x2 comp)
        '''
        m__ += (abs(i__center - i__topleft) + abs(i__center - i__top) +
                abs(i__center - i__topright) + abs(i__center - i__right) +
                abs(i__center - i__bottomright) + abs(i__center - i__bottom) +
                abs(i__center - i__bottomleft) + abs(i__center - i__left))

    else:  # fig is TRUE, compare angle and then magnitude of 8 center-rim pairs
        # replace float with int

        i__[ma.where(i__ == 0)] = 1  # if g is int
        a__ = [idy__, idx__] / i__  # sin, cos;  i = ig
        '''
        sparse aligned a__center and a__rim arrays:
        '''
        a__center = a__[:, 1:-1:2, 1:-1:2].copy()
        a__topleft = a__[:, :-2:2, :-2:2].copy()
        a__top = a__[:, :-2:2, 1:-1:2].copy()
        a__topright = a__[:, :-2:2, 2::2].copy()
        a__right = a__[:, 1:-1:2, 2::2].copy()
        a__bottomright = a__[:, 2::2, 2::2].copy()
        a__bottom = a__[:, 2::2, 1:-1:2].copy()
        a__bottomleft = a__[:, 2::2, :-2:2].copy()
        a__left = a__[:, 1:-1:2, :-2:2].copy()
        ''' 
        mask only kernels with more than one masked dert 
        '''
        mask_a = mask_SUM([
            a__center.mask, a__topleft.mask, a__top.mask, a__topright.mask,
            a__right.mask, a__bottomright.mask, a__bottom.mask,
            a__bottomleft.mask, a__left.mask
        ])

        a__center.mask = a__topleft.mask = a__top.mask = a__topright.mask = a__right.mask = a__bottomright.mask = \
            a__bottom.mask = a__bottomleft.mask = a__left.mask = mask_a
        '''
        8-tuple of differences between central dert angle and rim dert angle:
        '''
        cos_da = ma.stack(
            (((a__topleft[1] * a__center[1]) + (a__center[0] * a__topleft[0])),
             ((a__top[1] * a__center[1]) + (a__center[0] * a__top[0])),
             ((a__topright[1] * a__center[1]) +
              (a__center[0] * a__topright[0])),
             ((a__right[1] * a__center[1]) + (a__center[0] * a__right[0])),
             ((a__bottomright[1] * a__center[1]) +
              (a__center[0] * a__bottomright[0])),
             ((a__bottom[1] * a__center[1]) + (a__center[0] * a__bottom[0])),
             ((a__bottomleft[1] * a__center[1]) +
              (a__center[0] * a__bottomleft[0])),
             ((a__left[1] * a__center[1]) + (a__center[0] * a__left[0]))))
        '''
        8-tuple of cosine matches per direction:
        '''
        m__ += (np.minimum(i__center, i__topleft) * cos_da[0] +
                np.minimum(i__center, i__top) * cos_da[1] +
                np.minimum(i__center, i__topright) * cos_da[2] +
                np.minimum(i__center, i__right) * cos_da[3] +
                np.minimum(i__center, i__bottomright) * cos_da[4] +
                np.minimum(i__center, i__bottom) * cos_da[5] +
                np.minimum(i__center, i__bottomleft) * cos_da[6] +
                np.minimum(i__center, i__left) * cos_da[7])
        '''
        8-tuple of cosine differences per direction:
        '''
        dt__ = np.stack(((i__center - i__topleft * cos_da[0]),
                         (i__center - i__top * cos_da[1]),
                         (i__center - i__topright * cos_da[2]),
                         (i__center - i__right * cos_da[3]),
                         (i__center - i__bottomright * cos_da[4]),
                         (i__center - i__bottom * cos_da[5]),
                         (i__center - i__bottomleft * cos_da[6]),
                         (i__center - i__left * cos_da[7])))

        for d__, YCOEF, XCOEF in zip(dt__, YCOEFs, XCOEFs):

            dy__ += d__ * YCOEF  # decompose differences into dy and dx,
            dx__ += d__ * XCOEF  # accumulate with prior-rng dy, dx
            '''
            accumulate in prior-range dy, dx: 3x3 -> 5x5 -> 9x9 
            '''
        g__ = np.hypot(dy__, dx__)

    idy__ = idy__[1:-1:2, 1:-1:2].copy()  # i__center.shape, add .copy()?
    idx__ = idx__[1:-1:2, 1:-1:2].copy()  # i__center.shape
    idy__.mask = idx__.mask = i__center.mask  # align shifted masks
    '''
    next comp_r will use full dert       
    next comp_g will use g__, dy__, dx__
    '''
    return ma.stack((i__center, idy__, idx__, g__, dy__, dx__, m__))
Example #34
def comp_a(dert__, rng):
    """
    Compute and compare a over predetermined range.
    """
    # Unpack dert__:
    if len(dert__) in (5, 12):  # idert or full dert with m.
        i__, g__, m__, dy__, dx__ = dert__[:5]
    else:  # idert or full dert without m.
        i__, g__, dy__, dx__ = dert__[:4]

    if len(dert__) > 10:  # if ra+:
        a__ = dert__[
            -7:-5]  # Computed angle (use reverse indexing to avoid m check).
        day__ = dert__[-4:-2]  # Accumulated day__.
        dax__ = dert__[-2:]  # Accumulated dax__.
    else:  # if fa:
        # Compute angles:
        a__ = ma.stack((dy__, dx__), axis=0) / g__
        a__.mask = g__.mask

        # Initialize dax, day:
        day__, dax__ = [ma.zeros((2, ) + i__.shape) for _ in range(2)]

    # Compute angle differences:
    da__ = translated_operation(a__, rng, angle_diff)
    comp_field = central_slice(rng)

    # Decompose and add to corresponding day and dax:
    day__[comp_field] = (da__ * Y_COEFFS[rng]).mean(axis=-1)
    dax__[comp_field] = (da__ * X_COEFFS[rng]).mean(axis=-1)

    # Apply mask:
    msq = np.ones(a__.shape, dtype=int)  # Rim mask.
    msq[comp_field] = a__.mask[comp_field] + da__.mask.sum(
        axis=-1)  # Summed d mask.
    imsq = msq.nonzero()
    day__[imsq] = dax__[imsq] = ma.masked  # Apply mask.

    # Compute ga:
    ga__ = ma.hypot(ma.arctan2(*day__), ma.arctan2(*dax__))[np.newaxis,
                                                            ...] * SCALER_ga

    try:  # dert with m is more common:
        return ma.concatenate(  # Concatenate on the first dimension.
            (
                ma.stack((i__, g__, m__, dy__, dx__), axis=0),
                a__,
                ga__,
                day__,
                dax__,
            ),
            axis=0,
        )
    except NameError:  # m doesn't exist:
        return ma.concatenate(  # Concatenate on the first dimension.
            (
                ma.stack((i__, g__, dy__, dx__), axis=0),
                a__,
                ga__,
                day__,
                dax__,
            ),
            axis=0,
        )
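# Hedged, self-contained illustration (not part of the original comp_a): the same
# masked-array pattern used above for computing the unit angle vector a = (dy, dx) / g
# and an angle-gradient magnitude from accumulated day/dax. All values are made up.
import numpy as np
import numpy.ma as ma

dy = ma.array([[3., 0.], [0., 4.]], mask=[[False, True], [False, False]])
dx = ma.array([[4., 1.], [2., 3.]], mask=[[False, True], [False, False]])
g = ma.hypot(dy, dx)                           # gradient magnitude
a = ma.stack((dy, dx), axis=0) / g             # unit angle vector, masked where g is masked
day, dax = [ma.zeros((2,) + dy.shape) for _ in range(2)]   # accumulated angle differences
ga = ma.hypot(ma.arctan2(*day), ma.arctan2(*dax))          # angle gradient, as in comp_a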
Example #35
0
    def voronoi_binning(self, target_snr=10.0, write_fits=False, outfile=None, overwrite=False, plot=False,
                        flag_threshold=0.5, **kwargs):
        """
        Applies Voronoi binning to the data cube, using Cappellari's Python implementation.

        Parameters
        ----------
        target_snr : float
            Desired signal to noise ratio of the binned pixels
        write_fits : boolean
            Writes a FITS image with the output of the binning.
        plot: bool
            Plots the binning results.
        outfile : string
            Name of the output FITS file. If None, the name of
            the original FITS file containing the data cube will be used
            as a root name, with '_vor' appended to it.
        overwrite : boolean
            Overwrites files with the same name given in 'outfile'.
        flag_threshold : float
            Bins with less than this fraction of unflagged pixels will be flagged.
        **kwargs: dict
            Arguments passed to voronoi_2d_binning.

        Returns
        -------
        Nothing.

        Notes
        -----
        The output file contains two tables which outline the tessellation process. These are
        stored in the extensions 'VOR' and 'VORPLUS'.
        """

        try:
            from vorbin.voronoi_2d_binning import voronoi_2d_binning
        except ImportError:
            raise ImportError('Could not find the voronoi_2d_binning module. Please add it to your PYTHONPATH.')

        if self.noise is None:
            raise RuntimeError('This function requires prior execution of the snr_eval method.')

        # Initializing the binned arrays as zeros.
        assert hasattr(self, 'data'), 'Could not access the data attribute of the Cube object.'
        b_data = ma.zeros(self.data.shape)
        b_data.mask = self.flags.astype(bool)

        assert hasattr(self, 'variance'), 'Could not access the variance attribute of the Cube object.'
        b_variance = ma.zeros(self.variance.shape)
        b_variance.mask = self.flags.astype(bool)

        assert hasattr(self, 'flags'), 'Could not access the flags attribute of the Cube object.'
        b_flags = np.zeros_like(self.flags, dtype=int)

        valid_spaxels = np.ravel(~np.isnan(self.signal) & ~np.isnan(self.noise) & ~self.spatial_mask)

        x = np.ravel(np.indices(np.shape(self.signal))[1])[valid_spaxels]
        y = np.ravel(np.indices(np.shape(self.signal))[0])[valid_spaxels]

        s, n = deepcopy(self.signal), deepcopy(self.noise)

        s[s <= 0] = np.average(self.signal[self.signal > 0])
        n[n <= 0] = np.average(self.signal[self.signal > 0]) * .5

        signal, noise = np.ravel(s)[valid_spaxels], np.ravel(n)[valid_spaxels]

        bin_num, x_node, y_node, x_bar, y_bar, sn, n_pixels, scale = \
            voronoi_2d_binning(x, y, signal, noise, target_snr, plot=plot, quiet=0, **kwargs)
        v = np.column_stack([y, x, bin_num])

        # For every nan in the original cube, fill with nan the binned cubes.
        nan_idx = (Ellipsis,
                   np.ravel(np.indices(np.shape(self.signal))[0])[~valid_spaxels],
                   np.ravel(np.indices(np.shape(self.signal))[1])[~valid_spaxels])
        b_data[nan_idx] = np.nan
        b_variance[nan_idx] = np.nan
        b_flags[nan_idx] = 1

        for i in np.arange(bin_num.max() + 1):
            same_bin = v[:, 2] == i
            same_bin_coordinates = v[same_bin, :2]

            for k in same_bin_coordinates:
                binned_idx = (Ellipsis, k[0], k[1])
                unbinned_idx = (Ellipsis, same_bin_coordinates[:, 0], same_bin_coordinates[:, 1])

                b_data[binned_idx] = ma.mean(self.data[unbinned_idx], axis=1)
                b_variance[binned_idx] = ma.mean(self.variance[unbinned_idx], axis=1)
                b_flags[binned_idx] = (np.mean(self.flags[unbinned_idx], axis=1) >= flag_threshold).astype(int)

        b_data = b_data.data
        b_variance = b_variance.data

        if write_fits:

            h = fits.HDUList()
            hdu = fits.PrimaryHDU(header=self.header)
            hdu.name = 'PRIMARY'
            hdu.header['VORBIN'] = (True, 'Processed by Voronoi binning?')
            hdu.header['VORTSNR'] = (target_snr, 'Target SNR for Voronoi binning.')
            h.append(hdu)

            hdr = self.header_data
            # noinspection PyTypeChecker
            hdu = fits.ImageHDU(data=b_data, header=hdr)
            hdu.name = 'SCI'
            h.append(hdu)

            # noinspection PyTypeChecker
            hdu = fits.ImageHDU(data=b_variance, header=hdr)
            hdu.name = 'VAR'
            h.append(hdu)

            # noinspection PyTypeChecker
            hdu = fits.ImageHDU(data=b_flags, header=hdr)
            hdu.name = 'FLAGS'
            h.append(hdu)

            tbhdu = fits.BinTableHDU.from_columns(
                [
                    fits.Column(name='xcoords', format='i8', array=x),
                    fits.Column(name='ycoords', format='i8', array=y),
                    fits.Column(name='binNum', format='i8', array=bin_num),
                ], name='VOR')

            tbhdu_plus = fits.BinTableHDU.from_columns(
                [
                    fits.Column(name='ubin', format='i8', array=np.unique(bin_num)),
                    fits.Column(name='xNode', format='F16.8', array=x_node),
                    fits.Column(name='yNode', format='F16.8', array=y_node),
                    fits.Column(name='xBar', format='F16.8', array=x_bar),
                    fits.Column(name='yBar', format='F16.8', array=y_bar),
                    fits.Column(name='sn', format='F16.8', array=sn),
                    fits.Column(name='nPixels', format='i8', array=n_pixels),
                ], name='VORPLUS')

            h.append(tbhdu)
            h.append(tbhdu_plus)

            if outfile is None:
                outfile = self.fitsfile.replace('.fits', '_vor.fits')

            h.writeto(outfile, overwrite=overwrite)

        self.binned_cube = b_data
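# Hedged, self-contained sketch (not from the original class): the core of the binning
# loop above replaces every spaxel in a Voronoi bin by the masked mean spectrum over
# that bin. Toy version with fake data and fake bin assignments:
import numpy as np
import numpy.ma as ma

data = ma.masked_invalid(np.random.rand(5, 4, 4))      # toy (wavelength, y, x) cube
y, x = (idx.ravel() for idx in np.indices((4, 4)))     # spaxel coordinates
bin_num = x // 2                                       # fake Voronoi bins: left/right half
binned = ma.zeros(data.shape)

for b in np.unique(bin_num):
    members = bin_num == b
    bin_mean = ma.mean(data[:, y[members], x[members]], axis=1)      # mean spectrum of the bin
    binned[:, y[members], x[members]] = bin_mean[:, np.newaxis]      # assign it to every member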
Example #36
0
def addIntegratedIntensity(full_stack, outDir):
    '''

    calculate the integrated line flux and the associated noise

    Input:

    full_stack: astropy table with one row containing the stacks for
    each bin.

    outDir: directory for diagnostics plots
    
    Output:
    
    stack with added columns for the integrated intensity.

    Date        Programmer      Description of Changes
    ----------------------------------------------------------------------
    5/13/2021   A.A. Kepley     Original Code

    '''

    # get number of stacks
    nstack = len(full_stack)

    # initialize output arrays
    int_intensity_fit = {}
    int_intensity_fit_err = {}
    int_intensity_fit_uplim = {}

    int_intensity_sum = {}
    int_intensity_sum_err = {}
    int_intensity_sum_uplim = {}

    fwhm_fit = np.zeros(nstack) * full_stack['spectral_axis'].unit
    fwhm_sum = np.zeros(nstack) * full_stack['spectral_axis'].unit

    for line in ['CO', 'HCN', 'HCOp', '13CO', 'C18O']:

        int_intensity_fit[line] = ma.zeros(nstack) * full_stack[
            'stack_profile_CO'].unit * full_stack['spectral_axis'].unit
        int_intensity_fit_err[line] = ma.zeros(nstack) * full_stack[
            'stack_profile_CO'].unit * full_stack['spectral_axis'].unit
        int_intensity_fit_uplim[line] = np.full(nstack, False)

        int_intensity_sum[line] = ma.zeros(nstack) * full_stack[
            'stack_profile_CO'].unit * full_stack['spectral_axis'].unit
        int_intensity_sum_err[line] = ma.zeros(nstack) * full_stack[
            'stack_profile_CO'].unit * full_stack['spectral_axis'].unit
        int_intensity_sum_uplim[line] = np.full(nstack, False)

    # calculate the integrated intensity from a fit and from a simple sum.
    for i in range(nstack):

        line = 'CO'
        (stack_int, stack_int_err, fwhm,
         uplim) = fitIntegratedIntensity(full_stack[i], line, outDir)

        int_intensity_fit['CO'][i] = stack_int
        int_intensity_fit_err['CO'][i] = stack_int_err
        int_intensity_fit_uplim['CO'][i] = uplim
        fwhm_fit[i] = fwhm

        for line in ['HCN', 'HCOp', '13CO', 'C18O']:

            if ((full_stack[i]['galaxy'] == 'NGC6946') & ((line == '13CO') |
                                                          (line == 'C18O'))):

                int_intensity_fit[line][i] = np.nan
                int_intensity_fit_err[line][i] = np.nan
                int_intensity_fit_uplim[line][i] = True

            else:

                (stack_int, stack_int_err, fwhm,
                 uplim) = fitIntegratedIntensity(full_stack[i],
                                                 line,
                                                 outDir,
                                                 fwhm=fwhm)

                int_intensity_fit[line][i] = stack_int
                int_intensity_fit_err[line][i] = stack_int_err
                int_intensity_fit_uplim[line][i] = uplim

        # straight sum
        (stack_sum, stack_sum_err, fwhm,
         uplim) = sumIntegratedIntensity(full_stack[i]['spectral_axis'],
                                         full_stack[i]['stack_profile_CO'])

        int_intensity_sum['CO'][i] = stack_sum
        int_intensity_sum_err['CO'][i] = stack_sum_err
        int_intensity_sum_uplim['CO'][i] = uplim
        fwhm_sum[i] = fwhm

        for line in ['HCN', 'HCOp', '13CO', 'C18O']:

            if ((full_stack[i]['galaxy'] == 'NGC6946') & ((line == '13CO') |
                                                          (line == 'C18O'))):

                int_intensity_sum[line][i] = np.nan
                int_intensity_sum_err[line][i] = np.nan
                int_intensity_sum_uplim[line][i] = True

            else:

                (stack_sum, stack_sum_err, fwhm,
                 uplim) = sumIntegratedIntensity(
                     full_stack[i]['spectral_axis'],
                     full_stack[i]['stack_profile_' + line],
                     fwhm=fwhm)

                int_intensity_sum[line][i] = stack_sum
                int_intensity_sum_err[line][i] = stack_sum_err
                int_intensity_sum_uplim[line][i] = uplim

    # add results to data table.
    for line in ['CO', 'HCN', 'HCOp', '13CO', 'C18O']:
        full_stack.add_column(
            Column(int_intensity_fit[line], name='int_intensity_fit_' + line))
        full_stack.add_column(
            Column(int_intensity_fit_err[line],
                   name='int_intensity_fit_err_' + line))
        full_stack.add_column(
            Column(int_intensity_fit_uplim[line],
                   name='int_intensity_fit_uplim_' + line))

        full_stack.add_column(
            Column(int_intensity_sum[line], name='int_intensity_sum_' + line))
        full_stack.add_column(
            Column(int_intensity_sum_err[line],
                   name='int_intensity_sum_err_' + line))
        full_stack.add_column(
            Column(int_intensity_sum_uplim[line],
                   name='int_intensity_sum_uplim_' + line))

    full_stack.add_column(Column(fwhm_fit), name='FWHM_fit')
    full_stack.add_column(Column(fwhm_sum), name='FWHM_sum')

    return full_stack
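# Hedged, self-contained illustration (not from the original pipeline): the pattern of
# collecting per-stack results in masked arrays and attaching them as table columns,
# shown on a toy two-row astropy Table. All names and values here are made up.
import numpy.ma as ma
from astropy.table import MaskedColumn, Table

stack = Table({'galaxy': ['NGC0628', 'NGC6946']})
int_co = ma.zeros(len(stack))                 # one integrated intensity per stack row
int_co[1] = ma.masked                         # e.g. line not observed for this galaxy
stack.add_column(MaskedColumn(int_co, name='int_intensity_fit_CO'))
stack.add_column(MaskedColumn(ma.getmaskarray(int_co), name='int_intensity_fit_uplim_CO'))
print(stack)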
Example #37
0
    def get_likelihood(self,
                       source_x,
                       source_y,
                       core_x,
                       core_y,
                       energy,
                       x_max_scale,
                       goodness_of_fit=False):
        """Get the likelihood that the image predicted at the given test
        position matches the camera image.

        Parameters
        ----------
        source_x: float
            Source position of shower in the nominal system (in deg)
        source_y: float
            Source position of shower in the nominal system (in deg)
        core_x: float
            Core position of shower in tilted telescope system (in m)
        core_y: float
            Core position of shower in tilted telescope system (in m)
        energy: float
            Shower energy (in TeV)
        x_max_scale: float
            Scaling factor applied to geometrically calculated Xmax
        goodness_of_fit: boolean
            Determines whether expected likelihood should be subtracted from result
        Returns
        -------
        float: Likelihood the model represents the camera image at this position

        """
        # First we add units back onto everything.  Currently not
        # handled very well, maybe in future we could just put
        # everything in the correct units when loading in the class
        # and ignore them from then on

        zenith = (np.pi / 2) - self.array_direction.alt.to(u.rad).value
        azimuth = self.array_direction.az

        # Geometrically calculate the depth of maximum given this test position
        x_max = self.get_shower_max(source_x, source_y, core_x, core_y, zenith)
        x_max *= x_max_scale

        # Calculate expected Xmax given this energy
        x_max_exp = guess_shower_depth(energy)  # / np.cos(20*u.deg)

        # Convert to binning of Xmax
        x_max_bin = x_max - x_max_exp

        # Check for range
        if x_max_bin > 200:
            x_max_bin = 200
        if x_max_bin < -100:
            x_max_bin = -100

        # Calculate impact distance for all telescopes
        impact = np.sqrt(
            np.power(self.tel_pos_x - core_x, 2) +
            np.power(self.tel_pos_y - core_y, 2))
        # And the expected rotation angle
        phi = np.arctan2((self.tel_pos_x - core_x),
                         (self.tel_pos_y - core_y)) * u.rad

        # Rotate and translate all pixels such that they match the
        # template orientation
        pix_y_rot, pix_x_rot = self.rotate_translate(self.pixel_x,
                                                     self.pixel_y, source_x,
                                                     source_y, phi)

        # In the interpolator class we can gain speed advantages by using masked arrays
        # so we need to make sure here everything is masked
        prediction = ma.zeros(self.image.shape)
        prediction.mask = ma.getmask(self.image)

        time_gradients = np.zeros((self.image.shape[0], 2))

        # Loop over all telescope types and get prediction
        for tel_type in np.unique(self.tel_types).tolist():
            type_mask = self.tel_types == tel_type
            prediction[type_mask] = \
                self.image_prediction(tel_type, energy *
                                      np.ones_like(impact[type_mask]),
                                      impact[type_mask], x_max_bin *
                                      np.ones_like(impact[type_mask]),
                                      pix_x_rot[type_mask] * (180 / math.pi) * -1,
                                      pix_y_rot[type_mask] * (180 / math.pi))

            if self.use_time_gradient:
                time_gradients[type_mask] = \
                    self.predict_time(tel_type,
                                      energy * np.ones_like(impact[type_mask]),
                                      impact[type_mask],
                                      x_max_bin * np.ones_like(impact[type_mask]))

        if self.use_time_gradient:
            time_mask = np.logical_and(np.invert(ma.getmask(self.image)),
                                       self.time > 0)
            weight = np.sqrt(self.image) * time_mask
            rv = norm()
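            # Weighted linear least-squares fit (per telescope) of pixel arrival time
            # against rotated pixel x, using sqrt(image) as weights; time_fit below is
            # the fitted slope (time gradient), which is compared with the predicted
            # gradients through the Gaussian chi-square term chi2.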

            sx = pix_x_rot * weight
            sxx = pix_x_rot * pix_x_rot * weight

            sy = self.time * weight
            sxy = self.time * pix_x_rot * weight
            d = weight.sum(axis=1) * sxx.sum(axis=1) - sx.sum(axis=1) * sx.sum(
                axis=1)
            time_fit = (weight.sum(axis=1) * sxy.sum(axis=1) -
                        sx.sum(axis=1) * sy.sum(axis=1)) / d
            time_fit /= -1 * (180 / math.pi)
            chi2 = -2 * np.log(
                rv.pdf((time_fit - time_gradients.T[0]) / time_gradients.T[1]))

        # Likelihood function will break if we find a NaN or a 0
        prediction[np.isnan(prediction)] = 1e-8
        prediction[prediction < 1e-8] = 1e-8
        prediction *= self.template_scale

        # Get likelihood that the prediction matched the camera image
        like = poisson_likelihood_gaussian(self.image, prediction, self.spe,
                                           self.ped)
        like[np.isnan(like)] = 1e9
        like *= np.invert(ma.getmask(self.image))
        like = ma.MaskedArray(like, mask=ma.getmask(self.image))

        array_like = like
        if goodness_of_fit:
            return np.sum(like - mean_poisson_likelihood_gaussian(
                prediction, self.spe, self.ped))

        prior_pen = 0
        # Add prior penalties if we have them
        array_like += 1e-8
        if "energy" in self.priors:
            prior_pen += energy_prior(energy, index=-1)
        if "xmax" in self.priors:
            prior_pen += xmax_prior(energy, x_max)

        array_like += prior_pen / float(len(array_like))

        if self.array_return:
            array_like = array_like.ravel()
            return array_like[np.invert(ma.getmask(array_like))]

        final_sum = array_like.sum()
        if self.use_time_gradient:
            final_sum += chi2.sum()  #* np.sum(ma.getmask(self.image))

        return final_sum
Example #38
0
    def __init__(self,
                 in_file,
                 vg_files=[1],
                 data_type=1,
                 projection='cyl',
                 color_map='jet',
                 time_zone=0,
                 plot_contours=False,
                 plot_center='t',
                 plot_meridians=True,
                 plot_parallels=True,
                 plot_terminator=True,
                 resolution='c',
                 points_of_interest=[],
                 save_file='',
                 run_quietly=False,
                 dpi=150,
                 parent=None):

        self.run_quietly = run_quietly
        self.dpi = float(dpi)

        plot_parameters = VOAFile((in_file + '.voa'))
        plot_parameters.parse_file()

        if (plot_parameters.get_projection() != 'cyl'):
            print _("Error: Only lat/lon (type 1) input files are supported")
            sys.exit(1)

        grid = plot_parameters.get_gridsize()
        self.image_defs = VOAAreaPlot.IMG_TYPE_DICT[int(data_type)]

        # TODO This needs a little more work... what if the pcenter card is not specified

        if plot_center == 'p':
            plot_centre_location = plot_parameters.get_location(
                plot_parameters.P_CENTRE)
        else:
            plot_centre_location = plot_parameters.get_location(
                plot_parameters.TX_SITE)

        self.points_of_interest = [plot_centre_location]
        if len(points_of_interest) > 0:
            self.points_of_interest.extend(points_of_interest)

        imageBuf = P.zeros([grid, grid], float)

        area_rect = plot_parameters.get_area_rect()
        points = P.zeros([grid, grid], float)
        lons = P.zeros(grid * grid, float)
        lats = P.zeros(grid * grid, float)

        lons = P.arange(area_rect.get_sw_lon(),
                        area_rect.get_ne_lon() + 0.001,
                        (area_rect.get_ne_lon() - area_rect.get_sw_lon()) /
                        float(grid - 1))
        lats = P.arange(area_rect.get_sw_lat(),
                        area_rect.get_ne_lat() + 0.001,
                        (area_rect.get_ne_lat() - area_rect.get_sw_lat()) /
                        float(grid - 1))

        colString = 'P.cm.' + color_map
        colMap = eval(colString)

        self.subplots = []

        matplotlib.rcParams['axes.edgecolor'] = 'gray'
        matplotlib.rcParams['axes.facecolor'] = 'white'
        matplotlib.rcParams['figure.facecolor'] = 'white'
        #matplotlib.rcParams['figure.figsize'] = (6, 10)
        #        matplotlib.rcParams['figure.subplot.hspace'] = 0.45
        #        matplotlib.rcParams['figure.subplot.wspace'] = 0.35
        #        matplotlib.rcParams['figure.subplot.right'] = 0.85
        colorbar_fontsize = 8

        self.num_rows = 1
        self.main_title_fontsize = 18
        matplotlib.rcParams['legend.fontsize'] = 12
        matplotlib.rcParams['axes.labelsize'] = 10
        matplotlib.rcParams['axes.titlesize'] = 10
        matplotlib.rcParams['xtick.labelsize'] = 8
        matplotlib.rcParams['ytick.labelsize'] = 8
        #        matplotlib.rcParams['figure.subplot.top'] = 0.8 # single figure plots have a larger title so require more space at the top.

        self.fig = Figure(figsize=(9, 5))
        #        self.main_title_label = self.fig.suptitle(unicode(self.image_defs['title'],'utf-8'), fontsize=self.main_title_fontsize)

        ax = self.fig.add_subplot(111, axisbg='white')
        self.subplots.append(ax)

        ax.label_outer()
        #print "opening: ",(in_file+'.vg'+str(vg_files[plot_ctr]))
        plot_ctr = 0

        #        dir_name = os.path.dirname(in_file)
        #        if not os.path.exists(dir_name):
        #            os.makedirs(dir_name)

        vgFile = open(in_file + '.vg' + str(vg_files[plot_ctr]))
        pattern = re.compile(r"[a-z]+")

        for line in vgFile:
            match = pattern.search(line)
            if not match:
                value = float(line[int(self.image_defs['first_char']
                                       ):int(self.image_defs['last_char'])])
                # TODO Does this need to be normalised here if it's also being done in the plot?
                value = max(self.image_defs['min'], value)
                value = min(self.image_defs['max'], value)
                #if value < self.image_defs[2] : value = self.image_defs[2]
                #if value > self.image_defs[3] : value = self.image_defs[3]
                points[int(line[3:6]) - 1][int(line[0:3]) - 1] = value
        vgFile.close()

        map = Basemap(\
            llcrnrlon=area_rect.get_sw_lon(), llcrnrlat=area_rect.get_sw_lat(),\
            urcrnrlon=area_rect.get_ne_lon(), urcrnrlat=area_rect.get_ne_lat(),\
            projection=projection,\
            lat_0=plot_centre_location.get_latitude(),\
            lon_0=plot_centre_location.get_longitude(),\
            resolution=resolution,
            ax=ax)

        map.drawcoastlines(color='black')
        map.drawcountries(color='grey')
        map.drawmapboundary(color='black', linewidth=1.0)

        warped = ma.zeros((grid, grid), float)
        warped, warped_lon, warped_lat = map.transform_scalar(
            points,
            lons,
            lats,
            grid,
            grid,
            returnxy=True,
            checkbounds=False,
            masked=True)
        warped = warped.filled(self.image_defs['min'] - 1.0)

        colMap.set_under(color='k', alpha=0.0)

        im = map.imshow(warped,
                        cmap=colMap,
                        extent=(-180, 180, -90, 90),
                        origin='lower',
                        norm=P.Normalize(clip=False,
                                         vmin=self.image_defs['min'],
                                         vmax=self.image_defs['max']))

        #######################
        # Plot greyline
        #######################
        if plot_terminator:
            the_sun = Sun()
            the_month = plot_parameters.get_month(vg_files[plot_ctr] - 1)
            the_day = plot_parameters.get_day(vg_files[plot_ctr] - 1)
            the_hour = plot_parameters.get_utc(vg_files[plot_ctr] - 1)
            if (the_day == 0):
                the_day = 15
            the_year = datetime.date.today().year
            num_days_since_2k = the_sun.daysSince2000Jan0(
                the_year, the_month, the_day)

            res = the_sun.sunRADec(num_days_since_2k)
            declination = res[1]
            if (declination == 0.0):
                declination = -0.001

            tau = the_sun.computeGHA(the_day, the_month, the_year, the_hour)

            if declination > 0:
                terminator_end_lat = area_rect.get_sw_lat()
            else:
                terminator_end_lat = area_rect.get_ne_lat()

            terminator_lat = [terminator_end_lat]
            terminator_lon = [area_rect.get_sw_lon()]

            for i in range(int(area_rect.get_sw_lon()),
                           int(area_rect.get_ne_lon()),
                           1) + [int(area_rect.get_ne_lon())]:
                longitude = i + tau
                tan_lat = -the_sun.cosd(longitude) / the_sun.tand(declination)
                latitude = the_sun.atand(tan_lat)
                latitude = max(latitude, area_rect.get_sw_lat())
                latitude = min(latitude, area_rect.get_ne_lat())
                xpt, ypt = map(i, latitude)
                terminator_lon.append(xpt)
                terminator_lat.append(ypt)

            terminator_lon.append(area_rect.get_ne_lon())
            terminator_lat.append(terminator_end_lat)

            #This is a little simplistic and doesn't work for ortho plots....
            ax.plot(terminator_lon, terminator_lat, color='grey', alpha=0.75)
            ax.fill(terminator_lon,
                    terminator_lat,
                    facecolor='grey',
                    alpha=0.5)

            tau = -tau
            if (tau > 180.0):
                tau = tau - 360.0
            if (tau < -180.0):
                tau = tau + 360.0

            #Plot the position of the sun (if it's in the coverage area)
            if area_rect.contains(declination, tau):
                xpt, ypt = map(tau, declination)
                #sbplt_ax.plot([xpt],[ypt],'yh')
                ax.plot([xpt], [ypt], 'yh')

        ##########################
        # Points of interest
        ##########################
        for location in self.points_of_interest:
            if area_rect.contains(location.get_latitude(),
                                  location.get_longitude()):
                xpt, ypt = map(location.get_longitude(),
                               location.get_latitude())
                ax.plot([xpt], [ypt], 'ro')
                ax.text(xpt + 100000, ypt + 100000, location.get_name())

        if plot_meridians:
            if (area_rect.get_lon_delta() <= 90.0):
                meridians = P.arange(-180, 190.0, 10.0)
            elif (area_rect.get_lon_delta() <= 180.0):
                meridians = P.arange(-180.0, 210.0, 30.0)
            else:
                meridians = P.arange(-180, 240.0, 60.0)
            if ((projection == 'ortho') or (projection == 'vandg')):
                map.drawmeridians(meridians)
            else:
                map.drawmeridians(meridians, labels=[1, 1, 0, 1])

        if plot_parallels:
            if (area_rect.get_lat_delta() <= 90.0):
                parallels = P.arange(-90.0, 120.0, 60.0)
            else:
                parallels = P.arange(-90.0, 120.0, 30.0)
            if ((projection == 'ortho') or (projection == 'vandg')):
                map.drawparallels(parallels)
            else:
                map.drawparallels(parallels, labels=[1, 0, 0, 1])

        if plot_contours:
            map.contour(warped_lon,
                        warped_lat,
                        warped,
                        self.image_defs['y_labels'],
                        linewidths=1.0,
                        colors='k',
                        alpha=0.5)

        #add a title
        title_str = plot_parameters.get_plot_description_string(
            vg_files[plot_ctr] - 1, self.image_defs['plot_type'], time_zone)
        title_str = title_str + "\n" + plot_parameters.get_detailed_plot_description_string(
            vg_files[plot_ctr] - 1)
        self.subplot_title_label = ax.set_title(title_str)

        # Add a colorbar on the right hand side, aligned with the
        # top of the uppermost plot and the bottom of the lowest
        # plot.
        pos = [0.91, 0.19, 0.02, 0.62]
        self.cb_ax = self.fig.add_axes(pos)
        cb = self.fig.colorbar(im,
                               cax=self.cb_ax,
                               orientation='vertical',
                               ticks=self.image_defs['y_labels'],
                               format=P.FuncFormatter(
                                   eval('self.' +
                                        self.image_defs['formatter'])))
        cb.set_label(unicode(self.image_defs['title'], 'utf-8'))

        #print self.image_defs['y_labels']
        for t in self.cb_ax.get_yticklabels():
            t.set_fontsize(colorbar_fontsize)

        canvas = FigureCanvasAgg(self.fig)

        if save_file:
            self.save_plot(canvas, save_file)
Example #39
0
class trainCaffeNet:
    niter = 6000
    test_interval = 100
    display = 100

    idx = int(ceil(niter * 1.0 / display))
    train_loss = zeros(idx)
    cpu_u = zeros(idx)
    gpu_u = zeros(idx)
    gpu_m_u = zeros(idx)
    test_loss = zeros(idx)
    test_acc = zeros(idx)

    def __init__(self):
        self.time_s = datetime.datetime.now()
        caffe.set_device(0)
        caffe.set_mode_gpu()

    def train(self):
        solver = caffe.get_solver('./model/solver.prototxt')
        _train_loss = 0
        _test_loss = 0
        _accuracy = 0
        _cpu = 0
        _gpu = 0
        _gpu_m = 0

        print 'Running solvers for %d iterations...' % self.niter
        for it in range(self.niter):
            solver.step(1)
            _train_loss += solver.net.blobs['loss'].data
            _cpu += psutil.cpu_percent()
            _gpu += GPU.readl(9)
            _gpu_m += GPU.readl(10)
            if it % 100 == 0:
                print str(it) + " CPU : " + str(psutil.cpu_percent()) + '%'
                print str(it) + " GPU : " + str(GPU.readl(9)) + '%'
                print str(it) + " GPU_M : " + str(GPU.readl(10)) + '%'
                # print(psutil.virtual_memory())  #
                print str(it) + " LOSS : " + str(_train_loss / 100)
                self.train_loss[it / self.test_interval] = _train_loss / 100
                self.cpu_u[it / self.test_interval] = _cpu / 10000
                self.gpu_u[it / self.test_interval] = _gpu / 10000
                self.gpu_m_u[it / self.test_interval] = _gpu_m / 10000
                _train_loss = 0
                _cpu = 0
                _gpu = 0
                _gpu_m = 0

            if it % self.test_interval == 0:
                for test_it in range(100):
                    solver.test_nets[0].forward()
                    _test_loss += solver.test_nets[0].blobs['loss'].data
                    _accuracy += solver.test_nets[0].blobs['acc'].data

                    self.test_loss[it / self.test_interval] = _test_loss / 100
                    self.test_acc[it / self.test_interval] = _accuracy / 100
                print str(it) + " VAL_LOSS : " + str(_test_loss / 100)
                print str(it) + " VAL_ACC : " + str(_accuracy / 100)

                _test_loss = 0
                _accuracy = 0
                log.out_log(self.time_s, self.cpu_u, self.gpu_u, self.gpu_m_u,
                            self.train_loss, self.test_loss, self.test_acc)
            # if it % display == 0:
        print 'Done.'
        self.plot(self.cpu_u, self.gpu_u, self.gpu_m_u, self.train_loss,
                  self.test_loss, self.test_acc)
        print 'Out.'

    def plot(self, cpu_u, gpu_u, gpu_m_u, train_loss, test_loss, test_acc):
        _, ax1 = plt.subplots(2, sharex=True)
        ax2 = ax1[0].twinx()

        ax1[0].plot(self.display * arange(len(train_loss)), train_loss, 'g')

        ax1[0].plot(self.test_interval * arange(len(test_loss)), test_loss,
                    'y')

        ax2.plot(self.test_interval * arange(len(test_acc)), test_acc, 'r')

        ax1[0].set_xlabel('iteration')
        ax1[0].set_ylabel('loss')
        ax2.set_ylabel('accuracy')

        ax1[1].plot(self.display * arange(len(cpu_u)), cpu_u, 'g')
        ax1[1].plot(self.display * arange(len(gpu_u)), gpu_u, 'y')
        ax1[1].plot(self.display * arange(len(gpu_m_u)), gpu_m_u, 'r')

        ax1[1].set_xlabel('iteration')
        ax1[1].set_ylabel('percentage')

        plt.show()
Example #40
0
# -*- coding: utf-8 -*-
from PIL import Image
import PIL
from numpy.ma import array, zeros
from matplotlib.pyplot import axis, figure, gray, imshow, show
from scipy.misc import imresize
import graphcut
# Read an image, estimate class probabilities from two rectangular regions of the image, then build a graph:

im = array(Image.open('empire.jpg'))
im = imresize(im, 0.07, interp='bilinear')
size = im.shape[:2]
# add two rectangular training regions
labels = zeros(size)
labels[3:18, 3:18] = -1
labels[-18:-3, -18:-3] = 1
# create the graph
g = graphcut.build_bayes_graph(im, labels, kappa=1)
# segment the graph
res = graphcut.cut_graph(g, size)

figure()
graphcut.show_labeling(im, labels)
figure()
imshow(res)
gray()
axis('off')
show()
Example #41
0
def wct_significance(a1, a2, significance_level=0.95, mc_count=300, 
    verbose=False, **kwargs):
    """
    Calculates wavelet coherence significance using Monte Carlo
    simulations with 95% confidence.
    
    PARAMETERS
        a1, a2 (float) :
            Lag-1 autoregressive coefficients of both time series.
        significance_level (float, optional) :
            Significance level to use. Default is 0.95.
        mc_count (integer, optional) :
            Number of Monte Carlo simulations. Default is 300.
        verbose (boolean, optional) :
            If set to true, does not print anything on screen.
        kwargs (dictionary) :
            List of parameters like dt, dj, s0, J=-1 and wavelet.
            Please refer to the wavelet.cwt function documentation for
            further details.
    
    RETURNS
    
    """
    # Load cache if previously calculated. It is assumed that wavelet analysis
    # is performed using the wavelet's default parameters.
    aa = round(arctanh(array([a1, a2]) * 4))
    aa = abs(aa) + 0.5 * (aa < 0)
    cache = 'cache_%0.5f_%0.5f_%0.5f_%0.5f_%d_%s' % (aa[0], aa[1], kwargs['dj'],
        kwargs['s0']/kwargs['dt'], kwargs['J'], kwargs['wavelet'].name)
    cached = '%s/.klib/wavelet' % (expanduser("~"))
    try:
        dat = loadtxt('%s/%s.gz' % (cached, cache), unpack=True)
        return dat[:, 0], dat[:, 1]
    except:
        pass
    # Some output to the screen
    if not verbose:
        vS = 'Calculating wavelet coherence significance'
        vs = '%s...' % (vS)
        stdout.write(vs)
        stdout.flush()
    # Choose N so that largest scale has at least some part outside the COI
    ms = kwargs['s0'] * (2 ** (kwargs['J'] * kwargs['dj'])) / kwargs['dt']
    N = ceil(ms * 6)
    noise1 = rednoise(N, a1, 1)
    nW1 = cwt(noise1, **kwargs)
    #
    period = ones([1, N]) / nW1['freqs'][:, None]
    coi = ones([kwargs['J']+1, 1]) * nW1['coi'][None, :]
    outsidecoi = (period <= coi)
    scales = ones([1, N]) * nW1['sj'][:, None]
    #
    sig95 = zeros(kwargs['J'] + 1)
    maxscale = find(outsidecoi.any(axis=1))[-1]
    sig95[outsidecoi.any(axis=1) == False] = nan
    #
    nbins = 1000
    wlc = ma.zeros([kwargs['J']+1, nbins])
    t1 = time()
    for i in range(mc_count):
        t2 = time()
        # Generates two red-noise signals with lag-1 autoregressive 
        # coefficients given by a1 and a2
        noise1 = rednoise(N, a1, 1)
        noise2 = rednoise(N, a2, 1)
        # Calculate the cross wavelet transform of both red-noise signals
        nW1 = cwt(noise1, **kwargs)
        nW2 = cwt(noise2, **kwargs)
        nW12 = nW1['W'] * nW2['W'].conj()
        # Smooth wavelet transforms and calculate wavelet coherence
        # between both signals.
        S1 = kwargs['wavelet'].smooth(abs(nW1['W']) ** 2 / scales, 
            kwargs['dt'], nW1['dj'], nW1['sj'])
        S2 = kwargs['wavelet'].smooth(abs(nW2['W']) ** 2 / scales, 
            kwargs['dt'], nW2['dj'], nW2['sj'])
        S12 = kwargs['wavelet'].smooth(nW12 / scales, kwargs['dt'], nW1['dj'],
            nW1['sj'])
        R2 = ma.array(abs(S12) ** 2 / (S1 * S2), mask=~outsidecoi)
        # Walks through each scale outside the cone of influence and builds a
        # coherence coefficient counter.
        for s in range(maxscale):
            cd = floor(R2[s, :] * nbins)
            for j, t in enumerate(cd[~cd.mask]):
                wlc[s, t] += 1
        # Outputs some text to screen if desired
        if not verbose:
            stdout.write(len(vs) * '\b')
            vs = '%s... %s ' % (vS, profiler(mc_count, i + 1, 0, t1, t2))
            stdout.write(vs)
            stdout.flush()
    
    # After many, many, many Monte Carlo simulations, determine the 
    # significance using the coherence coefficient counter percentile.
    wlc.mask = (wlc.data == 0.)
    R2y = (arange(nbins) + 0.5) / nbins
    for s in range(maxscale):
        sel = ~wlc[s, :].mask
        P = wlc[s, sel].data.cumsum()
        P = (P - 0.5) / P[-1]
        sig95[s] = interp(significance_level, P, R2y[sel])
    
    # Save the results on cache to avoid too many computations in the future
    try:
        makedirs(cached)
    except:
        pass
    savetxt('%s/%s.gz' % (cached, cache), [sig95, nW1['sj']])
    
    # And returns the results
    return sig95, nW1['sj']
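# Hedged usage note (not from the original module): wct_significance expects the same
# keyword arguments as the accompanying cwt function; the wavelet object and parameter
# values below are assumptions, not values taken from the source.
#
#     mother = wavelet.Morlet(6)        # hypothetical mother-wavelet object with .name and .smooth
#     sig95, scales = wct_significance(a1, a2, significance_level=0.95, mc_count=300,
#                                      dt=1.0, dj=1.0 / 12, s0=2.0, J=7 * 12, wavelet=mother)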
Example #42
0
mod_yc_data_nodp = ycdata[yc_cols]
mod_yc_data = mod_yc_data_nodp.dropna(axis=0)

#limit to 1983 and on to not include Volker period
dates = pa.date_range("1/1/1983", "10/1/2009", freq="MS").to_pydatetime()
#dates = pa.date_range("6/1/1979", "12/1/2012", freq="MS").to_pydatetime()

# limit dates of yields and take logs
mod_yc_data = np.log(mod_yc_data.ix[dates])

# Maturities
mats = [12, 24, 36, 60, 84, 120]

# construct model matrices up
datatype = np.complex_
lam_0_e = ma.zeros([latent, 1], dtype=datatype)
lam_1_e = ma.zeros([latent, latent], dtype=datatype)
delta_0_e = ma.zeros([1, 1], dtype=datatype)
delta_1_e = ma.zeros([latent, 1], dtype=datatype)
mu_e = ma.zeros([latent, 1], dtype=datatype)
phi_e = ma.zeros([latent, latent], dtype=datatype)
sigma_e = ma.zeros([latent, latent], dtype=datatype)

#mask values to be estimated
lam_0_e[:, 0] = ma.masked
lam_1_e[:, :] = ma.masked
delta_0_e[:, :] = ma.masked

delta_1_e[:, :] = ma.masked
delta_1_e[:, :] = ma.nomask
delta_1_e[:, :] = 1
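# Hedged, self-contained illustration (not from the original script): ma.masked entries
# mark which model parameters are free to be estimated, and the mask can later be used
# to count or update only those entries. Toy 2x2 case:
import numpy.ma as ma

lam_1 = ma.zeros([2, 2], dtype=complex)
lam_1[:, :] = ma.masked                   # all four entries are free parameters
print(ma.getmaskarray(lam_1))             # [[ True  True] [ True  True]]
print(ma.count_masked(lam_1))             # 4 parameters to estimate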
    def test_testAverage2(self):
        # More tests of average.
        w1 = [0, 1, 1, 1, 1, 0]
        w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]
        x = arange(6)
        assert_(allclose(average(x, axis=0), 2.5))
        assert_(allclose(average(x, axis=0, weights=w1), 2.5))
        y = array([arange(6), 2.0 * arange(6)])
        assert_(
            allclose(average(y, None),
                     np.add.reduce(np.arange(6)) * 3. / 12.))
        assert_(allclose(average(y, axis=0), np.arange(6) * 3. / 2.))
        assert_(
            allclose(average(y, axis=1),
                     [average(x, axis=0),
                      average(x, axis=0) * 2.0]))
        assert_(allclose(average(y, None, weights=w2), 20. / 6.))
        assert_(
            allclose(average(y, axis=0, weights=w2),
                     [0., 1., 2., 3., 4., 10.]))
        assert_(
            allclose(average(y, axis=1),
                     [average(x, axis=0),
                      average(x, axis=0) * 2.0]))
        m1 = zeros(6)
        m2 = [0, 0, 1, 1, 0, 0]
        m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]]
        m4 = ones(6)
        m5 = [0, 1, 1, 1, 1, 1]
        assert_(allclose(average(masked_array(x, m1), axis=0), 2.5))
        assert_(allclose(average(masked_array(x, m2), axis=0), 2.5))
        assert_(average(masked_array(x, m4), axis=0) is masked)
        assert_equal(average(masked_array(x, m5), axis=0), 0.0)
        assert_equal(count(average(masked_array(x, m4), axis=0)), 0)
        z = masked_array(y, m3)
        assert_(allclose(average(z, None), 20. / 6.))
        assert_(allclose(average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5]))
        assert_(allclose(average(z, axis=1), [2.5, 5.0]))
        assert_(
            allclose(average(z, axis=0, weights=w2),
                     [0., 1., 99., 99., 4.0, 10.0]))

        a = arange(6)
        b = arange(6) * 3
        r1, w1 = average([[a, b], [b, a]], axis=1, returned=True)
        assert_equal(shape(r1), shape(w1))
        assert_equal(r1.shape, w1.shape)
        r2, w2 = average(ones((2, 2, 3)),
                         axis=0,
                         weights=[3, 1],
                         returned=True)
        assert_equal(shape(w2), shape(r2))
        r2, w2 = average(ones((2, 2, 3)), returned=True)
        assert_equal(shape(w2), shape(r2))
        r2, w2 = average(ones((2, 2, 3)),
                         weights=ones((2, 2, 3)),
                         returned=True)
        assert_(shape(w2) == shape(r2))
        a2d = array([[1, 2], [0, 4]], float)
        a2dm = masked_array(a2d, [[0, 0], [1, 0]])
        a2da = average(a2d, axis=0)
        assert_(eq(a2da, [0.5, 3.0]))
        a2dma = average(a2dm, axis=0)
        assert_(eq(a2dma, [1.0, 3.0]))
        a2dma = average(a2dm, axis=None)
        assert_(eq(a2dma, 7. / 3.))
        a2dma = average(a2dm, axis=1)
        assert_(eq(a2dma, [1.5, 4.0]))
    def test_testOddFeatures(self):
        # Test of other odd features
        x = arange(20)
        x = x.reshape(4, 5)
        x.flat[5] = 12
        assert_(x[1, 0] == 12)
        z = x + 10j * x
        assert_(eq(z.real, x))
        assert_(eq(z.imag, 10 * x))
        assert_(eq((z * conjugate(z)).real, 101 * x * x))
        z.imag[...] = 0.0

        x = arange(10)
        x[3] = masked
        assert_(str(x[3]) == str(masked))
        c = x >= 8
        assert_(count(where(c, masked, masked)) == 0)
        assert_(shape(where(c, masked, masked)) == c.shape)
        z = where(c, x, masked)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is masked)
        assert_(z[7] is masked)
        assert_(z[8] is not masked)
        assert_(z[9] is not masked)
        assert_(eq(x, z))
        z = where(c, masked, x)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is not masked)
        assert_(z[7] is not masked)
        assert_(z[8] is masked)
        assert_(z[9] is masked)
        z = masked_where(c, x)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is not masked)
        assert_(z[7] is not masked)
        assert_(z[8] is masked)
        assert_(z[9] is masked)
        assert_(eq(x, z))
        x = array([1., 2., 3., 4., 5.])
        c = array([1, 1, 1, 0, 0])
        x[2] = masked
        z = where(c, x, -x)
        assert_(eq(z, [1., 2., 0., -4., -5]))
        c[0] = masked
        z = where(c, x, -x)
        assert_(eq(z, [1., 2., 0., -4., -5]))
        assert_(z[0] is masked)
        assert_(z[1] is not masked)
        assert_(z[2] is masked)
        assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2)))
        assert_(
            eq(masked_where(greater_equal(x, 2), x),
               masked_greater_equal(x, 2)))
        assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2)))
        assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2)))
        assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
        assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2)))
        assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
        assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4]))
        assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]))
        assert_(
            eq(
                masked_inside(array(list(range(5)), mask=[1, 0, 0, 0, 0]), 1,
                              3).mask, [1, 1, 1, 1, 0]))
        assert_(
            eq(
                masked_outside(array(list(range(5)), mask=[0, 1, 0, 0, 0]), 1,
                               3).mask, [1, 1, 0, 0, 1]))
        assert_(
            eq(
                masked_equal(array(list(range(5)), mask=[1, 0, 0, 0, 0]),
                             2).mask, [1, 0, 1, 0, 0]))
        assert_(
            eq(
                masked_not_equal(array([2, 2, 1, 2, 1], mask=[1, 0, 0, 0, 0]),
                                 2).mask, [1, 0, 1, 0, 1]))
        assert_(
            eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
               [99, 99, 3, 4, 5]))
        atest = ones((10, 10, 10), dtype=np.float32)
        btest = zeros(atest.shape, MaskType)
        ctest = masked_where(btest, atest)
        assert_(eq(atest, ctest))
        z = choose(c, (-x, x))
        assert_(eq(z, [1., 2., 0., -4., -5]))
        assert_(z[0] is masked)
        assert_(z[1] is not masked)
        assert_(z[2] is masked)
        x = arange(6)
        x[5] = masked
        y = arange(6) * 10
        y[2] = masked
        c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0])
        cm = c.filled(1)
        z = where(c, x, y)
        zm = where(cm, x, y)
        assert_(eq(z, zm))
        assert_(getmask(zm) is nomask)
        assert_(eq(zm, [0, 1, 2, 30, 40, 50]))
        z = where(c, masked, 1)
        assert_(eq(z, [99, 99, 99, 1, 1, 1]))
        z = where(c, 1, masked)
        assert_(eq(z, [99, 1, 1, 99, 99, 99]))
Example #45
0
def mimic(doPrint=False,
          doPlot=False,
          RossHS=False,
          RecipFlag=False,
          thisSza=None):
    '''
    A test method to reproduce the results in Wanner et al. 1995.
    All arguments are optional; the main options are:
            doPrint=True    : print results to stdout (default doPrint=False)
            doPlot=True     : plot the kernels (default doPlot=False)

    The method returns:
	VZA,SZA,RAA,RossThick,RossThin,LiSparse,LiDense,Roujean,LiTransit
    where all are numpy arrays of dimensions 3 x nSamples
    so:
	VZA[0,:],RossThick[0,:] are the results for sza = 0.0
	VZA[1,:],RossThick[1,:] are the results for sza = 30.0
        VZA[2,:],RossThick[2,:] are the results for sza = 60.0

    '''
    # set up the angles
    r = 89  # do results for +/- r degrees
    if thisSza is None:
        SZAS = ma.array([0.0, -30.0, -60.0])  # sza
    else:
        SZAS = ma.array(thisSza)
    vza = numpy.array(range(2 * r + 1)) * 1.0 - r
    # set up storage info
    RossThick = ma.zeros([3, len(vza)])
    RossThin = ma.zeros([3, len(vza)])
    LiSparse = ma.zeros([3, len(vza)])
    LiDense = ma.zeros([3, len(vza)])
    Roujean = ma.zeros([3, len(vza)])
    LiTransit = ma.zeros([3, len(vza)])
    SZA = ma.zeros([3, len(vza)])
    VZA = ma.zeros([3, len(vza)])
    RAA = ma.zeros([3, len(vza)])
    # fill the angle info
    RossHS = RossHS
    for i in range(len(SZAS)):
        SZA[i, :] = SZAS[i]
        VZA[i, :] = vza[:]
        RAA[i, :] = 0.0
        # do the kernels
        kk = Kernels(VZA[i, :],
                     SZA[i, :],
                     RAA[i, :],
                     RossHS=RossHS,
                     MODISSPARSE=True,
                     RecipFlag=RecipFlag,
                     normalise=1,
                     doIntegrals=False,
                     LiType='Dense',
                     RossType='Thick')
        RossThick[i, :] = kk.Ross[:]
        LiDense[i, :] = kk.Li[:]
        if doPrint == True:
            kk.printKernels(file='RossThickLiDense.' + str(SZAS[i]) + '.dat')
            kk.printer('')
        kk = Kernels(VZA[i, :],
                     SZA[i, :],
                     RAA[i, :],
                     RossHS=RossHS,
                     MODISSPARSE=True,
                     RecipFlag=RecipFlag,
                     normalise=1,
                     doIntegrals=False,
                     LiType='Sparse',
                     RossType='Thin')
        RossThin[i, :] = kk.Ross[:]
        LiSparse[i, :] = kk.Li[:]
        if doPrint == True:
            kk.printKernels(file='RossThinLiSparse.' + str(SZAS[i]) + '.dat')
            kk.printer('')
        kk = Kernels(VZA[i, :],
                     SZA[i, :],
                     RAA[i, :],
                     RossHS=RossHS,
                     MODISSPARSE=True,
                     RecipFlag=RecipFlag,
                     normalise=1,
                     doIntegrals=False,
                     LiType='Roujean',
                     RossType='Thin')
        Roujean[i, :] = kk.Li[:]
        if doPrint == True:
            kk.printKernels(file='RossThinRoujean.' + str(SZAS[i]) + '.dat')
            kk.printer('')
        kk = Kernels(VZA[i, :],
                     SZA[i, :],
                     RAA[i, :],
                     RossHS=RossHS,
                     MODISSPARSE=True,
                     RecipFlag=RecipFlag,
                     normalise=1,
                     doIntegrals=False,
                     LiType='Transit',
                     RossType='Thin')
        LiTransit[i, :] = kk.Li[:]
        if doPrint == True:
            kk.printKernels(file='RossThinLiTransit.' + str(SZAS[i]) + '.dat')
            kk.printer('')
    if (doPlot == True):
        import pylab
        x = [-90.0, 90.0]
        y = [0.0, 0.0]
        for i in range(len(SZAS)):
            sza = SZAS[i]
            pylab.clf()
            pylab.xlabel('View Zenith Angle')
            pylab.ylabel('Kernel Value')
            pylab.title('Solar Zenith Angle ' + str(sza) + ' Degrees')
            pylab.plot(x, y)
            pylab.plot(kk.vzaDegrees, RossThick[i, :], label='RThick')
            pylab.plot(kk.vzaDegrees, RossThin[i, :], label='RThin')
            pylab.plot(kk.vzaDegrees, LiSparse[i, :], label='LiSp')
            pylab.plot(kk.vzaDegrees, LiDense[i, :], label='LiDen')
            pylab.plot(kk.vzaDegrees, Roujean[i, :], label='Roujean')
            pylab.plot(kk.vzaDegrees, LiTransit[i, :], label='LiTrans')
            pylab.axis([-90.0, 90.0, -3.0, 3.0])
            pylab.legend(loc=0)
            pylab.show()

    return VZA, SZA, RAA, RossThick, RossThin, LiSparse, LiDense, Roujean, LiTransit
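# Hedged usage note (not from the original file): mimic() drives itself; a call that
# reproduces the Wanner et al. (1995) figures and unpacks the nine 3 x nSamples arrays
# described in the docstring might look like:
#
#     VZA, SZA, RAA, RossThick, RossThin, LiSparse, LiDense, Roujean, LiTransit = \
#         mimic(doPrint=False, doPlot=True, RossHS=False, RecipFlag=False)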
dataset = local.load_file(
    'AOD_monthly_2000-MAR_2016-FEB_from_MISR_L3_JOINT.nc', 'nonabsorbing_ave')
''' Subset the data for East Asia'''
Bounds = ds.Bounds(lat_min=20, lat_max=57.7, lon_min=90, lon_max=150)
dataset = dsp.subset(dataset, Bounds)
'''The original dataset includes nonabsorbing AOD values between March 2000 and February 2016.
dsp.temporal_subset will extract data in September-October-November.'''
dataset_SON = dsp.temporal_subset(dataset,
                                  month_start=9,
                                  month_end=11,
                                  average_each_year=True)

ny, nx = dataset_SON.values.shape[1:]

# multi-year mean aod
clim_aod = ma.zeros([3, ny, nx])

clim_aod[0, :] = ma.mean(dataset_SON.values, axis=0)  # 16-year mean
clim_aod[1, :] = ma.mean(dataset_SON.values[-5:, :],
                         axis=0)  # the last 5-year mean
clim_aod[2, :] = dataset_SON.values[-1, :]  # the last year's value

# plot clim_aod (3 subplots)
plotter.draw_contour_map(
    clim_aod,
    dataset_SON.lats,
    dataset_SON.lons,
    fname='nonabsorbing_AOD_clim_East_Asia_Sep-Nov',
    gridshape=[1, 3],
    subtitles=['2000-2015: 16 years', '2011-2015: 5 years', '2015: 1 year'],
    clevs=np.arange(21) * 0.02)
Example #47
0
def rebin(Data, n_bins_combined):
    """The function that acctually does the rebinning on a Data Block."""

    nt = Data.data.shape[0]
    new_nt = nt // n_bins_combined
    new_shape = (new_nt, ) + Data.data.shape[1:]
    unmask = sp.logical_not(ma.getmaskarray(Data.data))
    data = Data.data.filled(0)
    # Allocate memory for the rebinned data.
    new_data = ma.zeros(new_shape, dtype=data.dtype)
    counts = sp.zeros(new_shape, dtype=int)
    # Add up the bins to be combined.
    for ii in range(n_bins_combined):
        new_data += data[ii:new_nt * n_bins_combined:n_bins_combined, ...]
        counts += unmask[ii:new_nt * n_bins_combined:n_bins_combined, ...]
    new_data[counts == 0] = ma.masked
    counts[counts == 0] = 1
    new_data /= counts
    Data.set_data(new_data)
    # Now deal with all the other records that aren't the main data.
    for field_name in Data.field.iterkeys():
        # DATE-OBS is a string field so we have to write special code for it.
        if field_name == "DATE-OBS":
            time_field = Data.field[field_name]
            new_field = sp.empty(new_nt, dtype=Data.field[field_name].dtype)
            # Convert to float, average, then convert back to a string.
            time_float = utils.time2float(time_field)
            for ii in range(new_nt):
                tmp_time = sp.mean(time_float[n_bins_combined *
                                              ii:n_bins_combined * (ii + 1)])
                new_field[ii] = utils.float2time(tmp_time)
            Data.set_field(field_name,
                           new_field,
                           axis_names=Data.field_axes[field_name],
                           format=Data.field_formats[field_name])
            continue
        # Only change fields that have a 'time' axis.
        try:
            time_axis = list(Data.field_axes[field_name]).index('time')
        except ValueError:
            continue
        # For now, the time axis has to be the first axis.
        if time_axis != 0:
            msg = "Expected time to be the first axis for all fields."
            raise NotImplementedError(msg)
        field_data = Data.field[field_name]
        if not field_data.dtype.name == "float64":
            msg = "Field data type is not float. Handle explicitly."
            raise NotImplementedError(msg)
        new_field = sp.empty(field_data.shape[:time_axis] + (new_nt, ) +
                             field_data.shape[time_axis + 1:],
                             dtype=field_data.dtype)
        for ii in range(new_nt):
            tmp_data = sp.sum(
                field_data[n_bins_combined * ii:n_bins_combined * (ii + 1),
                           ...], 0)
            tmp_data /= n_bins_combined
            new_field[ii, ...] = tmp_data
        Data.set_field(field_name,
                       new_field,
                       axis_names=Data.field_axes[field_name],
                       format=Data.field_formats[field_name])
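# Hedged, self-contained illustration (not from the original pipeline): the mask-aware
# rebinning idea above on a toy 1-D masked series. Each output bin is the mean of the
# unmasked input samples it covers; bins with no unmasked samples stay masked.
import numpy as np
import numpy.ma as ma

n_bins_combined = 3
data = ma.array(np.arange(9, dtype=float), mask=[0, 1, 0, 1, 1, 1, 0, 0, 0])
new_nt = data.shape[0] // n_bins_combined

unmask = np.logical_not(ma.getmaskarray(data))
filled = data.filled(0)
new_data = ma.zeros(new_nt)
counts = np.zeros(new_nt, dtype=int)
for ii in range(n_bins_combined):
    new_data += filled[ii:new_nt * n_bins_combined:n_bins_combined]
    counts += unmask[ii:new_nt * n_bins_combined:n_bins_combined]
new_data[counts == 0] = ma.masked   # bins with no valid samples
counts[counts == 0] = 1             # avoid dividing by zero
new_data /= counts
print(new_data)                     # [1.0 -- 7.0]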
Example #48
0
def comp_g(
    dert__
):  # cross-comp of g in 2x2 kernels, between derts in ma.stack dert__

    # initialize return variable
    new_dert__ = ma.zeros(
        (dert__.shape[0], dert__.shape[1] - 1, dert__.shape[2] - 1))
    new_dert__.mask = True  # initialize mask
    ig__, idy__, idx__, gg__, dgy__, dgx__, mg__ = new_dert__  # assign 'views'. Use [:] to update views

    # Unpack relevant params
    g__, dy__, dx__ = dert__[[3, 4, 5]]  # g, dy, dx -> local i, idy, idx
    g__.data[np.where(
        g__.data == 0
    )] = 1  # replace 0 values with 1 to avoid error, not needed in high-g blobs?
    ''' 
    for all operations below: only mask kernels with more than one masked dert 
    '''
    majority_mask = (
        g__[:-1, :-1].mask.astype(int) + g__[:-1, 1:].mask.astype(int) +
        g__[1:, 1:].mask.astype(int) + g__[1:, :-1].mask.astype(int)) > 1
    g__.mask = dy__.mask = dx__.mask = majority_mask

    g0__, dy0__, dx0__ = g__[:-1, :-1].data, dy__[:-1, :-1].data, dx__[:-1, :-1].data  # top left
    g1__, dy1__, dx1__ = g__[:-1, 1:].data, dy__[:-1, 1:].data, dx__[:-1, 1:].data  # top right
    g2__, dy2__, dx2__ = g__[1:, 1:].data, dy__[1:, 1:].data, dx__[1:, 1:].data  # bottom right
    g3__, dy3__, dx3__ = g__[1:, :-1].data, dy__[1:, :-1].data, dx__[1:, :-1].data  # bottom left

    sin0__ = dy0__ / g0__
    cos0__ = dx0__ / g0__
    sin1__ = dy1__ / g1__
    cos1__ = dx1__ / g1__
    sin2__ = dy2__ / g2__
    cos2__ = dx2__ / g2__
    sin3__ = dy3__ / g3__
    cos3__ = dx3__ / g3__
    '''
    cosine of difference between diagonally opposite angles, in vector representation
    '''
    cos_da0__ = (cos2__ * cos0__) + (sin2__ * sin0__)  # top left to bottom right
    cos_da1__ = (cos3__ * cos1__) + (sin3__ * sin1__)  # top right to bottom left

    dgy__[:] = ((g3__ + g2__) - (g0__ * cos_da0__ + g1__ * cos_da1__))
    # y-decomposed cosine difference between gs
    dgx__[:] = ((g1__ + g2__) - (g0__ * cos_da0__ + g3__ * cos_da1__))
    # x-decomposed cosine difference between gs

    gg__[:] = ma.hypot(dgy__, dgx__)  # gradient of gradient

    mg0__ = ma.minimum(g0__, g2__) * (cos_da0__ + 1)  # +1 to make all positive
    mg1__ = ma.minimum(g1__, g3__) * (cos_da1__ + 1)
    mg__[:] = mg0__ + mg1__  # match of gradient

    ig__[:] = g__[:-1, :-1]  # remove last row and column to align with derived params
    idy__[:] = dy__[:-1, :-1]
    idx__[:] = dx__[:-1, :-1]  # -> idy, idx to compute cos for comp rg
    # unnecessary?:
    gg__.mask = mg__.mask = dgy__.mask = dgx__.mask = majority_mask
    '''
    next comp_rg will use g, dy, dx
    next comp_gg will use gg, dgy, dgx
    '''
    return new_dert__  # new_dert__ has been updated along with 'view' arrays: ig__, idy__, idx__, gg__, dgy__, dgx__, mg__
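The seven per-parameter arrays above are plain views obtained by unpacking the first axis of new_dert__, so writing through them with [:] fills the parent array in place. A tiny sketch of that behaviour (hypothetical shapes, nothing masked):

import numpy.ma as ma

new_dert__ = ma.zeros((3, 4, 4))
a__, b__, c__ = new_dert__       # unpacking yields one (4, 4) view per row

a__[:] = 1.0                     # writing through the view with [:] ...
print(new_dert__[0, 0, 0])       # ... updates the parent array: prints 1.0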
示例#49
0
upper = 2100

# CMIP6 models
cmip6_models = [
    'ACCESS-ESM1-5', 'BCC-CSM2-MR', 'CanESM5', 'CNRM-ESM2-1', 'IPSL-CM6A-LR',
    'MIROC-ES2L', 'UKESM1-0-LL'
]
n_models = len(cmip6_models)
model_shapes = ['o', '^', '+', 's', '*', 'd', 'x']

# SSP scenarios
ssp_options = ['ssp126', 'ssp245', 'ssp585']
ssp_options_length = len(ssp_options)

# defining empty numpy array to save values
x_array = ma.zeros((len(ssp_options), len(cmip6_models)))
y_array = ma.zeros((len(ssp_options), len(cmip6_models)))

#%%
# Loop through each ssp run being considered
for ssp_option in range(0, ssp_options_length):
    ssp = ssp_options[ssp_option]  # selecting the ssp scenarios

    # for loop for each CMIP6 model
    for model_i in range(0, n_models):
        model = cmip6_models[model_i]  # selecting the models

        print(ssp, model)

        #%% historical soil turnover time
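The snippet above only allocates the empty (scenario, model) masked arrays before looping; a minimal sketch, with made-up values and a shortened model list, of how such arrays are typically filled so that missing results can be masked and then ignored by later statistics:

import numpy as np
import numpy.ma as ma

ssp_options = ['ssp126', 'ssp245', 'ssp585']
cmip6_models = ['ACCESS-ESM1-5', 'BCC-CSM2-MR', 'CanESM5']  # shortened list for the sketch

x_array = ma.zeros((len(ssp_options), len(cmip6_models)))
for i, ssp in enumerate(ssp_options):
    for j, model in enumerate(cmip6_models):
        value = np.random.rand()                              # stand-in for the real diagnostic
        x_array[i, j] = value if value > 0.1 else ma.masked   # mask missing results

print(x_array.mean(axis=1))                                   # masked entries are ignored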
示例#50
0
    def estimate_shift2D(self,
                         reference='current',
                         correlation_threshold=None,
                         chunk_size=30,
                         roi=None,
                         normalize_corr=False,
                         sobel=True,
                         medfilter=True,
                         hanning=True,
                         plot=False,
                         dtype='float',
                         show_progressbar=None,
                         sub_pixel_factor=1):
        """Estimate the shifts in an image using phase correlation.

        This method can only estimate the shift by comparing
        bi-dimensional features that should not change position
        between frames. To decrease the memory usage, the time of
        computation and the accuracy of the results it is convenient
        to select a region of interest by setting the ``roi`` argument.

        Parameters
        ----------
        reference : {'current', 'cascade' ,'stat'}
            If 'current' (default) the image at the current
            coordinates is taken as reference. If 'cascade' each image
            is aligned with the previous one. If 'stat' the translation
            of every image with all the rest is estimated and by
            performing statistical analysis on the result the
            translation is estimated.
        correlation_threshold : {None, 'auto', float}
            This parameter is only relevant when reference='stat'.
            If float, the shift estimations with a maximum correlation
            value lower than the given value are not used to compute
            the estimated shifts. If 'auto' the threshold is calculated
            automatically as the minimum maximum correlation value
            of the automatically selected reference image.
        chunk_size : {None, int}
            If int and reference='stat' the number of images used
            as reference are limited to the given value.
        roi : tuple of ints or floats (left, right, top, bottom)
            Define the region of interest. If int(float) the position
            is given axis index(value). Note that ROIs can be used
            in place of a tuple.
        normalize_corr : bool, default False
            If True, use phase correlation to align the images, otherwise
            use cross correlation.
        sobel : bool, default True
            Apply a Sobel filter for edge enhancement
        medfilter : bool, default True
            Apply a median filter for noise reduction
        hanning : bool, default True
            Apply a 2D hanning filter
        plot : bool or 'reuse'
            If True plots the images after applying the filters and
            the phase correlation. If 'reuse', it will also plot the images,
            but it will only use one figure, and continuously update the images
            in that figure as it progresses through the stack.
        dtype : str or dtype
            Typecode or data-type in which the calculations must be
            performed.
        %s
        sub_pixel_factor : float
            Estimate shifts with a sub-pixel accuracy of 1/sub_pixel_factor
            parts of a pixel. Default is 1, i.e. no sub-pixel accuracy.

        Returns
        -------
        shifts : list of array
            List of estimated shifts

        Notes
        -----
        The statistical analysis approach to the translation estimation
        when using ``reference='stat'`` roughly follows [Schaffer2004]_.
        If you use it please cite their article.

        References
        ----------
        .. [Schaffer2004] Schaffer, Bernhard, Werner Grogger, and Gerald Kothleitner.
           “Automated Spatial Drift Correction for EFTEM Image Series.”
           Ultramicroscopy 102, no. 1 (December 2004): 27–36.

        See Also
        --------
        * :py:meth:`~._signals.signal2d.Signal2D.align2D`

        """
        if show_progressbar is None:
            show_progressbar = preferences.General.show_progressbar
        self._check_signal_dimension_equals_two()
        if roi is not None:
            # Get the indices of the roi
            yaxis = self.axes_manager.signal_axes[1]
            xaxis = self.axes_manager.signal_axes[0]
            roi = tuple([xaxis._get_index(i) for i in roi[2:]] +
                        [yaxis._get_index(i) for i in roi[:2]])

        ref = None if reference == 'cascade' else \
            self.__call__().copy()
        shifts = []
        nrows = None
        images_number = self.axes_manager._max_index + 1
        if plot == 'reuse':
            # Reuse figure for plots
            plot = plt.figure()
        if reference == 'stat':
            nrows = images_number if chunk_size is None else \
                min(images_number, chunk_size)
            pcarray = ma.zeros((nrows, self.axes_manager._max_index + 1,
                                ),
                               dtype=np.dtype([('max_value', np.float64),
                                               ('shift', np.int32,
                                                (2,))]))
            nshift, max_value = estimate_image_shift(
                self(),
                self(),
                roi=roi,
                sobel=sobel,
                medfilter=medfilter,
                hanning=hanning,
                normalize_corr=normalize_corr,
                plot=plot,
                dtype=dtype,
                sub_pixel_factor=sub_pixel_factor)
            np.fill_diagonal(pcarray['max_value'], max_value)
            pbar_max = nrows * images_number
        else:
            pbar_max = images_number

        # Main iteration loop. Fills the rows of pcarray when reference
        # is stat
        with progressbar(total=pbar_max,
                         disable=not show_progressbar,
                         leave=True) as pbar:
            for i1, im in enumerate(self._iterate_signal()):
                if reference in ['current', 'cascade']:
                    if ref is None:
                        ref = im.copy()
                        shift = np.array([0, 0])
                    nshift, max_val = estimate_image_shift(
                        ref, im, roi=roi, sobel=sobel, medfilter=medfilter,
                        hanning=hanning, plot=plot,
                        normalize_corr=normalize_corr, dtype=dtype,
                        sub_pixel_factor=sub_pixel_factor)
                    if reference == 'cascade':
                        shift += nshift
                        ref = im.copy()
                    else:
                        shift = nshift
                    shifts.append(shift.copy())
                    pbar.update(1)
                elif reference == 'stat':
                    if i1 == nrows:
                        break
                    # Iterate to fill the columns of pcarray
                    for i2, im2 in enumerate(
                            self._iterate_signal()):
                        if i2 > i1:
                            nshift, max_value = estimate_image_shift(
                                im,
                                im2,
                                roi=roi,
                                sobel=sobel,
                                medfilter=medfilter,
                                hanning=hanning,
                                normalize_corr=normalize_corr,
                                plot=plot,
                                dtype=dtype,
                                sub_pixel_factor=sub_pixel_factor)
                            pcarray[i1, i2] = max_value, nshift
                        del im2
                        pbar.update(1)
                    del im
        if reference == 'stat':
            # Select the reference image as the one that has the
            # higher max_value in the row
            sqpcarr = pcarray[:, :nrows]
            sqpcarr['max_value'][:] = symmetrize(sqpcarr['max_value'])
            sqpcarr['shift'][:] = antisymmetrize(sqpcarr['shift'])
            ref_index = np.argmax(pcarray['max_value'].min(1))
            self.ref_index = ref_index
            shifts = (pcarray['shift'] +
                      pcarray['shift'][ref_index, :nrows][:, np.newaxis])
            if correlation_threshold is not None:
                if correlation_threshold == 'auto':
                    correlation_threshold = \
                        (pcarray['max_value'].min(0)).max()
                    _logger.info("Correlation threshold = %1.2f",
                                 correlation_threshold)
                shifts[pcarray['max_value'] <
                       correlation_threshold] = ma.masked
                shifts.mask[ref_index, :] = False

            shifts = shifts.mean(0)
        else:
            shifts = np.array(shifts)
            del ref
        return shifts
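With reference='stat' the method records one (max correlation, shift) pair per image pair in the structured masked array pcarray, and shifts whose correlation falls below correlation_threshold are masked before the final average. A reduced sketch of that masking-and-averaging step with made-up correlation values:

import numpy as np
import numpy.ma as ma

nrows, nimages = 3, 3
pcarray = ma.zeros((nrows, nimages),
                   dtype=np.dtype([('max_value', np.float64),
                                   ('shift', np.int32, (2,))]))
pcarray['max_value'] = [[1.0, 0.9, 0.2],
                        [0.9, 1.0, 0.8],
                        [0.2, 0.8, 1.0]]

low_corr = np.asarray(pcarray['max_value'] < 0.5)  # image pairs with too little correlation
shifts = ma.ones((nrows, nimages, 2))              # dummy per-pair shift estimates
shifts[low_corr] = ma.masked                       # drop low-correlation estimates
print(shifts.mean(0))                              # per-image mean over the remaining rows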
示例#51
0
def project(cube, target_proj, nx=None, ny=None):
    """
    Nearest neighbour regrid to a specified target projection.

    Return a new cube that is the result of projecting a cube with 1 or 2
    dimensional latitude-longitude coordinates from its coordinate system into
    a specified projection e.g. Robinson or Polar Stereographic.
    This function is intended to be used in cases where the cube's coordinates
    prevent one from directly visualising the data, e.g. when the longitude
    and latitude are two dimensional and do not make up a regular grid.

    Args:
        * cube
            An instance of :class:`iris.cube.Cube`.
        * target_proj
            An instance of the Cartopy Projection class, or an instance of
            :class:`iris.coord_systems.CoordSystem` from which a projection
            will be obtained.
    Kwargs:
        * nx
            Desired number of sample points in the x direction for a domain
            covering the globe.
        * ny
            Desired number of sample points in the y direction for a domain
            covering the globe.

    Returns:
        An instance of :class:`iris.cube.Cube` and a list describing the
        extent of the projection.

    .. note::

        This function assumes global data and will if necessary extrapolate
        beyond the geographical extent of the source cube using a nearest
        neighbour approach. nx and ny then include those points which are
        outside of the target projection.

    .. note::

        Masked arrays are handled by passing their masked status to the
        resulting nearest neighbour values.  If masked, the value in the
        resulting cube is set to 0.

    .. warning::

        This function uses a nearest neighbour approach rather than any form
        of linear/non-linear interpolation to determine the data value of each
        cell in the resulting cube. Consequently it may have an adverse effect
        on the statistics of the data e.g. the mean and standard deviation
        will not be preserved.

    """
    try:
        lon_coord, lat_coord = _get_lon_lat_coords(cube)
    except IndexError:
        raise ValueError('Cannot get latitude/longitude '
                         'coordinates from cube {!r}.'.format(cube.name()))

    if lat_coord.coord_system != lon_coord.coord_system:
        raise ValueError('latitude and longitude coords appear to have '
                         'different coordinate systems.')

    if lon_coord.units != 'degrees':
        lon_coord = lon_coord.copy()
        lon_coord.convert_units('degrees')
    if lat_coord.units != 'degrees':
        lat_coord = lat_coord.copy()
        lat_coord.convert_units('degrees')

    # Determine source coordinate system
    if lat_coord.coord_system is None:
        # Assume WGS84 latlon if unspecified
        warnings.warn('Coordinate system of latitude and longitude '
                      'coordinates is not specified. Assuming WGS84 Geodetic.')
        orig_cs = iris.coord_systems.GeogCS(semi_major_axis=6378137.0,
                                            inverse_flattening=298.257223563)
    else:
        orig_cs = lat_coord.coord_system

    # Convert to cartopy crs
    source_cs = orig_cs.as_cartopy_crs()

    # Obtain coordinate arrays (ignoring bounds) and convert to 2d
    # if not already.
    source_x = lon_coord.points
    source_y = lat_coord.points
    if source_x.ndim != 2 or source_y.ndim != 2:
        source_x, source_y = _meshgrid(source_x, source_y)

    # Calculate target grid
    target_cs = None
    if isinstance(target_proj, iris.coord_systems.CoordSystem):
        target_cs = target_proj
        target_proj = target_proj.as_cartopy_projection()

    # Resolution of new grid
    if nx is None:
        nx = source_x.shape[1]
    if ny is None:
        ny = source_x.shape[0]

    target_x, target_y, extent = cartopy.img_transform.mesh_projection(
        target_proj, nx, ny)

    # Determine dimension mappings - expect either 1d or 2d
    if lat_coord.ndim != lon_coord.ndim:
        raise ValueError("The latitude and longitude coordinates have "
                         "different dimensionality.")

    latlon_ndim = lat_coord.ndim
    lon_dims = cube.coord_dims(lon_coord)
    lat_dims = cube.coord_dims(lat_coord)

    if latlon_ndim == 1:
        xdim = lon_dims[0]
        ydim = lat_dims[0]
    elif latlon_ndim == 2:
        if lon_dims != lat_dims:
            raise ValueError("The 2d latitude and longitude coordinates "
                             "correspond to different dimensions.")
        # If coords are 2d assume that grid is ordered such that x corresponds
        # to the last dimension (shortest stride).
        xdim = lon_dims[1]
        ydim = lon_dims[0]
    else:
        raise ValueError('Expected the latitude and longitude coordinates '
                         'to have 1 or 2 dimensions, got {} and '
                         '{}.'.format(lat_coord.ndim, lon_coord.ndim))

    # Create array to store regridded data
    new_shape = list(cube.shape)
    new_shape[xdim] = nx
    new_shape[ydim] = ny
    new_data = ma.zeros(new_shape, cube.data.dtype)

    # Create iterators to step through cube data in lat long slices
    new_shape[xdim] = 1
    new_shape[ydim] = 1
    index_it = np.ndindex(*new_shape)
    if lat_coord.ndim == 1 and lon_coord.ndim == 1:
        slice_it = cube.slices([lat_coord, lon_coord])
    elif lat_coord.ndim == 2 and lon_coord.ndim == 2:
        slice_it = cube.slices(lat_coord)
    else:
        raise ValueError('Expected the latitude and longitude coordinates '
                         'to have 1 or 2 dimensions, got {} and '
                         '{}.'.format(lat_coord.ndim, lon_coord.ndim))

#    # Mask out points outside of extent in source_cs - disabled until
#    # a way to specify global/limited extent is agreed upon and code
#    # is generalised to handle -180 to +180, 0 to 360 and >360 longitudes.
#    source_desired_xy = source_cs.transform_points(target_proj,
#                                                   target_x.flatten(),
#                                                   target_y.flatten())
#    if np.any(source_x < 0.0) and np.any(source_x > 180.0):
#        raise ValueError('Unable to handle range of longitude.')
#    # This does not work in all cases e.g. lon > 360
#    if np.any(source_x > 180.0):
#        source_desired_x = (source_desired_xy[:, 0].reshape(ny, nx) +
#                            360.0) % 360.0
#    else:
#        source_desired_x = source_desired_xy[:, 0].reshape(ny, nx)
#    source_desired_y = source_desired_xy[:, 1].reshape(ny, nx)
#    outof_extent_points = ((source_desired_x < source_x.min()) |
#                           (source_desired_x > source_x.max()) |
#                           (source_desired_y < source_y.min()) |
#                           (source_desired_y > source_y.max()))
#    # Make array a mask by default (rather than a single bool) to allow mask
#    # to be assigned to slices.
#    new_data.mask = np.zeros(new_shape)

    # Step through cube data, regrid onto desired projection and insert results
    # in new_data array
    for index, ll_slice in zip(index_it, slice_it):
        # Regrid source data onto target grid
        index = list(index)
        index[xdim] = slice(None, None)
        index[ydim] = slice(None, None)
        index = tuple(index)  # Numpy>=1.16 : index with tuple, *not* list.
        new_data[index] = cartopy.img_transform.regrid(ll_slice.data, source_x,
                                                       source_y, source_cs,
                                                       target_proj, target_x,
                                                       target_y)


#    # Mask out points beyond extent
#    new_data[index].mask[outof_extent_points] = True

    # Remove mask if it is unnecessary
    if not np.any(new_data.mask):
        new_data = new_data.data

    # Create new cube
    new_cube = iris.cube.Cube(new_data)

    # Add new grid coords
    x_coord = iris.coords.DimCoord(target_x[0, :],
                                   'projection_x_coordinate',
                                   units='m',
                                   coord_system=copy.copy(target_cs))
    y_coord = iris.coords.DimCoord(target_y[:, 0],
                                   'projection_y_coordinate',
                                   units='m',
                                   coord_system=copy.copy(target_cs))

    new_cube.add_dim_coord(x_coord, xdim)
    new_cube.add_dim_coord(y_coord, ydim)

    # Add resampled lat/lon in original coord system
    source_desired_xy = source_cs.transform_points(target_proj,
                                                   target_x.flatten(),
                                                   target_y.flatten())
    new_lon_points = source_desired_xy[:, 0].reshape(ny, nx)
    new_lat_points = source_desired_xy[:, 1].reshape(ny, nx)
    new_lon_coord = iris.coords.AuxCoord(new_lon_points,
                                         standard_name='longitude',
                                         units='degrees',
                                         coord_system=orig_cs)
    new_lat_coord = iris.coords.AuxCoord(new_lat_points,
                                         standard_name='latitude',
                                         units='degrees',
                                         coord_system=orig_cs)
    new_cube.add_aux_coord(new_lon_coord, [ydim, xdim])
    new_cube.add_aux_coord(new_lat_coord, [ydim, xdim])

    coords_to_ignore = set()
    coords_to_ignore.update(cube.coords(contains_dimension=xdim))
    coords_to_ignore.update(cube.coords(contains_dimension=ydim))
    for coord in cube.dim_coords:
        if coord not in coords_to_ignore:
            new_cube.add_dim_coord(coord.copy(), cube.coord_dims(coord))
    for coord in cube.aux_coords:
        if coord not in coords_to_ignore:
            new_cube.add_aux_coord(coord.copy(), cube.coord_dims(coord))
    discarded_coords = coords_to_ignore.difference([lat_coord, lon_coord])
    if discarded_coords:
        warnings.warn('Discarding coordinates that share dimensions with '
                      '{} and {}: {}'.format(
                          lat_coord.name(), lon_coord.name(),
                          [coord.name() for coord in discarded_coords]))

    # TODO handle derived coords/aux_factories

    # Copy metadata across
    new_cube.metadata = cube.metadata

    return new_cube, extent
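new_data above is allocated with ma.zeros so that a mask can be attached per regridded slice, and the mask is dropped again at the end if nothing was actually masked. A minimal sketch of that allocate/fill/strip pattern, with a hypothetical shape and random data standing in for the regridded slices:

import numpy as np
import numpy.ma as ma

new_shape = (2, 5, 5)                    # e.g. (time, y, x)
new_data = ma.zeros(new_shape, dtype=np.float32)

for t in range(new_shape[0]):
    new_data[t] = np.random.rand(5, 5)   # stand-in for one regridded slice

if not np.any(new_data.mask):            # no slice set a mask ...
    new_data = new_data.data             # ... so fall back to a plain ndarray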
示例#52
0
def create_random_synthetic_ifgs(volcanoes,
                                 defo_sources,
                                 n_ifgs,
                                 n_pix=224,
                                 outputs=['uuu'],
                                 intermediate_figure=False,
                                 coh_scale=5000,
                                 coh_threshold=0.7,
                                 coh_interpolation_threshold=5e3,
                                 min_deformation=0.05,
                                 max_deformation=0.25,
                                 snr_threshold=2.0,
                                 turb_aps_mean=0.02,
                                 turb_aps_length=5000,
                                 turb_aps_interpolation_threshold=5e3,
                                 topo_aps_mean=56.0,
                                 topo_aps_var=2.0):
    """
    A function to generate n random synthetic interferograms at subaerial volcanoes in the Smithsonian database at SRTM3 resolution (i.e. ~90m).  Different deformation
    sources are supported (no deformation, point (Mogi), sill or dyke), topographically correlated and turbulent atmospheric phase screens (APS) are added,
    and areas of incoherence are synthesised.  The outputs are rank 4 arrays with channels last (i.e. n_ifgs x ny x nx x 3), and can be in a variety of
    styles (e.g. unwrapped across 3 channels, or unwrapped in channels 1 and 2 and the dem in 3).  The paper Gaddes et al. (in prep) describes this
    in more detail.  
    
    General structure:
            open_dem                            - these are required for making coastline and a topo correlated APS
            coherence_mask                      - synthesise areas of incoherence.  
                atmosphere_turb                 - generates the spatially correlated noise which is used to create areas of incoherence.  
            create_random_defo_m                - creates the random source_kwargs (e.g. depth/opening) and checks signals are of correct magnitude
                deformation_wrapper             - prepare grids in meters etc. and project 3D deformation to satellite LOS
                    deformation_Mogi            - if deformation is Mogi, take source_kwargs and make 3d surface deformation
                    deformation_eq_dyke_sill    - if an Okada dislocation, take source_kwargs and make 3d surface deformation
            def_and_dem_translate               - try random locations of the deformation signals and see if on land and in a coherent area.  
            atmosphere_turb                     - generate a turbulent APS
            atmosphere_topo                     - generate a topo correlated APS
            check_def_visible                   - check that the signal to noise ratio (SNR) is acceptable and the deformation pattern hasn't disappeared.  
            combine_signals                     - combine signals and return in different 3 channel formats (i.e. for use with AlexNet etc.)
    
    Inputs:
        volcanoes | list of dicts | each volcano is a dictionary in the list, and contains various keys and values.  
                                    'dem': the dem, 'lons_mg': longitude of each pixel (i.e. a meshgrid), 'lats_mg': latitude of each pixel
        defo_sources | list | defo sources to be synthesised.  e.g. ['no_def', 'dyke', 'sill', 'mogi']
        n_ifgs | int | the number of interferograms to generate.  
        n_pix | int | Interferograms are square, with side length of this many pixels.  Note that we use SRTM3 pixels, so squares of ~90m side length.  
        intermediate_figure | boolean | If True, a figure showing the search for a viable deformation location and SNR is shown.  
        coh_scale | float | sets spatial scale of incoherent areas
        coh_threshold | float | coherence is in range of 0-1, values above this are classed as incoherent
        coh_interpolation_threshold | int | If n_pix is larger than this, interpolation will be used to generate the extra resolution (as the spatially correlated noise function used here is very slow for large images).  Similar to the setting turb_aps_interpolation_threshold
        min_deformation | float | Deformation must be above this size (in metres), even before checking the SNR against the deformation and the atmosphere.  
        max_deformation | float | Deformation must be below this size (in metres), even before checking the SNR against the deformation and the atmosphere.  
        snr_threshold | float | SNR of the deformation vs (topographically correlated APS + turbulent APS) must be above this for the signals to be considered as visible.  
        turb_aps_mean | float | mean strength of turbulent atmospheres, in metres.  Note that the atmosphere_turb function takes cmm, and the value provided in m is converted first
        turb_aps_length | float | Length scale of spatial correlation, in metres. e.g. 5000m
        turb_aps_interpolation_threshold | int | If n_pix is larger than this, interpolation will be used to generate the extra resolution (as the spatially correlated noise function used here is very slow for large images).  Similar to the setting coh_interpolation_threshold
        topo_aps_mean | float | rad/km of delay for the topographically correlated APS
        topo_aps_var | float | rad/km.  Sets the strength difference between topographically correlated APSs
    Returns:
        X_all | dict of masked arrays | keys are formats (e.g. uuu), then rank 4 masked array
        Y_class | rank 2 array | class labels, n x 1 (i.e. not one hot encoding)
        Y_loc | rank 2 array |  location of deformation, nx4 (xy location, xy width)
        Y_source_kwargs | list of dicts | stores the source_kwargs that were generated randomly to create each interferogram.  Also contains the source names (ie the same as Y_class, but as a string).  
    History:  
        2020/10/19 | MEG | Written from various scripts.  
        2020/10/26 | MEG | Add function to record the source_kwargs.  Intended for use if these are to be the label of interest (e.g. training a CNN to determine strike etc.)
    """
    import numpy as np
    import numpy.ma as ma
    import matplotlib.pyplot as plt

    from syinterferopy_functions import coherence_mask, atmosphere_turb, atmosphere_topo
    from auxiliary_functions import truncate_colormap  # needed to plot the DEM with nice (terrain) colours

    # hard coded variables:
    count_max = 8  # the number of times the function searches for acceptable deformation positions and SNR

    # begin to generate the data for this output file
    succesful_generate = 0  # count how many ifgs have been successfully made so we can stop when we get to n_ifgs
    attempt_generate = 0  # only successfully generated ifgs are counted above, but it is also useful to count all attempts
    X_all = {}  # the data will be stored in a dictionary, X_all
    for output in outputs:
        X_all[output] = ma.zeros(
            (n_ifgs, n_pix, n_pix, 3)
        )  # populate the dictionary with the required outputs and empty arrays.
    Y_class = np.zeros(
        (n_ifgs, 1))  # initate for labels showing type of deformation
    Y_loc = np.zeros(
        (n_ifgs, 4))  # initate for labels showing location of deformation
    Y_source_kwargs = [
    ]  # initate an empty list for storing source kwargs (parameters like opening dip etc.)

    while succesful_generate < n_ifgs:
        volcano_n = np.random.randint(
            0, len(volcanoes))  # choose a volcano at random
        defo_source = defo_sources[np.random.randint(0, len(
            defo_sources))]  # random choice of which deformation source to use
        print(f"Volcano: {volcanoes[volcano_n]['name']} ", end='')

        # 0: generate incoherence mask, choose dem choose if ascending or descending.
        dem_large = volcanoes[volcano_n]['dem']  # open a dem
        dem_ll_extent = [
            (volcanoes[volcano_n]['lons_mg'][0, 0],
             volcanoes[volcano_n]['lats_mg'][0, 0]
             ),  # get lon lat of lower left corner
            (volcanoes[volcano_n]['lons_mg'][-1, -1],
             volcanoes[volcano_n]['lats_mg'][-1, -1])
        ]  # and upper right corner
        mask_coherence = coherence_mask(
            volcanoes[volcano_n]['lons_mg'][:n_pix, :n_pix],
            volcanoes[volcano_n]['lats_mg'][:n_pix, :n_pix],  # generate coherence mask at the number of pixels required for the output, and not the size of the large dem
            coh_scale,
            coh_threshold,
            coh_interpolation_threshold
        )  # if threshold is 0, all of the pixels are incoherent, and if 1, none are.

        print(f"| Coherence mask generated ", end='')
        if np.random.rand() < 0.5:
            asc_or_desc = 'asc'
            heading = 348
        else:
            asc_or_desc = 'desc'
            heading = 192
        print(f"| Deformation source: {defo_source} ", end='')

        if intermediate_figure:
            f, axes = plt.subplots(3, count_max, figsize=(16, 8))
            f.suptitle(
                f"{attempt_generate}: Volcano: {volcanoes[volcano_n]['name']} | Deformation label: {defo_source}"
            )
            f.canvas.set_window_title(
                f"{attempt_generate}_Volcano:{volcanoes[volcano_n]['name']}")
            axes[0, 0].set_ylabel(
                'Location search \n (hatch = water or incoherent)')
            axes[1, 0].set_ylabel('SNR search \n (defo + APS_turb + APS_topo)')
            for axe_n, axe in enumerate(axes[0, :]):
                axe.set_title(f"Attempt # {axe_n}")
            cmap = plt.get_cmap('terrain')  # makes sense for DEMs
            cmap = truncate_colormap(
                cmap, 0.2, 1
            )  # but by default it starts at blue, so crop (truncate) that part off.
            axes[2, 0].imshow(dem_large, cmap=cmap)
            axes[2, 0].set_xlabel('Full DEM')
            axes[2, 1].imshow(mask_coherence)
            axes[2, 1].set_xlabel('Coherence Mask')
            axes[2, 1].yaxis.tick_right()
            for axe in np.concatenate((axes[0, 1:], axes[1, :], axes[2, 2:]),
                                      axis=0):
                axe.set_axis_off()

        # 1: If no deformation, just generate topo. correlated and turbulent APS
        if defo_source == 'no_def':
            viable_location = viable_snr = True  # in the no deformation case, these are always True
            source_kwargs = {'source': 'no_def'}
            defo_m = np.ones(
                dem_large.shape
            )  # in the no deformation case, make a uniform placeholder deformation field.
            defo_m, dem, viable_location, loc_list, masks = def_and_dem_translate(
                dem_large,
                defo_m,
                mask_coherence,
                threshold=0.3,
                n_pixs=n_pix,
                defo_fraction=0.8
            )  # doesn't matter if this returns False.  Note that masks is a dictionary of deformation, coherence and water masks.
            dem = ma.array(dem, mask=masks['coh_water']
                           )  # mask the DEM for water and incoherence
            APS_turb_m = atmosphere_turb(
                1,
                volcanoes[volcano_n]['lons_mg'][:n_pix, :n_pix],
                volcanoes[volcano_n]['lats_mg'][:n_pix, :n_pix],  # generate a turbulent APS, but for speed not at the size of the original DEM, and instead at the correct n_pixs
                None,
                turb_aps_length,
                False,
                False,
                turb_aps_interpolation_threshold,
                turb_aps_mean)
            APS_turb_m = APS_turb_m[0, ]  # remove the 1st dimension
            APS_topo_m = atmosphere_topo(
                dem, topo_aps_mean, topo_aps_var, difference=True
            )  # generate a topographically correlated APS
            if intermediate_figure:
                axes[0, 0].imshow(np.zeros((n_pix, n_pix)))
                axes[0, 0].set_xlabel(f"No deformation")
                axes[0, 0].set_axis_on()
                temp_combined = APS_topo_m + APS_turb_m
                axes[1, 0].imshow(temp_combined)
                axes[1, 0].set_xlabel(
                    f"[{np.round(np.min(temp_combined), 2)}, {np.round(np.max(temp_combined),2 )}] m"
                )
                axes[1, 0].set_axis_on()
                plt.pause(4)

        # 2: Or, if we do have deformation, generate it and topo correlated and turbulent APS
        else:
            viable_location = False
            count = 0
            # prepare for while statement that will search for a viable deformation location.
            # 2a: Try to make a deformation signal of the correct magnitude, and then place it on land.
            while viable_location is False and count < count_max:  # random translations of dem and deformation, but deformation must remain visible.
                defo_m, source_kwargs = create_random_defo_m(
                    dem_large,
                    volcanoes[volcano_n]['lons_mg'],
                    volcanoes[volcano_n]['lats_mg'],
                    volcanoes[volcano_n]['centre'],
                    defo_source,  #  make a deformation signal with a size within the bounds set by min and max.  
                    min_deformation,
                    max_deformation,
                    asc_or_desc
                )  # Note that it is made at the size of the large DEM (dem_large)
                source_kwargs[
                    'source'] = defo_source  # add name of source to dict of source_kwargs (e.g. depth/opening etc.  )
                defo_m, dem, viable_location, loc_list, masks = def_and_dem_translate(
                    dem_large,
                    defo_m,
                    mask_coherence,
                    threshold=0.3,  # do the random crop of the dem and the defo pattern, reducing the size to that desired.  
                    n_pixs=n_pix,
                    defo_fraction=0.8
                )  # and check that the majority of the deformation pattern isn't in an incoherent area, or in water.
                dem = ma.array(
                    dem, mask=masks['coh_water']
                )  # mask the DEM (for water and areas of incoherence)
                if intermediate_figure:
                    axes[0, count].imshow(ma.array(defo_m,
                                                   mask=masks['coh_water']),
                                          vmin=np.min(defo_m),
                                          vmax=np.max(defo_m))
                    axes[0,
                         count].imshow(ma.array(defo_m,
                                                mask=1 - masks['coh_water']),
                                       vmin=np.min(defo_m),
                                       vmax=np.max(defo_m))
                    axes[0, count].contourf(masks['coh_water'],
                                            1,
                                            hatches=['', '//'],
                                            alpha=0)
                    axes[0, count].set_xlabel(
                        f"Viable Location: {viable_location}")
                    axes[0, count].set_axis_on()
                    plt.pause(1)
                if viable_location == False:
                    count += 1

            if viable_location:  #
                # 2b: If we have a viable size and location, try to make the atmospheric signals and check that the deformation is still visible (i.e. an acceptable signal to noise ratio)
                print(f"| Viable location ", end='')
                viable_snr = False
                count = 0  # make dem and ph_def and check that def is visible
                while viable_snr is False and count < count_max:
                    APS_turb_m = atmosphere_turb(
                        1,
                        volcanoes[volcano_n]['lons_mg'][:n_pix, :n_pix],
                        volcanoes[volcano_n]['lats_mg'][:n_pix, :n_pix],  # generate a turbulent APS, but for speed not at the size of the original DEM, and instead at the correct n_pixs
                        None,
                        turb_aps_length,
                        False,
                        False,
                        turb_aps_interpolation_threshold,
                        turb_aps_mean)
                    APS_turb_m = APS_turb_m[0, ]  # remove the 1st dimension
                    APS_topo_m = atmosphere_topo(
                        dem, topo_aps_mean, topo_aps_var, difference=True
                    )  # generate a topographically correlated APS using the DEM
                    viable_snr, snr = check_def_visible(
                        defo_m, masks['def'], APS_topo_m, APS_turb_m,
                        snr_threshold
                    )  # check that the deformation is visible over the ph_topo and ph_turb (SNR has to be above snr_threshold)

                    if intermediate_figure:
                        temp_combined = defo_m + APS_topo_m + APS_turb_m
                        axes[1, count].imshow(temp_combined)
                        axes[1, count].set_xlabel(
                            f"[{np.round(np.min(temp_combined), 2)}, {np.round(np.max(temp_combined),2 )}] m \n"
                            f"SNR: {np.round(snr, 2)}")
                        axes[1, count].set_axis_on()
                        plt.pause(1)
                    if viable_snr == False:
                        count += 1
                if viable_snr:
                    print(f"| Viable SNR ", end='')
                else:
                    print('| SNR is too low. \n')
                plt.pause(2)
            else:
                print(f"| No viable location found. \n")

        # 3: If succesful, append to X (data) and Y (labels) arrays.                                                                                    # still in the main while loop, but out of the deformation / no deformation else statement.
        if (viable_location and viable_snr) or (defo_source == 'no_def'):
            X_all, Y_class, Y_loc, succesful = combine_signals(
                X_all, Y_class, Y_loc, defo_m, APS_topo_m, APS_turb_m, heading,
                dem, defo_source, defo_sources, loc_list, outputs,
                succesful_generate
            )  # we have a succesful flag as sometimes this can fail due to Nans etc.
            if succesful:
                Y_source_kwargs.append(source_kwargs)
                succesful_generate += 1  # update the counter of how many ifgs have been successfully made
                print(f"| Succesful write. \n")
            else:
                print(f"| Failed write.  \n")
        attempt_generate += 1  # update the counter of how many have been made in total (successes and failures)
        plt.close()

    return X_all, Y_class, Y_loc, Y_source_kwargs
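X_all above is a dictionary holding one rank 4 masked array per requested output format, filled one interferogram at a time. A stripped-down sketch of that bookkeeping, with hypothetical sizes, placeholder output keys and random data standing in for the synthesised signals:

import numpy as np
import numpy.ma as ma

outputs = ['uuu', 'uud']                  # placeholder format names for the sketch
n_ifgs, n_pix = 4, 32
X_all = {output: ma.zeros((n_ifgs, n_pix, n_pix, 3)) for output in outputs}

for i in range(n_ifgs):
    ifg = ma.array(np.random.randn(n_pix, n_pix),
                   mask=np.random.rand(n_pix, n_pix) > 0.9)   # ~10% of pixels incoherent
    for output in outputs:
        for channel in range(3):
            X_all[output][i, :, :, channel] = ifg             # the mask propagates with the data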
示例#53
0
    def estimShifts(self, dataOneSubjTB, thetas, variances, ageOneSubj1array,
                    clustProbBC, prevSubShift, prevSubShiftAvg, fixSpeed):
        '''
    Do not use a dot product because, when NaNs are involved, the weights will not sum to 1.
    Use np.ma.average(..., weights) instead, as the weights will be re-normalised accordingly.
    '''

        clustProbBCColNorm = clustProbBC / np.sum(clustProbBC, 0)[None, :]

        nrBiomk, nrClust = clustProbBC.shape
        nrTimepts = dataOneSubjTB.shape[0]

        dataOneSubjBT = dataOneSubjTB.T

        # declare it as masked array, compute it for every cluster with ma.average
        dataOneSubjWeightedCT = ma.zeros((nrClust, nrTimepts), float)
        for c in range(nrClust):
            dataOneSubjWeightedCT[c, :] = ma.average(
                dataOneSubjBT, axis=0, weights=clustProbBCColNorm[:, c])

        # convert back to np array for speed, do the calculation manually.
        dataOneSubjWeiManMaskCT = np.array(dataOneSubjWeightedCT)
        dataOneSubjWeiManMaskCT[dataOneSubjWeightedCT.mask] = np.nan

        if fixSpeed:  # fixes parameter alpha to 1
            composeShift = lambda beta: [prevSubShiftAvg[0], beta]
            initSubShift = prevSubShift[1]
            # objFuncLambda2 = lambda beta: self.objFunShift(composeShift(beta), dataOneSubjWeightedCT, thetas,
            #   variances, ageOneSubj1array, clustProbBC)
            objFuncLambda = lambda beta: self.objFunShiftMaskedManual(
                composeShift(beta), dataOneSubjWeiManMaskCT, thetas, variances,
                ageOneSubj1array, clustProbBC)

            prevSubShiftAvgCurr = prevSubShiftAvg[1].reshape(1, -1)
        else:
            composeShift = lambda shift: shift
            initSubShift = prevSubShift
            # objFuncLambda2 = lambda shift: self.objFunShift(shift, dataOneSubjWeightedCT, thetas,
            #   variances, ageOneSubj1array, clustProbBC)
            objFuncLambda = lambda beta: self.objFunShiftMaskedManual(
                composeShift(beta), dataOneSubjWeiManMaskCT, thetas, variances,
                ageOneSubj1array, clustProbBC)

            prevSubShiftAvgCurr = prevSubShiftAvg

        # assert objFuncLambda(initSubShift) == objFuncLambda2(initSubShift)
        # print(adsa)

        # print('objFuncLambda(initSubShift)', objFuncLambda(initSubShift))

        res = scipy.optimize.minimize(objFuncLambda,
                                      initSubShift,
                                      method='Nelder-Mead',
                                      options={
                                          'xatol': 1e-2,
                                          'disp': False
                                      })
        bestShift = res.x
        nrStartPoints = 2
        nrParams = prevSubShiftAvgCurr.shape[0]
        pertSize = 1
        minSSD = res.fun
        success = False
        for i in range(nrStartPoints):
            perturbShift = prevSubShiftAvgCurr * (
                np.ones(nrParams) + pertSize * np.random.multivariate_normal(
                    np.zeros(nrParams), np.eye(nrParams)))
            res = scipy.optimize.minimize(objFuncLambda,
                                          perturbShift,
                                          method='Nelder-Mead',
                                          options={
                                              'xtol': 1e-8,
                                              'disp': False,
                                              'maxiter': 100
                                          })
            currShift = res.x
            currSSD = res.fun
            # print('currSSD', currSSD, objFuncLambda(currShift))
            if currSSD < minSSD:
                # if we found a better solution then we decrease the step size
                minSSD = currSSD
                bestShift = currShift
                pertSize /= 1.2
                success = res.success
            else:
                # if we didn't find a solution then we increase the step size
                pertSize *= 1.2
        print('bestShift', bestShift)

        return composeShift(bestShift)
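The docstring above explains why ma.average is preferred over a dot product here: with masked (NaN) entries a fixed weight vector no longer sums to 1, whereas ma.average renormalises the weights over the unmasked entries only. A small sketch of the difference, with made-up numbers:

import numpy as np
import numpy.ma as ma

weights = np.array([0.5, 0.3, 0.2])
data = ma.array([1.0, 2.0, 3.0], mask=[False, True, False])  # middle value missing

print(np.dot(weights, data.filled(0.0)))   # 1.1  -> biased, the full weight vector is still used
print(ma.average(data, weights=weights))   # (0.5*1 + 0.2*3) / (0.5 + 0.2) = 1.571...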
示例#54
0
]
n_models = len(cmip6_models)
model_shapes = ['o', '^', '+', 's', '*', 'd', 'x']

# SSP scenarios
ssp_options = ['ssp126', 'ssp245', 'ssp585']
ssp_options_length = len(ssp_options)

# Global mean temperature change
temperature_change_options = [1, 2, 3]
temperature_change_options_length = len(temperature_change_options)

#%%
# defining array to save values
x_array = ma.zeros(
    [len(temperature_change_options),
     len(ssp_options),
     len(cmip6_models)])
y_array = ma.zeros(
    [len(temperature_change_options),
     len(ssp_options),
     len(cmip6_models)])
obs_array = ma.zeros(
    [len(temperature_change_options),
     len(ssp_options) * len(cmip6_models)])

#%%
# loop through each global mean temperature change
for temp_option in range(0, temperature_change_options_length):
    min_temperature = temperature_change_options[
        temp_option]  # selecting the temperature change
示例#55
0
def deriv(*args, **kwargs):
    """Calculate the derivative along a single dimension.


    Calling Sequence:
        Result = deriv([x_in,] y_in, missing=1e+20, algorithm='default')


    Positional Input Arguments:
    * x_in:  Abscissa values of y_in to take with respect to.  If 
      not set, the derivative of y_in is take with respect to unit 
      abscissa intervals.  Numeric array of same shape and size as 
      y_in.  Must be monotonic and with no duplicate values.
      Optional.  First positional argument out of two, if present.

    * y_in:  Ordinate values, to take the derivative with respect 
      to.  Numeric array vector of rank 1.  Required.  Second posi-
      tional argument, if x_in is present; only positional argument 
      if x_in is absent.


    Keyword Input Arguments:
    * missing:  If y_in and/or x_in has missing values, this is the 
      missing value value.  Scalar.  Default is 1e+20.

    * algorithm:  Name of the algorithm to use.  String scalar.
      Default is 'default'.  Possible values include:
      + 'default':  Default method (currently set to 'order1').
      + 'order1':  First-order finite-differencing (backward and
        forward differencing used at the endpoints, and centered
        differencing used everywhere else).  If abscissa intervals
        are irregular, differencing will be correspondingly asym-
        metric.


    Output Result:
    * Derivative of y_in with respect to x_in (or unit interval 
      abscissa, if x_in is not given).  Numeric array of same shape 
      and size as y_in.  If there are missing values, those elements 
      in the output are set to the value in |missing|.  For instance, 
      if y_in is only one element, a one-element vector is returned 
      as the derivative with the value of |missing|.  If there are 
      missing values in the output due to math errors and |missing| 
      is set to None, output will fill those missing values with the 
      MA default value of 1e+20.  


    References:
    * Press, W. H., et al. (1992):  Numerical Recipes in Fortran 
      77:  The Art of Scientific Computing.  New York, NY:  Cambridge
      University Press, pp. 180-184.

    * Wang, Y. (1999):  "Numerical Differentiation," Introduction to 
      MHD Numerical Simulation in Space, ESS265: Instrumentation, 
      Data Processing and Data Analysis in Space Physics (UCLA).
      URL:  http://www-ssc.igpp.ucla.edu/personnel/russell/ESS265/
      Ch10/ylwang/node21.html.


    Example with one argument, no missing values, using the default
    method:
    >>> from deriv import deriv
    >>> import Numeric as N
    >>> y = N.sin(N.arange(8))
    >>> dydx = deriv(y)
    >>> ['%.7g' % dydx[i] for i in range(4)]
    ['0.841471', '0.4546487', '-0.3501755', '-0.83305']
    >>> true = N.cos(N.arange(8))  #- Compare with exact solution
    >>> ['%.7g' % true[i] for i in range(4)]  
    ['1', '0.5403023', '-0.4161468', '-0.9899925']

    Example with two arguments with missing values, using first-
    order differencing:
    >>> x = N.arange(8)/(2.*N.pi)
    >>> y = N.sin(x)
    >>> y[3] = 1e20            #- Set an element to missing value
    >>> dydx = deriv(x, y, missing=1e20, algorithm='order1')
    >>> ['%.7g' % dydx[i] for i in range(5)]
    ['0.9957836', '0.9831985', '1e+20', '0.8844179', '1e+20']
    >>> true = N.cos(x)       #- Compare with exact solution
    >>> ['%.7g' % true[i] for i in range(5)]  
    ['1', '0.9873616', '0.9497657', '0.8881628', '0.8041098']
    """

    #- Establish y_in and x_in from *args:

    if len(args) == 1:
        y_in = args[0]
        x_in = np.arange(len(y_in), dtype=y_in.dtype)
    elif len(args) == 2:
        x_in = args[0]
        y_in = args[1]
    else:
        raise ValueError("deriv:  Bad inputs")

    #- Establish missing and algorithm from *kwargs:

    if 'missing' in kwargs:
        missing = kwargs['missing']
    else:
        missing = 1e+20

    if 'algorithm' in kwargs:
        algorithm = kwargs['algorithm']
    else:
        algorithm = 'default'

    #- Check positional and keyword inputs for possible errors:

    if (len(y_in.shape) != 1) or (len(x_in.shape) != 1):
        raise ValueError("deriv:  Inputs not a vector")
    if type(algorithm) != type(''):
        raise ValueError("deriv:  algorithm not str")

    #- Set algorithm_to_use variable, based on the algorithm keyword.
    #  The algorithm_to_use tells which algorithm below to actually
    #  use (so here is where we set what algorithm to use for default):

    if algorithm == 'default':
        algorithm_to_use = 'order1'
    else:
        algorithm_to_use = algorithm

    #- Change input to MA:  just set to input value unless there are
    #  missing values, in which case add mask:

    if missing is None:
        x = ma.masked_array(x_in)
        y = ma.masked_array(y_in)
    else:
        x = ma.masked_values(x_in, missing, copy=0)
        y = ma.masked_values(y_in, missing, copy=0)

    #- Calculate and return derivative:

    #  * Create working arrays that are consistent with a 3-point
    #    stencil in the interior and 2-point stencil on the ends:
    #    *im1 means the point before index i, *ip1 means the point
    #    after index i, and the i index array is just plain x or
    #    y; the endpadded arrays replicate the ends of x and y.
    #    I use an MA array filled approach instead of concatenation
    #    because the MA concatenation routine doesn't work right
    #    when the endpoint element is a missing value:

    x_endpadded = ma.zeros(x.size + 2, dtype=x.dtype)
    x_endpadded[0] = x[0]
    x_endpadded[1:-1] = x
    x_endpadded[-1] = x[-1]

    y_endpadded = ma.zeros(y.size + 2, dtype=y.dtype)
    y_endpadded[0] = y[0]
    y_endpadded[1:-1] = y
    y_endpadded[-1] = y[-1]

    y_im1 = y_endpadded[:-2]
    y_ip1 = y_endpadded[2:]
    x_im1 = x_endpadded[:-2]
    x_ip1 = x_endpadded[2:]

    #  * Option 1:  First-order differencing (interior points use
    #    centered differencing, and end points use forward or back-
    #    ward differencing, as applicable):

    if algorithm_to_use == 'order1':
        dydx = (y_ip1 - y_im1) / (x_ip1 - x_im1)

    #  * Option 2:  Bad algorithm specified:

    else:
        raise ValueError("deriv:  bad algorithm")

    #- Return derivative as Numeric array:

    return ma.filled(dydx, missing)
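A short standalone sketch of the endpoint-padding trick used above: replicating the first and last samples makes the same centred-difference expression give a forward difference at the first point and a backward difference at the last (plain numpy, no missing values):

import numpy as np

x = np.arange(5, dtype=float)
y = x ** 2

x_pad = np.concatenate(([x[0]], x, [x[-1]]))
y_pad = np.concatenate(([y[0]], y, [y[-1]]))

dydx = (y_pad[2:] - y_pad[:-2]) / (x_pad[2:] - x_pad[:-2])
print(dydx)   # [1. 2. 4. 6. 7.] -> one-sided at the ends, centred in the interior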
示例#56
0
    def cascadeSpec(self,
                    epsGeV,
                    gmin=None,
                    gsteps=25,
                    Esteps=25,
                    epssteps=25,
                    epsmin=1e-10,
                    epsmax=1e1,
                    intSpectrum=True):
        """
	Calculate cascade spectrum.  

	Parameters
	----------
	epsGeV:		n-dim array, energy of upscattered CMB photons, in GeV

	kwargs
	------
	gmin:	n-dim array with minimum gamma-factors. 
		If none, calculated from gammaMin function (default = None)
	gsteps:	int, number of steps for gamma integration (default = 25)
	Esteps:	int, number of steps for energy integrations 
	epsmin: float, minimum energy of CMB integration, in eV (default = 1e-10)
	epsmax: float, maximum energy of CMB integration, in eV (default = 1e1)
	intSpectrum: bool, 
		if True, provided spectrum assumed to be the intrinsic spectrum, 
		    otherwise, assumed to be observed spectrum

	Returns
	-------
	n-dim array with cascade spectrum in 1/cm^2/s/eV 

	Notes
	-----
	"""
        if gmin is None:
            logGmin = log(self.gammaMin(epsGeV))
        else:
            logGmin = log(gmin)

        # maximum gamma factor
        # corresponding to max prim. energy
        #logGmax = log(self.EmaxTeV * 1e12 / (2. * M_E_EV)) * 0.999
        logGmax = log(1e8)

        # check if maximum gamma factor is larger than minimum gamma factor
        if np.all(logGmin >= logGmax):
            wString = '*** all minimum gamma values are larger than the maximum gamma value:'
            logging.warning(wString)
            logging.warning('*** gmax: {0:.3e}'.format(exp(logGmax)))
            logging.warning('*** max(gammaMin): {0:.3e}'.format(
                np.max(self.gammaMin(epsGeV))))
            logging.warning('*** max(gammaEngine): {0:.3e}'.format(
                np.max(self.gammaEngine(epsGeV))))
            logging.warning('*** max(gammaDeflect): {0:.3e}'.format(
                np.max(self.gammaDeflect(epsGeV))))
            logging.warning('*** max(gammaCMB): {0:.3e}'.format(
                np.max(self.gammaCMB(epsGeV))))
            return np.zeros(epsGeV.shape)

        # mask out regions where gmin > gmax
        epsGeV = ma.array(epsGeV, mask=logGmin >= logGmax)
        logGmin = ma.array(logGmin, mask=epsGeV.mask)

        # define the energy arrays for integration
        # outer integral runs over log gamma (one log gamma for each cascade energy)
        # inner integral runs over primary energy spectrum
        # second inner integral runs over energy of CMB spectrum
        logG = []
        logEprim = []
        logEpsCMB = []
        tauPrim = []

        logG = ma.zeros((epsGeV.shape[0], gsteps))
        logG.mask = np.zeros(logG.shape, dtype=bool)
        for ilg, logg in enumerate(
                logGmin):  # first loop over minimum gamma factors
            logG.mask[ilg] = logGmin.mask[ilg] * np.ones(gsteps, dtype=bool)
            if not logGmin.mask[ilg]:
                # check if corresponding minimum energy is larger than max energy
                if logg + log(2. * M_E_EV * 1e-12) > log(self.EmaxTeV):
                    logG.mask[ilg] = np.ones(gsteps, dtype=bool)
                else:
                    logG[ilg] = np.linspace(logg, logGmax, gsteps)

        # calculate the lower bound for the first inner integral for the injected spectrum
        # in TeV
        logEmin = log(self.EminTeV) * np.ones(logG.shape)
        logEprimMinTeV = logG + log(2. * M_E_EV * 1e-12)
        logEprimMinTeV = logEprimMinTeV * (logEprimMinTeV > logEmin) + \
           logEmin * (logEprimMinTeV <= logEmin)

        # generate three dim arrays for the integral over the primary spectrum
        # also generate the optical depths for the injected energies
        logEprim = ma.zeros((epsGeV.shape[0], gsteps, Esteps))
        logEprim.mask = np.zeros(logEprim.shape, dtype=bool)
        tauPrim = ma.zeros((epsGeV.shape[0], gsteps, Esteps))
        tauPrim.mask = np.zeros(tauPrim.shape, dtype=bool)

        for ilE, logE in enumerate(logEprimMinTeV):
            for jlE, logEE in enumerate(logE):
                logEprim.mask[ilE,jlE] = logEprimMinTeV.mask[ilE,jlE] * \
                       np.ones(Esteps, dtype = bool)

                if not (logEprimMinTeV.mask[ilE, jlE]
                        or logEE > log(self.EmaxTeV)):
                    logEprim[ilE, jlE] = np.linspace(logEE, log(self.EmaxTeV),
                                                     Esteps)
                    tauPrim[ilE,
                            jlE] = self.tau.opt_depth(self.zSource,
                                                      exp(logEprim[ilE, jlE]))

        # if all masks are true (gmin > gmax) || (ETeVmin > ETeVmax)
        # return zero
        if np.all(logEprim.mask):
            logging.warning(
                "All (gmin > gmax) || (ETeVmin > ETeVmax), returning 0.")
            return np.zeros(epsGeV.shape)

        tauPrim.mask = logEprim.mask
        # generate three-dim arrays for the integral over the CMB spectrum
        logEpsCMB = ma.zeros((epsGeV.shape[0], gsteps, epssteps))
        logEpsCMB.mask = np.zeros(logEpsCMB.shape, dtype=bool)

        x = ma.zeros((epsGeV.shape[0], gsteps, epssteps))

        for ilg, lg in enumerate(logG):
            for jlg, lgg in enumerate(lg):

                logEpsCMB[ilg, jlg] = np.linspace(log(epsmin), log(epsmax),
                                                  epssteps)
                x[ilg,jlg] = epsGeV[ilg] * 1e9 / 4. / exp(logEpsCMB[ilg,jlg]) / \
                      exp(2. * logG[ilg,jlg])

        x.mask = logEpsCMB.mask | (x >= 1)

        # calculate kernel for CMB integral ---------------------- #
        # the rollaxis calls cast the arrays into shapes that can be multiplied

        # log(x) may emit a warning for masked entries
        kernelCMB = self._F_IC_T(x) * 4. * \
          nphotCMBarray(exp(logEpsCMB)) / exp(logEpsCMB)

        kernelCMB *= exp(logEpsCMB)
        # rollaxis needed, since logG and kernelCMB have different dimensions
        kernelCMB = np.rollaxis(
            np.rollaxis(np.rollaxis(kernelCMB, 2) * exp(2. * logG), 2), 2)
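        # the two outer rollaxis calls restore the (n, gsteps, epssteps) ordering;
        # the whole expression is equivalent to kernelCMB * exp(2. * logG)[:, :, np.newaxis]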

        # integrate over the CMB energy, which is axis 2 once the rollaxis calls restore the ordering
        kernelGamma = simps(kernelCMB, logEpsCMB, axis=2)

        # calculate kernel for injected source integral ------------------- #
        kernelInjSpec = self.intSpec(exp(logEprim), **self.intSpecPar)
        if intSpectrum:  # provided spectrum is the intrinsic one
            kernelInjSpec *= 1. - exp(-tauPrim)
        else:  # provided spectrum is the observed one
            kernelInjSpec *= exp(tauPrim) - 1.
        kernelInjSpec *= exp(logEprim)  # account for log integration

        kernelGamma *= simps(kernelInjSpec, logEprim, axis=2) / exp(logG * 5.)
        mask = np.isnan(kernelGamma)

        kernelGamma = ma.array(kernelGamma, mask=mask)
        logG = ma.array(logG, mask=mask | logG.mask)

        #logging.info('new {0}'.format(kernelGamma))
        #logging.info('new {0}'.format(kernelGamma.shape))
        #logging.info('new {0}'.format(logG))
        #logging.info('new {0}'.format(logG.shape))

        result = np.zeros(epsGeV.shape)
        for ikG, kG in enumerate(kernelGamma):
            if np.all(kG.mask): continue
            result[ikG] = simps(kG.compressed(), logG[ikG].compressed())
        result *= M_E_EV / U_CMB * 9. / 64.

        return result
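
A minimal, hypothetical sketch of the log-space integration pattern used above: substituting u = log(E) turns the integral of f(E) dE into the integral of f(e^u) e^u du, which is why the kernels are multiplied by exp(logEprim) and exp(logEpsCMB) before the Simpson calls. The toy power-law spectrum and the bounds below are illustrative only, not taken from the source.

import numpy as np
from scipy.integrate import simpson  # same routine the code above calls via the older `simps` alias

logE = np.linspace(np.log(1e-3), np.log(1e3), 1001)  # integration grid in u = log(E)
f = lambda E: E ** -2.0                               # toy power-law spectrum (illustrative)
# multiply the integrand by exp(u) to account for dE = E du, then apply Simpson's rule
integral = simpson(f(np.exp(logE)) * np.exp(logE), x=logE)
print(integral)  # ~999.999, matching the analytic value 1/1e-3 - 1/1e3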
Example #57
0
    filenames1 = glob.glob(
        'C:/Users/admin/Desktop/Master_thesis/Javier_ERAI_winds_icedrift_OSISAF/SortedData_OSISAF_IceWind/WindSpeed_10m/'
        + PEP + '/' + B + '/*.nc')
    filenames2 = glob.glob(
        'C:/Users/admin/Desktop/Master_thesis/Javier_ERAI_winds_icedrift_OSISAF/SortedData_OSISAF_IceWind/SeaIceDriftSpeed/'
        + PEP + '/' + B + '/*.nc')

    Wu = np.zeros(
        [21063, len(filenames1)], dtype=float
    )  # 2D array to save LOCAL (X) WIND VELOCITY values as they're extracted from each .nc file (columns)
    Wv = np.zeros(
        [21063, len(filenames1)], dtype=float
    )  # 2D array to save MERIDIONAL (Y) WIND VELOCITY values as they're extracted from each .nc file (columns)
    Iu = ma.zeros(
        [21063, len(filenames2)], dtype=float
    )  # 2D array to save LOCAL (X) ICE VELOCITY values as they're extracted from each .vec file (columns)
    Iv = ma.zeros(
        [21063, len(filenames2)], dtype=float
    )  # 2D array to save MERIDIONAL (Y) ICE VELOCITY values as they're extracted from each .vec file (columns)

    n = -1  # initial value of counter for iceDrift files
    nn = -1  # initial value of counter for wind files

    for filename in filenames1:  # extracts data from each .nc file one by one
        datum1 = Dataset(filename, mode='r')

        print('filename: ', filename)

        lat1 = datum1.variables['lat1'][:]
        lon1 = datum1.variables['lon1'][:]
Example #58
0
    def Read(self, **args):
        #        self.dsetname = self.runConfig.get( 'input', 'Dataset' )
        #        cfgDir = os.path.dirname( self.runConfigFile )
        #        datasetConfigFile =  os.path.expanduser( os.path.join( cfgDir, 'datasets.cfg' ) )
        #        self.datasetConfig = ConfigParser.RawConfigParser()
        #        self.datasetConfig.read( datasetConfigFile )
        #
        #        inputDatasetDir = self.datasetConfig.get( self.dsetname, 'DataDir' )
        #        self.latVarName = self.datasetConfig.get( self.dsetname, 'LatVarName' )
        #        self.lonVarName = self.datasetConfig.get( self.dsetname, 'LonVarName' )
        #        self.vertVarName = self.datasetConfig.get( self.dsetname, 'VertVarName' )
        #        self.timeVarName = self.datasetConfig.get( self.dsetname, 'TimeVarName' )

        #        inputFileName = self.runConfig.get( 'input', 'Datafile' )
        #        inputFilePath = os.path.join(inputDatasetDir,inputFileName)
        self.zscale = args.pop("zscale", self.zscale)

        if self.varName is None:
            self.varName = self.GetDefaultVarName()
        try:
            var = self.variableList[self.varName]
        except:
            print >> sys.stderr, "\n\n Error reading var %s from variable list: %s\n" % (
                self.varName, str(self.variableList.keys()))
            return


#        self.missingValue = var.fmissing_value

        inputDataBounds = self.GetDataBounds()
        shapeT = np.array(var.shape)
        self.shape = np.ndarray([len(shapeT)], dtype=int)
        self.shape[::1] = shapeT[::-1]
        self.nTimeFrames = 1 if len(self.shape) < 4 else self.shape[3]
        self.vertvar = self.variableList[self.vertVarName]

        self.ROIExtent = np.array(
            [0, self.shape[0], 0, self.shape[1], 0, self.shape[2]])
        self.dataRange = np.array([
            inputDataBounds[1] - inputDataBounds[0],
            inputDataBounds[3] - inputDataBounds[2],
            inputDataBounds[5] - inputDataBounds[4]
        ])
        self.dataOffset = np.array(
            [inputDataBounds[0], inputDataBounds[2], inputDataBounds[4]])
        offset = [180, 90]
        if self.ROI is not None:
            for iD in range(4):
                self.ROI[iD] = self.ROI[iD] + offset[iD / 2]
            for iD in range(0, 6, 2):
                if self.ROI[iD] < inputDataBounds[iD]:
                    self.ROI[iD] = inputDataBounds[iD]
            for iD1 in range(1, 6, 2):
                if self.ROI[iD1] > inputDataBounds[iD1]:
                    self.ROI[iD1] = inputDataBounds[iD1]
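            # map the geographic ROI onto array index bounds via its fraction of the data range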
            for i in range(6):
                i0 = i / 2
                ROIFraction = (self.ROI[i] -
                               self.dataOffset[i0]) / self.dataRange[i0]
                self.ROIExtent[i] = int(ROIFraction * self.shape[i0])

        roishapeT = list(var.shape)
        roishapeT[-1] = self.ROIExtent[1] - self.ROIExtent[0]
        roishapeT[-2] = self.ROIExtent[3] - self.ROIExtent[2]
        roishapeT[-3] = self.ROIExtent[5] - self.ROIExtent[4]

        self.roishape = np.ndarray([len(roishapeT)], dtype=int)
        self.roishape = roishapeT[::-1]

        #        tempvar = np.zeros( roishapeT, dtype=var.dtype )

        subvar = var[..., self.ROIExtent[4]:self.ROIExtent[5],
                     self.ROIExtent[2]:self.ROIExtent[3],
                     self.ROIExtent[0]:self.ROIExtent[1]]
        #        tempvar = subvar

        #        tempvar = ma.masked_array( subvar )
        #        undef_value_mask = tempvar == self.missingValue
        #        tempvar.mask = undef_value_mask
        tempvar = ma.masked_values(subvar, self.undef)
        #        undef_value_mask = tempvar == self.missingValue
        #        tempvar.mask = undef_value_mask

        tempvar = tempvar.transpose()

        newvar = tempvar
        if self.reductionFactor > 1:
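            # down-sample by block-averaging reductionFactor x reductionFactor cells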
            newshape = np.array(self.roishape)
            for i in range(-2, 0):
                self.shape[i] /= self.reductionFactor
                newshape[i] /= self.reductionFactor
            newvar = ma.zeros(newshape, dtype=var.dtype)
            bounds = newshape * self.reductionFactor
            for ix in range(0, self.reductionFactor):
                for iy in range(0, self.reductionFactor):
                    subarray = tempvar[ix:bounds[0]:self.reductionFactor,
                                       iy:bounds[1]:self.reductionFactor, ...]
                    newvar[...] += subarray
            newvar /= (self.reductionFactor * self.reductionFactor)

        print "ReadNetCDFVariable '" + self.varName + "', roi shape = " + str(
            self.roishape) + ", data var shape = " + str(
                var.shape) + ", new var shape = " + str(
                    newvar.shape) + ", ROI Extent = " + str(
                        self.ROIExtent) + ", reductionFactor = " + str(
                            self.reductionFactor) + ", InvertZ = " + str(
                                self.invertZ)

        if self.invertZ:
            self._dataArray = newvar.copy()
            self._dataArray = newvar[..., ::-1, :]
            del newvar
        else:
            self._dataArray = newvar

        range_min = self._dataArray.min()
        range_max = self._dataArray.max()
        self.timeseries_range = [range_min, range_max]
        self.grid_origin = self.GetDataCornerPosition()
        self.grid_spacing = self.GetDataRangeScaling(self.zscale)
        self.hasData = True
Example #59
0
    import numpy.ma as ma
    import os.path

    hits = np.array([[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 1, 2, 0],
                     [0, 2, 1, 3, 0], [0, 0, 0, 0, 1]])

    visits = np.array([[0, 0, 0, 0, 0], [0, 1, 1, 1, 0], [0, 1, 9, 3, 0],
                       [0, 2, 2, 4, 0], [0, 0, 0, 0, 8]])

    undef_mask = (visits == 0)
    alpha = ma.masked_array(hits, dtype=float)
    alpha[undef_mask] = ma.masked

    means = ma.divide(alpha, visits)

    means_ds = ma.zeros(means.shape)
    means_ds[undef_mask] = DiSt.UNDEFINED.value
    means_ds[~undef_mask] = ma.masked
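    # means_ds marks never-visited cells as UNDEFINED; all other cells stay masked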

    worldmap_extent = [150.4, 183.0, 0, 24.5]
    test_ds_list = [DiSt.UNDEFINED, DiSt.UNIFORM, DiSt.BIMODAL]

    test_v_min = 0
    test_v_max = 1

    test_occ = True

    # Create Colorizer Object
    mean_colorizer = MapColorizer()
    mean_colorizer.set_wm_extent(worldmap_extent)
    mean_colorizer.set_disc_state_list(test_ds_list)
Example #60
0
def comp_r(dert__, fig, root_fcr):
    '''
    Cross-comparison of input param (dert[0]) over rng passed from intra_blob.
    This fork is selective for blobs with below-average gradient,
    where input intensity didn't vary much in shorter-range cross-comparison.
    Such input is predictable enough for selective sampling: skipping current
    rim derts as kernel-central derts in following comparison kernels.
    Skipping forms increasingly sparse output dert__ for greater-range cross-comp, hence
    rng (distance between centers of compared derts) increases as 2^n, starting at 0:
    rng = 1: 3x3 kernel,
    rng = 2: 5x5 kernel,
    rng = 4: 9x9 kernel,
    ...
    Due to skipping, configuration of input derts in next-rng kernel will always be 3x3, see:
    https://github.com/boris-kz/CogAlg/blob/master/frame_2D_alg/Illustrations/intra_comp_diagrams.png
    '''
    # initialize new dert structure
    new_dert__ = ma.zeros((dert__.shape[0], (dert__.shape[1] - 1) // 2,
                           (dert__.shape[2] - 1) // 2),
                          dtype=dert__.dtype)
    new_dert__.mask = True
    # extract new_dert__ 'views', use [:] to 'update' views and new_dert__ at the same time
    i__center, idy__, idx__, g__, dy__, dx__, m__ = new_dert__

    i__ = dert__[0]  # i is ig if fig else pixel
    '''
    sparse aligned i__center and i__rim arrays:
    '''
    i__center[:] = i__[1:-1:2, 1:-1:2]  # also assignment to new_dert__[0]
    i__topleft = i__[:-2:2, :-2:2]
    i__top = i__[:-2:2, 1:-1:2]
    i__topright = i__[:-2:2, 2::2]
    i__right = i__[1:-1:2, 2::2]
    i__bottomright = i__[2::2, 2::2]
    i__bottom = i__[2::2, 1:-1:2]
    i__bottomleft = i__[2::2, :-2:2]
    i__left = i__[1:-1:2, :-2:2]
    '''
    unmask all derts in kernels with at most one masked dert (the threshold can be set to any number of masked derts),
    to avoid extreme blob shrinking and loss of info in the other derts of partially masked kernels;
    unmasked derts were computed by extend_dert() in intra_blob
    '''
    majority_mask = (i__[1:-1:2, 1:-1:2].mask.astype(int) +
                     i__[:-2:2, :-2:2].mask.astype(int) +
                     i__[:-2:2, 1:-1:2].mask.astype(int) +
                     i__[:-2:2, 2::2].mask.astype(int) +
                     i__[1:-1:2, 2::2].mask.astype(int) +
                     i__[2::2, 2::2].mask.astype(int) +
                     i__[2::2, 1:-1:2].mask.astype(int) +
                     i__[2::2, :-2:2].mask.astype(int) +
                     i__[1:-1:2, :-2:2].mask.astype(int)) > 1
    i__center.mask = i__topleft.mask = i__top.mask = i__topright.mask = i__right.mask = i__bottomright.mask = \
    i__bottom.mask = i__bottomleft.mask = i__left.mask = majority_mask  # not only i__center

    idy__[:], idx__[:] = dert__[[1, 2], 1:-1:2, 1:-1:2]

    if root_fcr:  # root fork is comp_r, accumulate derivatives:

        dy__[:] = dert__[4, 1:-1:2, 1:-1:2]  # sparse to align with i__center
        dx__[:] = dert__[5, 1:-1:2, 1:-1:2]
        m__[:] = dert__[6, 1:-1:2, 1:-1:2]

    dy__.mask = dx__.mask = m__.mask = majority_mask

    if not fig:  # compare four diametrically opposed pairs of rim pixels:

        d_tl_br = i__topleft.data - i__bottomright.data
        d_t_b = i__top.data - i__bottom.data
        d_tr_bl = i__topright.data - i__bottomleft.data
        d_r_l = i__right.data - i__left.data

        dy__ += (d_tl_br * YCOEFs[0] + d_t_b * YCOEFs[1] +
                 d_tr_bl * YCOEFs[2] + d_r_l * YCOEFs[3])

        dx__ += (d_tl_br * XCOEFs[0] + d_t_b * XCOEFs[1] +
                 d_tr_bl * XCOEFs[2] + d_r_l * XCOEFs[3])

        g__[:] = ma.hypot(dy__, dx__)  # gradient
        '''
        inverse match = SAD, direction-invariant and more precise measure of variation than g
        (all diagonal derivatives can be imported from prior 2x2 comp)
        '''
        m__ += (abs(i__center.data - i__topleft.data) +
                abs(i__center.data - i__top.data) +
                abs(i__center.data - i__topright.data) +
                abs(i__center.data - i__right.data) +
                abs(i__center.data - i__bottomright.data) +
                abs(i__center.data - i__bottom.data) +
                abs(i__center.data - i__bottomleft.data) +
                abs(i__center.data - i__left.data))

    else:  # fig is TRUE, compare angle and then magnitude of 8 center-rim pairs

        i__[ma.where(i__ == 0)] = 1  # to avoid / 0
        a__ = dert__[[1, 2]] / i__  # sin = idy / i, cos = idx / i, i = ig
        '''
        sparse aligned a__center and a__rim arrays:
        '''
        a__center = a__[:, 1:-1:2, 1:-1:2]
        a__topleft = a__[:, :-2:2, :-2:2]
        a__top = a__[:, :-2:2, 1:-1:2]
        a__topright = a__[:, :-2:2, 2::2]
        a__right = a__[:, 1:-1:2, 2::2]
        a__bottomright = a__[:, 2::2, 2::2]
        a__bottom = a__[:, 2::2, 1:-1:2]
        a__bottomleft = a__[:, 2::2, :-2:2]
        a__left = a__[:, 1:-1:2, :-2:2]
        ''' 
        only mask kernels with more than one masked dert, for all operations below: 
        '''
        majority_mask_a = (a__[:, 1:-1:2, 1:-1:2].mask.astype(int) +
                           a__[:, :-2:2, :-2:2].mask.astype(int) +
                           a__[:, :-2:2, 1:-1:2].mask.astype(int) +
                           a__[:, :-2:2, 2::2].mask.astype(int) +
                           a__[:, 1:-1:2, 2::2].mask.astype(int) +
                           a__[:, 2::2, 2::2].mask.astype(int) +
                           a__[:, 2::2, 1:-1:2].mask.astype(int) +
                           a__[:, 2::2, :-2:2].mask.astype(int) +
                           a__[:, 1:-1:2, :-2:2].mask.astype(int)) > 1
        a__center.mask = a__topleft.mask = a__top.mask = a__topright.mask = a__right.mask = a__bottomright.mask = \
        a__bottom.mask = a__bottomleft.mask = a__left.mask = majority_mask_a

        assert (majority_mask_a[0] == majority_mask_a[1]).all()
        # the sin and cos planes of a__ are masked together, so either plane of majority_mask_a can be used below
        dy__.mask = dx__.mask = m__.mask = majority_mask_a[0]
        '''
        8-tuple of differences between central dert angle and rim dert angle:
        '''
        cos_da = [((a__topleft[1].data * a__center[1].data) +
                   (a__center[0].data * a__topleft[0].data)),
                  ((a__top[1].data * a__center[1].data) +
                   (a__center[0].data * a__top[0].data)),
                  ((a__topright[1].data * a__center[1].data) +
                   (a__center[0].data * a__topright[0].data)),
                  ((a__right[1].data * a__center[1].data) +
                   (a__center[0].data * a__right[0].data)),
                  ((a__bottomright[1].data * a__center[1].data) +
                   (a__center[0].data * a__bottomright[0].data)),
                  ((a__bottom[1].data * a__center[1].data) +
                   (a__center[0].data * a__bottom[0].data)),
                  ((a__bottomleft[1].data * a__center[1].data) +
                   (a__center[0].data * a__bottomleft[0].data)),
                  ((a__left[1].data * a__center[1].data) +
                   (a__center[0].data * a__left[0].data))]
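        # each cos_da entry is cos_rim*cos_center + sin_rim*sin_center, i.e. the cosine of the center-rim angle difference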
        '''
        8-tuple of cosine matches per direction:
        '''
        m__ += (ma.minimum(i__center.data, i__topleft.data) * cos_da[0] +
                ma.minimum(i__center.data, i__top.data) * cos_da[1] +
                ma.minimum(i__center.data, i__topright.data) * cos_da[2] +
                ma.minimum(i__center.data, i__right.data) * cos_da[3] +
                ma.minimum(i__center.data, i__bottomright.data) * cos_da[4] +
                ma.minimum(i__center.data, i__bottom.data) * cos_da[5] +
                ma.minimum(i__center.data, i__bottomleft.data) * cos_da[6] +
                ma.minimum(i__center.data, i__left.data) * cos_da[7])
        '''
        8-tuple of cosine differences per direction:
        '''
        dt__ = [(i__center.data - i__topleft.data * cos_da[0]),
                (i__center.data - i__top.data * cos_da[1]),
                (i__center.data - i__topright.data * cos_da[2]),
                (i__center.data - i__right.data * cos_da[3]),
                (i__center.data - i__bottomright.data * cos_da[4]),
                (i__center.data - i__bottom.data * cos_da[5]),
                (i__center.data - i__bottomleft.data * cos_da[6]),
                (i__center.data - i__left.data * cos_da[7])]
        for d__, YCOEF, XCOEF in zip(dt__, YCOEFs, XCOEFs):

            dy__ += d__ * YCOEF  # decompose differences into dy and dx,
            dx__ += d__ * XCOEF  # accumulate with prior-rng dy, dx
            '''
            accumulate in prior-range dy, dx: 3x3 -> 5x5 -> 9x9 
            '''
        g__[:] = ma.hypot(dy__, dx__)
    '''
    next comp_r will use full dert       
    next comp_g will use g__, dy__, dx__
    '''
    return new_dert__  # new_dert__ has been updated along with 'view' arrays: i__center, idy__, idx__, g__, dy__, dx__, m__
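
A minimal toy sketch (not part of comp_r) of the stride-2 center/rim slicing used above; the 5x5 array and variable names are illustrative only:

import numpy as np

toy = np.arange(25).reshape(5, 5)
center      = toy[1:-1:2, 1:-1:2]   # kernel centers: every second row/column, skipping the border
topleft     = toy[:-2:2, :-2:2]     # top-left rim dert of each 3x3 kernel
bottomright = toy[2::2, 2::2]       # bottom-right rim dert of each 3x3 kernel
# every rim view matches the centers' shape, so center-rim differences align element-wise
assert center.shape == topleft.shape == bottomright.shape == (2, 2)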