Example #1
File: feature.py  Project: tacaswell/mr
def _local_maxima(image, diameter, separation, percentile=64):
    "Find local maxima whose brightness is above a given percentile."
    # Find the threshold brightness, representing the given
    # percentile among all NON-ZERO pixels in the image.
    flat = np.ravel(image)
    threshold = stats.scoreatpercentile(flat[flat > 0], percentile)
    # The intersection of the image with its dilation gives local maxima.
    assert image.dtype == np.uint8, "Perform dilation on exact (uint8) data." 
    dilation = morphology.grey_dilation(
        image, footprint=circular_mask(diameter, separation))
    maxima = np.where((image == dilation) & (image > threshold))
    if not np.size(maxima) > 0:
        raise ValueError("Bad image! Found zero maxima above the {}"
                         "-percentile threshold at {}.".format(
                             percentile, threshold))
    # Flat peaks, for example, return multiple maxima.
    # Eliminate redundancies within the separation distance.
    maxima_map = np.zeros_like(image)
    maxima_map[maxima] = image[maxima]
    peak_map = filters.generic_filter(
        maxima_map, _Cfilters.nullify_secondary_maxima(), 
        footprint=circular_mask(separation), mode='constant')
    # Also, do not accept peaks near the edges.
    margin = int(separation) // 2
    peak_map[..., :margin] = 0
    peak_map[..., -margin:] = 0
    peak_map[:margin, ...] = 0
    peak_map[-margin:, ...] = 0
    peaks = np.where(peak_map != 0)
    if not np.size(peaks) > 0:
        raise ValueError("Bad image! All maxima were in the margins.")
    return peaks[1], peaks[0] # x, y
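
A toy sketch of the dilation trick used above (circular_mask and _Cfilters are project-specific helpers from tacaswell/mr and are not reproduced here): a pixel that equals the grey dilation of its own neighbourhood is a local maximum.

import numpy as np
from scipy.ndimage import grey_dilation

# Toy input; the two bright spots (5 and 4) are the only local maxima.
image = np.array([[0, 1, 0, 0],
                  [1, 5, 1, 0],
                  [0, 1, 0, 3],
                  [0, 0, 3, 4]], dtype=np.uint8)
dilation = grey_dilation(image, size=(3, 3))
maxima = np.where(image == dilation)  # -> (array([1, 3]), array([1, 3]))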
Example #2
def calc_abundance(cdl, affine, window, meta):
    """
    Calculate farm abundance based on nesting and floral coefficients for
    various crop types.
    """

    # Create floral and nesting rasters derived from the CDL
    fl_out = np.zeros(shape=cdl.shape, dtype=np.float32)
    n_out = np.zeros(shape=cdl.shape, dtype=np.float32)
    floral = reclassify_from_data(cdl, SETTINGS['floral_reclass'], fl_out)
    nesting = reclassify_from_data(cdl, SETTINGS['nesting_reclass'], n_out)

    # Create an abundance index based on forage and nesting indexes
    # over the area a bee may travel
    forage = generic_filter(floral,
                            footprint=SETTINGS['window'],
                            function=focal_op)
    source = forage * nesting
    area_abundance = generic_filter(source,
                                    footprint=SETTINGS['window'],
                                    function=focal_op)

    if DEBUG:
        write_tif('cdl', cdl, affine, window, meta)
        write_tif('floral', floral, affine, window, meta)
        write_tif('nesting', nesting, affine, window, meta)
        write_tif('forage', forage, affine, window, meta)
        write_tif('source', source, affine, window, meta)
        write_tif('abundance', area_abundance, affine, window, meta)

    return area_abundance
Example #3
    def normalize(data, mask, method='mean', axis=[1, 4], window_size=2):
        masked_data = np.where(np.invert(mask), data, np.nan)

        result = np.zeros(shape=masked_data.shape)
        norm_scene = np.zeros(shape=result.shape)

        for time_bin in range(data.shape[0]):
            for freq_bin in range(data.shape[3]):

                scene = masked_data[time_bin, :, :, freq_bin]
                if (method == 'mean'):
                    norm = generic_filter(scene, np.nanmean, size=window_size)
                elif (method == 'median'):
                    norm = generic_filter(scene,
                                          np.nanmedian,
                                          size=window_size)
                elif (method == 'min'):
                    norm = generic_filter(scene, np.nanmin, size=window_size)
                else:
                    raise Exception(
                        "Method needs to be either mean, median or min")
                result[time_bin, :, :, freq_bin] = scene - norm
                norm_scene[time_bin, :, :, freq_bin] = norm

        return np.array(result), np.array(norm_scene)
Example #4
def coarseness(img, k):
    # compute average over all 0,..,k-1 regions of side-length 2^k-1
    A = __coarsness_average(img, k)

    # compute differences between pairs of non-overlapping neighbourhoods
    E = scipy.zeros([img.ndim] + list(A.shape), dtype=scipy.float_) # matrix holding computed differences in all directions

    for dim in range(img.ndim):
        for nbh in range(k):
            shape = img.ndim * [1]
            shape[dim] = 2 * int(math.pow(2, nbh)) + 1
            footprint = scipy.zeros(shape, dtype=scipy.bool_)
            idx = img.ndim * [0]
            footprint[tuple(idx)] = 1
            idx[dim] = -1
            footprint[tuple(idx)] = 1
            generic_filter(A[nbh], lambda x: abs(x[0]- x[1]), output=E[dim][nbh], footprint=footprint, mode='mirror')
            
    # compute for each voxel the k value, that lead to the highest E (regardless in which direction)
    S = scipy.zeros_like(img)

    for x in range(S.shape[0]):
        for y in range(S.shape[1]):
            for z in range(S.shape[2]):
                maxv = 0 
                maxk = 0
                for dim in range(img.ndim):
                    for nbh in range(k):
                        if E[dim][nbh][x,y,z] > maxv:
                            maxv = E[dim][nbh][x,y,z]
                            maxk = nbh
                S[x,y,z] = maxk
    
    return S
Example #5
    def binSpec(self, nbins, median=True):
        """Write this function at some point
        median or mean bin the spectrum
        """
        def medianFootprint(values):
            return np.median(values)

        def meanFootprint(values):
            return np.mean(values)

        footprint = np.ones((nbins, ))
        if median:
            self.wave = spfilt.generic_filter(self.wave,
                                              medianFootprint,
                                              footprint=footprint,
                                              mode='reflect',
                                              cval=0.)
            self.flux = spfilt.generic_filter(self.flux,
                                              medianFootprint,
                                              footprint=footprint,
                                              mode='reflect',
                                              cval=0.)
        else:
            self.wave = spfilt.generic_filter(self.wave,
                                              meanFootprint,
                                              footprint=footprint,
                                              mode='reflect',
                                              cval=0.)
            self.flux = spfilt.generic_filter(self.flux,
                                              meanFootprint,
                                              footprint=footprint,
                                              mode='reflect',
                                              cval=0.)
Example #6
File: mathutil.py  Project: rkdarst/fitz
def extrema(input, halfwidth=2, excludeedges=False):
    """Find local extrema

    halfwidth: 


    Returns two lists (minima, maxima)
    """
    from scipy.ndimage.filters import generic_filter
    from scipy.ndimage import extrema
    """Return all local maxima/minima."""
    minima = collections.defaultdict(int)
    maxima = collections.defaultdict(int)
    inputLength = len(input)-1
    #i = [ 0 ]
    def f(array, il):
        i = il[0]
        # This function returns (mini, maxi), the indexes of the max
        # and min of the array.  They return the *lowest* possible
        # such indexes, thus the max(0, ...) below.
        min_, max_, mini, maxi = extrema(array)
        #print array, (min_, max_, mini, maxi), mini[0]+i-halfwidth, maxi[0]+i-halfwidth
        minima[max(0, mini[0]+i-halfwidth)] += 1
        maxima[       maxi[0]+i-halfwidth ] += 1
        il[0] += 1
        return 0
    #from fitz import interactnow
    generic_filter(input, f, size=2*halfwidth+1, mode='nearest',
                   extra_arguments=([0],) )
    if excludeedges:
        minima.pop(0, None) ; minima.pop(len(input)-1, None)
        maxima.pop(0, None) ; maxima.pop(len(input)-1, None)
    return list(sorted(k for k,v in minima.items() if v > (halfwidth))), \
           list(sorted(k for k,v in maxima.items() if v > (halfwidth)))
Example #7
File: lab4.py  Project: athuras/tools
def lee_filter(x, variance_threshold, window=5):
    '''THE POWER OF ARRAY BROADCASTING COMPELS YOU'''
    u = generic_filter(x, lambda q: q.mean(), size=window)
    s2 = generic_filter(x, lambda q: q.var(), size=window)

    K = 1. - variance_threshold / s2  # A little wasteful, but fine for now...
    K[variance_threshold > s2] = 0.
    return K * x + (1. - K) * u
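
The two generic_filter passes above call a Python function per pixel; the same local mean and variance can be obtained from box filters. A sketch of that shortcut (not part of the original project, same default 'reflect' boundary handling):

import numpy as np
from scipy.ndimage import uniform_filter

def lee_filter_fast(x, variance_threshold, window=5):
    # Local mean and variance from two box filters: var = E[x^2] - (E[x])^2.
    u = uniform_filter(x, size=window)
    s2 = uniform_filter(x * x, size=window) - u * u
    K = 1. - variance_threshold / s2
    K[variance_threshold > s2] = 0.
    return K * x + (1. - K) * u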
Example #8
def median_clip(array, sigma, num_neighbor=5):
    """Sigma clipping for detecting outlying values in 2d array. If the parameter
    'neighbor' is True the clipping can be performed in a local patch around 
    each pixel, whose size depends on 'neighbor' parameter.
    
    Parameters
    ----------
    array : array_like 
        Input 2d array, image.
    sigma : float 
        Value for sigma
    num_neighbor : int
        The side of the square window around each pixel where the sigma and 
        median are calculated. 
        
    Returns
    -------
    array where outliers have been replaced by the median values
    
    Adapted from the VIP package sigma_filter method:
    https://github.com/vortex-exoplanet/VIP
    @carlgogo
    Modified by J. Olofsson to use the footprint instead of the size in generic filter.
    """
    assert type(num_neighbor) is int, "num_neighbor should be an int"
    if not array.ndim == 2:
        raise TypeError("Input array is not two dimensional (frame)\n")
    if num_neighbor % 2 == 0:
        raise ValueError("num_neighbor should be an odd integer\n")
    # footprint will exclude the central pixel
    footprint = np.ones(shape=(num_neighbor, num_neighbor))
    footprint[num_neighbor // 2, num_neighbor // 2] = 0.

    values = array.copy()
    median = generic_filter(array,
                            function=np.median,
                            size=(num_neighbor, num_neighbor),
                            mode="mirror")
    stdev = generic_filter(array,
                           function=np.std,
                           size=(num_neighbor, num_neighbor),
                           mode="mirror")
    # median = generic_filter(array, function=np.median, footprint = footprint, mode="mirror")
    # stdev = generic_filter(array, function=np.std, footprint = footprint, mode="mirror")

    good1 = values > (median - sigma * stdev)
    good2 = values < (median + sigma * stdev)

    bad1 = values < (median - sigma * stdev)
    bad2 = values > (median + sigma * stdev)

    bad = np.where(bad1 | bad2)  # deviating px indices in either bad1 or bad2
    values[bad] = median[
        bad]  # replace the bad pixels by the median in the box
    del median
    del stdev
    return values
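
A hypothetical usage sketch for the function above (the frame and the injected outliers are made up):

import numpy as np

frame = np.random.normal(loc=100., scale=5., size=(64, 64))
frame[10, 10] = 1e4    # hot pixel
frame[40, 25] = -1e4   # dead pixel
cleaned = median_clip(frame, sigma=5, num_neighbor=5)  # outliers -> local median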
Example #9
def two_step_esmod(params, nRow, nCol):
    """Two stage ES model where first an intermediary layer is calculated as the 
    same as the simple ES model but only if the land cover type is that required. 
    The ES value is then calculated as the mean of the intermediary layer within 
    the desired window.
    
    Parameters
    ----------
    params: pd.series
        Values of each of the parameters (p, h, w - window size, r - replicate)
    nRow: int
        Number of rows in the landscape
    nCol: int
        Number of columns in the landscape    
    """
    p = params['p']
    h = params['h']
    w = int(params['w'])
    r = params['r']
    out = mpd_prop(nRow, nCol, h, p)

    # define the ES function
    # output the first layer (e.g. the one where focal patch must be = 1)
    wdw1 = generic_filter(out, np.mean, w, mode='wrap')
    # multiply wdw by out to set the zeros to zero
    wdw1 = wdw1 * out
    # this is currently set to take the relationship as 1:1 (i.e. 10% natural cover = 10% ecosystem service)
    # will need to add in the relationship to create ES surface at a later date
    es1_mean = np.mean(wdw1)
    es1_total = np.sum(wdw1)
    es1_var = np.var(
        wdw1
    )  # NB this is population variance, try to work out if this is right, if sample variance needed use ddof = 1

    # output the second layer (e.g. the one which takes in the first as input and works on the opposite land cover = 0)
    wdw2 = generic_filter(wdw1, np.mean, w, mode='wrap')
    wdw2 = wdw2 * (1 - out)
    es2_mean = np.mean(wdw2)
    es2_total = np.sum(wdw2)
    es2_var = np.var(
        wdw2
    )  # NB this is population variance, try to work out if this is right, if sample variance needed use ddof = 1

    return pd.Series({
        'p_val': p,
        'h_val': h,
        'rep': r,
        'window_size': w,
        'es1_mean': es1_mean,
        'es1_total': es1_total,
        'es1_var': es1_var,
        'es2_mean': es2_mean,
        'es2_total': es2_total,
        'es2_var': es2_var
    })
Example #10
def clip_array(array, lower_sigma, upper_sigma, out_good=False, neighbor=False,
              num_neighbor=None):
    """Sigma clipping for detecting outlying values in 2d array. If the parameter
    'neighbor' is True the clipping can be performed in a local patch around 
    each pixel, whose size depends on 'neighbor' parameter.
    
    Parameters
    ----------
    array : array_like 
        Input 2d array, image.
    lower_sigma : float 
        Value for sigma, lower boundary.
    upper_sigma : float 
        Value for sigma, upper boundary.
    out_good : {'False','True'}, optional
        For choosing different outputs.
    neighbor : {'False','True'}, optional
        For clipping over the median of the contiguous pixels.
    num_neighbor : int, optional
        The side of the square window around each pixel where the sigma and 
        median are calculated. 
        
    Returns
    -------
    good : array_like
        If out_good argument is true, returns the indices of not-outlying px.
    bad : array_like 
        If out_good argument is false, returns a vector with the outlier px.
    
    """
    if not array.ndim == 2:
        raise TypeError("Input array is not two dimensional (frame)\n")

    values = array.copy()
    if neighbor and num_neighbor:
        median = generic_filter(array, function=np.median, 
                                size=(num_neighbor,num_neighbor), mode="mirror")
        sigma = generic_filter(array, function=np.std, 
                                size=(num_neighbor,num_neighbor), mode="mirror")
    else:
        median = np.median(values)
        sigma = values.std()
        
    good1 = values > (median - lower_sigma * sigma) 
    good2 = values < (median + upper_sigma * sigma)
    bad1 = values < (median - lower_sigma * sigma)
    bad2 = values > (median + upper_sigma * sigma)
    
    if out_good:
        good = np.where(good1 & good2)                                          # normal px indices in both good1 and good2
        return good
    else:
        bad = np.where(bad1 | bad2)                                             # deviating px indices in either bad1 or bad2
        return bad
Example #11
def farmland_birds_sim(ls_size, h, w1, w2, npp):
    """Function to predict species richness of farmland bird indicator species using amount 
    and heterogeneity of habitat at the appropriate scale. Currently the proportions for each 
    landscape are fixed, only the spatial autocorrelation changes
    
    Parameters
    ----------
    ls_size: int
        Side length of the landscape
    h: float
        spatial autocorrelation of the landscape
    w1: int
        size of the amount window
    w2: int
        size of the heterogeneity window
    npp: float
        level of npp for the landscape
        
    Returns
    -------
    out: array
        Predicted species richness of farmland bird indicator species
    """

    # create landscape with four land cover types 1-3 are habitat, 0 is not
    ls = mpd(ls_size, ls_size, h)
    ls = classifyArray(ls, [0.25, 0.25, 0.25, 0.25])
    binary = (ls != 0) * 1
    # for each cell, calculate habitat amount within the window
    ls_amount = generic_filter(
        ls, lc_prop, w1, mode='wrap', extra_keywords={'lc': [1, 2, 3]
                                                      }) * binary
    # for each cell, calculate the habitat heterogeneity within the window
    ls_hetero = generic_filter(ls,
                               shannon,
                               w2,
                               mode='wrap',
                               extra_keywords={'lc': [1, 2, 3]})
    # multiply the amount*hetero*npp
    out = ls_amount * ls_hetero * npp
    return pd.Series({
        'ls_size': ls_size,
        'h_val': h,
        'w1': w1,
        'w2': w2,
        'npp': npp,
        'es_mean': np.mean(out),
        'es_var': np.var(out)
    })
Example #12
def temporal_interp(data_one_variable):
    footprint = np.zeros((3, 1, 1))
    footprint[[0, 2], 0, 0, ] = 1
    return generic_filter(data_one_variable,
                          np.nanmean,
                          footprint=footprint,
                          mode='nearest')
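
A hypothetical usage sketch (the data cube is made up): a gap at one time step is replaced by the mean of its two temporal neighbours.

import numpy as np

cube = np.arange(12, dtype=float).reshape(3, 2, 2)   # (time, lat, lon)
cube[1, 0, 0] = np.nan
filled = np.where(np.isnan(cube), temporal_interp(cube), cube)  # gap -> 4.0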
Example #13
File: io.py  Project: gitter-badger/iuvs
def find_scaling_window(to_filter, size=None):
    if size is None:
        x = max(to_filter.shape[0] // 5, 2)
        y = max(to_filter.shape[1] // 10, 1)
        size = (x, y)
    filtered = generic_filter(to_filter, np.median, size=size,
                              mode='constant', cval=to_filter.max() * 100)
    min_spa, min_spe = np.unravel_index(filtered.argmin(), to_filter.shape)
    spa1 = min_spa - size[0] // 2
    if spa1 < 0:
        spa1 = 0
    spa2 = spa1 + size[0]
    if spa2 > to_filter.shape[0]:
        spa1 = to_filter.shape[0] - size[0]
        spa2 = to_filter.shape[0]
    spe1 = min_spe - size[1] // 2
    if spe1 < 0:
        spe1 = 0
    spe2 = spe1 + size[1]
    if spe2 > to_filter.shape[1]:
        spe1 = to_filter.shape[1] - size[1]
        spe2 = to_filter.shape[1]
    spa_slice = slice(spa1, spa2)
    spe_slice = slice(spe1, spe2)
    return (spa_slice, spe_slice)
Example #14
def spatial_mean(data_one_variable):
    footprint = np.zeros((1, 3, 3))
    footprint[0, :, :] = 1
    footprint[0, 1, 1] = 0
    return generic_filter(
        data_one_variable, np.nanmean, footprint=footprint, mode='wrap'
    )  #TODO: we need wrap for longitude and nearest for latitude
Example #15
def gapfill_interpolation(data, n=5):
    """
    Impute (i.e. gapfill) data by infilling the spatiotemporal mean for each
    variable independently. A cube of side length n (default 5 pixels)
    surrounding each missing value is taken, and the mean of all non-missing
    values in this cube is computed and used to infill the missing value. If a
    point cannot be filled because all the values in the neighbourhood are
    missing as well, the point is filled by the local monthly climatology. Any
    remaining missing points are filled by the local temporal mean or, if not
    available, the global mean of the variable.

    Parameters
    ----------
    data: xarray dataarray, with coordinates time, latitude, longitude and variable

    n: size of the cube in any spatiotemporal dimension

    Returns
    ----------
    imputed_data: data of the same shape as input data, where all values that were not missing are still the same and all values that were originally missing are imputed via spatiotemporal mean
    """

    # infill spatiotemporal mean
    footprint = np.ones((1, n, n, n))
    tmp = generic_filter(data, mean_fct, footprint=footprint, mode='nearest')
    data = data.fillna(tmp)
    log_fracmis(data, 'after filtering')

    # infill dayofyear mean
    seasonality = dataxr_lost.groupby('time.dayofyear').mean(dim='time')
    data = data.groupby('time.dayofyear').fillna(seasonality).drop('dayofyear')
    log_fracmis(data, 'after seasonality')

    # infill variable mean
    temporal_mean = data.mean(dim=('time'))
    variable_mean = data.mean(dim=('time', 'latitude', 'longitude'))
    data = data.fillna(temporal_mean)
    data = data.fillna(variable_mean)
    log_fracmis(data, 'after mean impute')

    return data
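
A minimal sketch of the first (spatiotemporal-mean) step on a plain numpy array; `mean_fct` and `log_fracmis` come from elsewhere in the original module and are not reproduced, so np.nanmean stands in here.

import numpy as np
from scipy.ndimage import generic_filter

def spatiotemporal_mean_fill(values, n=5):
    # One axis (e.g. the variable axis) is left untouched, matching the
    # (1, n, n, n) footprint above; gaps become the mean of the non-missing
    # values in the surrounding cube (all-NaN cubes simply stay NaN here).
    footprint = np.ones((1, n, n, n))
    filled = generic_filter(values, np.nanmean, footprint=footprint,
                            mode='nearest')
    return np.where(np.isnan(values), filled, values)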
Example #16
def compute(valid):
    ''' Get me files '''
    prob = None
    for hr in range(-15,0):
        ts = valid + datetime.timedelta(hours=hr)
        fn = ts.strftime("hrrr.ref.%Y%m%d%H00.grib2")
        if not os.path.isfile(fn):
            continue

        grbs = pygrib.open(fn)
        gs = grbs.select(level=1000,forecastTime=(-1 * hr * 60))
        ref = generic_filter(gs[0]['values'], np.max, size=10)
        if prob is None:
            lats, lons = gs[0].latlons()
            prob = np.zeros( np.shape(ref) )
        
        prob = np.where(ref > 29, prob+1, prob)

    prob = np.ma.array(prob / 15. * 100.)
    prob.mask = np.ma.where(prob < 1, True, False)    
    
    m = MapPlot(sector='iowa',
                title='HRRR Composite Forecast 4 PM 20 May 2014 30+ dbZ Reflectivity',
                subtitle='frequency of previous 15 model runs all valid at %s, ~15km smoothed' % (valid.astimezone(pytz.timezone("America/Chicago")).strftime("%-d %b %Y %I:%M %p %Z"),))

    m.pcolormesh(lons, lats, prob, np.arange(0,101,10), units='%',
                     clip_on=False)
    m.map.drawcounties()
    m.postprocess(filename='test.ps')
    m.close()
Example #17
def test_percentileFilter1d():
    for mode in modes.keys():
        size = np.random.randint(1000, 2000, [
            1,
        ]).item()
        cShape = NumCpp.Shape(1, size)
        cArray = NumCpp.NdArray(cShape)
        data = np.random.randint(100, 1000, [
            size,
        ]).astype(np.double)
        cArray.setArray(data)
        kernalSize = 0
        while kernalSize % 2 == 0:
            kernalSize = np.random.randint(5, 15)
        percentile = np.random.randint(0, 101, [
            1,
        ]).item()
        constantValue = np.random.randint(0, 5, [
            1,
        ]).item()  # only actually needed for the constant boundary condition
        dataOutC = NumCpp.percentileFilter1d(
            cArray, kernalSize, percentile, modes[mode],
            constantValue).getNumpyArray().flatten()
        dataOutPy = filters.generic_filter(data,
                                           np.percentile,
                                           footprint=np.ones([
                                               kernalSize,
                                           ]),
                                           mode=mode,
                                           cval=constantValue,
                                           extra_arguments=(percentile, ))
        assert np.array_equal(np.round(dataOutC, 7), np.round(dataOutPy, 7))
Example #18
def gamma(sample, reference, delta_d, delta_D, resolution):
    ''' Compute the gamma deviation distribution between a sample and reference distribution,
        based on composite evaluation of Distance-To-Agreement (DTA) and Dose Deviation (DT).

        :param sample: sample distribution (ndarray)
        :param reference: reference distribution (ndarray)
        :param delta_d: distance-to-agreement criterion, i.e. search window limit in
            the same units as `resolution` (float)
        :param delta_D: dose difference criterion, i.e. maximum passable deviation between
            distributions (float)
        :param resolution: resolution of each axis of the distributions (tuple, or scalar for 1D)
        :return: evaluated gamma distribution (ndarray, same dimensions as input distributions)
    '''
    kernel = gammaKernel(delta_d, resolution)
    assert sample.ndim == reference.ndim == kernel.ndim, \
        '`sample` and `reference` dimensions must equal `kernel` dimensions'
    assert sample.shape == reference.shape, \
        '`sample` and `reference` arrays must have the same shape'

    # Save kernel footprint and flatten kernel for passing into generic_filter
    footprint = np.ones_like(kernel)
    kernel = kernel.flatten()

    # Compute squared dose deviations, normalized to dose difference criterion
    normalized_dose_devs = (reference - sample)**2 / delta_D**2

    # Move the DTA penalty kernel over the normalized dose deviation values and search
    # for the minimum of the sum between the kernel and the values under it.
    # This is the point of closest agreement.
    gamma_dist = generic_filter(normalized_dose_devs,
                                lambda x: np.minimum.reduce(x + kernel),
                                footprint=footprint)

    # return Euclidean distance
    return np.sqrt(gamma_dist)
Example #19
def test_uniformFilter1d():
    for mode in modes.keys():
        size = np.random.randint(1000, 2000, [
            1,
        ]).item()
        cShape = NumCpp.Shape(1, size)
        cArray = NumCpp.NdArray(cShape)
        data = np.random.randint(100, 1000, [
            size,
        ]).astype(float)
        cArray.setArray(data)
        kernalSize = 0
        while kernalSize % 2 == 0:
            kernalSize = np.random.randint(5, 15)
        constantValue = np.random.randint(0, 5, [
            1,
        ]).item()  # only actually needed for the constant boundary condition
        dataOutC = NumCpp.uniformFilter1d(
            cArray, kernalSize, modes[mode],
            constantValue).getNumpyArray().flatten()
        dataOutPy = filters.generic_filter(data,
                                           np.mean,
                                           footprint=np.ones([
                                               kernalSize,
                                           ]),
                                           mode=mode,
                                           cval=constantValue)
        assert np.array_equal(dataOutC, dataOutPy)
Example #20
    def _StatisticalNaturalness(self, L_ldr, win=11):
        phat1 = 4.4
        phat2 = 10.1
        muhat = 115.94
        sigmahat = 27.99
        u = np.mean(L_ldr)

        # moving window standard deviation using reflected image
        if self.original:
            W, H = L_ldr.shape
            w_extra = (11 - W % 11)
            h_extra = (11 - H % 11)
            # zero padding to simulate matlab's behaviour
            if w_extra > 0 or h_extra > 0:
                test = np.pad(L_ldr,
                              pad_width=((0, w_extra), (0, h_extra)),
                              mode='constant')
            else:
                test = L_ldr
            # block view with fixed block size, like in the original article
            view = view_as_blocks(test, block_shape=(11, 11))
            sig = np.mean(np.std(view, axis=(-1, -2)))
        else:
            # deviation: moving window with reflected borders
            sig = np.mean(generic_filter(L_ldr, np.std, size=win))

        beta_mode = (phat1 - 1.) / (phat1 + phat2 - 2.)
        C_0 = beta.pdf(beta_mode, phat1, phat2)
        C = beta.pdf(sig / 64.29, phat1, phat2)
        pc = C / C_0
        B = norm.pdf(u, muhat, sigmahat)
        B_0 = norm.pdf(muhat, muhat, sigmahat)
        pb = B / B_0
        N = pb * pc
        return N
Example #21
def std_intensity(image, w):
    std_image = filters.generic_filter(image,
                                       function=stddev,
                                       size=3,
                                       mode='constant',
                                       cval=0)
    return std_image
Example #22
def blob_detector_downsample(image):
    threshold = 0.003
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = gray.astype(np.float32) / 255.

    h, w = gray.shape
    scale_space = np.zeros((h, w, level))
    for i in range(level):
        scale = k**i
        scale_gray = transform.resize(gray, (int(h / scale), int(w / scale)),
                                      mode='reflect')
        square_lg = gaussian_laplace(scale_gray, sigma=initial)**2
        scale_space[:, :, i] = transform.resize(square_lg, (h, w),
                                                mode='reflect')

    nms_3d = generic_filter(scale_space, nms, size=(3, 3, 3))
    # nms_3d = rank_nms(scale_space)
    # nms_3d = nms_3d/np.max(nms_3d)
    cx = []
    cy = []
    radius = []

    for i in range(level):
        sigma = initial * k**i
        cx.append(list(np.where(nms_3d[:, :, i] > threshold)[1]))
        cy.append(list(np.where(nms_3d[:, :, i] > threshold)[0]))
        radius.append([np.sqrt(2) * sigma] * len(cx[i]))

    cx = np.concatenate(cx)
    cy = np.concatenate(cy)
    radius = np.concatenate(radius)

    return gray, cx, cy, radius
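
The `nms` callable passed to generic_filter above is defined elsewhere; a plausible stand-in (an assumption, not the original code) is plain 3x3x3 non-maximum suppression:

def nms(window):
    # Keep a response only where it is the maximum of its 3x3x3 neighbourhood
    # (the window arrives flattened, so the centre sits at index window.size // 2).
    centre = window[window.size // 2]
    return centre if centre == window.max() else 0.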
Example #23
def blob_detector_increase(image):
    threshold = 0.01
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = gray.astype(np.float32) / 255.

    h, w = gray.shape
    scale_space = np.zeros((h, w, level))
    # Laplacian Gaussian filter
    for i in range(level):
        sigma = initial * k**i
        scale_normalize = sigma**2 * gaussian_laplace(gray, sigma=sigma)
        scale_space[:, :, i] = scale_normalize**2

    # generic filter
    nms_3d = generic_filter(scale_space, nms, size=(3, 3, 3))
    # nms_3d = rank_nms(scale_space)
    # nms_3d = nms_3d/np.max(nms_3d)
    cx = []
    cy = []
    radius = []

    for i in range(level):
        sigma = initial * k**i
        cx.append(list(np.where(nms_3d[:, :, i] > threshold)[1]))
        cy.append(list(np.where(nms_3d[:, :, i] > threshold)[0]))
        radius.append([np.sqrt(2) * sigma] * len(cx[i]))

    cx = np.concatenate(cx)
    cy = np.concatenate(cy)
    radius = np.concatenate(radius)

    return gray, cx, cy, radius
Example #24
def get_coldest_values(dataObj, matched):
    nobj = NeighbourObj()
    t11 = get_channel_data_from_objectfull_resolution(dataObj, '11', nodata=-9)
    #t11 = np.random.rand(10,10)*10+270
    row, col = np.indices(t11.shape)
    from scipy.ndimage.filters import generic_filter
    flat_index = generic_filter(t11,
                                function=np.argmin,
                                size=5,
                                mode='constant',
                                cval=9999999999999)
    flat_index = np.array(flat_index, dtype=int)
    delta_row, delta_col = np.unravel_index(flat_index, (5,5))
    delta_row = delta_row - 2
    delta_col = delta_col - 2    
    new_row = row+delta_row
    new_col = col+delta_col
    new_row_matched = np.array([new_row[matched['row'][idx], matched['col'][idx]]
                       for idx in range(matched['row'].shape[0])]) 
    new_col_matched = np.array([new_col[matched['row'][idx], matched['col'][idx]]
                       for idx in range(matched['row'].shape[0])]) 
    new_row_col = {'row': new_row_matched, 'col': new_col_matched}
    nobj.coldest_t11=get_channel_data_from_object(dataObj, '11', new_row_col)
    nobj.coldest_t12=get_channel_data_from_object(dataObj, '12', new_row_col)
    nobj.coldest_t37=get_channel_data_from_object(dataObj, '37', new_row_col)
    nobj.coldest_r06=get_channel_data_from_object(dataObj, '06', new_row_col)
    nobj.coldest_r16=get_channel_data_from_object(dataObj, '16', new_row_col)
    nobj.coldest_r09=get_channel_data_from_object(dataObj, '09', new_row_col)
    return nobj
Example #25
def _apply_smoother_at_all_points(input_matrix, weight_matrix):
    """Applies any kind of smoother at all grid points.

    M = number of grid rows
    N = number of grid columns
    m = number of grid rows used for smoothing at each point
    n = number of grid columns used for smoothing at each point

    This method treats all NaN's as zero.

    :param input_matrix: M-by-N numpy array of input data.
    :param weight_matrix: m-by-n numpy array of weights.
    :return: output_matrix: M-by-N numpy array of smoothed input values.
    """

    weight_vector = numpy.reshape(weight_matrix, weight_matrix.size)
    input_matrix[numpy.isnan(input_matrix)] = 0.

    output_matrix = generic_filter(input_matrix,
                                   function=_apply_smoother_at_one_point,
                                   size=(weight_matrix.shape[0],
                                         weight_matrix.shape[1]),
                                   mode='constant',
                                   cval=0.,
                                   extra_arguments=(weight_vector, ))

    output_matrix[numpy.absolute(output_matrix) < TOLERANCE] = numpy.nan
    return output_matrix
Example #26
def _local_maxima(image, diameter, separation, percentile=64):
    "Find local maxima whose brightness is above a given percentile."
    # Find the threshold brightness, representing the given
    # percentile among all NON-ZERO pixels in the image.
    flat = np.ravel(image)
    threshold = stats.scoreatpercentile(flat[flat > 0], percentile)
    # The intersection of the image with its dilation gives local maxima.
    assert image.dtype == np.uint8, "Perform dilation on exact (uint8) data."
    dilation = morphology.grey_dilation(image,
                                        footprint=circular_mask(
                                            diameter, separation))
    maxima = np.where((image == dilation) & (image > threshold))
    if not np.size(maxima) > 0:
        raise ValueError("Bad image! Found zero maxima above the {}"
                         "-percentile threshold at {}.".format(
                             percentile, threshold))
    # Flat peaks, for example, return multiple maxima.
    # Eliminate redundancies within the separation distance.
    maxima_map = np.zeros_like(image)
    maxima_map[maxima] = image[maxima]
    peak_map = filters.generic_filter(maxima_map,
                                      _Cfilters.nullify_secondary_maxima(),
                                      footprint=circular_mask(separation),
                                      mode='constant')
    # Also, do not accept peaks near the edges.
    margin = int(separation) // 2
    peak_map[..., :margin] = 0
    peak_map[..., -margin:] = 0
    peak_map[:margin, ...] = 0
    peak_map[-margin:, ...] = 0
    peaks = np.where(peak_map != 0)
    if not np.size(peaks) > 0:
        raise ValueError("Bad image! All maxima were in the margins.")
    return peaks[1], peaks[0]  # x, y
Example #27
File: peakdet.py  Project: Pica4x6/numina
def find_peaks_indexes(arr, window_width=5, threshold=0.0):
    """Find indexes of peaks in a 1d array.

    Note that window_width must be an odd number. The function imposes that the
    fluxes in the window_width /2 points to the left (and right) of the peak
    decrease monotonously as one moves away from the peak.

    Parameters
    ----------
    arr : 1d numpy array
        Input 1D spectrum.
    window_width : int
        Width of the window where the peak must be found. This number must be
        odd.
    threshold : float
        Minimum signal in the peak (optional).

    Returns
    -------
    ipeaks : 1d numpy array (int)
        Indices of the input array arr in which the peaks have been found.


    """

    if (window_width<3) or (window_width % 2 == 0):
        raise ValueError('Window width must be an odd number and >=3')

    kernel_peak = kernel_peak_function(threshold)
    out = generic_filter(arr, kernel_peak, window_width, mode="reflect")
    result, =  numpy.nonzero(out)

    return filter_array_margins(arr, result, window_width)
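
`kernel_peak_function` lives elsewhere in the project; a simplified, self-contained stand-in (it drops the monotonic-decrease check described in the docstring) looks like this:

import numpy as np
from scipy.ndimage import generic_filter

def simple_peak_kernel(window, threshold=0.0):
    # Flag the centre sample when it is the window maximum and above threshold.
    centre = window[window.size // 2]
    return float(centre > threshold and centre == window.max())

spectrum = np.array([0., 1., 3., 1., 0., 2., 5., 2., 0.])
flags = generic_filter(spectrum, simple_peak_kernel, 5, mode="reflect")
ipeaks, = np.nonzero(flags)   # -> array([2, 6])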
Example #28
def _piecewise_linreg(xyw, window_width=3):
    n = int(np.round((window_width - 1) / 2))

    piece_a = generic_filter(xyw.T, _calc_linreg_wrapper_a, size=(3, window_width), mode='nearest')
    piece_b = generic_filter(xyw.T, _calc_linreg_wrapper_b, size=(3, window_width), mode='nearest')

    # pad array
    piece_a = np.pad(piece_a[1, :], n, 'edge')
    piece_b = np.pad(piece_b[1, :], n, 'edge')

    smooth_a = np.convolve(piece_a, np.ones(window_width) / window_width, mode='valid')
    smooth_b = np.convolve(piece_b, np.ones(window_width) / window_width, mode='valid')

    y_est = smooth_b * xyw[:, 0] + smooth_a

    return y_est
Example #29
File: io.py  Project: michaelaye/iuvs
def find_scaling_window(to_filter, size=None):
    if size is None:
        x = max(to_filter.shape[0] // 5, 2)
        y = max(to_filter.shape[1] // 10, 1)
        size = (x, y)
    filtered = generic_filter(to_filter,
                              np.median,
                              size=size,
                              mode='constant',
                              cval=to_filter.max() * 100)
    min_spa, min_spe = np.unravel_index(filtered.argmin(), to_filter.shape)
    spa1 = min_spa - size[0] // 2
    if spa1 < 0:
        spa1 = 0
    spa2 = spa1 + size[0]
    if spa2 > to_filter.shape[0]:
        spa1 = to_filter.shape[0] - size[0]
        spa2 = to_filter.shape[0]
    spe1 = min_spe - size[1] // 2
    if spe1 < 0:
        spe1 = 0
    spe2 = spe1 + size[1]
    if spe2 > to_filter.shape[1]:
        spe1 = to_filter.shape[1] - size[1]
        spe2 = to_filter.shape[1]
    spa_slice = slice(spa1, spa2)
    spe_slice = slice(spe1, spe2)
    return (spa_slice, spe_slice)
Example #30
    def smooth(self):
        # Return the percentage of smooth areas.
        gray = cv2.cvtColor(self.opened_file_cv2, cv2.COLOR_BGR2GRAY)
        filtered_image = generic_filter(gray, np.std, size=3)
        smooth_area = filtered_image == 0
        percent = np.count_nonzero(smooth_area) / smooth_area.size

        return round(percent, 2)
Example #31
    def _remove_dup_laplace(self, data, mask, size=5):
        laplacian = np.multiply(laplace(data, mode='wrap'), mask)

        return generic_filter(laplacian,
                              self._local_max_laplace,
                              size=size,
                              mode='wrap',
                              extra_keywords={'size': size})
Example #32
 def square_filter(X, f):
     """Scans a square-shaped filter over the input matrix X, applies
     the reducer function f and returns a new matrix with the same
     dimensions of X containing the reduced values.
     """
     footprint = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
     proc = generic_filter(X, f, footprint=footprint, mode='wrap')
     return proc
Example #33
def _vecS2(k, data):
    '''min filter for peak detection.'''

    def func(x):
        return x[k] - 0.5 * (x[:k].mean() + x[k+1:].mean())

    ap = generic_filter(data, func, size=2*k+1)
    return ap
Example #34
def wroll(npfunc, grid, winsize=(20, 20)):
    #    grid = iterabs[0]
    #    npfunc = iterabs[1]

    P, Q = grid.shape
    N, M = winsize
    wroll_ = generic_filter(grid, function=npfunc, size=(M, N))
    wroll = wroll_[M // 2:(M // 2) + P - M + 1, N // 2:(N // 2) + Q - N + 1]
    return wroll
Example #35
    def varFilt(self, im, sz):
        # Mask
        NHOOD = ones([sz, sz])
        r = (sz - 1) // 2
        NHOOD[r, r] = 0

        # Filter
        imvar = generic_filter(im, std, footprint=NHOOD)
        return imvar
Example #36
def two_step_binary_cont(ls_size,
                         p,
                         h1,
                         h2,
                         w1,
                         w2,
                         w3,
                         fp1_same=True,
                         fp2_same=True):
    # note w1 is for the first stage, w2 is the scale at which the output of first
    # stage is important, w3 is for the continuous ls

    # at the moment this function doesn't allow for differing functions on the links
    ls_binary = mpd_prop(ls_size, ls_size, h1, p)
    ls_cont = mpd(ls_size, ls_size, h2)

    w1_out = generic_filter(ls_binary, np.mean, w1, mode='wrap')
    if (fp1_same == True):
        w1_out = w1_out * ls_binary

    w2_out = generic_filter(w1_out, np.mean, w2, mode='wrap')
    if (fp2_same == True):
        w2_out = w2_out * (1 - ls_binary)

    w3_out = generic_filter(ls_cont, np.var, w3,
                            mode='wrap')  # this is the context dependency

    out = w2_out * (
        1 - w3_out
    )  # in this instance, the more variable the continuous surface within the window, the less the effect of the pollinators

    return pd.Series({
        'ls_size': ls_size,
        'p_val': p,
        'h_val1': h1,
        'h_val2': h2,
        'w1': w1,
        'fp1_same': fp1_same,
        'w2': w2,
        'fp2_same': fp2_same,
        'w3': w3,
        'es_mean': np.mean(out),
        'es_var': np.var(out)
    })
Example #37
    def steiner_filter(self):
        """
        Steiner Filter is based on the Steiner Method Steiner et al. (1995) for
         filtering convective rainfall from a radar image.

        This method follow 3 rules, which should be applied to every point the
         grid:

        1. Intensity: Any point above 40dBZ is a Convective Point
        2. Peak: Any point above a threshold (a) over the mean local rain
            (11 km radius circle) is a Convective Point
        3. Neighbor: Every point around a certain radius (b) around a Convective
            point is also a Convective Point

        (a) is the threshold
        (b) is given by the static method convective_radius, in this class

        """

        data = np.copy(self.data)

        # On a numpy mask, ``True`` means masked, while ``False`` means unmasked
        # thus all masks should be ``True`` where pixels should be removed.

        # 1. Intensity
        # This rule may be removed eventually after fixing 2. Peak

        rule_intensity = data < 40.0  # type: np.ndarray

        # 2. Peak
        # TODO improve performance here
        rule_peak = np.ones(rule_intensity.shape, dtype=rule_intensity.dtype)
        generic_filter(data, self._above_background, output=rule_peak, size=23)

        self.steiner_mask = np.logical_and(rule_peak, rule_intensity)
        data[self.steiner_mask] = self.mask_value

        # 3. Neighbor
        # Probably shouldn't need to make a logical and with the mask, as it
        # should be at least equal to it.

        rule_neighbor = -self._surrounding_area(data)
        self.steiner_mask = np.logical_and(self.steiner_mask, rule_neighbor)
        self.steiner_mask = np.logical_or(self.steiner_mask, self.mask)
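
A minimal, self-contained sketch of rule 2 (the peak test) only; the threshold, the 1 km grid spacing and hence the 11-pixel radius are illustrative assumptions, and `_above_background` from the class is not reproduced.

import numpy as np
from scipy.ndimage import generic_filter

radius_px = 11                      # 11 km background radius on a 1 km grid
yy, xx = np.mgrid[-radius_px:radius_px + 1, -radius_px:radius_px + 1]
footprint = (xx ** 2 + yy ** 2) <= radius_px ** 2

def peak_rule(dbz, threshold=10.0):
    # Convective where reflectivity exceeds the local background mean
    # by more than the threshold.
    background = generic_filter(dbz, np.mean, footprint=footprint,
                                mode='reflect')
    return dbz - background >= threshold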
Example #38
File: cappi.py  Project: calliban/RLREN
    def steiner_filter(self):
        """
        Steiner Filter is based on the Steiner Method Steiner et al. (1995) for
         filtering convective rainfall from a radar image.

        This method follow 3 rules, which should be applied to every point the
         grid:

        1. Intensity: Any point above 40dBZ is a Convective Point
        2. Peak: Any point above a threshold (a) over the mean local rain
            (11 km radius circle) is a Convective Point
        3. Neighbor: Every point around a certain radius (b) around a Convective
            point is also a Convective Point

        (a) is the threshold
        (b) is given by the static method convective_radius, in this class

        """

        data = np.copy(self.data)

        # On a numpy mask, ``True`` means masked, while ``False`` means unmasked
        # thus all masks should be ``True`` where pixels should be removed.

        # 1. Intensity
        # This rule may be removed eventually after fixing 2. Peak

        rule_intensity = data < 40.0  # type: np.ndarray

        # 2. Peak
        # TODO improve performance here
        rule_peak = np.ones(rule_intensity.shape, dtype=rule_intensity.dtype)
        generic_filter(data, self._above_background, output=rule_peak, size=23)

        self.steiner_mask = np.logical_and(rule_peak, rule_intensity)
        data[self.steiner_mask] = self.mask_value

        # 3. Neighbor
        # Probably shouldn't need to make a logical and with the mask, as it
        # should be at least equal to it.

        rule_neighbor = -self._surrounding_area(data)
        self.steiner_mask = np.logical_and(self.steiner_mask, rule_neighbor)
        self.steiner_mask = np.logical_or(self.steiner_mask, self.mask)
Example #39
 def plus_filter(X, f):
     """Scans a +-shaped filter over the input matrix X, applies
     the reducer function f and returns a new matrix with the same
     dimensions of X containing the reduced values.
     This kind of technique is useful for a tonne of stuff, and
     very efficient.
     """
     footprint = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
     proc = generic_filter(X, f, footprint=footprint, mode='wrap')
     return proc
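
A hypothetical usage of the helper above, smoothing with the mean over the +-shaped (von Neumann) neighbourhood:

import numpy as np

X = np.arange(16, dtype=float).reshape(4, 4)
smoothed = plus_filter(X, np.mean)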
Example #40
def Sobel(img, threshold=50):
    def kernel(p):
        return (np.abs((p[0] + 2 * p[1] + p[2]) - (p[6] + 2 * p[7] + p[8])) +
                np.abs((p[2] + 2 * p[5] + p[8]) - (p[0] + 2 * p[3] + p[6])))

    res = generic_filter(img, kernel, (3, 3))
    res = np.where(res > threshold, 255, 0).astype(res.dtype)
    return res
Example #41
 def make_distanses(self):
     self.matrix_1 = self.matrix

     for i in range(50):
         self.matrix_1 = filters.generic_filter(self.matrix_1, self.erode, 3)
         self.distances += self.matrix_1

     self.conturs[self.distances == 12] = 1

     self.conturs[self.conturs == 0] = np.nan
     self.distances_perf = np.round(
         self.distances * np.random.uniform(0.3, 4, self.distances.shape))
Example #42
def collapse_to_grid(E_coded,grid_time,grid_frequency,num_codes,
                     do_grid_subsampling =True):
    full_grid = np.dstack(tuple(
            generic_filter(E_coded,lambda x: np.any(x==i).astype(np.uint8),
                           size = (grid_time,
                                   grid_frequency))
            for i in range(num_codes)))
    if do_grid_subsampling:
        return full_grid[::grid_time,::grid_frequency]
    else:
        return full_grid
Example #43
def gamma_evaluation(sample, reference, distance, threshold, resolution, signed=False):
    """
    Distance to Agreement between a sample and reference using gamma evaluation.

    Parameters
    ----------
    sample : ndarray
        Sample dataset, simulation output for example
    reference : ndarray
        Reference dataset, what the `sample` dataset is expected to be
    distance : int
        Search window limit in the same units as `resolution`
    threshold : float
        The maximum passable deviation in `sample` and `reference`
    resolution : tuple
        The resolution of each axis of `sample` and `reference`
    signed : bool
        Returns signed gamma for identifying hot/cold fails

    Returns
    -------
    gamma_map : ndarray
        g == 0     (pass) the sample and reference pixels are equal
        0 < g <= 1 (pass) agreement within distance and threshold
        g > 1      (fail) no agreement 
    """
    
    ndim = len(resolution)
    assert sample.ndim == reference.ndim == ndim, \
        "`sample` and `reference` dimensions must equal `resolution` length"
    assert sample.shape == reference.shape, \
        "`sample` and `reference` must have the same shape"
    
    resolution = numpy.array(resolution).reshape([ndim] + [1] * ndim)
    slices = [slice(-ceil(distance/r), ceil(distance/r)+1) for r in resolution]
    
    kernel = numpy.mgrid[slices] * resolution
    kernel = numpy.sum(kernel**2, axis=0) # Distance squared from central voxel.
    kernel[numpy.where(numpy.sqrt(kernel) > distance)] = numpy.inf
    kernel = kernel / distance**2
    
    footprint = numpy.ones_like(kernel)
    kernel = kernel.flatten()
    values = (reference - sample)**2 / (threshold)**2
    
    gamma_map = generic_filter(values, \
        lambda vals: numpy.minimum.reduce(vals + kernel), footprint=footprint)
    gamma_map = numpy.sqrt(gamma_map)

    if (signed):
        return gamma_map * numpy.sign(sample - reference)
    else:
        return gamma_map
Example #44
    def distance_map(self):
        self.d_map = np.zeros(self.data.shape)
        self.matrix = np.copy(self.data)

        self.d_map += self.matrix
        for i in range(50):
            self.matrix = filters.generic_filter(self.matrix, self.erode, size=(3, 3, 3, 1))
            self.d_map += self.matrix

            if np.sum(self.matrix) == 0:
                break

            print(i)
Example #45
def ifcb_segment(img):
    Y = img_as_float(img)
    # step 1. local variance
    Yv = rescale_intensity(generic_filter(Y, np.var, footprint=disk(3)))
    # step 2. threshold local variance, aggressively
    Ye = Yv > (threshold_otsu(Yv) / 2.)
    # step 3. dark areas
    Yt = Y < threshold_otsu(Y)
    thin_blob = Ye | Yt
    # step 4. morphological reconstruction
    seed = np.copy(thin_blob)
    seed[1:-1,1:-1] = 1
    four = disk(1).astype(bool)
    return reconstruction(seed,thin_blob,method='erosion',selem=four)
Example #46
def dilate_raster(array, kernel_size=3, threshold=120):
    '''smooth the borders of areas exceeding the given threshold,
    so that these areas shrink by half the kernel-size along their borders'''
    thresh_exceeded = array >= threshold
    ret = np.where(thresh_exceeded, np.nan, array)
    o = kernel_size // 2
    filtered = filters.generic_filter(
        ret, np.nanmedian, (kernel_size, kernel_size), origin=(o, o),
        mode='reflect')
    a = array.copy()
    thresh_exceeded_and_not_nan = thresh_exceeded & ~ np.isnan(filtered) 
    fill_values = filtered[thresh_exceeded]
    a[thresh_exceeded_and_not_nan] = filtered[thresh_exceeded_and_not_nan]
    return a
Example #47
    def _local_minima_filter(self, input, size, threshold=0.):

        if size % 2 != 1:
            raise ValueError("size must be an odd number")

        half_size = size//2

        output = generic_filter(input, self._local_minima_func, size=size, \
                mode='wrap', extra_keywords={'size': size, 'threshold': threshold})

        # Mask the extreme latitudes
        output[:half_size,:] = 0.
        output[-half_size:,:] = 0.

        return output
Example #48
File: mesh.py  Project: HullLab/AutoMorph
def extractMesh(settings,obj):
    '''
    Extracts semi-3D mesh using ImageJ-generated heightmap.
    '''
    # Start timer on object processing
    start = time.time()

    # Resize height map and focused RGB image, and invert height map if macro = True
    hmap_resized,rgb_resized = imagePreparation(settings,obj)

    # Get 2D binary outline map for background removal
    edge,image_clean,image_clean_unfilled = extractoutline.extractOutline(settings,obj.name,rgb_resized,True)

    # Remove background in height map by performing element-wise multiplication with binary outline map from run2dmorph
    hmap_no_BG = hmap_resized * (image_clean / 255)

    # Convert pixel greyscale values from background-pruned height map to real-world heights
    heights = pixelToHeight(hmap_no_BG,settings['slices'],settings['zstep'])

    # Run neighborhood filter to remove outliers
    k = settings['kernel_outlierfilter'] # Kernel size
    filtered = generic_filter(heights,outlierFilter,(k,k))

    # Get number of pixels on each z-level
    counts = countPerLevel(filtered)

    # Delete outliers from filtered heights
    no_outliers = deleteOutliers(filtered,counts)

    # Get length, width, and top and bottom heights
    length,width,bottom_height,top_height = getTopBottom(image_clean,no_outliers)

    # If aperture present, set aperture height to average height. Also extract length, width,
    # and top and bottom heights
    final_heights = aperture(image_clean,image_clean_unfilled,no_outliers,bottom_height)

    # Extract mesh from x,y,z point cloud
    z_values,triangulation,triangles,faceColors = meshDelaunay(settings,final_heights)

    # Write 3D x,y,z-coordinates file
    writeCoordinates(obj,triangulation)

    end = time.time()
    time_elapsed = end - start
    print('\tINFO: Time elapsed: {0:.3f} seconds\n'.format(time_elapsed))

    return edge,image_clean,triangulation,triangles,faceColors,length,width,bottom_height,top_height
Example #49
def get_warmest_index_old(t11,  matched):
    from scipy.ndimage.filters import generic_filter
    row, col = np.indices(t11.shape)
    flat_index = generic_filter(t11,
                                function=np.argmax,
                                size=5,
                                mode='constant',
                                cval=-9999999999999)
    flat_index = np.array(flat_index, dtype=int)
    delta_row, delta_col = np.unravel_index(flat_index, (5,5))
    delta_row = delta_row - 2
    delta_col = delta_col - 2    
    new_row = row+delta_row
    new_col = col+delta_col
    new_row_matched = np.array([new_row[matched['row'][idx], matched['col'][idx]]
                       for idx in range(matched['row'].shape[0])]) 
    new_col_matched = np.array([new_col[matched['row'][idx], matched['col'][idx]]
                       for idx in range(matched['row'].shape[0])])
    return new_row_matched, new_col_matched
Example #50
File: zr.py  Project: heistermann/wradlib
def _z_to_r_enhanced_mdfilt(z, mode='mirror'):
    """multidimensional version

    assuming the two last dimensions represent a 2-D image
    Uses :func:`scipy:scipy.ndimage.filters.generic_filter` to reduce the number
    of for-loops even more.
    """
    # get the shape of the input
    # dimy = z.shape[-2]
    # dimx = z.shape[-1]

    # calculate the decibel values from the input
    db = decibel(z)

    # set up our output arrays
    r = np.zeros(z.shape)
    size = list(z.shape)
    size[-2:] = [3, 3]
    size[:-2] = [1] * len(size[:-2])
    size = tuple(size)
    si = filters.generic_filter(db, z_to_r_esifilter, size=size, mode=mode)

    gt44 = db > 44.
    r[gt44] = z_to_r(z[gt44], a=77, b=1.9)
    si[gt44] = -1.
    # the same is true for values between 36.5 and 44 dBZ
    bt3644 = (db >= 36.5) & (db <= 44.)
    r[bt3644] = z_to_r(z[bt3644], a=200, b=1.6)
    si[bt3644] = -2.

    si1 = (si >= 0.)
    si2 = si1 & (si < 3.5)
    si3 = si1 & ~si2 & (si <= 7.5)
    si4 = si > 7.5

    r[si2] = z_to_r(z[si2], a=125, b=1.4)
    r[si3] = z_to_r(z[si3], a=200, b=1.6)
    r[si4] = z_to_r(z[si4], a=320, b=1.4)

    return r, si
Example #51
def stddev(parameters):
    """Calculates the local standard deviation.

    It wraps `scipy.ndimage.filters.generic_filter`. The `footprint`,
    `output`, `mode`, `cval` and `origin` options are not supported.

    Keep in mind that `mode` and `cval` influence the results. In this case
    the default mode is used, `reflect`.

    :param parameters['data'][0]: input array
    :type parameters['data'][0]: numpy.array
    :param parameters['size']: which neighbours to take into account, defaults
                               to (3, 3) a.k.a. numpy.ones((3, 3))
    :type parameters['size']: list

    :return: numpy.array

    """
    data = parameters['data'][0].astype('float')
    size = parameters.get('size', [3, 3])

    return generic_filter(data, standard_deviation, size=tuple(size))
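
The `standard_deviation` callable referenced above is defined elsewhere in the project; a drop-in assumption consistent with the docstring would simply be:

import numpy as np

def standard_deviation(window):
    # Standard deviation of the neighbourhood values handed in by generic_filter.
    return np.std(window)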
Example #52
def find_peaks_indexes(arr, window_width=5, threshold=0.0, fpeak=0):
    """Find indexes of peaks in a 1d array.

    Note that window_width must be an odd number. The function imposes that the
    fluxes in the window_width /2 points to the left (and right) of the peak
    decrease monotonously as one moves away from the peak, except that
    it allows fpeak constant values around the peak.

    Parameters
    ----------
    arr : 1d numpy array
        Input 1D spectrum.
    window_width : int
        Width of the window where the peak must be found. This number must be
        odd.
    threshold : float
        Minimum signal in the peak (optional).
    fpeak: int
        Number of equal values around the peak

    Returns
    -------
    ipeaks : 1d numpy array (int)
        Indices of the input array arr in which the peaks have been found.


    """

    _check_window_width(window_width)

    if fpeak < 0 or fpeak + 1 >= window_width:
        raise ValueError('fpeak must be in the range 0 to window_width - 2')

    kernel_peak = kernel_peak_function(threshold, fpeak)
    out = generic_filter(arr, kernel_peak, window_width, mode="reflect")
    result, =  numpy.nonzero(out)

    return filter_array_margins(arr, result, window_width)
Example #53
File: io.py  Project: gitter-badger/iuvs
def check_scaling_window_finder(l1b, integration):
    to_filter = l1b.get_integration('raw_dn_s', integration)
    x = max(to_filter.shape[0] // 10, 1)
    y = max(to_filter.shape[1] // 10, 1)
    size = (x, y)
    print("Img shape:", to_filter.shape)
    print("Kernel size:", size)

    filtered = generic_filter(to_filter, np.std, size=size,
                              mode='constant', cval=to_filter.max() * 100)
    min_spa, min_spe = np.unravel_index(filtered.argmin(), to_filter.shape)
    print("Minimum:", filtered.min())
    print("Minimum coords", min_spa, min_spe)

    spa1 = min_spa - size[0] // 2
    if spa1 < 0:
        spa1 = 0
    spa2 = spa1 + size[0]
    if spa2 > to_filter.shape[0]:
        spa1 = to_filter.shape[0] - size[0]
        spa2 = to_filter.shape[0]
    print("Spatial:", spa1, spa2)

    spe1 = min_spe - size[1] // 2
    if spe1 < 0:
        spe1 = 0
    spe2 = spe1 + size[1]
    if spe2 > to_filter.shape[1]:
        spe1 = to_filter.shape[1] - size[1]
        spe2 = to_filter.shape[1]
    print("Spectral:", spe1, spe2)

    fig, axes = plt.subplots(nrows=3)
    axes[0].imshow(np.log(to_filter), cmap=mycmap)
    axes[0].add_patch(get_rectangle(to_filter))
    axes[1].imshow(np.log(filtered), cmap=mycmap, vmax=0.1)
    axes[1].add_patch(get_rectangle(to_filter))
    axes[2].hist(filtered[~np.isnan(filtered)].ravel(), bins=100)
Example #54
def automatic_outline_michalis( imgs, o, sl, tp, scalefactor = 1 ):
    
    '''
    outline[0] = head
    outline[1] = tail
    '''
    
    minsl = 0#np.clip( np.min( [ outline[0][-1], outline[-1][-1] ] ), 0, len(imgs) )
    maxsl = 19#np.clip( np.max( [ outline[0][-1], outline[-1][-1] ] ), 0, len(imgs) )
    
    # compute the max over the stack
    img = np.max( imgs[minsl:maxsl+1], 0 )

    # resize the image
    Nbig = img.shape[0]
    Nsmall = img.shape[0] // scalefactor
    smallimg = img.reshape([Nsmall, Nbig // Nsmall, Nsmall, Nbig // Nsmall]).mean(3).mean(1)
    
    # compute local variance with generic filter function
    varimg = filters.generic_filter( smallimg, np.var, size=2 ).astype('uint16')
    
    thr = threshold_otsu( varimg )    
    labels = measure.label( morphology.binary_dilation( varimg>thr, disk(2) ) )
    labels = remove_small_objects( labels , 12 )

    pos = [ [ scalefactor*i[ 'centroid' ][1], scalefactor*i[ 'centroid' ][0] ] for i in measure.regionprops(labels) ]

    sortedpos = np.array([o[0]])
    for i in np.arange(len(pos)):
        idx = find_closest_point(sortedpos[-1],pos)
        newsl = minsl + list(imgs[minsl:,pos[idx][1],pos[idx][0]]).index(np.max(imgs[minsl:,pos[idx][1],pos[idx][0]]))
        pos[idx].append(newsl)
        sortedpos = np.append( sortedpos, np.array( [ pos[idx] ] ), axis=0 )
        pos.pop(idx)
    sortedpos = np.append( sortedpos, np.array( [o[-1]] ), axis=0 )
    
    return sortedpos
Example #55
def sharpness(image, edge_threshold=0.0001, w=5, t=1.0):
    """
    Code implemented by the following article:
    Sharpness Estimation for Document and Scene Images,
    Kumar, Chen, Doermann
    """
    # Median filtering
    w_size = (w * 2) + 1
    image = util.img_as_float(image)
    image_m = util.img_as_float(median(image, mph.square(3)))

    # Window functions
    def dom_func(window):
        # Difference of differences of the median-filtered image (DoM)
        # across the 5-sample window; generic_filter passes it flattened.
        return abs(
            abs(window[4] - window[2]) - abs(window[2] - window[0])
        )

    def contrast_func(window):
        # Sum of absolute differences between neighbouring samples.
        s = 0.0
        for i in xrange(0, len(window) - 1):
            s += abs(window[i] - window[i+1])
        return s

    # Delta DoM in horizontal direction
    dom_x_values = generic_filter(image_m, dom_func,
        size=(1, 5),
        mode='reflect')
    # Delta DoM in vertical direction
    dom_y_values = generic_filter(image_m, dom_func,
        size=(5, 1),
        mode='reflect')

    dom_x = generic_filter(
        dom_x_values, lambda w: sum(w),
        size=(1, w_size), mode='reflect')
    dom_y = generic_filter(
        dom_y_values, lambda w: sum(w),
        size=(w_size, 1), mode='reflect')


    edges_x = vsobel(image)
    # Normalize
    edges_x *= (1.0 / edges_x.max())
    edges_x_pixels = len(edges_x[edges_x > edge_threshold].ravel())

    edges_y = hsobel(image)
    # Normalize
    edges_y *= (1.0 / edges_y.max())
    edges_y_pixels = len(edges_y[edges_y > edge_threshold].ravel())

    # Contrast in horizontal direction
    contrast_x = generic_filter(image, contrast_func,
        size=(1, w_size + 1),
        mode='reflect')
    # Contrast in vertical direction
    contrast_y = generic_filter(image, contrast_func,
        size=(w_size + 1, 1),
        mode='reflect')

    sharpness_x = dom_x / contrast_x
    sharpness_y = dom_y / contrast_y

    sharp_x_pixels = len(np.where(
        sharpness_x[edges_x > edge_threshold] > t
    )[0])
    sharp_y_pixels = len(np.where(
        sharpness_y[edges_y > edge_threshold] > t
    )[0])

    if edges_x_pixels > 0:
        rx = (float(sharp_x_pixels) / edges_x_pixels)
    else:
        rx = 1

    if edges_y_pixels > 0:
        ry = (float(sharp_y_pixels) / edges_y_pixels)
    else:
        ry = 1

    final_estimate = np.sqrt(
        (rx ** 2) + (ry ** 2)
    )    
    return final_estimate
Example #56
def rag_mean_color(image, labels, connectivity=2, mode='distance',
                   sigma=255.0):
    """Compute the Region Adjacency Graph using mean colors.

    Given an image and its initial segmentation, this method constructs the
    corresponding Region Adjacency Graph (RAG). Each node in the RAG
    represents a set of pixels within `image` with the same label in `labels`.
    The weight between two adjacent regions represents how similar or
    dissimilar two regions are depending on the `mode` parameter.

    Parameters
    ----------
    image : ndarray, shape(M, N, [..., P,] 3)
        Input image.
    labels : ndarray, shape(M, N, [..., P,])
        The labelled image. This should have one dimension less than
        `image`. If `image` has dimensions `(M, N, 3)` `labels` should have
         dimensions `(M, N)`.
    connectivity : int, optional
        Pixels with a squared distance less than `connectivity` from each other
        are considered adjacent. It can range from 1 to `labels.ndim`. Its
        behavior is the same as `connectivity` parameter in
        `scipy.ndimage.generate_binary_structure`.
    mode : {'distance', 'similarity'}, optional
        The strategy to assign edge weights.

            'distance' : The weight between two adjacent regions is
            :math:`|c_1 - c_2|`, where :math:`c_1` and :math:`c_2` are the mean
            colors of the two regions. It represents the Euclidean distance in
            their average color.

            'similarity' : The weight between two adjacent regions is
            :math:`e^{-d^2/\sigma}`, where :math:`d=|c_1 - c_2|` and
            :math:`c_1` and :math:`c_2` are the mean colors of the two regions.
            It represents how similar two regions are.
    sigma : float, optional
        Used for computation when `mode` is "similarity". It governs how
        close to each other two colors should be, for their corresponding edge
        weight to be significant. A very large value of `sigma` could make
        any two colors behave as though they were similar.

    Returns
    -------
    out : RAG
        The region adjacency graph.

    Examples
    --------
    >>> from skimage import data, segmentation
    >>> from skimage.future import graph
    >>> img = data.astronaut()
    >>> labels = segmentation.slic(img)
    >>> rag = graph.rag_mean_color(img, labels)

    References
    ----------
    .. [1] Alain Tremeau and Philippe Colantoni
           "Regions Adjacency Graph Applied To Color Image Segmentation"
           http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.11.5274

    """
    graph = RAG()

    # The footprint is constructed in such a way that the first
    # element in the array being passed to _add_edge_filter is
    # the central value.
    fp = nd.generate_binary_structure(labels.ndim, connectivity)
    for d in range(fp.ndim):
        fp = fp.swapaxes(0, d)
        fp[0, ...] = 0
        fp = fp.swapaxes(0, d)

    # For example
    # if labels.ndim = 2 and connectivity = 1
    # fp = [[0,0,0],
    #       [0,1,1],
    #       [0,1,0]]
    #
    # if labels.ndim = 2 and connectivity = 2
    # fp = [[0,0,0],
    #       [0,1,1],
    #       [0,1,1]]

    filters.generic_filter(
        labels,
        function=_add_edge_filter,
        footprint=fp,
        mode='nearest',
        output=np.zeros(labels.shape, dtype=np.uint8),
        extra_arguments=(graph,))

    for n in graph:
        graph.node[n].update({'labels': [n],
                              'pixel count': 0,
                              'total color': np.array([0, 0, 0],
                                                      dtype=np.double)})

    for index in np.ndindex(labels.shape):
        current = labels[index]
        graph.node[current]['pixel count'] += 1
        graph.node[current]['total color'] += image[index]

    for n in graph:
        graph.node[n]['mean color'] = (graph.node[n]['total color'] /
                                       graph.node[n]['pixel count'])

    for x, y, d in graph.edges_iter(data=True):
        diff = graph.node[x]['mean color'] - graph.node[y]['mean color']
        diff = np.linalg.norm(diff)
        if mode == 'similarity':
            d['weight'] = math.e ** (-(diff ** 2) / sigma)
        elif mode == 'distance':
            d['weight'] = diff
        else:
            raise ValueError("The mode '%s' is not recognised" % mode)

    return graph
Example #57
File: vcull.py Project: low-sky/m83
from astropy.io import fits
from astropy.table import Table
import astropy.wcs as wcs
import numpy as np
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.filters import generic_filter
t = Table.read('../measurements/m83.co10.K_props_clfind.fits')

hdu = fits.open('../data/NGC_5236_RO_MOM1:I:HI:wbb2008.fits')
w = wcs.WCS(hdu[0].header)
mom1 = np.squeeze(hdu[0].data)
mom1 = generic_filter(mom1,np.nanmedian,size=(11,11))
x,y,s,v = w.wcs_world2pix(t['XPOS'].data,t['YPOS'].data,np.zeros(len(t)),np.zeros(len(t)),0)

vvals = mom1[y.astype(int), x.astype(int)]
dv = (vvals/1e3-t['VPOS'].data+22.3)
Example #58
def spatial_ave(data):
    # Use a uniform filter to estimate the local background and a
    # generic filter to get the local standard deviation.
    data_unif = filters.uniform_filter(data, size=11)
    data_std = filters.generic_filter(data, np.std, size=11)
    return data_unif, data_std
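
A typical use of the two outputs, assuming `filters` refers to `scipy.ndimage.filters` as in the surrounding snippets and `spatial_ave` from above is in scope, is to flag pixels that deviate strongly from their local background:

import numpy as np
from scipy.ndimage import filters

rng = np.random.default_rng(0)
data = rng.normal(size=(50, 50))
data[25, 25] = 10.0  # inject a single bright outlier

background, local_std = spatial_ave(data)
# Pixels more than 5 local standard deviations away from the local mean.
outliers = np.abs(data - background) > 5 * local_std
print(np.argwhere(outliers))
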
Example #59
import numpy as np
from scipy.ndimage.filters import generic_filter

data = np.arange(100).reshape((10,10))
sz = 3
midpt = (sz * sz) // 2  # centre index of the flattened 3x3 window (= 4)
identity = np.ones((sz,sz))

def cb(arr):
    # The arr passed here is flattened!
    diff = (np.sum( (arr[midpt] - arr)**2, 0))**.5
    return diff
    
res = generic_filter(data, cb, footprint=identity)

print(res)
Example #60
    def _remove_dup_laplace(self, data, mask, size=5):
        laplacian = np.multiply(laplace(data, mode='wrap'), mask)

        return generic_filter(laplacian, self._local_max_laplace, size=size, mode='wrap',
                extra_keywords={'size': size})