Example #1
    def _filter_meshes(self):
        """
        Apply a 2D median filter to the low-resolution 2D mesh,
        including only pixels inside the image at the borders.
        """

        from scipy.ndimage import generic_filter
        try:
            nanmedian_func = np.nanmedian    # numpy >= 1.9
        except AttributeError:    # pragma: no cover
            from scipy.stats import nanmedian
            nanmedian_func = nanmedian

        if self.filter_threshold is None:
            # filter the entire arrays
            self.background_mesh = generic_filter(
                self.background_mesh, nanmedian_func, size=self.filter_size,
                mode='constant', cval=np.nan)
            self.background_rms_mesh = generic_filter(
                self.background_rms_mesh, nanmedian_func,
                size=self.filter_size, mode='constant', cval=np.nan)
        else:
            # selectively filter
            indices = np.nonzero(self.background_mesh > self.filter_threshold)
            self.background_mesh = self._selective_filter(
                self.background_mesh, indices)
            self.background_rms_mesh = self._selective_filter(
                self.background_rms_mesh, indices)

        return
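The key trick in this example is the NaN padding: with mode='constant' and cval=np.nan, np.nanmedian ignores everything outside the image, so border pixels are filtered using only the pixels that actually exist. A minimal self-contained sketch of that idea:

import numpy as np
from scipy.ndimage import generic_filter

mesh = np.arange(25, dtype=float).reshape(5, 5)

# NaN-pad beyond the borders; np.nanmedian skips the padded values,
# so each border pixel's median uses only in-image neighbours.
filtered = generic_filter(mesh, np.nanmedian, size=3,
                          mode='constant', cval=np.nan)
print(filtered)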
Example #2
    def __init__(self, label_image=None, connectivity=1, data=None, **attr):

        super(RAG, self).__init__(data, **attr)
        if self.number_of_nodes() == 0:
            self.max_id = 0
        else:
            self.max_id = max(self.nodes_iter())

        if label_image is not None:
            fp = ndi.generate_binary_structure(label_image.ndim, connectivity)
            # In the next ``ndi.generic_filter`` function, the kwarg
            # ``output`` is used to provide a strided array with a single
            # 64-bit floating point number, to which the function repeatedly
            # writes. This is done because even if we don't care about the
            # output, without this, a float array of the same shape as the
            # input image will be created and that could be expensive in
            # memory consumption.
            ndi.generic_filter(
                label_image,
                function=_add_edge_filter,
                footprint=fp,
                mode='nearest',
                output=as_strided(np.empty((1,), dtype=np.float_),
                                  shape=label_image.shape,
                                  strides=((0,) * label_image.ndim)),
                extra_arguments=(self,))
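A standalone sketch of the zero-stride trick the comment describes: every write generic_filter makes lands on the same single float, so no image-sized output array is ever allocated (the callback here is a stand-in that only produces a side effect):

import numpy as np
from numpy.lib.stride_tricks import as_strided
from scipy import ndimage as ndi

image = np.random.randint(0, 4, (64, 64))
sums = []

def record_only(values):
    sums.append(values.sum())   # the side effect is all we want
    return 0.0

# shape matches the input, but all writes hit the same element
scratch = as_strided(np.empty((1,), dtype=np.float64),
                     shape=image.shape,
                     strides=(0,) * image.ndim)
ndi.generic_filter(image, record_only, size=3, output=scratch)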
Example #3
def width_filter(data, angles=None, FILT_SIZE=5):
    if angles is None:
        #estimate angle from intensity data
        return ndimage.generic_filter(data.astype('f'), width, FILT_SIZE, extra_arguments=genCoords(FILT_SIZE))
    else:
        d = np.concatenate([data[:,:,None], angles[:,:,None]], 2)
        return ndimage.generic_filter(d.astype('f'), width_o, [FILT_SIZE, FILT_SIZE, 2], extra_arguments=genCoords(FILT_SIZE))[:,:,0].squeeze()
Example #4
def prepareInputs(temp_folder, lastoolsPath):

	# lastoolsPath = "C:/lastools/bin/"

	# Run lasground
	# Note: input should be in laszip format
	os.chdir(lastoolsPath)

	print "Running LASground..."

	subprocess.call(["lasground_new", "-i", temp_folder + "/pointcloud.laz","-metro", "-compute_height","-odir", temp_folder + "/", "-o","ground.laz"], stdout=subprocess.PIPE)

	print "Running LASClassify..."

	# Prepare file_list.txt

	# subprocess.call(["lasclassify", "-i", "C:/bertud_temp/ground.laz","-odir", "C:/bertud_temp/", "-o","classified.laz"], stdout=subprocess.PIPE)
	
	# Added fine tuning parameter -planar
	subprocess.call(["lasclassify", "-i", temp_folder + "/ground.laz","-planar","0.15","-odir", temp_folder + "/", "-o","classified.laz"], stdout=subprocess.PIPE)

	print "Running LASGrid for classification raster..."

	# Prepare file_list.txt
	# subprocess.call(["lasgrid", "-i", "C:/bertud_temp/classified.laz","-step","0.5","-classification","-odir", "C:/bertud_temp/", "-o","classified.tif"], stdout=subprocess.PIPE)
	
	# Added fine tuning parameter -subsample 8
	subprocess.call(["lasgrid", "-i", temp_folder + "/classified.laz","-step","0.5","-classification","-subsample","8","-odir", temp_folder + "/", "-o","classified.tif"], stdout=subprocess.PIPE)

	print "Running LASGrid for number of returns raster..."
	subprocess.call(["lasgrid", "-i", temp_folder + "/classified.laz","-step","0.5","-number_returns","-lowest","-subsample","8","-odir", temp_folder + "/", "-o","numret.tif"], stdout=subprocess.PIPE)

	print "Running blast2DEM..."
	
	subprocess.call(["blast2dem", "-i", temp_folder + "/classified.laz", "-first_only","-step","0.5","-elevation","-odir", temp_folder + "/", "-o","dsm.tif"], stdout=subprocess.PIPE)
	subprocess.call(["blast2dem", "-i", temp_folder + "/classified.laz", "-keep_classification","2","-keep_classification","8","-step","0.5","-elevation","-odir", temp_folder + "/", "-o","dtm.tif"], stdout=subprocess.PIPE)

	dsm = io.imread(temp_folder + "/dsm.tif")
	dtm = io.imread(temp_folder + "/dtm.tif")

	# nDSM

	dtm[dtm<0] = 9999
	dsm[dsm<0] = 0

	ndsm = dsm-dtm

	# Revised nDSM generation
	# ndsm[ndsm<2] = 0
	ndsm[ndsm<0] = 0

	io.imsave(temp_folder + "/ndsm.tif",ndsm)
	
	# Slope 

	slope = ndimage.generic_filter(ndsm,slopeFilter,size=3)
	io.imsave(temp_folder + "/slope.tif",slope)

	slopeslope = ndimage.generic_filter(slope,slopeFilter,size=3)
	io.imsave(temp_folder + "/slopeslope.tif",slopeslope)
Example #5
def performSoftMatting(im=None, transmission=None, *args, **kwargs):
    global neighbors

    width, height, depth = im.shape
    windowRadius = 1
    numWindowPixels = 9
    epsilon = 10 ** -8
    _lambda = 10 ** -4

    totalPixels = numWindowPixels ** 2
    
    windowIndicies = np.arange(1, width * height + 1).reshape((width, height), order='F')
    totalElements = totalPixels * (width - 2) * (height - 2)
    xIndicies = np.ones((1, totalElements))
    yIndicies = np.ones((1, totalElements))
    laplacian = np.zeros((1, totalElements))
    count = 0

    neighbors = np.empty((width * height, numWindowPixels))

    footprint = np.array([[1,1,1],
                          [1,1,1],
                          [1,1,1]])

    ndimage.generic_filter(windowIndicies, getWindow, footprint=footprint)

    U = epsilon / numWindowPixels * identity(3)

    for i in range(0 + windowRadius, height - windowRadius):
        for j in range(0 + windowRadius, width - windowRadius):
            print('i', i)
            print('j', j)
            window = im[j - windowRadius: j + windowRadius + 1, i - windowRadius : i + windowRadius + 1, :]

            reshapedWindow = np.reshape(window, (numWindowPixels, 3), order='F')

            diffFromMean = reshapedWindow.T - np.tile(np.mean(reshapedWindow, axis=0).T, (numWindowPixels, 1)).T

            window_covariance = np.dot(diffFromMean, diffFromMean.T) / numWindowPixels

            entry = identity(numWindowPixels) - (1 + np.dot(np.dot(diffFromMean.T, np.linalg.inv(window_covariance + U)), diffFromMean)) / float(numWindowPixels)

            temp = count * totalPixels
            temp2 = count * totalPixels + totalPixels

            iterationNeighbors = np.reshape(np.reshape(neighbors[height * j + i], (3, 3)), (1, numWindowPixels), order='F')

            x = np.tile(iterationNeighbors, (numWindowPixels, 1))
            y = (x.T).flatten(order='F')

            xIndicies[0][temp : temp2] = x.flatten(order='F')
            yIndicies[0][temp : temp2] = y
            laplacian[0][temp : temp2] = entry.flatten(order='F')
            count += 1

    L = csc_matrix((laplacian.flatten(), (xIndicies.flatten(), yIndicies.flatten())))
    tBar = np.append(np.reshape(transmission.T, (width * height, 1)), [0])

    T = spla.spsolve(L + _lambda * identity(L.shape[0]), tBar * _lambda) 
    return np.reshape(np.delete(T, len(T) - 1), transmission.shape, order='F')
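getWindow is not shown; from how neighbors is consumed later, it evidently records each pixel's 3x3 window of linear indices during the generic_filter pass. A sketch under that assumption (the 1-based indexing mirrors windowIndicies above):

def getWindow(window):
    # window is the flattened 3x3 patch of 1-based linear pixel
    # indices; remember it, keyed by the centre pixel's index
    center = int(window[4])
    neighbors[center - 1] = window
    return 0.0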
Example #6
def checkTile(tile,title=''):
    nRow,nCol= np.shape(tile)
    freqList = np.sort(np.reshape(tile,(-1,)))

    nearestDist = neighborDistClass(tile)
    minDists = ndimage.generic_filter(tile, nearestDist.minFilter, footprint=footprint,mode='constant',cval=np.inf)

    nearestDist = neighborDistClass(tile)
    minDists2 = ndimage.generic_filter(tile, nearestDist.minFilter, footprint=secondNeighborFootprint,mode='constant',cval=np.inf)

    nearestDist = neighborDistClass(tile)
    minDistsWrap = ndimage.generic_filter(tile, nearestDist.minFilter, footprint=sideFootprint,mode='wrap',cval=np.inf)

    nearestDist = neighborDistClass(tile)
    maxDists = ndimage.generic_filter(tile, nearestDist.maxFilter, footprint=footprint,mode='reflect')

    plotArray(title=title,image=tile,normNSigma=2.,origin='upper')
    plotArray(title='{} min dists wrap'.format(title),image=minDistsWrap,normNSigma=2.,origin='upper')
    plotArray(title='{} min dists'.format(title),image=minDists,normNSigma=2.,origin='upper')
    plotArray(title='{} min dists 2nd nearest'.format(title),image=minDists2,normNSigma=2.,origin='upper')
    plotArray(title='{} max dists'.format(title),image=maxDists,normNSigma=2.,origin='upper')

#    def f(thing):
#        thing.axes.hist(minDists.ravel(),bins=100)
#        thing.axes.set_title('{} min dists'.format(title))
#        
#    pop = PopUp(plotFunc=f)

    def f(thing):
        thing.axes.hist(minDists2.ravel(),bins=100)
        thing.axes.set_title('{} second neighbor min dists'.format(title))
        
    pop = PopUp(plotFunc=f)
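neighborDistClass and the footprints are defined elsewhere; conceptually, minFilter returns the smallest absolute difference between each pixel and its footprint neighbours, which is why a fresh instance is created before every call (the class tracks position statefully). A self-contained sketch of that idea, with illustrative names:

import numpy as np
from scipy import ndimage

footprint = np.array([[1, 1, 1],
                      [1, 0, 1],
                      [1, 1, 1]])   # neighbours only, centre excluded

class NeighborDist(object):
    def __init__(self, tile):
        # generic_filter visits pixels in C order, matching ravel()
        self.flat = iter(tile.ravel())
    def minFilter(self, neighbours):
        center = next(self.flat)
        return np.min(np.abs(neighbours - center))

tile = np.random.rand(8, 8)
dist = NeighborDist(tile)
minDists = ndimage.generic_filter(tile, dist.minFilter,
                                  footprint=footprint,
                                  mode='constant', cval=np.inf)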
Example #7
def test_ticket_701():
    # Test generic filter sizes
    arr = np.arange(4).reshape((2,2))
    func = lambda x: np.min(x)
    res = sndi.generic_filter(arr, func, size=(1,1))
    # The following raises an error unless ticket 701 is fixed
    res2 = sndi.generic_filter(arr, func, size=1)
    assert_equal(res, res2)
Example #8
def get_windowed_vals(x, k=3):
    nrows, ncols = x.shape
    targets = np.concatenate([np.zeros(k - 1, dtype=int), x["target"], np.zeros(k - 1, dtype=int)])
    preds = np.concatenate([np.zeros(k - 1, dtype=int), x["pred"], np.zeros(k - 1, dtype=int)])
    wtargets = generic_filter(targets, np.max, size=(k,), mode="constant")
    wpreds = generic_filter(preds, np.max, size=(k,), mode="constant")
    starts = range(-k, nrows + 1)
    ends = range(nrows + k + 1)
    wvals = pd.DataFrame({"start": starts, "target": wtargets, "pred": wpreds}, index=[ends])
    wvals["conv"] = x["conv"].iloc[0]

    return wvals
Example #9
def currents_function(ax, data_file, bmap, key_ax, time_index, downsample_ratio):
    def compute_average(array):
        avg = numpy.average(array)
        return numpy.nan if avg > 10**3 else avg

    print "Currents Downsample Ratio:", downsample_ratio

    currents_u = data_file.variables['u'][time_index][39]
    currents_v = data_file.variables['v'][time_index][39]
    rho_mask = get_rho_mask(data_file)

    # average nearby points to align grid, and add the edge column/row so it's the right size.
    #-------------------------------------------------------------------------
    right_column = currents_u[:, -1:]
    currents_u_adjusted = ndimage.generic_filter(numpy.hstack((currents_u, right_column)),
                                                 compute_average, footprint=[[1], [1]], mode='reflect')
    bottom_row = currents_v[-1:, :]
    currents_v_adjusted = ndimage.generic_filter(numpy.vstack((currents_v, bottom_row)),
                                                 compute_average, footprint=[[1], [1]], mode='reflect')

    # zoom
    #-------------------------------------------------------------------------
    u_zoomed = crop_and_downsample(currents_u_adjusted, downsample_ratio)
    v_zoomed = crop_and_downsample(currents_v_adjusted, downsample_ratio)
    rho_mask[rho_mask == 1] = numpy.nan
    rho_mask_zoomed = crop_and_downsample(rho_mask, downsample_ratio)
    longs = data_file.variables['lon_rho'][:]
    lats = data_file.variables['lat_rho'][:]

    longs_zoomed = crop_and_downsample(longs, downsample_ratio, False)
    lats_zoomed = crop_and_downsample(lats, downsample_ratio, False)

    u_zoomed[rho_mask_zoomed == 1] = numpy.nan
    v_zoomed[rho_mask_zoomed == 1] = numpy.nan

    x, y = bmap(longs_zoomed, lats_zoomed)

    bmap.drawmapboundary(linewidth=0.0, ax=ax)

    overlay = bmap.quiver(x, y, u_zoomed, v_zoomed, ax=ax, color='black', units='inches',
                          scale=10.0, headwidth=2, headlength=3,
                          headaxislength=2.5, minlength=0.5, minshaft=.9)

    # Multiplying .5, 1, and 2 by .5144 is converting from knots to m/s
    #-------------------------------------------------------------------------
    quiverkey = key_ax.quiverkey(overlay, .95, .4, 0.5*.5144, ".5 knots", labelpos='S', labelcolor='white',
                                 color='white', labelsep=.5, coordinates='axes')
    quiverkey1 = key_ax.quiverkey(overlay, 3.75, .4, 1*.5144, "1 knot", labelpos='S', labelcolor='white',
                                  color='white', labelsep=.5, coordinates='axes')
    quiverkey2 = key_ax.quiverkey(overlay, 6.5, .4, 2*.5144, "2 knots", labelpos='S', labelcolor='white',
                                  color='white', labelsep=.5, coordinates='axes')
    key_ax.set_axis_off()
Example #10
def rag_solidity(labels, connectivity=2):

    graph = RAG()

    # The footprint is constructed in such a way that the first
    # element in the array being passed to _add_edge_filter is
    # the central value.
    fp = ndi.generate_binary_structure(labels.ndim, connectivity)
    for d in range(fp.ndim):
        fp = fp.swapaxes(0, d)
        fp[0, ...] = 0
        fp = fp.swapaxes(0, d)

    # For example
    # if labels.ndim = 2 and connectivity = 1
    # fp = [[0,0,0],
    #       [0,1,1],
    #       [0,1,0]]
    #
    # if labels.ndim = 2 and connectivity = 2
    # fp = [[0,0,0],
    #       [0,1,1],
    #       [0,1,1]]

    ndi.generic_filter(
        labels,
        function=_add_edge_filter,
        footprint=fp,
        mode='nearest',
        output=np.zeros(labels.shape, dtype=np.uint8),
        extra_arguments=(graph,))

    # remove bg_label
    # graph.remove_node(-1)
    graph.remove_node(0)

    for n in graph:
        mask = (labels == n)
        solidity = 1. * mask.sum() / convex_hull_image(mask).sum()
        graph.node[n].update({'labels': [n],
                              'solidity': solidity,
                              'mask': mask})

    for x, y, d in graph.edges_iter(data=True):
        new_mask = np.logical_or(graph.node[x]['mask'], graph.node[y]['mask'])
        new_solidity = 1. * new_mask.sum() / convex_hull_image(new_mask).sum()
        org_solidity = np.mean([graph.node[x]['solidity'],
                                graph.node[y]['solidity']])
        d['weight'] = org_solidity / new_solidity

    return graph
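The claim in the footprint comment can be checked directly: after the zeroing loop, only the centre and its 'forward' neighbours survive, so the centre pixel is the first value generic_filter hands to _add_edge_filter. A quick sketch (connectivity = 1 shown):

import numpy as np
from scipy import ndimage as ndi

fp = ndi.generate_binary_structure(2, 1)
for d in range(fp.ndim):
    fp = fp.swapaxes(0, d)
    fp[0, ...] = 0
    fp = fp.swapaxes(0, d)
print(fp.astype(int))
# -> [[0 0 0]
#     [0 1 1]
#     [0 1 0]]   the first footprint element (C order) is the centre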
Example #11
def texture(gray_img, ksize, threshold, offset=3, texture_method='dissimilarity', borders='nearest',
            max_value=255):
    """Creates a binary image from a grayscale image using skimage texture calculation for thresholding.
    This function is quite slow.

    Inputs:
    gray_img       = Grayscale image data
    ksize          = Kernel size for texture measure calculation
    threshold      = Threshold value (0-255)
    offset         = Distance offsets
    texture_method = Feature of a grey level co-occurrence matrix, either
                     'contrast', 'dissimilarity', 'homogeneity', 'ASM', 'energy',
                     or 'correlation'. For equations of different features see
                     scikit-image.
    borders        = How the array borders are handled, either 'reflect',
                     'constant', 'nearest', 'mirror', or 'wrap'
    max_value      = Value to apply above threshold (usually 255 = white)

    Returns:
    bin_img        = Thresholded, binary image

    :param gray_img: numpy.ndarray
    :param ksize: int
    :param threshold: int
    :param offset: int
    :param texture_method: str
    :param borders: str
    :param max_value: int
    :return bin_img: numpy.ndarray
    """

    # Function that calculates the texture of a kernel
    def calc_texture(inputs):
        inputs = np.reshape(a=inputs, newshape=[ksize, ksize])
        inputs = inputs.astype(np.uint8)
        # Greycomatrix takes image, distance offset, angles (in radians), symmetric, and normed
        # http://scikit-image.org/docs/dev/api/skimage.feature.html#skimage.feature.greycomatrix
        glcm = greycomatrix(inputs, [offset], [0], 256, symmetric=True, normed=True)
        diss = greycoprops(glcm, texture_method)[0, 0]
        return diss

    # Make an array the same size as the original image
    output = np.zeros(gray_img.shape, dtype=gray_img.dtype)

    # Apply the texture function over the whole image
    generic_filter(gray_img, calc_texture, size=ksize, output=output, mode=borders)

    # Threshold so higher texture measurements stand out
    bin_img = binary(gray_img=output, threshold=threshold, max_value=max_value, object_type='light')
    return bin_img
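A short usage sketch of the kernel on its own (synthetic image; note that newer scikit-image releases spell the functions graycomatrix/graycoprops):

import numpy as np
from scipy.ndimage import generic_filter
from skimage.feature import graycomatrix, graycoprops

ksize, offset = 5, 3

def calc_texture(inputs):
    patch = inputs.reshape(ksize, ksize).astype(np.uint8)
    glcm = graycomatrix(patch, [offset], [0], 256, symmetric=True, normed=True)
    return graycoprops(glcm, 'dissimilarity')[0, 0]

gray_img = (np.random.rand(32, 32) * 255).astype(float)
texture_map = generic_filter(gray_img, calc_texture, size=ksize, mode='nearest')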
Example #12
    def check(j):
        func = FILTER2D_FUNCTIONS[j]

        im = np.ones((20, 20))
        im[:10,:10] = 0
        footprint = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
        footprint_size = np.count_nonzero(footprint)
        weights = np.ones(footprint_size)/footprint_size

        res = ndimage.generic_filter(im, func(weights),
                                     footprint=footprint)
        std = ndimage.generic_filter(im, filter2d, footprint=footprint,
                                     extra_arguments=(weights,))
        assert_allclose(res, std, err_msg="#{} failed".format(j))
Example #13
def simulate_fire(grid_size, prob_tree, prob_burning, prob_lightning,
                  prob_immune, t):
    grids = []
    grid = init_grid(grid_size, prob_tree, prob_burning)
    grids.append(grid)
    for i in range(t):
        new_grid = np.zeros_like(grid)
        ndimage.generic_filter(grids[-1], spread, size=3, mode="constant",
                               output=new_grid,
                               # these are passed to spread
                               extra_arguments=(prob_immune,
                                                prob_lightning))
        grids.append(new_grid.copy())
    return grids
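spread is not included in the snippet; given the call, it receives the flattened 3x3 neighbourhood plus the two probabilities. A sketch of one plausible rule set (the state encoding 0 = empty, 1 = tree, 2 = burning is a guess, not the original):

import random

EMPTY, TREE, BURNING = 0, 1, 2

def spread(neighbourhood, prob_immune, prob_lightning):
    cell = neighbourhood[4]          # centre of the flattened 3x3 window
    if cell == BURNING:
        return EMPTY                 # a burning tree burns out in one step
    if cell == TREE:
        ignited = BURNING in neighbourhood or random.random() < prob_lightning
        if ignited and random.random() >= prob_immune:
            return BURNING
    return cell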
Example #14
def test_generic_filter():
    def filter2d(footprint_elements, weights):
        return (weights*footprint_elements).sum()

    im = np.ones((20, 20))
    im[:10,:10] = 0
    footprint = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    footprint_size = np.count_nonzero(footprint)
    weights = np.ones(footprint_size)/footprint_size
    for mod in MODULES:
        res = ndimage.generic_filter(im, mod.filter2d(weights),
                                     footprint=footprint)
        std = ndimage.generic_filter(im, filter2d, footprint=footprint,
                                     extra_arguments=(weights,))
        assert_allclose(res, std, err_msg="{} failed".format(mod.__name__))
Example #15
def lnlike(modelpatch,data,sig_smooth,sig_L2,sig_one,w_L2):
    """
    Return the negative log-likelihood given a pixel patch across a
    given set of data patches, weighted by regularization priors.
    Uniform noise case.
    """
    
    # Likelihood given current psf model
    lnlike = 0.0
    for ii in range(data.npatches):
        patch = np.ravel(data.patches[ii])
        flux  = np.dot(modelpatch.T,patch)/np.dot(modelpatch.T,modelpatch)
        model = modelpatch*flux
        lnlike += np.sum(0.5*(patch-model) ** 2
                         / data.bkg_sigmas[ii]**2. + \
                         0.5 * np.log(data.bkg_sigmas[ii]**2.))

    # Smoothness constraint
    if sig_smooth!=0:
            filt = np.array([[False,True,False],
                             [True,True,True],
                             [False,True,False]])
            nearest = ndimage.generic_filter(np.reshape(modelpatch,data.patchshape),
                                     sq_nearest, footprint=filt)
            lnlike  += np.sum(nearest) * sig_smooth

    # L2 norm
    if sig_L2!=0:
        lnlike += np.sum((modelpatch*w_L2)**2.) * sig_L2

    # PSF total ~ 1
    if sig_one!=0:
        lnlike += (np.sum(modelpatch)-1)**2. * sig_one

    return lnlike
Example #16
    def _filter_meshes(self, data_low_res):
        """
        Apply a 2d median filter to the low-resolution background map,
        including only pixels inside the image at the borders.
        """

        from scipy.ndimage import generic_filter
        try:
            nanmedian_func = np.nanmedian    # numpy >= 1.9
        except AttributeError:
            from scipy.stats import nanmedian
            nanmedian_func = nanmedian

        if self.filter_threshold is None:
            return generic_filter(data_low_res, nanmedian_func,
                                  size=self.filter_shape, mode='constant',
                                  cval=np.nan)
        else:
            data_out = np.copy(data_low_res)
            for i, j in zip(*np.nonzero(data_low_res >
                                        self.filter_threshold)):
                yfs, xfs = self.filter_shape
                hyfs, hxfs = yfs // 2, xfs // 2
                y0, y1 = max(i - hyfs, 0), min(i - hyfs + yfs,
                                               data_low_res.shape[0])
                x0, x1 = max(j - hxfs, 0), min(j - hxfs + xfs,
                                               data_low_res.shape[1])
                data_out[i, j] = np.median(data_low_res[y0:y1, x0:x1])
            return data_out
Example #17
def solve(Z, start, goal):
    Z = 1 - Z
    G = np.zeros(Z.shape)
    G[start] = 1

    # We iterate until value at exit is > 0. This requires the maze
    # to have a solution or it will be stuck in the loop.
    def diffuse(Z, gamma=0.99):
        return max(gamma*Z[0], gamma*Z[1], Z[2], gamma*Z[3], gamma*Z[4])

    G_gamma = np.empty_like(G)
    while G[goal] == 0.0:
        G = Z * generic_filter(G, diffuse, footprint=[[0, 1, 0],
                                                      [1, 1, 1],
                                                      [0, 1, 0]])
    
    # Descent gradient to find shortest path from entrance to exit
    y, x = goal
    dirs = (0,-1), (0,+1), (-1,0), (+1,0)
    P = []
    while (x, y) != start:
        P.append((y,x))
        neighbours = [-1, -1, -1, -1]
        if x > 0:            neighbours[0] = G[y, x-1]
        if x < G.shape[1]-1: neighbours[1] = G[y, x+1]
        if y > 0:            neighbours[2] = G[y-1, x]
        if y < G.shape[0]-1: neighbours[3] = G[y+1, x]
        a = np.argmax(neighbours)
        x, y  = x + dirs[a][1], y + dirs[a][0]
    P.append((y,x))
    return P
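A tiny usage sketch (the maze layout is made up; in the input, 0 marks a free cell and 1 a wall, matching the Z = 1 - Z inversion above):

import numpy as np

Z = np.zeros((5, 7))   # 0 = free cell
Z[1:4, 3] = 1          # 1 = wall
path = solve(Z, start=(0, 0), goal=(4, 6))
print(path)            # (row, col) steps from the goal back to the start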
Example #18
def computeDailyMean(dicoBand,nbBandByDay,typeData):

    def meanCalc(values):
        return np.nanmean(values)

    mean={}
    footprint = np.array([[0,1,0],
                          [1,0,1],
                          [0,1,0]])
    
    items = list(dicoBand.items())
    for i in range(len(items) // nbBandByDay):
        maxRange = nbBandByDay + i * nbBandByDay
        # we skip the last band, which corresponds to 00h-03h
        for j in range(i * nbBandByDay, maxRange):
            if "array" in locals():
                array = array + items[j][1]
                np.putmask(items[j][1], items[j][1] == 0, 0)
                mask = mask + (items[j][1] > 0).astype(int)
            else:
                array = items[j][1]
                np.putmask(items[j][1], items[j][1] == 0, 0)
                mask = (items[j][1] > 0).astype(int)

        mean[i]=array
        del array

        # using the nanmean function would be much simpler

        mean[i]=mean[i]/mask
        indices = np.where(np.isnan(mean[i]))
        results = ndimage.generic_filter(mean[i], meanCalc, footprint=footprint)
        for row, col in zip(*indices):
            mean[i][row,col] = results[row,col]    
    
    return mean
Example #19
def background_variance_filter(data, bbox):
    """
    Determine the background variance for each pixel from a box with size of
    bbox.

    Parameters
    ----------
    data : `~numpy.ndarray`
        Data to measure background variance

    bbox :  int
        Box size for calculating background variance

    Raises
    ------
    ValueError
        A value error is raised if bbox is less than 1

    Returns
    -------
    background : `~numpy.ndarray` or `~numpy.ma.MaskedArray`
        An array with the measured background variance in each pixel

    """
    # Check to make sure the background box is an appropriate size
    if bbox < 1:
        raise ValueError('bbox must be at least 1')

    return ndimage.generic_filter(data, sigma_func, size=(bbox, bbox))
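sigma_func is defined elsewhere in the package; any per-window scale estimator fits here. A sketch using a robust MAD-based sigma (an illustration, not the package's actual sigma_func):

import numpy as np
from scipy import ndimage

def sigma_func(values):
    # robust standard deviation via the median absolute deviation
    return 1.4826 * np.median(np.abs(values - np.median(values)))

data = np.random.normal(size=(64, 64))
variance_map = ndimage.generic_filter(data, sigma_func, size=(5, 5))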
Example #20
def next_seq(curr, cnr=False):
  old = curr
  footprint = np.array([[1,1,1],
                        [1,0,1],
                        [1,1,1]])

  sums = ndimage.generic_filter(curr, sum, footprint=footprint,
                              mode='constant', cval=0)
  curr = curr.flatten()
  sums = sums.flatten()

  for i in range(len(curr)):
    if curr[i] == 1:
      if sums[i] != 2 and sums[i] != 3:
        curr[i] = 0
    else:
      if sums[i] == 3:
        curr[i] = 1
  curr = curr.reshape(old.shape)
  if cnr:
    curr[0,0] = 1
    curr[0,-1] = 1
    curr[-1,0] = 1
    curr[-1,-1] = 1
  return curr
Example #21
    def _filter_meshes(self, mesh2d):
        """
        Apply a 2D median filter to the low-resolution 2D meshes,
        including only pixels inside the image at the borders.
        """

        from scipy.ndimage import generic_filter
        try:
            nanmedian_func = np.nanmedian    # numpy >= 1.9
        except AttributeError:
            from scipy.stats import nanmedian
            nanmedian_func = nanmedian

        if self.filter_threshold is None:
            return generic_filter(mesh2d, nanmedian_func,
                                  size=self.filter_size, mode='constant',
                                  cval=np.nan)
        else:
            # selectively filter only pixels above ``filter_threshold``
            data_out = np.copy(mesh2d)
            for i, j in zip(*np.nonzero(mesh2d > self.filter_threshold)):
                yfs, xfs = self.filter_size
                hyfs, hxfs = yfs // 2, xfs // 2
                y0, y1 = max(i - hyfs, 0), min(i - hyfs + yfs,
                                               mesh2d.shape[0])
                x0, x1 = max(j - hxfs, 0), min(j - hxfs + xfs,
                                               mesh2d.shape[1])
                data_out[i, j] = np.median(mesh2d[y0:y1, x0:x1])
            return data_out
Example #22
def main():
    #Get the raster from the disk
    rast_data, x_cellsize, y_cellsize = get_array("./data/elevation.tif")

    slope = generic_filter(rast_data, calc_slope, size=3, extra_arguments=(x_cellsize, y_cellsize))

    plt.imshow(ma.masked_equal(slope, -9999), cmap="winter", origin="lower")
    plt.show()
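calc_slope is not shown; given the extra_arguments, it presumably receives the flattened 3x3 window plus the two cell sizes. A sketch of a Horn-style slope computation under that assumption (the -9999 nodata handling is also assumed):

import numpy as np

def calc_slope(window, x_cellsize, y_cellsize):
    w = window.reshape(3, 3)
    if np.any(w == -9999):           # propagate the nodata value
        return -9999
    # Horn's method: third-order finite differences over the 3x3 window
    dzdx = ((w[0, 2] + 2 * w[1, 2] + w[2, 2]) -
            (w[0, 0] + 2 * w[1, 0] + w[2, 0])) / (8 * x_cellsize)
    dzdy = ((w[2, 0] + 2 * w[2, 1] + w[2, 2]) -
            (w[0, 0] + 2 * w[0, 1] + w[0, 2])) / (8 * y_cellsize)
    return np.degrees(np.arctan(np.hypot(dzdx, dzdy)))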
Example #23
def test_mean_std_3d(window_size, mean_kernel):
    image = np.random.rand(40, 40, 40)
    m, s = _mean_std(image, w=window_size)
    expected_m = ndi.convolve(image, mean_kernel, mode='mirror')
    np.testing.assert_allclose(m, expected_m)
    expected_s = ndi.generic_filter(image, np.std, size=window_size,
                                    mode='mirror')
    np.testing.assert_allclose(s, expected_s)
Example #24
def max_except_center_filter(a, size):
    '''Applies the following digital filter to a {-1,0,1}-valued array a: If a is not 1, return
    0; otherwise return the maximum over the window [-(size-1)/2, (size-1)/2] with the central
    element excluded.'''
    middle = (size - 1) // 2
    footprint = list(range(size))
    footprint.remove(middle)
    return ndimage.generic_filter(a, __max_except_center, size, extra_arguments=(footprint, middle),
                                  mode='constant', cval=0)
Example #25
def sf(inpimage,dim,sigma): #FIXME make dim usable
  par = Singleton()
  par._instance = [dim, sigma, True]
  niter = 0
  while par._instance[2] and niter<21:
    par._instance = [dim, sigma, False]
    outimage = ndimage.generic_filter(inpimage, fun, footprint = [[1, 1, 1],[1, 1, 1],[1, 1, 1]])
    niter = niter + 1
  return outimage
Example #26
def test_mean_std_2d():
    image = np.random.rand(256, 256)
    window_size = 11
    m, s = _mean_std(image, w=window_size)
    mean_kernel = np.ones((window_size,) * 2) / window_size**2
    expected_m = ndi.convolve(image, mean_kernel, mode='mirror')
    np.testing.assert_allclose(m, expected_m)
    expected_s = ndi.generic_filter(image, np.std, size=window_size,
                                    mode='mirror')
    np.testing.assert_allclose(s, expected_s)
Example #27
    def __init__(self, label_image=None, connectivity=1, data=None, **attr):

        super(RAG, self).__init__(data, **attr)
        if self.number_of_nodes() == 0:
            self.max_id = 0
        else:
            self.max_id = max(self.nodes_iter())

        if label_image is not None:
            fp = ndi.generate_binary_structure(label_image.ndim, connectivity)
            ndi.generic_filter(
                label_image,
                function=_add_edge_filter,
                footprint=fp,
                mode='nearest',
                output=as_strided(np.empty((1,), dtype=np.float_),
                                  shape=label_image.shape,
                                  strides=((0,) * label_image.ndim)),
                extra_arguments=(self,))
Example #28
    def __next_iteration__(self):
        footprint = np.array([[1,1,1],[1,0,1],[1,1,1]])

        alive_neigbhours = ndimage.generic_filter(self.board, test_func, footprint=footprint)
        # parenthesise the comparisons: ``&`` and ``|`` bind tighter than ``==``
        alive = np.where((self.board == 1) & ((alive_neigbhours == 3) | (alive_neigbhours == 2)))
        born = np.where((self.board == 0) & (alive_neigbhours == 3))

        self.board = np.zeros((10,10),np.uint)
        self.board[alive] = 1
        self.board[born] = 1
Example #29
def conservative_filter(shared_array, i, **kwargs):
    kernel_size = kwargs['kernel_size']
    def _minormax(arr):
        size = len(arr)
        element = size // 2
        # compare the centre against its neighbours only; with the centre
        # included in arr, neither clipping branch could ever trigger
        neighbours = numpy.delete(arr, element)
        if arr[element] > numpy.amax(neighbours):
            return numpy.amax(neighbours)
        elif arr[element] < numpy.amin(neighbours):
            return numpy.amin(neighbours)
        return arr[element]
    shared_array[i] = ndimage.generic_filter(shared_array[i], _minormax, size=kernel_size)
Example #30
def filter_fann(image, ann):
    """
    Run ANN-based filter through image in sliding-window fashion
    """
    def _filter_func(window):
        return ann.run(window.flatten())[0]
    
    window_size = int(np.sqrt(ann.get_num_input()))
    footprint = np.ones((window_size, window_size))
    filtered_image = generic_filter(
        image, _filter_func,
        footprint=footprint)
    return filtered_image
Example #31


##############################
# MASK LOW BACKSCATTER AREAS #
##############################

# We wish to identify water and other low backscatter areas, as well as pixels
# with a high dynamic range of HV backscatter in their 3x3 local area
# (e.g., edges).  These areas will be excluded from the canopy height
# estimation.
hv_power = scene.power('hv')
hv_power[hv_power <= 1e-10] = 1e-10
hv_power = 10*np.log10(hv_power)

hv_localmax = ndimage.generic_filter(hv_power, np.nanmax, size=3)
hv_localmin = ndimage.generic_filter(hv_power, np.nanmin, size=3)

inc = np.degrees(scene.inc[:])
mask = hv_power > -30
mask[(inc < 35)] = hv_power[(inc < 35)] > -15
mask[(inc < 45) & (inc >= 35)] = hv_power[(inc < 45) & (inc >= 35)] > -21
mask[(inc < 55) & (inc >= 45)] = hv_power[(inc < 55) & (inc >= 45)] > -24
mask[(inc >= 55)] = hv_power[(inc >= 55)] > -28

mask_edges = (np.abs(hv_localmax-hv_localmin) < 20)

mask = mask & mask_edges


Example #32
def match_maker(genomes,
                fitness,
                target_pop=None,
                viability=lambda x: True,
                passport=None):
    if target_pop is None:
        target_pop = genomes.shape[0:2]
    _index_shape = list(target_pop)
    #_index_shape.reverse()
    n_categories = fitness.shape[-1]
    n_dims = fitness.ndim

    #all normalization is local
    #do spatial combat & marriage
    #turnover kills off localized low fittness
    # 3x3 -> is center * alpha > median = if true we survive (compute all medians first)
    # on empty -> randomly select mating from neighboring sectors
    footprint = [
        [[0, 0], [1, 1], [0, 0]],
        [[1, 1], [1, 1], [1, 1]],
        [[0, 0], [1, 1], [0, 0]],
    ]

    #print("FITNESS:")
    #print(fitness)
    def relative_survival(a, **kwargs):
        ptp_norm = lambda x: (x - x.min(0)) / x.ptp(0)
        a.shape = (int(a.size / 2), 2)
        mid_index = int(a.shape[0] / 2) + 1
        acc = a[:, 1]
        tim = a[:, 0]
        acc_norm = ptp_norm(acc)
        tim_norm = ptp_norm(tim)
        return acc_norm[
            mid_index] + tim_norm[mid_index] / 3 - 2. / 3  # 1/2+1/(2*3)
        acc_md = np.median(acc)
        tim_md = np.median(tim)
        return (acc[mid_index] -
                acc_md) + (tim[mid_index] - tim_md) * (acc_md / tim_md)

    survival_scores = generic_filter(fitness,
                                     relative_survival,
                                     footprint=footprint,
                                     mode='wrap')
    survival_scores = survival_scores[:, :, ::2]
    survival_scores = np.nan_to_num(survival_scores)
    #print("SURVIVAL_SCORES:")
    #print(survival_scores)

    #for all survival scores < 0 => reproduce
    indexes = itertools.product(*(range(size) for size in _index_shape))
    next_generation = np.zeros(genomes.shape)
    getItem = lambda m, c: m.__getitem__(c)
    for cords in indexes:
        v = getItem(survival_scores, cords)
        if v < 0:
            n_cords = list(neighbor_cords(cords, _index_shape))
            #print("neighbor cords:", cords, '=>', n_cords, target_pop)
            n_s = np.array([getItem(survival_scores, nc) for nc in n_cords])
            #print("neighbor scores:", n_s)
            #roulette-wheel selection among the neighbors
            parent_cords = list(roullette(n_cords, n_s, k=2))
            pair = list(map(lambda c: getItem(genomes, c), parent_cords))
            #TODO passport: t,x,y = [i,j], [k,l] #tracks genetic heritage, can answer where's the DNA from?
            if passport is not None:
                passport[cords] = parent_cords
            #TODO compute stress
            stress = rnd.uniform(0, 2)
            g = offspring(pair, stress=stress)
            #print("New Offspring:", g)
        else:
            g = getItem(genomes, cords)
        next_generation.__setitem__(cords, g)
    return next_generation
Example #33
def mode_filter(img):
    return ndimage.generic_filter(img, modal, size=5)
Example #34
                n = find_neighbor(tiles, row[-1], "right", seen)
                row.append(n)
                seen.add(n.idx)

            if idx_row < ndim-1:
                n = find_neighbor(tiles, row[0], "down", seen)
                display[idx_row+1].append(n)
                seen.add(n.idx)
    except:
        continue
    break

display2 = np.concatenate([np.concatenate([t.data[1:-1, 1:-1] for t in row], axis=1) for row in display])

nessi = np.array([list("                  # "), 
                  list("#    ##    ##    ###"),
                  list(" #  #  #  #  #  #   ")])
nessi = (nessi == "#").astype(np.uint8)

def f(part):
    part = np.reshape(part, nessi.shape)
    for y, row in enumerate(part):
        for x, pix in enumerate(row):
            if nessi[y][x] == 1 and pix == 0:
                return 0
    return 1

found = max([np.sum(generic_filter(img, f, nessi.shape, mode="constant", cval=0)) for img in get_all_transforms(display2)])
roughness = int(np.count_nonzero(display2) - found*np.sum(nessi))
print(roughness)
Example #35
def thres_bernsen(img, n=DEFAULT_N):
    threshold_matrix = generic_filter(img, bernsen_aux, size=(n, n))
    return apply_threshold(img, threshold_matrix)
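bernsen_aux is defined elsewhere; Bernsen's method thresholds each pixel at the midpoint of its window's extremes. A sketch under that assumption:

import numpy as np

def bernsen_aux(window):
    # Bernsen local threshold: midpoint of the window's min and max
    return (window.max() + window.min()) / 2.0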
Example #36
    for i, feature in enumerate(feat):
        smsi_classLikelihoods = smsi_likeli(mean=means,
                                            var=covariances,
                                            varInv=covariances_Inv,
                                            s_value=s_value[i],
                                            feature=feature,
                                            d=d)
        smsi_resp.append(smsi_classLikelihoods)

    smsi_resp = np.asarray(smsi_resp)

    final_resp = []
    for cluster in range(k):
        result = ndimage.generic_filter(smsi_resp[:, cluster].reshape(
            o_shape[0], o_shape[1]),
                                        np.nansum,
                                        footprint=np.ones((3, 3)),
                                        mode='constant',
                                        cval=np.NaN).reshape(-1)
        final_resp.append(result * (smsi_init_weights[:, cluster] / R_value))

    # numerator of gama
    smsi_resp_num = np.asarray(final_resp).T

    # denominator of gama
    final_resp_den = smsi_resp_num.sum(axis=1)

    # gama value of the smsi

    final_smsi_resp = np.asarray(
        [smsi_resp_num[:, i] / final_resp_den for i in range(k)]).T
    """
Example #37
def process(filMOD02, commandLineArgs, cwd, directory):
    minNfrac = commandLineArgs.validFraction
    decimal = commandLineArgs.decimal
    minNcount = commandLineArgs.windowObservations
    maxKsize = commandLineArgs.maximumKernel
    minKsize = commandLineArgs.minimumKernel
    reductionFactor = commandLineArgs.reductionFactor
    maxLon = commandLineArgs.maximumLongitude
    minLon = commandLineArgs.minimumLongitude
    maxLat = commandLineArgs.maximumLatitude
    minLat = commandLineArgs.minimumLatitude

    # Value at which Band 22 saturates (L. Giglio, personal communication)
    b22saturationVal = 331
    increaseFactor = 1 + (1 - reductionFactor)
    waterFlag = -1
    cloudFlag = -2
    bgFlag = -3

    # Coefficients for radiance calculations
    coeff1 = 119104200
    coeff2 = 14387.752
    lambda21and22 = 3.959
    lambda31 = 11.009
    lambda32 = 12.02

    # Layers for reading in HDF files
    layersMOD02 = [
        'EV_1KM_Emissive', 'EV_250_Aggr1km_RefSB', 'EV_500_Aggr1km_RefSB'
    ]
    layersMOD03 = [
        'Land/SeaMask', 'Latitude', 'Longitude', 'SolarAzimuth', 'SolarZenith',
        'SensorAzimuth', 'SensorZenith'
    ]

    # meanMadFilt: pre-compute background-window footprints for each kernel
    # size; the centre pixel and its two immediate neighbours along the
    # x == 0 column are excluded
    footprintx = []
    footprinty = []
    Ncount = []
    ksizes = []
    for s in range(minKsize, maxKsize + 2, 2):
        halfSize = (s - 1) // 2
        xlist = []
        ylist = []
        for x in range(-halfSize, halfSize + 1):
            for y in range(-halfSize, halfSize + 1):
                if x == 0:
                    if abs(y) > 1:
                        xlist.append(x)
                        ylist.append(y)
                else:
                    xlist.append(x)
                    ylist.append(y)
        footprintx.append(np.array(xlist))
        footprinty.append(np.array(ylist))
        Ncount.append(len(xlist))
        ksizes.append(s)

    # Parse the HDF02
    filSplt = filMOD02.split('.')
    datTim = filSplt[1].replace('A', '') + filSplt[2]
    t = datetime.datetime.strptime(datTim, "%Y%j%H%M")

    julianDay = str(t.timetuple().tm_yday)
    jZeros = 3 - len(julianDay)
    julianDay = '0' * jZeros + julianDay
    yr = str(t.year)
    hr = str(t.hour)
    hrZeros = 2 - len(hr)
    hr = '0' * hrZeros + hr
    mint = str(t.minute)
    mintZeros = 2 - len(mint)
    mint = '0' * mintZeros + mint
    datNam = yr + julianDay + '.' + hr + mint

    # Get the corresponding 03 HDF
    filMOD03 = None
    for filNamCandidate in HDF03:
        if datNam in filNamCandidate:
            filMOD03 = filNamCandidate
            break

    # The HDF03 does not exist - exit as we don't process a solitary HDF02
    if filMOD03 is None:
        return

    # Creates a blank dictionary to hold the full MODIS swaths
    fullArrays = {}

    # Invalid mask
    invalidMask = None

    for i, layer in enumerate(layersMOD02):

        file_template = 'HDF4_EOS:EOS_SWATH:%s:MODIS_SWATH_Type_L1B:%s'
        this_file = file_template % (filMOD02, layer)
        g = gdal.Open(this_file)
        if g is None:
            return
        metadataMOD02 = g.GetMetadata()
        dataMOD02 = g.ReadAsArray()

        # Initialise the invalid mask if it is not already
        if invalidMask is None:
            invalidMask = np.zeros_like(dataMOD02[1])

        if layer == 'EV_1KM_Emissive':
            B21index, B22index, B31index, B32index = 1, 2, 10, 11

            radScales = metadataMOD02["radiance_scales"].split(',')
            radScalesFlt = []
            for radScale in radScales:
                radScalesFlt.append(float(radScale))
            radScales = radScalesFlt
            del radScalesFlt

            radOffset = metadataMOD02["radiance_offsets"].split(',')
            radOffsetFlt = []
            for radOff in radOffset:
                radOffsetFlt.append(float(radOff))
            radOffset = radOffsetFlt
            del radOffsetFlt

            # Calculate temperature/reflectance based on scale and offset and correction term (L. Giglio, personal communication)
            B21, B22, B31, B32 = dataMOD02[B21index], dataMOD02[
                B22index], dataMOD02[B31index], dataMOD02[B32index]

            # Create the invalid mask from raw data values
            invalidMask[(B21 == 65534)] = 1
            invalidMask[(B22 == 65534)] = 1
            invalidMask[(B31 == 65534)] = 1
            invalidMask[(B32 == 65534)] = 1

            B21scale, B22scale, B31scale, B32scale = radScales[
                B21index], radScales[B22index], radScales[B31index], radScales[
                    B32index]
            B21offset, B22offset, B31offset, B32offset = radOffset[B21index], radOffset[B22index], radOffset[B31index], \
                                                         radOffset[B32index]

            B21 = (B21 - B21offset) * B21scale
            T21 = coeff2 / (lambda21and22 * (np.log(coeff1 / ((
                (math.pow(lambda21and22, 5)) * B21) + 1))))
            T21corr = 1.00009 * T21 - 0.05167
            fullArrays['BAND21'] = T21corr

            B22 = (B22 - B22offset) * B22scale
            T22 = coeff2 / (lambda21and22 * (np.log(coeff1 / ((
                (math.pow(lambda21and22, 5)) * B22) + 1))))
            T22corr = 1.00010 * T22 - 0.05332
            fullArrays['BAND22'] = T22corr

            B31 = (B31 - B31offset) * B31scale
            T31 = coeff2 / (lambda31 *
                            (np.log(coeff1 /
                                    (((math.pow(lambda31, 5)) * B31) + 1))))
            T31corr = 1.00046 * T31 - 0.09968
            fullArrays['BAND31'] = T31corr

            B32 = (B32 - B32offset) * B32scale
            T32 = coeff2 / (lambda32 *
                            (np.log(coeff1 /
                                    (((math.pow(lambda32, 5)) * B32) + 1))))
            fullArrays['BAND32'] = T32

        if layer == 'EV_250_Aggr1km_RefSB':

            B1index, B2index = 0, 1

            refScales = metadataMOD02["reflectance_scales"].split(',')
            refScalesFlt = []
            for refScale in refScales:
                refScalesFlt.append(float(refScale))
            refScales = refScalesFlt
            del refScalesFlt

            refOffset = metadataMOD02["reflectance_offsets"].split(',')
            refOffsetFlt = []
            for refOff in refOffset:
                refOffsetFlt.append(float(refOff))
            refOffset = refOffsetFlt
            del refOffsetFlt

            B1, B2 = dataMOD02[B1index], dataMOD02[B2index]

            # Create the invalid mask from raw data values
            invalidMask[(B1 == 65534)] = 1
            invalidMask[(B2 == 65534)] = 1

            B1scale, B2scale = refScales[B1index], refScales[B2index]
            B1offset, B2offset = refOffset[B1index], refOffset[B2index]

            B1 = ((B1 - B1offset) * B1scale) * 1000
            B1 = B1.astype(int)
            B2 = ((B2 - B2offset) * B2scale) * 1000
            B2 = B2.astype(int)

            fullArrays['BAND1x1k'], fullArrays['BAND2x1k'] = B1, B2

        if layer == 'EV_500_Aggr1km_RefSB':
            B7index = 4

            refScales = metadataMOD02["reflectance_scales"].split(',')
            refScalesFlt = []
            for refScale in refScales:
                refScalesFlt.append(float(refScale))
            refScales = refScalesFlt
            del refScalesFlt

            refOffset = metadataMOD02["reflectance_offsets"].split(',')
            refOffsetFlt = []
            for refOff in refOffset:
                refOffsetFlt.append(float(refOff))
            refOffset = refOffsetFlt
            del refOffsetFlt

            B7 = dataMOD02[B7index]

            # Create the invalid mask from raw data values
            invalidMask[(B7 == 65534)] = 1

            B7scale, B7offset = refScales[B7index], refOffset[B7index]
            B7 = ((B7 - B7offset) * B7scale) * 1000
            B7 = B7.astype(int)
            fullArrays['BAND7x1k'] = B7

    for i, layer in enumerate(layersMOD03):

        file_template = 'HDF4_EOS:EOS_SWATH:%s:MODIS_Swath_Type_GEO:%s'
        this_file = file_template % (filMOD03, layer)
        g = gdal.Open(this_file)
        if g is None:
            raise IOError
        if layer == 'Land/SeaMask':
            newLyrName = 'LANDMASK'
        elif layer == 'Latitude':
            newLyrName = 'LAT'
        elif layer == 'Longitude':
            newLyrName = 'LON'
        else:
            newLyrName = layer
        fullArrays[newLyrName] = g.ReadAsArray()

    # Clip area to bounding co-ordinates
    boundCrds = np.where((minLat < fullArrays['LAT'])
                         & (fullArrays['LAT'] < maxLat)
                         & (fullArrays['LON'] < maxLon)
                         & (minLon < fullArrays['LON']))

    if np.size(boundCrds) > 0 and (np.min(boundCrds[0]) != np.max(
            boundCrds[0])) and (np.min(boundCrds[1]) != np.max(boundCrds[1])):

        boundCrds0 = boundCrds[0]
        boundCrds1 = boundCrds[1]
        min0 = np.min(boundCrds0)
        max0 = np.max(boundCrds0)
        min1 = np.min(boundCrds1)
        max1 = np.max(boundCrds1)

        # Creates a blank dictionary to hold the cropped MODIS data
        allArrays = {}  # Clipped to min/max lat/long
        for b in fullArrays.keys():
            cropB = fullArrays[b][min0:max0, min1:max1]
            allArrays[b] = cropB

        # Crop the invalid mask
        invalidMask = invalidMask[min0:max0, min1:max1]

        [nRows, nCols] = np.shape(allArrays['BAND22'])

        # Test for b22 saturation - replace with values from B21
        allArrays['BAND22'][np.where(
            allArrays['BAND22'] >= b22saturationVal)] = allArrays['BAND21'][
                np.where(allArrays['BAND22'] >= b22saturationVal)]

        # Day/Night flag (Giglio, 2003 Section 2.2.2)
        dayFlag = np.zeros((nRows, nCols), dtype=np.int)
        dayFlag[np.where(allArrays['SolarZenith'] < 8500)] = 1

        # Create water mask
        waterMask = np.zeros((nRows, nCols), dtype=np.int)
        waterMask[np.where(allArrays['LANDMASK'] != 1)] = waterFlag

        # Create cloud mask (Giglio, 2003 Section 2.1)
        cloudMask = np.zeros((nRows, nCols), dtype=np.int)
        cloudMask[((allArrays['BAND1x1k'] + allArrays['BAND2x1k']) > 900)
                  & (dayFlag == 1)] = cloudFlag
        cloudMask[(allArrays['BAND32'] < 265) & (dayFlag == 1)] = cloudFlag
        cloudMask[(((allArrays['BAND1x1k'] + allArrays['BAND2x1k']) > 700)
                   & (allArrays['BAND32'] < 285)) & (dayFlag == 1)] = cloudFlag
        cloudMask[((allArrays['BAND32'] < 265) & (dayFlag == 0))] = cloudFlag

        # Mask clouds and water from input bands
        b21CloudWaterMasked = np.copy(allArrays['BAND21'])  # ONLY B21
        b21CloudWaterMasked[np.where(waterMask == waterFlag)] = waterFlag
        b21CloudWaterMasked[np.where(cloudMask == cloudFlag)] = cloudFlag

        b22CloudWaterMasked = np.copy(
            allArrays['BAND22'])  # HAS B21 VALS WHERE B22 SATURATED
        b22CloudWaterMasked[np.where(waterMask == waterFlag)] = waterFlag
        b22CloudWaterMasked[np.where(cloudMask == cloudFlag)] = cloudFlag

        b31CloudWaterMasked = np.copy(allArrays['BAND31'])
        b31CloudWaterMasked[np.where(waterMask == waterFlag)] = waterFlag
        b31CloudWaterMasked[np.where(cloudMask == cloudFlag)] = cloudFlag

        deltaT = np.abs(allArrays['BAND22'] - allArrays['BAND31'])
        deltaTCloudWaterMasked = np.copy(deltaT)
        deltaTCloudWaterMasked[np.where(waterMask == waterFlag)] = waterFlag
        deltaTCloudWaterMasked[np.where(cloudMask == cloudFlag)] = cloudFlag

        # Potential fire test (Giglio 2003, Section 2.2.1)
        potFire = np.zeros((nRows, nCols), dtype=np.int)
        with np.errstate(invalid='ignore'):
            potFire[(dayFlag == 1)
                    & (allArrays['BAND22'] > (310 * reductionFactor)) &
                    (deltaT > (10 * reductionFactor)) &
                    (allArrays['BAND2x1k'] <
                     (300 * increaseFactor)) & (invalidMask == 0)] = 1
            potFire[(dayFlag == 0)
                    & (allArrays['BAND22'] > (305 * reductionFactor)) &
                    (deltaT > (10 * reductionFactor)) & (invalidMask == 0)] = 1

        # Absolute threshold test 1 (Giglio 2003, Section 2.2.2)
        test1 = np.zeros((nRows, nCols), dtype=np.int)
        with np.errstate(invalid='ignore'):
            test1[(potFire == 1) & (dayFlag == 1) & (allArrays['BAND22'] >
                                                     (360 * reductionFactor)) &
                  (invalidMask == 0)] = 1
            test1[(potFire == 1) & (dayFlag == 0) & (allArrays['BAND22'] >
                                                     (320 * reductionFactor)) &
                  (invalidMask == 0)] = 1

        # Background fire test (Giglio 2003, Section 2.2.3, first paragraph)
        bgMask = np.zeros((nRows, nCols), dtype=np.int)
        with np.errstate(invalid='ignore'):
            bgMask[(potFire == 1) & (dayFlag == 1) & (allArrays['BAND22'] >
                                                      (325 * reductionFactor))
                   & (deltaT >
                      (20 * reductionFactor)) & (invalidMask == 0)] = bgFlag
            bgMask[(potFire == 1) & (dayFlag == 0) & (allArrays['BAND22'] >
                                                      (310 * reductionFactor))
                   & (deltaT >
                      (10 * reductionFactor)) & (invalidMask == 0)] = bgFlag

        b22bgMask = np.copy(b22CloudWaterMasked)
        b22bgMask[(potFire == 1) & (bgMask == bgFlag) &
                  (invalidMask == 0)] = bgFlag

        b31bgMask = np.copy(b31CloudWaterMasked)
        b31bgMask[(potFire == 1) & (bgMask == bgFlag) &
                  (invalidMask == 0)] = bgFlag

        deltaTbgMask = np.copy(deltaTCloudWaterMasked)
        deltaTbgMask[(potFire == 1) & (bgMask == bgFlag) &
                     (invalidMask == 0)] = bgFlag

        # Mean and mad filters - mad needed for confidence estimation
        b22meanFilt, b22MADfilt = meanMadFilt(b22bgMask, maxKsize, minKsize,
                                              footprintx, footprinty, ksizes,
                                              minNcount, minNfrac)
        b22minusBG = np.copy(b22CloudWaterMasked) - np.copy(b22meanFilt)
        b31meanFilt, b31MADfilt = meanMadFilt(b31bgMask, maxKsize, minKsize,
                                              footprintx, footprinty, ksizes,
                                              minNcount, minNfrac)
        deltaTmeanFilt, deltaTMADFilt = meanMadFilt(deltaTbgMask, maxKsize,
                                                    minKsize, footprintx,
                                                    footprinty, ksizes,
                                                    minNcount, minNfrac)

        b22bgRej = np.copy(allArrays['BAND22'])
        b22bgRej[(potFire == 1) & (bgMask != bgFlag) &
                 (invalidMask == 0)] = bgFlag
        b22rejMeanFilt, b22rejMADfilt = meanMadFilt(b22bgRej, maxKsize,
                                                    minKsize, footprintx,
                                                    footprinty, ksizes,
                                                    minNcount, minNfrac)

        # CONTEXTUAL TESTS - (Giglio 2003, Section 2.2.4)
        # The number associated with each test is the number of the equation in the paper

        # Context fire test 2 (Giglio 2003, Section 2.2.4)
        test2 = np.zeros((nRows, nCols), dtype=np.int)
        with np.errstate(invalid='ignore'):
            test2[(potFire == 1) & (deltaT > (deltaTmeanFilt +
                                              (3.5 * deltaTMADFilt))) &
                  (invalidMask == 0)] = 1

        # Context fire test 3 (Giglio 2003, Section 2.2.4)
        test3 = np.zeros((nRows, nCols), dtype=np.int)
        with np.errstate(invalid='ignore'):
            test3[(potFire == 1) & (deltaT > (deltaTmeanFilt + 6)) &
                  (invalidMask == 0)] = 1

        # Context fire test 4 (Giglio 2003, Section 2.2.4)
        test4 = np.zeros((nRows, nCols), dtype=np.int)
        with np.errstate(invalid='ignore'):
            test4[(potFire == 1) & (b22CloudWaterMasked > (b22meanFilt +
                                                           (3 * b22MADfilt))) &
                  (invalidMask == 0)] = 1

        # Context fire test 5 (Giglio 2003, Section 2.2.4)
        test5 = np.zeros((nRows, nCols), dtype=np.int)
        test5[(potFire == 1)
              & (b31CloudWaterMasked > (b31meanFilt + b31MADfilt - 4)) &
              (invalidMask == 0)] = 1

        # Context fire test 6 (Giglio 2003, Section 2.2.4)
        test6 = np.zeros((nRows, nCols), dtype=np.int)
        with np.errstate(invalid='ignore'):
            test6[(potFire == 1) & (b22rejMADfilt > 5) &
                  (invalidMask == 0)] = 1

        # Combine tests to create tentative fires (Giglio 2003, section 2.2.5)
        tests2and3and4 = test2 * test3 * test4

        test5or6 = np.zeros((nRows, nCols), dtype=np.int)
        with np.errstate(invalid='ignore'):
            test5or6[(test5 == 1) | (test6 == 1)] = 1
        fireLocTentativeDay = potFire * tests2and3and4 * test5or6

        dayFires = np.zeros((nRows, nCols), dtype=np.int)
        with np.errstate(invalid='ignore'):
            dayFires[(potFire == 1) & (dayFlag == 1) &
                     ((test1 == 1) |
                      (fireLocTentativeDay == 1)) & (invalidMask == 0)] = 1

        # Nighttime definite fire tests (Giglio 2003, section 2.2.5)
        nightFires = np.zeros((nRows, nCols), dtype=np.int)
        with np.errstate(invalid='ignore'):
            nightFires[(potFire == 1) & ((dayFlag == 0) & (
                (tests2and3and4 == 1) | test1 == 1)) & (invalidMask == 0)] = 1

        # Sun glint rejection 7 (Giglio 2003, section 2.2.6)
        relAzimuth = allArrays['SensorAzimuth'] - allArrays['SolarAzimuth']
        cosThetaG = (np.cos(allArrays['SensorZenith']) *
                     np.cos(allArrays['SolarZenith'])) - (
                         np.sin(allArrays['SensorZenith']) *
                         np.sin(allArrays['SolarZenith']) * np.cos(relAzimuth))
        thetaG = np.arccos(cosThetaG)
        thetaG = (thetaG / 3.141592) * 180

        # Sun glint test 8 (Giglio 2003, section 2.2.6)
        sgTest8 = np.zeros((nRows, nCols), dtype=np.int)
        with np.errstate(invalid='ignore'):
            sgTest8[(potFire == 1) & (thetaG < 2)] = 1

        # Sun glint test 9 (Giglio 2003, section 2.2.6)
        sgTest9 = np.zeros((nRows, nCols), dtype=np.int)
        with np.errstate(invalid='ignore'):
            sgTest9[(potFire == 1)
                    & ((thetaG < 8) & (allArrays['BAND1x1k'] > 100)
                       & (allArrays['BAND2x1k'] > 200)) &
                    (allArrays['BAND7x1k'] > 120) & (invalidMask == 0)] = 1

        # Sun glint test 10 (Giglio 2003, section 2.2.6)
        waterLoc = np.zeros((nRows, nCols), dtype=np.int)
        with np.errstate(invalid='ignore'):
            waterLoc[(potFire == 1) & (waterMask == waterFlag)] = 1
        nWaterAdj = ndimage.generic_filter(waterLoc, adj, size=3)
        nRejectedWater = runFilt(waterMask, nRejectWaterFilt, minKsize,
                                 maxKsize)
        with np.errstate(invalid='ignore'):
            nRejectedWater[(potFire == 1) & (nRejectedWater < 0) &
                           (invalidMask == 0)] = 0

        sgTest10 = np.zeros((nRows, nCols), dtype=np.int)
        with np.errstate(invalid='ignore'):
            sgTest10[(potFire == 1)
                     & ((thetaG < 12) & ((nWaterAdj + nRejectedWater) > 0)) &
                     (invalidMask == 0)] = 1

        sgAll = np.zeros((nRows, nCols), dtype=int)
        with np.errstate(invalid='ignore'):
            sgAll[(sgTest8 == 1) | (sgTest9 == 1) | (sgTest10 == 1)] = 1

        # Desert boundary rejection (Giglio 2003, section 2.2.7)
        nValid = runFilt(b22bgMask, nValidFilt, minKsize, maxKsize)
        nRejectedBG = runFilt(bgMask, nRejectBGfireFilt, minKsize, maxKsize)

        with np.errstate(invalid='ignore'):
            nRejectedBG[(potFire == 1) & (nRejectedBG < 0) &
                        (invalidMask == 0)] = 0

        # Desert boundary test 11 (Giglio 2003, section 2.2.7)
        dbTest11 = np.zeros((nRows, nCols), dtype=int)
        with np.errstate(invalid='ignore'):
            dbTest11[(potFire == 1) & ((nRejectedBG > (0.1 * nValid))) &
                     (invalidMask == 0)] = 1

        # Desert boundary test 12 (Giglio 2003, section 2.2.7)
        dbTest12 = np.zeros((nRows, nCols), dtype=int)
        with np.errstate(invalid='ignore'):
            dbTest12[(potFire == 1) & (nRejectedBG >= 4) &
                     (invalidMask == 0)] = 1

        # Desert boundary test 13 (Giglio 2003, section 2.2.7)
        dbTest13 = np.zeros((nRows, nCols), dtype=int)
        with np.errstate(invalid='ignore'):
            dbTest13[(potFire == 1) & (allArrays['BAND2x1k'] > 150) &
                     (invalidMask == 0)] = 1

        # Desert boundary test 14 (Giglio 2003, section 2.2.7)
        dbTest14 = np.zeros((nRows, nCols), dtype=int)
        with np.errstate(invalid='ignore'):
            dbTest14[(potFire == 1) & (b22rejMeanFilt < 345) &
                     (invalidMask == 0)] = 1

        # Desert boundary test 15 (Giglio 2003, section 2.2.7)
        dbTest15 = np.zeros((nRows, nCols), dtype=int)
        with np.errstate(invalid='ignore'):
            dbTest15[(potFire == 1) & (b22rejMADfilt < 3) &
                     (invalidMask == 0)] = 1

        # Desert boundary test 16 (Giglio 2003, section 2.2.7)
        dbTest16 = np.zeros((nRows, nCols), dtype=int)
        with np.errstate(invalid='ignore'):
            dbTest16[(potFire == 1)
                     & (b22CloudWaterMasked < (b22rejMeanFilt +
                                               (6 * b22rejMADfilt))) &
                     (invalidMask == 0)] = 1

        # Reject anything that fulfills desert boundary criteria
        dbAll = dbTest11 * dbTest12 * dbTest13 * dbTest14 * dbTest15 * dbTest16

        # Coastal false alarm rejection (Giglio 2003, Section 2.2.8)
        with np.errstate(invalid='ignore'):
            ndvi = (allArrays['BAND2x1k'] - allArrays['BAND1x1k']) / (
                allArrays['BAND2x1k'] + allArrays['BAND1x1k'])
        unmaskedWater = np.zeros((nRows, nCols), dtype=int)
        with np.errstate(invalid='ignore'):
            unmaskedWater[(potFire == 1)
                          & ((ndvi < 0) & (allArrays['BAND7x1k'] < 50)
                             & (allArrays['BAND2x1k'] < 150))] = -6
            unmaskedWater[(potFire == 1) & (bgMask == bgFlag)] = bgFlag
        Nuw = runFilt(unmaskedWater, nUnmaskedWaterFilt, minKsize, maxKsize)
        rejUnmaskedWater = np.zeros((nRows, nCols), dtype=int)
        with np.errstate(invalid='ignore'):
            rejUnmaskedWater[(potFire == 1) & ((test1 == 0) & (Nuw > 0)) &
                             (invalidMask == 0)] = 1

        # Combine all masks
        allFires = dayFires + nightFires  # All potential fires
        with np.errstate(
                invalid='ignore'
        ):  # Reject sun glint, desert boundary, coastal false alarms
            allFires[(sgAll == 1) | (dbAll == 1) | (rejUnmaskedWater == 1)] = 0

        # If any fires have been detected, calculate Fire Radiative Power (FRP)
        if np.max(allFires) > 0:

            b22firesAllMask = allFires * allArrays['BAND22']
            b22bgAllMask = allFires * b22meanFilt

            b22maskEXP = np.power(b22firesAllMask, 8)
            b22bgEXP = np.power(b22bgAllMask, 8)
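
            # Note: the FRP line below matches the empirical eighth-power
            # approximation of Kaufman et al. (1998),
            # FRP [MW] ~ 4.34e-19 * (T8**8 - T8bg**8), with T8 the ~4 micron
            # brightness temperature of the fire pixel and T8bg the background
            # mean computed above.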

            frpMW = 4.34e-19 * (b22maskEXP - b22bgEXP)

            # Detection confidence (Giglio 2003, Section 2.3)
            cloudLoc = np.zeros((nRows, nCols), dtype=int)
            with np.errstate(invalid='ignore'):
                cloudLoc[cloudMask == cloudFlag] = 1
            nCloudAdj = ndimage.generic_filter(cloudLoc, adj, size=3)

            waterLoc = np.zeros((nRows, nCols), dtype=int)
            with np.errstate(invalid='ignore'):
                waterLoc[waterMask == waterFlag] = 1
            nWaterAdj = ndimage.generic_filter(waterLoc, adj, size=3)

            # Fire detection confidence test 17
            z4 = b22minusBG / b22MADfilt

            # Fire detection confidence test 18
            zDeltaT = (deltaTbgMask - deltaTmeanFilt) / deltaTMADFilt

            with np.errstate(invalid='ignore'):
                firesNclouds = nCloudAdj[(allFires == 1)]
                firesZ4 = z4[(allFires == 1)]
                firesZdeltaT = zDeltaT[(allFires == 1)]
                firesB22bgMask = b22bgMask[(allFires == 1)]
                firesNwater = nWaterAdj[(allFires == 1)]
                firesDayFlag = dayFlag[(allFires == 1)]

            # Fire detection confidence test 19
            C1day = rampFn(firesB22bgMask, 310, 340)
            C1night = rampFn(firesB22bgMask, 305, 320)

            # Fire detection confidence test 20
            C2 = rampFn(firesZ4, 2.5, 6)

            # Fire detection confidence test 21
            C3 = rampFn(firesZdeltaT, 3, 6)

            # Fire detection confidence test 22 - not used for night fires
            C4 = 1 - rampFn(firesNclouds, 0,
                            6)  # more adjacent clouds = lower confidence

            # Fire detection confidence test 23 - not used for night fires
            C5 = 1 - rampFn(firesNwater, 0, 6)

            # Detection confidence for the daytime
            confArrayDay = np.vstack((C1day, C2, C3, C4, C5))
            detnConfDay = gmean(confArrayDay, axis=0)

            # Detection confidence for the nighttime
            confArrayNight = np.vstack((C1night, C2, C3))
            detnConfNight = gmean(confArrayNight, axis=0)

            # Detection confidence for both day and night
            detnConf = np.zeros_like(detnConfDay, dtype=float)
            detnConf[firesDayFlag == 1] = detnConfDay[firesDayFlag == 1]
            detnConf[firesDayFlag == 0] = detnConfNight[firesDayFlag == 0]

            with np.errstate(invalid='ignore'):
                FRPx = np.where((allFires == 1))[1]
                FRPsample = FRPx + min1
                FRPy = np.where((allFires == 1))[0]
                FRPline = FRPy + min0
                FRPlats = allArrays['LAT'][(allFires == 1)]
                FRPlons = allArrays['LON'][(allFires == 1)]
                FRPT21 = allArrays['BAND22'][(allFires == 1)]
                FRPT31 = allArrays['BAND31'][(allFires == 1)]
                FRPMeanT21 = b22meanFilt[(allFires == 1)]
                FRPMeanT31 = b31meanFilt[(allFires == 1)]
                FRPMeanDT = deltaTmeanFilt[(allFires == 1)]
                FRPMADT21 = b22MADfilt[(allFires == 1)]
                FRPMADT31 = b31MADfilt[(allFires == 1)]
                FRP_MAD_DT = deltaTMADFilt[(allFires == 1)]
                FRP_AdjCloud = nCloudAdj[(allFires == 1)]
                FRP_AdjWater = nWaterAdj[(allFires == 1)]
                FRP_NumValid = nValid[(allFires == 1)]
                FRP_confidence = detnConf * 100
                FRPpower = frpMW[(allFires == 1)]

            exportCSV = np.column_stack([
                FRPline, FRPsample, FRPlats, FRPlons, FRPT21, FRPT31,
                FRPMeanT21, FRPMeanT31, FRPMeanDT, FRPMADT21, FRPMADT31,
                FRP_MAD_DT, FRPpower, FRP_AdjCloud, FRP_AdjWater, FRP_NumValid,
                FRP_confidence
            ])

            exportCSV = [x for x in exportCSV if -4 not in x]

            if len(exportCSV) > 0:

                hdr = '"FRPline",' \
                      '"FRPsample",' \
                      '"FRPlats",' \
                      '"FRPlons",' \
                      '"FRPT21",' \
                      '"FRPT31",' \
                      '"FRPMeanT21",' \
                      '"FRPMeanT31",' \
                      '"FRPMeanDT",' \
                      '"FRPMADT21",' \
                      '"FRPMADT31",' \
                      '"FRP_MAD_DT",' \
                      '"FRPpower",' \
                      '"FRP_AdjCloud",' \
                      '"FRP_AdjWater",' \
                      '"FRP_NumValid",' \
                      '"FRP_confidence"'
                os.chdir(cwd)
                np.savetxt(
                    filMOD02.replace('hdf', '') + "csv",
                    exportCSV,
                    delimiter=",",
                    header=hdr,
                    fmt=[
                        "%d",  # line
                        "%d",  # sample
                        "%.5f",  # lats
                        "%.5f",  # lons
                        "%.2f",  # t21
                        "%.2f",  # t31
                        "%.2f",  # mean t21
                        "%.2f",  # mean t31
                        "%.2f",  # mean dt
                        "%.2f",  # mad t21
                        "%.2f",  # mad t31
                        "%.2f",  # mad dt
                        "%." + str(decimal) + "f",  # power
                        "%d",  # cloud
                        "%d",  # water
                        "%d",  # valid
                        "%.2f"  # conf
                    ])
                os.chdir(directory)
def orientation_similarity_map(
    xmap,
    n_best: int = None,
    simulation_indices_prop: str = "simulation_indices",
    normalize: bool = True,
    from_n_best: int = None,
    footprint: np.ndarray = None,
    center_index: int = 2,
) -> np.ndarray:
    r"""Compute an orientation similarity map following
    :cite:`marquardt2017quantitative`, where the ranked list of the
    array indices of the best matching simulated patterns in one point
    is compared to the corresponding lists in the nearest neighbour
    points.

    Parameters
    ----------
    xmap : ~orix.crystal_map.crystal_map.CrystalMap
        A crystal map with a ranked list of the array indices of the
        best matching simulated patterns among its properties.
    n_best : int, optional
        Number of ranked indices to compare. If None (default), all
        indices are compared.
    simulation_indices_prop : str, optional
        Name of the simulated indices array in the crystal map's properties.
        Default is "simulation_indices".
    normalize : bool, optional
        Whether to normalize the number of equal indices to the range
        [0, 1], by default True.
    from_n_best : int, optional
        Return an OSM for each n in the range [`from_n_best`, `n_best`].
        If None (default), only the OSM for `n_best` indices is
        returned.
    footprint : numpy.ndarray, optional
        Boolean 2D array specifying which neighbouring points to compare
        lists with, by default the four nearest neighbours.
    center_index : int, optional
        Flat index of central navigation point in the truthy values of
        footprint, by default 2.

    Returns
    -------
    osm : numpy.ndarray
        Orientation similarity map(s). If `from_n_best` is not None,
        the returned array has three dimensions, where `n_best` is at
        array[:, :, 0] and `from_n_best` at array[:, :, -1].

    Notes
    -----
    If the set :math:`S_{r,c}` is the ranked list of best matching
    indices for a given point :math:`(r,c)`, then the orientation
    similarity index :math:`\eta_{r,c}` is the average value of the
    cardinalities (\#) of the intersections with the neighbouring sets

    .. math::

        \eta_{r,c} = \frac{1}{4}
            \left(
                \#(S_{r,c} \cap S_{r-1,c}) +
                \#(S_{r,c} \cap S_{r+1,c}) +
                \#(S_{r,c} \cap S_{r,c-1}) +
                \#(S_{r,c} \cap S_{r,c+1})
            \right).
    """
    simulation_indices = xmap.prop[simulation_indices_prop]
    nav_size, keep_n = simulation_indices.shape

    if n_best is None:
        n_best = keep_n
    elif n_best > keep_n:
        raise ValueError(
            f"n_best {n_best} cannot be greater than keep_n {keep_n}")

    data_shape = xmap.shape
    flat_index_map = np.arange(nav_size).reshape(data_shape)

    if from_n_best is None:
        from_n_best = n_best

    osm = np.zeros(data_shape + (n_best - from_n_best + 1, ), dtype=np.float32)

    if footprint is None:
        footprint = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])

    for i, n in enumerate(range(n_best, from_n_best - 1, -1)):
        match_indices = simulation_indices[:, :n]
        osm[:, :, i] = generic_filter(
            flat_index_map,
            lambda v: _orientation_similarity_per_pixel(
                v,
                center_index,
                match_indices,
                n,
                normalize,
            ),
            footprint=footprint,
            mode="constant",
            cval=-1,
            output=np.float32,
        )

    return osm.squeeze()
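
# A minimal, loop-based sketch (not from the original source) of the
# similarity measure defined in the docstring above: for every map point,
# average the number of ranked indices shared with its in-map nearest
# neighbours. Input names and shapes are illustrative assumptions.
import numpy as np

def osm_sketch(match_indices, map_shape, n):
    # match_indices: (n_rows * n_cols, keep_n) ranked simulation indices
    nrows, ncols = map_shape
    ranked = match_indices[:, :n].reshape(nrows, ncols, n)
    osm = np.zeros(map_shape, dtype=np.float32)
    for r in range(nrows):
        for c in range(ncols):
            neighbours = [(r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)]
            overlaps = [
                np.intersect1d(ranked[r, c], ranked[rr, cc]).size
                for rr, cc in neighbours
                if 0 <= rr < nrows and 0 <= cc < ncols
            ]
            osm[r, c] = np.mean(overlaps) / n  # normalized to [0, 1]
    return osm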
Exemple #39
0
def mean_filter_2D(arr, footprint):
    from scipy.ndimage import generic_filter
    out = generic_filter(arr, np.nanmean, footprint=footprint, origin=0)
    out[np.isnan(arr)] = np.nan
    return out
def thres_phansalskar(img, n=DEFAULT_N, k=0.25, R=0.5, p=2, q=10):
    threshold_matrix = generic_filter(img,
                                      phansalskar_aux(k, R, p, q),
                                      size=(n, n))
    return apply_threshold(img, threshold_matrix)
def thres_sauvola_pietaksinen(img, n=DEFAULT_N, k=0.5, R=128):
    threshold_matrix = generic_filter(img, sauvola_aux(k, R), size=(n, n))
    return apply_threshold(img, threshold_matrix)
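
# The *_aux helpers are not shown in this listing. A plausible sketch,
# assuming they implement the standard Sauvola and Phansalkar local
# thresholds over the flattened window that generic_filter passes in
# (the exact originals may differ):
import numpy as np

def sauvola_aux_sketch(k, R):
    # Sauvola: t = m * (1 + k * (s / R - 1)), with m/s the window mean/std
    def aux(window):
        m, s = window.mean(), window.std()
        return m * (1 + k * (s / R - 1))
    return aux

def phansalskar_aux_sketch(k, R, p, q):
    # Phansalkar adds a correction term for low-contrast images:
    # t = m * (1 + p * exp(-q * m) + k * (s / R - 1))
    def aux(window):
        m, s = window.mean(), window.std()
        return m * (1 + p * np.exp(-q * m) + k * (s / R - 1))
    return aux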
Exemple #42
0
def focal_statistics(raster, band_num=1, ignore_nodata=True, size=3, function=np.nanmean,
                     clip_to_mask=False, out_colname=None):
    """Derives for each pixel a statistic of the values with the specified neighbourhood.

    Currently the neighbourhood is restricted to square neighbourhoods ie 3x3 size.

    Any numpy statistical functions are supported along with custom functions.

    Nodata values are converted to np.nan and will be excluded from the statistical calculation. Nodata pixels may be
    assigned a value if at least one pixel in the neighbourhood has a valid value. To remove/mask these values from
    the final output, set the clip_to_mask setting to True.

    Using a size of 1 returns the selected band with the converted nodata values. No statistical functions are applied.

    An string out_colname is returned and can be used as a filename or column name during future analysis. If None, it
    is derived from the input raster, size and statistical function used.
          For single band inputs   <stat><size>x<size>_<raster name>
             eg.   mean3x3_area1_yield    apply a mean 3x3 filter for raster area1_yield

          For multi band inputs   <function><size>x<size>bd<band_num>_<raster name>
             eg.   mean3x3b3_area2       apply a mean 3x3 filter for band 3 of the raster area2

    Source: https://stackoverflow.com/a/30853116/9567306
    https://stackoverflow.com/questions/46953448/local-mean-filter-of-a-numpy-array-with-missing-data/47052791#47052791

    Args:
        raster (rasterio.io.DatasetReader): A raster file opened using rasterio.open(os.path.normpath())
        band_num (int):       The band number to apply focal statistics to.
        ignore_nodata (bool): If true, the nodata value of the raster will be converted to np.nan and excluded
                              from statistical calculations.
        size (int):           The size of the neighbourhood filter used for statistics calculations. Currently
                              restricted to a square neighbourhood ie 3x3, 5x5 etc.
        function (function):  a function to apply to the raster. This can be a numpy function
                              like np.nanmean or a custom one.
        clip_to_mask (bool):  If true, remove values assigned to nodata pixels
        out_colname (str):    An output string used to describe the filter result; it can be used as a column
                              name or filename. If None, it will be derived.
    Returns:
        numpy.ndarray:        A 2D numpy array of float32 values
        str:                  a string representation of the inputs

    """

    if not isinstance(raster, rasterio.DatasetReader):
        raise TypeError("Input should be a rasterio.DatasetReader created using rasterio.open()")

    if not isinstance(size, int) or size % 2 == 0:
        raise TypeError("Size should be an odd integer. Only square filters are supported.")

    if not isinstance(ignore_nodata, bool):
        raise TypeError('ignore_nodata should be a boolean.')

    start_time = time.time()
    col_name = []
    mask = raster.read_masks(band_num)
    if ignore_nodata:
        # change nodata values to np.nan
        band = np.where(mask, raster.read(band_num), np.nan)
    else:
        band = raster.read(band_num)

    # convert to float64 for accuracy
    band = band.astype(np.float64)
    if size > 1:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=RuntimeWarning)
            filtered = generic_filter(band, function, mode='constant', cval=np.nan, size=size)

        col_name += [function.__name__.replace('nan', ''), '{0}x{0}'.format(size)]
    else:
        filtered = band
        col_name += ['pixel']

    if raster.count > 1:  # the number of bands in the raster
        col_name += ['bd{}'.format(band_num)]

    if clip_to_mask:
        # reapply the mask to remove values assigned to nodata pixels
        filtered = np.where(mask, filtered, np.nan)

    title = os.path.splitext(os.path.basename(raster.name))[0]
    if out_colname is None or out_colname == '':
        out_colname = '{}_{}'.format(''.join(col_name), title)

    if config.get_debug_mode():
        LOGGER.info('{:50}  {dur:17} min: {:>.4f} max: {:>.4f}'.format(
            out_colname, np.nanmin(filtered), np.nanmax(filtered),
            dur=timedelta(seconds=time.time() - start_time)))

    return filtered.astype(np.float32), out_colname
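
# A hedged usage sketch for focal_statistics above; the raster path is
# hypothetical.
import os
import numpy as np
import rasterio

with rasterio.open(os.path.normpath('area1_yield.tif')) as src:
    filtered, col_name = focal_statistics(src, band_num=1, size=3,
                                          function=np.nanmean)
    # col_name would be, e.g., 'mean3x3_area1_yield'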
def feature_extraction(img):
    df = pd.DataFrame()

    #All features generated must match the way features are generated for TRAINING.
    #Feature1 is our original image pixels
    img2 = img.reshape(-1)
    df['Original Image'] = img2

    #Generate Gabor features
    num = 1
    kernels = []
    for theta in range(2):
        theta = theta / 4. * np.pi
        for sigma in (1, 3):
            for lamda in np.arange(0, np.pi, np.pi / 4):
                for gamma in (0.05, 0.5):

                    gabor_label = 'Gabor' + str(num)
                    ksize = 9
                    kernel = cv2.getGaborKernel((ksize, ksize),
                                                sigma,
                                                theta,
                                                lamda,
                                                gamma,
                                                0,
                                                ktype=cv2.CV_32F)
                    kernels.append(kernel)
                    #Now filter image and add values to new column
                    fimg = cv2.filter2D(img2, cv2.CV_8UC3, kernel)
                    filtered_img = fimg.reshape(-1)
                    df[gabor_label] = filtered_img  #Modify this to add new column for each gabor
                    num += 1
    ########################################
    # Generate OTHER FEATURES and add them to the data frame
    # Feature 3 is Canny edge
    edges = cv2.Canny(img, 100, 200)  # Image, min and max values
    edges1 = edges.reshape(-1)
    df['Canny Edge'] = edges1  # Add column to original dataframe

    from skimage.filters import roberts, sobel, scharr, prewitt

    #Feature 4 is Roberts edge
    edge_roberts = roberts(img)
    edge_roberts1 = edge_roberts.reshape(-1)
    df['Roberts'] = edge_roberts1

    #Feature 5 is Sobel
    edge_sobel = sobel(img)
    edge_sobel1 = edge_sobel.reshape(-1)
    df['Sobel'] = edge_sobel1

    #Feature 6 is Scharr
    edge_scharr = scharr(img)
    edge_scharr1 = edge_scharr.reshape(-1)
    df['Scharr'] = edge_scharr1

    #Feature 7 is Prewitt
    edge_prewitt = prewitt(img)
    edge_prewitt1 = edge_prewitt.reshape(-1)
    df['Prewitt'] = edge_prewitt1

    #Feature 8 is Gaussian with sigma=3
    from scipy import ndimage as nd
    gaussian_img = nd.gaussian_filter(img, sigma=3)
    gaussian_img1 = gaussian_img.reshape(-1)
    df['Gaussian s3'] = gaussian_img1

    #Feature 9 is Gaussian with sigma=7
    gaussian_img2 = nd.gaussian_filter(img, sigma=7)
    gaussian_img3 = gaussian_img2.reshape(-1)
    df['Gaussian s7'] = gaussian_img3

    #Feature 10 is Median with sigma=3
    median_img = nd.median_filter(img, size=3)
    median_img1 = median_img.reshape(-1)
    df['Median s3'] = median_img1

    #Feature 11 is Variance with size=3
    variance_img = nd.generic_filter(img, np.var, size=3)
    variance_img1 = variance_img.reshape(-1)
    df['Variance s3'] = variance_img1  #Add column to original dataframe

    return df
Exemple #44
0
                                 'GLM',
                                 16,
                                 window=30)

# GLM event data from those files
G17 = accumulate_GLM_FAST(G17_files, data_type='event')
G16 = accumulate_GLM_FAST(G16_files, data_type='event')

# Bin GLM on HRRR grid (hist17), and filter in-HRRR events
hist17, filtered17 = bin_GLM_on_HRRR_grid(G17, Hlat, Hlon, m)
hist16, filtered16 = bin_GLM_on_HRRR_grid(G16, Hlat, Hlon, m)

# Dilate the GLM events
custom_filter = np.array([[0, 1, 1, 1, 0], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1],
                          [1, 1, 1, 1, 1], [0, 1, 1, 1, 0]])
bloat_glm17 = ndimage.generic_filter(hist17, np.max, footprint=custom_filter)
bloat_glm16 = ndimage.generic_filter(hist16, np.max, footprint=custom_filter)
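# Note: a max generic_filter over a 0/1 footprint is a flat grayscale
# dilation; ndimage.grey_dilation(hist17, footprint=custom_filter) should
# give the same result and is typically much faster.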

# Only use grid boxes where the number of events > 1, i.e. more than 1 GLM event in a grid box.
binary17 = bloat_glm17 > 1
binary16 = bloat_glm16 > 1

# Draw on Map
mask17 = np.ma.array(binary17, mask=binary17 == 0)
mask16 = np.ma.array(binary16, mask=binary16 == 0)

m.pcolormesh(Hlon,
             Hlat,
             mask17,
             latlon=True,
             cmap='YlOrBr',
Exemple #45
0
def texture(gray_img,
            ksize,
            threshold,
            offset=3,
            texture_method='dissimilarity',
            borders='nearest',
            max_value=255):
    """Creates a binary image from a grayscale image using skimage texture calculation for thresholding.
    This function is quite slow.

    Inputs:
    gray_img       = Grayscale image data
    ksize          = Kernel size for texture measure calculation
    threshold      = Threshold value (0-255)
    offset         = Distance offsets
    texture_method = Feature of a grey level co-occurrence matrix, either
                     'contrast', 'dissimilarity', 'homogeneity', 'ASM', 'energy',
                     or 'correlation'. For equations of the different features see
                     scikit-image.
    borders        = How the array borders are handled, either 'reflect',
                     'constant', 'nearest', 'mirror', or 'wrap'
    max_value      = Value to apply above threshold (usually 255 = white)

    Returns:
    bin_img        = Thresholded, binary image

    :param gray_img: numpy.ndarray
    :param ksize: int
    :param threshold: int
    :param offset: int
    :param texture_method: str
    :param borders: str
    :param max_value: int
    :return bin_img: numpy.ndarray
    """

    # Function that calculates the texture of a kernel
    def calc_texture(inputs):
        inputs = np.reshape(a=inputs, newshape=[ksize, ksize])
        inputs = inputs.astype(np.uint8)
        # Greycomatrix takes image, distance offset, angles (in radians), symmetric, and normed
        # http://scikit-image.org/docs/dev/api/skimage.feature.html#skimage.feature.greycomatrix
        glcm = greycomatrix(inputs, [offset], [0],
                            256,
                            symmetric=True,
                            normed=True)
        diss = greycoprops(glcm, texture_method)[0, 0]
        return diss

    # Make an array the same size as the original image
    output = np.zeros(gray_img.shape, dtype=gray_img.dtype)

    # Apply the texture function over the whole image
    generic_filter(gray_img,
                   calc_texture,
                   size=ksize,
                   output=output,
                   mode=borders)

    # Threshold so higher texture measurements stand out
    bin_img = binary(gray_img=output,
                     threshold=threshold,
                     max_value=max_value,
                     object_type='light')

    _debug(visual=bin_img,
           filename=os.path.join(params.debug_outdir,
                                 str(params.device) + "_texture_mask.png"))

    return bin_img
Exemple #46
0
def angle_filter(data, FILT_SIZE=5):
    return ndimage.generic_filter(data.astype('f'),
                                  th2,
                                  FILT_SIZE,
                                  extra_arguments=genCoords(FILT_SIZE))
def test_local_median_validation(u_threshold=3, N=3, size=1):

    u = np.random.rand(2 * N + 1, 2 * N + 1)
    u[N, N] = np.median(u) * 10

    print('mockup data')
    print(u)

    # prepare two copies for comparison
    tmp = u.copy()

    # and masked array copy
    masked_u = np.ma.masked_array(u.copy(), np.ma.nomask)
    masked_u[N + 1:, N + 1:-1] = np.ma.masked
    print('masked version, see inf')
    print(masked_u.filled(np.inf))

    f = np.ones((2 * size + 1, 2 * size + 1))
    f[size, size] = 0
    print('Kernel or footprint')
    print(f)

    # # out = convolve2d(u, f, boundary='wrap', mode='same')/f.sum()
    # out = median_filter(u,footprint=f)
    # print('median filter does not work with nan')
    # print(out)

    um = generic_filter(u,
                        np.nanmedian,
                        mode='constant',
                        cval=np.nan,
                        footprint=f)
    print('generic filter output with nan')
    print(um)

    ind = np.abs((u - um)) > u_threshold
    print('found outliers in places:')
    print(ind)

    # mark those places
    u[ind] = np.nan
    print('marked data and the mask')
    print(u)

    mask = np.zeros(u.shape, dtype=bool)
    mask[ind] = True
    print(mask)

    # now we test our function, which is just a wrapper
    # around the steps above
    u1, v1, mask1 = validation.local_median_val(tmp, tmp, 3, 3)

    print('data and its mask')
    print(u1)
    print(mask1)

    # Now we shall test a masked array (new in 0.23.3) for image-masked data:
    # the image mask is a masked-array property, while NaN in the matrix is
    # the marker left by the previous validation step
    u2, v2, mask2 = validation.local_median_val(masked_u.copy(),
                                                masked_u.copy(), 3, 3)

    print('data')
    print(u2.data)
    print('image mask')
    print(u2.mask)
    print('invalid vector mask')
    print(mask2)

    print('Assert expected results')
    assert np.isnan(u[N, N])

    assert mask[N, N]

    assert np.isnan(u1[N, N])
    assert mask1[N, N]

    assert np.isnan(u2.data[N, N])
    assert mask2[N, N]
    assert u2.mask[N + 1, N + 1]
def variance_feature(img_grey):
    print('[INFO] Computing variance feature.')
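    # Note: with size=1 each window holds a single pixel, so np.var is
    # identically zero; size=3 or larger is needed for a meaningful
    # local-variance feature.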
    varianceMatrix = ndimage.generic_filter(img_grey, np.var, size=1)
    return varianceMatrix
Exemple #49
0
    def outlier_rej(vals,
                    weights,
                    axes,
                    order=5,
                    mode='smooth',
                    max_ncycles=3,
                    max_rms=3.,
                    max_rms_noise=0.,
                    window_noise=11.,
                    fix_rms=0.,
                    fix_rms_noise=0.,
                    replace=False):
        """
        Reject outliers using a running median
        vals = the array (avg must be 0)
        weights = the weights to convert into flags
        axes = array with axes values (1d or 2d)
        order = "see polyfit()"
        max_ncycles = maximum number of cycles
        max_rms = number of rms times for outlier flagging
        max_rms_noise = cut on the rms of the rmss
        window_noise = window used to calculate the rmss to detect noise
        replace = instead of flag it, replace the data point with the smoothed one

        return: flags array and final rms
        """

        # renormalize axes to have decent numbers
        if len(axes) == 1:
            axes[0] -= axes[0][0]
            axes[0] /= axes[0][1] - axes[0][0]
        elif len(axes) == 2:
            axes[0] -= axes[0][0]
            axes[0] /= (axes[0][1] - axes[0][0])
            axes[1] -= axes[1][0]
            axes[1] /= (axes[1][1] - axes[1][0])
        else:
            logging.error(
                'FLAG operation can flag only along 1 or 2 axes. Given axes: '
                + str(axes))
            return

        # test artificial data
        #axes[0] = np.array(range(len(axes[0])))*24414.0625
        #axes[1] = np.array(range(len(axes[1])))*5
        #vals = np.empty( shape=[len(axes[0]), len(axes[1])] )
        #for i, xi in enumerate(axes[0]):
        #    for j, yj in enumerate(axes[1]):
        #        vals[i,j] = 10*xi + yj**2
        #weights = np.ones_like(vals)

        if replace:
            orig_weights = np.copy(weights)

        for i in range(max_ncycles):

            # all is flagged? break
            if (weights == 0).all():
                rms = 0.
                break

            if mode == 'smooth':
                vals_smooth = np.copy(vals)
                np.putmask(vals_smooth, weights == 0, np.nan)
                # speedup: if all data are used then just do a median and don't call the filter
                if all(o == 0 for o in order):
                    vals_smooth = np.ones(
                        vals_smooth.shape) * np.nanmedian(vals_smooth)
                else:
                    # use a fresh loop variable so the cycle counter i is not clobbered
                    for j, o in enumerate(order):
                        if o == 0: order[j] = vals_smooth.shape[j]
                    vals_smooth = generic_filter(vals_smooth,
                                                 np.nanmedian,
                                                 size=order,
                                                 mode='constant',
                                                 cval=np.nan)
                vals_detrend = vals - vals_smooth
            # TODO: should be rolling
            elif mode == 'poly':
                # get polynomia and values
                if len(axes) == 1:
                    fit_sol = polyfit(axes[0], z=vals, w=weights, order=order)
                    vals_detrend = vals - polyval(axes[0], m=fit_sol)
                elif len(axes) == 2:
                    fit_sol = polyfit(axes[0],
                                      axes[1],
                                      z=vals,
                                      w=weights,
                                      order=order)
                    vals_smooth = polyval(axes[0], axes[1], m=fit_sol)
                    vals_detrend = vals - vals_smooth
            # TODO: should be rolling
            elif mode == 'spline':
                # get spline
                if len(axes) == 1:
                    spline = scipy.interpolate.UnivariateSpline(axes[0],
                                                                y=vals,
                                                                w=weights,
                                                                k=order[0])
                    vals_detrend = vals - spline(axes[0])
                elif len(axes) == 2:
                    x, y = np.meshgrid(axes[0], axes[1], indexing='ij')
                    # spline doesn't like w=0
                    z = vals[(weights != 0)].flatten()
                    x = x[(weights != 0)].flatten()
                    y = y[(weights != 0)].flatten()
                    w = weights[(weights != 0)].flatten()
                    spline = scipy.interpolate.SmoothBivariateSpline(
                        x, y, z, w, kx=order[0], ky=order[1])
                    vals_smooth = spline(axes[0], axes[1])
                    vals_detrend = vals - vals_smooth

            # remove outliers
            if max_rms > 0 or fix_rms > 0:
                # median calc https://en.wikipedia.org/wiki/Median_absolute_deviation
                rms = 1.4826 * np.nanmedian(
                    np.abs(vals_detrend[(weights != 0)]))
                if np.isnan(rms): weights[:] = 0
                elif fix_rms > 0:
                    flags = abs(vals_detrend) > fix_rms
                    weights[flags] = 0
                else:
                    flags = abs(vals_detrend) > max_rms * rms
                    weights[flags] = 0

            # remove noisy regions of data
            if max_rms_noise > 0 or fix_rms_noise > 0:
                rmses = rolling_rms(vals_detrend, window_noise)
                rms = 1.4826 * np.nanmedian(abs(rmses))

                # rejection
                if fix_rms_noise > 0:
                    flags = rmses > fix_rms_noise
                else:
                    flags = rmses > (max_rms_noise * rms)
                weights[flags] = 0

            # all is flagged? break
            if (weights == 0).all():
                rms = 0.
                break

            # no flags? break
            if not flags.any():
                break

        # replace (outlier) flagged values with smoothed ones
        if replace:
            logging.debug('Replacing %.2f%% of the data.' %
                          (100. * np.sum(orig_weights != weights) / weights.size))
            vals[np.where(orig_weights != weights)] = vals_smooth[np.where(
                orig_weights != weights)]
            weights = orig_weights

        # plot 1d
        plot = False
        if plot:
            import matplotlib as mpl
            mpl.use("Agg")
            import matplotlib.pyplot as plt
            plt.plot(axes[1][weights[0] == 0], vals[0][weights[0] == 0], 'ro')
            plt.plot(axes[1], vals[0], 'k.')
            plt.plot(axes[1], vals_smooth[0], 'r.')
            plt.plot(axes[1], vals_detrend[0], 'g.')
            plt.savefig('test.png')
            sys.exit(1)

        return weights, vals, rms
Exemple #50
0
def feature_extraction(img):

    df = pd.DataFrame()

    reshape_img = img.reshape(-1)
    df['Original Image'] = reshape_img

    num = 1
    kernels = []
    for theta in range(2):
        theta = theta / 4. * np.pi
        for sigma in (1, 3):
            for lamda in np.arange(0, np.pi, np.pi / 4):
                for gamma in (0.05, 0.5):

                    gabor_label = 'Gabor' + str(num)
                    ksize = 9
                    kernel = cv2.getGaborKernel((ksize, ksize),
                                                sigma,
                                                theta,
                                                lamda,
                                                gamma,
                                                0,
                                                ktype=cv2.CV_32F)
                    kernels.append(kernel)
                    #Now filter image and add values to new column
                    fimg = cv2.filter2D(reshape_img, cv2.CV_8UC3, kernel)
                    filtered_img = fimg.reshape(-1)
                    df[gabor_label] = filtered_img  #Modify this to add new column for each gabor
                    num += 1
    edges = cv2.Canny(img, 100, 200)
    edges1 = edges.reshape(-1)
    df['Canny Edge'] = edges1

    from skimage.filters import roberts, sobel, scharr, prewitt

    edge_roberts = roberts(img)
    edge_roberts1 = edge_roberts.reshape(-1)
    df['Roberts'] = edge_roberts1

    edge_sobel = sobel(img)
    edge_sobel1 = edge_sobel.reshape(-1)
    df['Sobel'] = edge_sobel1

    edge_scharr = scharr(img)
    edge_scharr1 = edge_scharr.reshape(-1)
    df['Scharr'] = edge_scharr1

    #Feature 7 is Prewitt
    edge_prewitt = prewitt(img)
    edge_prewitt1 = edge_prewitt.reshape(-1)
    df['Prewitt'] = edge_prewitt1

    #Feature 8 is Gaussian with sigma=3
    from scipy import ndimage as nd
    gaussian_img = nd.gaussian_filter(img, sigma=3)
    gaussian_img1 = gaussian_img.reshape(-1)
    df['Gaussian s3'] = gaussian_img1

    #Feature 9 is Gaussian with sigma=7
    gaussian_img2 = nd.gaussian_filter(img, sigma=7)
    gaussian_img3 = gaussian_img2.reshape(-1)
    df['Gaussian s7'] = gaussian_img3

    median_img = nd.median_filter(img, size=3)
    median_img1 = median_img.reshape(-1)
    df['Median s3'] = median_img1

    #Feature 11 is Variance with size=3
    variance_img = nd.generic_filter(img, np.var, size=3)
    variance_img1 = variance_img.reshape(-1)
    df['Variance s3'] = variance_img1

    harris_image_gray = np.float32(img)
    harris = cv2.cornerHarris(harris_image_gray, 25, 1, 0.06)
    harris = harris.reshape(-1)
    df['Harris'] = harris

    orb = cv2.ORB_create(20000)
    kp, des = orb.detectAndCompute(img, None)
    orb_img = cv2.drawKeypoints(img, kp, None, flags=None)
    orb_img = cv2.cvtColor(orb_img, cv2.COLOR_BGR2GRAY)
    orb_img = orb_img.reshape(-1)
    df['ORB'] = orb_img

    kmeans = KMeans(n_clusters=3, random_state=0).fit(img)
    kmeans_lbl = kmeans.cluster_centers_[kmeans.labels_]
    kmeans_lbl = kmeans_lbl.reshape(-1)
    df['kmeans'] = kmeans_lbl

    return df
Exemple #51
0
def scale_invariant_point_detection(img, sigma, k, sigma_final, threshold):
    if img.ndim == 3 and img.shape[2] > 1:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # number of scale iterations
    n = math.ceil((math.log(sigma_final) - math.log(sigma)) / math.log(k))

    # init scale space
    h = img.shape[0]
    w = img.shape[1]
    scale_space = np.zeros((h, w, n))

    # generate the Laplacian of Gaussian for the first scale level
    filt_size = 2 * math.ceil(3 * sigma) + 1
    log_filter = np.zeros((filt_size, filt_size))
    log_filter[filt_size // 2, filt_size // 2] = 1
    log_filter = nd.gaussian_laplace(log_filter, sigma)
    log_filter = sigma * sigma * log_filter

    # generate the Laplacian of Gaussian for the remaining levels
    img_res = img.copy()
    for i in range(n):
        im_filtered = filter2d(img_res, log_filter)
        im_filtered = np.power(im_filtered, 2)
        scale_space[:, :, i] = cv2.resize(im_filtered,
                                          (img.shape[1], img.shape[0]),
                                          interpolation=cv2.INTER_CUBIC)

        if i != n - 1:
            img_res = cv2.resize(img, (math.ceil(img.shape[1] / (k**(i + 1))),
                                       math.ceil(img.shape[0] / (k**(i + 1)))),
                                 interpolation=cv2.INTER_CUBIC)
            print('add layer...')

    # perform non-maximum suppression for each scale-space slice
    super_size = 5
    max_space = np.zeros((h, w, n))
    for i in range(n):
        max_space[:, :, i] = nd.generic_filter(scale_space[:, :, i],
                                               np.max,
                                               size=(super_size, super_size))

    # perform non-maximum suppression between scales and threshold
    for i in range(n):
        max_space[:, :, i] = np.max(max_space[:, :,
                                              max(i - 1, 0):min(i + 2, n)],
                                    axis=2)

    max_space = max_space * (max_space == scale_space)
    print("max: %f" % max_space.max())
    print("min: %f" % max_space.min())

    # record the positions and correspondence radius of scale invariant blobs
    r = []
    c = []
    rad = []
    for i in range(n):
        [rows, cols] = np.where(max_space[:, :, i] > threshold)
        num_blobs = len(rows)
        radii = sigma * (k**i) * (2**0.5)
        radii = [radii] * num_blobs
        r.extend(rows)
        c.extend(cols)
        rad.extend(radii)

    return c, r, rad
Exemple #52
0
    # spatial smoothing...
    # spatially smooth the 2-D daily slices of data using a mean generic filter. (without any aggregation)
    footprint_type = 'queens'
    footprint_lu = {
        'rooks': np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]),
        'queens': np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
    }

    footprint = footprint_lu[footprint_type]
    print('spatial smooth')
    # spatial_smoothed = spatial_smooth( da_interp.values, footprint=footprint, ncpus=30 )
    # spatial_smoothed = spatial_smooth_serial( da_interp.values, footprint=footprint )
    # spatial_smoothed = spatial_smooth( da_interp.values, size=3, ncpus=ncpus )
    spatial_smoothed = [
        generic_filter(a.copy(),
                       LowLevelCallable(spatial_smooth_serial_numba.ctypes),
                       footprint=footprint) for a in da_interp.values
    ]
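
    # A minimal sketch (an assumption, not the original
    # spatial_smooth_serial_numba) of how such a numba cfunc can be built for
    # scipy.LowLevelCallable. generic_filter expects the C signature:
    #   int callback(double *buffer, npy_intp filter_size,
    #                double *return_value, void *user_data)
    #
    # from numba import cfunc, carray
    # from numba.types import CPointer, float64, intc, intp, voidptr
    #
    # @cfunc(intc(CPointer(float64), intp, CPointer(float64), voidptr))
    # def nbnanmean(values_ptr, len_values, result, data):
    #     values = carray(values_ptr, (len_values,), dtype=float64)
    #     total, count = 0.0, 0
    #     for v in values:
    #         if v == v:  # NaN-aware: NaN != NaN
    #             total += v
    #             count += 1
    #     result[0] = total / count if count > 0 else np.nan
    #     return 1  # nonzero signals success
    #
    # smoothed = generic_filter(arr, LowLevelCallable(nbnanmean.ctypes),
    #                           footprint=footprint)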

    # # # # # TEST NEW SMOOTHING:
    # arr_list = [a.copy() for a in da_interp.values]
    # f = partial( mean_filter_2D, footprint=footprint )
    # pool = mp.Pool( ncpus-1 )
    # spatial_smoothed = pool.map( f, arr_list )
    # pool.close()
    # pool.join()
    # # spatial_smoothed = np.array(out_arr)
    # # # # # # END TEST

    # mask the spatial smoothed outputs with the mask at each 2D slice.
    def _maskit(x, mask):
def FCC(input, output, window=20, breakpoint=0.01):
    """
    Autor = Juanma Cintas Rodríguez
    Fecha = 21/03/2019
    email = [email protected]

    Descripción:
    Crea una proporción de una clase respecto al total de puntos, basado en el cálculo de Fracción Cabida Cubierta (FCC)
    presentado por García et al (2011) (DOI:10.10016/j.jag.2011.03.006). Dependerá del raster introducido en la función
    si es calculada la FCC, la TCC o la SCC.

    Argumentos:
    @input = Raster del cual será computada la relación (FCC, TCC o SCC)
    @output = Nombre del raster a generar.
    @winodw = Tamaño de la ventana móvil usada para calcular la relación. A cosiderar que, cuanto menor sea la resolución,
    mayor deberá ser la ventana móvil para conseguir buenos resultados. Una medida puede ser 4 veces la resolución del raster.
    @breakpoint = Valor a partir del cual se consideraran las celdas del raster como vegetación.


    La documentación de la libería pdal se puede encontrar en la siguiente
    dirección: https://pdal.io/index.html

    Documentación acerca de gdal/ogr y su API de python puede ser encontrada en el siguiente enlace:
    https://gdal.org

    Documentación acerca de la librería scipy puede ser encontrada en el siguiente enlace:
    https://www.scipy.org/

    Documentación acerca de la libreri numpy puede ser encontrada en el siguiente enlace:
    http://www.numpy.org/
    """

    def compute_fraction(array):
        nveg = np.sum(array == 1)
        total = len(array)
        out = (nveg / total) * 100
        return out

    # Reading data needed
    tch = input
    in_ds = gdal.Open(tch)
    rows = in_ds.RasterYSize
    cols = in_ds.RasterXSize
    in_band = in_ds.GetRasterBand(1)
    data = in_band.ReadAsArray(0, 0, cols, rows).astype(float)

    # Reclassifying data
    data[data > breakpoint] = 1
    data[data <= breakpoint] = 0

    # Computing fraction on the whole raster through a moving window.
    TCC = ndimage.generic_filter(data, compute_fraction, size=window)

    # Setting output
    gtiff_driver = gdal.GetDriverByName("GTiff")
    out_ds = gtiff_driver.Create(output, cols, rows, 1, in_band.DataType)
    out_ds.SetProjection(in_ds.GetProjection())
    out_ds.SetGeoTransform(in_ds.GetGeoTransform())

    # Writing data
    out_band = out_ds.GetRasterBand(1)
    out_band.WriteArray(TCC)
    # out_ds.BuildOverviews("Average", [2, 4, 8, 16, 32])

    out_ds.FlushCache()

    del in_ds, out_ds
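
# A hedged usage sketch for FCC; the file names are hypothetical. window is
# in pixels, and breakpoint is the value above which a cell counts as
# vegetation.
FCC('tch.tif', 'fcc_20px.tif', window=20, breakpoint=0.01)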
Exemple #54
0
def threshold_local(image,
                    block_size,
                    method='gaussian',
                    offset=0,
                    mode='reflect',
                    param=None):
    """Compute a threshold mask image based on local pixel neighborhood.

    Also known as adaptive or dynamic thresholding. The threshold value is
    the weighted mean for the local neighborhood of a pixel subtracted by a
    constant. Alternatively the threshold can be determined dynamically by a
    given function, using the 'generic' method.

    Parameters
    ----------
    image : (N, M) ndarray
        Input image.
    block_size : int
        Odd size of pixel neighborhood which is used to calculate the
        threshold value (e.g. 3, 5, 7, ..., 21, ...).
    method : {'generic', 'gaussian', 'mean', 'median'}, optional
        Method used to determine adaptive threshold for local neighbourhood in
        weighted mean image.

        * 'generic': use custom function (see `param` parameter)
        * 'gaussian': apply gaussian filter (see `param` parameter for custom\
                      sigma value)
        * 'mean': apply arithmetic mean filter
        * 'median': apply median rank filter

        By default the 'gaussian' method is used.
    offset : float, optional
        Constant subtracted from weighted mean of neighborhood to calculate
        the local threshold value. Default offset is 0.
    mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
        The mode parameter determines how the array borders are handled, where
        cval is the value when mode is equal to 'constant'.
        Default is 'reflect'.
    param : {int, function}, optional
        Either specify sigma for 'gaussian' method or function object for
        'generic' method. This function takes the flat array of local
        neighbourhood as a single argument and returns the calculated
        threshold for the centre pixel.

    Returns
    -------
    threshold : (N, M) ndarray
        Threshold image. All pixels in the input image higher than the
        corresponding pixel in the threshold image are considered foreground.

    References
    ----------
    .. [1] http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=threshold#adaptivethreshold

    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()[:50, :50]
    >>> binary_image1 = image > threshold_local(image, 15, 'mean')
    >>> func = lambda arr: arr.mean()
    >>> binary_image2 = image > threshold_local(image, 15, 'generic',
    ...                                         param=func)
    """
    if block_size % 2 == 0:
        raise ValueError("The kwarg ``block_size`` must be odd! Given "
                         "``block_size`` {0} is even.".format(block_size))
    assert_nD(image, 2)
    thresh_image = np.zeros(image.shape, 'double')
    if method == 'generic':
        ndi.generic_filter(image,
                           param,
                           block_size,
                           output=thresh_image,
                           mode=mode)
    elif method == 'gaussian':
        if param is None:
            # automatically determine sigma which covers > 99% of distribution
            sigma = (block_size - 1) / 6.0
        else:
            sigma = param
        ndi.gaussian_filter(image, sigma, output=thresh_image, mode=mode)
    elif method == 'mean':
        mask = 1. / block_size * np.ones((block_size, ))
        # separation of filters to speedup convolution
        ndi.convolve1d(image, mask, axis=0, output=thresh_image, mode=mode)
        ndi.convolve1d(thresh_image,
                       mask,
                       axis=1,
                       output=thresh_image,
                       mode=mode)
    elif method == 'median':
        ndi.median_filter(image, block_size, output=thresh_image, mode=mode)

    return thresh_image - offset
def thres_contrast(img, n=DEFAULT_N):
    threshold_matrix = generic_filter(img, contrast_aux, size=(n, n))
    return apply_threshold(img, threshold_matrix)
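
# contrast_aux is not shown in this listing. A plausible sketch, assuming a
# midpoint (Bernsen-style) threshold over the local dynamic range; the exact
# original may differ:
def contrast_aux_sketch(window):
    return (window.min() + window.max()) / 2.0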
Exemple #56
0
    def process(self, temperature_cube, orography_cube, land_sea_mask_cube):
        """Calculates the lapse rate from the temperature and orography cubes.

        Args:
            temperature_cube (iris.cube.Cube):
                Cube of air temperatures (K).

            orography_cube (iris.cube.Cube):
                Cube containing orography data (metres)

            land_sea_mask_cube (iris.cube.Cube):
                Cube containing a binary land-sea mask. True for land-points
                and False for Sea.

        Returns:
            iris.cube.Cube:
                Cube containing lapse rate (K m-1)

        Raises:
            TypeError: If input cubes are not cubes.
            ValueError: If input cubes have the wrong units.

        """

        if not isinstance(temperature_cube, iris.cube.Cube):
            msg = "Temperature input is not a cube, but {}"
            raise TypeError(msg.format(type(temperature_cube)))

        if not isinstance(orography_cube, iris.cube.Cube):
            msg = "Orography input is not a cube, but {}"
            raise TypeError(msg.format(type(orography_cube)))

        if not isinstance(land_sea_mask_cube, iris.cube.Cube):
            msg = "Land/Sea mask input is not a cube, but {}"
            raise TypeError(msg.format(type(land_sea_mask_cube)))

        # Converts cube units.
        temperature_cube.convert_units('K')
        orography_cube.convert_units('metres')

        check_cube_not_float64(temperature_cube, fix=True)

        # Extract x/y co-ordinates.
        x_coord = temperature_cube.coord(axis='x').name()
        y_coord = temperature_cube.coord(axis='y').name()

        # Extract orography and land/sea mask data.
        orography_data = next(orography_cube.slices([y_coord, x_coord])).data
        land_sea_mask = next(land_sea_mask_cube.slices([y_coord,
                                                        x_coord])).data
        # Fill sea points with NaN values.
        orography_data = np.where(land_sea_mask, orography_data, np.nan)

        # Extract data array dimensions to define output arrays.
        dataarray_shape = next(temperature_cube.slices([y_coord,
                                                        x_coord])).shape
        dataarray_size = dataarray_shape[0] * dataarray_shape[1]

        # Array containing all of the subsections extracted from data array.
        # Also enforce single precision to speed up calculations.
        all_temp_subsections = np.zeros(
            (dataarray_size, self.nbhoodarray_size), dtype=np.float32)
        all_orog_subsections = np.zeros(
            (dataarray_size, self.nbhoodarray_size), dtype=np.float32)

        # Attempts to extract realizations. If cube doesn't contain the
        # dimension then place within list.
        try:
            slices_over_realization = temperature_cube.slices_over(
                "realization")
        except iris.exceptions.CoordinateNotFoundError:
            slices_over_realization = [temperature_cube]

        # Creates cube list to hold lapse rate data.
        lapse_rate_cube_list = iris.cube.CubeList([])

        for temp_slice in slices_over_realization:

            # Create slice to store lapse rate values.
            lapse_rate_slice = temp_slice

            temperature_data = temp_slice.data

            # Fill sea points with NaN values. Can't use Numpy mask since not
            # recognised by "generic_filter" function.
            temperature_data = np.where(land_sea_mask, temperature_data,
                                        np.nan)

            # Saves all neighbourhoods into "all_temp_subsections".
            # cval is value given to points outside the array.
            fnc = SaveNeighbourhood(allbuffers=all_temp_subsections)
            generic_filter(temperature_data,
                           fnc.filter,
                           size=self.nbhood_size,
                           mode='constant',
                           cval=np.nan)

            fnc = SaveNeighbourhood(allbuffers=all_orog_subsections)
            generic_filter(orography_data,
                           fnc.filter,
                           size=self.nbhood_size,
                           mode='constant',
                           cval=np.nan)

            # height_diff_mask is True for points where the height
            # difference between the central point and its neighbours
            # is > max_height_diff.
            height_diff_mask = self._create_heightdiff_mask(
                all_orog_subsections)

            # Mask points with extreme height differences as NaN.
            all_orog_subsections = np.where(height_diff_mask, np.nan,
                                            all_orog_subsections)
            all_temp_subsections = np.where(height_diff_mask, np.nan,
                                            all_temp_subsections)

            # Loop through both arrays and find gradient of each subsection.
            # The gradient indicates lapse rate - save into another array.
            # TODO: This for loop is the bottleneck in the code and needs to
            # be parallelised.
            lapse_rate_array = [
                self._calc_lapse_rate(temp, orog) for temp, orog in zip(
                    all_temp_subsections, all_orog_subsections)
            ]

            lapse_rate_array = np.array(
                lapse_rate_array, dtype=np.float32).reshape(dataarray_shape)

            # Enforces upper and lower limits on lapse rate values.
            lapse_rate_array = np.where(lapse_rate_array < self.min_lapse_rate,
                                        self.min_lapse_rate, lapse_rate_array)
            lapse_rate_array = np.where(lapse_rate_array > self.max_lapse_rate,
                                        self.max_lapse_rate, lapse_rate_array)

            lapse_rate_slice.data = lapse_rate_array
            lapse_rate_cube_list.append(lapse_rate_slice)

        lapse_rate_cube = lapse_rate_cube_list.merge_cube()
        lapse_rate_cube.rename('air_temperature_lapse_rate')
        lapse_rate_cube.units = 'K m-1'

        return lapse_rate_cube
def thres_median(img, n=DEFAULT_N):
    threshold_matrix = generic_filter(img, np.median, size=(n, n))
    return apply_threshold(img, threshold_matrix)
def thres_global(img, T=128):
    return generic_filter(img, lambda pi: 0 if pi > T else 255, size=(1, 1))
def thres_niblack(img, n=DEFAULT_N, k=0.1):
    threshold_matrix = generic_filter(img, niblack_aux(k), size=(n, n))
    return apply_threshold(img, threshold_matrix)
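
# niblack_aux is likewise not shown; the standard Niblack rule it presumably
# implements is t = m + k * s over the local window:
def niblack_aux_sketch(k):
    def aux(window):
        return window.mean() + k * window.std()
    return aux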
    def ordfilt2(self, A):
        return ndimage.generic_filter(A,
                                      self.local_filter,
                                      size=(self.mask_size, self.mask_size))