Example #1
import numpy as np
from scipy.ndimage import uniform_filter, variance


def lee_filter(img, size, mode='reflect'):
    """
    lee滤波算法

    Reference:
    Lee, J. S.: Speckle suppression and analysis for SAR images, Opt. Eng., 25, 636–643, 1986.
    @param img: 输入的影像 numpy数组
    @param size: 滤波窗口大小
    @param mode: 滤波边缘处理方式 详见:https://docs.scipy.org/doc/scipy-0.19.1/reference/generated/scipy.ndimage.uniform_filter.html
    @return 返回滤波后的影像
    """

    img_mean = uniform_filter(img, (size, size), mode=mode)
    img_sqr_mean = uniform_filter(img ** 2, (size, size), mode=mode)

    img_variance = img_sqr_mean - img_mean ** 2
    overall_variance = variance(img)

    # silence divide-by-zero warnings from flat regions where the local
    # variance is zero, without permanently changing numpy's error state
    with np.errstate(divide='ignore', invalid='ignore'):
        img_weights = img_variance ** 2 / \
                      (img_variance ** 2 + overall_variance ** 2)
    img_output = img_mean + img_weights * (img - img_mean)

    return img_output.astype(int)
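A minimal usage sketch for the snippet above (my own synthetic test image; the gamma speckle model is illustrative, not part of the original):

rng = np.random.default_rng(0)
clean = np.full((128, 128), 50.0)
clean[32:96, 32:96] = 200.0                # bright square on a dim background
speckle = rng.gamma(shape=4.0, scale=0.25, size=clean.shape)  # mean 1.0
noisy = clean * speckle                    # multiplicative noise, as in SAR

smoothed = lee_filter(noisy, size=7)
print(noisy.std(), smoothed.std())         # the filtered image varies less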
Example #2
def lee_filter2(img, window=(3, 3)):
    """ Apply a Lee filter to a numpy array. Does not modify original.
    
    Code is based on:
    https://stackoverflow.com/questions/39785970/speckle-lee-filter-in-python
    
    PCI implementation is found at
    http://www.pcigeomatics.com/geomatica-help/references/pciFunction_r/python/P_fle.html
    
    
    *Parameters*
    
    img : numpy array  
        Array to which filter is applied
    window : tuple of int
        Size of filter window along each axis
    
    *Returns*
    
    array 
        filtered array
    """

    img_mean = uniform_filter(img, window)
    img_sqr = np.square(img)

    img_sqr_mean = uniform_filter(img_sqr, window)
    img_variance = img_sqr_mean - img_mean**2

    overall_variance = variance(img)

    img_weights = img_variance / (img_variance + overall_variance)
    img_output = img_mean + img_weights * (img - img_mean)

    return img_output
Example #3
    def lee_filter(self, img):
        # method form: the window size comes from self.intensity
        img_mean = uniform_filter(img, (self.intensity, self.intensity))
        img_sqr_mean = uniform_filter(img**2, (self.intensity, self.intensity))
        img_variance = img_sqr_mean - img_mean**2
        overall_variance = variance(img)
        img_weights = img_variance / (img_variance + overall_variance)
        img_output = img_mean + img_weights * (img - img_mean)
        return img_output
Example #4
def lee_filter(img, size):
    img_mean = uniform_filter(img, (size, size))
    img_sqr_mean = uniform_filter(img**2, (size, size))
    img_variance = img_sqr_mean - img_mean**2
    overall_variance = variance(img)
    img_weights = img_variance / (img_variance + overall_variance)
    img_output = img_mean + img_weights * (img - img_mean)
    return img_output
Example #5
def lee_filter(x, size):
    img_mean = uniform_filter(x, (size, size))
    img_sq_mean = uniform_filter(x**2, (size, size))
    img_var = img_sq_mean - img_mean**2

    overall_var = variance(x)
    img_weights = img_var / (img_var + overall_var)
    return img_mean + img_weights * (x - img_mean)
Example #6
def srad1(img, niter, gamma, kernel, option):
    if img.ndim == 3:
        img = img.mean(2)

    img = img.astype('float32')
    imgout = img.copy()

    # the local/global statistics depend only on the fixed input image,
    # so compute the Lee-style threshold map q02 once, not per pixel
    img_mean = uniform_filter(img, kernel)
    img_sqr_mean = uniform_filter(img**2, kernel)
    img_var = img_sqr_mean - img_mean**2
    overall_var = variance(img)
    q02 = img_var / (img_var + overall_var)

    for ii in range(niter):
        for i in range(1, imgout.shape[0] - 1):
            for j in range(1, imgout.shape[1] - 1):
                dN = imgout[i, j - 1] - imgout[i, j]
                dS = imgout[i, j + 1] - imgout[i, j]
                dE = imgout[i + 1, j] - imgout[i, j]
                dW = imgout[i - 1, j] - imgout[i, j]

                #gm2 = dN**2 + dS**2 + dE**2 + dW**2/imgout[i,j]**2
                #laplacian = imgout[i,j+1] + imgout[i,j-1] + imgout[i+1,j] + imgout[i-1,j]/imgout[i,j]

                c = imgout[i, j]
                q2N = 0.5 * (dN / c)**2 - 0.0625 * (imgout[i, j - 1] / c)**2 \
                    / (1 + 0.25 * (imgout[i, j - 1] / c))**2
                q2S = 0.5 * (dS / c)**2 - 0.0625 * (imgout[i, j + 1] / c)**2 \
                    / (1 + 0.25 * (imgout[i, j + 1] / c))**2
                q2E = 0.5 * (dE / c)**2 - 0.0625 * (imgout[i + 1, j] / c)**2 \
                    / (1 + 0.25 * (imgout[i + 1, j] / c))**2
                q2W = 0.5 * (dW / c)**2 - 0.0625 * (imgout[i - 1, j] / c)**2 \
                    / (1 + 0.25 * (imgout[i - 1, j] / c))**2

                # per-pixel threshold from the precomputed map; the original
                # used the whole q02 array here, which breaks the scalar update
                q0 = q02[i, j]
                if option == 1:
                    gN = np.exp(-(q2N - q0 / (q0 * (1 + q0))))
                    gS = np.exp(-(q2S - q0 / (q0 * (1 + q0))))
                    gE = np.exp(-(q2E - q0 / (q0 * (1 + q0))))
                    gW = np.exp(-(q2W - q0 / (q0 * (1 + q0))))
                elif option == 2:
                    gN = 1 / (1 + (q2N - q0 / (q0 * (1 + q0))))
                    gS = 1 / (1 + (q2S - q0 / (q0 * (1 + q0))))
                    gE = 1 / (1 + (q2E - q0 / (q0 * (1 + q0))))
                    gW = 1 / (1 + (q2W - q0 / (q0 * (1 + q0))))
                else:
                    raise ValueError('option must be 1 or 2')

                imgout[i, j] = (imgout[i, j] * (1 - gamma * (gN + gS + gE + gW))
                                + gamma * (gN * imgout[i, j - 1]
                                           + gS * imgout[i, j + 1]
                                           + gE * imgout[i + 1, j]
                                           + gW * imgout[i - 1, j]))
                #imgout[i,j] = imgout[i, j] + (gamma/4)*(gN*dN + gS*dS + gE*dE + gW*dW)
    return imgout
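A quick way to exercise srad1 (a sketch; the iteration count, step size gamma, and kernel size are my own illustrative choices, and the usual numpy/scipy imports are assumed in the function's module):

import numpy as np
from scipy.ndimage import uniform_filter, variance

rng = np.random.default_rng(1)
img = 100.0 + 20.0 * rng.random((64, 64))   # strictly positive test image
out = srad1(img, niter=5, gamma=0.05, kernel=3, option=1)
print(img.var(), out.var())                 # diffusion should smooth the image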
Example #7
def lee(image, size):
    image_mean = uniform_filter(image, (size, size))
    image_sqr_mean = uniform_filter(image**2, (size, size))
    image_var = image_sqr_mean - image_mean**2

    overall_var = variance(image)

    image_weights = image_var / (image_var + overall_var)
    image_dspk = image_mean + image_weights * (image - image_mean)
    return image_dspk    
Example #8
def lee_filter(img):
    # speckle filter https://stackoverflow.com/questions/39785970/speckle-lee-filter-in-python
    # note: the filter window here is as large as the image's first axis,
    # and the channels are filtered independently
    w, h, c = img.shape
    img_mean = uniform_filter(img, (w, w, 1))
    img_sqr_mean = uniform_filter(img**2, (w, w, 1))
    img_variance = img_sqr_mean - img_mean**2
    overall_variance = variance(img)
    img_weights = img_variance**2 / (img_variance**2 + overall_variance**2)
    img_output = img_mean + img_weights * (img - img_mean)
    return img_output
Example #9
def lee_filter(img, size=7):
#     print(np.min(img),np.max(img))
    img_mean = uniform_filter(img, (size, size))
    img_sqr_mean = uniform_filter(img**2, (size, size))
    img_variance = img_sqr_mean - img_mean**2
    overall_variance = variance(img)
    img_weights = img_variance**2 / (img_variance**2 + overall_variance**2)
    img_output = img_mean + img_weights * (img - img_mean)
#     print(np.min(img_output),np.max(img_output))
    return img_output
Example #10
def lee_filter(img, size):
    # `img` is a file path here: the image is read with cv2 and each of
    # the three colour channels is filtered independently
    img = cv2.imread(img)
    img_output = np.zeros(img.shape)
    for i in range(3):
        img_mean = uniform_filter(img[:, :, i], (size, size))
        img_sqr_mean = uniform_filter(img[:, :, i]**2, (size, size))
        img_variance = img_sqr_mean - img_mean**2
        overall_variance = variance(img[:, :, i])
        img_weights = img_variance / (img_variance + overall_variance)
        img_output[:, :,
                   i] = img_mean + img_weights * (img[:, :, i] - img_mean)
    return img_output
Example #11
def lee_filter(img, size=5):
    """Run a lee filter on an image to remove speckle noise"""
    # https://stackoverflow.com/questions/39785970/speckle-lee-filter-in-python
    img = img.astype(np.float32)
    img_mean = uniform_filter(img, (size, size))
    img_sqr_mean = uniform_filter(img**2, (size, size))
    img_variance = img_sqr_mean - img_mean**2

    overall_variance = variance(img)

    img_weights = img_variance**2 / (img_variance**2 + overall_variance**2)
    img_output = img_mean + img_weights * (img - img_mean)
    return img_output
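Worth noting: this snippet (like Examples #1, #8, #9, and #21) squares both variances in the weight, whereas Examples #2 through #5 use the unsquared ratio usually quoted for the Lee filter. Both yield weights in [0, 1], but the squared form pushes them harder toward 0 or 1. A side-by-side sketch (variable names are mine):

import numpy as np

local_var = np.array([0.1, 1.0, 10.0])
global_var = 1.0
w_plain = local_var / (local_var + global_var)              # Examples #2-#5
w_squared = local_var**2 / (local_var**2 + global_var**2)   # this example
print(w_plain)    # [0.0909 0.5    0.9091]
print(w_squared)  # [0.0099 0.5    0.9901]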
Example #12
File: sarcube.py  Project: fangfy/radar
def lee_filter_2d(da, size):
    """
    Apply lee filter of specified window size.
    Adapted from https://stackoverflow.com/questions/39785970/speckle-lee-filter-in-python
    Input is a 2d data array.
    """
    img = da.values
    img_mean = uniform_filter(img, (size, size))
    img_sqr_mean = uniform_filter(img**2, (size, size))
    img_variance = img_sqr_mean - img_mean**2
    overall_variance = variance(img)
    img_weights = img_variance / (img_variance + overall_variance)
    img_output = img_mean + img_weights * (img - img_mean)
    return img_output
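A usage sketch for the xarray variant (assuming xarray is installed; the dimension names are illustrative). Note it returns a plain numpy array, not a DataArray:

import numpy as np
import xarray as xr

da = xr.DataArray(np.random.default_rng(4).random((50, 60)) * 100,
                  dims=('y', 'x'))
filtered = lee_filter_2d(da, size=5)
print(type(filtered), filtered.shape)   # numpy.ndarray, (50, 60)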
Example #13
File: mytools.py  Project: jorag/deadwood
def lee_filter(img, size):
    """Lee filter for SAR despeckling.
    
    From Alex I.'s answer here:
    https://stackoverflow.com/questions/39785970/speckle-lee-filter-in-python
    """
    img_mean = uniform_filter(img, (size, size))
    img_sqr_mean = uniform_filter(img**2, (size, size))
    img_variance = img_sqr_mean - img_mean**2

    overall_variance = variance(img)

    img_weights = img_variance / (img_variance + overall_variance)
    img_output = img_mean + img_weights * (img - img_mean)
    return img_output
Example #14
    def lee_filter(img, size):
        from scipy.ndimage import uniform_filter, variance
        # temporarily zero out NaNs so they don't poison the filter
        # (note: this mutates the input array in place)
        mask = np.isnan(img)
        img[mask] = 0.0
        img_mean = uniform_filter(img, (size, size))
        img_sqr_mean = uniform_filter(img**2, (size, size))
        img_variance = img_sqr_mean - img_mean**2

        overall_variance = variance(img)

        img_weights = img_variance / (img_variance + overall_variance)
        img_output = img_mean + img_weights * (img - img_mean)
        img_output[mask] = np.nan
        return img_output
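A short check of the NaN handling (my own synthetic data, assuming the function is reachable at top level and numpy is imported as np in its module):

import numpy as np

img = np.random.default_rng(2).random((32, 32)) * 100
img[10:12, 10:12] = np.nan                 # simulate missing pixels
out = lee_filter(img, size=5)
print(np.isnan(out).sum())                 # 4 -- the NaN mask is preserved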
Example #15
def __test_image_variance():
    wsi_data = parse_dataset(
        '/media/shishigami/6CC13AD35BD48D86/C16Data/train/data_test.csv')
    # earlier random test coordinates; only the final assignment is used
    # coords = randint(45300, 50780), randint(102700, 108600)
    # coords = randint(62742, 88063), randint(4870, 29340)
    coords = 69560, 120800
    slide = wsi_data[1]
    dim = (768, 768)
    img_1, label_1 = slide.read_region_and_label(coords, 1, dim)

    from scipy.ndimage import variance

    print(variance(img_1))
    print(image_variance(img_1))
    plt.imshow(img_1)
    plt.show()
Example #16
def leeFilter1D_Add(I, window_size):
    """
    Implementation of Additive Lee filter

    Where I is the signal and windows_size the number of pixel take into account
    for the local mean
    output = localmean + K * (I - localmean)
    """

    I = np.array(I)
    mean_I = uniform_filter(I, window_size)
    sqr_mean_I = uniform_filter(I**2, window_size)
    var_I = sqr_mean_I - mean_I**2

    overall_variance = variance(I)

    weight_I = var_I / (var_I + overall_variance)
    output_I = mean_I + weight_I * (I - mean_I)
    return output_I
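A 1-D usage sketch (a noisy sine of my own making; the window size is illustrative):

import numpy as np

rng = np.random.default_rng(3)
t = np.linspace(0, 4 * np.pi, 500)
noisy = np.sin(t) + rng.normal(scale=0.3, size=t.size)
smooth = leeFilter1D_Add(noisy, window_size=15)
print(np.abs(noisy - np.sin(t)).mean(), np.abs(smooth - np.sin(t)).mean())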
Example #17
def lee_filter(img, size):
    """Applies a Lee filter
    Parameters
    ----------
    img : numpy ndarray
    size : int
        filter size.
    Returns
    -------
    numpy ndarray
        Result.
    """
    img_mean = uniform_filter(img, (size, size))
    img_sqr_mean = uniform_filter(img**2, (size, size))
    img_variance = img_sqr_mean - img_mean**2

    overall_variance = variance(img)

    img_weights = img_variance / (img_variance + overall_variance)
    img_output = img_mean + img_weights * (img - img_mean)
    return img_output
Example #18
    def _calculate_focus_measure(self, src, operator, roi):
        '''
            see
            IMPLEMENTATION OF A PASSIVE AUTOMATIC FOCUSING ALGORITHM
            FOR DIGITAL STILL CAMERA
            DOI 10.1109/30.468047
            and
            http://cybertron.cg.tu-berlin.de/pdci10/frankencam/#autofocus
        '''

        # need to resize to 640,480. this is the space the roi is in
        #        s = resize(grayspace(pychron), 640, 480)
        src = grayspace(src)
        v = crop(src, *roi)

        di = dict(var=lambda x: variance(x),
                  laplace=lambda x: get_focus_measure(x, 'laplace'),
                  sobel=lambda x: ndsum(
                      generic_gradient_magnitude(x, sobel, mode='nearest')))

        func = di[operator]
        return func(v)
Example #20
def predicteff(o, m):
    '''
    For paired time series vectors o (observed) and m (model), calculate
    the prediction efficiency.  This metric is a measure of skill and is
    defined as 1 - (MSE/theta**2) where MSE is the Mean Square Error
    and theta**2 is the variance of the observations.  A value of 1
    indicates a perfect forecast.
    
    For more information, see
    http://www.swpc.noaa.gov/forecast_verification/Glossary.html#skill
    '''

    from scipy.ndimage import variance

    # Check input values.
    assert o.size == m.size, 'Input arrays must have same size!'
    assert (len(o.shape) == 1) and (len(m.shape) == 1), \
        'Input arrays must be vectors!'

    var = variance(o)
    # mse() is assumed to be a mean-square-error helper defined elsewhere
    peff = 1.0 - mse(o, m) / var

    return peff
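A worked example, with a stand-in mse helper since the snippet assumes one is defined elsewhere in the module:

import numpy as np

def mse(o, m):
    # stand-in for the module's helper: mean square error
    return np.mean((o - m) ** 2)

o = np.array([1.0, 2.0, 3.0, 4.0, 5.0])   # observed
m = np.array([1.1, 1.9, 3.2, 3.8, 5.1])   # model
# variance(o) = 2.0, mse(o, m) = 0.022, so peff = 1 - 0.022/2.0 = 0.989
print(predicteff(o, m))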
Example #21
def lee_filter(img, size):
    '''
    Applies Lee Filter

    Parameters
    ----------
    img : array-like, image to apply filter on
    size : int
        window size of the uniform filter along each axis
    Returns
    -------
    Filtered array. Has the same shape as `input`.
    '''
    img_mean = uniform_filter(img, (size, size))
    img_sqr_mean = uniform_filter(img**2, (size, size))
    img_variance = img_sqr_mean - img_mean**2

    overall_variance = variance(img)

    img_weights = img_variance**2 / (img_variance**2 + overall_variance**2)
    img_output = img_mean + img_weights * (img - img_mean)

    return img_output
Example #22
def lee_filter(img, window=(5, 5)):
    """ Apply a Lee filter to a numpy array. Modifies original array
     
    *Parameters*
    
    img : numpy array  
        Array to which filter is applied
    window : int
        Size of filter
    
    """
    d = np.array(img, dtype='float32')
    img_variance, img_mean = moving_window_sd(d,
                                              window,
                                              return_mean=True,
                                              return_variance=True)

    lbl, nlbl = label(d)
    overall_variance = variance(d, lbl)

    # overall_variance = overall_variance + img_variance
    overall_variance = np.add(overall_variance, img_variance)

    # d = img - img_mean
    np.subtract(d, img_mean, out=d)

    # img_variance = img_variance / (img_variance + overall_variance) = weights
    np.divide(img_variance, overall_variance, out=img_variance)
    del overall_variance

    # d = weights * (img - img_mean)
    np.multiply(img_variance, d, out=d)

    np.add(img_mean, d, out=d)  # d = img_mean + weights * (img - img_mean)

    return d
Example #23
def label_objects(dataset=None, labels=None, out=None, out_features=None,
                  source=None, return_labels=False):
    DM = DataModel.instance()

    log.info('+ Loading data into memory')
    data = DM.load_slices(dataset)
    if labels is None:
        data += 1
        labels = set(np.unique(data)) - set([0])
    else:
        data += 1
        labels = np.asarray(labels) + 1

    obj_labels = []

    log.info('+ Extracting individual objects')
    new_labels = np.zeros(data.shape, np.int32)
    total_labels = 0
    num = 0

    for label in labels:
        mask = (data == label)
        tmp_data = data.copy()
        tmp_data[~mask] = 0
        tmp_labels, num = splabel(tmp_data, structure=octahedron(1))
        mask = (tmp_labels > 0)
        new_labels[mask] = tmp_labels[mask] + total_labels
        total_labels += num
        obj_labels += [label] * num

    log.info('+ {} Objects found'.format(total_labels))
    log.info('+ Saving results')
    DM.create_empty_dataset(out, DM.data_shape, new_labels.dtype)
    DM.write_slices(out, new_labels, params=dict(active=True, num_objects=total_labels))

    log.info('+ Loading source to memory')
    data = DM.load_slices(source)
    objs = new_labels
    num_objects = total_labels
    objlabels = np.arange(1, num_objects+1)

    log.info('+ Computing Average intensity')
    feature = measure.mean(data, objs, index=objlabels)
    DM.create_empty_dataset(out_features[0], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[0], feature, params=dict(active=True))

    """log.info('+ Computing Median intensity')
    objs.shape = -1
    data.shape = -1
    feature = binned_statistic(objs, data, statistic='median',
                               bins=num_objects+1)[0]
    feature = feature[objlabels]
    out_features[1].write_direct(feature)
    out_features[1].attrs['active'] = True
    objs.shape = dataset.shape
    data.shape = dataset.shape"""

    log.info('+ Computing Sum of intensity')
    feature = measure.sum(data, objs, index=objlabels)
    DM.create_empty_dataset(out_features[1], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[1], feature, params=dict(active=True))

    log.info('+ Computing Standard Deviation of intensity')
    feature = measure.standard_deviation(data, objs, index=objlabels)
    DM.create_empty_dataset(out_features[2], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[2], feature, params=dict(active=True))

    log.info('+ Computing Variance of intensity')
    feature = measure.variance(data, objs, index=objlabels)
    DM.create_empty_dataset(out_features[3], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[3], feature, params=dict(active=True))

    log.info('+ Computing Area')
    objs.shape = -1
    feature = np.bincount(objs, minlength=num_objects+1)[1:]
    DM.create_empty_dataset(out_features[4], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[4], feature, params=dict(active=True))
    DM.create_empty_dataset(out_features[5], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[5], np.log10(feature), params=dict(active=True))
    objs.shape = data.shape

    log.info('+ Computing Bounding Box')
    obj_windows = measure.find_objects(objs)
    feature, depth, height, width = [], [], [], []
    for w in obj_windows:
        feature.append((w[0].stop - w[0].start) *
                       (w[1].stop - w[1].start) *
                       (w[2].stop - w[2].start))
        depth.append(w[0].stop - w[0].start)
        height.append(w[1].stop - w[1].start)
        width.append(w[2].stop - w[2].start)

    feature = np.asarray(feature, np.float32)
    DM.create_empty_dataset(out_features[6], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[6], feature, params=dict(active=True))
    #depth
    depth = np.asarray(depth, np.float32)
    DM.create_empty_dataset(out_features[7], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[7], depth, params=dict(active=True))
    # height
    height = np.asarray(height, np.float32)
    DM.create_empty_dataset(out_features[8], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[8], height, params=dict(active=True))
    # width
    width = np.asarray(width, np.float32)
    DM.create_empty_dataset(out_features[9], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[9], width, params=dict(active=True))
    # log10
    DM.create_empty_dataset(out_features[10], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[10], np.log10(feature), params=dict(active=True))

    log.info('+ Computing Oriented Bounding Box')
    ori_feature, ori_depth, ori_height, ori_width = [], [], [], []
    for i, w in enumerate(obj_windows):
        z, y, x = np.where(objs[w] == i+1)
        coords = np.c_[z, y, x]
        if coords.shape[0] >= 3:
            coords = PCA(n_components=3).fit_transform(coords)
        cmin, cmax = coords.min(0), coords.max(0)
        zz, yy, xx = (cmax[0] - cmin[0] + 1,
                      cmax[1] - cmin[1] + 1,
                      cmax[2] - cmin[2] + 1)
        ori_feature.append(zz * yy * xx)
        ori_depth.append(zz)
        ori_height.append(yy)
        ori_width.append(xx)

    ori_feature = np.asarray(ori_feature, np.float32)
    DM.create_empty_dataset(out_features[11], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[11], ori_feature, params=dict(active=True))
    #depth
    ori_depth = np.asarray(ori_depth, np.float32)
    DM.create_empty_dataset(out_features[12], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[12], ori_depth, params=dict(active=True))
    # height
    ori_height = np.asarray(ori_height, np.float32)
    DM.create_empty_dataset(out_features[13], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[13], ori_height, params=dict(active=True))
    # width
    ori_width = np.asarray(ori_width, np.float32)
    DM.create_empty_dataset(out_features[14], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[14], ori_width, params=dict(active=True))
    # log10
    DM.create_empty_dataset(out_features[15], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[15], np.log10(ori_feature), params=dict(active=True))

    log.info('+ Computing Positions')
    pos = measure.center_of_mass(objs, labels=objs, index=objlabels)
    pos = np.asarray(pos, dtype=np.float32)
    DM.create_empty_dataset(out_features[16], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[16], pos[:, 2].copy(), params=dict(active=True))
    DM.create_empty_dataset(out_features[17], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[17], pos[:, 1].copy(), params=dict(active=True))
    DM.create_empty_dataset(out_features[18], (num_objects,), np.float32, check=False)
    DM.write_dataset(out_features[18], pos[:, 0].copy(), params=dict(active=True))

    if return_labels:
        return out, total_labels, np.asarray(obj_labels)

    return out, total_labels