Code Example #1
import numpy as np
from skimage.util import view_as_windows as viewW


def clean_from_hazy(image, t_fine, win_size, omega, t0):

    # image - input hazy image of size [Height,Width,Num_channels]
    # t_fine - refined, predicted transmission map, of size [Height,Width]
    # win_size - DCP window size, default is [15,15]
    # omega - DCP omega (residual haze) parameter, default is 0.95
    # t0 - DCP threshold parameter, default is 0.1
    # output:
    # J - reconstructed dehazed image of size [Height,Width,Num_channels]

    H = np.shape(image)[0]
    W = np.shape(image)[1]
    C = np.shape(image)[2]  # number of channels = 3
    patch_size = win_size[0] * win_size[1]
    padded_image = np.pad(image,
                          (((win_size[0] - 1) // 2, (win_size[0] - 1) // 2),
                           ((win_size[1] - 1) // 2,
                            (win_size[1] - 1) // 2), (0, 0)), 'edge')
    num_patches = H * W
    image_win = viewW(padded_image, (win_size[0], win_size[1], C)).reshape(
        num_patches, patch_size, C)
    initial_DCP = np.min(np.reshape(image_win, (num_patches, -1)), axis=1)
    DCP_sorted_inds = np.argsort(initial_DCP)[::-1]
    brightest_pixel_coords = DCP_sorted_inds[:int(0.001 * num_patches)]
    reshaped_image = np.reshape(image, (H * W, C))
    brightest_pixels = reshaped_image[brightest_pixel_coords, :]
    A = np.max(brightest_pixels, axis=0)
    t_fine[t_fine < t0] = t0
    ##### added for speedup at 12.9.19
    image = image / 255.0
    A = A / 255.00
    J = (image - A) / np.stack((t_fine, t_fine, t_fine), axis=-1) + A
    J = (J - np.min(J)) / (np.max(J) - np.min(J))

    return J
Code Example #2
def patches(a, patch_shape):
    # Pad so that every pixel gets a full window, then return all sliding
    # windows of size patch_shape (zero padding at the borders).
    side_size = patch_shape
    ext_size = (side_size[0] - 1) // 2, (side_size[1] - 1) // 2
    img = np.pad(a, ([ext_size[0]], [ext_size[1]]),
                 'constant',
                 constant_values=(0))
    return viewW(img, patch_shape)
Code Example #3
import numpy as np
from skimage.util import view_as_windows as viewW


def compute_transmittance(image, win_size, omega):

    # image - input hazy image of size [Height,Width,Num_channels]
    # win_size - DCP window size, default is [15,15]
    # omega - DCP omega (residual haze) parameter, default is 0.95
    # outputs:
    # t - output coarse transmission map of size [Height,Width]
    # A - estimated airlight vector of size [3,]

    H = np.shape(image)[0]
    W = np.shape(image)[1]
    C = np.shape(image)[2]  # number of channels = 3
    patch_size = win_size[0] * win_size[1]
    padded_image = np.pad(image,
                          (((win_size[0] - 1) // 2, (win_size[0] - 1) // 2),
                           ((win_size[1] - 1) // 2,
                            (win_size[1] - 1) // 2), (0, 0)), 'edge')
    num_patches = H * W
    image_win = viewW(padded_image, (win_size[0], win_size[1], C)).reshape(
        num_patches, patch_size, C)
    initial_DCP = np.min(np.reshape(image_win, (num_patches, -1)), axis=1)
    DCP_sorted_inds = np.argsort(initial_DCP)[::-1]
    brightest_pixel_coords = DCP_sorted_inds[:int(0.001 * num_patches)]
    reshaped_image = np.reshape(image, (H * W, C))
    brightest_pixels = reshaped_image[brightest_pixel_coords, :]
    A = np.max(brightest_pixels, axis=0)
    DCP = np.min(np.reshape(image_win / A, (num_patches, -1)), axis=1)
    t = np.reshape(1 - omega * DCP, (H, W))

    return t, A
Code Example #4
def im2col(A, window, stepsize=1):
    """
    an im2col function, transferring an image to patches of size window (length
    2 list). the step size is the stride of the sliding window.
    :param A: The original image (NxM size matrix of pixel values).
    :param window: Length 2 list of 2D window size.
    :param stepsize: The step size for choosing patches (default is 1).
    :return: A (heightXwidth)x(NxM) matrix of image patches.
    """
    return viewW(np.ascontiguousarray(A), (window[0], window[1])).reshape(
        -1, window[0] * window[1]).T[:, ::stepsize]
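A small sanity check of the column layout this im2col returns (the 4x4 input below is made up for illustration; viewW is assumed to be skimage.util.view_as_windows, as in Code Example #8):

import numpy as np
from skimage.util import view_as_windows as viewW

A = np.arange(16).reshape(4, 4)
cols = im2col(A, [3, 3])  # all 3x3 sliding windows, stride 1
# cols.shape == (9, 4): one column per window position, in row-major order
# cols[:, 0] == [0, 1, 2, 4, 5, 6, 8, 9, 10], the top-left window flattened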
Code Example #5
import numpy as np
import scipy as sp
import scipy.sparse


def compute_laplacian_matte(image, eps=1e-5, f_size=[3, 3]):

    # inputs:
    # image - natural input image of size [Height,Width,Num_channels]
    # eps - epsilon parameter in the weights calculation
    # f_size - window size in the weights calculation
    # output:
    # L - sparse matting Laplacian matrix

    H = np.shape(image)[0]
    W = np.shape(image)[1]
    C = np.shape(image)[2]  # num channels = 3
    patch_size = f_size[0] * f_size[1]
    num_patches = (H - (f_size[0] - 1)) * (W - (f_size[1] - 1))
    inds = np.arange(H * W).reshape(H, W)
    inds_win = viewW(inds, f_size).reshape(num_patches, patch_size)
    I = np.repeat(inds_win, patch_size, axis=1)
    J = np.tile(inds_win, (1, patch_size))
    image_win = viewW(image, (f_size[0], f_size[1], C)).reshape(
        num_patches, patch_size, C)
    image_mean = np.mean(image_win, axis=1, keepdims=True)
    image_var = (np.matmul(np.transpose(image_win, (0, 2, 1)), image_win) / patch_size -
                 np.matmul(np.transpose(image_mean, (0, 2, 1)), image_mean))
    matrix_to_invert = (eps / patch_size) * np.eye(C) + image_var
    var_fac = np.linalg.inv(matrix_to_invert)
    X = np.matmul(image_win - image_mean, var_fac)
    V = np.matmul(X, np.transpose(image_win - image_mean, (0, 2, 1)))
    weights = (1.0 / patch_size) * (1 + V)
    V = np.eye(patch_size) - weights
    V = V.reshape((-1, patch_size * patch_size))

    L = sp.sparse.coo_matrix((V.ravel(), (I.ravel(), J.ravel())),
                             shape=(H * W, H * W))
    L = L.tocsr()

    return L
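The dehazing pieces above fit together in the usual dark-channel-prior pipeline: estimate a coarse transmission map (Code Example #3), refine it, and invert the haze model (Code Example #1). The following is only a rough sketch under assumptions that are not part of the original snippets: the input is a stand-in random image, the refinement is a soft-matting solve (L + lam*I) t = lam * t_coarse built on the Laplacian from Code Example #5, and lam = 1e-4 is an arbitrary choice.

import numpy as np
import scipy.sparse
import scipy.sparse.linalg

win_size, omega, t0, lam = [15, 15], 0.95, 0.1, 1e-4  # lam is an assumed weight

rng = np.random.default_rng(0)
hazy = rng.integers(0, 256, size=(40, 60, 3)).astype(np.float64)  # stand-in hazy image

# coarse transmission map and airlight via the dark channel prior (Code Example #3)
t_coarse, A_air = compute_transmittance(hazy, win_size, omega)

# refine t_coarse with a soft-matting solve (Code Example #5)
L = compute_laplacian_matte(hazy / 255.0)
H, W = t_coarse.shape
t_fine = scipy.sparse.linalg.spsolve(
    (L + lam * scipy.sparse.eye(H * W)).tocsc(),
    lam * t_coarse.ravel()).reshape(H, W)

# invert the haze model to recover the dehazed image (Code Example #1)
J = clean_from_hazy(hazy, t_fine, win_size, omega, t0)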
Code Example #6
File: scan_tile3.py Project: caoliang/cloudpoint
def fill_zero_regions(a, kernel_size=3):
    # Replace each interior zero pixel with the most frequent nonzero value in
    # its kernel_size x kernel_size neighborhood (modifies a in place).
    hk = kernel_size // 2  # half_kernel_size

    a4D = viewW(a, (kernel_size, kernel_size))
    sliced_a = a[hk:-hk, hk:-hk]
    zeros_mask = sliced_a == 0
    zero_neighs = a4D[zeros_mask].reshape(-1, kernel_size**2)
    n = len(zero_neighs)  # num_zeros

    scale = zero_neighs.max() + 1
    zno = zero_neighs + scale * np.arange(n)[:, None]  # zero_neighs_offsetted

    count = np.bincount(zno.ravel(), minlength=n * scale).reshape(n, -1)
    modevals = count[:, 1:].argmax(1) + 1
    sliced_a[zeros_mask] = modevals
    return a
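A quick illustration of what this does (the 4x4 array below is made up; viewW is assumed to be skimage.util.view_as_windows): interior zeros are overwritten in place with the most frequent nonzero value in their 3x3 neighborhood.

import numpy as np
from skimage.util import view_as_windows as viewW

a = np.array([[5, 5, 5, 7],
              [5, 0, 7, 7],
              [5, 5, 7, 7],
              [5, 5, 7, 7]])
fill_zero_regions(a)
# a[1, 1] is now 5: its 3x3 neighborhood holds six 5s and two 7s, so the mode is 5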
Code Example #7
File: layers.py Project: wenqinYe/Neural-Tetris
 def im2col(self, A, BSZ, stepsize=1):
     """
     Allows for matrix multiplication of convolution. Turns matrix
     into column vectors representing the sliding windows.
         Credit: https://stackoverflow.com/questions/30109068/implement-matlabs-im2col-sliding-in-python
     
     :param A: Matrix
     :param BSZ: Batch size
     :param stepsize: Step size
     
     :return: Row vectors representing convolutions.
     """
     return np.transpose(
         viewW(A,
               (BSZ[0], BSZ[1])).reshape(-1,
                                         BSZ[0] * BSZ[1]).T[:, ::stepsize])
Code Example #8
def colfilt(A, kernelSize, option):

    from skimage.util import view_as_windows as viewW
    import numpy as np
    import sys

    A = np.lib.pad(A, ((int(
        (kernelSize[0] - 1) / 2), int((kernelSize[0] - 1) / 2)), (int(
            (kernelSize[1] - 1) / 2), int((kernelSize[1] - 1) / 2))),
                   mode='constant',
                   constant_values=np.nan)

    B = viewW(A, kernelSize).reshape(-1,
                                     kernelSize[0] * kernelSize[1]).T[:, ::1]

    output_size = (A.shape[0] - kernelSize[0] + 1,
                   A.shape[1] - kernelSize[1] + 1)
    C = np.zeros(output_size, dtype=A.dtype)
    if option == 0:  #    max
        C = np.nanmax(B, axis=0).reshape(output_size)
    elif option == 1:  #  min
        C = np.nanmin(B, axis=0).reshape(output_size)
    elif option == 2:  #  mean
        C = np.nanmean(B, axis=0).reshape(output_size)
    elif option == 3:  #  median
        C = np.nanmedian(B, axis=0).reshape(output_size)
    elif option == 4:  #  range
        C = np.nanmax(B, axis=0).reshape(output_size) - np.nanmin(
            B, axis=0).reshape(output_size)
    elif option == 6:  #  MAD (Median Absolute Deviation)
        m = B.shape[0]
        D = np.abs(B - np.dot(np.ones(
            (m, 1), dtype=A.dtype), np.array([np.nanmedian(B, axis=0)])))
        C = np.nanmedian(D, axis=0).reshape(output_size)
    elif option[0] == 5:  # displacement distance count; option[1] is the threshold
        m = B.shape[0]
        c = int(np.round((m + 1) / 2) - 1)
        #        c = 0
        D = np.abs(B -
                   np.dot(np.ones((m, 1), dtype=A.dtype), np.array([B[c, :]])))
        C = np.sum(D < option[1], axis=0).reshape(output_size)
    else:
        sys.exit('invalid option for columnwise neighborhood filtering')

    C = C.astype(A.dtype)

    return C
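A short usage sketch of the option codes (the 5x5 input is made up; option 2 selects the NaN-aware mean):

import numpy as np

A = np.arange(25, dtype=float).reshape(5, 5)
M = colfilt(A, (3, 3), 2)  # option 2: mean over each 3x3 neighborhood
# M.shape == (5, 5): the NaN padding keeps the output the same size as the input
# M[0, 0] == 3.0: the mean of the four valid (non-NaN) values in the corner window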
Code Example #9
File: thinning.py Project: saintnever/FingerPad
 def A(a):
     # count the 0 -> 1 transitions around each pixel's ordered 8-neighborhood
     # (the crossing number used by thinning algorithms)
     side_size = (3, 3)
     ext_size = (side_size[0] - 1) // 2, (side_size[1] - 1) // 2
     img = np.pad(a, ([ext_size[0]], [ext_size[1]]),
                  'constant',
                  constant_values=(0))
     out = viewW(img, side_size)
     out = out.reshape(out.shape[0:2] + (9, ))
     out = out[:, :, np.uint8([1, 2, 5, 8, 7, 6, 3, 0, 1])]
     out[:, :, -1] = out[:, :, 0]
     n_0to1 = np.zeros(a.shape, np.uint8)
     n_0to1[:, :] = np.sum(np.diff(out[:, :], axis=2) == 1, axis=2)
     n_0to1[0, :] = 0
     n_0to1[-1, :] = 0
     n_0to1[:, 0] = 0
     n_0to1[:, -1] = 0
     return n_0to1
Code Example #10
def im2col_sliding_strided_v2(A, BSZ, stepsize=1):
    # Columns are the flattened BSZ[0] x BSZ[1] sliding windows of A,
    # sampled every `stepsize` windows (same column layout as the im2col above).
    return viewW(A, (BSZ[0], BSZ[1])).reshape(-1,
                                              BSZ[0] * BSZ[1]).T[:, ::stepsize]