Code example #1
import cv2
import numpy as np


def de_vignette(image, sigma):
    # Build a Gaussian mask matching the size of the image;
    # sigma is the Gaussian standard deviation.
    rows, cols = image.shape[:2]
    kernel_x = cv2.getGaussianKernel(cols, sigma)
    kernel_y = cv2.getGaussianKernel(rows, sigma)

    # Outer product of the two 1-D kernels gives a 2-D Gaussian mask,
    # scaled so its center value is 1.
    kernel = kernel_y * kernel_x.T
    mask = kernel / kernel.max()

    # Dividing each channel by the mask brightens the darkened corners,
    # undoing the vignette. Float output avoids uint8 wraparound.
    vignette = np.zeros_like(image, dtype=np.float64)
    for i in range(3):
        vignette[:, :, i] = image[:, :, i] / mask
    return vignette
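
A minimal usage sketch, continuing from the definition above (the file path and sigma value are illustrative assumptions, not part of the original example):

# Hypothetical usage: undo the vignette of a photo and save the result.
img = cv2.imread('photo.jpg')            # assumed input file
corrected = de_vignette(img, sigma=300)  # sigma tuned by eye
cv2.imwrite('corrected.jpg', np.clip(corrected, 0, 255).astype(np.uint8))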
Code example #2
import cv2
import numpy as np
from scipy import ndimage, signal


def ridge_orient(im, gradientsigma, blocksigma, orientsmoothsigma):
    rows, cols = im.shape
    sze = np.fix(6 * gradientsigma)
    if np.remainder(sze, 2) == 0:
        sze = sze + 1

    # np.int was removed from NumPy; the builtin int works for the kernel size.
    gauss = cv2.getGaussianKernel(int(sze), gradientsigma)
    f = gauss * gauss.T

    fy, fx = np.gradient(f)

    Gx = signal.convolve2d(im, fx, mode='same')
    Gy = signal.convolve2d(im, fy, mode='same')

    Gxx = np.power(Gx, 2)
    Gyy = np.power(Gy, 2)
    Gxy = Gx * Gy

    sze = np.fix(6 * blocksigma)

    gauss = cv2.getGaussianKernel(int(sze), blocksigma)
    f = gauss * gauss.T

    Gxx = ndimage.convolve(Gxx, f)
    Gyy = ndimage.convolve(Gyy, f)
    Gxy = 2 * ndimage.convolve(Gxy, f)
    denom = np.sqrt(np.power(Gxy, 2) +
                    np.power((Gxx - Gyy), 2)) + np.finfo(float).eps

    sin2theta = Gxy / denom
    cos2theta = (Gxx - Gyy) / denom

    if orientsmoothsigma:
        sze = np.fix(6 * orientsmoothsigma)
        if np.remainder(sze, 2) == 0:
            sze = sze + 1
        gauss = cv2.getGaussianKernel(int(sze), orientsmoothsigma)
        f = gauss * gauss.T
        cos2theta = ndimage.convolve(cos2theta, f)
        sin2theta = ndimage.convolve(sin2theta, f)

    orientim = np.pi / 2 + np.arctan2(sin2theta, cos2theta) / 2
    return orientim
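
A hedged usage sketch, continuing from the definition above (the fingerprint path and sigma values are illustrative assumptions):

# Hypothetical usage: estimate the local ridge orientation of a fingerprint.
im = cv2.imread('fingerprint.png', cv2.IMREAD_GRAYSCALE).astype(np.float64)
orientim = ridge_orient(im, gradientsigma=1, blocksigma=7, orientsmoothsigma=7)
print(orientim.min(), orientim.max())  # orientation angles in radians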
Code example #3
import cv2 as cv
import numpy as np


def blurImage2(in_image: np.ndarray, kernel_size: int) -> np.ndarray:
    """
    Blur an image with a Gaussian kernel using OpenCV built-in functions.
    :param in_image: Input image
    :param kernel_size: Kernel size
    :return: The blurred image
    """
    # OpenCV's default sigma for a given kernel size (the same formula
    # cv.GaussianBlur uses when sigma is passed as 0).
    sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8
    gaussian = cv.getGaussianKernel(kernel_size, sigma)
    gaussian = gaussian * gaussian.transpose()  # outer product -> 2-D kernel
    return cv.filter2D(in_image, -1, gaussian, borderType=cv.BORDER_REPLICATE)
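
Because the sigma formula above is OpenCV's default, the output should agree closely with cv.GaussianBlur. A quick hedged check (the random test array is an illustrative assumption):

# Hypothetical check against cv.GaussianBlur with sigma=0 (auto sigma).
img = np.random.rand(64, 64).astype(np.float32)
ours = blurImage2(img, 5)
ref = cv.GaussianBlur(img, (5, 5), 0, borderType=cv.BORDER_REPLICATE)
print(np.abs(ours - ref).max())  # expected to be ~0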
Code example #4
import cv2
import numpy as np


def getSpaceFilter(filterName):
    '''
    Return a np.array of a spatial filter by name.
    '''
    gaussianKernel = cv2.getGaussianKernel(5, 1)
    filters = {
        'gaussian': gaussianKernel * gaussianKernel.T,  # 5x5 Gaussian, sigma=1
        'mean': np.ones((3, 3)) / 9,  # normalized so the filter averages
        'laplacian': np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]]),
    }
    if filterName not in filters:
        raise NotImplementedError(
            '{} filter not supported yet.'.format(filterName))
    return filters[filterName]
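
A minimal usage sketch, continuing from the definition above (the image path is an illustrative assumption):

# Hypothetical usage: apply a named filter with cv2.filter2D.
img = cv2.imread('lena.png', cv2.IMREAD_GRAYSCALE)
smoothed = cv2.filter2D(img, -1, getSpaceFilter('gaussian'))
edges = cv2.filter2D(img, cv2.CV_64F, getSpaceFilter('laplacian'))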
Code example #5
import cv2
import imageio
import numpy as np
from scipy.signal import convolve2d


def overlay(bw1, bw2, L1=15, S1=13, L2=15, S2=13, showfft=False, fname=None):
    # Overlay takes two black-and-white images: bw1 is low-pass filtered,
    # bw2 is high-pass filtered, and the two are averaged together.

    gauss1 = np.outer(cv2.getGaussianKernel(L1, S1), cv2.getGaussianKernel(L1, S1))
    gauss1 /= np.sum(gauss1)
    gauss2 = np.outer(cv2.getGaussianKernel(L2, S2), cv2.getGaussianKernel(L2, S2))
    gauss2 /= np.sum(gauss2)
    lpf_kernel = gauss1
    # High-pass kernel = unit impulse minus the low-pass kernel.
    impulse = np.zeros(gauss2.shape)
    impulse[impulse.shape[0] // 2, impulse.shape[1] // 2] = 1
    hpf_kernel = cv2.subtract(impulse, gauss2)

    im1_filt = convolve2d(bw1, lpf_kernel, mode="same")
    im2_filt = convolve2d(bw2, hpf_kernel, mode="same")

    if showfft and fname:
        postlpf = np.abs(np.fft.fftshift(np.fft.fft2(im1_filt)))
        posthpf = np.log(np.abs(np.fft.fftshift(np.fft.fft2(im2_filt))))
        # scipy.misc.imsave was removed from SciPy; rescale to 0-255 and
        # write with imageio.imwrite instead.
        for tag, spectrum in (("hpffft", posthpf), ("lpffft", postlpf)):
            lo, hi = spectrum.min(), spectrum.max()
            scaled = (255 * (spectrum - lo) / (hi - lo)).astype(np.uint8)
            imageio.imwrite("output/" + fname + tag + ".jpg", scaled)

    final = (im1_filt + im2_filt) / 2
    return final
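
A hedged usage sketch for producing a hybrid image with the function above (the file names are illustrative assumptions; inputs are assumed grayscale):

# Hypothetical usage: blend a low-pass image with a high-pass image.
bw1 = cv2.imread('cat.jpg', cv2.IMREAD_GRAYSCALE).astype(np.float64)
bw2 = cv2.imread('dog.jpg', cv2.IMREAD_GRAYSCALE).astype(np.float64)
hybrid = overlay(bw1, bw2)
cv2.imwrite('hybrid.jpg', np.clip(hybrid, 0, 255).astype(np.uint8))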
Code example #6
import cv2
import numpy as np
from scipy.signal import convolve2d


def convolve_3d(im, kernel_type="gaussian", L=15, S=13):
    # Build a normalized 2-D Gaussian from the outer product of 1-D kernels.
    gauss = np.outer(cv2.getGaussianKernel(L, S), cv2.getGaussianKernel(L, S))
    if kernel_type == "gaussian":
        kernel = gauss / np.sum(gauss)
    else:
        # High-pass kernel: unit impulse minus the normalized Gaussian.
        kernel = gauss / np.sum(gauss)
        impulse = np.zeros(gauss.shape)
        impulse[impulse.shape[0] // 2, impulse.shape[1] // 2] = 1
        kernel = cv2.subtract(impulse, kernel)

    # Convolve each color channel separately, then stack them back together.
    final_color = [convolve2d(im[:, :, i], kernel, mode="same") for i in range(3)]
    final = np.stack(final_color, axis=-1)
    final = np.clip(final, 0, 1)
    return final
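
A minimal usage sketch, continuing from the definition above (the path is an illustrative assumption; the input is assumed scaled to [0, 1] since the function clips to that range):

# Hypothetical usage on a color image with values in [0, 1].
im = cv2.imread('photo.jpg').astype(np.float64) / 255.0
blurred = convolve_3d(im, kernel_type="gaussian")
detail = convolve_3d(im, kernel_type="highpass")  # any other name selects the high-pass branch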
Code example #7
File: student_sift.py  Project: apostlewang/CS308_CV
import cv2
import numpy as np


def get_features(image, x, y, feature_width, scales=None):
    """
    To start with, you might want to simply use normalized patches as your
    local feature. This is very simple to code and works OK. However, to get
    full credit you will need to implement the more effective SIFT descriptor
    (See Szeliski 4.1.2 or the original publications at
    http://www.cs.ubc.ca/~lowe/keypoints/)

    Your implementation does not need to exactly match the SIFT reference.
    Here are the key properties your (baseline) descriptor should have:
    (1) a 4x4 grid of cells, each cell feature_width/4 pixels wide. "Cell"
        is simply the terminology used in the feature literature to describe
        the spatial bins where gradient distributions will be described.
    (2) each cell should have a histogram of the local distribution of
        gradients in 8 orientations. Appending these histograms together will
        give you 4x4 x 8 = 128 dimensions.
    (3) Each feature should be normalized to unit length.

    You do not need to perform the interpolation in which each gradient
    measurement contributes to multiple orientation bins in multiple cells.
    As described in Szeliski, a single gradient measurement creates a
    weighted contribution to the 4 nearest cells and the 2 nearest
    orientation bins within each cell, for 8 total contributions. This type
    of interpolation probably will help, though.

    You do not have to explicitly compute the gradient orientation at each
    pixel (although you are free to do so). You can instead filter with
    oriented filters (e.g. a filter that responds to edges with a specific
    orientation). All of your SIFT-like feature can be constructed entirely
    from filtering fairly quickly in this way.

    You do not need to do the normalize -> threshold -> normalize again
    operation as detailed in Szeliski and the SIFT paper. It can help, though.

    Another simple trick which can help is to raise each element of the final
    feature vector to some power that is less than one.

    Args:
    -   image: A numpy array of shape (m,n) or (m,n,c). can be grayscale or color, your choice
    -   x: A numpy array of shape (k,), the x-coordinates of interest points
    -   y: A numpy array of shape (k,), the y-coordinates of interest points
    -   feature_width: integer representing the local feature width in pixels.
            You can assume that feature_width will be a multiple of 4 (i.e. every
                cell of your local SIFT-like feature will have an integer width
                and height). This is the initial window size we examine around
                each keypoint.
    -   scales: Python list or tuple if you want to detect and describe features
            at multiple scales

    You may also detect and describe features at particular orientations.

    Returns:
    -   fv: A numpy array of shape (k, feat_dim) representing a feature vector.
            "feat_dim" is the feature_dimensionality (e.g. 128 for standard SIFT).
            These are the computed features.
    """
    assert image.ndim == 2, 'Image must be grayscale'
    #############################################################################
    # TODO: YOUR CODE HERE                                                      #
    # If you choose to implement rotation invariance, enabling it should not    #
    # decrease your matching accuracy.                                          #
    #############################################################################
    # getGaussianKernel returns a column vector; take the outer product to
    # get a 2-D kernel so filter2D smooths in both directions.
    kernel_1d = cv2.getGaussianKernel(feature_width, 1)
    kernel = kernel_1d @ kernel_1d.T
    image = cv2.filter2D(image, ddepth=-1, kernel=kernel)
    # Image derivatives
    dx = cv2.Sobel(image, cv2.CV_64F, 1, 0)
    dy = cv2.Sobel(image, cv2.CV_64F, 0, 1)
    # Gradient magnitude and orientation at each pixel
    magnitudes = (dx**2 + dy**2)**.5
    magnitudes = cv2.filter2D(magnitudes, ddepth=-1, kernel=kernel)
    orientations = np.arctan2(dy, dx)

    # Quantize orientations from [-pi, pi] into 8 bins (0-7); clip so the
    # boundary value pi does not fall into a 9th bin.
    orientations = np.clip(
        np.floor(4 * (orientations + np.pi) / np.pi), 0, 7).astype(int)
    # 4x4 grid of cells, each cell feature_width/4 pixels on a side
    num_cells = 4
    cell_size = feature_width // 4
    fv = np.zeros((len(x), num_cells**2 * 8))
    for i in range(len(x)):
        # Top-left corner of the feature window around keypoint i
        p_x = int(x[i] - feature_width / 2)
        p_y = int(y[i] - feature_width / 2)
        for j in range(num_cells):
            for k in range(num_cells):
                cell_idx = num_cells * j + k
                cell_x = p_x + cell_size * j
                cell_y = p_y + cell_size * k
                # Accumulate gradient magnitudes into the 8 orientation bins
                for z in range(8):
                    for xx in range(cell_size):
                        for yy in range(cell_size):
                            if orientations[cell_y + yy][cell_x + xx] == z:
                                fv[i][cell_idx * 8 + z] += \
                                    magnitudes[cell_y + yy][cell_x + xx]
    # Normalize each feature vector to unit length (docstring requirement 3).
    fv = fv / (np.linalg.norm(fv, axis=1, keepdims=True) + np.finfo(float).eps)

    #############################################################################
    #                             END OF YOUR CODE                              #
    #############################################################################
    return fv
Code example #8
# Assumed context: cv2, numpy as np, and matplotlib.pyplot as plt are
# imported, and image_Blur is a grayscale image loaded earlier.
kernel = np.ones((5, 5)) / 25.0  # create an averaging kernel
image_kernel = cv2.filter2D(image_Blur, -1, kernel)  # apply the kernel
plt.imshow(image_kernel, cmap='gray'), plt.xticks([]), plt.yticks([])
plt.show()

image_very_blurry_kernel = cv2.GaussianBlur(image_Blur, (5, 5),
                                            0)  # apply GaussianBlur
plt.imshow(image_very_blurry_kernel,
           cmap='gray'), plt.xticks([]), plt.yticks([])
plt.show()

## kernel + vector + Blur
# The third parameter of GaussianBlur is the standard deviation along the
# X axis (the width direction).
# The kernel GaussianBlur uses is built by taking a 1-D array that follows
# a Gaussian distribution along each axis and computing their outer product.
# Build the 1-D array with getGaussianKernel, then compute the outer
# product with NumPy's outer function.
gaus_vector = cv2.getGaussianKernel(5, 0)
gaus_kernel = np.outer(gaus_vector, gaus_vector)  # outer product -> 2-D kernel
image_kernel_vector = cv2.filter2D(image_Blur, -1, gaus_kernel)  # apply kernel
plt.imshow(image_kernel_vector, cmap='gray'), plt.xticks([]), plt.yticks([])
plt.show()

## Sharpening an image
# Create a kernel that emphasizes the target pixel, then apply it with
# filter2D. A kernel that accentuates the center pixel makes the contrast
# at image edges stand out more.
image_clear = cv2.imread('../img/black.jpg', cv2.IMREAD_GRAYSCALE)
kernel_clear = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])  # create kernel

image_sharp = cv2.filter2D(image_clear, -1, kernel_clear)  # sharpen the image
plt.imshow(image_sharp, cmap='gray'), plt.axis('off')
plt.show()
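
As a quick hedged check of the outer-product claim above (the random test array is an illustrative assumption), the kernel built from getGaussianKernel should reproduce cv2.GaussianBlur:

# Hypothetical check: the separable kernel matches GaussianBlur's result.
test = np.random.rand(32, 32).astype(np.float32)
via_kernel = cv2.filter2D(test, -1, gaus_kernel)
via_blur = cv2.GaussianBlur(test, (5, 5), 0)
print(np.abs(via_kernel - via_blur).max())  # expected to be ~0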
Code example #9
import cv2
import numpy as np


def get_interest_points(image, feature_width):
    """
    Implement the Harris corner detector (See Szeliski 4.1.1) to start with.
    You can create additional interest point detector functions (e.g. MSER)
    for extra credit.

    If you're finding spurious interest point detections near the boundaries,
    it is safe to simply suppress the gradients / corners near the edges of
    the image.

    The feature_width argument is useful in this function in order to
    (a) suppress boundary interest points (where a feature wouldn't fit
    entirely in the image, anyway) or (b) scale the image filters being
    used. Or you can ignore it.

    By default you do not need to make scale and orientation invariant
    local features.

    The lecture slides and textbook are a bit vague on how to do the
    non-maximum suppression once you've thresholded the cornerness score.
    You are free to experiment. For example, you could compute connected
    components and take the maximum value within each component.
    Alternatively, you could run a max() operator on each sliding window. You
    could use this to ensure that every interest point is at a local maximum
    of cornerness.

    Args:
    -   image: A numpy array of shape (m,n,c),
                image may be grayscale or color (your choice)
    -   feature_width: integer representing the local feature width in pixels.

    Returns:
    -   x: A numpy array of shape (N,) containing x-coordinates of interest points
    -   y: A numpy array of shape (N,) containing y-coordinates of interest points
    -   confidences (optional): numpy nd-array of dim (N,) containing the strength
            of each interest point
    -   scales (optional): A numpy array of shape (N,) containing the scale at each
            interest point
    -   orientations (optional): A numpy array of shape (N,) containing the orientation
            at each interest point
    """
    confidences, scales, orientations = None, None, None
    #############################################################################
    # TODO: YOUR HARRIS CORNER DETECTOR CODE HERE                              #
    #############################################################################
    m = image.shape[0]
    n = image.shape[1]
    # 1. Image derivatives
    Ix = cv2.Sobel(image, cv2.CV_64F, 1, 0)
    Iy = cv2.Sobel(image, cv2.CV_64F, 0, 1)
    # 2. Square of derivatives
    Ixx = np.multiply(Ix, Ix)
    Iyy = np.multiply(Iy, Iy)
    Ixy = np.multiply(Ix, Iy)
    # getGaussianKernel returns a column vector; take the outer product to
    # get a full 2-D Gaussian for the smoothing step below.
    kernel_1d = cv2.getGaussianKernel(3, 1)
    kernel = kernel_1d @ kernel_1d.T
    # 3. Gaussian filter
    Ixx = cv2.filter2D(Ixx, ddepth=-1, kernel=kernel)
    Ixy = cv2.filter2D(Ixy, ddepth=-1, kernel=kernel)
    Iyy = cv2.filter2D(Iyy, ddepth=-1, kernel=kernel)
    # 4. Cornerness function: a numpy array of shape (m,n)
    r_harris = np.multiply(Ixx,
                           Iyy) - np.square(Ixy) - 0.05 * np.square(Ixx + Iyy)
    # Remove interest points that are too close to a border
    larger = np.where(r_harris > np.abs(3 * np.mean(r_harris)))
    larger = np.transpose(larger)
    half = feature_width / 2
    no_border = []
    for i in range(len(larger)):
        if larger[i][0] > half and larger[i][0] < m - half:
            if larger[i][1] > half and larger[i][1] < n - half:
                no_border.append(larger[i])

    #############################################################################
    #                             END OF YOUR CODE                              #
    #############################################################################

    #############################################################################
    # TODO: YOUR ADAPTIVE NON-MAXIMAL SUPPRESSION CODE HERE                     #
    # While most feature detectors simply look for local maxima in              #
    # the interest function, this can lead to an uneven distribution            #
    # of feature points across the image, e.g., points will be denser           #
    # in regions of higher contrast. To mitigate this problem, Brown,           #
    # Szeliski, and Winder (2005) only detect features that are both            #
    # local maxima and whose response value is significantly (10%)              #
    # greater than that of all of its neighbors within a radius r. The          #
    # goal is to retain only those points that are a maximum in a               #
    # neighborhood of radius r pixels. One way to do so is to sort all          #
    # points by the response strength, from large to small response.            #
    # The first entry in the list is the global maximum, which is not           #
    # suppressed at any radius. Then, we can iterate through the list           #
    # and compute the distance to each interest point ahead of it in            #
    # the list (these are pixels with even greater response strength).          #
    # The minimum of distances to a keypoint's stronger neighbors               #
    # (multiplying these neighbors by >=1.1 to add robustness) is the           #
    # radius within which the current point is a local maximum. We              #
    # call this the suppression radius of this interest point, and we           #
    # save these suppression radii. Finally, we sort the suppression            #
    # radii from large to small, and return the n keypoints                     #
    # associated with the top n suppression radii, in this sorted               #
    # orderself. Feel free to experiment with n, we used n=1500.                #
    #                                                                           #
    # See:                                                                      #
    # https://www.microsoft.com/en-us/research/wp-content/uploads/2005/06/cvpr05.pdf
    # or                                                                        #
    # https://www.cs.ucsb.edu/~holl/pubs/Gauglitz-2011-ICIP.pdf                 #
    #############################################################################
    # Cap at the number of surviving points to avoid indexing past the end.
    num_return = min(2000, len(no_border))
    tri = np.zeros((len(no_border), 3))
    y = np.zeros(len(no_border))
    x = np.zeros(len(no_border))
    strength = np.zeros(len(no_border))
    for i in range(len(no_border)):
        y_idx = no_border[i][0]
        x_idx = no_border[i][1]
        tri[i][0] = r_harris[y_idx][x_idx]  # cornerness response
        tri[i][1] = y_idx
        tri[i][2] = x_idx

    tri = sorted(tri, key=lambda x: x[0], reverse=True)
    for i in range(len(tri)):
        strength[i] = tri[i][0]
        y[i] = tri[i][1]
        x[i] = tri[i][2]
    four = np.zeros((len(no_border), 4))

    # Each row of four is [suppression radius, response, y, x]. The global
    # maximum is never suppressed, so give it a radius larger than any
    # possible squared distance in the image.
    four[0] = np.array([m * m + n * n + 1, tri[0][0], tri[0][1], tri[0][2]])

    for i in range(len(tri) - 1):
        # Indices of points whose response exceeds 90% of this point's.
        stronger = np.where(0.9 * strength[i + 1] < strength)[0]
        # Squared distances to those stronger points.
        r = np.square(x[i + 1] - x[stronger]) + np.square(y[i + 1] -
                                                          y[stronger])
        r.sort()
        # r[0] is 0, the squared distance from the point to itself, so the
        # suppression radius is the next smallest squared distance.
        radius = r[1]
        four[i + 1] = np.array(
            [radius, tri[i + 1][0], tri[i + 1][1], tri[i + 1][2]])
    four = sorted(four, key=lambda x: x[0], reverse=True)
    y = np.zeros(num_return)
    x = np.zeros(num_return)
    for i in range(num_return):
        x[i] = four[i][3]
        y[i] = four[i][2]

    #############################################################################
    #                             END OF YOUR CODE                              #
    #############################################################################
    return x, y, confidences, scales, orientations
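
A hedged end-to-end sketch tying this detector to the descriptor from code example #7 (the image path and feature_width are illustrative assumptions):

# Hypothetical usage: detect Harris corners, then describe them with the
# SIFT-like features from get_features above.
image = cv2.imread('notre_dame.jpg', cv2.IMREAD_GRAYSCALE).astype(np.float32) / 255.0
x, y, _, _, _ = get_interest_points(image, feature_width=16)
fv = get_features(image, x, y, feature_width=16)
print(fv.shape)  # expected (len(x), 128)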