Example #1
def test_square_image():
    im = np.zeros((50, 50)).astype(float)
    im[:25, :25] = 1.

    # Moravec
    results = peak_local_max(corner_moravec(im),
                             min_distance=10, threshold_rel=0)
    # interest points along edge
    assert len(results) == 57

    # Harris
    results = peak_local_max(corner_harris(im, method='k'),
                             min_distance=10, threshold_rel=0)
    # interest at corner
    assert len(results) == 1

    results = peak_local_max(corner_harris(im, method='eps'),
                             min_distance=10, threshold_rel=0)
    # interest at corner
    assert len(results) == 1

    # Shi-Tomasi
    results = peak_local_max(corner_shi_tomasi(im),
                             min_distance=10, threshold_rel=0)
    # interest at corner
    assert len(results) == 1
Example #2
def test_binary_descriptors_rotation_crosscheck_true():
    """Verify matched keypoints and their corresponding masks results between
    image and its rotated version with the expected keypoint pairs with
    cross_check enabled."""
    img = data.astronaut()
    img = rgb2gray(img)
    tform = tf.SimilarityTransform(scale=1, rotation=0.15, translation=(0, 0))
    rotated_img = tf.warp(img, tform, clip=False)

    extractor = BRIEF(descriptor_size=512)

    keypoints1 = corner_peaks(corner_harris(img), min_distance=5,
                              threshold_abs=0, threshold_rel=0.1)
    extractor.extract(img, keypoints1)
    descriptors1 = extractor.descriptors

    keypoints2 = corner_peaks(corner_harris(rotated_img), min_distance=5,
                              threshold_abs=0, threshold_rel=0.1)
    extractor.extract(rotated_img, keypoints2)
    descriptors2 = extractor.descriptors

    matches = match_descriptors(descriptors1, descriptors2, cross_check=True)

    exp_matches1 = np.array([ 0,  2,  3,  4,  5,  6,  9, 11, 12, 13, 14, 17,
                             18, 19, 21, 22, 23, 26, 27, 28, 29, 31, 32, 33,
                             34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46])
    exp_matches2 = np.array([ 0,  2,  3,  1,  4,  6,  5,  7, 13, 10,  9, 11,
                             15,  8, 14, 12, 16, 18, 19, 21, 20, 24, 25, 26,
                             28, 27, 22, 23, 29, 30, 31, 32, 35, 33, 34, 36])
    assert_equal(matches[:, 0], exp_matches1)
    assert_equal(matches[:, 1], exp_matches2)
Example #3
def test_rotated_img():
    """
    The Harris filter should yield the same results for an image and its
    rotation.
    """
    im = img_as_float(data.astronaut().mean(axis=2))
    im_rotated = im.T

    # Moravec
    results = peak_local_max(corner_moravec(im),
                             min_distance=10, threshold_rel=0)
    results_rotated = peak_local_max(corner_moravec(im_rotated),
                                     min_distance=10, threshold_rel=0)
    assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()
    assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()

    # Harris
    results = peak_local_max(corner_harris(im),
                             min_distance=10, threshold_rel=0)
    results_rotated = peak_local_max(corner_harris(im_rotated),
                                     min_distance=10, threshold_rel=0)
    assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()
    assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()

    # Shi-Tomasi
    results = peak_local_max(corner_shi_tomasi(im),
                             min_distance=10, threshold_rel=0)
    results_rotated = peak_local_max(corner_shi_tomasi(im_rotated),
                                     min_distance=10, threshold_rel=0)
    assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()
    assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()
Example #4
def test_rotated_lena():
    """
    The Harris filter should yield the same results for an image and its
    rotation.
    """
    im = img_as_float(data.lena().mean(axis=2))
    im_rotated = im.T

    # Moravec
    results = peak_local_max(corner_moravec(im))
    results_rotated = peak_local_max(corner_moravec(im_rotated))
    assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()
    assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()

    # Harris
    results = peak_local_max(corner_harris(im))
    results_rotated = peak_local_max(corner_harris(im_rotated))
    assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()
    assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()

    # Shi-Tomasi
    results = peak_local_max(corner_shi_tomasi(im))
    results_rotated = peak_local_max(corner_shi_tomasi(im_rotated))
    assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()
    assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()
Example #5
    def process(self, img2, image_gray):
        # img2 = warp(img2)
        patch_size = [640]
        img2 = rgb2gray(img2)
        image_gray = rgb2gray(img2)

        blobs_dog = blob_dog(image_gray, min_sigma=0.2, max_sigma=225, sigma_ratio=1.6, threshold=.5)
        blobs_dog[:, 2] = blobs_dog[:, 2]  # no-op as written; the usual sigma-to-radius conversion would multiply by sqrt(2)

        blobs = [blobs_dog]
        colors = ['black']
        titles = ['Difference of Gaussian']
        sequence = zip(blobs, colors, titles)

        # plt.imshow(img2)
        # plt.axis("equal")
        # plt.show()

        for blobs, color, title in sequence:
            print(len(blobs))
            for blob in blobs:
                y, x, r = blob
                plotx = x
                ploty = y
                for i in range(3):
                    keypoints1 = corner_peaks(corner_harris(Array.image_arr[i]), min_distance=1)
                    keypoints2 = corner_peaks(corner_harris(img2), min_distance=1)

                    extractor = BRIEF(patch_size=30, mode="uniform")

                    extractor.extract(Array.image_arr[i], keypoints1)
                    keypoints1 = keypoints1[extractor.mask]
                    descriptors1 = extractor.descriptors

                    extractor.extract(img2, keypoints2)
                    keypoints2 = keypoints2[extractor.mask]
                    descriptors2 = extractor.descriptors

                    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
                    
                    # print(keypoints1, keypoints2)
                    # print(matches12)
                    for pizdezh in matches12:
                        X = keypoints2[pizdezh[1]][1]
                        Y = keypoints2[pizdezh[1]][0]

                    if sqrt((plotx - X)**2 + (ploty - Y)**2) < r:
                        seen = [{
                            "type": Array.type_arr[i],
                            "center_shift": (plotx - 160/2) * -0.02,
                            "distance": image_gray[y][x] / 0.08
                        }]
                        print(seen)
                        data.seen.add(seen)
                        break
Example #6
def get_harris_corners(im, edge_discard=20):
    """
    This function takes a b&w image and an optional amount to discard
    on the edge (default is 20 pixels), and finds all Harris corners
    in the image. Harris corners near the edge are discarded and the
    coordinates of the remaining corners are returned. A 2d array (h)
    containing the h value of every pixel is also returned.

    h is the same shape as the original image, im.
    coords is 2 x n (ys, xs).
    """

    assert edge_discard >= 20

    # find harris corners
    h = corner_harris(im, method='eps', sigma=1)
    coords = peak_local_max(h, min_distance=1, indices=True)

    # discard points on edge
    edge = edge_discard  # pixels
    mask = (coords[:, 0] > edge) & \
           (coords[:, 0] < im.shape[0] - edge) & \
           (coords[:, 1] > edge) & \
           (coords[:, 1] < im.shape[1] - edge)
    coords = coords[mask].T
    return h, coords
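A minimal usage sketch for the function above (the astronaut image from skimage.data is only an illustrative input, not part of the original example):

from skimage import data
from skimage.color import rgb2gray

im = rgb2gray(data.astronaut())          # b&w float image
h, coords = get_harris_corners(im, edge_discard=20)
print(h.shape)       # same shape as im
print(coords.shape)  # 2 x n array of (ys, xs)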
Example #7
def extract_corner_harris(patch):
    """ Extract four corner points using harris corner detection algorithm

    """
    # Find corner with harris corner detection
    coords = corner_peaks(corner_harris(patch, k=0.1), min_distance=5)
    coords_subpix = corner_subpix(patch, coords, window_size=13)

    # Find the nearest point for each corner
    dim = patch.shape
    corners = [(0, 0), (dim[0], 0), (dim[0], dim[1]), (0, dim[1])]

    dest_points = [[] for x in range(4)]
    for i in range(4):
        dest_points[i] = search_closest_points(corners[i], coords_subpix)

    # Check for error
    try:
        epsilon = 1e-10
        for i in range(4):
            for j in range(i + 1, 4):
                if calc_distance(dest_points[i], dest_points[j]) < epsilon:
                    print('Error point')
                    return []
    except TypeError:
        return []

    # Reverse y,x position to x,y
    for i in range(4):
        dest_points[i][1], dest_points[i][0] = dest_points[i][0], dest_points[i][1]

    return dest_points
Example #8
def guess_corners(bw):
    """
    Infer the corners of an image using a Sobel filter to find the edges and a
    Harris filter to find the corners. Takes a single color channel.

    Parameters
    ----------
    bw : (m x n) ndarray of ints

    Returns
    -------
    corners : pixel coordinates of plot corners, unsorted
    outline : (m x n) ndarray of bools True -> plot area
    """
    assert len(bw.shape) == 2
    bw = img_as_uint(bw)
    e_map = ndimage.sobel(bw)

    markers = np.zeros(bw.shape, dtype=int)
    markers[bw < 30] = 1
    markers[bw > 150] = 2
    seg = ndimage.watershed_ift(e_map, np.asarray(markers, dtype=int))

    outline = ndimage.binary_fill_holes(1 - seg)
    corners = corner_harris(np.asarray(outline, dtype=int))
    corners = approximate_polygon(corners, 1)
    return corners, outline
Example #9
def test_subpix_dot():
    img = np.zeros((50, 50))
    img[25, 25] = 255
    corner = peak_local_max(corner_harris(img),
                            min_distance=10, threshold_rel=0, num_peaks=1)
    subpix = corner_subpix(img, corner)
    assert_array_equal(subpix[0], (25, 25))
Example #10
def test_subpix():
    img = np.zeros((50, 50))
    img[:25,:25] = 255
    img[25:,25:] = 255
    corner = peak_local_max(corner_harris(img), num_peaks=1)
    subpix = corner_subpix(img, corner)
    assert_array_equal(subpix[0], (24.5, 24.5))
Example #11
def test_uniform_mode():
    """Verify the computed BRIEF descriptors with expected for uniform mode."""
    img = data.coins()

    keypoints = corner_peaks(corner_harris(img), min_distance=5, threshold_abs=0, threshold_rel=0.1)

    extractor = BRIEF(descriptor_size=8, sigma=2, mode="uniform")

    extractor.extract(img, keypoints[:8])

    expected = np.array(
        [
            [False, False, False, True, True, True, False, False],
            [True, True, True, False, True, False, False, True],
            [True, True, True, False, True, True, False, True],
            [True, True, True, True, False, True, False, True],
            [True, True, True, True, True, True, False, False],
            [True, True, True, True, True, True, True, True],
            [False, False, False, True, True, True, True, True],
            [False, True, False, True, False, True, True, True],
        ],
        dtype=bool,
    )

    assert_array_equal(extractor.descriptors, expected)
Example #12
def featurize(img_name):
    """Load an image and convert it into a dictionary of features"""
    img = plt.imread(os.path.join('stimuli', img_name + '.png'))
    height, width, _ = img.shape
    features = defaultdict(int)
    for y in range(height):
        for x in range(width):
            features['red'] += img[y][x][0]
            features['green'] += img[y][x][1]
            features['blue'] += img[y][x][2]
            features['alpha'] += img[y][x][3]

    grey = color.rgb2grey(img)
    for y in range(height):
        for x in range(width):
            for key, value in per_pixel(grey, y, x):
                features[key] += value

    # Normalize over image size
    for key, value in features.items():
        features[key] = float(value) / height / width

    features['blob'] = feature.blob_dog(grey).shape[0]
    features['corners'] = feature.corner_peaks(
        feature.corner_harris(grey)).shape[0]
    return features
Example #13
def dumb_matcher(img1, img2):
    kps = lambda img: feature.corner_peaks(feature.corner_harris(img), min_distance = 2)
    kp1 = kps(img1)
    kp2 = kps(img2)
    to_set = lambda aoa: set(map(lambda x: (x[0], x[1]), aoa))
    s1 = to_set(kp1)
    s2 = to_set(kp2)
    return float(len(s1 & s2) * 2) / (len(s1) + len(s2))
Example #14
def test_subpix_no_class():
    img = np.zeros((50, 50))
    subpix = corner_subpix(img, np.array([[25, 25]]))
    assert_array_equal(subpix[0], (np.nan, np.nan))

    img[25, 25] = 1e-10
    corner = peak_local_max(corner_harris(img), num_peaks=1)
    subpix = corner_subpix(img, np.array([[25, 25]]))
    assert_array_equal(subpix[0], (np.nan, np.nan))
Example #15
def find_corners(path, min_distance=5):
    """Find corners in an image at path
    
    Returns the image and the corner lists.
    """
    from skimage.feature import corner_harris, corner_peaks
    img = imread(path, flatten=True)
    corners = corner_peaks(corner_harris(img), min_distance=min_distance)
    return img, corners
Example #16
def main():
    """Load image, calculate harris scores (window functions: matrix of ones, gauss)
    and plot the results."""
    img = data.checkerboard()
    score_window = harris_ones(img, 7)
    score_gauss = harris_gauss(img)
    util.plot_images_grayscale(
        [img, score_window, score_gauss, feature.corner_harris(img)],
        ["Image", "Harris-Score (ones)", "Harris-Score (gauss)", "Harris-Score (ground truth)"]
    )
Example #17
def test_num_peaks():
    """For a bunch of different values of num_peaks, check that
    peak_local_max returns exactly the right amount of peaks. Test
    is run on Lena in order to produce a sufficient number of corners"""

    lena_corners = corner_harris(data.lena())

    for i in range(20):
        n = np.random.random_integers(20)  # deprecated; np.random.randint(1, 21) is the modern equivalent
        results = peak_local_max(lena_corners, num_peaks=n)
        assert (results.shape[0] == n)
Example #18
def test_noisy_square_image():
    im = np.zeros((50, 50)).astype(float)
    im[:25, :25] = 1.
    np.random.seed(seed=1234)
    im = im + np.random.uniform(size=im.shape) * .2

    # Moravec
    results = peak_local_max(corner_moravec(im))
    # undefined number of interest points
    assert results.any()

    # Harris
    results = peak_local_max(corner_harris(im, sigma=1.5, method='k'))
    assert len(results) == 1
    results = peak_local_max(corner_harris(im, sigma=1.5, method='eps'))
    assert len(results) == 1

    # Shi-Tomasi
    results = peak_local_max(corner_shi_tomasi(im, sigma=1.5))
    assert len(results) == 1
Example #19
def test_match_keypoints_brief_lena_translation():
    """Test matched keypoints between lena image and its translated version."""
    img = data.lena()
    img = rgb2gray(img)
    img.shape
    tform = tf.SimilarityTransform(scale=1, rotation=0, translation=(15, 20))
    translated_img = tf.warp(img, tform)

    keypoints1 = corner_peaks(corner_harris(img), min_distance=5)
    descriptors1, keypoints1 = brief(img, keypoints1, descriptor_size=512)

    keypoints2 = corner_peaks(corner_harris(translated_img), min_distance=5)
    descriptors2, keypoints2 = brief(translated_img, keypoints2,
                                     descriptor_size=512)

    matched_keypoints = match_keypoints_brief(keypoints1, descriptors1,
                                              keypoints2, descriptors2,
                                              threshold=0.10)

    assert_array_equal(matched_keypoints[:, 0, :], matched_keypoints[:, 1, :] +
                       [20, 15])
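The brief and match_keypoints_brief helpers used above come from a very early scikit-image API; in current releases the equivalent matching step is done with the BRIEF class and match_descriptors (as in Example #2). A rough, hedged sketch of that equivalent, with illustrative parameter values only:

from skimage.feature import BRIEF, corner_harris, corner_peaks, match_descriptors

def match_brief(img, warped_img, descriptor_size=512, min_distance=5):
    # Detect Harris corners in both images.
    kp1 = corner_peaks(corner_harris(img), min_distance=min_distance)
    kp2 = corner_peaks(corner_harris(warped_img), min_distance=min_distance)

    # Extract BRIEF descriptors; keypoints too close to the border are masked out.
    extractor = BRIEF(descriptor_size=descriptor_size)
    extractor.extract(img, kp1)
    desc1, kp1 = extractor.descriptors, kp1[extractor.mask]
    extractor.extract(warped_img, kp2)
    desc2, kp2 = extractor.descriptors, kp2[extractor.mask]

    # Each row of `matches` holds indices of mutually best matches into kp1 and kp2.
    matches = match_descriptors(desc1, desc2, cross_check=True)
    return kp1[matches[:, 0]], kp2[matches[:, 1]]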
Example #20
def corners(provider):
    """
    number of corners
    """

    gray = provider.as_gray()

    # TODO: custom parameters would give rise to exceptions from mismatched shapes
    coords = corner_peaks(corner_harris(gray))#, min_distance=5)
    coords_subpix = corner_subpix(gray, coords)#, window_size=13)

    return len(coords_subpix)
Example #21
def test_num_peaks():
    """For a bunch of different values of num_peaks, check that
    peak_local_max returns exactly the right amount of peaks. Test
    is run on the astronaut image in order to produce a sufficient number of corners"""

    img_corners = corner_harris(rgb2gray(data.astronaut()))

    for i in range(20):
        n = np.random.randint(1, 21)
        results = peak_local_max(img_corners,
                                 min_distance=10, threshold_rel=0, num_peaks=n)
        assert (results.shape[0] == n)
Example #22
def test_square_image():
    im = np.zeros((50, 50)).astype(float)
    im[:25, :25] = 1.0

    # Moravec
    results = peak_local_max(corner_moravec(im))
    # interest points along edge
    assert len(results) == 57

    # Harris
    results = peak_local_max(corner_harris(im, method="k"))
    # interest at corner
    assert len(results) == 1

    results = peak_local_max(corner_harris(im, method="eps"))
    # interest at corner
    assert len(results) == 1

    # Shi-Tomasi
    results = peak_local_max(corner_shi_tomasi(im))
    # interest at corner
    assert len(results) == 1
Example #23
def test_match_keypoints_brief_lena_rotation():
    """Verify matched keypoints result between lena image and its rotated
    version with the expected keypoint pairs."""
    img = data.lena()
    img = rgb2gray(img)
    img.shape
    tform = tf.SimilarityTransform(scale=1, rotation=0.10, translation=(0, 0))
    rotated_img = tf.warp(img, tform)

    keypoints1 = corner_peaks(corner_harris(img), min_distance=5)
    descriptors1, keypoints1 = brief(img, keypoints1, descriptor_size=512)

    keypoints2 = corner_peaks(corner_harris(rotated_img), min_distance=5)
    descriptors2, keypoints2 = brief(rotated_img, keypoints2,
                                     descriptor_size=512)

    matched_keypoints = match_keypoints_brief(keypoints1, descriptors1,
                                              keypoints2, descriptors2,
                                              threshold=0.07)

    expected = np.array([[[263, 272],
                          [234, 298]],

                         [[271, 120],
                          [258, 146]],

                         [[323, 164],
                          [305, 195]],

                         [[414,  70],
                          [405, 111]],

                         [[435, 181],
                          [415, 223]],

                         [[454, 176],
                          [435, 221]]])

    assert_array_equal(matched_keypoints, expected)
Example #24
def test_squared_dot():
    im = np.zeros((50, 50))
    im[4:8, 4:8] = 1
    im = img_as_float(im)

    # Moravec fails

    # Harris
    results = peak_local_max(corner_harris(im))
    assert (results == np.array([[6, 6]])).all()

    # Shi-Tomasi
    results = peak_local_max(corner_shi_tomasi(im))
    assert (results == np.array([[6, 6]])).all()
Example #25
def test_subpix_border():
    img = np.zeros((50, 50))
    img[1:25,1:25] = 255
    img[25:-1,25:-1] = 255
    corner = corner_peaks(corner_harris(img), min_distance=1)
    subpix = corner_subpix(img, corner, window_size=11)
    ref = np.array([[ 0.52040816,  0.52040816],
                    [ 0.52040816, 24.47959184],
                    [24.47959184,  0.52040816],
                    [24.5       , 24.5       ],
                    [24.52040816, 48.47959184],
                    [48.47959184, 24.52040816],
                    [48.47959184, 48.47959184]])
    assert_almost_equal(subpix, ref)
Example #26
def nameTheShape(file):
    coords = corner_peaks(corner_harris(file), min_distance=5)
    noCorners = len(coords)
    if (noCorners == 3):
        shapeName = "triangle"
        return (shapeName)
    elif (noCorners == 4):
        shapeName = "quadrilateral"
        return (shapeName)
    elif (noCorners == 5):
        shapeName = "pentagon"
        return (shapeName)
    else:
        shapeName = "ERROR!!!"
        return (shapeName)
Example #27
def test_squared_dot():
    im = np.zeros((50, 50))
    im[4:8, 4:8] = 1
    im = img_as_float(im)

    # Moravec fails

    # Harris
    results = peak_local_max(corner_harris(im),
                             min_distance=10, threshold_rel=0)
    assert (results == np.array([[6, 6]])).all()

    # Shi-Tomasi
    results = peak_local_max(corner_shi_tomasi(im),
                             min_distance=10, threshold_rel=0)
    assert (results == np.array([[6, 6]])).all()
Example #28
def getFeatures(img, bbox):
    cimg = corner_harris(img,sigma=1,k=0.1)
#    plt.imshow(cimg)
#    plt.show()
    x = np.zeros((FEATS_PER_FACE,bbox.shape[0]))
    y = np.zeros((FEATS_PER_FACE,bbox.shape[0]))

    for i, box in enumerate(bbox):
        bboxcimg = cimg*0
        boxx, boxy = np.meshgrid(np.arange(box[0,0], box[2,0]), np.arange(box[0,1], box[1,1]))
        bboxcimg[boxx,boxy] = cimg[boxx,boxy]
        fx, fy, rmax = anms(bboxcimg, FEATS_PER_FACE)
        x[:,i] = fx
        y[:,i] = fy

    return x, y
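The anms helper called above is not shown in this example (and is not part of scikit-image); a minimal sketch of adaptive non-maximal suppression, under the assumption that it should return the x and y coordinates of FEATS_PER_FACE well-separated corners plus the smallest suppression radius, might look like this:

import numpy as np

def anms(response, n_points):
    # Hypothetical sketch, not the original helper: adaptive non-maximal
    # suppression over a Harris corner response image.
    ys, xs = np.nonzero(response > 0)
    strengths = response[ys, xs]
    order = np.argsort(-strengths)               # strongest corners first
    ys, xs = ys[order], xs[order]

    radii = np.full(len(ys), np.inf)
    for i in range(1, len(ys)):
        # suppression radius = distance to the nearest stronger corner
        d2 = (ys[:i] - ys[i]) ** 2 + (xs[:i] - xs[i]) ** 2
        radii[i] = np.sqrt(d2.min())

    keep = np.argsort(-radii)[:n_points]
    return xs[keep], ys[keep], radii[keep].min()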
Example #29
def test_num_peaks():
    """For a bunch of different values of num_peaks, check that
    peak_local_max returns exactly the right amount of peaks. Test
    is run on the astronaut image in order to produce a sufficient number of
    corners.
    """

    img_corners = corner_harris(rgb2gray(data.astronaut()))

    for i in range(20):
        n = np.random.randint(1, 21)
        results = peak_local_max(img_corners,
                                 min_distance=10,
                                 threshold_rel=0,
                                 num_peaks=n)
        assert (results.shape[0] == n)
Example #30
def main(argv):
    image = io.imread(argv[0], True)
    smooth = gaussian(image, sigma=4, mode='reflect')
    binary = image > threshold_otsu(smooth)

    skeleton = skeletonize_3d(invert(binary))

    coords = corner_peaks(corner_harris(binary, k=0.2, sigma=4),
                          min_distance=5)
    coords_subpix = corner_subpix(binary, coords, window_size=13)

    fig, ax = plt.subplots()
    ax.imshow(binary, interpolation='nearest', cmap=plt.cm.gray)
    ax.plot(coords[:, 1], coords[:, 0], '+r', markersize=15)
    # ax.plot(coords_subpix[:, 1], coords_subpix[:, 0], '+r', markersize=15)
    ax.axis((0, 600, 600, 0))
    plt.show()
Example #31
def main(inputfilename, oututfilename):
    # Load image
    image = imread(inputfilename)
    image = rgb2gray(image)

    # Apply corner detection algorithm
    corners = corner_harris(image)
    coords = corner_peaks(corners, min_distance=5)
    coords_subpix = corner_subpix(image, coords, window_size=13)

    # Diplay the image
    fig, ax = plt.subplots()
    ax.imshow(image, interpolation='nearest', cmap=plt.cm.gray)
    ax.plot(coords[:, 1], coords[:, 0], '.b', markersize=3)
    ax.plot(coords_subpix[:, 1], coords_subpix[:, 0], '+r', markersize=15)
    ax.axis((0, 350, 350, 0))
    plt.savefig(oututfilename)
Example #32
def corner_demo():
    image = io.imread("D:/images/home.jpg")
    gray = color.rgb2gray(image)
    coords = feature.corner_peaks(feature.corner_harris(gray), min_distance=5)
    fig, axes = plt.subplots(1, 2, figsize=(8, 4))
    ax = axes.ravel()

    ax[0].imshow(image)
    ax[0].set_title("Input ")
    ax[1].imshow(image)
    ax[1].set_title("harris corner detection")
    ax[0].axis('off')
    ax[1].axis('off')
    ax[1].plot(coords[:, 1], coords[:, 0], color='red', marker='o',
            linestyle='None', markersize=4)

    fig.tight_layout()
    plt.show()
Example #33
 def identifyPlate(self, roiTestImageResized):
     roiTestImageResized = roiTestImageResized.astype('uint8')
     ImageResized = cv2.resize(roiTestImageResized, (70, 30),
                               interpolation=cv2.INTER_CUBIC)
     cannyFeature = cv2.Canny(ImageResized, 100, 200)
     cannyStacked = np.hstack(cannyFeature)
     # cannyFeature = canny(ImageResized, sigma=3.0, low_threshold=None, high_threshold=None, mask=None, use_quantiles=False)
     cannyFeatureReshaped = cannyFeature.reshape(cannyFeature.shape[0] *
                                                 cannyFeature.shape[1])
     harrisCornerFeature = corner_harris(ImageResized,
                                         method='k',
                                         k=0.05,
                                         eps=1e-06,
                                         sigma=1)
     harrisCornerFeature = harrisCornerFeature.reshape(
         harrisCornerFeature.shape[0] * harrisCornerFeature.shape[1])
     nbr = self.clf.predict(cannyStacked)
     return nbr[0], self.clf.decision_function(cannyStacked)[0]
Example #34
def get_head_tail(image, radius=12, sigma=4, min_distance=10):
    """
    Make a head tail mask of a worm
    :param image: binary worm image
    :param radius: radius used around point
    :param sigma: harris detector radius
    :param min_distance: distance between head and tail
    :return: mask of head and tail
    """
    hc = corner_harris(image, sigma=sigma)
    cp = corner_peaks(hc, min_distance=min_distance, num_peaks=2)
    mask = np.zeros_like(image)

    for c in cp:
        rr, cc = circle(c[0], c[1], radius, shape=mask.shape)
        mask[rr, cc] = 1

    return image & mask
Example #35
def test_noisy_square_image():
    im = np.zeros((50, 50)).astype(float)
    im[:25, :25] = 1.
    np.random.seed(seed=1234)
    im = im + np.random.uniform(size=im.shape) * .2

    # Moravec
    results = peak_local_max(corner_moravec(im))
    # undefined number of interest points
    assert results.any()

    # Harris
    results = peak_local_max(corner_harris(im, sigma=1.5))
    assert len(results) == 1

    # Shi-Tomasi
    results = peak_local_max(corner_shi_tomasi(im, sigma=1.5))
    assert len(results) == 1
Example #36
def test_square_image():
    im = np.zeros((50, 50)).astype(float)
    im[:25, :25] = 1.

    # Moravec
    results = peak_local_max(corner_moravec(im))
    # interest points along edge
    assert len(results) == 57

    # Harris
    results = peak_local_max(corner_harris(im))
    # interest at corner
    assert len(results) == 1

    # Shi-Tomasi
    results = peak_local_max(corner_shi_tomasi(im))
    # interest at corner
    assert len(results) == 1
Example #37
 def harris_skimage(self, image, num_peaks, **kwargs):
     coords_subpix = np.zeros_like(image)
     cornerness_matrix = sf.corner_peaks(
         sf.corner_harris(image), min_distance=1,
         num_peaks=num_peaks)  # larger distance -> fewer points
     coords_subpix = sf.corner_subpix(
         image, cornerness_matrix, window_size=13,
         alpha=kwargs["alpha"])  # sub pixel accuracy
     display.draw_points(image,
                         cornerness_matrix,
                         '_',
                         self.path[2:-1],
                         method_name=kwargs['method'],
                         name=self.name,
                         sp=coords_subpix,
                         counter=kwargs["counter"])
     print("detected points: ", cornerness_matrix.shape[0])
     return cornerness_matrix, coords_subpix
Example #38
def keyboard_crop(im):
    corners = corner_harris(im)
    lim = {
        'west': corners[:, 1].min() - 3,
        'east': corners[:, 1].max() + 3,
        'north': corners[:, 0].max() - 3,
        'south': corners[:, 0].min() + 3,
    }
    limiter = KeyboardCrop(im, lim)

    limiter.edge_effects['west'] = {'left': -1, 'right': +1}
    limiter.edge_effects['north'] = {'up': +1, 'down': -1}
    limiter.edge_effects['east'] = {'left': +1, 'right': -1}
    limiter.edge_effects['south'] = {'up': -1, 'down': +1}

    ylo, xhi, yhi, xlo = limiter.get_edges()
    cropped = im[ylo:yhi, xlo:xhi]
    return cropped
Example #39
def keyboard_crop(im):
    corners = corner_harris(im)
    lim = {
        'west': corners[:, 1].min()-3,
        'east': corners[:, 1].max()+3,
        'north': corners[:, 0].max()-3,
        'south': corners[:, 0].min()+3,
    }
    limiter = KeyboardCrop(im, lim)

    limiter.edge_effects['west'] = {'left': -1, 'right': +1}
    limiter.edge_effects['north'] = {'up': +1, 'down': -1}
    limiter.edge_effects['east'] = {'left': +1, 'right': -1}
    limiter.edge_effects['south'] = {'up': -1, 'down': +1}

    ylo, xhi, yhi, xlo = limiter.get_edges()
    cropped = im[ylo:yhi, xlo:xhi]
    return cropped
Example #40
def forward(ob):
    """ 
  Takes raw (768,1024,3) uint8 screen and returns list of VNC events.
  The browser window indents the origin of MiniWob 
  by 75 pixels from top and
  10 pixels from the left. 
  The first 50 pixels along height are the query.
  """
    if ob is None: return []

    x = ob['vision']
    crop = x[75:75 + 50 + 160, 10:10 + 160, :]  # miniwob coordinates crop
    square = x[75 + 50:75 + 50 + 160, 10:10 + 160, :]
    gray = rgb2gray(square)
    print(gray)
    coords = corner_peaks(corner_harris(gray), min_distance=5)
    coords_subpix = corner_subpix(gray, coords, window_size=13)
    for item in coords_subpix:
        pass
        #print item[0]+75+50,item[1]+10
    newy = coords_subpix[:, 0]
    newx = coords_subpix[:, 1]
    newy = newy[np.logical_not(np.isnan(newy))]
    newx = newx[np.logical_not(np.isnan(newx))]
    #if newx == None or newy == None:
    #return []

    goal_y, goal_x = np.mean(newy) + 125, np.mean(newx) + 10
    if math.isnan(goal_y) or math.isnan(goal_x):
        return []

    print(goal_y, goal_x)
    #xcoord = np.random.randint(0, 160) + 10         # todo: something more clever here
    #ycoord = np.random.randint(0, 160) + 75 + 50    # todo: something more clever here
    #print ycoord,xcoord
    # 1. move to x,y with left button released, and click there (2. and 3.)
    action = [
        universe.spaces.PointerEvent(goal_x, goal_y, 0),
        universe.spaces.PointerEvent(goal_x, goal_y, 1),
        universe.spaces.PointerEvent(goal_x, goal_y, 0)
    ]

    return action
Example #41
def test_normal_mode():
    """Verify the computed BRIEF descriptors with expected for normal mode."""
    img = data.coins()

    keypoints = corner_peaks(corner_harris(img), min_distance=5)

    extractor = BRIEF(descriptor_size=8, sigma=2)

    extractor.extract(img, keypoints[:8])

    expected = np.array([[False,  True, False, False,  True, False,  True, False],
                         [ True, False,  True,  True, False,  True, False, False],
                         [ True, False, False,  True, False,  True, False,  True],
                         [ True,  True,  True,  True, False,  True, False,  True],
                         [ True,  True,  True, False, False,  True,  True,  True],
                         [False, False, False, False,  True, False, False, False],
                         [False,  True, False, False,  True, False,  True, False],
                         [False, False, False, False, False, False, False, False]], dtype=bool)

    assert_array_equal(extractor.descriptors, expected)
Example #42
def test_uniform_mode(dtype):
    """Verify the computed BRIEF descriptors with expected for uniform mode."""
    img = data.coins().astype(dtype)

    keypoints = corner_peaks(corner_harris(img),
                             min_distance=5,
                             threshold_abs=0,
                             threshold_rel=0.1)

    extractor = BRIEF(descriptor_size=8, sigma=2, mode='uniform')

    extractor.extract(img, keypoints[:8])

    expected = np.array([[1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 1, 0, 0],
                         [1, 1, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1],
                         [1, 1, 1, 0, 0, 1, 0, 0], [1, 1, 1, 1, 0, 1, 0, 0],
                         [1, 1, 0, 0, 0, 1, 0, 0], [0, 1, 1, 1, 0, 1, 1, 1]],
                        dtype=bool)

    assert_array_equal(extractor.descriptors, expected)
Example #43
 def get_pnodes(self):
     corners = np.array(corner_peaks(corner_harris(self.map_), min_distance=1))
     print("Corners located!")
     corner_map = np.zeros(self.map_.shape)
     for corner in corners:
         corner_map[corner[0], corner[1]] = 1
     selem_mat = np.ones((12, 12))
     for _ in range(5):
         corner_map = dilation(corner_map)
     for _ in range(1):
         corner_map = opening(corner_map, selem=selem_mat)
     for _ in range(8):
         corner_map = dilation(corner_map)
     print("Pseudo-nodes located!")
     p_nodes = []
     for i in range(len(corner_map)):
         for j in range(len(corner_map[i])):
             if corner_map[i, j] > 0.5:
                 p_nodes.append([i, j])
     return p_nodes
Example #44
def centre_button(ob):

  if ob is None: 
    return -1,-1
  x = ob['vision']
  crop = x[75:75+50+160, 10:10+160, :]               # miniwob coordinates crop
  square = x[75+50:75+50+160, 10:10+160, :]  
  gray = rgb2gray(square)
  coords = corner_peaks(corner_harris(gray), min_distance=5)
  coords_subpix = corner_subpix(gray, coords, window_size=13)
  newy = coords_subpix[:,0]
  newx = coords_subpix[:,1]
  newy = newy[np.logical_not(np.isnan(newy))]
  newx = newx[np.logical_not(np.isnan(newx))]

  goal_y, goal_x = np.mean(newy) + 125, np.mean(newx) + 10
  if math.isnan(goal_y) or math.isnan(goal_x) or goal_y is None:
    return -1, -1

  return goal_y, goal_x
Example #45
def test_uniform_mode():
    """Verify the computed BRIEF descriptors with expected for uniform mode."""
    img = rgb2gray(data.lena())

    keypoints = corner_peaks(corner_harris(img), min_distance=5)

    extractor = BRIEF(descriptor_size=8, sigma=2, mode='uniform')

    extractor.extract(img, keypoints[:8])

    expected = np.array([[ True, False,  True, False, False,  True, False, False],
                         [False,  True, False, False,  True,  True,  True,  True],
                         [ True, False, False, False, False, False, False, False],
                         [False,  True,  True, False, False, False,  True, False],
                         [False, False, False, False, False, False,  True, False],
                         [False,  True, False, False,  True, False, False, False],
                         [False, False,  True,  True, False, False,  True,  True],
                         [ True,  True, False, False, False, False, False, False]], dtype=bool)

    assert_array_equal(extractor.descriptors, expected)
Example #46
    def show_features(self, gd_file):
        r_img = self.cp.resize_img(PIL.Image.open(gd_file),
                                   base_width=g_prisma_image_size,
                                   keep_size=False)
        l_img = np.float32(r_img.convert('L'))
        ll_img = np.float32(l_img / 255)

        coords = corner_peaks(corner_harris(ll_img), min_distance=5)
        coords_subpix = corner_subpix(ll_img, coords, window_size=25)

        plt.figure(figsize=(8, 8))
        plt.imshow(r_img, interpolation='nearest')
        plt.plot(coords_subpix[:, 1],
                 coords_subpix[:, 0],
                 '+r',
                 markersize=15,
                 mew=5)
        plt.plot(coords[:, 1], coords[:, 0], '.b', markersize=7)
        plt.axis('off')
        plt.show()
Example #47
def calculate_descriptors(X):
    extractor = BRIEF()

    Descriptors = []
    for i in range(len(X)):
        Im = np.asarray(X[i, :, :, :], dtype='float32')
        Max = np.amax(Im)
        Im = Im / Max
        Im = rgb2gray(Im)
        keypoints = corner_peaks(corner_harris(Im), min_distance=5)
        extractor.extract(Im, keypoints)
        Temp = extractor.descriptors
        Descriptors.append(
            np.asarray(np.round(np.average(Temp, axis=0)), dtype='int32'))

    Descriptors_matrix = np.zeros([len(X), 256])
    for i in range(len(X)):
        Descriptors_matrix[i, :] = Descriptors[i]

    return Descriptors_matrix
Example #48
def test_uniform_mode():
    """Verify the computed BRIEF descriptors with expected for uniform mode."""
    img = data.coins()

    keypoints = corner_peaks(corner_harris(img), min_distance=5)

    extractor = BRIEF(descriptor_size=8, sigma=2, mode='uniform')

    extractor.extract(img, keypoints[:8])

    expected = np.array([[False, False, False, True, True, True, False, False],
                         [True, True, True, False, True, False, False, True],
                         [True, True, True, False, True, True, False, True],
                         [True, True, True, True, False, True, False, True],
                         [True, True, True, True, True, True, False, False],
                         [True, True, True, True, True, True, True, True],
                         [False, False, False, True, True, True, True, True],
                         [False, True, False, True, False, True, True, True]],
                        dtype=bool)

    assert_array_equal(extractor.descriptors, expected)
Example #49
def test_normal_mode():
    """Verify the computed BRIEF descriptors with expected for normal mode."""
    img = data.coins()

    keypoints = corner_peaks(corner_harris(img), min_distance=5,
                             threshold_abs=0, threshold_rel=0.1)

    extractor = BRIEF(descriptor_size=8, sigma=2)

    extractor.extract(img, keypoints[:8])

    expected = np.array([[False,  True, False, False,  True, False,  True, False],
                         [ True, False,  True,  True, False,  True, False, False],
                         [ True, False, False,  True, False,  True, False,  True],
                         [ True,  True,  True,  True, False,  True, False,  True],
                         [ True,  True,  True, False, False,  True,  True,  True],
                         [False, False, False, False,  True, False, False, False],
                         [False,  True, False, False,  True, False,  True, False],
                         [False, False, False, False, False, False, False, False]], dtype=bool)

    assert_array_equal(extractor.descriptors, expected)
Example #50
    def get_bbox_(self, data: TransformedImageData, image: array) -> BBox:
        contours = find_contours(image, .5)
        if len(contours) != 1:
            return BBox.from_image(image)

        image = sobel(image)

        coords = corner_peaks(corner_harris(image),
                              threshold_rel=0,
                              num_peaks=4)
        x_values = sorted(coords[:, 1])
        y_values = sorted(coords[:, 0])
        min_x, max_x = x_values[1:3]
        min_y, max_y = y_values[1:3]

        if self.debug_level >= DebugLevel.REPORT:
            imshow(image, cmap="gray")
            plot(coords[:, 1], coords[:, 0], '+r', markersize=15)
            self.savers_['corners'].save(data.name)

        return BBox(min_x, min_y, max_x, max_y)
Example #51
def find_keypoints(img, scheme="SURF", radius=None):
    if scheme == "SURF":
        detector = cv2.xfeatures2d.SURF_create(hessianThreshold=400,
                                               nOctaves=4,
                                               nOctaveLayers=3,
                                               extended=False,
                                               upright=True)
    elif scheme == "SIFT":
        detector = cv2.xfeatures2d.SIFT_create(nOctaveLayers=3, sigma=1.3)
    elif scheme == "BRISK":
        detector = cv2.BRISK_create(thresh=30, octaves=3)
    elif scheme == "ORB":
        detector = cv2.ORB_create(nfeatures=10000)

    if scheme not in ["HARRIS"]:
        kps = detector.detect(img, None)
    else:
        cnrs = corner_peaks(corner_harris(img), min_distance=radius)
        kps = [FakeCVFpt(xy) for xy in cnrs]

    return kps
Example #52
def briefRotLite(im, compareX, compareY, uX, uY,):
    locs, desc = None, None
    # YOUR CODE HERE
    method = feat.corner_harris(im,sigma = 1.5)
    locs = feat.corner_peaks(method, min_distance = 2)
    patch_width = 9

    # Load the matrices that we saved:
    comp_x = sio.loadmat('testPattern.mat')['compareX'][0]
    comp_y = sio.loadmat('testPattern.mat')['compareY'][0]

    unrav_x = np.unravel_index(comp_x + 40, (patch_width, patch_width))
    unrav_y = np.unravel_index(comp_y + 40, (patch_width, patch_width))

    # Compute the 2x2 second-moment matrix of the keypoint locations
    I = np.dot(locs.T, locs)

    # Compute the principal direction (d) after computing SVD on I
    _,_,SVD = np.linalg.svd(I)
    d = np.array( SVD[0,:] )

    # Compute rotation matrix now that you have principal direction
    R = np.array([ [d[0],d[1] ], [-d[1],d[0]] ])

    # Now that you have the rotation matrix, unravel it to find the new location of Y
    y = np.unravel_index(comp_y + 40, (patch_width, patch_width))
    
    # Compute the dot product of the rotation matrix with the coordinates
    y = np.dot(R, y)
    y_range = y.shape[1]

    # Update the new locations
    y_ = np.array([ 9*y[0,i] + y[1,i] for i in range(y_range) ]) - 40

    # Now that that is done, recompute the brief to try and find the keypoints
    locs, desc = computeBrief(im, locs, comp_x, comp_y, unrav_x, unrav_y)
    


    return locs, desc
Example #53
def getFeatures(img, bbox):
    #TODO: Your code here
    import numpy as np
    import cv2
    import scipy
    from skimage.feature import corner_shi_tomasi, corner_harris
    [row, col,
     dim] = np.asarray(bbox.shape)  #find the number of faces detected
    bbox = bbox.astype(int)  #cast corners of the bounding box to integer
    neighbors = np.ones(
        (3, 3))  #define mask for neighbors of local maximum suppression
    xcoord = []  #preallocate feature x coordinates list
    ycoord = []  #preallocate feature y coordinates list
    numPt = 0  #keep track of max number of feature points for each face
    for i in range(row):
        currentFace = img[bbox[i, 0, 1]:bbox[i, 2, 1], bbox[i, 0, 0]:bbox[
            i, 1, 0]]  #extract image that is in the bounding box
        currentFace = corner_harris(currentFace)  #corner edge detection
        idx = (currentFace > 0.0005)  #threshold suppression
        currentFace = currentFace * idx  #filter out points that did not make it past threshold suppression
        localM = scipy.ndimage.filters.maximum_filter(
            currentFace, footprint=neighbors
        )  #local maximum suppression using 3x3 neighborhood
        msk = (currentFace == localM)  #
        msk = msk * currentFace  #
        [y, x] = np.where(msk > 0)  #
        x = x + bbox[i, 0, 0]  #shift coordinates back to their real position
        y = y + bbox[i, 0, 1]
        if len(x) > numPt:
            numPt = len(x)  #update max feature points
        xcoord.append(x)  #add detected features to their respective list
        ycoord.append(y)
    x = np.ones((numPt, row)) * -1  #preallocate x coordinate return variable
    y = np.ones((numPt, row)) * -1  #preallocate y coordinate return variable
    for k in range(
            row
    ):  #fill in return variables based on contents xcoord and ycoord lists
        x[0:len(xcoord[k]), k] = xcoord[k]
        y[0:len(ycoord[k]), k] = ycoord[k]
    return x, y
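The threshold-plus-maximum_filter suppression above is essentially what skimage's corner_peaks does in a single call; a hedged equivalent for one face patch (the 0.0005 threshold is simply carried over from the code above) might be:

from skimage.feature import corner_harris, corner_peaks

def face_corner_coords(face_patch, threshold=0.0005):
    # corner_peaks combines the threshold and local-maximum suppression steps
    response = corner_harris(face_patch)
    peaks = corner_peaks(response, min_distance=1,
                         threshold_abs=threshold, threshold_rel=0)
    return peaks[:, 1], peaks[:, 0]  # x, y coordinates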
Example #54
def exer13(testImageFolder,saveImageFolder):
    """
    Deliverables: 
        Include in the report the code for your function. 
        Apply this function to the modelhouses.png image and 
        create a figure of the resulting corner points overlaid 
        on the modelhouses.png image. 
        Remember to indicate your choice of parameter settings 
        in the caption of the figure.
    """
    
    
    img = plt.imread(testImageFolder + "modelhouses.png")
    
    im = normalize(img, norm='max')
    

    sigma = 2
    Kthresholds = [0,1,15,20]
    K = list(map(lambda x: x/1e2, Kthresholds))

    for j in range(len(K)):
            res = corner_harris(im, sigma = sigma, k = K[j], method='k')
            peaks = corner_peaks(res*-1)
            
            fig = plt.figure()
            ax = plt.subplot(1,1,1)
            ax.plot(peaks[:,1],peaks[:,0], '.b')
            ax.imshow(img, cmap=plt.cm.gray)
    
            ax.axis('off')
            # title = r'Harris_corner $\sigma={},k={}$'.format(2,K[j])
            # ax.set_title(title, fontsize=11)
            
            fig.tight_layout()
            
            filename = "exer13-" + 'k={}_sigma={}_inverted'.format(Kthresholds[j],sigma)
    
            plt.savefig(saveImageFolder + filename)    
            plt.close()
Example #55
def rotate_and_crop_image(img_path):
    # open the image
    img = Image.open(img_path)
    date = re.findall(r'(\d\d\d\d-\d\d-\d\d_\d\d-\d\d)', img_path)[0]
    imgNr = re.findall(r'(image\d).jpg', img_path)[0]

    # rotate
    img = img.rotate(272 - 180, resample=Image.BICUBIC,
                     expand=True)  # rotate image

    # crop
    # box = (815, 1345, 920, 1380)
    box = (815 - 100, 1345 - 100, 920 + 100, 1380 + 100)
    img = img.crop(box)

    # save rotated and cropped
    saveDir = "{}/{}/cropped/".format(path_cropped_digits, date)
    if not os.path.exists(saveDir):
        os.makedirs(saveDir)
    save_path_img_rotated = '{}/{}.png'.format(saveDir, imgNr)
    img.save(save_path_img_rotated)

    # load into skimage
    im = io.imread(save_path_img_rotated, as_grey=True)
    #canny filtering
    # c1 = feature.canny(im, sigma=3)
    c2 = feature.canny(im, sigma=5)

    # corner detection
    coords = corner_peaks(corner_harris(c2), min_distance=25)
    coords_subpix = corner_subpix(c2, coords, window_size=13)

    fig, ax = plt.subplots()
    ax.imshow(c2, interpolation='nearest', cmap=plt.cm.gray)
    ax.plot(coords[:, 1], coords[:, 0], '.b', markersize=3)
    ax.plot(coords_subpix[:, 1], coords_subpix[:, 0], '+r', markersize=15)
    # ax.axis((0, 350, 350, 0))
    plt.show()
    sys.exit()
Example #56
    def find_corners(self, image, min_distance, window_size):
        """
        :param image:
        :param min_distance:
        :param window_size:
        :return: Points identified as corners.
        """

        # Locate corners, returned values are row/col coordinates (rcs).
        corner_rcs = corner_peaks(corner_harris(image), min_distance)
        subpix_rcs = corner_subpix(image=image,
                                   corners=corner_rcs,
                                   window_size=window_size)

        corners = []
        for i, subpix_rc in enumerate(subpix_rcs):
            if np.isnan(subpix_rc).any():
                corners.append((corner_rcs[i][1], corner_rcs[i][0]))
            else:
                corners.append((subpix_rc[1], subpix_rc[0]))

        return tuple(corners)
Example #57
def harris_voronoi(img_data, vor_arr, h_smoothness, dist_on_skel, is_alpha):
    # Use Harris corners and a Voronoi diagram built from them to get regions for
    # every stroke before it hits an angle, so strokes can be joined without
    # unwanted black patches from merging points with acute angles.

    #prep and feed Harris corner data
    h_data = smooth(vor_arr, h_smoothness)
    harris = corner_peaks(corner_harris(h_data),
                          threshold_rel=0,
                          min_distance=1)
    vor = voronoi3(harris)

    #get connected lines from the finite Voronoi data
    np_img = np.zeros(dist_on_skel.shape)
    for i in vor:
        x0 = keep_in_bounds(i[0][0], dist_on_skel, 0)
        y0 = keep_in_bounds(i[0][1], dist_on_skel, 1)
        x1 = keep_in_bounds(i[1][0], dist_on_skel, 0)
        y1 = keep_in_bounds(i[1][1], dist_on_skel, 1)

        rr, cc = line(x0, y0, x1, y1)
        try:
            np_img[rr, cc] = 1
        except IndexError:
            continue

    #prep
    #print("image: " + str(type(np_img[0,0])) + " img_data: " + str(type(img_data[0,0])))
    image = np_img
    image = invert(image)
    img_data = img_data != 0

    image = smooth(image, 0.7)
    image *= img_data

    #get regions
    label_image = label(image)

    return label_image
Example #58
def post_procoess_v1(mask, roi_width=30):
    """Remove discrete pixels and close gaps
    Argument:
        roi width: corner detection range
    """
    # 1) Filling holes
    bw = label(mask == 1)
    bw = binary_opening(bw, rectangle(3, 15))
    bw = remove_small_objects(bw, min_size=4096, connectivity=2)
    bw = remove_small_holes(bw, min_size=4096, connectivity=2)
    # 2) Detect evil interval
    _, num = label(bw, return_num=True)
    if num > 1:
        print('Fixed discontinuity')
        bw = loop_close(num, bw)
    # 3) Detect breach in the edge
    w, rw = mask.shape[1], roi_width
    coords = corner_peaks(corner_harris(bw[:, w // 2 - rw:w // 2 + rw], k=0.2),
                          min_distance=10)
    if len(coords):
        print('Detected corner and filling')
        bw = loop_corner(coords, bw, w, rw)
    return bw.astype(np.uint8)
Example #59
    def _detect_octave(self, octave_image):
        # Extract keypoints for current octave
        fast_response = corner_fast(octave_image, self.fast_n,
                                    self.fast_threshold)
        keypoints = corner_peaks(fast_response, min_distance=1)

        if len(keypoints) == 0:
            return (np.zeros((0, 2), dtype=np.double),
                    np.zeros((0, ), dtype=np.double),
                    np.zeros((0, ), dtype=np.double))

        mask = _mask_border_keypoints(octave_image.shape, keypoints,
                                      distance=16)
        keypoints = keypoints[mask]

        orientations = corner_orientations(octave_image, keypoints,
                                           OFAST_MASK)

        harris_response = corner_harris(octave_image, method='k',
                                        k=self.harris_k)
        responses = harris_response[keypoints[:, 0], keypoints[:, 1]]

        return keypoints, orientations, responses
Example #60
def feat_2():
    I = imread('images/modelhouses.png')
    harris_I = feature.corner_harris(I)
    harrisSigma_I = feature.corner_harris(I, sigma=5.0)
    harrisK_I = feature.corner_harris(I, method='k', k=0.90)

    harrisSigma2_I = feature.corner_harris(I, sigma=20.0)
    harrisSigmaK_I = feature.corner_harris(I, method='k', sigma=5.0, k=0.90)
    harrisEPS_I = feature.corner_harris(I, eps=2, method='eps')
    harrisAll_I = feature.corner_harris(I, eps=1e-48, method='eps', sigma=4.2)

    fig, ax = plt.subplots(2, 2)
    ax[0][0].imshow(I, cmap='gray')
    ax[0][0].set_title('Original image "modelhouses.png"')
    ax[0][0].axis('off')
    ax[0][1].imshow(harris_I, cmap='gray')
    ax[0][1].set_title('corner_harris')
    ax[0][1].axis('off')
    ax[1][0].imshow(harrisSigma_I, cmap='gray')
    ax[1][0].set_title('corner_harris Sigma = 5.0')
    ax[1][0].axis('off')
    ax[1][1].imshow(harrisK_I, cmap='gray')
    ax[1][1].set_title('corner_harris k = 0.90')
    ax[1][1].axis('off')
    plt.show()

    fig, ax = plt.subplots(2, 2)
    ax[0][0].imshow(harrisSigma2_I, cmap='gray')
    ax[0][0].set_title('corner_harris Sigma = 20.0')
    ax[0][0].axis('off')
    ax[0][1].imshow(harrisSigmaK_I, cmap='gray')
    ax[0][1].set_title('corner_harris K=0.90, Sigma = 5')
    ax[0][1].axis('off')
    ax[1][0].imshow(harrisEPS_I, cmap='gray')
    ax[1][0].set_title('corner_harris eps=2')
    ax[1][0].axis('off')
    ax[1][1].imshow(harrisAll_I, cmap='gray')
    ax[1][1].set_title('corner_harris eps=1e-48, sigma=4.2')
    ax[1][1].axis('off')
    plt.show()