Example #1
def test_uniform_mode():
    """Verify the computed BRIEF descriptors with expected for uniform mode."""
    img = data.coins()

    keypoints = corner_peaks(corner_harris(img), min_distance=5, threshold_abs=0, threshold_rel=0.1)

    extractor = BRIEF(descriptor_size=8, sigma=2, mode="uniform")

    extractor.extract(img, keypoints[:8])

    expected = np.array(
        [
            [False, False, False, True, True, True, False, False],
            [True, True, True, False, True, False, False, True],
            [True, True, True, False, True, True, False, True],
            [True, True, True, True, False, True, False, True],
            [True, True, True, True, True, True, False, False],
            [True, True, True, True, True, True, True, True],
            [False, False, False, True, True, True, True, True],
            [False, True, False, True, False, True, True, True],
        ],
        dtype=bool,
    )

    assert_array_equal(extractor.descriptors, expected)
Example #2
def test_normal_mode():
    """Verify the computed BRIEF descriptors with expected for normal mode."""
    img = data.coins()

    keypoints = corner_peaks(corner_harris(img),
                             min_distance=5,
                             threshold_abs=0,
                             threshold_rel=0.1)

    extractor = BRIEF(descriptor_size=8, sigma=2)

    extractor.extract(img, keypoints[:8])

    expected = np.array(
        [[False, True, False, False, True, False, True, False],
         [True, False, True, True, False, True, False, False],
         [True, False, False, True, False, True, False, True],
         [True, True, True, True, False, True, False, True],
         [True, True, True, False, False, True, True, True],
         [False, False, False, False, True, False, False, False],
         [False, True, False, False, True, False, True, False],
         [False, False, False, False, False, False, False, False]],
        dtype=bool)

    assert_array_equal(extractor.descriptors, expected)
Example #3
def test_binary_descriptors_lena_rotation_crosscheck_true():
    """Verify matched keypoints and their corresponding masks results between
    lena image and its rotated version with the expected keypoint pairs with
    cross_check enabled."""
    img = data.lena()
    img = rgb2gray(img)
    tform = tf.SimilarityTransform(scale=1, rotation=0.15, translation=(0, 0))
    rotated_img = tf.warp(img, tform, clip=False)

    extractor = BRIEF(descriptor_size=512)

    keypoints1 = corner_peaks(corner_harris(img), min_distance=5)
    extractor.extract(img, keypoints1)
    descriptors1 = extractor.descriptors

    keypoints2 = corner_peaks(corner_harris(rotated_img), min_distance=5)
    extractor.extract(rotated_img, keypoints2)
    descriptors2 = extractor.descriptors

    matches = match_descriptors(descriptors1, descriptors2, cross_check=True)

    exp_matches1 = np.array([
        0, 1, 2, 4, 6, 7, 9, 10, 11, 12, 13, 15, 16, 17, 19, 20, 21, 24, 26,
        27, 28, 29, 30, 35, 36, 38, 39, 40, 42, 44, 45
    ])
    exp_matches2 = np.array([
        33, 0, 35, 1, 3, 2, 6, 4, 9, 11, 10, 7, 8, 5, 14, 13, 15, 16, 17, 18,
        19, 21, 22, 24, 23, 26, 27, 25, 28, 29, 30
    ])
    assert_equal(matches[:, 0], exp_matches1)
    assert_equal(matches[:, 1], exp_matches2)
Example #4
def get_brief_feats(img, kp):
    img_gray = rgb2gray(img)
    brief = BRIEF()
    brief.extract(img_gray, kp)
    descriptors = brief.descriptors

    return descriptors
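A usage sketch for the helper above; the Harris keypoint detection and the choice of data.astronaut() are assumptions added for illustration, since the original only shows the extraction step.

from skimage import data
from skimage.color import rgb2gray
from skimage.feature import corner_harris, corner_peaks

img = data.astronaut()                                   # RGB example image
kp = corner_peaks(corner_harris(rgb2gray(img)), min_distance=5)
descriptors = get_brief_feats(img, kp)
print(descriptors.shape)                                 # (n_keypoints_kept, 256) with the default BRIEF size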
Example #5
def calc_corners(*imgs):
    b = BRIEF()
    for c_img in imgs:
        corner_img = corner_harris(c_img)
        coords = corner_peaks(corner_img, min_distance=5)
        b.extract(c_img, coords)
        yield {"keypoints": coords, "descriptors": b.descriptors}
Example #6
def MatchPics(I1,I2):
    
    if I1.ndim == 3:
        I1 = rgb2gray(I1)   
    if I2.ndim == 3:
        I2 = rgb2gray(I2)
    
    points1 = corner_peaks(corner_fast(I1, n=12, threshold=0.15), min_distance=1)
    points2 = corner_peaks(corner_fast(I2, n=12, threshold=0.15), min_distance=1)
    
    extractor = BRIEF()
    
    extractor.extract(I1,points1)
    points1 = points1[extractor.mask]
    descriptors1 = extractor.descriptors
    
    extractor.extract(I2,points2)
    points2 = points2[extractor.mask]
    descriptors2 = extractor.descriptors
    
    matches = match_descriptors(descriptors1, descriptors2, metric='hamming', cross_check=True)
    
    #these points are y,x (row,col)
    locs1 = points1[matches[:,0]]
    locs2 = points2[matches[:,1]]
    #Change to x,y (col,row)
    xy1 = np.array([locs1[:,1],locs1[:,0]])
    xy1 = xy1.transpose()
    xy2 = np.array([locs2[:,1],locs2[:,0]])
    xy2 = xy2.transpose()
    fig, ax = plt.subplots()
    plot_matches(ax,I1,I2,points1,points2,matches,keypoints_color='r',only_matches=True)#,matches_color='y')
    
    return [xy1,xy2]
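The row/column swap above can also be written with a reversed column slice; a tiny equivalent sketch (locs1 here is just a stand-in (row, col) array):

import numpy as np

locs1 = np.array([[10, 3], [42, 7]])   # keypoints as (row, col)
xy1 = locs1[:, ::-1]                   # the same keypoints as (x, y), i.e. (col, row)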
Example #7
def test_binary_descriptors_rotation_crosscheck_true():
    """Verify matched keypoints and their corresponding masks results between
    image and its rotated version with the expected keypoint pairs with
    cross_check enabled."""
    img = data.astronaut()
    img = rgb2gray(img)
    tform = tf.SimilarityTransform(scale=1, rotation=0.15, translation=(0, 0))
    rotated_img = tf.warp(img, tform, clip=False)

    extractor = BRIEF(descriptor_size=512)

    keypoints1 = corner_peaks(corner_harris(img), min_distance=5,
                              threshold_abs=0, threshold_rel=0.1)
    extractor.extract(img, keypoints1)
    descriptors1 = extractor.descriptors

    keypoints2 = corner_peaks(corner_harris(rotated_img), min_distance=5,
                              threshold_abs=0, threshold_rel=0.1)
    extractor.extract(rotated_img, keypoints2)
    descriptors2 = extractor.descriptors

    matches = match_descriptors(descriptors1, descriptors2, cross_check=True)

    exp_matches1 = np.array([ 0,  2,  3,  4,  5,  6,  9, 11, 12, 13, 14, 17,
                             18, 19, 21, 22, 23, 26, 27, 28, 29, 31, 32, 33,
                             34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46])
    exp_matches2 = np.array([ 0,  2,  3,  1,  4,  6,  5,  7, 13, 10,  9, 11,
                             15,  8, 14, 12, 16, 18, 19, 21, 20, 24, 25, 26,
                             28, 27, 22, 23, 29, 30, 31, 32, 35, 33, 34, 36])
    assert_equal(matches[:, 0], exp_matches1)
    assert_equal(matches[:, 1], exp_matches2)
Example #8
def test_binary_descriptors_rotation_crosscheck_false():
    """Verify matched keypoints and their corresponding masks results between
    image and its rotated version with the expected keypoint pairs with
    cross_check disabled."""
    img = data.astronaut()
    img = rgb2gray(img)
    tform = transform.SimilarityTransform(scale=1,
                                          rotation=0.15,
                                          translation=(0, 0))
    rotated_img = transform.warp(img, tform, clip=False)

    extractor = BRIEF(descriptor_size=512)

    keypoints1 = corner_peaks(corner_harris(img),
                              min_distance=5,
                              threshold_abs=0,
                              threshold_rel=0.1)
    extractor.extract(img, keypoints1)
    descriptors1 = extractor.descriptors

    keypoints2 = corner_peaks(corner_harris(rotated_img),
                              min_distance=5,
                              threshold_abs=0,
                              threshold_rel=0.1)
    extractor.extract(rotated_img, keypoints2)
    descriptors2 = extractor.descriptors

    matches = match_descriptors(descriptors1, descriptors2, cross_check=False)

    exp_matches1 = np.array([
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
        20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
        38, 39, 40, 41, 42, 43, 44, 45, 46
    ])
    exp_matches2 = np.array([
        0, 31, 2, 3, 1, 4, 6, 4, 38, 5, 27, 7, 13, 10, 9, 27, 7, 11, 15, 8, 23,
        14, 12, 16, 10, 25, 18, 19, 21, 20, 41, 24, 25, 26, 28, 27, 22, 23, 29,
        30, 31, 32, 35, 33, 34, 30, 36
    ])
    assert_equal(matches[:, 0], exp_matches1)
    assert_equal(matches[:, 1], exp_matches2)

    # minkowski takes a different code path, therefore we test it explicitly
    matches = match_descriptors(descriptors1,
                                descriptors2,
                                metric='minkowski',
                                cross_check=False)
    assert_equal(matches[:, 0], exp_matches1)
    assert_equal(matches[:, 1], exp_matches2)

    # it also has an extra parameter
    matches = match_descriptors(descriptors1,
                                descriptors2,
                                metric='minkowski',
                                p=4,
                                cross_check=False)
    assert_equal(matches[:, 0], exp_matches1)
    assert_equal(matches[:, 1], exp_matches2)
Example #9
def test_border():
    img = np.zeros((100, 100))
    keypoints = np.array([[1, 1], [20, 20], [50, 50], [80, 80]])

    extractor = BRIEF(patch_size=41)
    extractor.extract(img, keypoints)

    assert extractor.descriptors.shape[0] == 3
    assert_array_equal(extractor.mask, (False, True, True, True))
Example #10
    def calculate(self, resource):
        except_image_only(resource)
        im = image2numpy(resource.image, remap='gray')
        keypoints = corner_peaks(corner_harris(im), min_distance=1)
        extractor = BRIEF_()
        extractor.extract(im, keypoints)

        # initializing rows for the table
        return (extractor.descriptors, keypoints[:,0], keypoints[:,1])
Example #12
    def process(self, img2, image_gray):
        # img2 = warp(img2)
        patch_size = [640]
        img2 = rgb2gray(img2)
        image_gray = rgb2gray(img2)

        blobs_dog = blob_dog(image_gray, min_sigma=0.2, max_sigma=225, sigma_ratio=1.6, threshold=.5)
        blobs_dog[:, 2] = blobs_dog[:, 2]

        blobs = [blobs_dog]
        colors = ['black']
        titles = ['Difference of Gaussian']
        sequence = zip(blobs, colors, titles)

        # plt.imshow(img2)
        # plt.axis("equal")
        # plt.show()

        for blobs, color, title in sequence:
            print(len(blobs))
            for blob in blobs:
                y, x, r = blob
                plotx = x
                ploty = y
                for i in range(3):
                    keypoints1 = corner_peaks(corner_harris(Array.image_arr[i]), min_distance=1)
                    keypoints2 = corner_peaks(corner_harris(img2), min_distance=1)

                    extractor = BRIEF(patch_size=30, mode="uniform")

                    extractor.extract(Array.image_arr[i], keypoints1)
                    keypoints1 = keypoints1[extractor.mask]
                    descriptors1 = extractor.descriptors

                    extractor.extract(img2, keypoints2)
                    keypoints2 = keypoints2[extractor.mask]
                    descriptors2 = extractor.descriptors

                    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
                    
                    # print(keypoints1, keypoints2)
                    # print(matches12)
                    for pizdezh in matches12:
                        X = keypoints2[pizdezh[1]][1]
                        Y = keypoints2[pizdezh[1]][0]

                    if sqrt((plotx - X)**2 + (ploty - Y)**2) < r:
                        seen = [{
                            "type": Array.type_arr[i],
                            "center_shift": (plotx - 160/2) * -0.02,
                            "distance": image_gray[y][x] / 0.08
                        }]
                        print(seen)
                        data.seen.add(seen)
                        break
Example #13
def produceMatches(imL1, imR1, panoramas=False, overlap_size=None):

    imL = imL1.copy()
    imR = imR1.copy()

    if panoramas:
        if overlap_size is None:
            overlap_size = int(imL.shape[1] * 0.4)
        imL[:, :-overlap_size, :] = 0
        imR[:, overlap_size:, :] = 0

    imLgray = rgb2gray(imL)
    imRgray = rgb2gray(imR)

    keypointsL = corner_peaks(corner_harris(imLgray),
                              threshold_rel=0.001,
                              min_distance=10)
    keypointsR = corner_peaks(corner_harris(imRgray),
                              threshold_rel=0.001,
                              min_distance=10)

    extractor = BRIEF()

    extractor.extract(imLgray, keypointsL)
    keypointsL = keypointsL[extractor.mask]
    descriptorsL = extractor.descriptors

    extractor.extract(imRgray, keypointsR)
    keypointsR = keypointsR[extractor.mask]
    descriptorsR = extractor.descriptors

    matchesLR = match_descriptors(descriptorsL, descriptorsR, cross_check=True)

    src = []
    dst = []
    for coord in matchesLR:
        src.append(keypointsL[coord[0]])
        dst.append(keypointsR[coord[1]])
    src = np.array(src)
    dst = np.array(dst)

    src_c = src.copy()
    dst_c = dst.copy()
    src_c[:, 1] = src[:, 0]
    src_c[:, 0] = src[:, 1]
    dst_c[:, 1] = dst[:, 0]
    dst_c[:, 0] = dst[:, 1]

    # robustly estimate affine transform model with RANSAC
    model_robust, inliers = ransac((src_c, dst_c),
                                   ProjectiveTransform,
                                   min_samples=4,
                                   residual_threshold=8,
                                   max_trials=250)

    return (matchesLR, model_robust, inliers)
Example #14
 def extract(self, sample):
     points = self._get_points(sample)
     radius = np.min(sample.size) // 4
     extractor = BRIEF(self.length, patch_size=radius)
     extractor.extract(sample.gray, points)
     points = points[extractor.mask]
     if not len(extractor.descriptors):
         raise FeatureExtractionError(self,
             'Could not extract BRIEF descriptor')
     descriptor = extractor.descriptors[0].tolist()
     return descriptor
Example #15
def keypoints_and_descriptors_brief(img):
    """Detect key point using BRIEF and return keypoints and descriptors.""" 
    gray = rgb2gray(img)
    extractor = BRIEF(patch_size=5)

    keypoints = corner_peaks(corner_harris(gray), min_distance=1)

    extractor.extract(gray, keypoints)
    keypoints = keypoints[extractor.mask]
    descriptors = extractor.descriptors

    return keypoints, descriptors
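A usage sketch for the function above, assuming two RGB views of the same scene (img_a and img_b are hypothetical names):

from skimage import data
from skimage.feature import match_descriptors
from skimage.transform import rotate

img_a = data.astronaut()                  # RGB example image
img_b = rotate(img_a, 10)                 # rotated copy of the same scene

kp_a, desc_a = keypoints_and_descriptors_brief(img_a)
kp_b, desc_b = keypoints_and_descriptors_brief(img_b)

matches = match_descriptors(desc_a, desc_b, cross_check=True)
print(matches.shape)                      # (n_matches, 2): index pairs into kp_a / kp_b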
Example #16
 def extract(self):
     points = self.get_points()
     radius = np.min(self.size) // 4
     extractor = BRIEF(type(self).length)
     extractor.extract(self.gray, points)
     points = points[extractor.mask]
     if len(extractor.descriptors):
         descriptor = extractor.descriptors[0].tolist()
         return descriptor
     else:
         print('Could not extract BRIEF descriptor')
         return [0] * type(self).length
Example #17
def BRIEF_descriptor(filename, intermediate_point):
    print("Making BRIEF descriptors...")

    # Load image and descriptor
    input = rgb2gray(io.imread(filename))
    extractor = BRIEF()

    # Convert 'list' to 'numpy.ndarray'
    intermediate_point_array = numpy.array(intermediate_point)

    # Descriptor extraction
    extractor.extract(input, intermediate_point_array)
    descriptors = extractor.descriptors

    return descriptors
Example #18
def extractDaisyDescriptors(img, patch_size):

    extractor = BRIEF(patch_size=patch_size)
    i_idx = np.arange(0, img.shape[0])
    j_idx = np.arange(0, img.shape[1])
    kps_i, kps_j = np.meshgrid(i_idx, j_idx)
    kps_i = kps_i.ravel()
    kps_i.shape = (kps_i.shape[0], 1)
    kps_j = kps_j.ravel()
    kps_j.shape = (kps_j.shape[0], 1)
    for i in range(img.shape[2]):
        extractor.extract(img[:, :, i], np.concatenate((kps_i, kps_j), axis=1))
        dsc = extractor.descriptors

    return dsc, kps_i, kps_j
Example #19
def extract_by_brief(img1, img2, min_distance=1):
    extractor = BRIEF()
    detector = CENSURE(mode='STAR')
    detector.detect(img1)
    keyp1 = detector.keypoints

    detector.detect(img2)
    keyp2 = detector.keypoints

    extractor.extract(img1, keyp1)
    desc1 = extractor.descriptors

    extractor.extract(img2, keyp2)
    desc2 = extractor.descriptors

    return [keyp1, keyp2, desc1, desc2]
Example #20
def apply_brief(left, right, descriptor_size, num_elements):
    """
    computes BRIEF descriptor on both images.
    :param left: left image.
    :param right: right image.
    :param descriptor_size: size of window of the BRIEF descriptor.
    :param num_elements: length of the feature vector.
    :return: (H x W) array, H = height and W = width, of type np.int64
    """
    # TODO: apply BRIEF descriptor on both images. You will have to convert the BRIEF feature vector to a int64.

    extractor = BRIEF(descriptor_size=num_elements,
                      patch_size=descriptor_size,
                      mode='normal')

    left_rows, left_cols = left.shape

    left_indices = np.empty((left_rows, left_cols, 2))
    left_indices[..., 0] = np.arange(left_rows)[:, None]
    left_indices[..., 1] = np.arange(left_cols)
    left_indices = left_indices.reshape(left_cols * left_rows, 2)

    right_rows, right_cols = right.shape

    right_indices = np.empty((right_rows, right_cols, 2))
    right_indices[..., 0] = np.arange(right_rows)[:, None]
    right_indices[..., 1] = np.arange(right_cols)
    right_indices = right_indices.reshape(right_cols * right_rows, 2)

    extractor.extract(left, left_indices)
    left_desc = extractor.descriptors.astype(np.int64).reshape(
        left_rows - descriptor_size + 1, left_cols - descriptor_size + 1, 128)
    extractor.extract(right, right_indices)
    right_desc = extractor.descriptors.astype(np.int64).reshape(
        right_rows - descriptor_size + 1, right_cols - descriptor_size + 1,
        128)

    left_desc = np.pad(np.apply_along_axis(convert_brief, 2, left_desc),
                       ((3, 4), (3, 4)))
    right_desc = np.pad(np.apply_along_axis(convert_brief, 2, right_desc),
                        ((3, 4), (3, 4)))

    return (left_desc, right_desc)
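The TODO in the docstring above mentions converting the boolean BRIEF vector to an int64; the original delegates that to convert_brief, which is not shown here. A minimal sketch of one way to pack up to 63 bits (an assumption, not the author's implementation):

import numpy as np

def pack_descriptor(bits):
    """Pack a boolean descriptor (length <= 63) into a single int64, MSB first."""
    value = np.int64(0)
    for bit in bits:
        value = (value << 1) | np.int64(bool(bit))
    return value

print(pack_descriptor(np.array([True, False, True, True])))   # 0b1011 -> 11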
Example #21
def test_uniform_mode():
    """Verify the computed BRIEF descriptors with expected for uniform mode."""
    img = rgb2gray(data.lena())

    keypoints = corner_peaks(corner_harris(img), min_distance=5)

    extractor = BRIEF(descriptor_size=8, sigma=2, mode='uniform')

    extractor.extract(img, keypoints[:8])

    expected = np.array([[ True, False,  True, False, False,  True, False, False],
                         [False,  True, False, False,  True,  True,  True,  True],
                         [ True, False, False, False, False, False, False, False],
                         [False,  True,  True, False, False, False,  True, False],
                         [False, False, False, False, False, False,  True, False],
                         [False,  True, False, False,  True, False, False, False],
                         [False, False,  True,  True, False, False,  True,  True],
                         [ True,  True, False, False, False, False, False, False]], dtype=bool)

    assert_array_equal(extractor.descriptors, expected)
Example #22
def calculate_descriptors(X):
    extractor = BRIEF()

    Descriptors = []
    for i in range(len(X)):
        Im = np.asarray(X[i, :, :, :], dtype='float32')
        Max = np.amax(Im)
        Im = Im / Max
        Im = rgb2gray(Im)
        keypoints = corner_peaks(corner_harris(Im), min_distance=5)
        extractor.extract(Im, keypoints)
        Temp = extractor.descriptors
        Descriptors.append(
            np.asarray(np.round(np.average(Temp, axis=0)), dtype='int32'))

    Descriptors_matrix = np.zeros([len(X), 256])
    for i in range(len(X)):
        Descriptors_matrix[i, :] = Descriptors[i]

    return Descriptors_matrix
Example #23
def test_normal_mode():
    """Verify the computed BRIEF descriptors with expected for normal mode."""
    img = data.coins()

    keypoints = corner_peaks(corner_harris(img), min_distance=5)

    extractor = BRIEF(descriptor_size=8, sigma=2)

    extractor.extract(img, keypoints[:8])

    expected = np.array([[False,  True, False, False,  True, False,  True, False],
                         [ True, False,  True,  True, False,  True, False, False],
                         [ True, False, False,  True, False,  True, False,  True],
                         [ True,  True,  True,  True, False,  True, False,  True],
                         [ True,  True,  True, False, False,  True,  True,  True],
                         [False, False, False, False,  True, False, False, False],
                         [False,  True, False, False,  True, False,  True, False],
                         [False, False, False, False, False, False, False, False]], dtype=bool)

    assert_array_equal(extractor.descriptors, expected)
Example #24
def test_uniform_mode(dtype):
    """Verify the computed BRIEF descriptors with expected for uniform mode."""
    img = data.coins().astype(dtype)

    keypoints = corner_peaks(corner_harris(img),
                             min_distance=5,
                             threshold_abs=0,
                             threshold_rel=0.1)

    extractor = BRIEF(descriptor_size=8, sigma=2, mode='uniform')

    extractor.extract(img, keypoints[:8])

    expected = np.array([[1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 1, 0, 0],
                         [1, 1, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1],
                         [1, 1, 1, 0, 0, 1, 0, 0], [1, 1, 1, 1, 0, 1, 0, 0],
                         [1, 1, 0, 0, 0, 1, 0, 0], [0, 1, 1, 1, 0, 1, 1, 1]],
                        dtype=bool)

    assert_array_equal(extractor.descriptors, expected)
Example #25
def test_uniform_mode():
    """Verify the computed BRIEF descriptors with expected for uniform mode."""
    img = data.coins()

    keypoints = corner_peaks(corner_harris(img), min_distance=5)

    extractor = BRIEF(descriptor_size=8, sigma=2, mode='uniform')

    extractor.extract(img, keypoints[:8])

    expected = np.array([[False, False, False, True, True, True, False, False],
                         [True, True, True, False, True, False, False, True],
                         [True, True, True, False, True, True, False, True],
                         [True, True, True, True, False, True, False, True],
                         [True, True, True, True, True, True, False, False],
                         [True, True, True, True, True, True, True, True],
                         [False, False, False, True, True, True, True, True],
                         [False, True, False, True, False, True, True, True]],
                        dtype=bool)

    assert_array_equal(extractor.descriptors, expected)
Example #26
com_b = algo.commie(Sb)
base = np.array([[com_a['x'], com_a['y'], com_b['x'], com_b['y']]])

# Detect key points
target = int(np.floor(min([Ea.sum(), Eb.sum()]) * keyfill))
print("Picking key points ... ", end="")
kp_a = algo.spawn(Ea, base[:, [0, 1]], target, r_min=detail)
kp_b = algo.spawn(Eb, base[:, [2, 3]], target, r_min=detail)
print("done")

# Match key points
print("Matching key points ... ", end="")
stopwatch = -time()
extractor = BRIEF(patch_size=patch_size, sigma=0)

extractor.extract(Ga, kp_a[:, [1, 0]])
dc_a = extractor.descriptors

extractor.extract(Gb, kp_b[:, [1, 0]])
dc_b = extractor.descriptors

matches = match_descriptors(dc_a, dc_b)
nodes = np.column_stack((kp_a[matches[:, 0]], kp_b[matches[:, 1]]))

stopwatch += time()
print("done in {0:.3f}s".format(stopwatch))

##########
# Review #
##########
Example #27
from skimage.color import rgb2gray
import matplotlib.pyplot as plt


img1 = rgb2gray(data.astronaut())
tform = tf.AffineTransform(scale=(1.2, 1.2), translation=(0, -100))
img2 = tf.warp(img1, tform)
img3 = tf.rotate(img1, 25)

keypoints1 = corner_peaks(corner_harris(img1), min_distance=5)
keypoints2 = corner_peaks(corner_harris(img2), min_distance=5)
keypoints3 = corner_peaks(corner_harris(img3), min_distance=5)

extractor = BRIEF()

extractor.extract(img1, keypoints1)
keypoints1 = keypoints1[extractor.mask]
descriptors1 = extractor.descriptors

extractor.extract(img2, keypoints2)
keypoints2 = keypoints2[extractor.mask]
descriptors2 = extractor.descriptors

extractor.extract(img3, keypoints3)
keypoints3 = keypoints3[extractor.mask]
descriptors3 = extractor.descriptors

matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)

fig, ax = plt.subplots(nrows=2, ncols=1)
Example #28
frame_number = '%06d' % frame  # zero-padded six-digit frame index

# load the image into a NUMPY array using matplotlib's imread function
left_img_file = root_pathname + image_folder + sequence_number + left_camera + frame_number + '.png'
l_image = plt.imread(left_img_file)
right_img_file = root_pathname + image_folder + sequence_number + right_camera + frame_number + '.png'
r_image = plt.imread(right_img_file)

# find Harris corner features in each camera
l_keypoints = corner_peaks(corner_harris(l_image), min_distance=10)
r_keypoints = corner_peaks(corner_harris(r_image), min_distance=10)
# TODO Replace the two lines above with the Shi-Tomasi detector

# for each corner found, extract the BRIEF descriptor
extractor = BRIEF(sigma=1.0)
extractor.extract(l_image, l_keypoints)
l_descriptors = extractor.descriptors

# not all keypoints get descriptors. Remove the ones that didn't:
mask = extractor.mask
l_keypoints = l_keypoints[mask]

extractor.extract(r_image, r_keypoints)
r_descriptors = extractor.descriptors
mask = extractor.mask
r_keypoints = r_keypoints[mask]

# plot the found keypoints on top of the left image
fig, ax = plt.subplots(figsize=(20, 5))
plt.imshow(l_image, cmap=cm.gray)
plt.plot(l_keypoints[:, 1], l_keypoints[:, 0], '+', r_keypoints[:, 1],
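The TODO above asks for the Shi-Tomasi detector; a drop-in sketch using skimage's corner_shi_tomasi with the same corner_peaks parameters (an assumption, not the original solution):

from skimage.feature import corner_peaks, corner_shi_tomasi

l_keypoints = corner_peaks(corner_shi_tomasi(l_image), min_distance=10)
r_keypoints = corner_peaks(corner_shi_tomasi(r_image), min_distance=10)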
Example #29
def apply_brief(left, right, descriptor_size, num_elements, pad):
    """
    computes BRIEF descriptor on both images.
    :param left: left image.
    :param right: right image.
    :param descriptor_size: size of window of the BRIEF descriptor.
    :param num_elements: length of the feature vector.
    :return: (H x W) array, H = height and W = width, of type np.int64
    """

    # TODO: apply BRIEF descriptor on both images. You will have to convert the BRIEF feature vector to a int64.
    imSize = left.shape
    height = imSize[0]
    width = imSize[1]

    keyPoints = np.array([[i % height, int(i / height)]
                          for i in range(height * width)], np.int32)

    leftExtractor = BRIEF(descriptor_size=num_elements,
                          patch_size=descriptor_size * descriptor_size,
                          mode='normal')
    rightExtractor = BRIEF(descriptor_size=num_elements,
                           patch_size=descriptor_size * descriptor_size,
                           mode='normal')

    leftExtractor.extract(left, keyPoints)
    rightExtractor.extract(right, keyPoints)

    leftKeyPoints = keyPoints[leftExtractor.mask]
    rightKeyPoints = keyPoints[rightExtractor.mask]

    leftOutput = np.zeros((height, width), np.int64)
    rightOutput = np.zeros((height, width), np.int64)

    leftDescCount = 0
    rightDescCount = 0

    for point in leftKeyPoints:
        i = point[0]
        j = point[1]
        value = 0
        for k in range(num_elements):
            if (leftExtractor.descriptors[leftDescCount][num_elements - 1 -
                                                         k]):
                value += pow(2, k)
        leftOutput[i][j] = np.int64(value)
        leftDescCount += 1

    for point in rightKeyPoints:
        i = point[0]
        j = point[1]
        value = 0
        for k in range(num_elements):
            if (rightExtractor.descriptors[rightDescCount][num_elements - 1 -
                                                           k]):
                value += pow(2, k)
        rightOutput[i][j] = np.int64(value)
        rightDescCount += 1

#    for i in range(height):
#        for j in range(width - 1):
#            leftVal = leftOutput[i][j]
#            leftNeighbour = leftOutput[i][j+1]
#            if(leftVal == 0 and leftNeighbour != 0):
#                leftOutput[i][j] = leftNeighbour
#
#            rightVal = rightOutput[i][j]
#            rightNeighbour = rightOutput[i][j+1]
#            if(rightVal == 0 and rightNeighbour != 0):
#                rightOutput[i][j] = rightNeighbour

#
#    for i in range(height):
#        for j in range(width):
#            id = j*height + i
#            if(leftExtractor.mask[id]):
#                value = 0
#                for k in range(num_elements):
#                    if(leftExtractor.descriptors[leftDescCount][num_elements - 1 - k]):
#                        value += pow(2, k)
#                leftOutput[i][j] = np.int64(value)
#                leftDescCount += 1
#            else:
#                leftOutput[i][j] = np.int64(rd.randint(0, pow(2, num_elements) - 1))
#            if(rightExtractor.mask[id]):
#                value = 0
#                for k in range(num_elements):
#                    if(rightExtractor.descriptors[rightDescCount][num_elements - 1 - k]):
#                        value += pow(2, k)
#                rightOutput[i][j] = np.int64(value)
#                rightDescCount += 1
#            else:
#                rightOutput[i][j] = np.int64(rd.randint(0, pow(2, num_elements) - 1))
#
    print(leftDescCount, rightDescCount)

    leftDesc = leftOutput[pad:(height - pad), pad:(width - pad)]
    rightDesc = rightOutput[pad:(height - pad), pad:(width - pad)]

    print(height, width)
    print(leftOutput.shape, rightOutput.shape)
    print(leftDesc.shape, rightDesc.shape)

    return leftDesc, rightDesc
Example #30
def start_ransac(img1, img2, brief, common_factor=0.25):

    #https://www.researchgate.net/publication/264197576_scikit-image_Image_processing_in_Python
    img1 = transform.rescale(img1, common_factor, multichannel=False)
    img2 = transform.rescale(img2, common_factor, multichannel=False)

    print(img1.shape)
    print(img2.shape)

    if brief:
        #BRIEF
        keypoints1 = corner_peaks(corner_harris(img1), min_distance=5)
        keypoints2 = corner_peaks(corner_harris(img2), min_distance=5)

        extractor = BRIEF()

        extractor.extract(img1, keypoints1)
        keypoints1 = keypoints1[extractor.mask]
        descriptors1 = extractor.descriptors

        extractor.extract(img2, keypoints2)
        keypoints2 = keypoints2[extractor.mask]
        descriptors2 = extractor.descriptors

        matches12 = match_descriptors(descriptors1,
                                      descriptors2,
                                      cross_check=True)
    else:
        #ORB
        orb = ORB(n_keypoints=1000, fast_threshold=0.05)

        orb.detect_and_extract(img1)
        keypoints1 = orb.keypoints
        desciptors1 = orb.descriptors

        orb.detect_and_extract(img2)
        keypoints2 = orb.keypoints
        desciptors2 = orb.descriptors

        matches12 = match_descriptors(desciptors1,
                                      desciptors2,
                                      cross_check=True)

    src = keypoints2[matches12[:, 1]][:, ::-1]
    dst = keypoints1[matches12[:, 0]][:, ::-1]

    model_robust, inliers = \
        ransac((src, dst), transform.SimilarityTransform, min_samples=4, residual_threshold=2)

    #r, c = img2.shape[:2]

    #corners = np.array([[0, 0],
    #    [0, r],
    #    [c, 0],
    #[c,r]])

    #warped_corners = model_robust(corners)
    #all_corners = np.vstack((warped_corners, corners))

    #corner_min = np.min(all_corners, axis=0)
    #corner_max = np.max(all_corners, axis=0)

    #output_shape = (corner_max - corner_min)
    #output_shape = np.ceil(output_shape[::-1])

    #offset = transform.SimilarityTransform(translation=-corner_min)

    #Not really cool rescaling
    #offset_tmatrix =  np.copy(offset.params)
    #offset_tmatrix[0, 2] = offset_tmatrix[0, 2]/common_factor
    #offset_tmatrix[0, 2] = offset_tmatrix[0, 2]/rescale_trans
    #offset_tmatrix[1, 2] = offset_tmatrix[1, 2]/common_factor
    #offset_tmatrix[1, 2] = offset_tmatrix[1, 2]/rescale_trans

    model_robust_tmatrix = np.copy(model_robust.params)
    model_robust_tmatrix[0, 2] = model_robust_tmatrix[0, 2] / common_factor
    #model_robust_tmatrix[0, 2] = model_robust_tmatrix[0, 2]/rescale_trans
    model_robust_tmatrix[1, 2] = model_robust_tmatrix[1, 2] / common_factor
    #model_robust_tmatrix[1, 2] = model_robust_tmatrix[1, 2]/rescale_trans

    #model_robust_offset_tmatrix = np.copy((model_robust+offset).params)
    #model_robust_offset_tmatrix[0, 2] = offset_tmatrix[0, 2] + model_robust_tmatrix[0, 2]
    #model_robust_offset_tmatrix[1, 2] = offset_tmatrix[1, 2] + model_robust_tmatrix[1, 2]

    #factor2 = 1.05
    #img3_ = warp(img3, np.linalg.inv(offset_tmatrix), output_shape=(img3.shape[0]*factor2, img3.shape[1]*factor2))
    #img4_ = warp(img4, np.linalg.inv(model_robust_offset_tmatrix), output_shape=(img3.shape[0]*factor2, img3.shape[1]*factor2))

    img1_ = img1  #= warp(img1, offset.inverse, output_shape=output_shape, cval=-1)
    img2_ = warp(
        img2, model_robust.inverse, cval=-1
    )  #(model_robust+offset).inverse, output_shape=output_shape, cval=-1)

    fig = plt.figure(constrained_layout=True)
    gs = fig.add_gridspec(3, 2)
    f_ax1 = fig.add_subplot(gs[0, :])
    plot_matches(f_ax1, img1, img2, keypoints1, keypoints2, matches12)
    f_ax1.axis('off')
    #f_ax1.set_title(filename1+" vs. "+filename2)
    f_ax2 = fig.add_subplot(gs[1, 0])
    f_ax2.imshow(img1)
    f_ax2.axis('off')
    f_ax2.set_title("img1")
    f_ax3 = fig.add_subplot(gs[1, 1])
    f_ax3.imshow(img1_)
    f_ax3.axis('off')
    f_ax3.set_title("img1_")
    #f_ax4 = fig.add_subplot(gs[1, 2])
    #f_ax4.imshow(img3_)
    #f_ax4.axis('off')
    #f_ax4.set_title("img3_")
    f_ax5 = fig.add_subplot(gs[2, 0])
    f_ax5.imshow(img2)
    f_ax5.axis('off')
    f_ax5.set_title("img2")
    f_ax6 = fig.add_subplot(gs[2, 1])
    f_ax6.imshow(img2_)
    f_ax6.axis('off')
    f_ax6.set_title("img2_")
    #f_ax7 = fig.add_subplot(gs[2, 2])
    #f_ax7.imshow(img4_)
    #f_ax7.axis('off')
    #f_ax7.set_title("img4_")
    plt.show()

    return model_robust_tmatrix
Example #31
def brief(img):
    keypoints = corner_peaks(corner_harris(img), min_distance=1)
    extractor = BRIEF(descriptor_size=64, patch_size=12)
    extractor.extract(img, keypoints)
    print(extractor.descriptors)
    return extractor.descriptors
Example #32
def start_ransac(img1, img2, brief=True, common_factor=0.25):

    img1 = transform.rescale(img1, common_factor, multichannel=False)
    img2 = transform.rescale(img2, common_factor, multichannel=False)

    print(img1.shape)
    print(img2.shape)

    if brief:
        #BRIEF
        keypoints1 = corner_peaks(corner_harris(img1), min_distance=5)
        keypoints2 = corner_peaks(corner_harris(img2), min_distance=5)

        extractor = BRIEF()

        extractor.extract(img1, keypoints1)
        keypoints1 = keypoints1[extractor.mask]
        descriptors1 = extractor.descriptors

        extractor.extract(img2, keypoints2)
        keypoints2 = keypoints2[extractor.mask]
        descriptors2 = extractor.descriptors

        matches12 = match_descriptors(descriptors1,
                                      descriptors2,
                                      cross_check=True)
    else:
        #ORB
        orb = ORB(n_keypoints=1000, fast_threshold=0.05)

        orb.detect_and_extract(img1)
        keypoints1 = orb.keypoints
        desciptors1 = orb.descriptors

        orb.detect_and_extract(img2)
        keypoints2 = orb.keypoints
        desciptors2 = orb.descriptors

        matches12 = match_descriptors(desciptors1,
                                      desciptors2,
                                      cross_check=True)

    src = keypoints2[matches12[:, 1]][:, ::-1]
    dst = keypoints1[matches12[:, 0]][:, ::-1]

    model_robust, inliers = \
        ransac((src, dst), transform.SimilarityTransform, min_samples=4, residual_threshold=2)

    model_robust_tmatrix = np.copy(model_robust.params)
    model_robust_tmatrix[0, 2] = model_robust_tmatrix[0, 2] / common_factor
    model_robust_tmatrix[1, 2] = model_robust_tmatrix[1, 2] / common_factor

    img1_ = img1
    img2_ = warp(img2, model_robust.inverse)

    if False:

        fig = plt.figure(constrained_layout=True)
        gs = fig.add_gridspec(3, 2)
        f_ax1 = fig.add_subplot(gs[0, :])
        plot_matches(f_ax1, img1, img2, keypoints1, keypoints2, matches12)
        f_ax1.axis('off')
        f_ax2 = fig.add_subplot(gs[1, 0])
        f_ax2.imshow(img1)
        f_ax2.axis('off')
        f_ax2.set_title("img1")
        f_ax3 = fig.add_subplot(gs[1, 1])
        f_ax3.imshow(img1_)
        f_ax3.axis('off')
        f_ax3.set_title("img1_")
        #f_ax4 = fig.add_subplot(gs[1, 2])
        #f_ax4.imshow(img3_)
        #f_ax4.axis('off')
        #f_ax4.set_title("img3_")
        f_ax5 = fig.add_subplot(gs[2, 0])
        f_ax5.imshow(img2)
        f_ax5.axis('off')
        f_ax5.set_title("img2")
        f_ax6 = fig.add_subplot(gs[2, 1])
        f_ax6.imshow(img2_)
        f_ax6.axis('off')
        f_ax6.set_title("img2_")
        #f_ax7 = fig.add_subplot(gs[2, 2])
        #f_ax7.imshow(img4_)
        #f_ax7.axis('off')
        #f_ax7.set_title("img4_")
        plt.show()

    return model_robust_tmatrix
Example #33
def apply_brief(left, right, descriptor_size, num_elements):
    """
    computes BRIEF descriptor on both images.
    :param left: left image.
    :param right: right image.
    :param descriptor_size: size of window of the BRIEF descriptor.
    :param num_elements: length of the feature vector.
    :return: (H x W) array, H = height and W = width, of type np.int64
    """

    # Generate the list of keypoints. With a dense approach, every pixel is a keypoint.
    pixels_coordonates = [[[j, i] for i in range(left.shape[1])]
                          for j in range(left.shape[0])]
    # The keypoint list must be given as a list of coordinates.
    keypoints = np.array(pixels_coordonates).reshape(
        (left.shape[1] * left.shape[0]), 2)

    # Use BRIEF from the skimage package to obtain a descriptor for every pixel.
    extractor = BRIEF(descriptor_size=num_elements,
                      patch_size=descriptor_size,
                      mode='normal')

    extractor.extract(left, keypoints)
    descriptors1 = extractor.descriptors

    extractor.extract(right, keypoints)
    descriptors2 = extractor.descriptors

    # We now have a vector describing the image pixel by pixel.
    # To apply SGM we must rebuild the original image shape.
    # Depending on the analysis window size (descriptor_size), the image borders
    # could not be processed, so the image size is reduced by the descriptor size.
    descriptors1.resize((left.shape[0] - descriptor_size + 1,
                         left.shape[1] - descriptor_size + 1, num_elements),
                        refcheck=False)
    descriptors2.resize((left.shape[0] - descriptor_size + 1,
                         left.shape[1] - descriptor_size + 1, num_elements),
                        refcheck=False)

    # Here the descriptor is 128 bits long.
    # The problem is that SGM and the Hamming distance take an integer as input,
    # so the 128 bits have to be turned into a single integer.
    # NumPy and the subsequent processing (Hamming distance) do not allow integers wider than 64 bits,
    # so the descriptor dimensionality must be reduced.
    # Several reduction factors were tested; keeping 1 bit out of every 20 was chosen,
    # the largest reduction tried that did not alter the results on the cones image.

    concat_desc1 = np.apply_along_axis(
        lambda list: int(''.join(
            [str(int(v)) if i % 20 == 0 else '' for i, v in enumerate(list)])),
        2, descriptors1)
    concat_desc2 = np.apply_along_axis(
        lambda list: int(''.join(
            [str(int(v)) if i % 20 == 0 else '' for i, v in enumerate(list)])),
        2, descriptors2)

    # Finally, since a matrix the same size as the input image is required,
    # zeros are added where information is missing, along the image borders.
    padding_pattern = (descriptor_size // 2 - 1, descriptor_size // 2)

    padded_descr1 = np.pad(concat_desc1, (padding_pattern, padding_pattern),
                           'constant')
    padded_descr2 = np.pad(concat_desc2, (padding_pattern, padding_pattern),
                           'constant')

    return (padded_descr1, padded_descr2)
Example #34
class FeatureDisplacementReward():
    def __init__(self):
        self.memeories = []
        self.fdMax = cfg.shot_h * cfg.shot_h + cfg.shot_w * cfg.shot_w
        self.extractor = BRIEF()

    def median_outlier_filter(self, signal, threshold=3):
        signal = signal.copy()
        diff = np.abs(signal - np.median(signal))
        median_diff = np.median(diff)
        s = diff / (float(median_diff) + 1e-6)
        mask = s > threshold
        signal[mask] = np.median(signal)
        return signal

    def get_feature_displacment(self, img1, img2):

        # if cfg.shot_c == 3:
        #     img1 = rgb2gray(img1)
        #     img2 = rgb2gray(img2)
        keypoints1 = corner_peaks(corner_harris(img1),
                                  min_distance=5,
                                  threshold_rel=0.02)
        keypoints2 = corner_peaks(corner_harris(img2),
                                  min_distance=5,
                                  threshold_rel=0.02)

        self.extractor.extract(img1, keypoints1)
        keypoints1 = keypoints1[self.extractor.mask]
        descriptors1 = self.extractor.descriptors

        self.extractor.extract(img2, keypoints2)
        keypoints2 = keypoints2[self.extractor.mask]
        descriptors2 = self.extractor.descriptors

        if descriptors1.shape[0] == 0 or descriptors2.shape[0] == 0:
            return None  # no feature found

        matches = match_descriptors(descriptors1,
                                    descriptors2,
                                    cross_check=True,
                                    max_ratio=0.85)
        if matches.shape[0] < 4: return None
        dist = np.sum(
            (keypoints1[matches[:, 0]] - keypoints2[matches[:, 1]])**2, axis=1)
        return np.mean(self.median_outlier_filter(dist))

    def get_reward(self, pre_scrshot, cur_scrshot):
        # pre_scrshot = np.squeeze(pre_scrshot) # before action
        cur_scrshot = np.squeeze(cur_scrshot)  # after action

        # pre_scrshot = (pre_scrshot*255).astype(dtype=int)
        cur_scrshot = (cur_scrshot * 255).astype(dtype=int)

        min_mem_fd = self.fdMax
        min_mem_id = -1
        for id, mem_shot in enumerate(self.memeories):
            fd = self.get_feature_displacment(cur_scrshot, mem_shot)
            if fd:
                min_mem_fd = min(fd, min_mem_fd)
                min_mem_id = id
        if min_mem_id == len(
                self.memeories
        ) - 1:  # if cur_shot is closer to last memory position
            self.memeories.append(cur_scrshot)
            return cfg.base_reward * min_mem_fd / self.fdMax
        elif min_mem_id > 0:  # cur_shot is closer to older memory position
            return 0
        else:  # cur_scrshot has no features similar to any memory at all
            self.memeories.append(cur_scrshot)
            return cfg.base_reward

    def clear(self):
        self.memeories = []


# end class SimilarityReward
Example #35
def compute_costs(left, right, parameters, save_images):
    """
    first step of the sgm algorithm, matching cost based on the chosen descriptor
        A) census transform (BRIEF) and hamming distance
        B) HOG and SSD
    :param left: left image.
    :param right: right image.
    :param parameters: structure containing parameters of the algorithm.
    :param save_images: whether to save census images or not.
    :return: H x W x D array with the matching costs.
    """
    assert left.shape[0] == right.shape[0] and left.shape[1] == right.shape[
        1], 'left & right must have the same shape.'
    assert parameters.max_disparity > 0, 'maximum disparity must be greater than 0.'

    descriptor = parameters.descriptor

    height = left.shape[0]
    width = left.shape[1]
    cheight = parameters.csize[0]
    cwidth = parameters.csize[1]
    y_offset = int(cheight / 2)
    x_offset = int(cwidth / 2)
    disparity = parameters.max_disparity

    if descriptor == "BRIEF":
        brief_extractor = BRIEF(
            descriptor_size=parameters.BRIEF_descriptor_size,
            patch_size=cheight,
            mode='normal')
        img_dtype = np.uint8
        left_features = np.zeros(shape=(height, width,
                                        parameters.BRIEF_descriptor_size),
                                 dtype=np.bool)
        right_features = np.zeros(shape=(height, width,
                                         parameters.BRIEF_descriptor_size),
                                  dtype=np.bool)
    elif descriptor == "HOG":
        img_dtype = np.float
        left_features = np.zeros(shape=(height, width,
                                        parameters.HOG_orientations),
                                 dtype=np.float)
        right_features = np.zeros(shape=(height, width,
                                         parameters.HOG_orientations),
                                  dtype=np.float)
    else:
        img_dtype = np.uint8
        left_features = np.zeros(shape=(height, width), dtype=np.uint64)
        right_features = np.zeros(shape=(height, width), dtype=np.uint64)
    left_features_img = np.zeros(shape=(height, width), dtype=img_dtype)
    right_features_img = np.zeros(shape=(height, width), dtype=img_dtype)

    print('\tComputing left and right features...', end='')
    sys.stdout.flush()
    dawn = t.time()
    if descriptor == 'BRIEF':
        pixels = cartesian([np.arange(height), np.arange(width)])
        # LEFT
        brief_extractor.extract(left, pixels)
        descriptors = brief_extractor.descriptors
        if cheight == 7:
            left_features[2:-3, 2:-3] = np.reshape(
                descriptors, (height - cheight + 2, width - cwidth + 2,
                              left_features.shape[-1]))  # for cell of 7x7
        elif cheight == 15:
            left_features[6:-7, 6:-7] = np.reshape(
                descriptors, (height - cheight + 2, width - cwidth + 2,
                              left_features.shape[-1]))  # for cell of 15x15
        elif cheight == 49:
            left_features[23:-24, 23:-24] = np.reshape(
                descriptors, (height - cheight + 2, width - cwidth + 2,
                              left_features.shape[-1]))  # for cell of 49x49
        left_features_img[:] = left_features.sum(axis=-1)
        # RIGHT
        brief_extractor.extract(right, pixels)
        descriptors = brief_extractor.descriptors
        if cheight == 7:
            right_features[2:-3, 2:-3] = np.reshape(
                descriptors, (height - cheight + 2, width - cwidth + 2,
                              right_features.shape[-1]))  # for cell of 7x7
        elif cheight == 15:
            right_features[6:-7, 6:-7] = np.reshape(
                descriptors, (height - cheight + 2, width - cwidth + 2,
                              right_features.shape[-1]))  # for cell of 15x15
        elif cheight == 49:
            right_features[23:-24, 23:-24] = np.reshape(
                descriptors, (height - cheight + 2, width - cwidth + 2,
                              right_features.shape[-1]))  # for cell of 49x49
        right_features_img[:] = right_features.sum(axis=-1)
    # pixels on the border will have no features
    for y in range(y_offset, height - y_offset):
        for x in range(x_offset, width - x_offset):
            # LEFT
            image = left[(y - y_offset):(y + y_offset + 1),
                         (x - x_offset):(x + x_offset + 1)]
            if descriptor == 'HOG':
                left_features[y, x] = hog(image,
                                          parameters.HOG_orientations,
                                          pixels_per_cell=(cheight, cwidth),
                                          cells_per_block=(1, 1))
                left_features_img[y, x] = left_features[y, x].sum()
            elif descriptor == 'census':
                center_pixel = left[y, x]
                reference = np.full(shape=(cheight, cwidth),
                                    fill_value=center_pixel,
                                    dtype=np.int64)
                comparison = image - reference
                left_census = np.int64(0)
                for j in range(comparison.shape[0]):
                    for i in range(comparison.shape[1]):
                        if (i, j) != (y_offset, x_offset):
                            left_census = left_census << 1
                            if comparison[j, i] < 0:
                                bit = 1
                            else:
                                bit = 0
                            left_census = left_census | bit
                left_features_img[y, x] = np.uint8(left_census)
                left_features[y, x] = left_census

            # RIGHT
            image = right[(y - y_offset):(y + y_offset + 1),
                          (x - x_offset):(x + x_offset + 1)]
            if descriptor == 'HOG':
                right_features[y, x] = hog(image,
                                           parameters.HOG_orientations,
                                           pixels_per_cell=(cheight, cwidth),
                                           cells_per_block=(1, 1))
                right_features_img[y, x] = right_features[y, x].sum()
            elif descriptor == 'census':
                center_pixel = right[y, x]
                reference = np.full(shape=(cheight, cwidth),
                                    fill_value=center_pixel,
                                    dtype=np.int64)
                comparison = image - reference
                right_census = np.int64(0)
                for j in range(comparison.shape[0]):
                    for i in range(comparison.shape[1]):
                        if (i, j) != (y_offset, x_offset):
                            right_census = right_census << 1
                            if comparison[j, i] < 0:
                                bit = 1
                            else:
                                bit = 0
                            right_census = right_census | bit
                right_features_img[y, x] = np.uint8(right_census)
                right_features[y, x] = right_census

    dusk = t.time()
    print('\t(done in {:.2f}s)'.format(dusk - dawn))

    if save_images:
        if descriptor != "census":
            # Normalizing the summed features for visualization
            left_features_img = 255 * (
                left_features_img - left_features_img.min()).astype(
                    np.float) / (left_features_img.max() -
                                 left_features_img.min()).astype(np.float)
            right_features_img = 255 * (
                right_features_img - right_features_img.min()).astype(
                    np.float) / (right_features_img.max() -
                                 right_features_img.min()).astype(np.float)
        cv2.imwrite(f'{parameters.folder}/left_features.png',
                    left_features_img)
        cv2.imwrite(f'{parameters.folder}/right_features.png',
                    right_features_img)

    print('\tComputing cost volumes...', end='')
    sys.stdout.flush()
    dawn = t.time()

    if descriptor == "BRIEF":
        cost_volume_dtype = np.uint16
        lfeatures = np.zeros(shape=(height, width,
                                    parameters.BRIEF_descriptor_size),
                             dtype=np.bool)
        rfeatures = np.zeros(shape=(height, width,
                                    parameters.BRIEF_descriptor_size),
                             dtype=np.bool)
    elif descriptor == "HOG":
        cost_volume_dtype = np.float
        lfeatures = np.zeros(shape=(height, width,
                                    parameters.HOG_orientations),
                             dtype=np.float)
        rfeatures = np.zeros(shape=(height, width,
                                    parameters.HOG_orientations),
                             dtype=np.float)
    else:
        cost_volume_dtype = np.uint32
        lfeatures = np.zeros(shape=(height, width), dtype=np.int64)
        rfeatures = np.zeros(shape=(height, width), dtype=np.int64)
    left_cost_volume = np.zeros(shape=(height, width, disparity),
                                dtype=cost_volume_dtype)
    right_cost_volume = np.zeros(shape=(height, width, disparity),
                                 dtype=cost_volume_dtype)
    for d in range(0, disparity):
        # LEFT
        rfeatures[:, (x_offset +
                      d):(width -
                          x_offset)] = right_features[:, x_offset:(width - d -
                                                                   x_offset)]
        if descriptor == 'BRIEF':
            left_cost_volume[:, :,
                             d] = np.count_nonzero(left_features != rfeatures,
                                                   axis=-1)
        elif descriptor == "HOG":
            diff = left_features - rfeatures  # (H, W, orientations)
            left_cost_volume[:, :, d] = np.sqrt(np.sum(
                diff**2, 2))  # Summed Squared Difference along the last axis
        else:
            left_xor = np.int64(
                np.bitwise_xor(np.int64(left_features), rfeatures))
            left_distance = np.zeros(shape=(height, width), dtype=np.uint32)
            while not np.all(left_xor == 0):
                tmp = left_xor - 1
                mask = left_xor != 0
                left_xor[mask] = np.bitwise_and(left_xor[mask], tmp[mask])
                left_distance[mask] = left_distance[mask] + 1
            left_cost_volume[:, :, d] = left_distance

        # RIGHT
        lfeatures[:,
                  x_offset:(width - d -
                            x_offset)] = left_features[:,
                                                       (x_offset +
                                                        d):(width - x_offset)]
        if descriptor == 'BRIEF':
            right_cost_volume[:, :, d] = np.count_nonzero(
                right_features != lfeatures, axis=-1)
        elif descriptor == 'HOG':
            diff = right_features - lfeatures  # (H, W, orientations)
            right_cost_volume[:, :, d] = np.sqrt(np.sum(
                diff**2, 2))  # Summed Squared Difference along the last axis
        else:
            right_xor = np.int64(
                np.bitwise_xor(np.int64(right_features), lfeatures))
            right_distance = np.zeros(shape=(height, width), dtype=np.uint32)
            while not np.all(right_xor == 0):
                tmp = right_xor - 1
                mask = right_xor != 0
                right_xor[mask] = np.bitwise_and(right_xor[mask], tmp[mask])
                right_distance[mask] = right_distance[mask] + 1
            right_cost_volume[:, :, d] = right_distance

    dusk = t.time()
    print('\t(done in {:.2f}s)'.format(dusk - dawn))

    return left_cost_volume, right_cost_volume
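The census branch above computes the Hamming distance with the x & (x - 1) trick, which clears the lowest set bit on every pass. A standalone sketch of the same popcount idea on a small array, for reference:

import numpy as np

def popcount(values):
    """Count set bits per element with x & (x - 1), as in the cost loops above."""
    values = np.array(values, dtype=np.int64, copy=True)
    counts = np.zeros(values.shape, dtype=np.uint32)
    while not np.all(values == 0):
        mask = values != 0
        counts[mask] += 1
        values[mask] &= values[mask] - 1
    return counts

print(popcount([0b1011, 0, 0b1000]))   # [3 0 1]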
Example #36
ax[0].imshow(img2)
ax[0].set_title("Original")
ax[1].imshow(gray2, cmap=plt.cm.gray)
ax[1].set_title("Grayscale")
fig.tight_layout()
plt.show()

tform = transform.AffineTransform(scale=(1.3, 1.1), rotation=0.5,
                                  translation=(0, -200))
gray3 = transform.warp(gray1, tform)
gray4 = transform.rotate(gray1, 180)

descriptor_extractor = BRIEF(patch_size=5)

keypoints1 = corner_peaks(corner_harris(gray1), min_distance=1, threshold_rel=0)
descriptor_extractor.extract(gray1, keypoints1)
descriptors1 = descriptor_extractor.descriptors

keypoints2 = corner_peaks(corner_harris(gray2), min_distance=1, threshold_rel=0)
descriptor_extractor.extract(gray2, keypoints2)
descriptors2 = descriptor_extractor.descriptors

keypoints3 = corner_peaks(corner_harris(gray3), min_distance=1, threshold_rel=0)
descriptor_extractor.extract(gray3, keypoints3)
descriptors3 = descriptor_extractor.descriptors

keypoints4 = corner_peaks(corner_harris(gray4), min_distance=1, threshold_rel=0)
descriptor_extractor.extract(gray4, keypoints4)
descriptors4 = descriptor_extractor.descriptors

matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
Example #37
from skimage.color import rgb2gray
import matplotlib.pyplot as plt


img1 = rgb2gray(data.astronaut())
tform = transform.AffineTransform(scale=(1.2, 1.2), translation=(0, -100))
img2 = transform.warp(img1, tform)
img3 = transform.rotate(img1, 25)

keypoints1 = corner_peaks(corner_harris(img1), min_distance=5)
keypoints2 = corner_peaks(corner_harris(img2), min_distance=5)
keypoints3 = corner_peaks(corner_harris(img3), min_distance=5)

extractor = BRIEF()

extractor.extract(img1, keypoints1)
keypoints1 = keypoints1[extractor.mask]
descriptors1 = extractor.descriptors

extractor.extract(img2, keypoints2)
keypoints2 = keypoints2[extractor.mask]
descriptors2 = extractor.descriptors

extractor.extract(img3, keypoints3)
keypoints3 = keypoints3[extractor.mask]
descriptors3 = extractor.descriptors

matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)

fig, ax = plt.subplots(nrows=2, ncols=1)
Example #38
#!/usr/bin/python2 -utt
# -*- coding: utf-8 -*-
import os
import sys
#sys.path.insert(0, '/home/ubuntu/dev/opencv-3.1/build/lib')
from aux.numpy_sift import SIFTDescriptor
import cv2
import time
import numpy as np
from skimage.feature import BRIEF
try:
    input_img_fname = sys.argv[1]
    output_fname = sys.argv[2]
except:
    print("Wrong input format. Try BRIEF.py img.jpg out.txt")
    sys.exit(1)
image = cv2.imread(input_img_fname, 0)
h, w = image.shape
print(h, w)
BR = BRIEF(patch_size=w - 1)
n_patches = h // w  # integer division: the image is a vertical stack of w x w patches
keypoints = np.zeros((n_patches, 2))
t = time.time()
for i in range(n_patches):
    keypoints[i, :] = np.array([i * w + float(w) / 2., float(w) / 2.])
BR.extract(image, keypoints)
descriptors_for_net = BR.descriptors
np.savetxt(output_fname, descriptors_for_net, delimiter=' ', fmt='%i')