from skimage.color import rgb2gray
from skimage.feature import BRIEF


def get_brief_feats(img, kp):
    """Extract BRIEF descriptors for the given keypoints."""
    img_gray = rgb2gray(img)
    brief = BRIEF()
    brief.extract(img_gray, kp)
    return brief.descriptors
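A minimal usage sketch (hypothetical: it assumes scikit-image's sample data and Harris corners for keypoint detection):

from skimage import data
from skimage.feature import corner_harris, corner_peaks

img = data.astronaut()
kp = corner_peaks(corner_harris(rgb2gray(img)), min_distance=5)
descriptors = get_brief_feats(img, kp)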
Example #2
def test_binary_descriptors_rotation_crosscheck_true():
    """Verify matched keypoints and their corresponding masks results between
    image and its rotated version with the expected keypoint pairs with
    cross_check enabled."""
    img = data.astronaut()
    img = rgb2gray(img)
    tform = tf.SimilarityTransform(scale=1, rotation=0.15, translation=(0, 0))
    rotated_img = tf.warp(img, tform, clip=False)

    extractor = BRIEF(descriptor_size=512)

    keypoints1 = corner_peaks(corner_harris(img), min_distance=5,
                              threshold_abs=0, threshold_rel=0.1)
    extractor.extract(img, keypoints1)
    descriptors1 = extractor.descriptors

    keypoints2 = corner_peaks(corner_harris(rotated_img), min_distance=5,
                              threshold_abs=0, threshold_rel=0.1)
    extractor.extract(rotated_img, keypoints2)
    descriptors2 = extractor.descriptors

    matches = match_descriptors(descriptors1, descriptors2, cross_check=True)

    exp_matches1 = np.array([ 0,  2,  3,  4,  5,  6,  9, 11, 12, 13, 14, 17,
                             18, 19, 21, 22, 23, 26, 27, 28, 29, 31, 32, 33,
                             34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46])
    exp_matches2 = np.array([ 0,  2,  3,  1,  4,  6,  5,  7, 13, 10,  9, 11,
                             15,  8, 14, 12, 16, 18, 19, 21, 20, 24, 25, 26,
                             28, 27, 22, 23, 29, 30, 31, 32, 35, 33, 34, 36])
    assert_equal(matches[:, 0], exp_matches1)
    assert_equal(matches[:, 1], exp_matches2)
Example #3
def calc_corners(*imgs):
    b = BRIEF()
    for c_img in imgs:
        corner_img = corner_harris(c_img)
        coords = corner_peaks(corner_img, min_distance=5)
        b.extract(c_img, coords)
        # keep only the keypoints for which a descriptor could be computed
        yield {"keypoints": coords[b.mask], "descriptors": b.descriptors}
Example #4
def test_uniform_mode():
    """Verify the computed BRIEF descriptors with expected for uniform mode."""
    img = data.coins()

    keypoints = corner_peaks(corner_harris(img), min_distance=5, threshold_abs=0, threshold_rel=0.1)

    extractor = BRIEF(descriptor_size=8, sigma=2, mode="uniform")

    extractor.extract(img, keypoints[:8])

    expected = np.array(
        [
            [False, False, False, True, True, True, False, False],
            [True, True, True, False, True, False, False, True],
            [True, True, True, False, True, True, False, True],
            [True, True, True, True, False, True, False, True],
            [True, True, True, True, True, True, False, False],
            [True, True, True, True, True, True, True, True],
            [False, False, False, True, True, True, True, True],
            [False, True, False, True, False, True, True, True],
        ],
        dtype=bool,
    )

    assert_array_equal(extractor.descriptors, expected)
Example #5
def test_normal_mode():
    """Verify the computed BRIEF descriptors with expected for normal mode."""
    img = data.coins()

    keypoints = corner_peaks(corner_harris(img),
                             min_distance=5,
                             threshold_abs=0,
                             threshold_rel=0.1)

    extractor = BRIEF(descriptor_size=8, sigma=2)

    extractor.extract(img, keypoints[:8])

    expected = np.array(
        [[False, True, False, False, True, False, True, False],
         [True, False, True, True, False, True, False, False],
         [True, False, False, True, False, True, False, True],
         [True, True, True, True, False, True, False, True],
         [True, True, True, False, False, True, True, True],
         [False, False, False, False, True, False, False, False],
         [False, True, False, False, True, False, True, False],
         [False, False, False, False, False, False, False, False]],
        dtype=bool)

    assert_array_equal(extractor.descriptors, expected)
Example #6
def test_border():
    img = np.zeros((100, 100))
    keypoints = np.array([[1, 1], [20, 20], [50, 50], [80, 80]])

    extractor = BRIEF(patch_size=41)
    extractor.extract(img, keypoints)

    assert extractor.descriptors.shape[0] == 3
    assert_array_equal(extractor.mask, (False, True, True, True))
Example #7
    def calculate(self, resource):
        except_image_only(resource)
        im = image2numpy(resource.image, remap='gray')
        keypoints = corner_peaks(corner_harris(im), min_distance=1)
        extractor = BRIEF_()
        extractor.extract(im, keypoints)

        # initializing rows for the table
        return (extractor.descriptors, keypoints[:,0], keypoints[:,1])
Example #9
    def process(self, img2, image_gray):
        # img2 = warp(img2)
        img2 = rgb2gray(img2)
        image_gray = rgb2gray(image_gray)

        blobs_dog = blob_dog(image_gray, min_sigma=0.2, max_sigma=225,
                             sigma_ratio=1.6, threshold=.5)
        # convert each blob's standard deviation to an approximate radius
        blobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2)

        blobs = [blobs_dog]
        colors = ['black']
        titles = ['Difference of Gaussian']
        sequence = zip(blobs, colors, titles)

        for blobs, color, title in sequence:
            print(len(blobs))
            for blob in blobs:
                y, x, r = blob
                plotx = x
                ploty = y
                for i in range(3):
                    keypoints1 = corner_peaks(corner_harris(Array.image_arr[i]),
                                              min_distance=1)
                    keypoints2 = corner_peaks(corner_harris(img2), min_distance=1)

                    extractor = BRIEF(patch_size=30, mode="uniform")

                    extractor.extract(Array.image_arr[i], keypoints1)
                    keypoints1 = keypoints1[extractor.mask]
                    descriptors1 = extractor.descriptors

                    extractor.extract(img2, keypoints2)
                    keypoints2 = keypoints2[extractor.mask]
                    descriptors2 = extractor.descriptors

                    matches12 = match_descriptors(descriptors1, descriptors2,
                                                  cross_check=True)
                    if len(matches12) == 0:
                        continue

                    # take the coordinates of the last matched keypoint in img2
                    for match in matches12:
                        X = keypoints2[match[1]][1]
                        Y = keypoints2[match[1]][0]

                    if sqrt((plotx - X)**2 + (ploty - Y)**2) < r:
                        seen = [{
                            "type": Array.type_arr[i],
                            "center_shift": (plotx - 160 / 2) * -0.02,
                            "distance": image_gray[int(y)][int(x)] / 0.08
                        }]
                        print(seen)
                        data.seen.add(seen)
                        break
Example #10
    def extract(self, sample):
        points = self._get_points(sample)
        radius = np.min(sample.size) // 4
        extractor = BRIEF(self.length, patch_size=radius)
        extractor.extract(sample.gray, points)
        points = points[extractor.mask]
        if not len(extractor.descriptors):
            raise FeatureExtractionError(
                self, 'Could not extract BRIEF descriptor')
        descriptor = extractor.descriptors[0].tolist()
        return descriptor
Example #11
def keypoints_and_descriptors_brief(img):
    """Detect Harris corner keypoints and return them with BRIEF descriptors."""
    gray = rgb2gray(img)
    extractor = BRIEF(patch_size=5)

    keypoints = corner_peaks(corner_harris(gray), min_distance=1)

    extractor.extract(gray, keypoints)
    keypoints = keypoints[extractor.mask]
    descriptors = extractor.descriptors

    return keypoints, descriptors
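A hypothetical two-image matching flow built on this helper (img_a and img_b are placeholders; match_descriptors comes from skimage.feature):

kp1, desc1 = keypoints_and_descriptors_brief(img_a)
kp2, desc2 = keypoints_and_descriptors_brief(img_b)
matches = match_descriptors(desc1, desc2, cross_check=True)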
Example #12
    def extract(self):
        points = self.get_points()
        radius = np.min(self.size) // 4
        extractor = BRIEF(type(self).length)
        extractor.extract(self.gray, points)
        points = points[extractor.mask]
        if len(extractor.descriptors):
            descriptor = extractor.descriptors[0].tolist()
            return descriptor
        else:
            print('Could not extract BRIEF descriptor')
            return [0] * type(self).length
Example #13
def BRIEF_descriptor(filename, intermediate_point):
    print("Making BRIEF descriptors...")

    # Load the image and create the descriptor extractor
    input_image = rgb2gray(io.imread(filename))
    extractor = BRIEF()

    # Convert 'list' to 'numpy.ndarray'
    intermediate_point_array = numpy.array(intermediate_point)

    # Descriptor extraction
    extractor.extract(input_image, intermediate_point_array)
    descriptors = extractor.descriptors

    return descriptors
Example #14
def extractDaisyDescriptors(img, patch_size):
    # Despite the name, this extracts dense BRIEF descriptors per channel.
    extractor = BRIEF(patch_size=patch_size)
    i_idx = np.arange(0, img.shape[0])
    j_idx = np.arange(0, img.shape[1])
    kps_i, kps_j = np.meshgrid(i_idx, j_idx)
    kps_i = kps_i.ravel().reshape(-1, 1)
    kps_j = kps_j.ravel().reshape(-1, 1)
    for i in range(img.shape[2]):
        extractor.extract(img[:, :, i], np.concatenate((kps_i, kps_j), axis=1))
        dsc = extractor.descriptors  # note: only the last channel's descriptors survive the loop

    return dsc, kps_i, kps_j
Example #15
def extract_by_brief(img1, img2, min_distance=1):
    extractor = BRIEF()
    detector = CENSURE(mode='STAR')
    detector.detect(img1)
    keyp1 = detector.keypoints

    detector.detect(img2)
    keyp2 = detector.keypoints

    extractor.extract(img1, keyp1)
    desc1 = extractor.descriptors

    extractor.extract(img2, keyp2)
    desc2 = extractor.descriptors

    return [keyp1, keyp2, desc1, desc2]
Example #16
def selectFeatures(useList):
    DataSet = []
    LabelSet = []
    lengthV = []
    trainPaths = ['./fruit/' + c + '_train/' for c in classes]
    testPaths = ['./fruit/' + c + ' test/' for c in classes]
    for c in range(len(classes)):
        className = classes[c]
        path = trainPaths[c]
        detector = CENSURE()
        detector2 = ORB(n_keypoints=50)
        detector3 = BRIEF(patch_size=49)  # created but unused below
        files = os.listdir(path)
        # sample
        files = random.sample(files, 100)
        nfiles = len(files)
        for i in range(nfiles):
            featureVector = []
            infile = files[i]
            img = io.imread(path + infile, as_gray=True)
            hist = np.histogram(img, bins=256)
            img = resize(img, (400, 400))
            detector2.detect_and_extract(img)
            detector.detect(img)
            fd = hog(img,
                     orientations=9,
                     pixels_per_cell=(32, 32),
                     cells_per_block=(1, 1),
                     visualize=False)
            for h in hist:
                fd = np.append(fd, h)
            if useList[0]:
                fd = np.append(fd, [np.array(detector.keypoints).flatten()])
            if useList[1]:
                fd = np.append(fd, detector2.keypoints)
            if useList[2]:
                fd = np.append(fd, edgeExtract(img, 100))
            l1 = len(fd)
            corners = corner_peaks(corner_harris(img), min_distance=1)
            if useList[3]:
                fd = np.append(fd, corners)
            lengthV.append(len(fd))
            DataSet.append(fd)
            ind = classes.index(className)
            LabelSet.append(ind)
    # pad all feature vectors to the same length
    max_len = np.amax(lengthV)
    lengthV = []
    DataSet2 = []
    for d in DataSet:
        d = np.pad(d, (0, max_len - len(d)), 'constant')
        DataSet2.append(d)
        lengthV.append(len(d))
    DataSet = DataSet2
    res = 0
    # perform grid search with one thread
    if __name__ == '__main__':
        res = gridSearch(DataSet, LabelSet, False)
        return res
Example #17
def test_uniform_mode(dtype):
    """Verify the computed BRIEF descriptors with expected for uniform mode."""
    # `dtype` is supplied by a pytest parametrize decorator not shown here.
    img = data.coins().astype(dtype)

    keypoints = corner_peaks(corner_harris(img),
                             min_distance=5,
                             threshold_abs=0,
                             threshold_rel=0.1)

    extractor = BRIEF(descriptor_size=8, sigma=2, mode='uniform')

    extractor.extract(img, keypoints[:8])

    expected = np.array([[1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 1, 0, 0],
                         [1, 1, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1],
                         [1, 1, 1, 0, 0, 1, 0, 0], [1, 1, 1, 1, 0, 1, 0, 0],
                         [1, 1, 0, 0, 0, 1, 0, 0], [0, 1, 1, 1, 0, 1, 1, 1]],
                        dtype=bool)

    assert_array_equal(extractor.descriptors, expected)
Example #18
def test_uniform_mode():
    """Verify the computed BRIEF descriptors with expected for uniform mode."""
    img = rgb2gray(data.lena())

    keypoints = corner_peaks(corner_harris(img), min_distance=5)

    extractor = BRIEF(descriptor_size=8, sigma=2, mode='uniform')

    extractor.extract(img, keypoints[:8])

    expected = np.array([[ True, False,  True, False, False,  True, False, False],
                         [False,  True, False, False,  True,  True,  True,  True],
                         [ True, False, False, False, False, False, False, False],
                         [False,  True,  True, False, False, False,  True, False],
                         [False, False, False, False, False, False,  True, False],
                         [False,  True, False, False,  True, False, False, False],
                         [False, False,  True,  True, False, False,  True,  True],
                         [ True,  True, False, False, False, False, False, False]], dtype=bool)

    assert_array_equal(extractor.descriptors, expected)
Example #19
def calculate_descriptors(X):
    extractor = BRIEF()

    Descriptors = []
    for i in range(len(X)):
        Im = np.asarray(X[i, :, :, :], dtype='float32')
        Max = np.amax(Im)
        Im = Im / Max
        Im = rgb2gray(Im)
        keypoints = corner_peaks(corner_harris(Im), min_distance=5)
        extractor.extract(Im, keypoints)
        Temp = extractor.descriptors
        Descriptors.append(
            np.asarray(np.round(np.average(Temp, axis=0)), dtype='int32'))

    Descriptors_matrix = np.zeros([len(X), 256])  # 256 is the default BRIEF descriptor_size
    for i in range(len(X)):
        Descriptors_matrix[i, :] = Descriptors[i]

    return Descriptors_matrix
Example #20
def test_normal_mode():
    """Verify the computed BRIEF descriptors with expected for normal mode."""
    img = data.coins()

    keypoints = corner_peaks(corner_harris(img), min_distance=5)

    extractor = BRIEF(descriptor_size=8, sigma=2)

    extractor.extract(img, keypoints[:8])

    expected = np.array([[False,  True, False, False,  True, False,  True, False],
                         [ True, False,  True,  True, False,  True, False, False],
                         [ True, False, False,  True, False,  True, False,  True],
                         [ True,  True,  True,  True, False,  True, False,  True],
                         [ True,  True,  True, False, False,  True,  True,  True],
                         [False, False, False, False,  True, False, False, False],
                         [False,  True, False, False,  True, False,  True, False],
                         [False, False, False, False, False, False, False, False]], dtype=bool)

    assert_array_equal(extractor.descriptors, expected)
Example #21
def test_uniform_mode():
    """Verify the computed BRIEF descriptors with expected for uniform mode."""
    img = data.coins()

    keypoints = corner_peaks(corner_harris(img), min_distance=5)

    extractor = BRIEF(descriptor_size=8, sigma=2, mode='uniform')

    extractor.extract(img, keypoints[:8])

    expected = np.array([[False, False, False, True, True, True, False, False],
                         [True, True, True, False, True, False, False, True],
                         [True, True, True, False, True, True, False, True],
                         [True, True, True, True, False, True, False, True],
                         [True, True, True, True, True, True, False, False],
                         [True, True, True, True, True, True, True, True],
                         [False, False, False, True, True, True, True, True],
                         [False, True, False, True, False, True, True, True]],
                        dtype=bool)

    assert_array_equal(extractor.descriptors, expected)
Example #22
def MatchPics(I1, I2):

    if I1.ndim == 3:
        I1 = rgb2gray(I1)
    if I2.ndim == 3:
        I2 = rgb2gray(I2)

    points1 = corner_peaks(corner_fast(I1, n=12, threshold=0.15), min_distance=1)
    points2 = corner_peaks(corner_fast(I2, n=12, threshold=0.15), min_distance=1)

    extractor = BRIEF()

    extractor.extract(I1, points1)
    points1 = points1[extractor.mask]
    descriptors1 = extractor.descriptors

    extractor.extract(I2, points2)
    points2 = points2[extractor.mask]
    descriptors2 = extractor.descriptors

    matches = match_descriptors(descriptors1, descriptors2, metric='hamming',
                                cross_check=True)

    # these points are y, x (row, col)
    locs1 = points1[matches[:, 0]]
    locs2 = points2[matches[:, 1]]
    # change to x, y (col, row)
    xy1 = np.array([locs1[:, 1], locs1[:, 0]]).transpose()
    xy2 = np.array([locs2[:, 1], locs2[:, 0]]).transpose()
    fig, ax = plt.subplots()
    plot_matches(ax, I1, I2, points1, points2, matches,
                 keypoints_color='r', only_matches=True)

    return [xy1, xy2]
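A sketch of how MatchPics might be called (the file names are placeholders):

from skimage import io

imgA = io.imread('frame0.png')
imgB = io.imread('frame1.png')
xyA, xyB = MatchPics(imgA, imgB)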
Example #23
def test_binary_descriptors_lena_rotation_crosscheck_true():
    """Verify matched keypoints and their corresponding masks results between
    lena image and its rotated version with the expected keypoint pairs with
    cross_check enabled."""
    img = data.lena()
    img = rgb2gray(img)
    tform = tf.SimilarityTransform(scale=1, rotation=0.15, translation=(0, 0))
    rotated_img = tf.warp(img, tform, clip=False)

    extractor = BRIEF(descriptor_size=512)

    keypoints1 = corner_peaks(corner_harris(img), min_distance=5)
    extractor.extract(img, keypoints1)
    descriptors1 = extractor.descriptors

    keypoints2 = corner_peaks(corner_harris(rotated_img), min_distance=5)
    extractor.extract(rotated_img, keypoints2)
    descriptors2 = extractor.descriptors

    matches = match_descriptors(descriptors1, descriptors2, cross_check=True)

    exp_matches1 = np.array([
        0, 1, 2, 4, 6, 7, 9, 10, 11, 12, 13, 15, 16, 17, 19, 20, 21, 24, 26,
        27, 28, 29, 30, 35, 36, 38, 39, 40, 42, 44, 45
    ])
    exp_matches2 = np.array([
        33, 0, 35, 1, 3, 2, 6, 4, 9, 11, 10, 7, 8, 5, 14, 13, 15, 16, 17, 18,
        19, 21, 22, 24, 23, 26, 27, 25, 28, 29, 30
    ])
    assert_equal(matches[:, 0], exp_matches1)
    assert_equal(matches[:, 1], exp_matches2)
Example #24
def test_binary_descriptors_rotation_crosscheck_false():
    """Verify matched keypoints and their corresponding masks results between
    image and its rotated version with the expected keypoint pairs with
    cross_check disabled."""
    img = data.astronaut()
    img = rgb2gray(img)
    tform = transform.SimilarityTransform(scale=1,
                                          rotation=0.15,
                                          translation=(0, 0))
    rotated_img = transform.warp(img, tform, clip=False)

    extractor = BRIEF(descriptor_size=512)

    keypoints1 = corner_peaks(corner_harris(img),
                              min_distance=5,
                              threshold_abs=0,
                              threshold_rel=0.1)
    extractor.extract(img, keypoints1)
    descriptors1 = extractor.descriptors

    keypoints2 = corner_peaks(corner_harris(rotated_img),
                              min_distance=5,
                              threshold_abs=0,
                              threshold_rel=0.1)
    extractor.extract(rotated_img, keypoints2)
    descriptors2 = extractor.descriptors

    matches = match_descriptors(descriptors1, descriptors2, cross_check=False)

    exp_matches1 = np.array([
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
        20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
        38, 39, 40, 41, 42, 43, 44, 45, 46
    ])
    exp_matches2 = np.array([
        0, 31, 2, 3, 1, 4, 6, 4, 38, 5, 27, 7, 13, 10, 9, 27, 7, 11, 15, 8, 23,
        14, 12, 16, 10, 25, 18, 19, 21, 20, 41, 24, 25, 26, 28, 27, 22, 23, 29,
        30, 31, 32, 35, 33, 34, 30, 36
    ])
    assert_equal(matches[:, 0], exp_matches1)
    assert_equal(matches[:, 1], exp_matches2)

    # minkowski takes a different code path, therefore we test it explicitly
    matches = match_descriptors(descriptors1,
                                descriptors2,
                                metric='minkowski',
                                cross_check=False)
    assert_equal(matches[:, 0], exp_matches1)
    assert_equal(matches[:, 1], exp_matches2)

    # it also has an extra parameter
    matches = match_descriptors(descriptors1,
                                descriptors2,
                                metric='minkowski',
                                p=4,
                                cross_check=False)
    assert_equal(matches[:, 0], exp_matches1)
    assert_equal(matches[:, 1], exp_matches2)
Example #25
def produceMatches(imL1, imR1, panoramas=False, overlap_size=None):

    imL = imL1.copy()
    imR = imR1.copy()

    if panoramas:
        if overlap_size is None:
            overlap_size = int(imL.shape[1] * 0.4)
        imL[:, :-overlap_size, :] = 0
        imR[:, overlap_size:, :] = 0

    imLgray = rgb2gray(imL)
    imRgray = rgb2gray(imR)

    keypointsL = corner_peaks(corner_harris(imLgray),
                              threshold_rel=0.001,
                              min_distance=10)
    keypointsR = corner_peaks(corner_harris(imRgray),
                              threshold_rel=0.001,
                              min_distance=10)

    extractor = BRIEF()

    extractor.extract(imLgray, keypointsL)
    keypointsL = keypointsL[extractor.mask]
    descriptorsL = extractor.descriptors

    extractor.extract(imRgray, keypointsR)
    keypointsR = keypointsR[extractor.mask]
    descriptorsR = extractor.descriptors

    matchesLR = match_descriptors(descriptorsL, descriptorsR, cross_check=True)

    src = []
    dst = []
    for coord in matchesLR:
        src.append(keypointsL[coord[0]])
        dst.append(keypointsR[coord[1]])
    src = np.array(src)
    dst = np.array(dst)

    src_c = src.copy()
    dst_c = dst.copy()
    src_c[:, 1] = src[:, 0]
    src_c[:, 0] = src[:, 1]
    dst_c[:, 1] = dst[:, 0]
    dst_c[:, 0] = dst[:, 1]

    # robustly estimate a projective transform model with RANSAC
    model_robust, inliers = ransac((src_c, dst_c),
                                   ProjectiveTransform,
                                   min_samples=4,
                                   residual_threshold=8,
                                   max_trials=250)

    return (matchesLR, model_robust, inliers)
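The returned model can then warp one image into the other's frame; a sketch under the assumption that imL and imR are the RGB pair passed in above:

from skimage.transform import warp

matchesLR, model_robust, inliers = produceMatches(imL, imR)
imR_warped = warp(imR, model_robust.inverse)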
Example #26
def apply_brief(left, right, descriptor_size, num_elements):
    """
    computes BRIEF descriptor on both images.
    :param left: left image.
    :param right: right image.
    :param descriptor_size: size of window of the BRIEF descriptor.
    :param num_elements: length of the feature vector.
    :return: a pair of (H x W) np.int64 arrays (left and right), H = height and W = width
    """
    # TODO: apply BRIEF descriptor on both images. You will have to convert the BRIEF feature vector to a int64.

    extractor = BRIEF(descriptor_size=num_elements,
                      patch_size=descriptor_size,
                      mode='normal')

    left_rows, left_cols = left.shape

    left_indices = np.empty((left_rows, left_cols, 2))
    left_indices[..., 0] = np.arange(left_rows)[:, None]
    left_indices[..., 1] = np.arange(left_cols)
    left_indices = left_indices.reshape(left_cols * left_rows, 2)

    right_rows, right_cols = right.shape

    right_indices = np.empty((right_rows, right_cols, 2))
    right_indices[..., 0] = np.arange(right_rows)[:, None]
    right_indices[..., 1] = np.arange(right_cols)
    right_indices = right_indices.reshape(right_cols * right_rows, 2)

    extractor.extract(left, left_indices)
    left_desc = extractor.descriptors.astype(np.int64).reshape(
        left_rows - descriptor_size + 1, left_cols - descriptor_size + 1,
        num_elements)
    extractor.extract(right, right_indices)
    right_desc = extractor.descriptors.astype(np.int64).reshape(
        right_rows - descriptor_size + 1, right_cols - descriptor_size + 1,
        num_elements)

    left_desc = np.pad(np.apply_along_axis(convert_brief, 2, left_desc),
                       ((3, 4), (3, 4)))
    right_desc = np.pad(np.apply_along_axis(convert_brief, 2, right_desc),
                        ((3, 4), (3, 4)))

    return (left_desc, right_desc)
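convert_brief is referenced above but not shown; a plausible minimal sketch (an assumption about the missing helper) packs a boolean descriptor row into a single int64, truncating to 63 bits since int64 cannot hold all 128:

import numpy as np

def convert_brief(bits):
    # pack the first 63 bits, most significant first
    value = np.int64(0)
    for b in bits[:63]:
        value = (value << 1) | int(bool(b))
    return value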
Example #27
def test_color_image_unsupported_error():
    """Brief descriptors can be evaluated on gray-scale images only."""
    img = np.zeros((20, 20, 3))
    keypoints = np.asarray([[7, 5], [11, 13]])
    with testing.raises(ValueError):
        BRIEF().extract(img, keypoints)
Example #28
from skimage import data
from skimage import transform as tf
from skimage.feature import (match_descriptors, corner_peaks, corner_harris,
                             plot_matches, BRIEF)
from skimage.color import rgb2gray
import matplotlib.pyplot as plt


img1 = rgb2gray(data.astronaut())
tform = tf.AffineTransform(scale=(1.2, 1.2), translation=(0, -100))
img2 = tf.warp(img1, tform)
img3 = tf.rotate(img1, 25)

keypoints1 = corner_peaks(corner_harris(img1), min_distance=5)
keypoints2 = corner_peaks(corner_harris(img2), min_distance=5)
keypoints3 = corner_peaks(corner_harris(img3), min_distance=5)

extractor = BRIEF()

extractor.extract(img1, keypoints1)
keypoints1 = keypoints1[extractor.mask]
descriptors1 = extractor.descriptors

extractor.extract(img2, keypoints2)
keypoints2 = keypoints2[extractor.mask]
descriptors2 = extractor.descriptors

extractor.extract(img3, keypoints3)
keypoints3 = keypoints3[extractor.mask]
descriptors3 = extractor.descriptors

matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)
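A typical follow-up is to visualize the matches with plot_matches:

fig, ax = plt.subplots(nrows=2, ncols=1)
plot_matches(ax[0], img1, img2, keypoints1, keypoints2, matches12)
ax[0].axis('off')
plot_matches(ax[1], img1, img3, keypoints1, keypoints3, matches13)
ax[1].axis('off')
plt.show()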
Example #29
from skimage.color import rgb2gray
from skimage.feature import BRIEF
from tadataka.feature import extract_features, Features, Matcher
from tadataka.triangulation import Triangulation
from tadataka.flow_estimation.extrema_tracker import ExtremaTracker
from tadataka.flow_estimation.image_curvature import extract_curvature_extrema
from tadataka.flow_estimation.image_curvature import compute_image_curvature
from tadataka.flow_estimation.flow_estimation import estimate_affine_transform
from tadataka.plot import plot_map, plot_matches
from tadataka.pose import Pose
from tadataka.triangulation import TwoViewTriangulation
from tadataka.utils import is_in_image_range
from matplotlib import rcParams
# rcParams["savefig.dpi"] = 800

brief = BRIEF(
    # descriptor_size=512,
    # patch_size=64,
    # mode="uniform",
    # sigma=0.1
)

match = Matcher()


def extract_dense_features(image):
    image = rgb2gray(image)
    keypoints = extract_curvature_extrema(image, percentile=95)
    keypoints = xy_to_yx(keypoints)
    brief.extract(image, keypoints)
    keypoints = keypoints[brief.mask]
    keypoints = yx_to_xy(keypoints)

    return Features(keypoints, brief.descriptors)
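xy_to_yx and yx_to_xy are not defined in this excerpt; presumably they swap between (x, y) order and scikit-image's (row, col) convention. A minimal sketch of what they might look like:

import numpy as np

def xy_to_yx(points):
    # swap the two columns: (x, y) -> (row, col)
    return np.asarray(points)[:, ::-1]

def yx_to_xy(points):
    # the swap is its own inverse: (row, col) -> (x, y)
    return np.asarray(points)[:, ::-1]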
Example #30
frame_number = str(frame).zfill(6)  # zero-padded six-digit frame index

# load the image into a NUMPY array using matplotlib's imread function
left_img_file = root_pathname + image_folder + sequence_number + left_camera + frame_number + '.png'
l_image = plt.imread(left_img_file)
right_img_file = root_pathname + image_folder + sequence_number + right_camera + frame_number + '.png'
r_image = plt.imread(right_img_file)

# find Harris corner features in each camera
l_keypoints = corner_peaks(corner_harris(l_image), min_distance=10)
r_keypoints = corner_peaks(corner_harris(r_image), min_distance=10)
# TODO Replace the two lines above with the Shi-Tomasi detector

# for each corner found, extract the BRIEF descriptor
extractor = BRIEF(sigma=1.0)
extractor.extract(l_image, l_keypoints)
l_descriptors = extractor.descriptors

# not all keypoints get descriptors. Remove the ones that didn't:
mask = extractor.mask
l_keypoints = l_keypoints[mask]

extractor.extract(r_image, r_keypoints)
r_descriptors = extractor.descriptors
mask = extractor.mask
r_keypoints = r_keypoints[mask]

# plot the found keypoints on top of the left image
fig, ax = plt.subplots(figsize=(20, 5))
plt.imshow(l_image, cmap=cm.gray)
Example #31
gray2 = rgb2gray(img2)
fig, axes = plt.subplots(1, 2, figsize=(8, 4))
ax = axes.ravel()
ax[0].imshow(img2)
ax[0].set_title("Original")
ax[1].imshow(gray2, cmap=plt.cm.gray)
ax[1].set_title("Grayscale")
fig.tight_layout()
plt.show()

tform = transform.AffineTransform(scale=(1.3, 1.1), rotation=0.5,
                                  translation=(0, -200))
gray3 = transform.warp(gray1, tform)
gray4 = transform.rotate(gray1, 180)

descriptor_extractor = BRIEF(patch_size=5)

keypoints1 = corner_peaks(corner_harris(gray1), min_distance=1, threshold_rel=0)
descriptor_extractor.extract(gray1, keypoints1)
descriptors1 = descriptor_extractor.descriptors

keypoints2 = corner_peaks(corner_harris(gray2), min_distance=1, threshold_rel=0)
descriptor_extractor.extract(gray2, keypoints2)
descriptors2 = descriptor_extractor.descriptors

keypoints3 = corner_peaks(corner_harris(gray3), min_distance=1, threshold_rel=0)
descriptor_extractor.extract(gray3, keypoints3)
descriptors3 = descriptor_extractor.descriptors

keypoints4 = corner_peaks(corner_harris(gray4), min_distance=1, threshold_rel=0)
descriptor_extractor.extract(gray4, keypoints4)
Example #32
def apply_brief(left, right, descriptor_size, num_elements):
    """
    Computes BRIEF descriptors on both images.
    :param left: left image.
    :param right: right image.
    :param descriptor_size: size of window of the BRIEF descriptor.
    :param num_elements: length of the feature vector.
    :return: a pair of (H x W) arrays, H = height and W = width, of type np.int64
    """

    # Generate the list of keypoints. Since we use a dense approach,
    # every pixel is a keypoint.
    pixel_coordinates = [[[j, i] for i in range(left.shape[1])]
                         for j in range(left.shape[0])]
    # The keypoints must be supplied as a flat list of coordinates.
    keypoints = np.array(pixel_coordinates).reshape(
        (left.shape[1] * left.shape[0]), 2)

    # Use BRIEF from skimage to obtain a descriptor for each pixel.
    extractor = BRIEF(descriptor_size=num_elements,
                      patch_size=descriptor_size,
                      mode='normal')

    extractor.extract(left, keypoints)
    descriptors1 = extractor.descriptors

    extractor.extract(right, keypoints)
    descriptors2 = extractor.descriptors

    # We now have one descriptor per pixel. To apply SGM, the original image
    # shape must be restored. Depending on the analysis window size
    # (descriptor_size), the image borders could not be processed, so the
    # output is smaller than the input by the descriptor size.
    descriptors1.resize((left.shape[0] - descriptor_size + 1,
                         left.shape[1] - descriptor_size + 1, num_elements),
                        refcheck=False)
    descriptors2.resize((left.shape[0] - descriptor_size + 1,
                         left.shape[1] - descriptor_size + 1, num_elements),
                        refcheck=False)

    # Here each descriptor is 128 bits. SGM and the Hamming distance expect
    # an integer input, so the 128 bits must be packed into one integer.
    # NumPy and the subsequent processing (Hamming distance) do not support
    # integers wider than 64 bits, so the descriptor dimension must be reduced.
    # Several reduction factors were tested; keeping 1 bit out of 20 is the
    # largest reduction that did not change the results on the cones image.

    concat_desc1 = np.apply_along_axis(
        lambda bits: int(''.join(
            [str(int(v)) if i % 20 == 0 else '' for i, v in enumerate(bits)])),
        2, descriptors1)
    concat_desc2 = np.apply_along_axis(
        lambda bits: int(''.join(
            [str(int(v)) if i % 20 == 0 else '' for i, v in enumerate(bits)])),
        2, descriptors2)

    # Finally, since a matrix the size of the input image is required, pad
    # the borders, where information is missing, with zeros.
    padding_pattern = (descriptor_size // 2 - 1, descriptor_size // 2)

    padded_descr1 = np.pad(concat_desc1, (padding_pattern, padding_pattern),
                           'constant')
    padded_descr2 = np.pad(concat_desc2, (padding_pattern, padding_pattern),
                           'constant')

    return (padded_descr1, padded_descr2)
Example #33
#!/usr/bin/python2 -utt
# -*- coding: utf-8 -*-
import os
import sys
#sys.path.insert(0, '/home/ubuntu/dev/opencv-3.1/build/lib')
from aux.numpy_sift import SIFTDescriptor
import cv2
import time
import numpy as np
from skimage.feature import BRIEF
try:
    input_img_fname = sys.argv[1]
    output_fname = sys.argv[2]
except IndexError:
    print("Wrong input format. Try BRIEF.py img.jpg out.txt")
    sys.exit(1)
image = cv2.imread(input_img_fname, 0)
h, w = image.shape
print(h, w)
BR = BRIEF(patch_size=w - 1)
n_patches = h // w  # the input image is a vertical stack of w x w patches
keypoints = np.zeros((n_patches, 2))
t = time.time()
for i in range(n_patches):
    keypoints[i, :] = np.array([i * w + float(w) / 2., float(w) / 2.])
BR.extract(image, keypoints)
descriptors_for_net = BR.descriptors
np.savetxt(output_fname, descriptors_for_net, delimiter=' ', fmt='%i')
Example #34
class FeatureDisplacementReward():
    def __init__(self):
        self.memories = []
        self.fdMax = cfg.shot_h * cfg.shot_h + cfg.shot_w * cfg.shot_w
        self.extractor = BRIEF()

    def median_outlier_filter(self, signal, threshold=3):
        signal = signal.copy()
        diff = np.abs(signal - np.median(signal))
        median_diff = np.median(diff)
        s = diff / (float(median_diff) + 1e-6)
        mask = s > threshold
        signal[mask] = np.median(signal)
        return signal

    def get_feature_displacement(self, img1, img2):

        # if cfg.shot_c == 3:
        #     img1 = rgb2gray(img1)
        #     img2 = rgb2gray(img2)
        keypoints1 = corner_peaks(corner_harris(img1),
                                  min_distance=5,
                                  threshold_rel=0.02)
        keypoints2 = corner_peaks(corner_harris(img2),
                                  min_distance=5,
                                  threshold_rel=0.02)

        self.extractor.extract(img1, keypoints1)
        keypoints1 = keypoints1[self.extractor.mask]
        descriptors1 = self.extractor.descriptors

        self.extractor.extract(img2, keypoints2)
        keypoints2 = keypoints2[self.extractor.mask]
        descriptors2 = self.extractor.descriptors

        if descriptors1.shape[0] == 0 or descriptors2.shape[0] == 0:
            return None  # no feature found

        matches = match_descriptors(descriptors1,
                                    descriptors2,
                                    cross_check=True,
                                    max_ratio=0.85)
        if matches.shape[0] < 4:
            return None
        dist = np.sum(
            (keypoints1[matches[:, 0]] - keypoints2[matches[:, 1]])**2, axis=1)
        return np.mean(self.median_outlier_filter(dist))

    def get_reward(self, pre_scrshot, cur_scrshot):
        # pre_scrshot = np.squeeze(pre_scrshot)  # before action
        cur_scrshot = np.squeeze(cur_scrshot)  # after action

        # pre_scrshot = (pre_scrshot * 255).astype(dtype=int)
        cur_scrshot = (cur_scrshot * 255).astype(dtype=int)

        min_mem_fd = self.fdMax
        min_mem_id = -1
        for id, mem_shot in enumerate(self.memories):
            fd = self.get_feature_displacement(cur_scrshot, mem_shot)
            if fd and fd < min_mem_fd:
                min_mem_fd = fd
                min_mem_id = id
        if min_mem_id == len(self.memories) - 1:
            # cur_scrshot is closest to the most recent memory position
            self.memories.append(cur_scrshot)
            return cfg.base_reward * min_mem_fd / self.fdMax
        elif min_mem_id > 0:  # cur_scrshot is closer to an older memory position
            return 0
        else:  # cur_scrshot shares no similar features with any memory
            self.memories.append(cur_scrshot)
            return cfg.base_reward

    def clear(self):
        self.memories = []


# end class FeatureDisplacementReward
Example #35
def test_unsupported_mode():
    with testing.raises(ValueError):
        BRIEF(mode='foobar')
Example #36
    def __init__(self):
        self.memories = []
        self.fdMax = cfg.shot_h * cfg.shot_h + cfg.shot_w * cfg.shot_w
        self.extractor = BRIEF()
Example #37
from skimage import data, transform
from skimage.feature import (match_descriptors, corner_peaks, corner_harris,
                             plot_matches, BRIEF)
from skimage.color import rgb2gray
import matplotlib.pyplot as plt


img1 = rgb2gray(data.astronaut())
tform = transform.AffineTransform(scale=(1.2, 1.2), translation=(0, -100))
img2 = transform.warp(img1, tform)
img3 = transform.rotate(img1, 25)

keypoints1 = corner_peaks(corner_harris(img1), min_distance=5)
keypoints2 = corner_peaks(corner_harris(img2), min_distance=5)
keypoints3 = corner_peaks(corner_harris(img3), min_distance=5)

extractor = BRIEF()

extractor.extract(img1, keypoints1)
keypoints1 = keypoints1[extractor.mask]
descriptors1 = extractor.descriptors

extractor.extract(img2, keypoints2)
keypoints2 = keypoints2[extractor.mask]
descriptors2 = extractor.descriptors

extractor.extract(img3, keypoints3)
keypoints3 = keypoints3[extractor.mask]
descriptors3 = extractor.descriptors

matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)
Example #38
import numpy as np
from skimage import io
from skimage.feature import (match_descriptors, corner_peaks, corner_fast,
                             plot_matches, BRIEF)
import matplotlib.pyplot as plt

img1 = io.imread("./data/training/image_2/000000_10.png", as_gray=True)
img2 = io.imread("./data/training/image_2/000000_11.png", as_gray=True)
extractor = BRIEF(descriptor_size=128, patch_size=9, mode='normal')

left_rows, left_cols = img1.shape

left_indices = np.empty((left_rows, left_cols, 2))
left_indices[..., 0] = np.arange(left_rows)[:, None]
left_indices[..., 1] = np.arange(left_cols)
left_indices = left_indices.reshape(left_cols * left_rows, 2)

#extractor.extract()