Example #1
def MatchPics(I1, I2):

    if I1.ndim == 3:
        I1 = rgb2gray(I1)
    if I2.ndim == 3:
        I2 = rgb2gray(I2)

    points1 = corner_peaks(corner_fast(I1, n=12, threshold=0.15), min_distance=1)
    points2 = corner_peaks(corner_fast(I2, n=12, threshold=0.15), min_distance=1)

    extractor = BRIEF()

    extractor.extract(I1, points1)
    points1 = points1[extractor.mask]
    descriptors1 = extractor.descriptors

    extractor.extract(I2, points2)
    points2 = points2[extractor.mask]
    descriptors2 = extractor.descriptors

    matches = match_descriptors(descriptors1, descriptors2,
                                metric='hamming', cross_check=True)

    # these points are y, x (row, col)
    locs1 = points1[matches[:, 0]]
    locs2 = points2[matches[:, 1]]
    # change to x, y (col, row)
    xy1 = np.array([locs1[:, 1], locs1[:, 0]]).transpose()
    xy2 = np.array([locs2[:, 1], locs2[:, 0]]).transpose()

    fig, ax = plt.subplots()
    plot_matches(ax, I1, I2, points1, points2, matches,
                 keypoints_color='r', only_matches=True)

    return [xy1, xy2]
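
A minimal usage sketch (not part of the original example); the imports below are what MatchPics relies on, and the image paths are placeholders:

# Hypothetical usage of MatchPics; the image paths are placeholders.
import numpy as np
import matplotlib.pyplot as plt
from skimage import io
from skimage.color import rgb2gray
from skimage.feature import (corner_fast, corner_peaks, BRIEF,
                             match_descriptors, plot_matches)

I1 = io.imread("scene_a.jpg")
I2 = io.imread("scene_b.jpg")
xy1, xy2 = MatchPics(I1, I2)   # matched coordinates in (x, y) order
plt.show()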
Example #2
def calc_corners(*imgs):
    b = BRIEF()
    for c_img in imgs:
        corner_img = corner_harris(c_img)
        coords = corner_peaks(corner_img, min_distance=5)
        b.extract(c_img, coords)
        yield {"keypoints": coords, "descriptors": b.descriptors}
Example #3
def test_binary_descriptors_lena_rotation_crosscheck_true():
    """Verify matched keypoints and their corresponding masks results between
    lena image and its rotated version with the expected keypoint pairs with
    cross_check enabled."""
    img = data.lena()
    img = rgb2gray(img)
    tform = tf.SimilarityTransform(scale=1, rotation=0.15, translation=(0, 0))
    rotated_img = tf.warp(img, tform, clip=False)

    extractor = BRIEF(descriptor_size=512)

    keypoints1 = corner_peaks(corner_harris(img), min_distance=5)
    extractor.extract(img, keypoints1)
    descriptors1 = extractor.descriptors

    keypoints2 = corner_peaks(corner_harris(rotated_img), min_distance=5)
    extractor.extract(rotated_img, keypoints2)
    descriptors2 = extractor.descriptors

    matches = match_descriptors(descriptors1, descriptors2, cross_check=True)

    exp_matches1 = np.array([
        0, 1, 2, 4, 6, 7, 9, 10, 11, 12, 13, 15, 16, 17, 19, 20, 21, 24, 26,
        27, 28, 29, 30, 35, 36, 38, 39, 40, 42, 44, 45
    ])
    exp_matches2 = np.array([
        33, 0, 35, 1, 3, 2, 6, 4, 9, 11, 10, 7, 8, 5, 14, 13, 15, 16, 17, 18,
        19, 21, 22, 24, 23, 26, 27, 25, 28, 29, 30
    ])
    assert_equal(matches[:, 0], exp_matches1)
    assert_equal(matches[:, 1], exp_matches2)
Example #4
def test_normal_mode():
    """Verify the computed BRIEF descriptors with expected for normal mode."""
    img = data.coins()

    keypoints = corner_peaks(corner_harris(img),
                             min_distance=5,
                             threshold_abs=0,
                             threshold_rel=0.1)

    extractor = BRIEF(descriptor_size=8, sigma=2)

    extractor.extract(img, keypoints[:8])

    expected = np.array(
        [[False, True, False, False, True, False, True, False],
         [True, False, True, True, False, True, False, False],
         [True, False, False, True, False, True, False, True],
         [True, True, True, True, False, True, False, True],
         [True, True, True, False, False, True, True, True],
         [False, False, False, False, True, False, False, False],
         [False, True, False, False, True, False, True, False],
         [False, False, False, False, False, False, False, False]],
        dtype=bool)

    assert_array_equal(extractor.descriptors, expected)
Example #5
def get_brief_feats(img, kp):
    img_gray = rgb2gray(img)
    brief = BRIEF()
    brief.extract(img_gray, kp)
    descriptors = brief.descriptors

    return descriptors
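
The keypoints are expected to be detected by the caller; a small sketch using Harris corners (the image path is a placeholder):

# Hypothetical usage of get_brief_feats; the image path is a placeholder.
from skimage import io
from skimage.color import rgb2gray
from skimage.feature import corner_harris, corner_peaks

img = io.imread("photo.jpg")                 # RGB image
kp = corner_peaks(corner_harris(rgb2gray(img)), min_distance=5)
desc = get_brief_feats(img, kp)              # one 256-bit row per surviving keypoint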
Example #6
def selectFeatures(useList):
    DataSet = []
    LabelSet = []
    lengthV = []
    trainPaths = ['./fruit/' + c + '_train/' for c in classes]
    testPaths = ['./fruit/' + c + ' test/' for c in classes]
    for c in range(len(classes)):
        className = classes[c]
        path = trainPaths[c]
        detector = CENSURE()
        detector2 = ORB(n_keypoints=50)
        detector3 = BRIEF(patch_size=49)
        files = os.listdir(path)
        #sample
        files = random.sample(files, 100)
        nfiles = len(files)
        for i in range(nfiles):
            featureVector = []
            infile = files[i]
            img = io.imread(path + infile, as_gray=True)
            hist = np.histogram(img, bins=256)
            img = resize(img, (400, 400))
            detector2.detect_and_extract(img)
            detector.detect(img)
            fd = hog(img,
                     orientations=9,
                     pixels_per_cell=(32, 32),
                     cells_per_block=(1, 1),
                     visualize=False)
            for h in hist:
                fd = np.append(fd, h)
            if (useList[0]):
                fd = np.append(fd, [np.array(detector.keypoints).flatten()])
            if (useList[1]):
                fd = np.append(fd, detector2.keypoints)
            if (useList[2]):
                fd = np.append(fd, edgeExtract(img, 100))
            l1 = len(fd)
            corners = corner_peaks(corner_harris(img), min_distance=1)
            if (useList[3]):
                fd = np.append(fd, corners)
            lengthV.append(len(fd))
            DataSet.append(fd)
            ind = classes.index(className)
            LabelSet.append(ind)
    max_len = np.amax(lengthV)
    lengthV = []
    DataSet2 = []
    for d in DataSet:
        d = np.pad(d, (0, max_len - len(d)), 'constant')
        DataSet2.append(d)
        lengthV.append(len(d))
    DataSet = DataSet2
    res = 0
    #perform gridsearch with one thread
    if __name__ == '__main__':
        res = gridSearch(DataSet, LabelSet, False)
        return res
Example #7
def test_binary_descriptors_rotation_crosscheck_false():
    """Verify matched keypoints and their corresponding masks results between
    image and its rotated version with the expected keypoint pairs with
    cross_check disabled."""
    img = data.astronaut()
    img = rgb2gray(img)
    tform = transform.SimilarityTransform(scale=1,
                                          rotation=0.15,
                                          translation=(0, 0))
    rotated_img = transform.warp(img, tform, clip=False)

    extractor = BRIEF(descriptor_size=512)

    keypoints1 = corner_peaks(corner_harris(img),
                              min_distance=5,
                              threshold_abs=0,
                              threshold_rel=0.1)
    extractor.extract(img, keypoints1)
    descriptors1 = extractor.descriptors

    keypoints2 = corner_peaks(corner_harris(rotated_img),
                              min_distance=5,
                              threshold_abs=0,
                              threshold_rel=0.1)
    extractor.extract(rotated_img, keypoints2)
    descriptors2 = extractor.descriptors

    matches = match_descriptors(descriptors1, descriptors2, cross_check=False)

    exp_matches1 = np.array([
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
        20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
        38, 39, 40, 41, 42, 43, 44, 45, 46
    ])
    exp_matches2 = np.array([
        0, 31, 2, 3, 1, 4, 6, 4, 38, 5, 27, 7, 13, 10, 9, 27, 7, 11, 15, 8, 23,
        14, 12, 16, 10, 25, 18, 19, 21, 20, 41, 24, 25, 26, 28, 27, 22, 23, 29,
        30, 31, 32, 35, 33, 34, 30, 36
    ])
    assert_equal(matches[:, 0], exp_matches1)
    assert_equal(matches[:, 1], exp_matches2)

    # minkowski takes a different code path, therefore we test it explicitly
    matches = match_descriptors(descriptors1,
                                descriptors2,
                                metric='minkowski',
                                cross_check=False)
    assert_equal(matches[:, 0], exp_matches1)
    assert_equal(matches[:, 1], exp_matches2)

    # it also has an extra parameter
    matches = match_descriptors(descriptors1,
                                descriptors2,
                                metric='minkowski',
                                p=4,
                                cross_check=False)
    assert_equal(matches[:, 0], exp_matches1)
    assert_equal(matches[:, 1], exp_matches2)
Example #8
def test_border():
    img = np.zeros((100, 100))
    keypoints = np.array([[1, 1], [20, 20], [50, 50], [80, 80]])

    extractor = BRIEF(patch_size=41)
    extractor.extract(img, keypoints)

    assert extractor.descriptors.shape[0] == 3
    assert_array_equal(extractor.mask, (False, True, True, True))
Example #9
def produceMatches(imL1, imR1, panoramas=False, overlap_size=None):

    imL = imL1.copy()
    imR = imR1.copy()

    if panoramas:
        if overlap_size is None:
            overlap_size = int(imL.shape[1] * 0.4)
        imL[:, :-overlap_size, :] = 0
        imR[:, overlap_size:, :] = 0

    imLgray = rgb2gray(imL)
    imRgray = rgb2gray(imR)

    keypointsL = corner_peaks(corner_harris(imLgray),
                              threshold_rel=0.001,
                              min_distance=10)
    keypointsR = corner_peaks(corner_harris(imRgray),
                              threshold_rel=0.001,
                              min_distance=10)

    extractor = BRIEF()

    extractor.extract(imLgray, keypointsL)
    keypointsL = keypointsL[extractor.mask]
    descriptorsL = extractor.descriptors

    extractor.extract(imRgray, keypointsR)
    keypointsR = keypointsR[extractor.mask]
    descriptorsR = extractor.descriptors

    matchesLR = match_descriptors(descriptorsL, descriptorsR, cross_check=True)

    src = []
    dst = []
    for coord in matchesLR:
        src.append(keypointsL[coord[0]])
        dst.append(keypointsR[coord[1]])
    src = np.array(src)
    dst = np.array(dst)

    src_c = src.copy()
    dst_c = dst.copy()
    src_c[:, 1] = src[:, 0]
    src_c[:, 0] = src[:, 1]
    dst_c[:, 1] = dst[:, 0]
    dst_c[:, 0] = dst[:, 1]

    # robustly estimate projective transform model with RANSAC
    model_robust, inliers = ransac((src_c, dst_c),
                                   ProjectiveTransform,
                                   min_samples=4,
                                   residual_threshold=8,
                                   max_trials=250)

    return (matchesLR, model_robust, inliers)
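
A sketch of how the returned model might be used (not part of the original): since the model was fit from left keypoints to right keypoints, warping the right image with it brings it into the left frame. The image paths are placeholders.

# Hypothetical usage of produceMatches; image paths are placeholders.
from skimage import io
from skimage.transform import warp

imL = io.imread("left.jpg")
imR = io.imread("right.jpg")
matchesLR, model, inliers = produceMatches(imL, imR)
aligned_right = warp(imR, model)   # right image resampled into the left frame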
Example #10
 def extract(self, sample):
     points = self._get_points(sample)
     radius = np.min(sample.size) // 4
     extractor = BRIEF(self.length, patch_size=radius)
     extractor.extract(sample.gray, points)
     points = points[extractor.mask]
     if not len(extractor.descriptors):
         raise FeatureExtractionError(self,
             'Could not extract BRIEF descriptor')
     descriptor = extractor.descriptors[0].tolist()
     return descriptor
Example #11
 def extract(self):
     points = self.get_points()
     radius = np.min(self.size) // 4
     extractor = BRIEF(type(self).length)
     extractor.extract(self.gray, points)
     points = points[extractor.mask]
     if len(extractor.descriptors):
         descriptor = extractor.descriptors[0].tolist()
         return descriptor
     else:
         print('Could not extract BRIEF descriptor')
         return [0] * type(self).length
Example #12
def keypoints_and_descriptors_brief(img):
    """Detect key point using BRIEF and return keypoints and descriptors.""" 
    gray = rgb2gray(img)
    extractor = BRIEF(patch_size=5)

    keypoints = corner_peaks(corner_harris(gray), min_distance=1)

    extractor.extract(gray, keypoints)
    keypoints = keypoints[extractor.mask]
    descriptors = extractor.descriptors

    return keypoints, descriptors
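
A short sketch of matching the resulting descriptors between two RGB frames (`frame1` and `frame2` are placeholders):

# Hypothetical usage: match BRIEF descriptors between two RGB images.
from skimage.feature import match_descriptors

kp1, desc1 = keypoints_and_descriptors_brief(frame1)
kp2, desc2 = keypoints_and_descriptors_brief(frame2)
matches = match_descriptors(desc1, desc2, metric='hamming', cross_check=True)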
Example #13
def BRIEF_descriptor(filename, intermediate_point):
    print("Making BRIEF descriptors...")

    # Load image and descriptor extractor
    image = rgb2gray(io.imread(filename))
    extractor = BRIEF()

    # Convert 'list' to 'numpy.ndarray'
    intermediate_point_array = numpy.array(intermediate_point)

    # Descriptor extraction
    extractor.extract(image, intermediate_point_array)
    descriptors = extractor.descriptors

    return descriptors
Example #14
def extractDaisyDescriptors(img, patch_size):

    extractor = BRIEF(patch_size=patch_size)
    i_idx = np.arange(0, img.shape[0])
    j_idx = np.arange(0, img.shape[1])
    kps_i, kps_j = np.meshgrid(i_idx, j_idx)
    kps_i = kps_i.ravel()
    kps_i.shape = (kps_i.shape[0], 1)
    kps_j = kps_j.ravel()
    kps_j.shape = (kps_j.shape[0], 1)
    for i in range(img.shape[2]):
        extractor.extract(img[:, :, i], np.concatenate((kps_i, kps_j), axis=1))
        dsc = extractor.descriptors

    return dsc, kps_i, kps_j
Example #15
def apply_brief(left, right, descriptor_size, num_elements):
    """
    computes BRIEF descriptor on both images.
    :param left: left image.
    :param right: right image.
    :param descriptor_size: size of window of the BRIEF descriptor.
    :param num_elements: length of the feature vector.
    :return: two (H x W) arrays (left and right descriptors), H = height and W = width, of type np.int64
    """
    # TODO: apply BRIEF descriptor on both images. You will have to convert the BRIEF feature vector to a int64.

    extractor = BRIEF(descriptor_size=num_elements,
                      patch_size=descriptor_size,
                      mode='normal')

    left_rows, left_cols = left.shape

    left_indices = np.empty((left_rows, left_cols, 2))
    left_indices[..., 0] = np.arange(left_rows)[:, None]
    left_indices[..., 1] = np.arange(left_cols)
    left_indices = left_indices.reshape(left_cols * left_rows, 2)

    right_rows, right_cols = right.shape

    right_indices = np.empty((right_rows, right_cols, 2))
    right_indices[..., 0] = np.arange(right_rows)[:, None]
    right_indices[..., 1] = np.arange(right_cols)
    right_indices = right_indices.reshape(right_cols * right_rows, 2)

    extractor.extract(left, left_indices)
    left_desc = extractor.descriptors.astype(np.int64).reshape(
        left_rows - descriptor_size + 1, left_cols - descriptor_size + 1, 128)
    extractor.extract(right, right_indices)
    right_desc = extractor.descriptors.astype(np.int64).reshape(
        right_rows - descriptor_size + 1, right_cols - descriptor_size + 1,
        128)

    left_desc = np.pad(np.apply_along_axis(convert_brief, 2, left_desc),
                       ((3, 4), (3, 4)))
    right_desc = np.pad(np.apply_along_axis(convert_brief, 2, right_desc),
                        ((3, 4), (3, 4)))

    return (left_desc, right_desc)
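
`convert_brief` is not shown in this example. A minimal sketch of what such a helper might look like, assuming it packs the boolean descriptor into one integer and keeps only the first 63 bits so the result fits into a signed 64-bit value:

# Hypothetical helper (not part of the original): pack a boolean BRIEF
# vector into a single integer, truncated to 63 bits to fit in int64.
def convert_brief(bits):
    value = 0
    for b in bits[:63]:
        value = (value << 1) | int(b)
    return value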
Example #16
def test_uniform_mode(dtype):
    """Verify the computed BRIEF descriptors with expected for uniform mode."""
    img = data.coins().astype(dtype)

    keypoints = corner_peaks(corner_harris(img),
                             min_distance=5,
                             threshold_abs=0,
                             threshold_rel=0.1)

    extractor = BRIEF(descriptor_size=8, sigma=2, mode='uniform')

    extractor.extract(img, keypoints[:8])

    expected = np.array([[1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 1, 0, 0],
                         [1, 1, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1],
                         [1, 1, 1, 0, 0, 1, 0, 0], [1, 1, 1, 1, 0, 1, 0, 0],
                         [1, 1, 0, 0, 0, 1, 0, 0], [0, 1, 1, 1, 0, 1, 1, 1]],
                        dtype=bool)

    assert_array_equal(extractor.descriptors, expected)
Example #17
def calculate_descriptors(X):
    extractor = BRIEF()

    Descriptors = []
    for i in range(len(X)):
        Im = np.asarray(X[i, :, :, :], dtype='float32')
        Max = np.amax(Im)
        Im = Im / Max
        Im = rgb2gray(Im)
        keypoints = corner_peaks(corner_harris(Im), min_distance=5)
        extractor.extract(Im, keypoints)
        Temp = extractor.descriptors
        Descriptors.append(
            np.asarray(np.round(np.average(Temp, axis=0)), dtype='int32'))

    Descriptors_matrix = np.zeros([len(X), 256])
    for i in range(len(X)):
        Descriptors_matrix[i, :] = Descriptors[i]

    return Descriptors_matrix
Example #18
def test_uniform_mode():
    """Verify the computed BRIEF descriptors with expected for uniform mode."""
    img = data.coins()

    keypoints = corner_peaks(corner_harris(img), min_distance=5)

    extractor = BRIEF(descriptor_size=8, sigma=2, mode='uniform')

    extractor.extract(img, keypoints[:8])

    expected = np.array([[False, False, False, True, True, True, False, False],
                         [True, True, True, False, True, False, False, True],
                         [True, True, True, False, True, True, False, True],
                         [True, True, True, True, False, True, False, True],
                         [True, True, True, True, True, True, False, False],
                         [True, True, True, True, True, True, True, True],
                         [False, False, False, True, True, True, True, True],
                         [False, True, False, True, False, True, True, True]],
                        dtype=bool)

    assert_array_equal(extractor.descriptors, expected)
Example #19
from tadataka.feature import extract_features, Features, Matcher
from tadataka.triangulation import Triangulation
from tadataka.flow_estimation.extrema_tracker import ExtremaTracker
from tadataka.flow_estimation.image_curvature import extract_curvature_extrema
from tadataka.flow_estimation.image_curvature import compute_image_curvature
from tadataka.flow_estimation.flow_estimation import estimate_affine_transform
from tadataka.plot import plot_map, plot_matches
from tadataka.pose import Pose
from tadataka.triangulation import TwoViewTriangulation
from tadataka.utils import is_in_image_range
from matplotlib import rcParams
from skimage.color import rgb2gray
from skimage.feature import BRIEF
# rcParams["savefig.dpi"] = 800

brief = BRIEF(
    # descriptor_size=512,
    # patch_size=64,
    # mode="uniform",
    # sigma=0.1
)

match = Matcher()


def extract_dense_features(image):
    image = rgb2gray(image)
    keypoints = extract_curvature_extrema(image, percentile=95)
    keypoints = xy_to_yx(keypoints)
    brief.extract(image, keypoints)
    keypoints = keypoints[brief.mask]
    keypoints = yx_to_xy(keypoints)

    return Features(keypoints, brief.descriptors)
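
`xy_to_yx` and `yx_to_xy` come from the surrounding project and are not shown here; BRIEF.extract expects keypoints in (row, col) order, which is why the conversion is needed. A minimal sketch of such helpers, assuming (N, 2) coordinate arrays:

# Hypothetical coordinate helpers: swap between (x, y) and (row, col) order.
def xy_to_yx(coords):
    return coords[:, [1, 0]]

def yx_to_xy(coords):
    return coords[:, [1, 0]]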
Example #20
frame_number = str(frame).zfill(6)  # zero-padded 6-digit frame index

# load the image into a NUMPY array using matplotlib's imread function
left_img_file = root_pathname + image_folder + sequence_number + left_camera + frame_number + '.png'
l_image = plt.imread(left_img_file)
right_img_file = root_pathname + image_folder + sequence_number + right_camera + frame_number + '.png'
r_image = plt.imread(right_img_file)

# find Harris corner features in each camera
l_keypoints = corner_peaks(corner_harris(l_image), min_distance=10)
r_keypoints = corner_peaks(corner_harris(r_image), min_distance=10)
# TODO Replace the two lines above with the Shi-Tomasi detector

# for each corner found, extract the BRIEF descriptor
extractor = BRIEF(sigma=1.0)
extractor.extract(l_image, l_keypoints)
l_descriptors = extractor.descriptors

# not all keypoints get descriptors. Remove the ones that didn't:
mask = extractor.mask
l_keypoints = l_keypoints[mask]

extractor.extract(r_image, r_keypoints)
r_descriptors = extractor.descriptors
mask = extractor.mask
r_keypoints = r_keypoints[mask]

# plot the found keypoints on top of the left image
fig, ax = plt.subplots(figsize=(20, 5))
plt.imshow(l_image, cmap=cm.gray)
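
A natural continuation (not part of the original snippet) would be to match the two descriptor sets and draw the correspondences, assuming match_descriptors and plot_matches are imported from skimage.feature:

# Hypothetical continuation: match left/right descriptors and plot the result.
matches = match_descriptors(l_descriptors, r_descriptors, cross_check=True)

fig, ax = plt.subplots(figsize=(20, 5))
plot_matches(ax, l_image, r_image, l_keypoints, r_keypoints, matches)
ax.axis('off')
plt.show()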
Example #21
def compute_costs(left, right, parameters, save_images):
    """
    first step of the sgm algorithm, matching cost based on the chosen descriptor
        A) census transform (BRIEF) and hamming distance
        B) HOG and SSD
    :param left: left image.
    :param right: right image.
    :param parameters: structure containing parameters of the algorithm.
    :param save_images: whether to save census images or not.
    :return: two H x W x D arrays (left and right) with the matching costs.
    """
    assert left.shape[0] == right.shape[0] and left.shape[1] == right.shape[
        1], 'left & right must have the same shape.'
    assert parameters.max_disparity > 0, 'maximum disparity must be greater than 0.'

    descriptor = parameters.descriptor

    height = left.shape[0]
    width = left.shape[1]
    cheight = parameters.csize[0]
    cwidth = parameters.csize[1]
    y_offset = int(cheight / 2)
    x_offset = int(cwidth / 2)
    disparity = parameters.max_disparity

    if descriptor == "BRIEF":
        brief_extractor = BRIEF(
            descriptor_size=parameters.BRIEF_descriptor_size,
            patch_size=cheight,
            mode='normal')
        img_dtype = np.uint8
        left_features = np.zeros(shape=(height, width,
                                        parameters.BRIEF_descriptor_size),
                                 dtype=bool)
        right_features = np.zeros(shape=(height, width,
                                         parameters.BRIEF_descriptor_size),
                                  dtype=bool)
    elif descriptor == "HOG":
        img_dtype = np.float
        left_features = np.zeros(shape=(height, width,
                                        parameters.HOG_orientations),
                                 dtype=np.float)
        right_features = np.zeros(shape=(height, width,
                                         parameters.HOG_orientations),
                                  dtype=np.float)
    else:
        img_dtype = np.uint8
        left_features = np.zeros(shape=(height, width), dtype=np.uint64)
        right_features = np.zeros(shape=(height, width), dtype=np.uint64)
    left_features_img = np.zeros(shape=(height, width), dtype=img_dtype)
    right_features_img = np.zeros(shape=(height, width), dtype=img_dtype)

    print('\tComputing left and right features...', end='')
    sys.stdout.flush()
    dawn = t.time()
    if descriptor == 'BRIEF':
        pixels = cartesian([np.arange(height), np.arange(width)])
        # LEFT
        brief_extractor.extract(left, pixels)
        descriptors = brief_extractor.descriptors
        if cheight == 7:
            left_features[2:-3, 2:-3] = np.reshape(
                descriptors, (height - cheight + 2, width - cwidth + 2,
                              left_features.shape[-1]))  # for cell of 7x7
        elif cheight == 15:
            left_features[6:-7, 6:-7] = np.reshape(
                descriptors, (height - cheight + 2, width - cwidth + 2,
                              left_features.shape[-1]))  # for cell of 15x15
        elif cheight == 49:
            left_features[23:-24, 23:-24] = np.reshape(
                descriptors, (height - cheight + 2, width - cwidth + 2,
                              left_features.shape[-1]))  # for cell of 49x49
        left_features_img[:] = left_features.sum(axis=-1)
        # RIGHT
        brief_extractor.extract(right, pixels)
        descriptors = brief_extractor.descriptors
        if cheight == 7:
            right_features[2:-3, 2:-3] = np.reshape(
                descriptors, (height - cheight + 2, width - cwidth + 2,
                              right_features.shape[-1]))  # for cell of 7x7
        elif cheight == 15:
            right_features[6:-7, 6:-7] = np.reshape(
                descriptors, (height - cheight + 2, width - cwidth + 2,
                              right_features.shape[-1]))  # for cell of 15x15
        elif cheight == 49:
            right_features[23:-24, 23:-24] = np.reshape(
                descriptors, (height - cheight + 2, width - cwidth + 2,
                              right_features.shape[-1]))  # for cell of 49x49
        right_features_img[:] = right_features.sum(axis=-1)
    # pixels on the border will have no features
    for y in range(y_offset, height - y_offset):
        for x in range(x_offset, width - x_offset):
            # LEFT
            image = left[(y - y_offset):(y + y_offset + 1),
                         (x - x_offset):(x + x_offset + 1)]
            if descriptor == 'HOG':
                left_features[y, x] = hog(image,
                                          parameters.HOG_orientations,
                                          pixels_per_cell=(cheight, cwidth),
                                          cells_per_block=(1, 1))
                left_features_img[y, x] = left_features[y, x].sum()
            elif descriptor == 'census':
                center_pixel = left[y, x]
                reference = np.full(shape=(cheight, cwidth),
                                    fill_value=center_pixel,
                                    dtype=np.int64)
                comparison = image - reference
                left_census = np.int64(0)
                for j in range(comparison.shape[0]):
                    for i in range(comparison.shape[1]):
                        if (i, j) != (y_offset, x_offset):
                            left_census = left_census << 1
                            if comparison[j, i] < 0:
                                bit = 1
                            else:
                                bit = 0
                            left_census = left_census | bit
                left_features_img[y, x] = np.uint8(left_census)
                left_features[y, x] = left_census

            # RIGHT
            image = right[(y - y_offset):(y + y_offset + 1),
                          (x - x_offset):(x + x_offset + 1)]
            if descriptor == 'HOG':
                right_features[y, x] = hog(image,
                                           parameters.HOG_orientations,
                                           pixels_per_cell=(cheight, cwidth),
                                           cells_per_block=(1, 1))
                right_features_img[y, x] = right_features[y, x].sum()
            elif descriptor == 'census':
                center_pixel = right[y, x]
                reference = np.full(shape=(cheight, cwidth),
                                    fill_value=center_pixel,
                                    dtype=np.int64)
                comparison = image - reference
                right_census = np.int64(0)
                for j in range(comparison.shape[0]):
                    for i in range(comparison.shape[1]):
                        if (i, j) != (y_offset, x_offset):
                            right_census = right_census << 1
                            if comparison[j, i] < 0:
                                bit = 1
                            else:
                                bit = 0
                            right_census = right_census | bit
                right_features_img[y, x] = np.uint8(right_census)
                right_features[y, x] = right_census

    dusk = t.time()
    print('\t(done in {:.2f}s)'.format(dusk - dawn))

    if save_images:
        if descriptor != "census":
            # Normalizing the summed features for visualization
            left_features_img = 255 * (
                left_features_img - left_features_img.min()).astype(
                    float) / (left_features_img.max() -
                              left_features_img.min()).astype(float)
            right_features_img = 255 * (
                right_features_img - right_features_img.min()).astype(
                    float) / (right_features_img.max() -
                              right_features_img.min()).astype(float)
        cv2.imwrite(f'{parameters.folder}/left_features.png',
                    left_features_img)
        cv2.imwrite(f'{parameters.folder}/right_features.png',
                    right_features_img)

    print('\tComputing cost volumes...', end='')
    sys.stdout.flush()
    dawn = t.time()

    if descriptor == "BRIEF":
        cost_volume_dtype = np.uint16
        lfeatures = np.zeros(shape=(height, width,
                                    parameters.BRIEF_descriptor_size),
                             dtype=np.bool)
        rfeatures = np.zeros(shape=(height, width,
                                    parameters.BRIEF_descriptor_size),
                             dtype=np.bool)
    elif descriptor == "HOG":
        cost_volume_dtype = np.float
        lfeatures = np.zeros(shape=(height, width,
                                    parameters.HOG_orientations),
                             dtype=np.float)
        rfeatures = np.zeros(shape=(height, width,
                                    parameters.HOG_orientations),
                             dtype=np.float)
    else:
        cost_volume_dtype = np.uint32
        lfeatures = np.zeros(shape=(height, width), dtype=np.int64)
        rfeatures = np.zeros(shape=(height, width), dtype=np.int64)
    left_cost_volume = np.zeros(shape=(height, width, disparity),
                                dtype=cost_volume_dtype)
    right_cost_volume = np.zeros(shape=(height, width, disparity),
                                 dtype=cost_volume_dtype)
    for d in range(0, disparity):
        # LEFT
        rfeatures[:, (x_offset +
                      d):(width -
                          x_offset)] = right_features[:, x_offset:(width - d -
                                                                   x_offset)]
        if descriptor == 'BRIEF':
            left_cost_volume[:, :,
                             d] = np.count_nonzero(left_features != rfeatures,
                                                   axis=-1)
        elif descriptor == "HOG":
            diff = left_features - rfeatures  # (H, W, orientations)
            left_cost_volume[:, :, d] = np.sqrt(np.sum(
                diff**2, 2))  # Summed Squared Difference along the last axis
        else:
            left_xor = np.int64(
                np.bitwise_xor(np.int64(left_features), rfeatures))
            left_distance = np.zeros(shape=(height, width), dtype=np.uint32)
            while not np.all(left_xor == 0):
                tmp = left_xor - 1
                mask = left_xor != 0
                left_xor[mask] = np.bitwise_and(left_xor[mask], tmp[mask])
                left_distance[mask] = left_distance[mask] + 1
            left_cost_volume[:, :, d] = left_distance

        # RIGHT
        lfeatures[:,
                  x_offset:(width - d -
                            x_offset)] = left_features[:,
                                                       (x_offset +
                                                        d):(width - x_offset)]
        if descriptor == 'BRIEF':
            right_cost_volume[:, :, d] = np.count_nonzero(
                right_features != lfeatures, axis=-1)
        elif descriptor == 'HOG':
            diff = right_features - lfeatures  # (H, W, orientations)
            right_cost_volume[:, :, d] = np.sqrt(np.sum(
                diff**2, 2))  # Summed Squared Difference along the last axis
        else:
            right_xor = np.int64(
                np.bitwise_xor(np.int64(right_features), lfeatures))
            right_distance = np.zeros(shape=(height, width), dtype=np.uint32)
            while not np.all(right_xor == 0):
                tmp = right_xor - 1
                mask = right_xor != 0
                right_xor[mask] = np.bitwise_and(right_xor[mask], tmp[mask])
                right_distance[mask] = right_distance[mask] + 1
            right_cost_volume[:, :, d] = right_distance

    dusk = t.time()
    print('\t(done in {:.2f}s)'.format(dusk - dawn))

    return left_cost_volume, right_cost_volume
Example #22
def test_color_image_unsupported_error():
    """Brief descriptors can be evaluated on gray-scale images only."""
    img = np.zeros((20, 20, 3))
    keypoints = np.asarray([[7, 5], [11, 13]])
    with testing.raises(ValueError):
        BRIEF().extract(img, keypoints)
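
Converting the image to grayscale first avoids the ValueError; a minimal sketch:

# Sketch: BRIEF extraction succeeds once the image is 2-D (grayscale).
from skimage.color import rgb2gray

img = np.zeros((100, 100, 3))
keypoints = np.asarray([[50, 50], [60, 60]])
BRIEF().extract(rgb2gray(img), keypoints)   # no ValueError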
Example #23
def start_ransac(img1, img2, brief=True, common_factor=0.25):

    img1 = transform.rescale(img1, common_factor, multichannel=False)
    img2 = transform.rescale(img2, common_factor, multichannel=False)

    print(img1.shape)
    print(img2.shape)

    if brief:
        #BRIEF
        keypoints1 = corner_peaks(corner_harris(img1), min_distance=5)
        keypoints2 = corner_peaks(corner_harris(img2), min_distance=5)

        extractor = BRIEF()

        extractor.extract(img1, keypoints1)
        keypoints1 = keypoints1[extractor.mask]
        descriptors1 = extractor.descriptors

        extractor.extract(img2, keypoints2)
        keypoints2 = keypoints2[extractor.mask]
        descriptors2 = extractor.descriptors

        matches12 = match_descriptors(descriptors1,
                                      descriptors2,
                                      cross_check=True)
    else:
        #ORB
        orb = ORB(n_keypoints=1000, fast_threshold=0.05)

        orb.detect_and_extract(img1)
        keypoints1 = orb.keypoints
        descriptors1 = orb.descriptors

        orb.detect_and_extract(img2)
        keypoints2 = orb.keypoints
        descriptors2 = orb.descriptors

        matches12 = match_descriptors(descriptors1,
                                      descriptors2,
                                      cross_check=True)

    src = keypoints2[matches12[:, 1]][:, ::-1]
    dst = keypoints1[matches12[:, 0]][:, ::-1]

    model_robust, inliers = \
        ransac((src, dst), transform.SimilarityTransform, min_samples=4, residual_threshold=2)

    model_robust_tmatrix = np.copy(model_robust.params)
    model_robust_tmatrix[0, 2] = model_robust_tmatrix[0, 2] / common_factor
    model_robust_tmatrix[1, 2] = model_robust_tmatrix[1, 2] / common_factor

    img1_ = img1
    img2_ = warp(img2, model_robust.inverse)

    if False:

        fig = plt.figure(constrained_layout=True)
        gs = fig.add_gridspec(3, 2)
        f_ax1 = fig.add_subplot(gs[0, :])
        plot_matches(f_ax1, img1, img2, keypoints1, keypoints2, matches12)
        f_ax1.axis('off')
        f_ax2 = fig.add_subplot(gs[1, 0])
        f_ax2.imshow(img1)
        f_ax2.axis('off')
        f_ax2.set_title("img1")
        f_ax3 = fig.add_subplot(gs[1, 1])
        f_ax3.imshow(img1_)
        f_ax3.axis('off')
        f_ax3.set_title("img1_")
        #f_ax4 = fig.add_subplot(gs[1, 2])
        #f_ax4.imshow(img3_)
        #f_ax4.axis('off')
        #f_ax4.set_title("img3_")
        f_ax5 = fig.add_subplot(gs[2, 0])
        f_ax5.imshow(img2)
        f_ax5.axis('off')
        f_ax5.set_title("img2")
        f_ax6 = fig.add_subplot(gs[2, 1])
        f_ax6.imshow(img2_)
        f_ax6.axis('off')
        f_ax6.set_title("img2_")
        #f_ax7 = fig.add_subplot(gs[2, 2])
        #f_ax7.imshow(img4_)
        #f_ax7.axis('off')
        #f_ax7.set_title("img4_")
        plt.show()

    return model_robust_tmatrix
Example #24
def test_unsupported_mode():
    with testing.raises(ValueError):
        BRIEF(mode='foobar')
Example #25
 def __init__(self):
     self.memeories = []
     self.fdMax = cfg.shot_h * cfg.shot_h + cfg.shot_w * cfg.shot_w
     self.extractor = BRIEF()
Example #26
import numpy as np
from skimage import io
from skimage.feature import (match_descriptors, corner_peaks, corner_fast,
                             plot_matches, BRIEF)
import matplotlib.pyplot as plt

img1 = io.imread("./data/training/image_2/000000_10.png", as_gray=True)
img2 = io.imread("./data/training/image_2/000000_11.png", as_gray=True)
extractor = BRIEF(descriptor_size=128, patch_size=9, mode='normal')

left_rows, left_cols = img1.shape

left_indices = np.empty((left_rows, left_cols, 2))
left_indices[..., 0] = np.arange(left_rows)[:, None]
left_indices[..., 1] = np.arange(left_cols)
left_indices = left_indices.reshape(left_cols * left_rows, 2)

#extractor.extract()
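
The extraction call above is left commented out; a hedged sketch of how the dense extraction might be completed for both frames, mirroring Example #15:

# Hypothetical completion: dense BRIEF descriptors for both frames.
extractor.extract(img1, left_indices)
descriptors1 = extractor.descriptors        # one row per in-bounds pixel

right_rows, right_cols = img2.shape
right_indices = np.empty((right_rows, right_cols, 2))
right_indices[..., 0] = np.arange(right_rows)[:, None]
right_indices[..., 1] = np.arange(right_cols)
right_indices = right_indices.reshape(right_cols * right_rows, 2)

extractor.extract(img2, right_indices)
descriptors2 = extractor.descriptors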
Example #27
#!/usr/bin/python2 -utt
# -*- coding: utf-8 -*-
import os
import sys
#sys.path.insert(0, '/home/ubuntu/dev/opencv-3.1/build/lib')
from aux.numpy_sift import SIFTDescriptor
import cv2
import time
import numpy as np
from skimage.feature import BRIEF
try:
    input_img_fname = sys.argv[1]
    output_fname = sys.argv[2]
except:
    print("Wrong input format. Try BRIEF.py img.jpg out.txt")
    sys.exit(1)
image = cv2.imread(input_img_fname, 0)
h, w = image.shape
print(h, w)
BR = BRIEF(patch_size=w - 1)
n_patches = h // w
keypoints = np.zeros((n_patches, 2))
t = time.time()
for i in range(n_patches):
    keypoints[i, :] = np.array([i * w + float(w) / 2., float(w) / 2.])
BR.extract(image, keypoints)
descriptors_for_net = BR.descriptors
np.savetxt(output_fname, descriptors_for_net, delimiter=' ', fmt='%i')
Example #28
def apply_brief(left, right, descriptor_size, num_elements):
    """
    computes BRIEF descriptor on both images.
    :param left: left image.
    :param right: right image.
    :param descriptor_size: size of window of the BRIEF descriptor.
    :param num_elements: length of the feature vector.
    :return: two (H x W) arrays (left and right descriptors), H = height and W = width, of type np.int64
    """

    # Build the list of keypoints. Since a dense approach is used, every pixel is a keypoint.
    pixels_coordonates = [[[j, i] for i in range(left.shape[1])]
                          for j in range(left.shape[0])]
    # The list of keypoints must be given as a list of coordinates.
    keypoints = np.array(pixels_coordonates).reshape(
        (left.shape[1] * left.shape[0]), 2)

    # Use BRIEF from the skimage package to get the descriptors of each pixel.
    extractor = BRIEF(descriptor_size=num_elements,
                      patch_size=descriptor_size,
                      mode='normal')

    extractor.extract(left, keypoints)
    descriptors1 = extractor.descriptors

    extractor.extract(right, keypoints)
    descriptors2 = extractor.descriptors

    # We now have a vector that describes our image pixel by pixel.
    # To apply SGM, we have to rebuild the initial shape of the image.
    # Depending on the size of the analysis window (descriptor_size), the image borders could not be processed,
    # so the image size is reduced by the size of the descriptor.
    descriptors1.resize((left.shape[0] - descriptor_size + 1,
                         left.shape[1] - descriptor_size + 1, num_elements),
                        refcheck=False)
    descriptors2.resize((left.shape[0] - descriptor_size + 1,
                         left.shape[1] - descriptor_size + 1, num_elements),
                        refcheck=False)

    # Here, the descriptor size is 128 bits.
    # The problem is that SGM and the Hamming distance take an integer as input,
    # so we have to turn the 128 bits into a single integer.
    # numpy and the following processing (Hamming distance) do not accept integers larger than 64 bits,
    # so we have to reduce the dimensionality of our descriptor.
    # We tested several reduction factors and chose to keep 1 bit out of 20.
    # It is the largest dimensionality reduction tested that does not alter the results obtained on the cones image.

    concat_desc1 = np.apply_along_axis(
        lambda list: int(''.join(
            [str(int(v)) if i % 20 == 0 else '' for i, v in enumerate(list)])),
        2, descriptors1)
    concat_desc2 = np.apply_along_axis(
        lambda list: int(''.join(
            [str(int(v)) if i % 20 == 0 else '' for i, v in enumerate(list)])),
        2, descriptors2)

    # Finally, since a matrix of the same size as the input image is required, we pad with zeros
    # where information is missing, along the image borders.
    padding_pattern = (descriptor_size // 2 - 1, descriptor_size // 2)

    padded_descr1 = np.pad(concat_desc1, (padding_pattern, padding_pattern),
                           'constant')
    padded_descr2 = np.pad(concat_desc2, (padding_pattern, padding_pattern),
                           'constant')

    return (padded_descr1, padded_descr2)
Example #29
from skimage import data, transform
from skimage.feature import (match_descriptors, corner_peaks, corner_harris,
                             plot_matches, BRIEF)
from skimage.color import rgb2gray
import matplotlib.pyplot as plt


img1 = rgb2gray(data.astronaut())
tform = transform.AffineTransform(scale=(1.2, 1.2), translation=(0, -100))
img2 = transform.warp(img1, tform)
img3 = transform.rotate(img1, 25)

keypoints1 = corner_peaks(corner_harris(img1), min_distance=5)
keypoints2 = corner_peaks(corner_harris(img2), min_distance=5)
keypoints3 = corner_peaks(corner_harris(img3), min_distance=5)

extractor = BRIEF()

extractor.extract(img1, keypoints1)
keypoints1 = keypoints1[extractor.mask]
descriptors1 = extractor.descriptors

extractor.extract(img2, keypoints2)
keypoints2 = keypoints2[extractor.mask]
descriptors2 = extractor.descriptors

extractor.extract(img3, keypoints3)
keypoints3 = keypoints3[extractor.mask]
descriptors3 = extractor.descriptors

matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)
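
A typical continuation (as in the scikit-image gallery example this snippet resembles) visualizes the matched keypoints:

# Sketch: visualize the matches for both transformed versions of the image.
fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(11, 8))
plt.gray()

plot_matches(ax[0], img1, img2, keypoints1, keypoints2, matches12)
ax[0].axis('off')
ax[0].set_title("Original Image vs. Transformed Image")

plot_matches(ax[1], img1, img3, keypoints1, keypoints3, matches13)
ax[1].axis('off')
ax[1].set_title("Original Image vs. Rotated Image")

plt.show()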
Example #30
gray2 = rgb2gray(img2)
fig, axes = plt.subplots(1, 2, figsize=(8, 4))
ax = axes.ravel()
ax[0].imshow(img2)
ax[0].set_title("Original")
ax[1].imshow(gray2, cmap=plt.cm.gray)
ax[1].set_title("Grayscale")
fig.tight_layout()
plt.show()

tform = transform.AffineTransform(scale=(1.3, 1.1), rotation=0.5,
                                  translation=(0, -200))
gray3 = transform.warp(gray1, tform)
gray4 = transform.rotate(gray1, 180)

descriptor_extractor = BRIEF(patch_size=5)

keypoints1 = corner_peaks(corner_harris(gray1), min_distance=1, threshold_rel=0)
descriptor_extractor.extract(gray1, keypoints1)
descriptors1 = descriptor_extractor.descriptors

keypoints2 = corner_peaks(corner_harris(gray2), min_distance=1, threshold_rel=0)
descriptor_extractor.extract(gray2, keypoints2)
descriptors2 = descriptor_extractor.descriptors

keypoints3 = corner_peaks(corner_harris(gray3), min_distance=1, threshold_rel=0)
descriptor_extractor.extract(gray3, keypoints3)
descriptors3 = descriptor_extractor.descriptors

keypoints4 = corner_peaks(corner_harris(gray4), min_distance=1, threshold_rel=0)
descriptor_extractor.extract(gray4, keypoints4)