Example #1
File: process.py  Project: kirk86/kaggle
def orb(img):
    img1 = rgb2gray(img)
    img2 = transform.rotate(img1, 180)
    tform = transform.AffineTransform(scale=(1.3, 1.1),
                                      rotation=0.5,
                                      translation=(0, -200))
    img3 = transform.warp(img1, tform)

    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(img1)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors

    descriptor_extractor.detect_and_extract(img2)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors

    descriptor_extractor.detect_and_extract(img3)
    keypoints3 = descriptor_extractor.keypoints
    descriptors3 = descriptor_extractor.descriptors

    matches1 = match_descriptors(descriptors1, descriptors2, cross_check=True)
    matches2 = match_descriptors(descriptors1, descriptors3, cross_check=True)

    return np.hstack(
        (keypoints1[matches1[:, 0]].ravel(),
         keypoints3[matches2[:, 1]].ravel()))
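A minimal way to exercise this helper, assuming the module-level imports the snippet relies on (numpy as np, plus skimage's rgb2gray, transform, ORB and match_descriptors):

from skimage import data

feat = orb(data.astronaut())  # RGB test image bundled with skimage
print(feat.shape)             # concatenated keypoint coordinates of the matches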
Example #2
def show_matches(img1, img2, feat1, feat2):
    matches12 = match_descriptors(
        feat1["descriptors"], feat2["descriptors"], cross_check=True
    )

    fig, (ax3, ax2) = plt.subplots(1, 2, figsize=(15, 5))
    c_matches = matches12  # same descriptor matching; no need to recompute

    plot_matches(ax3, img1, img2, feat1["keypoints"], feat2["keypoints"], matches12)

    ax2.plot(feat1["keypoints"][:, 1], feat1["keypoints"][:, 0], ".", label="Before")

    ax2.plot(feat2["keypoints"][:, 1], feat2["keypoints"][:, 0], ".", label="After")

    for i, (c_idx, n_idx) in enumerate(c_matches):
        x_vec = [feat1["keypoints"][c_idx, 0], feat2["keypoints"][n_idx, 0]]
        y_vec = [feat1["keypoints"][c_idx, 1], feat2["keypoints"][n_idx, 1]]
        dist = np.sqrt(np.square(np.diff(x_vec)) + np.square(np.diff(y_vec)))
        alpha = float(np.clip(50 / dist, 0, 1))  # scalar alpha for matplotlib

        ax2.plot(y_vec, x_vec, "k-", alpha=alpha, label="Match" if i == 0 else "")

    ax2.legend()

    ax3.set_title(r"{} $\rightarrow$ {}".format("Before", "After"))
Example #3
def test_max_distance():
    descs1 = np.zeros((10, 128))
    descs2 = np.zeros((15, 128))

    descs1[0, :] = 1

    matches = match_descriptors(descs1,
                                descs2,
                                metric='euclidean',
                                max_distance=0.1,
                                cross_check=False)
    assert len(matches) == 9

    matches = match_descriptors(descs1,
                                descs2,
                                metric='euclidean',
                                max_distance=np.sqrt(128.1),
                                cross_check=False)
    assert len(matches) == 10

    matches = match_descriptors(descs1,
                                descs2,
                                metric='euclidean',
                                max_distance=0.1,
                                cross_check=True)
    assert_equal(matches, [[1, 0]])

    matches = match_descriptors(descs1,
                                descs2,
                                metric='euclidean',
                                max_distance=np.sqrt(128.1),
                                cross_check=True)
    assert_equal(matches, [[1, 0]])
Example #4
def test_binary_descriptors_unequal_descriptor_sizes_error():
    """Sizes of descriptors of keypoints to be matched should be equal."""
    descs1 = np.array([[True, True, False, True], [False, True, False, True]])
    descs2 = np.array([[True, False, False, True, False],
                       [False, True, True, True, False]])
    with testing.raises(ValueError):
        match_descriptors(descs1, descs2)
Example #5
def match_robust(src_keypoints,
                 src_descriptors,
                 dest_keypoints,
                 dest_descriptors,
                 method='brute-force',
                 model_class=AffineTransform,
                 min_samples=4,
                 residual_threshold=1,
                 max_trials=5000,
                 **kwargs):
    """Find matches of keypoints between two images and filter outlier with RANSAC.
    
    src_keypoints, dest_keypoints:
        (both) `np.array(float)` of the shape (N, 2)
        Coordinates of keypoints in format (x, y) each
    src_descriptors, dest_descriptors:
        (both) `np.array(float)` of the shape (N, P) 
        Descriptors of the keypoints. P is a descriptor dim
     model_class:
         `skimage.transform`
         Model of plane transformation
     min_samples:
         int
         Number of points needed to construct the transformation.
         The harder the model is the more points are needed.
         See the docs to know about each particular model.
     residual_threshold:
         int
         Distance in pixel in which the points are considered as "the same".
     max_trials:
         int
         Number of trials to do before stopping.
         The more is the number of keypoints, the larger `max_trials` should be.
     **kwargs:
         dict
         Other params, kept unchanged
     
    
    return:
        model, matches
        model: transformation of the 1st frame to the second
        matches: 
            `np.array` of shape == (num_matched_keypoints, 2)
            List of matches in format: np.array([[src_kp_id1, dest_kp_id1], ...], dtype=int)
            
    """
    if method == 'brute-force':
        matches = match_descriptors(src_descriptors, dest_descriptors)
    elif method == 'flann':
        # No FLANN backend here; this branch currently uses the same
        # brute-force matcher from skimage.
        matches = match_descriptors(src_descriptors, dest_descriptors)
    model, inliers = ransac(
        (src_keypoints[matches[:, 0]], dest_keypoints[matches[:, 1]]),
        model_class=model_class,
        min_samples=min_samples,
        residual_threshold=residual_threshold,
        max_trials=max_trials,
        **kwargs)
    matches = matches[inliers]
    return model, matches
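A minimal usage sketch for match_robust, assuming the snippet's own module-level imports (match_descriptors, ransac, AffineTransform) are available:

from skimage import data
from skimage.color import rgb2gray
from skimage.feature import ORB
from skimage.transform import AffineTransform, rotate

img0 = rgb2gray(data.astronaut())
img1 = rotate(img0, 5)  # synthetic second frame

orb = ORB(n_keypoints=500)
orb.detect_and_extract(img0)
kp0, desc0 = orb.keypoints, orb.descriptors
orb.detect_and_extract(img1)
kp1, desc1 = orb.keypoints, orb.descriptors

model, matches = match_robust(kp0, desc0, kp1, desc1,
                              model_class=AffineTransform,
                              residual_threshold=2)
print(len(matches), 'inlier matches')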
Example #6
def test_binary_descriptors_unequal_descriptor_sizes_error():
    """Sizes of descriptors of keypoints to be matched should be equal."""
    descs1 = np.array([[True, True, False, True],
                       [False, True, False, True]])
    descs2 = np.array([[True, False, False, True, False],
                       [False, True, True, True, False]])
    with testing.raises(ValueError):
        match_descriptors(descs1, descs2)
Example #7
def test_binary_descriptors_rotation_crosscheck_false():
    """Verify matched keypoints and their corresponding masks results between
    image and its rotated version with the expected keypoint pairs with
    cross_check disabled."""
    img = data.astronaut()
    img = rgb2gray(img)
    tform = transform.SimilarityTransform(scale=1,
                                          rotation=0.15,
                                          translation=(0, 0))
    rotated_img = transform.warp(img, tform, clip=False)

    extractor = BRIEF(descriptor_size=512)

    keypoints1 = corner_peaks(corner_harris(img),
                              min_distance=5,
                              threshold_abs=0,
                              threshold_rel=0.1)
    extractor.extract(img, keypoints1)
    descriptors1 = extractor.descriptors

    keypoints2 = corner_peaks(corner_harris(rotated_img),
                              min_distance=5,
                              threshold_abs=0,
                              threshold_rel=0.1)
    extractor.extract(rotated_img, keypoints2)
    descriptors2 = extractor.descriptors

    matches = match_descriptors(descriptors1, descriptors2, cross_check=False)

    exp_matches1 = np.array([
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
        20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
        38, 39, 40, 41, 42, 43, 44, 45, 46
    ])
    exp_matches2 = np.array([
        0, 31, 2, 3, 1, 4, 6, 4, 38, 5, 27, 7, 13, 10, 9, 27, 7, 11, 15, 8, 23,
        14, 12, 16, 10, 25, 18, 19, 21, 20, 41, 24, 25, 26, 28, 27, 22, 23, 29,
        30, 31, 32, 35, 33, 34, 30, 36
    ])
    assert_equal(matches[:, 0], exp_matches1)
    assert_equal(matches[:, 1], exp_matches2)

    # minkowski takes a different code path, therefore we test it explicitly
    matches = match_descriptors(descriptors1,
                                descriptors2,
                                metric='minkowski',
                                cross_check=False)
    assert_equal(matches[:, 0], exp_matches1)
    assert_equal(matches[:, 1], exp_matches2)

    # it also has an extra parameter
    matches = match_descriptors(descriptors1,
                                descriptors2,
                                metric='minkowski',
                                p=4,
                                cross_check=False)
    assert_equal(matches[:, 0], exp_matches1)
    assert_equal(matches[:, 1], exp_matches2)
Example #8
def epipolar_rectify(imL,imR,show_matches=True):
    descriptor_extractor = ORB(n_keypoints=2000)
    
    descriptor_extractor.detect_and_extract(imL)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors    
    
    descriptor_extractor.detect_and_extract(imR)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors        
    
    matches12 = match_descriptors(descriptors1, descriptors2,metric='hamming', cross_check=True)
    
    pts1=keypoints1[matches12[:,0],:]
    pts2=keypoints2[matches12[:,1],:]    
    
    
    F, mask = cv2.findFundamentalMat(pts1,pts2,cv2.FM_RANSAC)
    pts1 = pts1[mask.ravel()==1]
    pts2 = pts2[mask.ravel()==1]
    
    res,H1,H2=cv2.stereoRectifyUncalibrated(pts1,pts2,F,imL.shape,10)
    
    if show_matches:
        fig, ax = plt.subplots(nrows=1, ncols=1)
        plot_matches(ax, imL, imR, keypoints1, keypoints2, matches12)    
    
    return H1,H2
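A rough sketch of applying the returned rectification homographies with OpenCV; imL and imR are assumed to be 2-D grayscale arrays already loaded elsewhere:

import cv2
import numpy as np

H1, H2 = epipolar_rectify(imL, imR, show_matches=False)
h, w = imL.shape
rectL = cv2.warpPerspective(imL.astype(np.float32), H1, (w, h))
rectR = cv2.warpPerspective(imR.astype(np.float32), H2, (w, h))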
Example #9
def drawMatches2(image1, image2, feat1, feat2):
    # image1 = cv2.imread(file1)
    # image2 = cv2.imread(file2)
    image1 = np.array(cv2.cvtColor(np.array(image1), cv2.COLOR_BGR2RGB))
    image2 = np.array(cv2.cvtColor(np.array(image2), cv2.COLOR_BGR2RGB))

    matches = match_descriptors(feat1['descriptors'],
                                feat2['descriptors'],
                                cross_check=True)
    keypoints_left = feat1['keypoints'][matches[:, 0], :2].T
    keypoints_right = feat2['keypoints'][matches[:, 1], :2].T

    # print(keypoints_left.shape, keypoints_right.shape)

    for i in range(keypoints_left.shape[1]):
        image1 = cv2.circle(
            image1, (int(keypoints_left[0, i]), int(keypoints_left[1, i])), 2,
            (0, 0, 255), 4)
    for i in range(keypoints_right.shape[1]):
        image2 = cv2.circle(
            image2, (int(keypoints_right[0, i]), int(keypoints_right[1, i])),
            2, (0, 0, 255), 4)

    im4 = cv2.hconcat([image1, image2])

    for i in range(keypoints_left.shape[1]):
        im4 = cv2.line(im4,
                       (int(keypoints_left[0, i]), int(keypoints_left[1, i])),
                       (int(keypoints_right[0, i]) + image1.shape[1],
                        int(keypoints_right[1, i])), (0, 255, 0), 1)

    cv2.imshow("Image_lines", im4)
    cv2.waitKey(0)
Example #10
def findH(img1, img2):
    from skimage.feature import ORB, match_descriptors

    # load image
    img1_gray = skimage.color.rgb2gray(img1)
    img2_gray = skimage.color.rgb2gray(img2)

    # extract points
    detector_extractor1 = ORB(n_keypoints=3000)
    detector_extractor1.detect_and_extract(img1_gray)
    detector_extractor2 = ORB(n_keypoints=3000)
    detector_extractor2.detect_and_extract(img2_gray)
    matches = match_descriptors(detector_extractor1.descriptors,
                                detector_extractor2.descriptors)
    match_pts1 = detector_extractor1.keypoints[matches[:, 0]].astype(int)
    match_pts2 = detector_extractor2.keypoints[matches[:, 1]].astype(int)

    # call RANSAC
    match_pts1 = np.flip(match_pts1, axis=1)
    match_pts2 = np.flip(match_pts2, axis=1)
    H_2to1, _ = computeHransac(match_pts1, match_pts2)
    H_2to1 = H_2to1 / H_2to1[2, 2]
    print('transform H:')
    print(H_2to1)

    return H_2to1
Example #11
    def match_features(self):
        self.tforms = [ProjectiveTransform()]
        self.new_corners = np.copy(self.corners)

        for i in range(1, self.num_imgs):
            # Find correspondences between I(n) and I(n-1).
            matches = match_descriptors(self.descriptors[i - 1],
                                        self.descriptors[i],
                                        cross_check=True)

            # Estimate the transformation between I(n) and I(n-1).
            src = self.keypoints[i][matches[:, 1]][:, ::-1]
            dst = self.keypoints[i - 1][matches[:, 0]][:, ::-1]

            model, _ = ransac((src, dst),
                              ProjectiveTransform,
                              4,
                              residual_threshold=2,
                              max_trials=2000)
            self.tforms.append(
                ProjectiveTransform(model.params @ self.tforms[-1].params))

            # Compute new corners transformed by models
            self.new_corners[i] = self.tforms[-1](self.corners[i])

        corners_min = np.min(self.new_corners, axis=1)
        corners_max = np.max(self.new_corners, axis=1)

        self.xLim = corners_max[:, 0] - corners_min[:, 0]
        self.yLim = corners_max[:, 1] - corners_min[:, 1]
Example #12
def CENSURETransform(img1, img2, fp1, fp2):
    bw_img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY).astype("double")
    bw_img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY).astype("double")
    cen = CENSURE()
    cen.detect(bw_img1)
    kp1 = cen.keypoints
    cen.detect(bw_img2)
    kp2 = cen.keypoints

    kp1 = np.append(kp1, fp1, axis=0)
    kp2 = np.append(kp2, fp2, axis=0)

    matches = match_descriptors(kp1, kp2, cross_check=True)

    # fig, ax = plt.subplots(nrows=1, ncols=1)
    # plot_matches(ax, img1, img2, kp1, kp2, matches)
    # plt.show()

    points1 = kp1[matches[:, 0]]
    points2 = kp2[matches[:, 1]]

    M, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
    if M is None:
        return
    else:
        dst = cv2.warpPerspective(img1, M, (img1.shape[1], img1.shape[0]))

    return dst
Example #13
def iris_scan_orb(request):

    from skimage import io
    from skimage.feature import (match_descriptors, ORB)
    from skimage.color import rgb2gray
    from .settings import MEDIA_ROOT

    img1 = rgb2gray(io.imread(MEDIA_ROOT + '/IRIS3.jpg'))  # Query
    img2 = rgb2gray(io.imread(MEDIA_ROOT + '/IRIS6.jpg'))  # Comparing to

    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(img1)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors  # Query Descriptor

    descriptor_extractor.detect_and_extract(img2)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors  # Comparing To Descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    # print("Matched: ", len(matches12), " of ", len(descriptors1))
    percent = len(matches12) / len(descriptors1) * 100

    # print("Percent Match - ", percent, "%")
    """if percent > 80:
        print("Matched!")
    else:
        print("Not Matched!")"""

    return render(request, 'scan.html', {'percent': percent})
Example #14
def iris_scan_orb_android(file_name):

    from skimage import io
    from skimage.feature import (match_descriptors, ORB)
    from skimage.color import rgb2gray
    from .settings import MEDIA_ROOT

    img1 = rgb2gray(io.imread(MEDIA_ROOT + '/' + file_name))  # Query
    img2 = rgb2gray(io.imread(MEDIA_ROOT + '/IRIS9.jpg'))  # Comparing to

    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(img1)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors  # Query Descriptor

    descriptor_extractor.detect_and_extract(img2)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors  # Comparing To Descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    percent = len(matches12) / len(descriptors1) * 100

    return percent
Example #15
def featurePointMatching(image0, image1, decimation=1, n_keypoints=750):
    """
    Get a transformation model knowing 2 images
    """
    # 2 images, possibly with a certain decimation factor (reduce the size of the image)
    image0 = transform.rescale(image0, 1 / float(decimation))
    image1 = transform.rescale(image1, 1 / float(decimation))
    orb = ORB(n_keypoints=n_keypoints,
              fast_threshold=0.05)  # definition of the ORB detector
    # Get the keypoints from the first image
    orb.detect_and_extract(image0)
    keypoints1 = orb.keypoints
    descriptors1 = orb.descriptors
    # Get the keypoints from the second image
    orb.detect_and_extract(image1)
    keypoints2 = orb.keypoints
    descriptors2 = orb.descriptors
    # Matching of descriptors
    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
    # Select keypoints from both images with RANSAC algorithm
    src = keypoints1[matches12[:, 0]][:, ::-1]
    dst = keypoints2[matches12[:, 1]][:, ::-1]
    model_robust, inliers = \
        ransac((src, dst), transform.EuclideanTransform,
               min_samples=6, residual_threshold=2)
    # Mark the outliers (complement of the RANSAC inliers)
    outliers = ~inliers
    return model_robust, inliers, outliers, src, dst
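A short usage sketch (assuming the snippet's module-level skimage imports); the synthetic rotation is only for illustration:

from skimage import data, transform

image0 = data.camera()
image1 = transform.rotate(image0, 2)  # small synthetic rotation

model_robust, inliers, outliers, src, dst = featurePointMatching(
    image0, image1, decimation=1, n_keypoints=500)

# model_robust maps image0 coordinates to image1 coordinates, so warping
# image1 with it resamples image1 in image0's frame.
registered = transform.warp(image1, model_robust)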
Example #16
def orb(img_path):
    image = PIL.Image.open(img_path).convert('L')
    img1 = np.array(image)
    img2 = tf.rotate(img1, 180)

    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(img1)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors

    descriptor_extractor.detect_and_extract(img2)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    fig, ax = plt.subplots(nrows=2, ncols=1)

    plt.gray()

    plot_matches(ax[0], img1, img2, keypoints1, keypoints2, matches12)
    ax[0].axis('off')
    ax[0].set_title(img_path)

    plt.show()

    return matches12.shape[0]
Example #17
def iris_scan_orb_android(file_name):

    from skimage import io
    from skimage.feature import (match_descriptors, ORB)
    from skimage.color import rgb2gray
    from .settings import MEDIA_ROOT

    img1 = rgb2gray(io.imread(MEDIA_ROOT + '/'+ file_name))  # Query
    img2 = rgb2gray(io.imread(MEDIA_ROOT + '/IRIS9.jpg'))  # Comparing to

    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(img1)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors  # Query Descriptor

    descriptor_extractor.detect_and_extract(img2)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors  # Comparing To Descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    percent = len(matches12) / len(descriptors1) * 100

    return percent
Example #18
def	drawMatches(image1, image2, feat1, feat2):
	image1 = np.array(image1)
	image2 = np.array(image2)

	matches = match_descriptors(feat1['descriptors'], feat2['descriptors'], cross_check=True)
	print('Number of raw matches: %d.' % matches.shape[0])

	keypoints_left = feat1['keypoints'][matches[:, 0], : 2]
	keypoints_right = feat2['keypoints'][matches[:, 1], : 2]
	np.random.seed(0)
	model, inliers = ransac(
		(keypoints_left, keypoints_right),
		ProjectiveTransform, min_samples=4,
		residual_threshold=8, max_trials=10000
	)
	n_inliers = np.sum(inliers)
	print('Number of inliers: %d.' % n_inliers)

	inlier_keypoints_left = [cv2.KeyPoint(point[0], point[1], 1) for point in keypoints_left[inliers]]
	inlier_keypoints_right = [cv2.KeyPoint(point[0], point[1], 1) for point in keypoints_right[inliers]]
	placeholder_matches = [cv2.DMatch(idx, idx, 1) for idx in range(n_inliers)]
	image3 = cv2.drawMatches(image1, inlier_keypoints_left, image2, inlier_keypoints_right, placeholder_matches, None)

	plt.figure(figsize=(20, 20))
	plt.imshow(image3)
	plt.axis('off')
	plt.show()
Example #19
def test_binary_descriptors():
    descs1 = np.array([[True, True, False, True, True],
                       [False, True, False, True, True]])
    descs2 = np.array([[True, False, False, True, False],
                       [False, False, True, True, True]])
    matches = match_descriptors(descs1, descs2)
    assert_equal(matches, [[0, 0], [1, 1]])
Example #20
def iris_scan_orb(request):

    from skimage import io
    from skimage.feature import (match_descriptors, ORB)
    from skimage.color import rgb2gray
    from .settings import MEDIA_ROOT

    img1 = rgb2gray(io.imread(MEDIA_ROOT + '/IRIS3.jpg'))  # Query
    img2 = rgb2gray(io.imread(MEDIA_ROOT + '/IRIS6.jpg'))  # Comparing to

    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(img1)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors  # Query Descriptor

    descriptor_extractor.detect_and_extract(img2)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors  # Comparing To Descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    # print("Matched: ", len(matches12), " of ", len(descriptors1))
    percent = len(matches12) / len(descriptors1) * 100

    # print("Percent Match - ", percent, "%")

    """if percent > 80:
        print("Matched!")
    else:
        print("Not Matched!")"""

    return render(request, 'scan.html', {'percent': percent})
Example #21
def feature_registration(src, dst):
    """Register dst to src using feature detection."""
    # First convert both image to grayscale.
    src_gray = rgb2gray(src)
    dst_gray = rgb2gray(dst)

    # Scale down.
    src_scaled = transform.rescale(src_gray, 0.25)
    dst_scaled = transform.rescale(dst_gray, 0.25)

    orb = ORB(n_keypoints=1000, fast_threshold=0.05)
    orb.detect_and_extract(src_scaled)
    src_keypoints = orb.keypoints
    src_descriptors = orb.descriptors

    orb.detect_and_extract(dst_scaled)
    dst_keypoints = orb.keypoints
    dst_descriptors = orb.descriptors

    matches = match_descriptors(src_descriptors, dst_descriptors, cross_check=True)

    src_points = src_keypoints[matches[:,0]][:, ::-1]
    dst_points = dst_keypoints[matches[:, 1]][:, ::-1]

    model, inlier = ransac(
        (src_points, dst_points), ProjectiveTransform,
        min_samples=4, residual_threshold=2)
Example #22
    def get_feature_displacment(self, img1, img2):

        # if cfg.shot_c == 3:
        #     img1 = rgb2gray(img1)
        #     img2 = rgb2gray(img2)
        keypoints1 = corner_peaks(corner_harris(img1),
                                  min_distance=5,
                                  threshold_rel=0.02)
        keypoints2 = corner_peaks(corner_harris(img2),
                                  min_distance=5,
                                  threshold_rel=0.02)

        self.extractor.extract(img1, keypoints1)
        keypoints1 = keypoints1[self.extractor.mask]
        descriptors1 = self.extractor.descriptors

        self.extractor.extract(img2, keypoints2)
        keypoints2 = keypoints2[self.extractor.mask]
        descriptors2 = self.extractor.descriptors

        if descriptors1.shape[0] == 0 or descriptors2.shape[0] == 0:
            return None  # no feature found

        matches = match_descriptors(descriptors1,
                                    descriptors2,
                                    cross_check=True,
                                    max_ratio=0.85)
        if matches.shape[0] < 4: return None
        dist = np.sum(
            (keypoints1[matches[:, 0]] - keypoints2[matches[:, 1]])**2, axis=1)
        return np.mean(self.median_outlier_filter(dist))
Example #23
def test_binary_descriptors_lena_rotation_crosscheck_true():
    """Verify matched keypoints and their corresponding masks results between
    lena image and its rotated version with the expected keypoint pairs with
    cross_check enabled."""
    img = data.lena()
    img = rgb2gray(img)
    tform = tf.SimilarityTransform(scale=1, rotation=0.15, translation=(0, 0))
    rotated_img = tf.warp(img, tform, clip=False)

    extractor = BRIEF(descriptor_size=512)

    keypoints1 = corner_peaks(corner_harris(img), min_distance=5)
    extractor.extract(img, keypoints1)
    descriptors1 = extractor.descriptors

    keypoints2 = corner_peaks(corner_harris(rotated_img), min_distance=5)
    extractor.extract(rotated_img, keypoints2)
    descriptors2 = extractor.descriptors

    matches = match_descriptors(descriptors1, descriptors2, cross_check=True)

    exp_matches1 = np.array([
        0, 1, 2, 4, 6, 7, 9, 10, 11, 12, 13, 15, 16, 17, 19, 20, 21, 24, 26,
        27, 28, 29, 30, 35, 36, 38, 39, 40, 42, 44, 45
    ])
    exp_matches2 = np.array([
        33, 0, 35, 1, 3, 2, 6, 4, 9, 11, 10, 7, 8, 5, 14, 13, 15, 16, 17, 18,
        19, 21, 22, 24, 23, 26, 27, 25, 28, 29, 30
    ])
    assert_equal(matches[:, 0], exp_matches1)
    assert_equal(matches[:, 1], exp_matches2)
Example #24
def	drawMatches(image1, image2, feat1, feat2):
	t0 = time.time()
	matches = match_descriptors(feat1['descriptors'], feat2['descriptors'], cross_check=True)
	t1 = time.time()
	print("Time to extract matches: ", t1-t0)
	print('Number of raw matches: %d.' % matches.shape[0])

	keypoints_left = feat1['keypoints'][matches[:, 0], : 2]
	keypoints_right = feat2['keypoints'][matches[:, 1], : 2]
	
	# print(type(matches), matches.shape, feat1['keypoints'].shape)
	# print(feat1['keypoints'][0:4, :])

	np.random.seed(0)
	t0 = time.time()
	model, inliers = ransac(
		(keypoints_left, keypoints_right),
		AffineTransform, min_samples=4,
		residual_threshold=8, max_trials=10000
	)
	t1 = time.time()
	print("Time for ransac: ", t1-t0)
	n_inliers = np.sum(inliers)
	print('Number of inliers: %d.' % n_inliers)

	inlier_keypoints_left = [cv2.KeyPoint(point[0], point[1], 1) for point in keypoints_left[inliers]]
	inlier_keypoints_right = [cv2.KeyPoint(point[0], point[1], 1) for point in keypoints_right[inliers]]
	placeholder_matches = [cv2.DMatch(idx, idx, 1) for idx in range(n_inliers)]
	image3 = cv2.drawMatches(image1, inlier_keypoints_left, image2, inlier_keypoints_right, placeholder_matches, None)

	plt.figure(figsize=(20, 20))
	plt.imshow(image3)
	plt.axis('off')
	plt.show()
Example #25
File: MatchPics.py  Project: nicwigs/491
def MatchPics(I1,I2):
    
    if I1.ndim == 3:
        I1 = rgb2gray(I1)   
    if I2.ndim == 3:
        I2 = rgb2gray(I2)
    
    points1 = corner_peaks(corner_fast(I1,n=12,threshold=0.15),min_distance=1)
    points2 = corner_peaks(corner_fast(I2,n=12,threshold=0.15),min_distance=1)
    
    extractor = BRIEF()
    
    extractor.extract(I1,points1)
    points1 = points1[extractor.mask]
    descriptors1 = extractor.descriptors
    
    extractor.extract(I2,points2)
    points2 = points2[extractor.mask]
    descriptors2 = extractor.descriptors
    
    matches = match_descriptors(descriptors1,descriptors2,metric = 'hamming',cross_check=True)
    
    #these points are y,x (row,col)
    locs1 = points1[matches[:,0]]
    locs2 = points2[matches[:,1]]
    #Change to x,y (col,row)
    xy1 = np.array([locs1[:,1],locs1[:,0]])
    xy1 = xy1.transpose()
    xy2 = np.array([locs2[:,1],locs2[:,0]])
    xy2 = xy2.transpose()
    fig, ax = plt.subplots()
    plot_matches(ax,I1,I2,points1,points2,matches,keypoints_color='r',only_matches=True)#,matches_color='y')
    
    return [xy1,xy2]
Example #26
def test_binary_descriptors_rotation_crosscheck_true():
    """Verify matched keypoints and their corresponding masks results between
    image and its rotated version with the expected keypoint pairs with
    cross_check enabled."""
    img = data.astronaut()
    img = rgb2gray(img)
    tform = tf.SimilarityTransform(scale=1, rotation=0.15, translation=(0, 0))
    rotated_img = tf.warp(img, tform, clip=False)

    extractor = BRIEF(descriptor_size=512)

    keypoints1 = corner_peaks(corner_harris(img), min_distance=5,
                              threshold_abs=0, threshold_rel=0.1)
    extractor.extract(img, keypoints1)
    descriptors1 = extractor.descriptors

    keypoints2 = corner_peaks(corner_harris(rotated_img), min_distance=5,
                              threshold_abs=0, threshold_rel=0.1)
    extractor.extract(rotated_img, keypoints2)
    descriptors2 = extractor.descriptors

    matches = match_descriptors(descriptors1, descriptors2, cross_check=True)

    exp_matches1 = np.array([ 0,  2,  3,  4,  5,  6,  9, 11, 12, 13, 14, 17,
                             18, 19, 21, 22, 23, 26, 27, 28, 29, 31, 32, 33,
                             34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46])
    exp_matches2 = np.array([ 0,  2,  3,  1,  4,  6,  5,  7, 13, 10,  9, 11,
                             15,  8, 14, 12, 16, 18, 19, 21, 20, 24, 25, 26,
                             28, 27, 22, 23, 29, 30, 31, 32, 35, 33, 34, 36])
    assert_equal(matches[:, 0], exp_matches1)
    assert_equal(matches[:, 1], exp_matches2)
Example #27
def test_binary_descriptors():
    descs1 = np.array([[True, True, False, True, True],
                     [False, True, False, True, True]])
    descs2 = np.array([[True, False, False, True, False],
                     [False, False, True, True, True]])
    matches = match_descriptors(descs1, descs2)
    assert_equal(matches, [[0, 0], [1, 1]])
Example #28
def main():
    baseImg = loadResized("base.jpg", 600, 410)
    atlasImg = loadResized("atlas.jpg", 600, 410)

    orb = (ORB(n_keypoints=800,
               fast_threshold=0.05), ORB(n_keypoints=800, fast_threshold=0.05))
    orb[0].detect_and_extract(baseImg)
    orb[1].detect_and_extract(atlasImg)
    baseData = [orb[0].keypoints, orb[0].descriptors]
    atlasData = [orb[1].keypoints, orb[1].descriptors]

    match = match_descriptors(baseData[1], atlasData[1])

    dst = baseData[0][match[:, 0]][:, ::-1]
    src = atlasData[0][match[:, 1]][:, ::-1]

    robust, inliers = ransac((src, dst),
                             ProjectiveTransform,
                             min_samples=4,
                             residual_threshold=1,
                             max_trials=300)

    r, c = baseImg.shape[:2]
    corners = np.array([[0, 0], [0, r], [c, 0], [c, r]])
    warpedCorners = robust(corners)

    allCorners = np.vstack((warpedCorners, corners))
    cornerMin = np.min(allCorners, axis=0)
    cornerMax = np.max(allCorners, axis=0)
    outputShape = (cornerMax - cornerMin)
    outputShape = np.ceil(outputShape[::-1]).astype(int)

    offSet = SimilarityTransform(translation=cornerMin)
    atlasWarped = warp(atlasImg,
                       offSet.inverse,
                       order=3,
                       output_shape=outputShape,
                       cval=-1)
    atlasMask = (atlasWarped != -1)
    atlasWarped[~atlasMask] = 0

    fig, ax = plt.subplots(figsize=(12, 12))
    diffImg = atlasWarped - baseImg
    ax.imshow(diffImg, cmap="gray")
    ax.axis("off")
    plt.show()

    compare(atlasWarped, baseImg, figsize=(12, 10))

    costs = generateCosts(np.abs(atlasWarped, baseImg), atlasWarped & baseImg)
    fig, ax = plt.subplots(figsize=(15, 12))
    ax.imshow(costs, cmap="gray", interpolation="none")
    ax.axis("off")

    outputImg = cv2.addWeighted(baseImg, .3, atlasImg, 1, 0)

    cv2.imshow("Output", outputImg)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #29
def recognize(pattern, name, scene):
    zipped_matches = []
    match = match_descriptors(scene,
                              pattern,
                              cross_check=True,
                              max_distance=0.2)
    zipped_matches.append([match.size, name, match])
    return zipped_matches
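For context, a hedged sketch of producing the binary descriptor arrays this expects; brief_descriptors below is an illustrative helper, not part of the source:

from skimage import data
from skimage.color import rgb2gray
from skimage.feature import BRIEF, corner_harris, corner_peaks

def brief_descriptors(img):
    # illustrative helper: Harris corners + BRIEF descriptors
    keypoints = corner_peaks(corner_harris(img), min_distance=5, threshold_rel=0.1)
    extractor = BRIEF()
    extractor.extract(img, keypoints)
    return extractor.descriptors

scene_img = rgb2gray(data.astronaut())
scene_desc = brief_descriptors(scene_img)
pattern_desc = brief_descriptors(scene_img[100:300, 100:300])
print(recognize(pattern_desc, 'crop', scene_desc))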
Example #30
def get_pair(outputdir,
             imgs,
             p,
             offsets,
             ds,
             overlap_fraction,
             orb,
             plotpairs=0,
             res_th=10,
             num_inliers=100,
             transformname="EuclideanTransform"):
    """Create inlier keypoint pairs."""

    pair_tstart = time()

    overlap_pixels = [
        int(math.ceil(d * of * 1 / ds))
        for d, of in zip(imgs[0][0].shape, overlap_fraction)
    ]

    f1, f2 = downsample_images(p, imgs, ds)
    p1, p2 = select_imregions(p[2], f1, f2, overlap_pixels)
    kp1, de1 = get_keypoints(orb, p1)
    kp2, de2 = get_keypoints(orb, p2)
    kp1, kp2 = reset_imregions(p[2], kp1, kp2, overlap_pixels, f1.shape)

    matches = match_descriptors(de1, de2, cross_check=True)
    dst = kp1[matches[:, 0]][:, ::-1]
    src = kp2[matches[:, 1]][:, ::-1]
    transform = eval("tf.%s" % transformname)
    model, inliers = ransac((src, dst),
                            transform,
                            min_samples=4,
                            residual_threshold=res_th,
                            max_trials=1000,
                            stop_sample_num=num_inliers)

    # get the weighing kernel in z
    k = gaussian(offsets * 2 + 1, 1, sym=True)
    w = k[offsets - (p[1][0] - p[0][0])]

    # transform from downsampled space to full
    S = np.array([[ds, 0, 0], [0, ds, 0], [0, 0, 1]])
    s = np.c_[src, np.ones(src.shape[0])].dot(S)[inliers, :2]
    d = np.c_[dst, np.ones(dst.shape[0])].dot(S)[inliers, :2]
    pair = (p, s, d, model, w)

    pairstring = generate_pairstring(offsets, ds, p)
    pairfile = path.join(outputdir, pairstring + '.pickle')
    pickle.dump(pair, open(pairfile, 'wb'))
    if plotpairs:
        plot_pair_ransac(outputdir, pairstring, p, f1, f2, kp1, kp2, matches,
                         inliers)

    print('%s done in: %6.2f s; matches: %05d; inliers: %05d' %
          (pairstring, time() - pair_tstart, len(matches), np.sum(inliers)))

    return pair
Example #31
def register_image_pair(idx, path_img_target, path_img_source, path_out):
    """ register two images together

    :param int idx: empty parameter for using the function in parallel
    :param str path_img_target: path to the target image
    :param str path_img_source: path to the source image
    :param str path_out: path for exporting the output
    :return tuple(str,float):
    """
    start = time.time()
    # load and denoise reference image
    img_target = io.imread(path_img_target)[..., :3]
    img_target = denoise_wavelet(img_target,
                                 wavelet_levels=7,
                                 multichannel=True)
    img_target_gray = rgb2gray(img_target)

    # load and denoise moving image
    img_source = io.imread(path_img_source)[..., :3]
    img_source = denoise_bilateral(img_source,
                                   sigma_color=0.05,
                                   sigma_spatial=2,
                                   multichannel=True)
    img_source_gray = rgb2gray(img_source)

    # detect ORB features on both images
    detector_target = ORB(n_keypoints=150)
    detector_source = ORB(n_keypoints=150)
    detector_target.detect_and_extract(img_target_gray)
    detector_source.detect_and_extract(img_source_gray)
    matches = match_descriptors(detector_target.descriptors,
                                detector_source.descriptors)
    # robustly estimate affine transform model with RANSAC
    model, _ = ransac(
        (detector_target.keypoints[matches[:, 0]],
         detector_source.keypoints[matches[:, 1]]),
        AffineTransform,
        min_samples=25,
        max_trials=500,
        residual_threshold=0.9,
    )

    # warping source image with estimated transformations
    path_img_warped = os.path.join(path_out, NAME_IMAGE_WARPED % idx)
    if model:
        img_warped = warp(img_target,
                          model.inverse,
                          output_shape=img_target.shape[:2])
        try:
            io.imsave(path_img_warped, img_warped)
        except Exception:
            traceback.print_exc()
    else:
        warnings.warn("Image registration failed.", RuntimeWarning)
        path_img_warped = None
    # summarise experiment
    execution_time = time.time() - start
    return path_img_warped, execution_time
Example #32
def drawMatches(file1, file2, feat1, feat2):
    image1 = np.array(Image.open(file1).convert('RGB'))
    image2 = np.array(Image.open(file2).convert('RGB'))

    matches = match_descriptors(feat1['descriptors'],
                                feat2['descriptors'],
                                cross_check=True)
    print('Number of raw matches: %d.' % matches.shape[0])

    keypoints_left = feat1['keypoints'][matches[:, 0], :2]
    keypoints_right = feat2['keypoints'][matches[:, 1], :2]
    keypoints_left_new = []
    keypoints_right_new = []

    for i in range(keypoints_left.shape[0]):
        if np.all(image1[int(keypoints_left[i, 1]),
                         int(keypoints_left[i, 0])] == 0):
            continue
        keypoints_left_new.append(keypoints_left[i])
    keypoints_left_new = np.array(keypoints_left_new)[:]

    for i in range(0, keypoints_right.shape[0]):
        if np.all(image2[int(keypoints_right[i, 1]),
                         int(keypoints_right[i, 0])] == 0):
            continue
        keypoints_right_new.append(keypoints_right[i])
    keypoints_right_new = np.array(keypoints_right_new)[:]

    print(keypoints_left_new.shape, keypoints_right_new.shape)

    np.random.seed(0)
    model, inliers = ransac((keypoints_left_new, keypoints_right_new),
                            ProjectiveTransform,
                            min_samples=4,
                            residual_threshold=8,
                            max_trials=10000)
    n_inliers = np.sum(inliers)
    print('Number of inliers: %d.' % n_inliers)

    inlier_keypoints_left = [
        cv2.KeyPoint(point[0], point[1], 1)
        for point in keypoints_left_new[inliers]
    ]
    inlier_keypoints_right = [
        cv2.KeyPoint(point[0], point[1], 1)
        for point in keypoints_right_new[inliers]
    ]
    placeholder_matches = [cv2.DMatch(idx, idx, 1) for idx in range(n_inliers)]

    image3 = cv2.drawMatches(image1, inlier_keypoints_left, image2,
                             inlier_keypoints_right, placeholder_matches, None)
    #image3 = Image.fromarray(image3)
    #image3.save('/home/udit/d2-net/media/rcar_Pairs_overcast/9_extract.jpg')
    cv2.imwrite('/home/udit/d2-net/extract.jpg', image3)
    plt.figure(figsize=(20, 20))
    plt.imshow(image3)
    plt.axis('off')
    plt.show()
Example #33
def ransac_transform(src_keypoints,
                     src_descriptors,
                     dest_keypoints,
                     dest_descriptors,
                     max_trials=N_TRIALS,
                     residual_threshold=1,
                     return_matches=False):
    """Match keypoints of 2 images and find ProjectiveTransform using RANSAC algorithm.

    src_keypoints ((N, 2) np.ndarray) : source coordinates
    src_descriptors ((N, 256) np.ndarray) : source descriptors
    dest_keypoints ((N, 2) np.ndarray) : destination coordinates
    dest_descriptors ((N, 256) np.ndarray) : destination descriptors
    max_trials (int) : maximum number of iterations for random sample selection.
    residual_threshold (float) : maximum distance for a data point to be classified as an inlier.
    return_matches (bool) : if True function returns matches

    Returns:
        skimage.transform.ProjectiveTransform : transform of source image to destination image
        (Optional)(N, 2) np.ndarray : inliers' indexes of source and destination images
    """

    # your code here
    matches = match_descriptors(src_descriptors, dest_descriptors)
    n = matches.shape[0]
    res_inds = [-1, -1, -1, -1]
    res_kol = 0
    for trial in range(max_trials):
        inds = random.sample(range(n), 4)
        h = find_homography(src_keypoints[matches[inds, 0]],
                            dest_keypoints[matches[inds, 1]])
        projected = ProjectiveTransform(h)(src_keypoints[matches[:, 0]])
        dist = np.sqrt(
            np.power(projected[:, 0] - dest_keypoints[matches[:, 1], 0], 2) +
            np.power(projected[:, 1] - dest_keypoints[matches[:, 1], 1], 2))
        kol = np.sum(dist < residual_threshold)
        if kol > res_kol:
            print("trial: {}, kol: {}".format(trial, kol))
            res_kol = kol
            res_inds = inds
            if res_kol > len(src_keypoints) / 10 and trial > max_trials / 10:
                break
    h = find_homography(src_keypoints[matches[res_inds, 0]],
                        dest_keypoints[matches[res_inds, 1]])
    transform = ProjectiveTransform(h)
    projected = transform(src_keypoints[matches[:, 0]])
    dist = np.sqrt(
        np.power(projected[:, 0] - dest_keypoints[matches[:, 1], 0], 2) +
        np.power(projected[:, 1] - dest_keypoints[matches[:, 1], 1], 2))
    inliers = matches[dist < residual_threshold]
    print("{} inliers matched".format(inliers.shape[0]))
    transform = ProjectiveTransform(
        find_homography(src_keypoints[inliers[:, 0]],
                        dest_keypoints[inliers[:, 1]]))
    if return_matches:
        return transform, inliers
    else:
        return transform
Example #34
def get_displacement(image0, image1):
    """
    Gets displacement (in pixels I think) difference between 2 images using scikit-image
    not as accurate as the opencv version i think.

    :param image0: reference image
    :param image1: target image
    :return:
    """
    from skimage.feature import (match_descriptors, ORB, plot_matches)
    from skimage.color import rgb2gray
    from scipy.spatial.distance import hamming
    from scipy import misc
    image0_gray = rgb2gray(image0)
    image1_gray = rgb2gray(image1)
    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(image0_gray)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors

    descriptor_extractor.detect_and_extract(image1_gray)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    # Sort the matches based on distance.  Least distance
    # is better
    distances12 = []
    for match in matches12:
        distance = hamming(descriptors1[match[0]], descriptors2[match[1]])
        distances12.append(distance)

    indices = np.arange(len(matches12))
    indices = [index for (_, index) in sorted(zip(distances12, indices))]
    matches12 = matches12[indices]

    # collect displacement from the first 10 matches
    dx_list = []
    dy_list = []
    for mat in matches12[:10]:
        # Get the matching key points for each of the images
        img1_idx = mat[0]
        img2_idx = mat[1]

        # x - columns
        # y - rows
        (x1, y1) = keypoints1[img1_idx]
        (x2, y2) = keypoints2[img2_idx]
        dx_list.append(abs(x1 - x2))
        dy_list.append(abs(y1 - y2))

    dx_median = np.median(np.asarray(dx_list, dtype=np.double))
    dy_median = np.median(np.asarray(dy_list, dtype=np.double))
    # plot_matches(image0, image1, descriptors1, descriptors2, matches12[:10])
    return dx_median, dy_median
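A small usage sketch (assuming numpy is imported as np at module level, as the snippet expects):

from skimage import data
from skimage.transform import AffineTransform, warp

image0 = data.astronaut()
image1 = warp(image0, AffineTransform(translation=(5, 10)))  # synthetically shifted copy
dx, dy = get_displacement(image0, image1)
print(dx, dy)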
Example #35
def get_pairs(imgs, unique_pairs, offsets, subsample_factor, overlap_pixels,
              n_kp):
    """Create inlier keypoint pairs."""

    orb = ORB(n_keypoints=n_kp, fast_threshold=0.05)
    k = gaussian(offsets * 2 + 1, 1, sym=True)
    tf = SimilarityTransform  # tf = RigidTransform
    tf0 = SimilarityTransform()
    # FIXME: is there no rigid model in scikit-image???
    # this is needed for input to RANSAC

    pairs = []
    init_tfs = np.empty([n_slcs, n_tiles, 3])
    for p in unique_pairs:
        pair_tstart = time()

        full_im1, full_im2 = subsample_images(p, imgs, subsample_factor)

        part_im1, part_im2 = select_imregions(p[2], full_im1, full_im2,
                                              overlap_pixels)
        keyp_im1, desc_im1 = get_keypoints(orb, part_im1)
        keyp_im2, desc_im2 = get_keypoints(orb, part_im2)
        keyp_im1, keyp_im2 = reset_imregions(p[2], keyp_im1, keyp_im2,
                                             overlap_pixels, full_im1.shape)

        matches = match_descriptors(desc_im1, desc_im2, cross_check=True)
        dst = keyp_im1[matches[:, 0]][:, ::-1]
        src = keyp_im2[matches[:, 1]][:, ::-1]
        model, inliers = ransac((src, dst),
                                tf,
                                min_samples=4,
                                residual_threshold=2,
                                max_trials=300)

        w = k[offsets - (p[1][0] - p[0][0])]

        pairs.append((p, src[inliers], dst[inliers], model, w))

        if (p[0][1] == 0) & (p[0][0] == p[1][0]):
            # referenced to tile 0 within the same slice
            tf1 = tf0.__add__(model)
            itf = [
                math.acos(min(tf1.params[0, 0],
                              1)),  # FIXME!!! with RigidTransform
                tf1.params[0, 2],
                tf1.params[1, 2]
            ]
            init_tfs[p[1][0], p[1][1], :] = np.array(itf)
        if (p[0][1] == p[1][1] == 0) & (
                p[1][0] - p[0][0] == 1):  # if [slcX,tile0] to [slcX-1,tile0]
            tf0 = tf0.__add__(model)

        plot_pair_ransac(p, full_im1, full_im2, keyp_im1, keyp_im2, matches,
                         inliers)
        print('Pair done in: %.2f s' % (time() - pair_tstart, ))

    return pairs, init_tfs
Example #36
def get_matrix(image_tif_bgrn, image_jpg_bgr, verbose=False):
    """Get similarity transform matrix
	ORB Limitation: https://github.com/scikit-image/scikit-image/issues/1472 """
    im_tif_adjusted = match_color_curve_tif2jpg(image_tif_bgrn, image_jpg_bgr)
    jpg_gray = cv2.cvtColor(image_jpg_bgr, cv2.COLOR_BGR2GRAY).astype(np.uint8)
    tif_gray = cv2.cvtColor(im_tif_adjusted,
                            cv2.COLOR_BGR2GRAY).astype(np.uint8)

    number_of_keypoints = 100

    # Initialize ORB
    # This number of keypoints is large enough for robust results,
    # but low enough to run quickly.
    orb = ORB(n_keypoints=number_of_keypoints, fast_threshold=0.05)
    orb2 = ORB(n_keypoints=number_of_keypoints, fast_threshold=0.05)
    try:
        # Detect keypoints
        orb.detect_and_extract(jpg_gray)
        keypoints_jpg = orb.keypoints
        descriptors_jpg = orb.descriptors
        orb2.detect_and_extract(tif_gray)
        keypoints_tif = orb2.keypoints
        descriptors_tif = orb2.descriptors
    except IndexError:
        raise KeypointDetectionException('ORB Keypoint detection failed')

    # Match descriptors between images
    matches = match_descriptors(descriptors_jpg,
                                descriptors_tif,
                                cross_check=True)

    # Select keypoints from
    #   * source (image to be registered)
    #   * target (reference image)
    src = keypoints_jpg[matches[:, 0]][:, ::-1]
    dst = keypoints_tif[matches[:, 1]][:, ::-1]

    model_robust, inliers = ransac((src, dst),
                                   TranslationTransform,
                                   min_samples=4,
                                   residual_threshold=1,
                                   max_trials=300)
    if verbose:
        print(inliers)
        print("number of matching keypoints", np.sum(inliers))

    if inliers is None or np.sum(inliers) < 3 or model_robust is None:
        raise ValueError('Possible mismatched JPG and TIF')

    if is_translational(model_robust):
        # we assume src and dst are not rotated relative to each other
        # get rid of any rotational noise introduced during normalization/centering in transform estimate function
        model_robust.params[0, 0] = 1.0
        model_robust.params[1, 1] = 1.0
        return model_robust
    else:
        raise ValueError('Invalid Model')
Example #37
File: feature.py  Project: JiaminShi/HDR
def produceMatches(imL1, imR1, panoramas=False, overlap_size=None):

    imL = imL1.copy()
    imR = imR1.copy()

    if panoramas:
        if overlap_size is None:
            overlap_size = int(imL.shape[1] * 0.4)
        imL[:, :-overlap_size, :] = 0
        imR[:, overlap_size:, :] = 0

    imLgray = rgb2gray(imL)
    imRgray = rgb2gray(imR)

    keypointsL = corner_peaks(corner_harris(imLgray),
                              threshold_rel=0.001,
                              min_distance=10)
    keypointsR = corner_peaks(corner_harris(imRgray),
                              threshold_rel=0.001,
                              min_distance=10)

    extractor = BRIEF()

    extractor.extract(imLgray, keypointsL)
    keypointsL = keypointsL[extractor.mask]
    descriptorsL = extractor.descriptors

    extractor.extract(imRgray, keypointsR)
    keypointsR = keypointsR[extractor.mask]
    descriptorsR = extractor.descriptors

    matchesLR = match_descriptors(descriptorsL, descriptorsR, cross_check=True)

    src = []
    dst = []
    for coord in matchesLR:
        src.append(keypointsL[coord[0]])
        dst.append(keypointsR[coord[1]])
    src = np.array(src)
    dst = np.array(dst)

    src_c = src.copy()
    dst_c = dst.copy()
    src_c[:, 1] = src[:, 0]
    src_c[:, 0] = src[:, 1]
    dst_c[:, 1] = dst[:, 0]
    dst_c[:, 0] = dst[:, 1]

    # robustly estimate affine transform model with RANSAC
    model_robust, inliers = ransac((src_c, dst_c),
                                   ProjectiveTransform,
                                   min_samples=4,
                                   residual_threshold=8,
                                   max_trials=250)

    return (matchesLR, model_robust, inliers)
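A possible way to apply the result, sketched under the assumption that the snippet's module-level numpy/skimage imports are in place; imR here is just a shifted copy standing in for a real overlapping frame:

from skimage import data, transform
import numpy as np

imL = data.astronaut()
imR = np.roll(imL, 40, axis=1)  # crude stand-in for an overlapping right frame

matchesLR, model_robust, inliers = produceMatches(imL, imR)
# model_robust maps (x, y) points of imL to imR, so warping imR with it
# resamples imR in imL's frame.
aligned_R = transform.warp(imR, model_robust)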
Example #38
    def process(self, img2, image_gray):
        # img2 = warp(img2)
        patch_size = [640]
        img2 = rgb2gray(img2)
        image_gray = rgb2gray(img2)

        blobs_dog = blob_dog(image_gray, min_sigma=0.2, max_sigma=225, sigma_ratio=1.6, threshold=.5)
        blobs_dog[:, 2] = blobs_dog[:, 2]

        blobs = [blobs_dog]
        colors = ['black']
        titles = ['Difference of Gaussian']
        sequence = zip(blobs, colors, titles)

        # plt.imshow(img2)
        # plt.axis("equal")
        # plt.show()

        for blobs, color, title in sequence:
            print(len(blobs))
            for blob in blobs:
                y, x, r = blob
                plotx = x
                ploty = y
                for i in range (3):
                    keypoints1 = corner_peaks(corner_harris(Array.image_arr[i]), min_distance=1)
                    keypoints2 = corner_peaks(corner_harris(img2), min_distance=1)

                    extractor = BRIEF(patch_size=30, mode="uniform")

                    extractor.extract(Array.image_arr[i], keypoints1)
                    keypoints1 = keypoints1[extractor.mask]
                    descriptors1 = extractor.descriptors

                    extractor.extract(img2, keypoints2)
                    keypoints2 = keypoints2[extractor.mask]
                    descriptors2 = extractor.descriptors

                    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
                    
                    # print(keypoints1, keypoints2)
                    # print(matches12)
                    # X, Y end up as the coordinates of the last matched keypoint
                    for pizdezh in matches12:
                        X = keypoints2[pizdezh[1]][1]
                        Y = keypoints2[pizdezh[1]][0]

                    if sqrt((plotx - X)**2 + (ploty - Y)**2) < r:
                        seen = [{
                            "type": Array.type_arr[i],
                            "center_shift": (plotx - 160/2) * -0.02,
                            "distance": image_gray[y][x] / 0.08
                        }]
                        print(seen)
                        data.seen.add(seen)
                        break
Example #39
def run(img1_path, img2_path, type='ORB'):
    img1, img2 = load_images(img1_path, img2_path)

    if type == 'ORB':
        keyp1, keyp2, desc1, desc2 = extract_by_orb(img1, img2, 100)
    elif type == 'BRIEF':
        keyp1, keyp2, desc1, desc2 = extract_by_brief(img1, img2, 10)

    matches = match_descriptors(desc1, desc2, cross_check=True)

    plot(img1, img2, keyp1, keyp2, matches)
Example #40
def test_max_distance():
    descs1 = np.zeros((10, 128))
    descs2 = np.zeros((15, 128))

    descs1[0, :] = 1

    matches =  match_descriptors(descs1, descs2, metric='euclidean',
                                 max_distance=0.1, cross_check=False)
    assert len(matches) == 9

    matches =  match_descriptors(descs1, descs2, metric='euclidean',
                                 max_distance=np.sqrt(128.1),
                                 cross_check=False)
    assert len(matches) == 10

    matches =  match_descriptors(descs1, descs2, metric='euclidean',
                                 max_distance=0.1,
                                 cross_check=True)
    assert_equal(matches, [[1, 0]])

    matches =  match_descriptors(descs1, descs2, metric='euclidean',
                                 max_distance=np.sqrt(128.1),
                                 cross_check=True)
    assert_equal(matches, [[1, 0]])
Example #41
def test_max_ratio():
    descs1 = 10 * np.arange(10)[:, None].astype(np.float32)
    descs2 = 10 * np.arange(15)[:, None].astype(np.float32)

    descs2[0] = 5.0

    matches = match_descriptors(descs1, descs2, metric='euclidean',
                                max_ratio=1.0, cross_check=False)
    assert_equal(len(matches), 10)

    matches = match_descriptors(descs1, descs2, metric='euclidean',
                                max_ratio=0.6, cross_check=False)
    assert_equal(len(matches), 10)

    matches = match_descriptors(descs1, descs2, metric='euclidean',
                                max_ratio=0.5, cross_check=False)
    assert_equal(len(matches), 9)

    descs1[0] = 7.5

    matches = match_descriptors(descs1, descs2, metric='euclidean',
                                max_ratio=0.5, cross_check=False)
    assert_equal(len(matches), 9)

    descs2 = 10 * np.arange(1)[:, None].astype(np.float32)

    matches = match_descriptors(descs1, descs2, metric='euclidean',
                                max_ratio=1.0, cross_check=False)
    assert_equal(len(matches), 10)

    matches = match_descriptors(descs1, descs2, metric='euclidean',
                                max_ratio=0.5, cross_check=False)
    assert_equal(len(matches), 10)

    descs1 = 10 * np.arange(1)[:, None].astype(np.float32)

    matches = match_descriptors(descs1, descs2, metric='euclidean',
                                max_ratio=1.0, cross_check=False)
    assert_equal(len(matches), 1)

    matches = match_descriptors(descs1, descs2, metric='euclidean',
                                max_ratio=0.5, cross_check=False)
    assert_equal(len(matches), 1)
Example #42
def getDisplacement(Image0, Image1):
    Image0Gray = rgb2gray(Image0)
    Image1Gray = rgb2gray(Image1)
    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(Image0Gray)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors

    descriptor_extractor.detect_and_extract(Image1Gray)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    # Sort the matches based on distance.  Least distance
    # is better
    distances12 = []
    for match in matches12:
        distance = hamming(descriptors1[match[0]], descriptors2[match[1]])
        distances12.append(distance)

    indices = np.arange(len(matches12))
    indices = [index for (_, index) in sorted(zip(distances12, indices))]
    matches12 = matches12[indices]

    # collect displacement from the first 10 matches
    dxList = []
    dyList = []
    for mat in matches12[:10]:
        # Get the matching keypoints for each of the images
        img1_idx = mat[0]
        img2_idx = mat[1]

        # x - columns
        # y - rows
        (x1, y1) = keypoints1[img1_idx]
        (x2, y2) = keypoints2[img2_idx]
        dxList.append(abs(x1 - x2))
        dyList.append(abs(y1 - y2))

    dxMedian = np.median(np.asarray(dxList, dtype=np.double))
    dyMedian = np.median(np.asarray(dyList, dtype=np.double))
    fig, ax = plt.subplots()  # plot_matches needs a matplotlib Axes; assumes plt is imported
    plot_matches(ax, Image0, Image1, keypoints1, keypoints2, matches12[:10])
    return dxMedian, dyMedian
Example #43
    def NextGID(self,image):
        """ Calculates the next Group ID for the input image """
        NewImg = self.LoadImage(image,Greyscale=True,scale=0.25)
        self.orb.detect_and_extract(NewImg)
        NewImgKeyDescr = (self.orb.keypoints, self.orb.descriptors)

        for PreImgKeyDescr in reversed(self.ImagesKeypointsDescriptors):
            # Check for overlap
            matcheOfDesc = match_descriptors(PreImgKeyDescr[1], NewImgKeyDescr[1], cross_check=True)

            # Select keypoints from the source (image to be registered)
            # and target (reference image)
            src = NewImgKeyDescr[0][matcheOfDesc[:, 1]][:, ::-1]
            dst = PreImgKeyDescr[0][matcheOfDesc[:, 0]][:, ::-1]

            # min_samples=4: a projective transform (homography) is determined
            # by four point correspondences.
            model_robust, inliers = ransac((src, dst), ProjectiveTransform,
                                           min_samples=4, residual_threshold=1, max_trials=300)

            NumberOfTrueMatches = np.sum(inliers)  #len(inliers[inliers])

            if NumberOfTrueMatches > 100 :
                # Image has overlap
                logger.debug('Image {0} found a match! (No: of Matches={1})'.format(image,NumberOfTrueMatches))
                break
            else :
                logger.debug('Image {0} not matching..(No: of Matches={1})'.format(image,NumberOfTrueMatches))
                continue

        else:
            # None of the images in the for loop has any overlap...So this is a new Group
            self.ImagesKeypointsDescriptors = [] # Erase all previous group items
            # self.ImagesWithOverlap = [] 

            # Increment Group ID
            self.CurrentGroupID += 1
            logger.debug('Starting a new Panorama group (GID={0})'.format(self.CurrentGroupID))

        # Append the latest image to the current group
        self.ImagesKeypointsDescriptors.append(NewImgKeyDescr) 
        # self.ImagesWithOverlap.append(NewImg)

        # Return the current  group ID
        return self.CurrentGroupID
Example #44
    def _orb_ransac_shift(self, im1, im2, template):
        descriptor_extractor = ORB() #n_keypoints=self.parameters['n_keypoints'])
        key1, des1 = self._find_key_points(descriptor_extractor, im1)
        key2, des2 = self._find_key_points(descriptor_extractor, im2)
        matches = match_descriptors(des1, des2, cross_check=True)

        # estimate affine transform model using all coordinates
        src = key1[matches[:, 0]]
        dst = key2[matches[:, 1]]

        # robustly estimate affine transform model with RANSAC
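        # min_samples=3: an affine transform is determined by three
        # point correspondences.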
        model_robust, inliers = ransac((src, dst), AffineTransform,
                                       min_samples=3, residual_threshold=1,
                                       max_trials=100)
#        diff = []
#        for p1, p2 in zip(src[inliers], dst[inliers]):
#            diff.append(p2-p1)
#        return np.mean(diff, axis=0)

        return model_robust.translation
Example #45
def find_two_matches(base_img, img, base_k, img_k, base_d, img_d, min_matches=10):

    matches = match_descriptors(base_d, img_d, cross_check=True)
   
    #   * src (image to be registered):
    #   * dst (reference image):
   
    src = img_k[matches[:,1]][:,::-1]
    dst = base_k[matches[:,0]][:,::-1]
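    # Keypoints are stored as (row, col); [:, ::-1] reverses them to (x, y),
    # the coordinate order used by skimage.transform when warping.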
    
    if matches.shape[0] > min_matches:
#        model_robust, inliers = ransac((src, dst), ProjectiveTransform,
#                                   min_samples=10, residual_threshold=10,
#                                   stop_sample_num=100, max_trials=300)
#
        model_robust, inliers = ransac((src, dst), AffineTransform,
                                       min_samples=6, residual_threshold=3,
                                       max_trials=100)
        ransac_matches = matches[inliers]
        inlierRatio = ransac_matches.shape[0]/float(matches.shape[0])
        return model_robust, ransac_matches, inlierRatio
    else:
        return np.zeros((0, 2)), np.zeros((0, 2)), 0.0
Example #46
def match_from_to_compare(fk, fd, tk, td, min_matches=10):
    # get matching keypoints between images (from to) or (previous, base) or (next, base)
    ransac_matches = np.zeros((0, 2))
    matches = np.zeros((0, 2))
    inliers = np.zeros((0, 2))
    try:
        # skimage way
        # may need to reverse
        matches = match_descriptors(fd, td, cross_check=True)
        src = tk[matches[:, 1]][::-1]
        dst = fk[matches[:, 0]][::-1]
        logging.info("STARTING MATCH src: %d dst %d" % (src.shape[0], dst.shape[0]))
        if src.shape[0] > min_matches:
            # TODO - select which transform to use based on sensor data?
            try:
                model_robust, inliers = ransac(
                    (src, dst),
                    AffineTransform,
                    min_samples=min_matches,
                    stop_sample_num=100,
                    max_trials=2000,
                    stop_probability=0.995,
                    residual_threshold=2,
                )
            except Exception as e:
                logging.error(e)

            logging.info("FOUND inliers %d" % inliers.shape[0])
            if inliers.shape[0]:
                num_correct = inliers.shape[0]
                num_matches = src.shape[0]
                num_false = num_matches - num_correct
                ransac_matches = matches[inliers]
                perc_correct = 1 - float(num_false) / float(num_matches)
                return model_robust, ransac_matches, matches, inliers, perc_correct
        else:
            logging.info("NOT ENOUGH MATCHES src: %d dst %d" % (src.shape[0], dst.shape[0]))
    except Exception as e:
        logging.error(e)
    # no robust model was estimated; return the empty defaults
    return None, ransac_matches, matches, inliers, 0.0
Example #47
    def get_translation_tool(self, n_keypoints=1000):

        # Convert images to grayscale
        src_image = rgb2gray(self.src_image)
        dst_image = rgb2gray(self.dst_image)

        # Initiate an ORB class object which can extract features & descriptors from images.
        # Set the amount of features that should be found (more = more accurate)
        descriptor_extractor = ORB(n_keypoints=n_keypoints)

        # Extract features and descriptors from source image
        descriptor_extractor.detect_and_extract(src_image)
        self.keypoints1 = descriptor_extractor.keypoints
        descriptors1 = descriptor_extractor.descriptors

        # Extract features and descriptors from destination image
        descriptor_extractor.detect_and_extract(dst_image)
        self.keypoints2 = descriptor_extractor.keypoints
        descriptors2 = descriptor_extractor.descriptors

        # Matches the descriptors and gives them rating as to how similar they are
        self.matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

        # Selects the coordinates from source image and destination image based on the
        # indices given from the match_descriptors function.
        src = self.keypoints1[self.matches12[:, 0]][:, ::-1]
        dst = self.keypoints2[self.matches12[:, 1]][:, ::-1]

        # Filters out the outliers and generates the transformation matrix based on only the inliers
        model_robust, inliers = \
            ransac((src, dst), ProjectiveTransform,
                min_samples=4, residual_threshold=2)

        # This returns the object "model_robust" which contains the transformation matrix and
        # uses that to translate any coordinate point from source to destination image.
        return model_robust, inliers
Example #48
keypoints3 = corner_peaks(corner_harris(img3), min_distance=5)

extractor = BRIEF()

extractor.extract(img1, keypoints1)
keypoints1 = keypoints1[extractor.mask]
descriptors1 = extractor.descriptors

extractor.extract(img2, keypoints2)
keypoints2 = keypoints2[extractor.mask]
descriptors2 = extractor.descriptors

extractor.extract(img3, keypoints3)
keypoints3 = keypoints3[extractor.mask]
descriptors3 = extractor.descriptors

matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)
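# BRIEF descriptors are boolean arrays, so match_descriptors falls back to
# the Hamming metric for these matches.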

fig, ax = plt.subplots(nrows=2, ncols=1)

plt.gray()

plot_matches(ax[0], img1, img2, keypoints1, keypoints2, matches12)
ax[0].axis('off')

plot_matches(ax[1], img1, img3, keypoints1, keypoints3, matches13)
ax[1].axis('off')

plt.show()
Example #49
def main():
    image_base_dir = '/home/dek/makerfaire-booth/2018/burger/experimental/dek/train_object_detector/decoded'
    canonical_dir = 'canonical'
    # template = os.path.join(image_base_dir, 'bottombun.0.00.27.34.-24.61.0.81.png')
    template = os.path.join(canonical_dir, 'patty.png')

    img1 = imread(template)
    # img1_padded = numpy.zeros( (256, 256,3), dtype=numpy.uint8)
    img1_padded = numpy.resize( [255,255,255], (256, 256, 3))
    s = img1.shape
    img1_padded[:s[0], :s[1]] = img1
    img1_gray = rgb2gray(img1)

    descriptor_extractor = ORB()

    descriptor_extractor.detect_and_extract(img1_gray)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors

    # g = glob.glob(os.path.join(image_base_dir, 'patty*.nobox.png'))
    # for moving in g:
    while True:
        rot, tx, ty, scale = get_random_orientation()
        # img2 = imread(moving)
        img2 = draw_example('patty', 256, 256, rot, tx, ty, scale)
        img2_gray = rgb2gray(img2)

        try:
            descriptor_extractor.detect_and_extract(img2_gray)
        except RuntimeError:
            continue
        
        keypoints2 = descriptor_extractor.keypoints
        descriptors2 = descriptor_extractor.descriptors

        matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

        src = keypoints2[matches12[:, 1]][:, ::-1]
        dst = keypoints1[matches12[:, 0]][:, ::-1]

        model_robust, inliers = \
            ransac((src, dst), SimilarityTransform,
                   min_samples=4, residual_threshold=2)
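        # (A similarity transform needs only two point pairs; min_samples=4
        # just makes each RANSAC hypothesis less sensitive to noise.)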
        if not model_robust:
            print "bad"
            continue
        img2_transformed = transform.warp(img2, model_robust.inverse, mode='constant', cval=1)
        img1_padded_float = img1_padded.astype(numpy.float64)/255.
        sub = img2_transformed - img1_padded_float
        print compare_ssim(img2_transformed, img1_padded_float, win_size=5, multichannel=True)
        fig, axes = plt.subplots(2, 2, figsize=(7, 6), sharex=True, sharey=True)
        ax = axes.ravel()

        ax[0].imshow(img1_padded_float)
        ax[1].imshow(img2)
        ax[1].set_title("Template image")
        ax[2].imshow(img2_transformed)
        ax[2].set_title("Matched image")
        ax[3].imshow(sub)
        ax[3].set_title("Subtracted image")
        # plt.gray()

        # ax = plt.gca()
        # plot_matches(ax, img1, img2, keypoints1, keypoints2, matches12)


        plt.show()
Example #50
def find_all_matches(unmatched, run_num, min_to_match=40):
    channel = 2
    num_keypoints=1000
    # get previous base
    print("=====================================")
    ## setup detector
    orb = ORB(n_keypoints=num_keypoints, downscale=1.2, n_scales=20, harris_k=.04,
              fast_threshold=0.05)
    base_num = 0
    unmatched.sort()
    init_num_unmatched = len(unmatched)
    while len(unmatched):
        print("Working on run_%05i" %run_num)
        # get next unmatched image
        base_path = unmatched.pop(0)
        # read the image
        base_img = imread(base_path)
        # if the image has gps data, remove it for now
        if base_img.shape[0] == 2:
            base_img = base_img[0]
        base_gray = base_img[:,:,channel]

        ikdname = os.path.join(base_path.replace('.'+output_image_type, ''))
        #base_k, base_d = detect_and_extract(orb, base_gray)
        base_k, base_d = get_keypoints_and_descriptors(ikdname, base_gray, orb)

        match_num = get_matches(base_path)
        base_name = get_basename(run_num, base_num, match_num)
        bad_match = 0
        matched_files = [base_path]

        print("New base file is: %s" %os.path.split(base_path)[1])
        #print('new unmatched', unmatched)
        for xx, img_path in enumerate(unmatched):
            # get new keypoints for the updated base
            img = imread(img_path)
            if img.shape[0] == 2:
                img = img[0]
            img_name = os.path.split(img_path)[1]
            print("working on img: %s" %img_name)
            img_gray = img[:,:,channel]
            ikdname = os.path.join(img_path.replace('.'+output_image_type, ''))
            img_k, img_d = get_keypoints_and_descriptors(ikdname, img_gray, orb)

            matches = match_descriptors(base_d, img_d, cross_check=True)

            model_robust, ransac_matches = find_two_matches(base_gray,
                                                            img_gray,
                                                            base_k, img_k,
                                                            base_d, img_d)
            num_ransac_matches = ransac_matches.shape[0]
            if num_ransac_matches < min_to_match:
                print("------------ could not match %s, only %s ransac_matches out of %s"
                      %(img_name, num_ransac_matches, matches.shape[0]))
                # couldn't match with this base, save where we are and
                # get new base
                #fig, ax = plt.subplots(nrows=2, ncols=1)
                #plt.title('run %s' %run_num)
                #plot_matches(ax[0], base_gray, img_gray, base_k, img_k, matches)
                #plot_matches(ax[1], base_gray, img_gray, base_k, img_k, ransac_matches)
                #plt.show()
                bad_match += 1
                if bad_match > 1:
                    print("quiting this base img: %s" %base_path)
                    break
            else:
                match_num += 1
                print("*********** matched %s with %s" %(img_name,
                                                  num_ransac_matches))
                base_img = find_mask(base_name, base_img, img_name,
                                     img, model_robust, channel)
                base_gray = base_img[:,:,channel]
                base_name = get_basename(run_num, base_num, match_num)
                matched_files.append(img_path)
                base_k, base_d = detect_and_extract(orb, base_gray)

        base_out = os.path.join(outdir, base_name)
        print("WRITING", base_out)
        plt.imsave(base_out, base_img)
        [os.remove(f) for f in matched_files]
        unmatched = get_unmatched(run_num-1)
        # remove original file
        # increase to use next base_num
        base_num += 1
Example #51
 def calculate_rate(it, jt):
     mx = feature.match_descriptors(it, jt)
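     # Dice-style overlap: twice the number of cross-checked matches divided
     # by the total number of descriptors in the two sets.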
     return float(len(mx) * 2) / (len(it) + len(jt))
Example #52
 def match(desc):
     desc1, desc2 = desc[0], desc[1]
     matches = match_descriptors(desc1, desc2, cross_check=True)
     return matches
Example #53
def main():
    image_base_dir = '/home/dek/makerfaire-booth/2018/burger/experimental/dek/train_object_detector/decoded'
    canonical_dir = 'canonical'
    # template = os.path.join(image_base_dir, 'bottombun.0.00.27.34.-24.61.0.81.png')
    fig, axes = plt.subplots(7, 7, figsize=(7, 6), sharex=True, sharey=True)

    fig.delaxes(axes[0][0])

    ssims = numpy.zeros( (len(BurgerElement.__members__), len(BurgerElement.__members__)), dtype=float)
    mses = numpy.zeros( (len(BurgerElement.__members__), len(BurgerElement.__members__)), dtype=float)
                         
    for i, layer in enumerate(BurgerElement.__members__):
        template = os.path.join(canonical_dir, '%s.png' % layer)

        img1 = imread(template)
        # img1_padded = numpy.zeros( (WIDTH, HEIGHT,3), dtype=numpy.uint8)
        img1_padded = numpy.resize( [255,255,255], (WIDTH, HEIGHT, 3))
        s = img1.shape
        w = s[0]
        h = s[1]
        nb = img1_padded.shape[0]
        na = img1.shape[0]
        lower1 = (nb) // 2 - (na // 2)
        upper1 = (nb // 2) + (na // 2)
        nb = img1_padded.shape[1]
        na = img1.shape[1]
        lower2 = (nb) // 2 - (na // 2)
        upper2 = (nb // 2) + (na // 2)
        img1_padded[lower1:upper1, lower2:upper2] = img1
        img1_padded_float = img1_padded.astype(numpy.float64)/255.
        print img1_padded_float.shape
        img1_gray = rgb2gray(img1_padded_float)

        descriptor_extractor = ORB()

        try:
            descriptor_extractor.detect_and_extract(img1_gray)
        except RuntimeError:
            continue
        
        keypoints1 = descriptor_extractor.keypoints
        descriptors1 = descriptor_extractor.descriptors

        axes[i][0].imshow(img1_padded_float)
        axes[i][0].set_title("Template image")

        for j, layer2 in enumerate(BurgerElement.__members__):

            rot, tx, ty, scale = get_random_orientation()
            img2 = draw_example(layer2, WIDTH, HEIGHT, rot, tx, ty, scale)

            # match = os.path.join(canonical_dir, '%s.png' % layer2)
            # img2 = imread(match)

            img2_padded = numpy.resize( [255,255,255], (WIDTH, HEIGHT, 3))
            s = img2.shape
            img2_padded[:s[0], :s[1]] = img2
            img2_padded_float = img2_padded.astype(numpy.float64)/255.
            img2_gray = rgb2gray(img2_padded_float)

            try:
                descriptor_extractor.detect_and_extract(img2_gray)
            except RuntimeError:
                continue

            keypoints2 = descriptor_extractor.keypoints
            descriptors2 = descriptor_extractor.descriptors

            matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

            src = keypoints2[matches12[:, 1]][:, ::-1]
            dst = keypoints1[matches12[:, 0]][:, ::-1]

            model_robust, inliers = \
                ransac((src, dst), SimilarityTransform,
                       min_samples=4, residual_threshold=2)
            if not model_robust:
                print "bad"
                continue
            img2_transformed = transform.warp(img2_padded_float, model_robust.inverse, mode='constant', cval=1)
            sub = img2_transformed - img1_padded_float
            ssim = compare_ssim(img2_transformed, img1_padded_float, win_size=5, multichannel=True)
            mse = compare_mse(img2_transformed, img1_padded_float)
            ssims[i,j] = ssim
            mses[i,j] = mse

            axes[0][j].imshow(img2_padded_float)
            axes[0][j].set_title("Match image")

            axes[i][j].imshow(img2_transformed)
            axes[i][j].set_title("Transformed image")
            axes[i][j].set_xlabel("SSIM: %9.4f MSE: %9.4f" % (ssim, mse))

        # ax = plt.gca()
        # plot_matches(ax, img1, img2, keypoints1, keypoints2, matches12)

    print ssims
    print numpy.argmax(ssims, axis=1)
    print numpy.argmin(mses, axis=1)
                       
    plt.show()
Example #54
def match_descriptors(descriptor1, descriptor2):

    # This wrapper shadows skimage.feature.match_descriptors, so the library
    # function must be referenced through its module to avoid recursion.
    matches12 = feature.match_descriptors(descriptor1, descriptor2, cross_check=True)

    return matches12
Example #55
File: image.py Project: zshipko/imagepy
def match_template(im, template, *args, **kw):
    '''see skimage.feature.match_template'''
    return feature.match_descriptors(im, template, *args, **kw)
Example #56
    try:
        #Retrieve Keypoint Features 
        keypoints_database = pickle.load( open("features/" + folder1 + "/orb/gray/" + file_names[i][:-5] + "_orb.pkl", "rb") ) 
        kp, des = unpickle_keypoints(keypoints_database[0])
        K.append(kp)
        D.append(np.array(des))
        print(len(D))
        print(len(D[0]))
        print(len(D[0][0]))
    except IOError:
        print("Couldn't load " + file_names[i] + " due to IOError.")
    except TypeError:
        print("Couldn't load " + file_names[i] + " due to TypeError.")


    # print("Progress: " + str(i) + "/" + str(N))
# distance = np.zeros((len(K), len(K)))

for i in range(len(K)):
    top1 = [0, 0]
    for j in range(len(K)):
        matches = match_descriptors(D[i], D[j])
        if matches.shape[0] > top1[0]:
            top1[0] = matches.shape[0]
            top1[1] = j
        # distance[i][j] = matches.shape[0]
    print(top1)
    top1 = [0, 0]
    print("Keypoints: " + str(K[i][matches[:,0]]) + ', ' + str(K[j][matches[:,1]]))
    print("Progress: " + str(i) + "/" + str(len(K)))
Example #57
File: image.py Project: zshipko/imagepy
def match_descriptors(d1, d2, *args, **kw):
    '''see skimage.feature.match_descriptors'''
    return feature.match_descriptors(d1, d2, *args, **kw)
Example #58
img_left, img_right, groundtruth_disp = data.stereo_motorcycle()
img_left, img_right = map(rgb2gray, (img_left, img_right))

# Find sparse feature correspondences between left and right image.

descriptor_extractor = ORB()

descriptor_extractor.detect_and_extract(img_left)
keypoints_left = descriptor_extractor.keypoints
descriptors_left = descriptor_extractor.descriptors

descriptor_extractor.detect_and_extract(img_right)
keypoints_right = descriptor_extractor.keypoints
descriptors_right = descriptor_extractor.descriptors

matches = match_descriptors(descriptors_left, descriptors_right,
                            cross_check=True)

# Estimate the epipolar geometry between the left and right image.

model, inliers = ransac((keypoints_left[matches[:, 0]],
                         keypoints_right[matches[:, 1]]),
                        FundamentalMatrixTransform, min_samples=8,
                        residual_threshold=1, max_trials=5000)
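# min_samples=8 corresponds to the classic eight-point algorithm for
# estimating the fundamental matrix.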

inlier_keypoints_left = keypoints_left[matches[inliers, 0]]
inlier_keypoints_right = keypoints_right[matches[inliers, 1]]

print("Number of matches:", matches.shape[0])
print("Number of inliers:", inliers.sum())

# Compare estimated sparse disparities to the dense ground-truth disparities.
Example #59
File: image.py Project: gracz21/KCK
def recognize(pattern, name, scene):
    zipped_matches = []
    match = match_descriptors(scene, pattern, cross_check=True, max_distance=0.5)
    zipped_matches.append([match.size, name, match])
    return zipped_matches