Example 1
def plot_pair_ransac(outputdir, pairstring, p, full_im1, full_im2, kp_im1,
                     kp_im2, matches, inliers):
    """Create plots of orb keypoints vs. ransac inliers."""

    from os import path, makedirs
    import matplotlib.pyplot as plt
    from skimage.feature import plot_matches

    fig, (ax1, ax2) = plt.subplots(2, 1)
    plot_matches(ax1,
                 full_im1,
                 full_im2,
                 kp_im1,
                 kp_im2,
                 matches,
                 only_matches=True)
    ax1.axis('off')
    plot_matches(ax2,
                 full_im1,
                 full_im2,
                 kp_im1,
                 kp_im2,
                 matches[inliers],
                 only_matches=True)
    ax2.axis('off')
    plotdir = path.join(outputdir, 'plotpairs')
    if not path.exists(plotdir):
        makedirs(plotdir)
    fig.savefig(path.join(plotdir, pairstring))
    plt.close(fig)
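A hedged sketch (not from the original project) of how the inputs to plot_pair_ransac above might be produced with skimage; the image file names, keypoint count, and RANSAC parameters are assumptions.

import numpy as np
from skimage import io
from skimage.color import rgb2gray
from skimage.feature import ORB, match_descriptors
from skimage.measure import ransac
from skimage.transform import AffineTransform

im1 = rgb2gray(io.imread('im1.png'))
im2 = rgb2gray(io.imread('im2.png'))

orb = ORB(n_keypoints=500)
orb.detect_and_extract(im1)
kp_im1, desc1 = orb.keypoints, orb.descriptors
orb.detect_and_extract(im2)
kp_im2, desc2 = orb.keypoints, orb.descriptors

matches = match_descriptors(desc1, desc2, cross_check=True)
# Robustly fit a model to the matched coordinates; `inliers` is the
# boolean mask the plotting function indexes `matches` with.
_, inliers = ransac((kp_im1[matches[:, 0]], kp_im2[matches[:, 1]]),
                    AffineTransform, min_samples=3,
                    residual_threshold=2, max_trials=1000)

plot_pair_ransac('out', 'pair.png', None, im1, im2, kp_im1, kp_im2,
                 matches, inliers)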
Example 2
def plot_template_matches(keypoints_q, keypoints_r, inliers, query_image,
                          reference_image_border):
    import numpy as np
    import matplotlib.pyplot as plt
    from skimage.feature import plot_matches

    keypoints_q = np.fliplr(keypoints_q)
    keypoints_r = np.fliplr(keypoints_r)
    matches = np.array(
        list(zip(range(len(keypoints_q)), range(len(keypoints_r)))))

    print(f"Number of matches: {matches.shape[0]}")
    print(f"Number of inliers: {inliers.sum()}")
    fig, ax = plt.subplots(nrows=2, ncols=1)

    plot_matches(ax[0], (255 - query_image), (255 - reference_image_border),
                 keypoints_q, keypoints_r, matches)  #,alignment="vertical")
    plot_matches(ax[1], (255 - query_image), (255 - reference_image_border),
                 keypoints_q, keypoints_r,
                 matches[inliers])  #,alignment="vertical")
    y = query_image.shape[0]
    plt.plot([500, 1000, 1000, 500, 500], [y, y, 0, 0, y], "r", linewidth=2)
    plt.plot([530, 970, 970, 530, 530], [y - 30, y - 30, 30, 30, y - 30],
             "g",
             linewidth=1)
    # plt.xticks([],[])
    # plt.yticks([],[])
    # for spine in ax.spines:
    #     ax.spines[spine].set_visible(False)
    plt.show()
Example 3
def plot_inliers(src, dest, src_keypoints, dest_keypoints, matches):
    plt.figure(figsize=FIGSIZE)
    ax = plt.axes()
    ax.axis("off")
    ax.set_title(f"Inlier correspondences: {len(matches)} points matched")
    plot_matches(ax, src, dest, src_keypoints, dest_keypoints, matches)
    plt.show()
Example 4
def show_correspondences(imgA, imgB, X1, Y1, X2, Y2, matches, good_matches, number_to_display, filename=None):
	"""
		Visualizes corresponding points between two images: matches flagged
		in 'good_matches' are drawn in green, the remainder in orange-red.

		Writes out a png of the visualization if 'filename' is not None.
	"""

	# generates unique figures so students can
	# look at all three at once
	fig, ax = plt.subplots(nrows=1, ncols=1)

	matches = matches[0:number_to_display, :]
	good_matches = good_matches[0:number_to_display]

	kp1 = zip_x_y(Y1, X1)
	kp2 = zip_x_y(Y2, X2)
	matches = matches.astype(int)
	plot_matches(ax, imgA, imgB, kp1, kp2, matches[np.logical_not(good_matches)], matches_color='orangered')
	plot_matches(ax, imgA, imgB, kp1, kp2, matches[good_matches], matches_color='springgreen')

	fig = plt.gcf()

	if filename:
		if not os.path.isdir('../results'):
			os.mkdir('../results')
		fig.savefig('../results/' + filename)

	plt.show()
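zip_x_y is not defined in this snippet; a plausible minimal version, assuming it simply stacks the Y/X coordinate vectors into the (row, col) keypoint array that plot_matches expects:

import numpy as np

def zip_x_y(y, x):
    # Stack row (y) and column (x) coordinates into an (N, 2) keypoint array.
    return np.stack([np.asarray(y), np.asarray(x)], axis=1)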
Example 5
def show_matches(img1, img2, feat1, feat2):
    matches12 = match_descriptors(
        feat1["descriptors"], feat2["descriptors"], cross_check=True
    )

    fig, (ax3, ax2) = plt.subplots(1, 2, figsize=(15, 5))

    plot_matches(ax3, img1, img2, feat1["keypoints"], feat2["keypoints"], matches12)

    ax2.plot(feat1["keypoints"][:, 1], feat1["keypoints"][:, 0], ".", label="Before")

    ax2.plot(feat2["keypoints"][:, 1], feat2["keypoints"][:, 0], ".", label="After")

    for i, (c_idx, n_idx) in enumerate(matches12):
        x_vec = [feat1["keypoints"][c_idx, 0], feat2["keypoints"][n_idx, 0]]
        y_vec = [feat1["keypoints"][c_idx, 1], feat2["keypoints"][n_idx, 1]]
        dist = np.sqrt(np.square(np.diff(x_vec)) + np.square(np.diff(y_vec)))
        alpha = float(np.clip(50 / dist, 0, 1))

        ax2.plot(y_vec, x_vec, "k-", alpha=alpha, label="Match" if i == 0 else "")

    ax2.legend()

    ax3.set_title(r"{} $\rightarrow$ {}".format("Before", "After"))
Example 6
def MatchPics(I1,I2):
    
    if I1.ndim == 3:
        I1 = rgb2gray(I1)   
    if I2.ndim == 3:
        I2 = rgb2gray(I2)
    
    points1 = corner_peaks(corner_fast(I1,n=12,threshold=0.15),min_distance=1)
    points2 = corner_peaks(corner_fast(I2,n=12,threshold=0.15),min_distance=1)
    
    extractor = BRIEF()
    
    extractor.extract(I1,points1)
    points1 = points1[extractor.mask]
    descriptors1 = extractor.descriptors
    
    extractor.extract(I2,points2)
    points2 = points2[extractor.mask]
    descriptors2 = extractor.descriptors
    
    matches = match_descriptors(descriptors1,descriptors2,metric = 'hamming',cross_check=True)
    
    #these points are y,x (row,col)
    locs1 = points1[matches[:,0]]
    locs2 = points2[matches[:,1]]
    #Change to x,y (col,row)
    xy1 = np.array([locs1[:,1],locs1[:,0]])
    xy1 = xy1.transpose()
    xy2 = np.array([locs2[:,1],locs2[:,0]])
    xy2 = xy2.transpose()
    fig, ax = plt.subplots()
    plot_matches(ax,I1,I2,points1,points2,matches,keypoints_color='r',only_matches=True)#,matches_color='y')
    
    return [xy1,xy2]
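A minimal usage sketch for MatchPics; the file names are placeholders, not from the original project.

from skimage import io

img1 = io.imread('cover.jpg')
img2 = io.imread('scene.jpg')
xy1, xy2 = MatchPics(img1, img2)  # matched locations, one (x, y) pair per row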
Example 7
def epipolar_rectify(imL,imR,show_matches=True):
    descriptor_extractor = ORB(n_keypoints=2000)
    
    descriptor_extractor.detect_and_extract(imL)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors    
    
    descriptor_extractor.detect_and_extract(imR)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors        
    
    matches12 = match_descriptors(descriptors1, descriptors2,metric='hamming', cross_check=True)
    
    # skimage keypoints are (row, col); OpenCV expects (x, y), so flip.
    pts1 = keypoints1[matches12[:, 0], ::-1]
    pts2 = keypoints2[matches12[:, 1], ::-1]

    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_RANSAC)
    pts1 = pts1[mask.ravel() == 1]
    pts2 = pts2[mask.ravel() == 1]

    # imgSize is (width, height); the last argument is the RANSAC threshold.
    res, H1, H2 = cv2.stereoRectifyUncalibrated(pts1, pts2, F, imL.shape[::-1], 10)
    
    if show_matches:
        fig, ax = plt.subplots(nrows=1, ncols=1)
        plot_matches(ax, imL, imR, keypoints1, keypoints2, matches12)    
    
    return H1,H2
Example 8
def orb(img_path):
    image = PIL.Image.open(img_path).convert('L')
    img1 = np.array(image)
    img2 = tf.rotate(img1, 180)

    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(img1)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors

    descriptor_extractor.detect_and_extract(img2)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    fig, ax = plt.subplots()

    plt.gray()

    plot_matches(ax, img1, img2, keypoints1, keypoints2, matches12)
    ax.axis('off')
    ax.set_title(img_path)

    plt.show()

    return matches12.shape[0]
Example 9
def plot_pair_ransac(p, full_im1, full_im2, kp_im1, kp_im2, matches, inliers):
    """Create plots of orb keypoints vs. ransac inliers."""
    fig, (ax1, ax2) = plt.subplots(2, 1)
    plot_matches(ax1,
                 full_im1,
                 full_im2,
                 kp_im1,
                 kp_im2,
                 matches,
                 only_matches=True)
    ax1.axis('off')
    plot_matches(ax2,
                 full_im1,
                 full_im2,
                 kp_im1,
                 kp_im2,
                 matches[inliers],
                 only_matches=True)
    ax2.axis('off')
    plotdir = path.join(datadir, 'plotpairs')
    if not path.exists(plotdir):
        makedirs(plotdir)
        print(plotdir)
    fig.savefig(
        path.join(
            plotdir, 'pair_s' + str(p[0][0]).zfill(4) + '-t' + str(p[0][1]) +
            '_s' + str(p[1][0]).zfill(4) + '-t' + str(p[1][1]) + '.tif'))
    plt.close(fig)
Example 10
def match_pairwise(sfm_storage, vis_matches, profile=True):
    """
    Match keypoints of each image pair by their descriptors.
    
    sfm_storage:
        `SFMStorage` instance
    
    vis_matches:
        bool
        Whether to draw the matches
     
    profile:
        bool
        Whether to measure execution time
     
    return:
        the input `SFMStorage` instance, filled with matches information
    """
    for i in range(len(sfm_storage.img_pose)):
        for j in range(i + 1, len(sfm_storage.img_pose)):
            # detect features and extract descriptors
            src_keypoints, src_descriptors = sfm_storage.img_pose[
                i].kp, sfm_storage.img_pose[i].desc
            dest_keypoints, dest_descriptors = sfm_storage.img_pose[
                j].kp, sfm_storage.img_pose[j].desc
            # RANSAC outlier filtering
            if profile: beg = time.time()
            robust_transform, matches = match_robust(
                src_keypoints,
                src_descriptors,
                dest_keypoints,
                dest_descriptors,
                method='flann',
                min_samples=4,
                residual_threshold=100,
                max_trials=3000,
            )
            if profile: print('Match and RANSAC time:', time.time() - beg)
            # save img1-kp1-img2-kp2 matches to global helper SFM instance
            for m in matches:
                sfm_storage.img_pose[i].kp_matches[(m[0], j)] = m[1]
                sfm_storage.img_pose[j].kp_matches[(m[1], i)] = m[0]
            print(
                f"Feature matching: image {i} <-> image {j} ==> {len(matches)} matches"
            )
            # vis
            if vis_matches:
                plt.figure(figsize=FIGSIZE)
                ax = plt.axes()
                ax.axis("off")
                ax.set_title(
                    f"Inlier correspondences: {len(matches)} points matched")
                plot_matches(ax, sfm_storage.img_pose[i].img,
                             sfm_storage.img_pose[j].img,
                             src_keypoints[:, ::-1], dest_keypoints[:, ::-1],
                             matches)
                plt.show()
    return sfm_storage
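match_robust is not defined in these snippets. A rough sketch of what it plausibly does, with skimage's match_descriptors standing in for the 'flann' matcher and ProjectiveTransform as an assumed model; this is an assumption, not the original implementation.

import numpy as np
from skimage.feature import match_descriptors
from skimage.measure import ransac
from skimage.transform import ProjectiveTransform

def match_robust(src_keypoints, src_descriptors,
                 dest_keypoints, dest_descriptors,
                 method='flann', min_samples=4,
                 residual_threshold=100, max_trials=3000):
    # Putative matches by descriptor distance (cross-check for symmetry).
    matches = match_descriptors(src_descriptors, dest_descriptors,
                                cross_check=True)
    # Robustly fit a transform and keep only the inlier matches.
    transform, inliers = ransac(
        (src_keypoints[matches[:, 0]], dest_keypoints[matches[:, 1]]),
        ProjectiveTransform, min_samples=min_samples,
        residual_threshold=residual_threshold, max_trials=max_trials)
    return transform, matches[inliers]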
Example 11
def main(unused_argv):
  tf.logging.set_verbosity(tf.logging.INFO)

  start = time.time()

  # Read features.
  locations_1, _, descriptors_1, _, _ = feature_io.ReadFromFile(
      cmd_args.features_1_path)
  num_features_1 = locations_1.shape[0]
  tf.logging.info("Loaded image 1's %d features" % num_features_1)
  locations_2, _, descriptors_2, _, _ = feature_io.ReadFromFile(
      cmd_args.features_2_path)
  num_features_2 = locations_2.shape[0]
  tf.logging.info("Loaded image 2's %d features" % num_features_2)

  # Find nearest-neighbor matches using a KD tree.
  d1_tree = spatial.cKDTree(descriptors_1)
  _, indices = d1_tree.query(
      descriptors_2, distance_upper_bound=_DISTANCE_THRESHOLD)

  print('>> feature match elapsed time: {}'.format(htime(time.time() - start)))

  # Select feature locations for putative matches.
  locations_2_to_use = np.array([
      locations_2[i,]
      for i in range(num_features_2)
      if indices[i] != num_features_1
  ])
  locations_1_to_use = np.array([
      locations_1[indices[i],]
      for i in range(num_features_2)
      if indices[i] != num_features_1
  ])

  # Perform geometric verification using RANSAC.
  _, inliers = measure.ransac((locations_1_to_use, locations_2_to_use),
                              transform.AffineTransform,
                              min_samples=3,
                              residual_threshold=20,
                              max_trials=1000)

  tf.logging.info('Found %d inliers' % sum(inliers))

  # Visualize correspondences, and save to file.
  _, ax = plt.subplots()
  img_1 = mpimg.imread(cmd_args.image_1_path)
  img_2 = mpimg.imread(cmd_args.image_2_path)
  inlier_idxs = np.nonzero(inliers)[0]
  feature.plot_matches(
      ax,
      img_1,
      img_2,
      locations_1_to_use,
      locations_2_to_use,
      np.column_stack((inlier_idxs, inlier_idxs)),
      matches_color='b')
  ax.axis('off')
  ax.set_title('DELF correspondences')
  plt.savefig(cmd_args.output_image)
Example 12
def visualize_matches(img1, img2, keypoints, matches):
    fig, ax = plt.subplots(nrows=1, ncols=1)
    plt.gray()
    feature.plot_matches(ax, img1, img2, keypoints[0], keypoints[1], matches, only_matches=True, alignment='vertical')
    ax.axis('off')
    ax.set_title("Original Image vs. Transformed Image")
    plt.show()
    plt.close()
Example 13
def showMatches(im1, im2, c1, c2, matches, title=""):
    disp_matches = np.array([np.arange(matches.shape[0]), matches]).T.astype(int)
    valid_matches = np.where(matches>=0)[0]
    disp_matches = disp_matches[valid_matches, :]
    fig, ax = plt.subplots(nrows=1, ncols=1)
    plot_matches(ax, im1, im2,
            c1[[1, 0], :].astype(int).T, c2[[1,0], :].astype(int).T, disp_matches)
    ax.set_title(title)
Example 14
def match_images(results_dict, image_1_path, image_2_path):
  distance_threshold = 0.8

  # Read features.
  locations_1, descriptors_1 = results_dict[image_1_path]
  num_features_1 = locations_1.shape[0]
  print("Loaded image 1's %d features" % num_features_1)
  locations_2, descriptors_2 = results_dict[image_2_path]
  num_features_2 = locations_2.shape[0]
  print("Loaded image 2's %d features" % num_features_2)

  # Find nearest-neighbor matches using a KD tree.
  d1_tree = cKDTree(descriptors_1)
  _, indices = d1_tree.query(
      descriptors_2, distance_upper_bound=distance_threshold)

  # Select feature locations for putative matches.
  locations_2_to_use = np.array([
      locations_2[i,]
      for i in range(num_features_2)
      if indices[i] != num_features_1
  ])
  locations_1_to_use = np.array([
      locations_1[indices[i],]
      for i in range(num_features_2)
      if indices[i] != num_features_1
  ])

  # Perform geometric verification using RANSAC.
  _, inliers = ransac(
      (locations_1_to_use, locations_2_to_use),
      AffineTransform,
      min_samples=3,
      residual_threshold=20,
      max_trials=1000)

  print('Found %d inliers' % sum(inliers))

  # Visualize correspondences.
  _, ax = plt.subplots()
  img_1 = mpimg.imread(image_1_path)
  img_2 = mpimg.imread(image_2_path)
  inlier_idxs = np.nonzero(inliers)[0]
  plot_matches(
      ax,
      img_1,
      img_2,
      locations_1_to_use,
      locations_2_to_use,
      np.column_stack((inlier_idxs, inlier_idxs)),
      matches_color='b')
  ax.axis('off')
  ax.set_title('DELF correspondences')
  plt.show()

  print("hello world");
Example 15
def plot_point_matches(src, dest, img1, img2):
    index = np.arange(src.shape[0])
    fig, ax = plt.subplots(nrows=1, ncols=1)

    plt.gray()
    plot_matches(ax, img1, img2, src, dest, 
                 np.column_stack((index, index)), matches_color='b', alignment='vertical')
    ax.axis('off')
    ax.set_title('Correct correspondences')
Example 16
def main():
	# save_frames()

	meta_dir = '../scratch/matching_im'
	in_dir = os.path.join(meta_dir, 'sir_holger')
	im_format = 'si_%d_20181102111500.jpg'
	for views in [[0,1],[1,2],[2,3],[3,0]]:
		ims = [os.path.join(in_dir, im_format%view) for view in views]
		out_file = os.path.join(in_dir, 'correspondences_%d_%d.jpg'%(views[0],views[1]))


		img_left = rescale(io.imread(ims[0]), scale=0.25).squeeze()
		img_right = rescale(io.imread(ims[1]), scale=0.25).squeeze()
		
		# Find sparse feature correspondences between left and right image.

		descriptor_extractor = ORB()

		descriptor_extractor.detect_and_extract(img_left)
		keypoints_left = descriptor_extractor.keypoints
		descriptors_left = descriptor_extractor.descriptors

		descriptor_extractor.detect_and_extract(img_right)
		keypoints_right = descriptor_extractor.keypoints
		descriptors_right = descriptor_extractor.descriptors

		matches = match_descriptors(descriptors_left, descriptors_right,
		                            cross_check=True)

		# Estimate the epipolar geometry between the left and right image.

		model, inliers = ransac((keypoints_left[matches[:, 0]],
		                         keypoints_right[matches[:, 1]]),
		                        FundamentalMatrixTransform, min_samples=8,
		                        residual_threshold=1, max_trials=5000)

		inlier_keypoints_left = keypoints_left[matches[inliers, 0]]
		inlier_keypoints_right = keypoints_right[matches[inliers, 1]]

		print(f"Number of matches: {matches.shape[0]}")
		print(f"Number of inliers: {inliers.sum()}")
		
		plt.figure()
		
		plt.gray()

		plot_matches(plt.gca(), img_left, img_right, keypoints_left, keypoints_right,
		             matches[inliers], only_matches=True)
		plt.gca().axis("off")
		plt.gca().set_title("Inlier correspondences")

		plt.savefig(out_file)
		plt.close()
		print(out_file)

	visualize.writeHTMLForFolder(in_dir)
Example 17
def main(unused_argv):
  tf.logging.set_verbosity(tf.logging.INFO)

  # Read features.
  locations_1, _, descriptors_1, _, _ = feature_io.ReadFromFile(
      cmd_args.features_1_path)
  num_features_1 = locations_1.shape[0]
  tf.logging.info("Loaded image 1's %d features" % num_features_1)
  locations_2, _, descriptors_2, _, _ = feature_io.ReadFromFile(
      cmd_args.features_2_path)
  num_features_2 = locations_2.shape[0]
  tf.logging.info("Loaded image 2's %d features" % num_features_2)

  # Find nearest-neighbor matches using a KD tree.
  d1_tree = cKDTree(descriptors_1)
  _, indices = d1_tree.query(
      descriptors_2, distance_upper_bound=_DISTANCE_THRESHOLD)

  # Select feature locations for putative matches.
  locations_2_to_use = np.array([
      locations_2[i,]
      for i in range(num_features_2)
      if indices[i] != num_features_1
  ])
  locations_1_to_use = np.array([
      locations_1[indices[i],]
      for i in range(num_features_2)
      if indices[i] != num_features_1
  ])

  # Perform geometric verification using RANSAC.
  _, inliers = ransac(
      (locations_1_to_use, locations_2_to_use),
      AffineTransform,
      min_samples=3,
      residual_threshold=20,
      max_trials=1000)

  tf.logging.info('Found %d inliers' % sum(inliers))

  # Visualize correspondences, and save to file.
  _, ax = plt.subplots()
  img_1 = mpimg.imread(cmd_args.image_1_path)
  img_2 = mpimg.imread(cmd_args.image_2_path)
  inlier_idxs = np.nonzero(inliers)[0]
  plot_matches(
      ax,
      img_1,
      img_2,
      locations_1_to_use,
      locations_2_to_use,
      np.column_stack((inlier_idxs, inlier_idxs)),
      matches_color='b')
  ax.axis('off')
  ax.set_title('DELF correspondences')
  plt.savefig(cmd_args.output_image)
Example 18
def visualization(cor_points):
    ma_cor = cor_points[:,0:2]
    sl_cor = cor_points[:,2:4]
    index_2 = np.arange(cor_points.shape[0])
    
    fig, ax = plt.subplots()
    plt.gray()   
    plot_matches(ax, my_img_master, my_img_slave, ma_cor, sl_cor,
             np.column_stack((index_2, index_2)), matches_color='r', alignment='vertical')
    ax.axis('off')
    plt.show()
Example 19
def plot_matches(src, dst, src_keypoints, dst_keypoints, matches_src_dst):
    # Import locally so the call below resolves to skimage's plot_matches
    # instead of recursing into this same (shadowing) function.
    from skimage import feature

    fig, ax = plt.subplots()

    plt.gray()

    feature.plot_matches(ax, src, dst, src_keypoints, dst_keypoints,
                         matches_src_dst)

    ax.axis('off')

    plt.show()
Example 20
def showMatches(im1, im2, c1, c2, matches, title=""):
    disp_matches = np.array([np.arange(matches.shape[0]),
                             matches]).T.astype(int)
    valid_matches = np.where(matches >= 0)[0]
    disp_matches = disp_matches[valid_matches, :]
    fig, ax = plt.subplots(nrows=1, ncols=1)
    plot_matches(ax, im1, im2, c1[:, [1, 0]].astype(int),
                 c2[:, [1, 0]].astype(int), disp_matches)
    ax.set_title(title)
    plt.show()
Example 22
def show_matches(img1, img2, kp1, kp2, matches):
    # Convert OpenCV keypoints (x, y) to the (row, col) arrays and the
    # (N, 2) index pairs that skimage's plot_matches expects.
    kp1 = np.array([[int(kp.pt[1]), int(kp.pt[0])] for kp in kp1])
    kp2 = np.array([[int(kp.pt[1]), int(kp.pt[0])] for kp in kp2])
    matches = np.array([[m.queryIdx, m.trainIdx] for m in matches])

    fig, ax = plt.subplots(nrows=2, ncols=1)
    plot_matches(ax[0], cv2.resize(img1, (256, 256)), cv2.resize(img2, (256, 256)), kp1, kp2, matches)
    ax[0].axis('off')
    ax[0].set_title("Matches")
    plt.show()

    """ matchesMask = [[0,0] for i in range(len(matches))]
Example 23
def draw_matches(img1,
                 img2,
                 kp1,
                 kp2,
                 matches,
                 title,
                 x0=None,
                 y0=None,
                 window=None):
    """
    plot the given keypoints and there matches on the given images
    :param img1: first image
    :param img2: second image
    :param kp1: keypoints for the first image
    :param kp2: keypoints for the second image
    :param matches: list of pairs of indices to match the keypoints from the first to second image
    :param title: title for this figure
    :param x0: x point of our ROI for displaying the matches
    :param y0: y point of our ROI for displaying the matches
    :param window: size of the ROI
    :return: None
    """

    filtered_matches = matches

    if (y0 is not None) and (x0 is not None) and (window is not None):
        print(x0, y0, window)
        filtered_matches = []
        for m in matches:
            y, x = kp1[m[0]]
            # keep only matches whose first keypoint lies inside the ROI
            if abs(x - x0) <= window and abs(y - y0) <= window:
                filtered_matches.append(m)

    filtered_matches = np.array(filtered_matches)
    fig, ax = plt.subplots()

    plt.gray()

    plot_matches(ax,
                 img1,
                 img2,
                 kp1,
                 kp2,
                 filtered_matches,
                 only_matches=True,
                 keypoints_color='red')
    ax.set_title(title)

    plt.show()
Example 24
def plotDescriptors(images, grid=(2, 2), figsize=(15, 9), **kwargs):
    f, ax = plt.subplots(grid[0], grid[1], figsize=figsize)
    for imageIdx, (title, args) in enumerate(images.items()):
        if grid[0] == 1:
            ax[imageIdx].set_title(title)
            feature.plot_matches(ax[imageIdx], *args, **kwargs)
            ax[imageIdx].axis('off')
        else:
            ax[imageIdx // grid[1]][imageIdx % grid[1]].set_title(title)
            feature.plot_matches(ax[imageIdx // grid[1]][imageIdx % grid[1]],
                                 *args, **kwargs)
            ax[imageIdx // grid[1]][imageIdx % grid[1]].axis('off')
    return f, ax
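A hedged usage sketch for plotDescriptors; img1, img2, the keypoints, matches12 and inliers stand for values computed as in the earlier examples, and extra keyword arguments are forwarded to feature.plot_matches.

images = {
    'All ORB matches': (img1, img2, keypoints1, keypoints2, matches12),
    'RANSAC inliers': (img1, img2, keypoints1, keypoints2, matches12[inliers]),
}
fig, ax = plotDescriptors(images, grid=(1, 2), figsize=(12, 5),
                          only_matches=True)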
Example 25
def plot_correspondences(im1, im2, src, dst, inliers):
    inlier_idxs = np.nonzero(inliers)[0]
    fig, ax = plt.subplots(nrows=1, ncols=1)
    plt.gray()
    plot_matches(ax,
                 im1,
                 im2,
                 src,
                 dst,
                 np.column_stack((inlier_idxs, inlier_idxs)),
                 matches_color='b')
    ax.axis('off')
    ax.set_title('RANSAC-filtered correspondences with closest image')
    plt.show()
Example 26
def match_sequential(sfm_storage, vis_matches, profile=True):
    """
    Match keypoints of each consecutive image pair by their descriptors.
    """
    for i in range(len(sfm_storage.img_pose) - 1):
        j = i + 1
        # detect features and extract descriptors
        src_keypoints, src_descriptors = sfm_storage.img_pose[
            i].kp, sfm_storage.img_pose[i].desc
        dest_keypoints, dest_descriptors = sfm_storage.img_pose[
            j].kp, sfm_storage.img_pose[j].desc
        # RANSAC outlier filtering
        if profile: beg = time.time()
        robust_transform, matches = match_robust(
            src_keypoints,
            src_descriptors,
            dest_keypoints,
            dest_descriptors,
            method='flann',
            min_samples=4,
            residual_threshold=100,
            max_trials=3000,
        )
        print(matches.shape)
        if profile: print('Match and RANSAC time:', time.time() - beg)
        # save img1-kp1-img2-kp2 matches to global helper SFM instance
        for m in matches:
            sfm_storage.img_pose[i].kp_matches[(m[0], j)] = m[1]
            sfm_storage.img_pose[j].kp_matches[(m[1], i)] = m[0]
        print(
            f"Feature matching: image {i} <-> image {j} ==> {len(matches)} matches"
        )
        # vis
        FIGSIZE = (15, 10)
        if vis_matches:
            plt.figure(figsize=FIGSIZE)
            ax = plt.axes()
            ax.axis("off")
            ax.set_title(
                f"Inlier correspondences: {len(matches)} points matched")
            plot_matches(
                ax,
                sfm_storage.img_pose[i].img,
                sfm_storage.img_pose[j].img,
                src_keypoints[:, ::-1],  # !!!
                dest_keypoints[:, ::-1],  # !!!
                matches)
            plt.show()
    return sfm_storage
Example 27
def main(*args):

    args = parse_args()

    # Read features.
    locations_1, _, descriptors_1, attention_1, _ = feature_io.ReadFromFile(
        args.features_1_path)
    locations_2, _, descriptors_2, attention_2, _ = feature_io.ReadFromFile(
        args.features_2_path)

    num_features_1 = locations_1.shape[0]
    num_features_2 = locations_2.shape[0]

    d1_tree = cKDTree(descriptors_1)
    distances, indices = d1_tree.query(
        descriptors_2, distance_upper_bound=args.distance_threshold)

    has_match = distances != np.inf
    locations_1_to_use = locations_1[indices[has_match]]
    locations_2_to_use = locations_2[has_match]

    _, inliers = ransac((locations_1_to_use, locations_2_to_use),
                        AffineTransform,
                        min_samples=3,
                        residual_threshold=20,
                        max_trials=1000)

    if inliers is None:
        raise Exception('match_images.py: inliers is None')
    else:
        print('number of inliers -> %d' % inliers.sum())

    # --
    # Plot

    fig, ax = plt.subplots()

    inlier_idxs = np.nonzero(inliers)[0]
    plot_matches(ax,
                 mpimg.imread(args.image_1_path),
                 mpimg.imread(args.image_2_path),
                 locations_1_to_use,
                 locations_2_to_use,
                 np.column_stack((inlier_idxs, inlier_idxs)),
                 matches_color='b')
    ax.axis('off')
    ax.set_title('DELF correspondences')
    plt.savefig(args.output_image)
Example 28
def draw_inliers(img1, img2, inliers, keypoints1, keypoints2, img_name):
    if not os.path.exists('./%s' % img_name):
        os.makedirs('./%s' % img_name)
    for index, mat in enumerate(inliers):
        fig, ax = plt.subplots()
        plot_matches(ax,
                     img1,
                     img2,
                     keypoints1,
                     keypoints2,
                     np.array([[mat[0], mat[1]]]),
                     only_matches=True)
        ax.axis('off')
        fig.savefig('./%s/%s%d.png' % (img_name, img_name, index),
                    dpi=100,
                    pad_inches=0,
                    bbox_inches='tight')
Example 29
def show_matches(stack, keypoints, matches):

    src = stack[0]
    dst = stack[1]

    src_keypoints = keypoints[0]
    dst_keypoints = keypoints[1]

    fig, ax = plt.subplots()

    plt.gray()

    plot_matches(ax, src, dst, src_keypoints, dst_keypoints, matches)

    ax.axis('off')

    plt.show()
Example 30
def getDisplacement(Image0, Image1):
    Image0Gray = rgb2gray(Image0)
    Image1Gray = rgb2gray(Image1)
    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(Image0Gray)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors

    descriptor_extractor.detect_and_extract(Image1Gray)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    # Sort the matches based on distance.  Least distance
    # is better
    distances12 = []
    for match in matches12:
        distance = hamming(descriptors1[match[0]], descriptors2[match[1]])
        distances12.append(distance)

    indices = np.arange(len(matches12))
    indices = [index for (_, index) in sorted(zip(distances12, indices))]
    matches12 = matches12[indices]

    # collect displacement from the first 10 matches
    dxList = []
    dyList = []
    for mat in matches12[:10]:
        # Get the matching keypoints for each of the images
        img1_idx = mat[0]
        img2_idx = mat[1]

        # skimage keypoints are (row, col), i.e. (y, x)
        (y1, x1) = keypoints1[img1_idx]
        (y2, x2) = keypoints2[img2_idx]
        dxList.append(abs(x1 - x2))
        dyList.append(abs(y1 - y2))

    dxMedian = np.median(np.asarray(dxList, dtype=np.double))
    dyMedian = np.median(np.asarray(dyList, dtype=np.double))
    fig, ax = plt.subplots()
    plot_matches(ax, Image0, Image1, keypoints1, keypoints2, matches12[:10])
    return dxMedian, dyMedian
Example 31
    def show_matches(self, im1, im2, src, dst, inliers):

        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)

        inlier_idxs = np.nonzero(inliers)[0]
        plot_matches(ax,
                     im1,
                     im2,
                     src,
                     dst,
                     np.column_stack((inlier_idxs, inlier_idxs)),
                     matches_color='b',
                     only_matches=True)
        ax.axis('off')
        ax.set_title('Correct correspondences')

        plt.show()
Example 32
def main():
    p = Pool(5)
    listing = os.listdir('logo')

    zipped_patterns = p.map(load_pattenrs, logos)
    #zipped_patterns = p.map(load_pattenrs, listing)
    zipped_patterns = [ent for sublist in zipped_patterns for ent in sublist]
    listing = os.listdir('foto')
    zipped_scenes = p.map(load_scenes, photos)
    #zipped_scenes = p.map(load_scenes, listing)
    zipped_scenes = [ent for sublist in zipped_scenes for ent in sublist]
    zipped_patterns.sort(key=lambda x: x[3])
    p_img, patterns, p_key, tmp = zip(*zipped_patterns)
    zipped_scenes.sort(key=lambda x: x[3])
    s_img, scenes, s_key, tmp = zip(*zipped_scenes)
    set_names(len(zipped_patterns) - 1)
    k = 0
    for j in scenes:
        arg_list = []
        for a, b, c, d in zipped_patterns:
            arg_list.append([b, d, j])
        zipped_matches = p.map(f_wrap, arg_list)
        zipped_matches = [ent for sublist in zipped_matches for ent in sublist]
        zipped_matches.sort(key=lambda x: x[1])
        matches, tmp, m_array = zip(*zipped_matches)
        best_match = max(matches)
        proc = 1.0
        id = 0
        result = 'The logo is: '
        for i in range(len(patterns)):
            el = abs((patterns[i].size / j.size) - 1)
            if matches[i] == best_match:
                result += logos[i] + " "  #cards[i] + " "
            if matches[i] == best_match and el < proc:
                proc = el
                id = i
        fig, ax = plt.subplots()
        plt.gray()
        plot_matches(ax, p_img[id], s_img[k], p_key[id], s_key[k], m_array[id])
        ax.axis('off')
        plt.savefig('output/' + str(k) + '.png')
        #        plt.show()
        print(result)
        k += 1
Example 33
def plot_matching_points(image1, image2, sorted_inliers_by_err,
                         num_matches_shown):
    fig2 = plt.figure(2, dpi=200)
    ax4 = plt.subplot(111)
    src = sorted_inliers_by_err[:, :2]
    dst = sorted_inliers_by_err[:, 2:]
    src = np.column_stack((src[:, 1], src[:, 0]))
    dst = np.column_stack((dst[:, 1], dst[:, 0]))
    src = src[:num_matches_shown]
    dst = dst[:num_matches_shown]
    inlier_idxs = np.arange(src.shape[0])
    matches = np.column_stack((inlier_idxs, inlier_idxs))
    plot_matches(ax4, image1, image2, src, dst, matches)
    i = 0
    while os.path.exists("part1/results/res%s.png" % i):
        i += 1
    resn = "part1/results/res" + str(i) + ".png"
    fig2.savefig(resn, dpi=200)
    plt.show()
Example 34
def image_registration(image_left, image_right, verbose=1):
    """
	use ORB to extract keypoints of left and right images
	then use RANSAC to remove inliers to achieve succesful image registration 
	"""

    orb = ORB(n_keypoints=1000)
    orb.detect_and_extract(image_left)
    keypoints1 = orb.keypoints
    descriptors1 = orb.descriptors

    orb.detect_and_extract(image_right)
    keypoints2 = orb.keypoints
    descriptors2 = orb.descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    # Select keypoints from the source (image to be registered)
    # and target (reference image)
    src = keypoints2[matches12[:, 1]][:, ::-1]
    dst = keypoints1[matches12[:, 0]][:, ::-1]

    model_robust, inliers = ransac((src, dst),
                                   EuclideanTransform,
                                   min_samples=2,
                                   max_trials=10000,
                                   residual_threshold=5,
                                   random_state=20)

    if verbose:
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(5, 5))
        plot_matches(ax1, image_left, image_right, keypoints1, keypoints2,
                     matches12)
        ax1.set_title('all matching points')
        ax1.axis('off')
        plot_matches(ax2, image_left, image_right, keypoints1, keypoints2,
                     matches12[inliers])
        ax2.set_title('correct matching points')
        ax2.axis('off')

    return model_robust
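A short sketch of using the returned transform, assuming the standard skimage warping idiom; `registered_right` is a name introduced here.

from skimage.transform import warp

model = image_registration(image_left, image_right, verbose=0)
# model maps right-image coordinates to left-image coordinates, so its
# inverse serves as the inverse_map that warp() expects.
registered_right = warp(image_right, model.inverse)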
Example 35
File: image.py  Project: gracz21/KCK
def main():
    p = Pool(5)
    listing = os.listdir('patterns')
    zipped_patterns = p.map(load_pattenrs, listing)
    zipped_patterns = [ent for sublist in zipped_patterns for ent in sublist]
    listing = os.listdir('scenes')
    zipped_scenes = p.map(load_scenes, listing)
    zipped_scenes = [ent for sublist in zipped_scenes for ent in sublist]
    zipped_patterns.sort(key=lambda x: x[3])
    p_img, patterns, p_key, tmp = zip(*zipped_patterns)
    zipped_scenes.sort(key=lambda x: x[3])
    s_img, scenes, s_key, tmp = zip(*zipped_scenes)
    set_names(len(zipped_patterns) - 1)
    k = 0
    for j in scenes:
        arg_list = []
        for a, b, c, d in zipped_patterns:
            arg_list.append([b, d, j])
        zipped_matches = p.map(f_wrap, arg_list)
        zipped_matches = [ent for sublist in zipped_matches for ent in sublist]
        zipped_matches.sort(key=lambda x: x[1])
        matches, tmp, m_array = zip(*zipped_matches)
        best_match = max(matches)
        proc = 1.0
        id = 0
        result = 'The card is: '
        for i in range(len(patterns)):
            el = abs((patterns[i].size/j.size) - 1)
            if matches[i] == best_match:
                result += cards[i] + " "
            if matches[i] == best_match and el < proc:
                proc = el
                id = i
        fig, ax = plt.subplots()
        plt.gray()
        plot_matches(ax, p_img[id], s_img[k], p_key[id], s_key[k], m_array[id])
        ax.axis('off')
        plt.show()
        #print('The card is: ', cards[id])
        print(result)
        k += 1
Example 36
keypoints1 = corner_peaks(corner_harris(img1), min_distance=5)
keypoints2 = corner_peaks(corner_harris(img2), min_distance=5)
keypoints3 = corner_peaks(corner_harris(img3), min_distance=5)

extractor = BRIEF()

extractor.extract(img1, keypoints1)
keypoints1 = keypoints1[extractor.mask]
descriptors1 = extractor.descriptors

extractor.extract(img2, keypoints2)
keypoints2 = keypoints2[extractor.mask]
descriptors2 = extractor.descriptors

extractor.extract(img3, keypoints3)
keypoints3 = keypoints3[extractor.mask]
descriptors3 = extractor.descriptors

matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)

fig, ax = plt.subplots(nrows=2, ncols=1)

plt.gray()

plot_matches(ax[0], img1, img2, keypoints1, keypoints2, matches12)
ax[0].axis('off')

plot_matches(ax[1], img1, img3, keypoints1, keypoints3, matches13)
ax[1].axis('off')

plt.show()
Example 37
def plot(img1, img2, keyp1, keyp2, matches):
    # plot_matches expects an Axes object, not the pyplot module itself
    plot_matches(plt.gca(), img1, img2, keyp1, keyp2, matches)
    plt.show()
Example 38
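This snippet begins after its setup. A compatible prelude sketch, assuming synthetic correspondences mapped through a known transform; every name and parameter value below is an assumption.

import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.color import rgb2gray
from skimage.feature import plot_matches
from skimage.measure import ransac
from skimage.transform import AffineTransform, warp

img_orig_gray = rgb2gray(data.astronaut())
tform = AffineTransform(scale=(0.9, 0.9), rotation=0.05, translation=(20, -10))
img_warped_gray = warp(img_orig_gray, tform.inverse)

# Synthetic correspondences: map sample points through the known transform,
# add noise, and corrupt a fraction of them to create outliers.
rng = np.random.default_rng(0)
src = rng.uniform(50, 400, (60, 2))
dst = tform(src) + rng.normal(scale=0.5, size=(60, 2))
dst[:10] += rng.uniform(-100, 100, (10, 2))  # gross outliers

# Naive least-squares estimate over all points, outliers included.
model = AffineTransform()
model.estimate(src, dst)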
# robustly estimate affine transform model with RANSAC
model_robust, inliers = ransac((src, dst), AffineTransform, min_samples=3,
                               residual_threshold=2, max_trials=100)
outliers = ~inliers


# compare "true" and estimated transform parameters
print(tform.scale, tform.translation, tform.rotation)
print(model.scale, model.translation, model.rotation)
print(model_robust.scale, model_robust.translation, model_robust.rotation)

# visualize correspondence
fig, ax = plt.subplots(nrows=2, ncols=1)

plt.gray()

inlier_idxs = np.nonzero(inliers)[0]
plot_matches(ax[0], img_orig_gray, img_warped_gray, src, dst,
             np.column_stack((inlier_idxs, inlier_idxs)), matches_color='b')
ax[0].axis('off')
ax[0].set_title('Correct correspondences')

outlier_idxs = np.nonzero(outliers)[0]
plot_matches(ax[1], img_orig_gray, img_warped_gray, src, dst,
             np.column_stack((outlier_idxs, outlier_idxs)), matches_color='r')
ax[1].axis('off')
ax[1].set_title('Faulty correspondences')

plt.show()
Example 39
img2 = rgb2gray(skimage.io.imread(imgPath + "banana/banana3.jpeg", plugin="pil"))

descriptor_extractor = ORB(n_keypoints=200)

descriptor_extractor.detect_and_extract(img1)
keypoints1 = descriptor_extractor.keypoints
descriptors1 = descriptor_extractor.descriptors

descriptor_extractor.detect_and_extract(img2)
keypoints2 = descriptor_extractor.keypoints
descriptors2 = descriptor_extractor.descriptors

# descriptor_extractor.detect_and_extract(img3)
# keypoints3 = descriptor_extractor.keypoints
# descriptors3 = descriptor_extractor.descriptors

matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
# matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)

fig, ax = plt.subplots(nrows=1, ncols=1)

plt.gray()

plot_matches(ax, img1, img2, keypoints1, keypoints2, matches12)
ax.axis("off")

# plot_matches(ax[1], img1, img3, keypoints1, keypoints3, matches13)
# ax[1].axis('off')

plt.show()
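The next snippet continues mid-way through a stereo-matching recipe. A sketch of the elided setup, assuming the same ORB + FundamentalMatrixTransform workflow as Example 16; the image paths and the ground-truth disparity source are assumptions.

import numpy as np
import matplotlib.pyplot as plt
from skimage import io
from skimage.color import rgb2gray
from skimage.feature import ORB, match_descriptors, plot_matches
from skimage.measure import ransac
from skimage.transform import FundamentalMatrixTransform

img_left = rgb2gray(io.imread('stereo_left.png'))
img_right = rgb2gray(io.imread('stereo_right.png'))
# Dense ground-truth disparity for the left view (assumed available on disk).
groundtruth_disp = io.imread('stereo_disp.tif')

orb = ORB(n_keypoints=800)
orb.detect_and_extract(img_left)
keypoints_left, descriptors_left = orb.keypoints, orb.descriptors
orb.detect_and_extract(img_right)
keypoints_right, descriptors_right = orb.keypoints, orb.descriptors

matches = match_descriptors(descriptors_left, descriptors_right,
                            cross_check=True)
model, inliers = ransac((keypoints_left[matches[:, 0]],
                         keypoints_right[matches[:, 1]]),
                        FundamentalMatrixTransform, min_samples=8,
                        residual_threshold=1, max_trials=5000)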
inlier_keypoints_left = keypoints_left[matches[inliers, 0]]
inlier_keypoints_right = keypoints_right[matches[inliers, 1]]

print("Number of matches:", matches.shape[0])
print("Number of inliers:", inliers.sum())

# Compare estimated sparse disparities to the dense ground-truth disparities.

disp = inlier_keypoints_left[:, 1] - inlier_keypoints_right[:, 1]
disp_coords = np.round(inlier_keypoints_left).astype(np.int64)
disp_idxs = np.ravel_multi_index(disp_coords.T, groundtruth_disp.shape)
disp_error = np.abs(groundtruth_disp.ravel()[disp_idxs] - disp)
disp_error = disp_error[np.isfinite(disp_error)]

# Visualize the results.

fig, ax = plt.subplots(nrows=2, ncols=1)

plt.gray()

plot_matches(ax[0], img_left, img_right, keypoints_left, keypoints_right,
             matches[inliers], only_matches=True)
ax[0].axis("off")
ax[0].set_title("Inlier correspondences")

ax[1].hist(disp_error)
ax[1].set_title("Histogram of disparity errors")

plt.show()
# Apply the ORB algorithm to our images
descriptor_extractor.detect_and_extract(schroedinger)
keypoints1 = descriptor_extractor.keypoints
descriptors1 = descriptor_extractor.descriptors

descriptor_extractor.detect_and_extract(schroedinger_rotate)
keypoints2 = descriptor_extractor.keypoints
descriptors2 = descriptor_extractor.descriptors

descriptor_extractor.detect_and_extract(schroedinger_warped)
keypoints3 = descriptor_extractor.keypoints
descriptors3 = descriptor_extractor.descriptors

# See which descriptors match across the images
matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)

fig, ax = plt.subplots(nrows=2, ncols=1)

plot_matches(ax[0], schroedinger, schroedinger_rotate, keypoints1, keypoints2,
             matches12)
ax[0].axis('off')

plot_matches(ax[1], schroedinger, schroedinger_warped, keypoints1, keypoints3, 
             matches13)
ax[1].axis('off')

plt.show()
