def own_stitching_run():
    # Detect Harris keypoints and descriptors on the three input images.
    kpo1, ds1 = sift.harris_own(own1)
    kpo2, ds2 = sift.harris_own(own2)
    kpo3, ds3 = sift.harris_own(own3)
    imgo1 = cv2.imread(img_own_1)
    imgo2 = cv2.imread(img_own_2)
    imgo3 = cv2.imread(img_own_3)

    # Match image 1 against image 2, estimate the homography with RANSAC,
    # and stitch the first intermediate panorama.
    good_matches_o = sift.match_own(imgo1, imgo2, kpo1, kpo2)
    hom_o, homInv_o, inliersFinal_o = ran.RANSAC(good_matches_o, 100, .2, kpo1, kpo2)
    pano_o, b1, b2 = st.stitch(own1, own2, hom_o, homInv_o)
    cv2.imwrite(path_result + '/ownstep2.jpg', pano_o.astype(np.uint8))

    # Reload the intermediate panorama, match it against image 3,
    # and stitch the final result.
    own = cv2.imread(path_result + '/ownstep2.jpg')
    kpos, ds4 = sift.harris_own(own)
    good_matches_o2 = sift.match_own(own, imgo3, kpos, kpo3)
    hom_o2, homInv_o2, inliersFinal_o2 = ran.RANSAC(good_matches_o2, 300, .2, kpos, kpo3)
    pano_o2, b1, b2 = st.stitch(own, own3, hom_o2, homInv_o2)
    cv2.imwrite(path_result + '/ownStitched.png', pano_o2.astype(np.uint8))
    cv2.imshow("ownStitched", pano_o2.astype(np.uint8))
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def test_ransac(self, inlier_ratio=1.0, inlier_noise=0):
    # Size the iteration count from the expected inlier ratio, then run
    # RANSAC on synthetic data with a known ground-truth model.
    n_iter = trial_count(inlier_ratio, self.n_sample)
    data = self.make_data(inlier_ratio, inlier_noise)
    ransac_instance = ransac.RANSAC(
        n_sample=self.n_sample,
        n_iter=n_iter,
        err_thres=0.01,
    )
    best_func, inliers = ransac_instance(data, self.model)

    # At least 90% of the planted inliers should be recovered, and the
    # fitted model should match the ground truth to two decimals.
    self.assertGreaterEqual(len(inliers) / len(data), 0.9 * inlier_ratio)
    np.testing.assert_array_almost_equal(best_func.matrix, self.ground_truth, decimal=2)
    print('[*] ({ratio:0<.2}, {noise}) ransac error: {error:.8}'.format(
        ratio=inlier_ratio, noise=inlier_noise,
        error=np.sqrt(((best_func.matrix - self.ground_truth) ** 2).sum())))
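# The test above calls a `trial_count` helper that is not shown here. A
# minimal sketch of the standard RANSAC trial-count formula it presumably
# implements (the `success_prob` default of 0.99 is an assumption):
import numpy as np

def trial_count(inlier_ratio, n_sample, success_prob=0.99):
    # Iterations needed so that, with probability `success_prob`, at least
    # one minimal sample of size `n_sample` is drawn entirely from inliers:
    # n_iter = log(1 - p) / log(1 - w**s).
    if inlier_ratio >= 1.0:
        return 1
    return int(np.ceil(np.log(1.0 - success_prob)
                       / np.log(1.0 - inlier_ratio ** n_sample)))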
def compute_rotation_and_consistency_pano(self, idx1, idxb):
    """
    Compute the rotation and the inlier (consistency) count between two
    image indexes in the panorama stitcher.
    """
    # Initialize classes to match descriptors and find consistency.
    desc_ex = interest_point.DescriptorExtractor()
    rn = ransac.RANSAC()

    # Get matching descriptors for the two photos to calculate consistency.
    ip1 = self.interest_points[idxb]
    ip2 = self.interest_points[idx1]
    desc1 = self.descriptors[idxb]
    desc2 = self.descriptors[idx1]
    match_idx = desc_ex.match_descriptors(desc1, desc2)
    ipb = ip2[:, match_idx]

    # Unpack the stored matches from match_images (divide rows by 2 since
    # each entry holds the coordinates of both images).
    ip_rows = int(self.matches[idxb][idx1].shape[0] / 2)
    ip_cols = self.matches[idxb][idx1].shape[1]
    ip1c = np.ndarray((ip_rows, ip_cols))
    ipbc = np.ndarray((ip_rows, ip_cols))
    ip1c[0] = self.matches[idxb][idx1][0]
    ip1c[1] = self.matches[idxb][idx1][1]
    ipbc[0] = self.matches[idxb][idx1][2]
    ipbc[1] = self.matches[idxb][idx1][3]

    # Get the rotation for the best match.
    K1 = geometry.get_calibration(self.images[idxb].shape, self.params['fov_degrees'])
    K2 = geometry.get_calibration(self.images[idx1].shape, self.params['fov_degrees'])
    R, H = geometry.compute_rotation(ip1c, ipbc, K1, K2)

    # Calculate the consistency (inlier count) between the two photos.
    inliers = rn.consistent(H, ip1, ipb)
    num_inliers_s = np.sum(inliers)
    return R, num_inliers_s
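# `geometry.compute_rotation` above is not shown. For a purely rotating
# camera the homography and rotation are related by H = K2 @ R @ inv(K1),
# so a minimal sketch of the recovery step (the SVD projection onto a
# valid rotation is an assumption about the implementation):
import numpy as np

def rotation_from_homography(H, K1, K2):
    # R ~ inv(K2) @ H @ K1 up to scale; snap to the nearest rotation
    # matrix via SVD and fix the sign so that det(R) = +1.
    M = np.linalg.inv(K2) @ H @ K1
    U, _, Vt = np.linalg.svd(M)
    R = U @ Vt
    if np.linalg.det(R) < 0:
        R = -R
    return R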
def match_images(self, images):
    """ Find geometrically consistent matches between images """
    # extract interest points and descriptors
    print('[ find interest points ]')
    t0 = time()
    interest_points = []
    descriptors = []
    ip_ex = interest_point.InterestPointExtractor()
    desc_ex = interest_point.DescriptorExtractor()
    num_images = len(images)
    for i in range(num_images):
        im = images[i]
        img = np.mean(im, 2, keepdims=True)
        ip = ip_ex.find_interest_points(img)
        print('   found ' + str(ip.shape[1]) + ' interest points')
        interest_points.append(ip)
        desc = desc_ex.get_descriptors(img, ip)
        descriptors.append(desc)
        if self.params['draw_interest_points']:
            interest_point.draw_interest_points_file(
                im, ip, self.params['results_dir'] + '/ip' + str(i) + '.jpg')
    t1 = time()
    print(' % .2f secs ' % (t1 - t0))

    # match descriptors and perform ransac
    print('[ match descriptors ]')
    matches = [[None] * num_images for _ in range(num_images)]
    num_matches = np.zeros((num_images, num_images))
    t0 = time()
    rn = ransac.RANSAC()
    for i in range(num_images):
        ipi = interest_points[i]
        print('   image ' + str(i))
        for j in range(num_images):
            if i == j:
                continue
            matchesij = desc_ex.match_descriptors(descriptors[i], descriptors[j])
            ipm = interest_points[j][:, matchesij]
            S, inliers = rn.ransac_similarity(ipi, ipm)
            num_matches[i, j] = np.sum(inliers)
            ipic = ipi[:, inliers]
            ipmc = ipm[:, inliers]
            matches[i][j] = np.concatenate((ipic, ipmc), 0)
            if self.params['draw_matches']:
                imi = images[i]
                imj = images[j]
                interest_point.draw_matches_file(
                    imi, imj, ipi, ipm,
                    self.params['results_dir'] + '/match_raw_' + str(i) + str(j) + '.jpg')
                interest_point.draw_matches_file(
                    imi, imj, ipic, ipmc,
                    self.params['results_dir'] + '/match_' + str(i) + str(j) + '.jpg')
    t1 = time()
    print(' % .2f secs' % (t1 - t0))
    return matches, num_matches
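# `rn.ransac_similarity` above fits a 2-D similarity transform to matched
# interest points. A similarity has four degrees of freedom, so its minimal
# sample is two correspondences; a sketch of that minimal solve (the
# function name is hypothetical, not part of the module shown):
import numpy as np

def similarity_from_two_matches(p, q):
    # p, q: 2x2 arrays, one matched point per column. Solves
    # q ~ s*R(theta)*p + t, which is linear in (a, b, tx, ty)
    # with a = s*cos(theta), b = s*sin(theta).
    A, rhs = [], []
    for k in range(2):
        x, y = p[0, k], p[1, k]
        A.append([x, -y, 1, 0]); rhs.append(q[0, k])
        A.append([y,  x, 0, 1]); rhs.append(q[1, k])
    a, b, tx, ty = np.linalg.solve(np.array(A, float), np.array(rhs, float))
    return np.array([[a, -b, tx],
                     [b,  a, ty],
                     [0., 0., 1.]])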
images = images[1:]
index = 1
# Greedily add the best-matching remaining image until none are left.
while images:
    print("Adding image ", index)
    index += 1

    # Try every remaining image against the current panorama and keep
    # the one with the most RANSAC inliers.
    max_inliers = 0
    max_inliers_index = 0
    max_inliers_homography = None
    max_inliers_inverse_homography = None
    for i in range(len(images)):
        matches, matching_image = utl.get_matches(final_image, images[i])
        homography, inverse_homography, match_image, number_of_inliers = ransac.RANSAC(
            matches=matches,
            numMatches=4,
            numIterations=150,
            inlierThreshold=50,
            hom=None,
            homInv=None,
            image1Display=final_image,
            image2Display=images[i])
        if number_of_inliers > max_inliers:
            max_inliers = number_of_inliers
            max_inliers_index = i
            max_inliers_homography = homography
            max_inliers_inverse_homography = inverse_homography

    print("Best current match: ", images[max_inliers_index].image_name)
    print("Stitching")
    final_image = stitch(final_image, images[max_inliers_index],
                         max_inliers_homography, max_inliers_inverse_homography)
    images = images[:max_inliers_index] + images[max_inliers_index + 1:]
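# The greedy selection above ranks candidates by `number_of_inliers`. A
# minimal sketch of how such a count is typically computed for a homography
# (the match layout and function name are assumptions; the repo's
# ransac.RANSAC returns the count directly):
import numpy as np

def count_homography_inliers(matches, hom, inlier_threshold):
    # matches: iterable of ((x1, y1), (x2, y2)) point correspondences.
    # A pair is an inlier when (x1, y1) projected through `hom` lands
    # within `inlier_threshold` pixels of (x2, y2).
    count = 0
    for (x1, y1), (x2, y2) in matches:
        p = hom @ np.array([x1, y1, 1.0])
        px, py = p[0] / p[2], p[1] / p[2]
        if np.hypot(px - x2, py - y2) < inlier_threshold:
            count += 1
    return count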
# EXTRA CREDIT: Old_port keypoint detection
import SiftKeypointDetector as newDetect

o, image31_magnitude, image31_orientations, keypoint_image31 = h.Corner(imharris31, .5)  # .5, 1.1
p, image32_magnitude, image32_orientations, keypoint_image32 = h.Corner(imharris32, 1.1)
keypoints31 = newDetect.new_impl(imgex31)
keypoints32 = newDetect.new_impl(imgex32)

# Question 2: describe features and draw keypoints
key1, key2, good, matched_keypoints = fea.runDescriptor(
    imharris1, keypoints1, image1_magnitude, image1_orientations,
    imharris2, keypoints2, image2_magnitude, image2_orientations, .7)
cv2.imwrite(path_result + '/2' + '.png', matched_keypoints.astype(np.uint8))

# Question 3: run the RANSAC algorithm to compute the homography, draw the inliers
good_matches, kp1, kp2 = sift.match(img1, img2)
hom, homInv, inliersFinal = ran.RANSAC(good_matches, 800, .9, kp1, kp2)
im_inlier = cv2.drawMatches(img1, kp1, img2, kp2, inliersFinal, None, flags=2)
cv2.imwrite(path_result + '/3.png', im_inlier.astype(np.uint8))
cv2.imshow("Inliers", im_inlier)
cv2.waitKey(0)
cv2.destroyAllWindows()

# Question 4: stitch the two images based on the inliers given by the RANSAC algorithm
pano = []
panorama, blend1, blend2 = st.stitch(img1, img2, hom, homInv)
pano.append(panorama.astype(np.uint8))
cv2.imwrite(path_result + '/4.png', panorama.astype(np.uint8))
cv2.imshow("Stitched", pano[0])
cv2.waitKey(0)
def ransac_2_new_dimension(self, n_unit_threshold, cylinder_value, circle_number_start):
    # Add a new dimension holding the circle numbers.
    self.add_dimension("circle", 3, "")
    #self.reset_dimension("circle")

    # Open the file.
    las = laspy.file.File(self.filename, mode="rw")
    mins = las.header.min
    maxs = las.header.max
    points = las.header.records_count
    area = (maxs[0] - mins[0]) * (maxs[1] - mins[1])

    # Threshold distance, scaled by the average area per point.
    unit_threshold = area / points
    threshold = n_unit_threshold * unit_threshold
    print(threshold)

    slice_data = getattr(las, "slice")
    cylinder_data = getattr(las, "cylinder")
    circle_data = getattr(las, "circle")
    n_slices = slice_data.max()
    circle_number = circle_number_start

    # Circle format for EVLRs: {slice: [[circle_number, center_point, radius]]}
    circles = []
    for i in tqdm.tqdm(reversed(range(n_slices)), total=n_slices):
        mask = (cylinder_data == cylinder_value) & (slice_data == i)
        if len(las.x[mask]) < 3:
            continue
        new_idx = getattr(las, "index")[mask]

        # Fit a circle with RANSAC; the last argument is the number of
        # sampling attempts.
        rransac = ransac.RANSAC(las.x[mask].T, las.y[mask].T, 100)
        rransac.execute_ransac(threshold)

        # Get the best model (center (a, b), radius r) from RANSAC.
        a, b, r = rransac.best_model[0], rransac.best_model[1], rransac.best_model[2]
        if a is None or b is None or r is None:
            continue

        # Collect the inliers: points whose distance to the fitted circle
        # is below the threshold.
        inlier_idxs = []
        for point_idx, (x, y) in enumerate(zip(las.x[mask], las.y[mask])):
            if np.abs(math.sqrt((x - a)**2 + (y - b)**2) - r) < threshold:
                inlier_idxs.append(point_idx)
        original_idxs = new_idx[inlier_idxs]
        circle_data[original_idxs] = circle_number
        circles.append(Circle(circle_number, a, b, las.z[original_idxs], r,
                              slice_number=i, support=len(inlier_idxs),
                              original_idxs=original_idxs))
        setattr(las, "circle", circle_data)
        circle_number += 1
    las.close()
    return circles, circle_number
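# `rransac.execute_ransac` above leaves a circle model (a, b, r) in
# `best_model`. The minimal sample for a circle is three points; a sketch
# of the circumcircle solve such a sampler would use (the function name is
# hypothetical, not part of the ransac module shown):
import numpy as np

def circle_from_three_points(p1, p2, p3):
    # Intersect the perpendicular bisectors of p1p2 and p2p3 to get the
    # center (a, b); returns (None, None, None) for near-collinear points.
    (x1, y1), (x2, y2), (x3, y3) = p1, p2, p3
    d = 2.0 * (x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2))
    if abs(d) < 1e-12:
        return None, None, None
    a = ((x1**2 + y1**2) * (y2 - y3) + (x2**2 + y2**2) * (y3 - y1)
         + (x3**2 + y3**2) * (y1 - y2)) / d
    b = ((x1**2 + y1**2) * (x3 - x2) + (x2**2 + y2**2) * (x1 - x3)
         + (x3**2 + y3**2) * (x2 - x1)) / d
    r = np.hypot(x1 - a, y1 - b)
    return a, b, r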