def match_features(problem, data):
    """Match features between all relevant pairs of images and save to the DataSet.

    Pairs are grouped by their first image so that image's features and
    index are loaded only once; images whose matches are already cached
    on disk are skipped entirely.

    :param problem: object providing get_pairs_to_match() -> iterable of
        (image1, image2) name pairs.
    :param data: DataSet providing feature/match I/O and config.
    """
    pairs = problem.get_pairs_to_match()
    # Group candidate second images by first image.
    candidates = defaultdict(list)
    for image1, image2 in pairs:
        candidates[image1].append(image2)
    # .items() instead of the Python-2-only .iteritems(), and print()
    # calls, keep this function runnable under Python 3 like the other
    # blocks in this file.
    for image1, image2s in candidates.items():
        if data.matches_exists(image1):
            # Matches already on disk; nothing to recompute.
            print("{} - matching features (cached)".format(image1))
        else:
            print("{} - matching features".format(image1))
            points1, descriptors1, colors1 = data.load_features(image1)
            index1 = data.load_feature_index(image1, descriptors1)
            image1_matches = {}
            for image2 in image2s:
                print("{} - {} - matching features".format(image1, image2))
                points2, descriptors2, colors2 = data.load_features(image2)
                index2 = data.load_feature_index(image2, descriptors2)
                image1_matches[image2] = matching.match_symmetric(
                    descriptors1, index1, descriptors2, index2, data.config)
            data.save_matches(image1, image1_matches)
def match(args):
    """Compute all matches for a single image.

    *args* is packed as (im1, candidates, i, n, ctx): the image to match,
    its candidate neighbours, progress counters, and a context object
    carrying the DataSet, cameras, EXIF and preemptive features.
    Results are persisted via ctx.data.save_matches.
    """
    im1, candidates, i, n, ctx = args
    logger.info('Matching {} - {} / {}'.format(im1, i + 1, n))
    config = ctx.data.config
    robust_matching_min_match = config['robust_matching_min_match']
    preemptive_threshold = config['preemptive_threshold']
    lowes_ratio = config['lowes_ratio']
    preemptive_lowes_ratio = config['preemptive_lowes_ratio']

    # im1's features and index are invariant across candidates; the
    # original reloaded them from disk for every im2.  Load lazily, once.
    p1 = f1 = i1 = None

    im1_matches = {}
    for im2 in candidates:
        # preemptive matching: cheap brute-force pass on a feature subset
        # to discard weak pairs before the expensive symmetric match.
        if preemptive_threshold > 0:
            t = time.time()
            config['lowes_ratio'] = preemptive_lowes_ratio
            try:
                matches_pre = matching.match_lowe_bf(ctx.f_pre[im1],
                                                     ctx.f_pre[im2], config)
            finally:
                # Always restore the shared config, even if matching raises.
                config['lowes_ratio'] = lowes_ratio
            logger.debug("Preemptive matching {0}, time: {1}s".format(
                len(matches_pre), time.time() - t))
            if len(matches_pre) < preemptive_threshold:
                logger.debug(
                    "Discarding based of preemptive matches {0} < {1}".format(
                        len(matches_pre), preemptive_threshold))
                continue

        # symmetric matching
        t = time.time()
        if f1 is None:
            p1, f1, _ = ctx.data.load_features(im1)
            i1 = ctx.data.load_feature_index(im1, f1)
        p2, f2, c2 = ctx.data.load_features(im2)
        i2 = ctx.data.load_feature_index(im2, f2)
        matches = matching.match_symmetric(f1, i1, f2, i2, config)
        logger.debug('{} - {} has {} candidate matches'.format(
            im1, im2, len(matches)))
        if len(matches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue

        # robust matching: geometric verification of the raw matches
        t_robust_matching = time.time()
        camera1 = ctx.cameras[ctx.exifs[im1]['camera']]
        camera2 = ctx.cameras[ctx.exifs[im2]['camera']]
        rmatches = matching.robust_match(p1, p2, camera1, camera2, matches,
                                         config)
        if len(rmatches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue
        im1_matches[im2] = rmatches
        logger.debug('Robust matching time : {0}s'.format(
            time.time() - t_robust_matching))
        logger.debug("Full matching {0} / {1}, time: {2}s".format(
            len(rmatches), len(matches), time.time() - t))
    ctx.data.save_matches(im1, im1_matches)
config = {} #data = dataset.DataSet("/Volumes/Data/Berkeley/code_and_data/code/egomotion/OpenSfM/data/frames120_minus_blurry") data = dataset.DataSet( "/Volumes/Data/Berkeley/code_and_data/code/egomotion/OpenSfM/data/eaef92b1-7909-46e1-9f62-5fe6eddcfcac" ) config = {"homography_seg_relative_path": "output/results/joint"} #filter_by_segmentation_test(im1, config, data) p1, f1, i1 = load_masked(im1, data, config) p2, f2, i2 = load_masked(im2, data, config) # version with lower ratio test one old_ratio = config.get('lowes_ratio', 0.6) config["lowes_ratio"] = config.get('homography_lowes_ratio', 0.9) matches = matching.match_symmetric(f1, i1, f2, i2, config) config["lowes_ratio"] = old_ratio # version with more neighbour included #matches = homography_match(f1, i1, f2, i2, config) homography_match_test(matches, data, p1, p2, .1) print(matches.shape) p1matched = p1[matches[:, 0], :] p2matched = p2[matches[:, 1], :] # TODO: the too strict threshold of RANSAC result in fewer correspondence than we thought H, inliers = cv2.findHomography(p1matched, p2matched, cv2.RANSAC, config.get("homography_threshold", 0.004)) matches = matches[inliers.reshape(-1).astype(np.bool), :] print(matches.shape) homography_match_test(matches, data, p1, p2, 1)
def match(args):
    """Compute all matches for a single image.

    *args* is (im1, candidates, i, n, ctx).  Besides the normal matching
    pipeline, this variant also re-detects SIFT features with OpenCV and
    writes a side-by-side match-visualisation image to disk for every pair.
    NOTE(review): the visualisation uses hard-coded absolute paths under
    /home/qli/workspace/... — confirm they exist before running.
    """
    log.setup()
    im1, candidates, i, n, ctx = args
    #cv2.imshow('123',im1)
    # Debug prints left in by the author.
    print("....................1%s" % im1)
    print("....................2%s" % candidates)
    logger.info('Matching {} - {} / {}'.format(im1, i + 1, n))
    config = ctx.data.config
    robust_matching_min_match = config['robust_matching_min_match']
    preemptive_threshold = config['preemptive_threshold']
    lowes_ratio = config['lowes_ratio']
    preemptive_lowes_ratio = config['preemptive_lowes_ratio']
    im1_matches = {}
    for im2 in candidates:
        # preemptive matching: cheap pre-filter on a feature subset
        if preemptive_threshold > 0:
            t = timer()
            # Temporarily swap in the preemptive Lowe's ratio on the shared
            # config dict, then restore it.
            config['lowes_ratio'] = preemptive_lowes_ratio
            matches_pre = matching.match_lowe_bf(ctx.f_pre[im1],
                                                 ctx.f_pre[im2], config)
            config['lowes_ratio'] = lowes_ratio
            logger.debug("Preemptive matching {0}, time: {1}s".format(
                len(matches_pre), timer() - t))
            if len(matches_pre) < preemptive_threshold:
                logger.debug(
                    "Discarding based of preemptive matches {0} < {1}".format(
                        len(matches_pre), preemptive_threshold))
                continue
        # symmetric matching
        t = timer()
        p1, f1, c1 = ctx.data.load_features(im1)
        p2, f2, c2 = ctx.data.load_features(im2)
        if config['matcher_type'] == 'FLANN':
            print('.....................using FLANN step 1')
            i1 = ctx.data.load_feature_index(im1, f1)
            i2 = ctx.data.load_feature_index(im2, f2)
        else:
            # Brute-force matching needs no prebuilt index.
            i1 = None
            i2 = None
        matches = matching.match_symmetric(
            f1, i1, f2, i2, config)  #, p1, p2, im1, im2)#add keypoints and images qli
        # Apply ratio test
        """
        matchesMask = [[0,0] for ii in range(len(matches))]
        for ii,(m,n) in enumerate(matches):
            if 0.55*n.distance<m.distance < 0.80*n.distance:
                matchesMask[ii]=[1,0]
        draw_params=dict(matchesMask=matchesMask)
        """
        # --- Debug visualisation: re-detect SIFT with OpenCV and dump a
        # drawMatchesKnn image.  Independent of the pipeline result above.
        siftQ = cv2.xfeatures2d.SIFT_create()
        print('loading image 1 ..................%s' % im1)
        print('loading image 2 ..................%s' % im2)
        img1 = cv2.imread(
            '/home/qli/workspace/OpenSfM/data/MattGPS/images/%s' % im1)
        img2 = cv2.imread(
            '/home/qli/workspace/OpenSfM/data/MattGPS/images/%s' % im2)
        kp1, des1 = siftQ.detectAndCompute(img1, None)
        kp2, des2 = siftQ.detectAndCompute(img2, None)
        # BFMatcher with default params
        bf = cv2.BFMatcher()
        matchesQ = bf.knnMatch(des1, des2, k=2)
        matchesMask = [[0, 0] for i in range(len(matchesQ))]
        # NOTE(review): this loop shadows the outer `i` and `n` unpacked
        # from *args*.  Harmless here since both were already consumed,
        # but fragile if code is added below.
        for i, (m, n) in enumerate(matchesQ):
            if 0.55 * n.distance < m.distance < 0.80 * n.distance:
                matchesMask[i] = [1, 0]
        # cv2.drawMatchesKnn expects list of lists as matches.
        draw_params = dict(matchesMask=matchesMask)
        img3 = None
        img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, matchesQ, None,
                                  flags=2, **draw_params)
        #namestr= '/home/qli/workspace/OpenSfM/data/MattGPS/images/'+str(im1)+str(im2)
        #cv2.imwrite(namestr,img3)
        #plt.figure(str(im1)+str(im2))
        #plt.imshow(img3)
        savefigname = "/home/qli/workspace/OpenSfM/data/MattGPS/GPS%s%s.jpg" % (
            str(im1), str(im2))
        #plt.savefig(savefigname)
        cv2.imwrite(savefigname, img3)
        #plt.show()
        #plt.close()
        #return
        logger.debug('{} - {} has {} candidate matches'.format(
            im1, im2, len(matches)))
        if len(matches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue
        # robust matching: geometric verification of the raw matches
        t_robust_matching = timer()
        camera1 = ctx.cameras[ctx.exifs[im1]['camera']]
        camera2 = ctx.cameras[ctx.exifs[im2]['camera']]
        rmatches = matching.robust_match(p1, p2, camera1, camera2, matches,
                                         config)
        if len(rmatches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue
        im1_matches[im2] = rmatches
        logger.debug('Robust matching time : {0}s'.format(timer() -
                                                          t_robust_matching))
        logger.debug("Full matching {0} / {1}, time: {2}s".format(
            len(rmatches), len(matches), timer() - t))
    ctx.data.save_matches(im1, im1_matches)
def match(args):
    """Compute all matches for a single image.

    *args* is (im1, candidates, i, n, ctx).  Previously saved matches for
    im1 are loaded and candidates already present in them are skipped, so
    the function can resume an interrupted run.  Results are saved back
    through ctx.data.save_matches.
    """
    im1, candidates, i, n, ctx = args
    logger.info('Matching {} - {} / {}'.format(im1, i + 1, n))
    config = ctx.data.config
    robust_matching_min_match = config['robust_matching_min_match']
    preemptive_threshold = config['preemptive_threshold']
    lowes_ratio = config['lowes_ratio']
    preemptive_lowes_ratio = config['preemptive_lowes_ratio']

    # Resume from cached matches when they exist.
    if ctx.data.matches_exists(im1):
        im1_matches = ctx.data.load_matches(im1)
    else:
        im1_matches = {}

    # im1's features and index are invariant across the candidate loop; the
    # original reloaded them from disk for every im2.  Load lazily, once.
    p1 = f1 = i1 = None

    for im2 in candidates:
        if im2 in im1_matches:
            continue

        # preemptive matching: cheap pre-filter on a feature subset
        if preemptive_threshold > 0:
            t = time.time()
            config['lowes_ratio'] = preemptive_lowes_ratio
            matches_pre = matching.match_lowe_bf(ctx.f_pre[im1],
                                                 ctx.f_pre[im2], config)
            config['lowes_ratio'] = lowes_ratio
            logger.debug("Preemptive matching {0}, time: {1}s".format(
                len(matches_pre), time.time() - t))
            if len(matches_pre) < preemptive_threshold:
                logger.debug(
                    "Discarding based of preemptive matches {0} < {1}".format(
                        len(matches_pre), preemptive_threshold))
                continue

        # symmetric matching
        t = time.time()
        if f1 is None:
            p1, f1, _ = ctx.data.load_features(im1)
            # if we are using bruteforce matching, the loaded index will
            # simply be False.
            i1 = ctx.data.load_feature_index(im1, f1)
        p2, f2, c2 = ctx.data.load_features(im2)
        i2 = ctx.data.load_feature_index(im2, f2)
        matches = matching.match_symmetric(f1, i1, f2, i2, config)
        logger.debug('{} - {} has {} candidate matches'.format(
            im1, im2, len(matches)))
        if len(matches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue

        # robust matching
        t_robust_matching = time.time()
        camera1 = ctx.cameras[ctx.exifs[im1]['camera']]
        camera2 = ctx.cameras[ctx.exifs[im2]['camera']]
        # add extra matches on the road with homography method
        # filter the candidate points by semantic segmentation
        rmatches = matching.robust_match(p1, p2, camera1, camera2, matches,
                                         config)
        if len(rmatches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue
        im1_matches[im2] = rmatches
        logger.debug('Robust matching time : {0}s'.format(
            time.time() - t_robust_matching))
        logger.debug("Full matching {0} / {1}, time: {2}s".format(
            len(rmatches), len(matches), time.time() - t))
    ctx.data.save_matches(im1, im1_matches)
def match(args):
    """Compute all matches for a single image.

    *args* bundles (image, candidate list, progress index, total, context);
    the resulting match dictionary is persisted through the context's
    DataSet.
    """
    log.setup()
    image, neighbours, idx, total, ctx = args
    logger.info('Matching {} - {} / {}'.format(image, idx + 1, total))

    cfg = ctx.data.config
    min_robust = cfg['robust_matching_min_match']
    pre_threshold = cfg['preemptive_threshold']
    ratio = cfg['lowes_ratio']
    pre_ratio = cfg['preemptive_lowes_ratio']

    results = {}
    for other in neighbours:
        # Preemptive pass: quick low-cost match to discard weak pairs early.
        if pre_threshold > 0:
            started = timer()
            cfg['lowes_ratio'] = pre_ratio
            pre_matches = matching.match_lowe_bf(
                ctx.f_pre[image], ctx.f_pre[other], cfg)
            cfg['lowes_ratio'] = ratio
            logger.debug("Preemptive matching {0}, time: {1}s".format(
                len(pre_matches), timer() - started))
            if len(pre_matches) < pre_threshold:
                logger.debug(
                    "Discarding based of preemptive matches {0} < {1}".format(
                        len(pre_matches), pre_threshold))
                continue

        # Symmetric matching on the full feature sets.
        started = timer()
        p1, f1, c1 = ctx.data.load_features(image)
        p2, f2, c2 = ctx.data.load_features(other)
        # Only the FLANN matcher uses a prebuilt index.
        use_index = cfg['matcher_type'] == 'FLANN'
        i1 = ctx.data.load_feature_index(image, f1) if use_index else None
        i2 = ctx.data.load_feature_index(other, f2) if use_index else None
        matches = matching.match_symmetric(f1, i1, f2, i2, cfg)
        logger.debug('{} - {} has {} candidate matches'.format(
            image, other, len(matches)))
        if len(matches) < min_robust:
            results[other] = []
            continue

        # Robust (geometric) verification of the candidate matches.
        robust_started = timer()
        cam1 = ctx.cameras[ctx.exifs[image]['camera']]
        cam2 = ctx.cameras[ctx.exifs[other]['camera']]
        robust = matching.robust_match(p1, p2, cam1, cam2, matches, cfg)
        if len(robust) < min_robust:
            results[other] = []
            continue
        results[other] = robust
        logger.debug('Robust matching time : {0}s'.format(
            timer() - robust_started))
        logger.debug("Full matching {0} / {1}, time: {2}s".format(
            len(robust), len(matches), timer() - started))
    ctx.data.save_matches(image, results)
def _load_point_labels(data, image, points):
    """Return the semantic-segmentation label of each feature point of
    *image*, or None when no segmentation map exists for it.

    *points* are normalized feature coordinates in [-0.5, 0.5]; they are
    scaled to pixel indices into the segmentation raster.
    """
    path_seg = data.data_path + "/images/output/results/frontend_vgg/" + \
        os.path.splitext(image)[0] + '.png'
    if not Path(path_seg).is_file():
        return None
    seg = np.array(Image.open(path_seg))
    idx_u = seg.shape[1] * (points[:, 0] + 0.5)
    idx_v = seg.shape[0] * (points[:, 1] + 0.5)
    # np.int was removed in NumPy >= 1.24; the builtin int is equivalent.
    return seg[idx_v.astype(int), idx_u.astype(int)]


def match(args):
    ''' Compute all matches for a single image.

    *args* is (im1, candidates, i, n, ctx).  This variant feeds per-feature
    semantic-segmentation labels into the matchers and resumes from
    previously saved matches when they exist.
    '''
    im1, candidates, i, n, ctx = args
    logger.info('Matching {} - {} / {}'.format(im1, i + 1, n))
    config = ctx.data.config
    robust_matching_min_match = config['robust_matching_min_match']
    preemptive_threshold = config['preemptive_threshold']
    lowes_ratio = config['lowes_ratio']
    preemptive_lowes_ratio = config['preemptive_lowes_ratio']

    p1, f1, c1 = ctx.data.load_features(im1)
    # if we are using bruteforce matching, the loaded index will simply be
    # False.
    i1 = ctx.data.load_feature_index(im1, f1)
    # Per-feature semantic labels (None when no segmentation is available).
    im1_seg = _load_point_labels(ctx.data, im1, p1)

    # Resume from cached matches when they exist.
    if ctx.data.matches_exists(im1):
        im1_matches = ctx.data.load_matches(im1)
    else:
        im1_matches = {}

    for im2 in candidates:
        if im2 in im1_matches:
            continue
        p2, f2, c2 = ctx.data.load_features(im2)
        i2 = ctx.data.load_feature_index(im2, f2)
        im2_seg = _load_point_labels(ctx.data, im2, p2)

        # preemptive matching
        if preemptive_threshold > 0:
            t = time.time()
            config['lowes_ratio'] = preemptive_lowes_ratio
            matches_pre = matching.match_lowe_bf(ctx.f_pre[im1],
                                                 ctx.f_pre[im2], config,
                                                 im1_seg, im2_seg)
            config['lowes_ratio'] = lowes_ratio
            logger.debug("Preemptive matching {0}, time: {1}s".format(
                len(matches_pre), time.time() - t))
            if len(matches_pre) < preemptive_threshold:
                logger.debug(
                    "Discarding based of preemptive matches {0} < {1}".format(
                        len(matches_pre), preemptive_threshold))
                continue

        # symmetric matching
        t = time.time()
        matches = matching.match_symmetric(f1, i1, f2, i2, config,
                                           im1_seg, im2_seg)
        logger.debug('{} - {} has {} candidate matches'.format(
            im1, im2, len(matches)))
        if len(matches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue

        # robust matching
        t_robust_matching = time.time()
        camera1 = ctx.cameras[ctx.exifs[im1]['camera']]
        camera2 = ctx.cameras[ctx.exifs[im2]['camera']]
        # add extra matches on the road with homography method
        # filter the candidate points by semantic segmentation
        rmatches = matching.robust_match(p1, p2, camera1, camera2, matches,
                                         config)
        if len(rmatches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue
        im1_matches[im2] = rmatches
        logger.debug('Robust matching time : {0}s'.format(
            time.time() - t_robust_matching))
        logger.debug("Full matching {0} / {1}, time: {2}s".format(
            len(rmatches), len(matches), time.time() - t))
    ctx.data.save_matches(im1, im1_matches)
def main():
    """Remove near-duplicate ("stopping") frames from a dataset.

    Walks the sorted image list; consecutive images whose robust matches
    fit a homography with an inlier ratio above --homography_inlier_ratio
    are considered the same frame and dropped.  The surviving image names
    are written to images_list.txt's replacement (image_list.txt).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset', help='dataset to process')
    parser.add_argument('--homography_ransac_threshold',
                        help='the threshold used to match homography',
                        default=0.004)
    parser.add_argument(
        '--homography_inlier_ratio',
        help=
        'the lower bound of homography inlier ratio to be considered as the same frame',
        default=0.90)
    parser.add_argument('--matching_mod',
                        help='could either be good or fast',
                        default="good")
    args = parser.parse_args()
    is_good = (args.matching_mod == "good")
    data = dataset.DataSet(args.dataset)
    images = sorted(data.images())
    config = data.config
    # the current image, next image is used as potentials to be the same as this image
    im1i = 0
    retained = [images[0]]
    indexes = [0]
    if is_good:
        # "good" mode: full symmetric + robust matching per candidate pair.
        robust_matching_min_match = config['robust_matching_min_match']
        cameras = data.load_camera_models()
        exifs = {im: data.load_exif(im) for im in images}
        while im1i + 1 < len(images):
            im1 = images[im1i]
            # while the next image exists
            p1, f1, c1 = data.load_features(im1)
            i1 = data.load_feature_index(im1, f1)
            # get the cached features
            if data.matches_exists(im1):
                im1_matches = data.load_matches(im1)
            else:
                im1_matches = {}
            modified = False
            for im2i in range(im1i + 1, len(images)):
                # match this image against the inow
                im2 = images[im2i]
                p2, f2, c2 = data.load_features(im2)
                if im2 not in im1_matches:
                    # Not cached: compute and remember we must re-save.
                    modified = True
                    i2 = data.load_feature_index(im2, f2)
                    matches = matching.match_symmetric(f1, i1, f2, i2, config)
                    if len(matches) < robust_matching_min_match:
                        # this image doesn't have enough matches with the first one
                        # i.e. either of them is broken, to be safe throw away both
                        print("%s and %s don't have enough matches, skipping"
                              % (im1, im2))
                        im1i = im2i + 1
                        break
                    # robust matching
                    camera1 = cameras[exifs[im1]['camera']]
                    camera2 = cameras[exifs[im2]['camera']]
                    rmatches = matching.robust_match(p1, p2, camera1, camera2,
                                                     matches, config)
                    if len(rmatches) < robust_matching_min_match:
                        im1_matches[im2] = []
                    else:
                        im1_matches[im2] = rmatches
                    #print("computed match between %s and %s" % (im1, im2))
                else:
                    rmatches = im1_matches[im2]
                if len(rmatches) < robust_matching_min_match:
                    print(
                        "%s and %s don't have enough robust matches, skipping"
                        % (im1, im2))
                    im1i = im2i + 1
                    break
                inliers_ratio = homography_inlier_ratio(p1, p2, rmatches, args)
                print("im %s and im %s, homography ratio is %f" %
                      (im1, im2, inliers_ratio))
                if inliers_ratio <= float(args.homography_inlier_ratio):
                    # this figure considered as not the same
                    retained.append(im2)
                    indexes.append(im2i)
                    im1i = im2i
                    break
                else:
                    print("throw away %s" % im2)
            else:
                # for/else: candidate loop exhausted without a break —
                # advance past im1.
                im1i += 1
            if modified:
                data.save_matches(im1, im1_matches)
    else:
        # "fast" mode: delegate matching to the OpenSfM CLI with a patched
        # config, then only inspect neighbouring frames' matches.
        # we should run neighbourhood matching anyway
        # make a copy of the old config
        config_path = os.path.join(data.data_path, "config.yaml")
        config_bak = config_path + ".bak"
        os.rename(config_path, config_bak)
        # replace the line with neighbour 2
        # NOTE(review): shell=True with string-built sed command — fine for
        # a local tool, but confirm data paths never contain shell
        # metacharacters.
        subprocess.call([
            'sed -e "s/matching_order_neighbors:.*/matching_order_neighbors: 2/" '
            + config_bak + ' > ' + config_path
        ],
                        shell=True)
        subprocess.call(["bin/opensfm", "match_features", args.dataset])
        # remove the replaced file
        os.remove(config_path)
        # move back
        os.rename(config_bak, config_path)
        # using the loaded features after ransac
        # slightly different logic here, we use the nearby frames' matches only
        for i1, im1 in enumerate(images):
            im1_matches = data.load_matches(im1)
            p1, f1, c1 = data.load_features(im1)
            if i1 + 1 < len(images):
                im2 = images[i1 + 1]
                p2, f2, c2 = data.load_features(im2)
                match = im1_matches[im2]
                if match == []:
                    print("im %s and im %s don't have match, throw away 2nd" %
                          (im1, im2))
                    continue
                # match is a list of tuples indicating which feature do I use for 2 images
                inliers_ratio = homography_inlier_ratio(p1, p2, match, args)
                print("im %s and im %s, homography ratio is %f" %
                      (im1, im2, inliers_ratio))
                if inliers_ratio <= float(args.homography_inlier_ratio):
                    retained.append(im2)
                    indexes.append(i1 + 1)
                else:
                    print("throw away %s" % im2)
    # TODO: investigate whether need to remove further stop frames
    '''
    # refine the list of remaining images by removing the isolated frames
    refined = [retained[0]]
    nn = 3
    for i in range(1, len(retained)-1):
        if abs(indexes[i]-indexes[i-1])<=nn or abs(indexes[i]-indexes[i+1])<=nn:
            refined.append(retained[i])
    refined.append(retained[-1])
    retained = refined
    '''
    # overwrite the image list if it exists
    image_list = os.path.join(data.data_path, "image_list.txt")
    with open(image_list, "w") as f:
        for im in retained:
            f.write("images/" + im + "\n")
def remove_stopping_frames_good(args):
    """Detect and drop near-duplicate ("stopping") frames using full matching.

    For each image, subsequent images are matched until one is found whose
    homography inlier ratio is at or below args.homography_inlier_ratio
    (i.e. genuinely different); that image becomes the next anchor.  A
    plain-text cache file records already-attempted pairs so interrupted
    runs can resume.

    :param args: parsed CLI namespace with .dataset and
        .homography_inlier_ratio.
    :returns: (retained, indexes) — surviving image names and their indices
        in the sorted image list.
    """
    data = dataset.DataSet(args.dataset)
    config = data.config
    # Check which, if any, matches have already been computed
    cache_path, computed, computed_matches = get_cache(data.matches_path())
    # The current image, next image is used as potentials to be the same as this image
    images = sorted(data.images())
    retained = [images[0]]
    indexes = [0]
    robust_matching_min_match = config["robust_matching_min_match"]
    cameras = data.load_camera_models()
    exifs = {im: data.load_exif(im) for im in images}
    print("computing matches")
    im1i = 0
    while im1i + 1 < len(images):
        im1 = images[im1i]
        print("processing image %s" % im1)
        p1, f1, c1 = data.load_features(im1)
        i1 = data.load_feature_index(im1, f1)
        # Get the cached features
        if data.matches_exists(im1):
            im1_matches = data.load_matches(im1)
        else:
            im1_matches = {}
        # Match against all subsequent images
        modified = False
        for im2i in range(im1i + 1, len(images)):
            im2 = images[im2i]
            # Print without newline
            print("\tmatching %s against %s " % (im1, im2))
            # Check if already computed, and if not, mark as computed
            # NOTE(review): a cache hit skips the homography test entirely
            # for this pair, not just the matching — confirm that is the
            # intended resume semantics.
            if computed and "%s,%s" % (im1, im2) in computed_matches:
                print("\t\tcache hit")
                continue
            else:
                print("\t\twriting to cache")
                with open(cache_path, "a") as f:
                    f.write("%s,%s\n" % (im1, im2))
            p2, f2, c2 = data.load_features(im2)
            if im2 not in im1_matches:
                modified = True
                i2 = data.load_feature_index(im2, f2)
                # Include segmentations
                im1_seg = get_segmentations(data, im1, p1, round = True)
                # NOTE(review): inside this branch `im2 not in im1_matches`
                # is always True, so round=True always — looks unintended;
                # verify against get_segmentations' contract.
                im2_seg = get_segmentations(data, im2, p2, round = im2 not in im1_matches)
                sys.stdout.write("\t\t")  # Prepend tabs in prints of match_symmetric
                matches = matching.match_symmetric(f1, i1, f2, i2, config,
                                                   im1_seg, im2_seg)
                if len(matches) < robust_matching_min_match:
                    # This image doesn't have enough matches with the first one i.e. either of
                    # them is broken; to be safe throw away both
                    print("\t%s and %s don't have enough matches, skipping" %
                          (im1, im2))
                    im1i = im2i + 1
                    break
                # Robust matching
                camera1 = cameras[exifs[im1]["camera"]]
                camera2 = cameras[exifs[im2]["camera"]]
                rmatches = matching.robust_match(p1, p2, camera1, camera2,
                                                 matches, config)
                if len(rmatches) < robust_matching_min_match:
                    im1_matches[im2] = []
                else:
                    im1_matches[im2] = rmatches
            else:
                rmatches = im1_matches[im2]
            if len(rmatches) < robust_matching_min_match:
                print("\t%s and %s don't have enough robust matches, skipping"
                      % (im1, im2))
                im1i = im2i + 1
                break
            inliers_ratio = homography_inlier_ratio(p1, p2, rmatches, args)
            print(
                "\t\tcomputed match between im %s and im %s, homography ratio is %f"
                % (im1, im2, inliers_ratio))
            if inliers_ratio <= float(args.homography_inlier_ratio):
                # this figure considered as not the same
                retained.append(im2)
                indexes.append(im2i)
                im1i = im2i
                break
            else:
                print("\thomography inlier ratio is too high, throwing away %s"
                      % im2)
        else:
            # for/else: no break — every remaining image matched im1; move on.
            im1i += 1
        if modified:
            data.save_matches(im1, im1_matches)
    return retained, indexes