def match(args):
    """Compute all matches for a single image"""
    log.setup()
    im1, candidates, i, n, ctx = args
    logger.info('Matching {} - {} / {}'.format(im1, i + 1, n))

    config = ctx.data.config
    matcher_type = config['matcher_type']
    robust_matching_min_match = config['robust_matching_min_match']

    im1_matches = {}

    for im2 in candidates:
        # symmetric matching
        t = timer()
        p1, f1, c1 = ctx.data.load_features(im1)
        p2, f2, c2 = ctx.data.load_features(im2)

        if matcher_type == 'WORDS':
            w1 = ctx.data.load_words(im1)
            w2 = ctx.data.load_words(im2)
            matches = matching.match_words_symmetric(f1, w1, f2, w2, config)
        elif matcher_type == 'FLANN':
            i1 = ctx.data.load_feature_index(im1, f1)
            i2 = ctx.data.load_feature_index(im2, f2)
            matches = matching.match_flann_symmetric(f1, i1, f2, i2, config)
        elif matcher_type == 'BRUTEFORCE':
            matches = matching.match_brute_force_symmetric(f1, f2, config)
        else:
            raise ValueError("Invalid matcher_type: {}".format(matcher_type))

        logger.debug('{} - {} has {} candidate matches'.format(
            im1, im2, len(matches)))
        if len(matches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue

        # robust matching
        t_robust_matching = timer()
        camera1 = ctx.cameras[ctx.exifs[im1]['camera']]
        camera2 = ctx.cameras[ctx.exifs[im2]['camera']]

        rmatches = matching.robust_match(p1, p2, camera1, camera2, matches,
                                         config)

        if len(rmatches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue

        im1_matches[im2] = rmatches
        logger.debug('Robust matching time : {0}s'.format(
            timer() - t_robust_matching))
        logger.debug("Full matching {0} / {1}, time: {2}s".format(
            len(rmatches), len(matches), timer() - t))

    ctx.data.save_matches(im1, im1_matches)
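
For reference, a minimal sketch of the configuration keys the match() variants in this file read. The values below are illustrative assumptions, not the project's shipped defaults:

# Illustrative config values (assumed, not the actual defaults).
config = {
    'matcher_type': 'FLANN',          # 'WORDS', 'FLANN' or 'BRUTEFORCE'
    'lowes_ratio': 0.8,               # ratio test for symmetric matching
    'robust_matching_min_match': 20,  # minimum matches to keep a pair
    'preemptive_threshold': 0,        # 0 disables preemptive matching
    'preemptive_lowes_ratio': 0.6,    # ratio test for the preemptive pass
}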

def match(args):
    """Compute all matches for a single image"""
    im1, candidates, i, n, ctx = args
    logger.info('Matching {} - {} / {}'.format(im1, i + 1, n))

    config = ctx.data.config
    robust_matching_min_match = config['robust_matching_min_match']
    preemptive_threshold = config['preemptive_threshold']
    lowes_ratio = config['lowes_ratio']
    preemptive_lowes_ratio = config['preemptive_lowes_ratio']

    im1_matches = {}

    for im2 in candidates:
        # preemptive matching
        if preemptive_threshold > 0:
            t = time.time()
            config['lowes_ratio'] = preemptive_lowes_ratio
            matches_pre = matching.match_lowe_bf(ctx.f_pre[im1],
                                                 ctx.f_pre[im2], config)
            config['lowes_ratio'] = lowes_ratio
            logger.debug("Preemptive matching {0}, time: {1}s".format(
                len(matches_pre), time.time() - t))
            if len(matches_pre) < preemptive_threshold:
                logger.debug(
                    "Discarding based on preemptive matches {0} < {1}".format(
                        len(matches_pre), preemptive_threshold))
                continue

        # symmetric matching
        t = time.time()
        p1, f1, c1 = ctx.data.load_features(im1)
        i1 = ctx.data.load_feature_index(im1, f1)
        p2, f2, c2 = ctx.data.load_features(im2)
        i2 = ctx.data.load_feature_index(im2, f2)

        matches = matching.match_symmetric(f1, i1, f2, i2, config)
        logger.debug('{} - {} has {} candidate matches'.format(
            im1, im2, len(matches)))
        if len(matches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue

        # robust matching
        t_robust_matching = time.time()
        camera1 = ctx.cameras[ctx.exifs[im1]['camera']]
        camera2 = ctx.cameras[ctx.exifs[im2]['camera']]

        rmatches = matching.robust_match(p1, p2, camera1, camera2, matches,
                                         config)

        if len(rmatches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue

        im1_matches[im2] = rmatches
        logger.debug('Robust matching time : {0}s'.format(
            time.time() - t_robust_matching))
        logger.debug("Full matching {0} / {1}, time: {2}s".format(
            len(rmatches), len(matches), time.time() - t))

    ctx.data.save_matches(im1, im1_matches)

def test_robust_match():
    d = data_generation.CubeDataset(2, 100, 0.0, 0.3)
    p1 = np.array([v['feature'] for k, v in iteritems(d.tracks['shot0'])])
    p2 = np.array([v['feature'] for k, v in iteritems(d.tracks['shot1'])])
    camera1 = d.shots['shot0'].camera
    camera2 = d.shots['shot1'].camera

    num_points = len(p1)
    inlier_matches = np.array([(i, i) for i in range(num_points)])
    outlier_matches = np.random.randint(num_points, size=(num_points // 2, 2))
    matches = np.concatenate((inlier_matches, outlier_matches))

    rmatches = matching.robust_match(p1, p2, camera1, camera2, matches,
                                     config.default_config())
    assert num_points <= len(rmatches) <= len(matches)

def match(args):
    """Compute all matches for a single image"""
    log.setup()
    im1, candidates, i, n, ctx = args
    logger.debug('Matching %s against candidates %s' % (im1, candidates))
    logger.info('Matching {} - {} / {}'.format(im1, i + 1, n))

    config = ctx.data.config
    robust_matching_min_match = config['robust_matching_min_match']
    preemptive_threshold = config['preemptive_threshold']
    lowes_ratio = config['lowes_ratio']
    preemptive_lowes_ratio = config['preemptive_lowes_ratio']

    im1_matches = {}

    for im2 in candidates:
        # preemptive matching
        if preemptive_threshold > 0:
            t = timer()
            config['lowes_ratio'] = preemptive_lowes_ratio
            matches_pre = matching.match_lowe_bf(ctx.f_pre[im1],
                                                 ctx.f_pre[im2], config)
            config['lowes_ratio'] = lowes_ratio
            logger.debug("Preemptive matching {0}, time: {1}s".format(
                len(matches_pre), timer() - t))
            if len(matches_pre) < preemptive_threshold:
                logger.debug(
                    "Discarding based on preemptive matches {0} < {1}".format(
                        len(matches_pre), preemptive_threshold))
                continue

        # symmetric matching
        t = timer()
        p1, f1, c1 = ctx.data.load_features(im1)
        p2, f2, c2 = ctx.data.load_features(im2)

        if config['matcher_type'] == 'FLANN':
            logger.debug('Using FLANN feature indices')
            i1 = ctx.data.load_feature_index(im1, f1)
            i2 = ctx.data.load_feature_index(im2, f2)
        else:
            i1 = None
            i2 = None

        matches = matching.match_symmetric(f1, i1, f2, i2, config)

        # Debug visualization: recompute SIFT matches with OpenCV, apply a
        # ratio test, and save an image of the matched keypoint pairs.
        siftQ = cv2.xfeatures2d.SIFT_create()
        logger.debug('loading image 1: %s' % im1)
        logger.debug('loading image 2: %s' % im2)
        img1 = cv2.imread(
            '/home/qli/workspace/OpenSfM/data/MattGPS/images/%s' % im1)
        img2 = cv2.imread(
            '/home/qli/workspace/OpenSfM/data/MattGPS/images/%s' % im2)
        kp1, des1 = siftQ.detectAndCompute(img1, None)
        kp2, des2 = siftQ.detectAndCompute(img2, None)

        # BFMatcher with default params; flag matches whose best distance
        # falls within a band of the second-best distance (ratio test).
        bf = cv2.BFMatcher()
        matchesQ = bf.knnMatch(des1, des2, k=2)
        matchesMask = [[0, 0] for _ in range(len(matchesQ))]
        for qi, (m, nn) in enumerate(matchesQ):
            if 0.55 * nn.distance < m.distance < 0.80 * nn.distance:
                matchesMask[qi] = [1, 0]

        # cv2.drawMatchesKnn expects a list of lists as matches.
        draw_params = dict(matchesMask=matchesMask)
        img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, matchesQ, None,
                                  flags=2, **draw_params)
        savefigname = "/home/qli/workspace/OpenSfM/data/MattGPS/GPS%s%s.jpg" % (
            str(im1), str(im2))
        cv2.imwrite(savefigname, img3)

        logger.debug('{} - {} has {} candidate matches'.format(
            im1, im2, len(matches)))
        if len(matches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue

        # robust matching
        t_robust_matching = timer()
        camera1 = ctx.cameras[ctx.exifs[im1]['camera']]
        camera2 = ctx.cameras[ctx.exifs[im2]['camera']]

        rmatches = matching.robust_match(p1, p2, camera1, camera2, matches,
                                         config)

        if len(rmatches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue

        im1_matches[im2] = rmatches
        logger.debug('Robust matching time : {0}s'.format(
            timer() - t_robust_matching))
        logger.debug("Full matching {0} / {1}, time: {2}s".format(
            len(rmatches), len(matches), timer() - t))

    ctx.data.save_matches(im1, im1_matches)

def match(args):
    '''Compute all matches for a single image'''
    im1, candidates, i, n, ctx = args
    logger.info('Matching {} - {} / {}'.format(im1, i + 1, n))

    config = ctx.data.config
    robust_matching_min_match = config['robust_matching_min_match']
    preemptive_threshold = config['preemptive_threshold']
    lowes_ratio = config['lowes_ratio']
    preemptive_lowes_ratio = config['preemptive_lowes_ratio']

    if ctx.data.matches_exists(im1):
        im1_matches = ctx.data.load_matches(im1)
    else:
        im1_matches = {}

    for im2 in candidates:
        if im2 in im1_matches:
            continue

        # preemptive matching
        if preemptive_threshold > 0:
            t = time.time()
            config['lowes_ratio'] = preemptive_lowes_ratio
            matches_pre = matching.match_lowe_bf(ctx.f_pre[im1],
                                                 ctx.f_pre[im2], config)
            config['lowes_ratio'] = lowes_ratio
            logger.debug("Preemptive matching {0}, time: {1}s".format(
                len(matches_pre), time.time() - t))
            if len(matches_pre) < preemptive_threshold:
                logger.debug(
                    "Discarding based on preemptive matches {0} < {1}".format(
                        len(matches_pre), preemptive_threshold))
                continue

        # symmetric matching
        t = time.time()
        p1, f1, c1 = ctx.data.load_features(im1)
        # If we are using brute-force matching, the loaded index will simply
        # be False.
        i1 = ctx.data.load_feature_index(im1, f1)
        p2, f2, c2 = ctx.data.load_features(im2)
        i2 = ctx.data.load_feature_index(im2, f2)

        matches = matching.match_symmetric(f1, i1, f2, i2, config)
        logger.debug('{} - {} has {} candidate matches'.format(
            im1, im2, len(matches)))
        if len(matches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue

        # robust matching
        t_robust_matching = time.time()
        camera1 = ctx.cameras[ctx.exifs[im1]['camera']]
        camera2 = ctx.cameras[ctx.exifs[im2]['camera']]

        # TODO: add extra matches on the road with a homography method and
        # filter the candidate points by semantic segmentation.
        rmatches = matching.robust_match(p1, p2, camera1, camera2, matches,
                                         config)

        if len(rmatches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue

        im1_matches[im2] = rmatches
        logger.debug('Robust matching time : {0}s'.format(
            time.time() - t_robust_matching))
        logger.debug("Full matching {0} / {1}, time: {2}s".format(
            len(rmatches), len(matches), time.time() - t))

    ctx.data.save_matches(im1, im1_matches)

def match(args):
    """Compute all matches for a single image"""
    log.setup()
    im1, candidates, i, n, ctx = args
    logger.info('Matching {} - {} / {}'.format(im1, i + 1, n))

    config = ctx.data.config
    robust_matching_min_match = config['robust_matching_min_match']
    preemptive_threshold = config['preemptive_threshold']
    lowes_ratio = config['lowes_ratio']
    preemptive_lowes_ratio = config['preemptive_lowes_ratio']

    im1_matches = {}

    for im2 in candidates:
        # preemptive matching
        if preemptive_threshold > 0:
            t = timer()
            config['lowes_ratio'] = preemptive_lowes_ratio
            matches_pre = matching.match_lowe_bf(
                ctx.f_pre[im1], ctx.f_pre[im2], config)
            config['lowes_ratio'] = lowes_ratio
            logger.debug("Preemptive matching {0}, time: {1}s".format(
                len(matches_pre), timer() - t))
            if len(matches_pre) < preemptive_threshold:
                logger.debug(
                    "Discarding based on preemptive matches {0} < {1}".format(
                        len(matches_pre), preemptive_threshold))
                continue

        # symmetric matching
        t = timer()
        p1, f1, c1 = ctx.data.load_features(im1)
        p2, f2, c2 = ctx.data.load_features(im2)

        if config['matcher_type'] == 'FLANN':
            i1 = ctx.data.load_feature_index(im1, f1)
            i2 = ctx.data.load_feature_index(im2, f2)
        else:
            i1 = None
            i2 = None

        matches = matching.match_symmetric(f1, i1, f2, i2, config)
        logger.debug('{} - {} has {} candidate matches'.format(
            im1, im2, len(matches)))
        if len(matches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue

        # robust matching
        t_robust_matching = timer()
        camera1 = ctx.cameras[ctx.exifs[im1]['camera']]
        camera2 = ctx.cameras[ctx.exifs[im2]['camera']]

        rmatches = matching.robust_match(p1, p2, camera1, camera2, matches,
                                         config)

        if len(rmatches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue

        im1_matches[im2] = rmatches
        logger.debug('Robust matching time : {0}s'.format(
            timer() - t_robust_matching))
        logger.debug("Full matching {0} / {1}, time: {2}s".format(
            len(rmatches), len(matches), timer() - t))

    ctx.data.save_matches(im1, im1_matches)
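
A hedged sketch of how match() might be driven. The Context class, the candidate-pair selection, and match_all are illustrative assumptions, not the actual pipeline command that builds this state:

from multiprocessing import Pool


class Context:
    """Bag of shared state unpacked by match() above (assumed shape)."""


def match_all(data, candidate_pairs, processes=4):
    # candidate_pairs: {im1: [im2, ...]}, produced elsewhere (assumed).
    # Assumes the context contents are picklable for multiprocessing.
    ctx = Context()
    ctx.data = data
    ctx.cameras = data.load_camera_models()
    ctx.exifs = {im: data.load_exif(im) for im in data.images()}
    ctx.f_pre = {}  # preemptive features; only needed if preemptive_threshold > 0
    n = len(candidate_pairs)
    args = [(im1, candidates, i, n, ctx)
            for i, (im1, candidates) in enumerate(sorted(candidate_pairs.items()))]
    pool = Pool(processes)
    pool.map(match, args)
    pool.close()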

def match(args):
    '''Compute all matches for a single image'''
    im1, candidates, i, n, ctx = args
    logger.info('Matching {} - {} / {}'.format(im1, i + 1, n))

    config = ctx.data.config
    robust_matching_min_match = config['robust_matching_min_match']
    preemptive_threshold = config['preemptive_threshold']
    lowes_ratio = config['lowes_ratio']
    preemptive_lowes_ratio = config['preemptive_lowes_ratio']

    # Load the semantic segmentation for im1, if one has been computed.
    path_seg = (ctx.data.data_path + "/images/output/results/frontend_vgg/" +
                os.path.splitext(im1)[0] + '.png')
    file_name = Path(path_seg)
    if file_name.is_file():
        im1_seg = np.array(Image.open(path_seg))

    p1, f1, c1 = ctx.data.load_features(im1)
    # If we are using brute-force matching, the loaded index will simply be
    # False.
    i1 = ctx.data.load_feature_index(im1, f1)

    if file_name.is_file():
        # Look up the segmentation label of each feature point. Features are
        # in normalized coordinates, so map them back to pixel indices.
        idx_u1 = im1_seg.shape[1] * (p1[:, 0] + 0.5)
        idx_v1 = im1_seg.shape[0] * (p1[:, 1] + 0.5)
        im1_seg = im1_seg[idx_v1.astype(int), idx_u1.astype(int)]
    else:
        im1_seg = None

    if ctx.data.matches_exists(im1):
        im1_matches = ctx.data.load_matches(im1)
    else:
        im1_matches = {}

    for im2 in candidates:
        if im2 in im1_matches:
            continue

        # Load the semantic segmentation for im2, if one has been computed.
        path_seg = (ctx.data.data_path +
                    "/images/output/results/frontend_vgg/" +
                    os.path.splitext(im2)[0] + '.png')
        file_name = Path(path_seg)
        if file_name.is_file():
            im2_seg = np.array(Image.open(path_seg))

        p2, f2, c2 = ctx.data.load_features(im2)
        i2 = ctx.data.load_feature_index(im2, f2)

        if file_name.is_file():
            idx_u2 = im2_seg.shape[1] * (p2[:, 0] + 0.5)
            idx_v2 = im2_seg.shape[0] * (p2[:, 1] + 0.5)
            im2_seg = im2_seg[idx_v2.astype(int), idx_u2.astype(int)]
        else:
            im2_seg = None

        # preemptive matching
        if preemptive_threshold > 0:
            t = time.time()
            config['lowes_ratio'] = preemptive_lowes_ratio
            matches_pre = matching.match_lowe_bf(ctx.f_pre[im1],
                                                 ctx.f_pre[im2], config,
                                                 im1_seg, im2_seg)
            config['lowes_ratio'] = lowes_ratio
            logger.debug("Preemptive matching {0}, time: {1}s".format(
                len(matches_pre), time.time() - t))
            if len(matches_pre) < preemptive_threshold:
                logger.debug(
                    "Discarding based on preemptive matches {0} < {1}".format(
                        len(matches_pre), preemptive_threshold))
                continue

        # symmetric matching
        t = time.time()
        matches = matching.match_symmetric(f1, i1, f2, i2, config,
                                           im1_seg, im2_seg)
        logger.debug('{} - {} has {} candidate matches'.format(
            im1, im2, len(matches)))
        if len(matches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue

        # robust matching
        t_robust_matching = time.time()
        camera1 = ctx.cameras[ctx.exifs[im1]['camera']]
        camera2 = ctx.cameras[ctx.exifs[im2]['camera']]

        # TODO: add extra matches on the road with a homography method and
        # filter the candidate points by semantic segmentation.
        rmatches = matching.robust_match(p1, p2, camera1, camera2, matches,
                                         config)

        if len(rmatches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue

        im1_matches[im2] = rmatches
        logger.debug('Robust matching time : {0}s'.format(
            time.time() - t_robust_matching))
        logger.debug("Full matching {0} / {1}, time: {2}s".format(
            len(rmatches), len(matches), time.time() - t))

    ctx.data.save_matches(im1, im1_matches)

def match(args):
    """Compute all matches for a single image"""
    log.setup()
    im1, candidates, i, n, ctx = args
    logger.info('Matching {} - {} / {}'.format(im1, i + 1, n))

    config = ctx.data.config
    robust_matching_min_match = config['robust_matching_min_match']
    preemptive_threshold = config['preemptive_threshold']
    lowes_ratio = config['lowes_ratio']
    preemptive_lowes_ratio = config['preemptive_lowes_ratio']

    im1_matches = {}
    im1_all_matches = {}
    im1_all_robust_matches = {}
    im1_valid_rmatches = {}
    im1_T = {}
    im1_F = {}
    im1_valid_inliers = {}
    im1_calibration_flag = {}
    im1_unthresholded_matches = {}

    for im2 in candidates:
        # preemptive matching
        if preemptive_threshold > 0:
            t = timer()
            config['lowes_ratio'] = preemptive_lowes_ratio
            matches_pre = matching.match_lowe_bf(
                ctx.f_pre[im1], ctx.f_pre[im2], config)
            config['lowes_ratio'] = lowes_ratio
            logger.debug("Preemptive matching {0}, time: {1}s".format(
                len(matches_pre), timer() - t))
            if len(matches_pre) < preemptive_threshold:
                logger.debug(
                    "Discarding based on preemptive matches {0} < {1}".format(
                        len(matches_pre), preemptive_threshold))
                im1_all_matches[im2] = matches_pre
                im1_all_robust_matches[im2] = []
                im1_valid_rmatches[im2] = -1
                continue

        # symmetric matching
        t = timer()
        p1, f1, c1 = ctx.data.load_features(im1)
        p2, f2, c2 = ctx.data.load_features(im2)

        if config['matcher_type'] == 'FLANN':
            i1 = ctx.data.load_feature_index(im1, f1)
            i2 = ctx.data.load_feature_index(im2, f2)
        else:
            i1 = None
            i2 = None

        matches_all = classifier.unthresholded_match_symmetric(
            f1, i1, f2, i2, config)

        # Threshold the raw matches with Lowe's ratio test.
        if config['matcher_type'] == 'FLANN':
            # FLANN returns squared L2 distances
            ri = np.where((matches_all[:, 2] <= config['lowes_ratio']**2) &
                          (matches_all[:, 3] <= config['lowes_ratio']**2))[0]
        else:
            ri = np.where((matches_all[:, 2] <= config['lowes_ratio']) &
                          (matches_all[:, 3] <= config['lowes_ratio']))[0]
        matches = matches_all[ri, :]

        logger.debug('{} - {} has {} candidate matches'.format(
            im1, im2, len(matches)))
        if len(matches) < robust_matching_min_match:
            im1_matches[im2] = []
            im1_all_matches[im2] = matches
            im1_unthresholded_matches[im2] = matches_all
            im1_all_robust_matches[im2] = []
            im1_valid_rmatches[im2] = 0
            continue

        # robust matching
        t_robust_matching = timer()
        camera1 = ctx.cameras[ctx.exifs[im1]['camera']]
        camera2 = ctx.cameras[ctx.exifs[im2]['camera']]

        [rmatches, T, F, validity], calibration_flag = matching.robust_match(
            p1, p2, camera1, camera2, matches, config)

        im1_all_matches[im2] = matches
        im1_unthresholded_matches[im2] = matches_all
        im1_all_robust_matches[im2] = rmatches
        im1_valid_rmatches[im2] = 1
        im1_T[im2] = T.tolist()
        im1_F[im2] = F.tolist()
        im1_valid_inliers[im2] = validity
        im1_calibration_flag[im2] = calibration_flag

        if len(rmatches) < robust_matching_min_match:
            im1_matches[im2] = []
            continue

        im1_matches[im2] = rmatches
        logger.debug('Robust matching time : {0}s'.format(
            timer() - t_robust_matching))
        logger.debug("Full matching {0} / {1}, time: {2}s".format(
            len(rmatches), len(matches), timer() - t))

    ctx.data.save_matches(im1, im1_matches)
    ctx.data.save_unthresholded_matches(im1, im1_unthresholded_matches)
    ctx.data.save_all_matches(im1, im1_all_matches, im1_valid_rmatches,
                              im1_all_robust_matches)
    ctx.data.save_pairwise_results(im1, im1_T, im1_F, im1_valid_inliers,
                                   im1_calibration_flag)

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset', help='dataset to process')
    parser.add_argument('--homography_ransac_threshold',
                        help='the threshold used to fit the homography',
                        default=0.004)
    parser.add_argument(
        '--homography_inlier_ratio',
        help='the lower bound of the homography inlier ratio for two frames '
             'to be considered the same',
        default=0.90)
    parser.add_argument('--matching_mod',
                        help='can be either good or fast',
                        default="good")
    args = parser.parse_args()

    is_good = (args.matching_mod == "good")

    data = dataset.DataSet(args.dataset)
    images = sorted(data.images())
    config = data.config

    # The current image; the following images are candidates for being the
    # same frame as this one.
    im1i = 0
    retained = [images[0]]
    indexes = [0]

    if is_good:
        robust_matching_min_match = config['robust_matching_min_match']
        cameras = data.load_camera_models()
        exifs = {im: data.load_exif(im) for im in images}

        while im1i + 1 < len(images):  # while a next image exists
            im1 = images[im1i]
            p1, f1, c1 = data.load_features(im1)
            i1 = data.load_feature_index(im1, f1)

            # get the cached matches
            if data.matches_exists(im1):
                im1_matches = data.load_matches(im1)
            else:
                im1_matches = {}
            modified = False

            for im2i in range(im1i + 1, len(images)):
                # match this image against the next candidate
                im2 = images[im2i]
                p2, f2, c2 = data.load_features(im2)

                if im2 not in im1_matches:
                    modified = True
                    i2 = data.load_feature_index(im2, f2)
                    matches = matching.match_symmetric(f1, i1, f2, i2, config)
                    if len(matches) < robust_matching_min_match:
                        # This image doesn't have enough matches with the
                        # first one, i.e. either of them is broken; to be
                        # safe, throw away both.
                        print("%s and %s don't have enough matches, skipping"
                              % (im1, im2))
                        im1i = im2i + 1
                        break

                    # robust matching
                    camera1 = cameras[exifs[im1]['camera']]
                    camera2 = cameras[exifs[im2]['camera']]
                    rmatches = matching.robust_match(p1, p2, camera1, camera2,
                                                     matches, config)
                    if len(rmatches) < robust_matching_min_match:
                        im1_matches[im2] = []
                    else:
                        im1_matches[im2] = rmatches
                    #print("computed match between %s and %s" % (im1, im2))
                else:
                    rmatches = im1_matches[im2]
                    if len(rmatches) < robust_matching_min_match:
                        print("%s and %s don't have enough robust matches, "
                              "skipping" % (im1, im2))
                        im1i = im2i + 1
                        break

                inliers_ratio = homography_inlier_ratio(p1, p2, rmatches,
                                                        args)
                print("im %s and im %s, homography ratio is %f"
                      % (im1, im2, inliers_ratio))
                if inliers_ratio <= float(args.homography_inlier_ratio):
                    # this frame is considered not to be the same
                    retained.append(im2)
                    indexes.append(im2i)
                    im1i = im2i
                    break
                else:
                    print("throw away %s" % im2)
            else:
                im1i += 1

            if modified:
                data.save_matches(im1, im1_matches)
    else:
        # We should run neighbourhood matching anyway.
        # Make a backup copy of the old config.
        config_path = os.path.join(data.data_path, "config.yaml")
        config_bak = config_path + ".bak"
        os.rename(config_path, config_bak)
        # Rewrite the matching_order_neighbors line to use 2 neighbours.
        subprocess.call(
            ['sed -e "s/matching_order_neighbors:.*/'
             'matching_order_neighbors: 2/" ' + config_bak + ' > ' +
             config_path],
            shell=True)
        subprocess.call(["bin/opensfm", "match_features", args.dataset])
        # Remove the rewritten config and restore the backup.
        os.remove(config_path)
        os.rename(config_bak, config_path)

        # Use the matches loaded after RANSAC. Slightly different logic here:
        # we only use the matches between neighbouring frames.
        for i1, im1 in enumerate(images):
            im1_matches = data.load_matches(im1)
            p1, f1, c1 = data.load_features(im1)
            if i1 + 1 < len(images):
                im2 = images[i1 + 1]
                p2, f2, c2 = data.load_features(im2)
                match = im1_matches[im2]
                if match == []:
                    print("im %s and im %s don't have a match, throwing away "
                          "the 2nd" % (im1, im2))
                    continue
                # match is a list of tuples indicating which features are
                # used in the two images
                inliers_ratio = homography_inlier_ratio(p1, p2, match, args)
                print("im %s and im %s, homography ratio is %f"
                      % (im1, im2, inliers_ratio))
                if inliers_ratio <= float(args.homography_inlier_ratio):
                    retained.append(im2)
                    indexes.append(i1 + 1)
                else:
                    print("throw away %s" % im2)

    # TODO: investigate whether further stop frames need to be removed
    '''
    # refine the list of remaining images by removing the isolated frames
    refined = [retained[0]]
    nn = 3
    for i in range(1, len(retained) - 1):
        if abs(indexes[i] - indexes[i-1]) <= nn or \
           abs(indexes[i] - indexes[i+1]) <= nn:
            refined.append(retained[i])
    refined.append(retained[-1])
    retained = refined
    '''

    # overwrite the image list if it exists
    image_list = os.path.join(data.data_path, "image_list.txt")
    with open(image_list, "w") as f:
        for im in retained:
            f.write("images/" + im + "\n")
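
The homography_inlier_ratio helper used above is not defined in this file. A minimal sketch, assuming OpenCV's RANSAC homography estimator, the normalized point arrays p1/p2, and index-pair matches as used above:

import cv2
import numpy as np

def homography_inlier_ratio(p1, p2, matches, args):
    # Fraction of matches explained by a single homography; a ratio close
    # to 1 suggests the two frames show essentially the same view.
    matches = np.asarray(matches, dtype=int)
    if len(matches) < 4:  # findHomography needs at least 4 correspondences
        return 0.0
    src = p1[matches[:, 0], :2].astype(np.float32)
    dst = p2[matches[:, 1], :2].astype(np.float32)
    H, mask = cv2.findHomography(
        src, dst, cv2.RANSAC, float(args.homography_ransac_threshold))
    if mask is None:
        return 0.0
    return float(mask.sum()) / len(mask)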

def remove_stopping_frames_good(args):
    data = dataset.DataSet(args.dataset)
    config = data.config

    # Check which, if any, matches have already been computed
    cache_path, computed, computed_matches = get_cache(data.matches_path())

    # The current image; the following images are candidates for being the
    # same frame as this one.
    images = sorted(data.images())
    retained = [images[0]]
    indexes = [0]

    robust_matching_min_match = config["robust_matching_min_match"]
    cameras = data.load_camera_models()
    exifs = {im: data.load_exif(im) for im in images}

    print("computing matches")
    im1i = 0
    while im1i + 1 < len(images):
        im1 = images[im1i]
        print("processing image %s" % im1)
        p1, f1, c1 = data.load_features(im1)
        i1 = data.load_feature_index(im1, f1)

        # Get the cached matches
        if data.matches_exists(im1):
            im1_matches = data.load_matches(im1)
        else:
            im1_matches = {}
        modified = False

        # Match against all subsequent images
        for im2i in range(im1i + 1, len(images)):
            im2 = images[im2i]
            print("\tmatching %s against %s" % (im1, im2))

            # Check if already computed, and if not, mark as computed
            if computed and "%s,%s" % (im1, im2) in computed_matches:
                print("\t\tcache hit")
                continue
            else:
                print("\t\twriting to cache")
                with open(cache_path, "a") as f:
                    f.write("%s,%s\n" % (im1, im2))

            p2, f2, c2 = data.load_features(im2)
            if im2 not in im1_matches:
                modified = True
                i2 = data.load_feature_index(im2, f2)

                # Include segmentations
                im1_seg = get_segmentations(data, im1, p1, round=True)
                im2_seg = get_segmentations(data, im2, p2,
                                            round=(im2 not in im1_matches))

                # Prepend tabs to the prints of match_symmetric
                sys.stdout.write("\t\t")
                matches = matching.match_symmetric(f1, i1, f2, i2, config,
                                                   im1_seg, im2_seg)
                if len(matches) < robust_matching_min_match:
                    # This image doesn't have enough matches with the first
                    # one, i.e. either of them is broken; to be safe, throw
                    # away both.
                    print("\t%s and %s don't have enough matches, skipping"
                          % (im1, im2))
                    im1i = im2i + 1
                    break

                # Robust matching
                camera1 = cameras[exifs[im1]["camera"]]
                camera2 = cameras[exifs[im2]["camera"]]
                rmatches = matching.robust_match(p1, p2, camera1, camera2,
                                                 matches, config)
                if len(rmatches) < robust_matching_min_match:
                    im1_matches[im2] = []
                else:
                    im1_matches[im2] = rmatches
            else:
                rmatches = im1_matches[im2]
                if len(rmatches) < robust_matching_min_match:
                    print("\t%s and %s don't have enough robust matches, "
                          "skipping" % (im1, im2))
                    im1i = im2i + 1
                    break

            inliers_ratio = homography_inlier_ratio(p1, p2, rmatches, args)
            print("\t\tcomputed match between im %s and im %s, homography "
                  "ratio is %f" % (im1, im2, inliers_ratio))
            if inliers_ratio <= float(args.homography_inlier_ratio):
                # this frame is considered not to be the same
                retained.append(im2)
                indexes.append(im2i)
                im1i = im2i
                break
            else:
                print("\thomography inlier ratio is too high, throwing away "
                      "%s" % im2)
        else:
            im1i += 1

        if modified:
            data.save_matches(im1, im1_matches)

    return retained, indexes
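
The get_cache and get_segmentations helpers are also not defined in this file. Hedged sketches follow: get_cache is assumed to keep a plain-text file of already-processed "im1,im2" pairs (the file name is an assumption), and get_segmentations mirrors the inline per-feature segmentation lookup from the match() variant above; the exact effect of its round flag is an assumption as well.

import os
import numpy as np
from PIL import Image

def get_cache(matches_path):
    # Returns (cache_path, whether a cache exists, set of cached pairs).
    cache_path = os.path.join(matches_path, "cache.txt")  # assumed name
    if os.path.isfile(cache_path):
        with open(cache_path) as f:
            return cache_path, True, set(line.strip() for line in f)
    return cache_path, False, set()

def get_segmentations(data, im, p, round=True):
    # Per-feature segmentation labels, or None if no segmentation exists.
    path_seg = os.path.join(data.data_path,
                            "images/output/results/frontend_vgg",
                            os.path.splitext(im)[0] + ".png")
    if not os.path.isfile(path_seg):
        return None
    seg = np.array(Image.open(path_seg))
    # Features are in normalized coordinates; map them to pixel indices.
    # The round flag is assumed to pick rounding over truncation.
    idx_u = seg.shape[1] * (p[:, 0] + 0.5)
    idx_v = seg.shape[0] * (p[:, 1] + 0.5)
    if round:
        idx_u, idx_v = np.rint(idx_u), np.rint(idx_v)
    return seg[idx_v.astype(int), idx_u.astype(int)]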