def __init__(self, cam, **kwargs):
    self.nodes = set()
    self.edges = set()
    self.every_edge = []
    self.weak_edges = []
    self.link_thresh = 100          # how many inliers needed to add in a long-range link

    if 1:
        self.pg = TreeOptimizer3()
        self.vset = set()
        self.oldvset = set()
    else:
        import faketoro
        self.pg = faketoro.faketoro()
    self.pg.initializeOnlineOptimization()

    self.place_ids = []
    self.cam = cam
    self.pe = PoseEstimator(*cam.params)
    self.optimize_after_addition = True

    self.ds = None
    for k, a in kwargs.items():
        if k == 'descriptor_scheme':
            self.ds = a
        elif k == 'optimize_after_addition':
            self.optimize_after_addition = a
        elif k == 'link_thresh':
            self.link_thresh = a
    if self.ds == None:
        self.ds = DescriptorSchemeCalonder()

    self.vt = None
    search = ['/u/mihelich/images/holidays/holidays.tree', '/u/jamesb/holidays.tree']
    for filename in search:
        if os.access(filename, os.R_OK):
            self.vt = place_recognition.load(filename, self.ds.cl)
            break
    if not self.vt:
        print "ERROR: Could not find a valid place_recognition tree in", search
        assert 0

    self.node_kp = {}
    self.node_descriptors = {}
    self.termcrit = default_termcrit
    self.pr_maximum = 15            # How many out of PR's places to consider for GCC
    self.node_vdist = 15            # how many frames to wait to put in a skeleton node
    self.adaptive = False
    self.label = "no label"
    self.node_labels = {}
    self.fills = False

    self.timer = {}
    for t in ['toro add', 'toro opt', 'place recognition', 'gcc', 'descriptors']:
        self.timer[t] = Timer()
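# A minimal construction sketch for the Skeleton constructor above, kept under
# `if 0:` so it never runs. It assumes `cam` is a calibrated camera object
# exposing the .params tuple that PoseEstimator consumes (as set up elsewhere
# in this codebase); the keyword values shown are simply the defaults that
# __init__ would pick on its own.
if 0:
    skel = Skeleton(cam,
                    descriptor_scheme=DescriptorSchemeCalonder(),  # default when omitted
                    optimize_after_addition=True,                  # re-optimize the pose graph after each addition
                    link_thresh=100)                               # inliers required for a long-range link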
def __init__(self, ds):
    self.cameras = []
    self.pics = []
    filename = '/u/mihelich/images/holidays/holidays.tree'
    self.vt = place_recognition.load(filename)
    self.pe = PoseEstimator()
    self.clear()
    self.ds = ds
    self.verbose = 1
def match_runs(reference_files, query_files):
    print "loading vocabulary tree..."
    vt = place_recognition.load("/u/mihelich/images/holidays/holidays.tree")
    cl = calonder.classifier()
    cl.read('/u/prdata/calonder_trees/current.rtc')

    print "adding database images..."
    for i in reference_files[2]:
        # Add image to vocabulary tree
        name = reference_files[0] % i
        img = Image.open(name)
        desc, kp = get_features(img, cl)
        vt.add(img, desc)
        print '%s has %d feature points' % (name, len(kp))
        # Save coordinates and descriptors
        file = open(reference_files[1] % (i, "key"), "w")
        for (pt, d) in zip(kp, desc):
            # Format: x y [d...]
            file.write("%i %i %s\n" % (pt[0], pt[1], str(list(d))))

    print "getting top-k views for query images..."
    N = len(reference_files[2])
    k = 32
    M = []
    for i in query_files[2]:
        # Get indexes of best N matching views from reference run
        name = query_files[0] % i
        img = Image.open(name)
        desc, kp = get_features(img, cl)
        print '%s has %d feature points' % (name, len(kp))
        scores = vt.topN(img, desc, N)
        M.append(scores)
        top_n = sorted(enumerate(scores), key=operator.itemgetter(1))[:k]
        # Save filenames of matching views
        file = open(query_files[1] % (i, "match"), "w")
        for (index, score) in top_n:
            file.write("%s %f\n" % (reference_files[0] % reference_files[2][index], score))
    return M
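# A minimal sketch of how match_runs might be called, inferred only from how the
# function indexes its arguments (not taken from elsewhere in this code): each run
# is a 3-tuple of (image filename pattern, output filename pattern taking
# (index, suffix), list of frame indices). The paths below are placeholders.
if 0:
    reference_run = ('/tmp/ref/im%06d.png',   '/tmp/ref/im%06d.%s',   range(0, 100, 5))
    query_run     = ('/tmp/query/im%06d.png', '/tmp/query/im%06d.%s', range(0, 50, 5))
    M = match_runs(reference_run, query_run)  # M[i] holds the tree scores for query image i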
import pylab, numpy
import Image
import sys
import random

sys.path.append('lib')
import place_recognition
#import calonder
#import fast

ims = [ Image.open("/u/prdata/videre-bags/james4/im.%06u.left_rectified.tiff" % (20 * i)) for i in range(100) ]

random.seed(0)

BUILD_TREE = True
if BUILD_TREE:
    vt = place_recognition.vocabularytree()
    vt.build(random.sample(ims, 50), 5, 4, False)
    vt.save("thrash.tree")
else:
    vt = place_recognition.load("thrash.tree")

for (a, q) in zip(random.sample(ims, 100), random.sample(ims, 100)):
    print a, q
    vt.add(a)
    vt.topN(q, None, 10)
print 2, newpose(2).xform(0, 0, 0)
sys.exit(1)

if 0:
    vt = None
else:
    if 0:
        vt = place_recognition.vocabularytree()
        ims = [ Image.open("/u/prdata/videre-bags/james4/im.%06u.left_rectified.tiff" % (200 * i)) for i in range(10) ]
        vt.build(ims, 5, 4, False)
    else:
        vt = place_recognition.load("/u/mihelich/images/holidays/holidays.tree")

vo = VisualOdometer(cam,
                    scavenge=False,
                    feature_detector=FeatureDetectorFast(),
                    descriptor_scheme=DescriptorSchemeCalonder(),
                    inlier_error_threshold=3.0,
                    sba=None,
                    inlier_thresh=99999,
                    position_keypoint_thresh=0.2,
                    angle_keypoint_thresh=0.15)

from skeleton import Skeleton

if 0:
    skel = Skeleton(cam)
    skel.node_vdist = 0
outlet_files = '/wg/wgdata1/vol1/Outlets/OutletWhiteHall_cropped_outlets/IMG_%04d.JPG'
no_out_files = '/wg/wgdata1/vol1/Outlets/OutletWhiteHall_no_outlet/IMG_%04d.JPG'

print 'opening images...'
outlet_train = [ Image.open(outlet_files % i).convert('L') for i in range(416, 427) ]
no_out_train = [ Image.open(no_out_files % i).convert('L') for i in range(416, 427) ]
#outlet_test = [ Image.open(outlet_files % i).convert('L') for i in range(372, 416) + range(427, 471) ]
#no_out_test = [ Image.open(no_out_files % i).convert('L') for i in range(372, 416) + range(427, 471) ]

print 'loading trees...'
vt = place_recognition.load('/u/mihelich/images/holidays/holidays_b5d4.tree')
#vt = place_recognition.load('/u/mihelich/images/holidays/holidays.tree')
#vt = place_recognition.vocabularytree()
#vt.build(outlet_train, 5, 4, False)
cl = calonder.classifier()
cl.read('/u/prdata/calonder_trees/current.rtc')

def kp_d(frame):
    # FAST keypoints, dropping anything within 16 pixels of the image border
    fkp = fast.fast(frame.tostring(), frame.size[0], frame.size[1], 10, 15)
    fkp = [(x, y, r) for (x, y, r) in fkp
           if (16 <= x and 16 <= y and x <= frame.size[0] - 16 and y <= frame.size[1] - 16)]
    fkp = fast.nonmax(fkp)  # damn this is slow
    kp = [(x, y) for (x, y, r) in fkp]