def xtest_image_pan(self): cam = camera.Camera((1.0, 1.0, 89.23, 320., 320., 240.0)) vo = VisualOdometer(cam) prev_af = None pose = None im = Image.open("img1.pgm") for x in [0,5]: # range(0,100,10) + list(reversed(range(0, 100, 10))): lf = im.crop((x, 0, x + 640, 480)) rf = im.crop((x, 0, x + 640, 480)) af = SparseStereoFrame(lf, rf) vo.find_keypoints(af) vo.find_disparities(af) vo.collect_descriptors(af) if prev_af: pairs = vo.temporal_match(prev_af, af) pose = vo.solve(prev_af.kp, af.kp, pairs) for i in range(10): old = prev_af.kp[pairs[i][0]] new = af.kp[pairs[i][1]] print old, new, new[0] - old[0] prev_af = af print "frame", x, "has", len(af.kp), "keypoints", pose
def xtest_image_pan(self): cam = camera.Camera((1.0, 1.0, 89.23, 320., 320., 240.0)) vo = VisualOdometer(cam) prev_af = None pose = None im = Image.open("img1.pgm") for x in [0, 5]: # range(0,100,10) + list(reversed(range(0, 100, 10))): lf = im.crop((x, 0, x + 640, 480)) rf = im.crop((x, 0, x + 640, 480)) af = SparseStereoFrame(lf, rf) vo.find_keypoints(af) vo.find_disparities(af) vo.collect_descriptors(af) if prev_af: pairs = vo.temporal_match(prev_af, af) pose = vo.solve(prev_af.kp, af.kp, pairs) for i in range(10): old = prev_af.kp[pairs[i][0]] new = af.kp[pairs[i][1]] print old, new, new[0] - old[0] prev_af = af print "frame", x, "has", len(af.kp), "keypoints", pose
def test_sad(self): cam = camera.Camera((389.0, 389.0, 89.23, 323.42, 323.42, 274.95)) vo = VisualOdometer(cam) class adapter: def __init__(self, im): self.rawdata = im.tostring() self.size = im.size im = adapter(Image.open("img1.pgm")) vo.feature_detector.thresh *= 15 vo.find_keypoints(im) im.kp = im.kp2d vo.collect_descriptors(im) print len(im.kp) matches = vo.temporal_match(im, im) for (a,b) in matches: self.assert_(a == b)
def test_sad(self):
    # Sanity-check descriptor matching: a frame matched against itself
    # should pair each keypoint index with itself.
    cam = camera.Camera((389.0, 389.0, 89.23, 323.42, 323.42, 274.95))
    vo = VisualOdometer(cam)

    # Minimal stand-in for a frame object: exposes only the raw pixel
    # buffer and the image size.
    class adapter:
        def __init__(self, im):
            self.rawdata = im.tostring()
            self.size = im.size

    im = adapter(Image.open("img1.pgm"))
    # Raise the detector threshold so only strong features are kept.
    vo.feature_detector.thresh *= 15
    vo.find_keypoints(im)
    # The matcher reads .kp; reuse the 2-D keypoints found above.
    im.kp = im.kp2d
    vo.collect_descriptors(im)
    print len(im.kp)
    matches = vo.temporal_match(im, im)
    # Self-match must be the identity mapping.
    for (a, b) in matches:
        self.assert_(a == b)
# NOTE(review): incomplete fragment — the keyword arguments below continue a
# VisualOdometer(...) construction whose opening is not visible in this chunk,
# and `cam`, `topic`, `msg`, `prev_frame`, `framecounter`, `sos`, `all_ds`,
# `filename` and `ds` are all bound outside it (presumably a per-bag-message
# loop — TODO confirm).  The indentation below is reconstructed and must be
# checked against the original file.
                  feature_detector=FeatureDetectorFast(),
                  descriptor_scheme=DescriptorSchemeSAD())
if cam and topic.endswith("videre/images"):
    # The right image arrives first; the labels assert the ordering.
    imgR = imgAdapted(msg.images[0])
    imgL = imgAdapted(msg.images[1])
    assert msg.images[0].label == "right_rectified"
    assert msg.images[1].label == "left_rectified"
    frame = SparseStereoFrame(imgL, imgR)
    vo.find_keypoints(frame)
    vo.find_disparities(frame)
    #frame.kp = [ (x,y,d) for (x,y,d) in frame.kp if d > 8]
    # Accumulate every keypoint disparity for the histogram below.
    all_ds += [d for (x, y, d) in frame.kp]
    vo.collect_descriptors(frame)
    if prev_frame:
        pairs = vo.temporal_match(prev_frame, frame)
        solution = vo.solve(prev_frame.kp, frame.kp, pairs, True)
        (inl, rot, shift) = solution
        # Running sum of the per-frame translation estimates.
        sos += numpy.array(shift)
        print sos
    prev_frame = frame
    framecounter += 1
ds[filename] = all_ds
# One figure per input file: histogram of the disparity distribution.
for i, (filename, ds) in enumerate(ds.items()):
    pylab.figure(i + 1)
    pylab.title(filename)
    pylab.hist(ds, 50, normed=1, facecolor='green', alpha=0.75)
# NOTE(review): incomplete fragment — `cam`, `topic`, `msg`, `prev_frame`,
# `framecounter`, `sos`, `all_ds`, `filename` and `ds` are bound outside this
# chunk (presumably a per-bag-message loop — TODO confirm).  The indentation
# below is reconstructed and must be checked against the original file.
vo = VisualOdometer(cam, feature_detector = FeatureDetectorFast(), descriptor_scheme = DescriptorSchemeSAD())
if cam and topic.endswith("videre/images"):
    # The right image arrives first; the labels assert the ordering.
    imgR = imgAdapted(msg.images[0])
    imgL = imgAdapted(msg.images[1])
    assert msg.images[0].label == "right_rectified"
    assert msg.images[1].label == "left_rectified"
    frame = SparseStereoFrame(imgL, imgR)
    vo.find_keypoints(frame)
    vo.find_disparities(frame)
    #frame.kp = [ (x,y,d) for (x,y,d) in frame.kp if d > 8]
    # Accumulate every keypoint disparity for the histogram below.
    all_ds += [ d for (x,y,d) in frame.kp ]
    vo.collect_descriptors(frame)
    if prev_frame:
        pairs = vo.temporal_match(prev_frame, frame)
        solution = vo.solve(prev_frame.kp, frame.kp, pairs, True)
        (inl, rot, shift) = solution
        # Running sum of the per-frame translation estimates.
        sos += numpy.array(shift)
        print sos
    prev_frame = frame
    framecounter += 1
ds[filename] = all_ds
# One figure per input file: histogram of the disparity distribution.
for i,(filename, ds) in enumerate(ds.items()):
    pylab.figure(i+1)
    pylab.title(filename)
    pylab.hist(ds, 50, normed=1, facecolor='green', alpha=0.75)