def load_from_bag(filename, selected_frames): cam = None framecounter = 0 afs = {} for topic, msg in rosrecord.logplayer(filename): if rospy.is_shutdown(): break if topic == "/videre/cal_params" and not cam: cam = camera.VidereCamera(msg.data) if cam and topic == "/videre/images": print "frame", framecounter if framecounter in selected_frames: def imgAdapted(msg_img): return Image.fromstring("L", (msg_img.width, msg_img.height), msg_img.data) imgR = imgAdapted(msg.images[0]) imgL = imgAdapted(msg.images[1]) afs[framecounter] = SparseStereoFrame(imgL, imgR) if framecounter == max(selected_frames): break framecounter += 1 return (cam, afs)
def xtest_smoke_bag(self):
    """Disabled smoke test ('x' prefix keeps the runner from collecting it):
    replay a bag end-to-end through the visual odometer, then print the total
    distance from the start pose and the timer summary.

    NOTE(review): depends on hard-coded bag paths under /u/prdata, so it can
    only run on the original development hosts.
    """
    import rosrecord
    import visualize

    # Minimal adapter exposing a ROS image message through a PIL-like
    # interface (size attribute + tostring()) -- presumably the interface
    # SparseStereoFrame consumes; confirm against its implementation.
    class imgAdapted:
        def __init__(self, i):
            self.i = i
            self.size = (i.width, i.height)
        def tostring(self):
            return self.i.data

    cam = None
    filename = "/u/prdata/videre-bags/loop1-mono.bag"
    # Second assignment wins; the first path is kept as a quick manual toggle.
    filename = "/u/prdata/videre-bags/vo1.bag"
    framecounter = 0
    for topic, msg in rosrecord.logplayer(filename):
        print framecounter
        if rospy.is_shutdown():
            break
        #print topic,msg
        if topic == "/videre/cal_params" and not cam:
            # First calibration message: build the camera and the odometer.
            cam = camera.VidereCamera(msg.data)
            vo = VisualOdometer(cam)
        if cam and topic == "/videre/images":
            # Only feed frames 0..359 to the odometer.
            if -1 <= framecounter and framecounter < 360:
                imgR = imgAdapted(msg.images[0])
                imgL = imgAdapted(msg.images[1])
                af = SparseStereoFrame(imgL, imgR)
                pose = vo.handle_frame(af)
                visualize.viz(vo, af)
            framecounter += 1
    print "distance from start:", vo.pose.distance()
    vo.summarize_timers()
def test_stereo(self):
    """Disparity accuracy test: shift a synthetic right image by a known
    (sub-pixel) horizontal offset and verify the mean keypoint disparity
    recovers that offset to within a quarter pixel."""
    # Fix: the original `open("wallcal.ini").read()` leaked the file handle
    # to the garbage collector; close it deterministically.
    f = open("wallcal.ini")
    try:
        cam = camera.VidereCamera(f.read())
    finally:
        f.close()
    #lf = Image.open("wallcal-L.bmp").convert("L")
    #rf = Image.open("wallcal-R.bmp").convert("L")
    for offset in [1, 10, 10.25, 10.5, 10.75, 11, 63]:
        lf = Image.open("snap.png").convert("L")
        rf = Image.open("snap.png").convert("L")
        # Magnify 16x horizontally so an integer-pixel shift encodes a
        # 1/16-pixel offset, shift left by offset*16, then shrink back.
        rf = rf.resize((16 * 640, 480))
        rf = ImageChops.offset(rf, -int(offset * 16), 0)
        rf = rf.resize((640, 480), Image.ANTIALIAS)
        for gradient in [False, True]:
            af = SparseStereoFrame(lf, rf, gradient)
            vo = VisualOdometer(cam)
            vo.find_keypoints(af)
            vo.find_disparities(af)
            # Mean disparity over all keypoints should match the injected
            # offset to within 0.25 pixel.
            error = offset - sum([d for (x, y, d) in af.kp]) / len(af.kp)
            self.assert_(abs(error) < 0.25)
    if 0:
        # Disabled debug visualization.  NOTE(review): references af0/af1,
        # which are not defined in this method -- it would NameError if
        # re-enabled as-is.
        scribble = Image.merge("RGB", (lf, rf, Image.new("L", lf.size))).resize((1280, 960))
        #scribble = Image.merge("RGB", (Image.fromstring("L", lf.size, af0.lgrad),Image.fromstring("L", lf.size, af0.rgrad),Image.new("L", lf.size))).resize((1280,960))
        draw = ImageDraw.Draw(scribble)
        for (x, y, d) in af0.kp:
            draw.line([(2 * x, 2 * y), (2 * x - 2 * d, 2 * y)], fill=(255, 255, 255))
        for (x, y, d) in af1.kp:
            draw.line([(2 * x, 2 * y + 1), (2 * x - 2 * d, 2 * y + 1)], fill=(0, 0, 255))
def display_params(self, iar): if not self.vo: cam = camera.VidereCamera(iar.data) print "cam.params", cam.params self.vo = VisualOdometer(cam) self.started = None if self.mode == 'learn': self.library = set() self.previous_keyframe = None self.know_state = 'lost' self.best_show_pose = None self.mymarker1.floor()
def params(self, pmsg): if not self.vo: self.cam = camera.VidereCamera(pmsg.data) if DESCRIPTOR == 'CALONDER': self.vo = VisualOdometer( self.cam, descriptor_scheme=DescriptorSchemeCalonder()) self.desc_diff_thresh = 0.001 elif DESCRIPTOR == 'SAD': self.vo = Tracker(self.cam) self.desc_diff_thresh = 2000.0 else: print "Unknown descriptor" return
def __init__(self):
    # No per-instance state; detection parameters are fixed in detect().
    pass

def detect(self, frame, target_points):
    """Run Harris corner detection on a frame.

    Args:
      frame: object exposing rawdata plus a (width, height) size tuple.
      target_points: NOTE(review): accepted but unused -- the corner budget
        passed to VO.harris below is hard-coded to 1000.

    Returns:
      The corner list produced by VO.harris.
    """
    quality_level = 0.01
    min_distance = 10.0
    return VO.harris(frame.rawdata, frame.size[0], frame.size[1], 1000, quality_level, min_distance)

# Bag-replay driver.  NOTE(review): `filename` and `cam` are defined earlier
# in the file (outside this chunk); the `vos` list below continues past the
# end of this chunk.
for topic, msg, t in rosrecord.logplayer(filename):
    if rospy.is_shutdown():
        break
    if topic.endswith("videre/cal_params") and not cam:
        print msg.data
        cam = camera.VidereCamera(msg.data)
        (Fx, Fy, Tx, Clx, Crx, Cy) = cam.params
        # Rescale the stereo baseline -- presumably a rig correction factor;
        # TODO confirm the origin of the 7.30/7.12 ratio.
        Tx /= (7.30 / 7.12)
        cam = camera.Camera((Fx, Fy, Tx, Clx, Crx, Cy))
        # Odometer configurations to compare against each other on this bag.
        vos = [
          VisualOdometer(cam, feature_detector = FeatureDetectorFast(), descriptor_scheme = DescriptorSchemeSAD(), sba = None),
          VisualOdometer(cam, feature_detector = FeatureDetectorMine(), inlier_error_threshold = 1.0, descriptor_scheme = DescriptorSchemeSAD(), sba = (3,10,10)),
          #VisualOdometer(cam, feature_detector = FeatureDetectorFast(), descriptor_scheme = DescriptorSchemeSAD(), sba = (3,8,10)),
          #VisualOdometer(cam, feature_detector = FeatureDetectorFast(), descriptor_scheme = DescriptorSchemeSAD()),
          #VisualOdometer(cam, feature_detector = FeatureDetectorFast(), descriptor_scheme = DescriptorSchemeSAD(), scavenge = True),
          #VisualOdometer(cam, feature_detector = FeatureDetectorFast(), descriptor_scheme = DescriptorSchemeSAD(), scavenge = True, inlier_thresh = 100),
          #VisualOdometer(cam, feature_detector = FeatureDetectorHarris(), descriptor_scheme = DescriptorSchemeSAD()),
          #VisualOdometer(cam, feature_detector = FeatureDetectorFast(), descriptor_scheme = DescriptorSchemeSAD()),
def display_params(self, iar):
    """Parse an incoming calibration message into a VidereCamera.

    NOTE(review): the constructed camera is bound to a local and then
    discarded -- no state is stored and nothing is returned.  Confirm whether
    this is a deliberate parse-only check or a missing `self.` assignment.
    """
    cam = camera.VidereCamera(iar.data)
def handle_params(self, iar):
    """Build the visual odometer from the first calibration message received
    and reset the timing bookkeeping; subsequent messages are ignored."""
    if self.vo:
        return
    stereo_cam = camera.VidereCamera(iar.data)
    self.vo = VisualOdometer(stereo_cam)
    self.intervals = []
    self.took = []