Example No. 1
  def xtest_image_pan(self):
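    # Simulate a panning camera: crop the same source image at shifting x
    # offsets and run the full visual-odometry pipeline on each crop.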
    cam = camera.Camera((1.0, 1.0, 89.23, 320., 320., 240.0))
    vo = VisualOdometer(cam)
    prev_af = None
    pose = None
    im = Image.open("img1.pgm")
    for x in [0,5]: # range(0,100,10) + list(reversed(range(0, 100, 10))):
      lf = im.crop((x, 0, x + 640, 480))
      rf = im.crop((x, 0, x + 640, 480))
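      # Left and right frames are identical crops, so stereo disparity is
      # essentially zero; the test exercises only frame-to-frame motion.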
      af = SparseStereoFrame(lf, rf)

      vo.find_keypoints(af)

      vo.find_disparities(af)
      vo.collect_descriptors(af)

      if prev_af:
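        # Match keypoints against the previous frame, estimate the relative
        # pose, and print the first ten matched pairs with their x shift.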
        pairs = vo.temporal_match(prev_af, af)
        pose = vo.solve(prev_af.kp, af.kp, pairs)
        for i in range(10):
          old = prev_af.kp[pairs[i][0]]
          new = af.kp[pairs[i][1]]
          print old, new, new[0] - old[0]
      prev_af = af
      print "frame", x, "has", len(af.kp), "keypoints", pose
Example No. 2
  def test_stereo(self):
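    # Build a synthetic stereo pair with a known horizontal offset and check
    # that the mean recovered disparity is within 0.25 pixels of it.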
    cam = camera.VidereCamera(open("wallcal.ini").read())
    #lf = Image.open("wallcal-L.bmp").convert("L")
    #rf = Image.open("wallcal-R.bmp").convert("L")
    for offset in [ 1, 10, 10.25, 10.5, 10.75, 11, 63]:
      lf = Image.open("snap.png").convert("L")
      rf = Image.open("snap.png").convert("L")
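      # Upsample 16x, shift by offset*16 whole pixels, then downsample to
      # give the right image a sub-pixel horizontal shift.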
      rf = rf.resize((16 * 640, 480))
      rf = ImageChops.offset(rf, -int(offset * 16), 0)
      rf = rf.resize((640,480), Image.ANTIALIAS)
      for gradient in [ False, True ]:
        af = SparseStereoFrame(lf, rf, gradient)
        vo = VisualOdometer(cam)
        vo.find_keypoints(af)
        vo.find_disparities(af)
        error = offset - sum([d for (x,y,d) in af.kp]) / len(af.kp)
        self.assert_(abs(error) < 0.25) 

    if 0:
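      # Disabled debug visualization; af0 and af1 are not defined in this
      # version of the test, so the block never runs.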
      scribble = Image.merge("RGB", (lf,rf,Image.new("L", lf.size))).resize((1280,960))
      #scribble = Image.merge("RGB", (Image.fromstring("L", lf.size, af0.lgrad),Image.fromstring("L", lf.size, af0.rgrad),Image.new("L", lf.size))).resize((1280,960))
      draw = ImageDraw.Draw(scribble)
      for (x,y,d) in af0.kp:
        draw.line([ (2*x,2*y), (2*x-2*d,2*y) ], fill = (255,255,255))
      for (x,y,d) in af1.kp:
        draw.line([ (2*x,2*y+1), (2*x-2*d,2*y+1) ], fill = (0,0,255))
Example No. 3
  def test_sad(self):
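    # Matching a frame against itself should pair every keypoint with itself
    # (a == b for every match).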
    cam = camera.Camera((389.0, 389.0, 89.23, 323.42, 323.42, 274.95))
    vo = VisualOdometer(cam)

    class adapter:
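      # Minimal wrapper exposing the rawdata and size attributes that the
      # keypoint detector reads from an image object.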
      def __init__(self, im):
        self.rawdata = im.tostring()
        self.size = im.size

    im = adapter(Image.open("img1.pgm"))
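    # Raise the detector threshold so only the strongest keypoints are kept.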
    vo.feature_detector.thresh *= 15
    vo.find_keypoints(im)
    im.kp = im.kp2d
    vo.collect_descriptors(im)
    print len(im.kp)
    matches = vo.temporal_match(im, im)
    for (a,b) in matches:
      self.assert_(a == b)
Example No. 4
        if topic.endswith("videre/cal_params") and not cam:
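            # First calibration message: build the camera model and an
            # odometer using the FAST detector with SAD descriptors.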
            cam = camera.VidereCamera(msg.data)

            vo = VisualOdometer(cam,
                                feature_detector=FeatureDetectorFast(),
                                descriptor_scheme=DescriptorSchemeSAD())

        if cam and topic.endswith("videre/images"):
            imgR = imgAdapted(msg.images[0])
            imgL = imgAdapted(msg.images[1])
            assert msg.images[0].label == "right_rectified"
            assert msg.images[1].label == "left_rectified"

            frame = SparseStereoFrame(imgL, imgR)
            vo.find_keypoints(frame)
            vo.find_disparities(frame)
            #frame.kp = [ (x,y,d) for (x,y,d) in frame.kp if d > 8]
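            # Accumulate every keypoint disparity seen in this recording.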
            all_ds += [d for (x, y, d) in frame.kp]

            vo.collect_descriptors(frame)
            if prev_frame:
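                # Estimate motion relative to the previous frame and sum the
                # translation components.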
                pairs = vo.temporal_match(prev_frame, frame)
                solution = vo.solve(prev_frame.kp, frame.kp, pairs, True)
                (inl, rot, shift) = solution
                sos += numpy.array(shift)
                print sos
            prev_frame = frame

            framecounter += 1
    ds[filename] = all_ds