Example #1
    def test_sad(self):
        # Matching a frame against itself: every feature should pair with itself.
        im = self.img640x480
        fd = FeatureDetectorStar(300)
        ds = DescriptorSchemeSAD()
        af = SparseStereoFrame(im,
                               im,
                               feature_detector=fd,
                               descriptor_scheme=ds)
        for (a, b) in af.match(af):
            self.assert_(a == b)
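DescriptorSchemeSAD is named for the sum-of-absolute-differences measure; a toy analogue on plain integer patches (not the library's implementation) shows why matching a frame against itself pairs each feature with itself: a patch's SAD distance to itself is zero.

def sad(patch_a, patch_b):
    # Toy SAD (sum of absolute differences) between two equal-length patches.
    return sum(abs(a - b) for a, b in zip(patch_a, patch_b))

patch = [10, 20, 30, 40]
assert sad(patch, patch) == 0             # a patch matches itself perfectly
assert sad(patch, [12, 18, 33, 40]) == 7  # differences accumulate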
Example #2
    def test_sparse_stereo(self):
        left = Image.new("L", (640, 480))
        circle(left, 320, 200, 4, 255)

        fd = FeatureDetectorStar(300)
        ds = DescriptorSchemeSAD()
        for disparity in range(20):
            right = Image.new("L", (640, 480))
            circle(right, 320 - disparity, 200, 4, 255)
            sf = SparseStereoFrame(left,
                                   right,
                                   feature_detector=fd,
                                   descriptor_scheme=ds)
            self.assertAlmostEqual(sf.lookup_disparity(320, 200), disparity, 0)
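The test encodes the stereo convention that disparity is the leftward shift of a feature between the left and right images: a circle at x = 320 on the left and x = 320 - d on the right should look up as disparity d. A toy check of that arithmetic (not the library's matcher):

def expected_disparity(x_left, x_right):
    return x_left - x_right

for d in range(20):
    assert expected_disparity(320, 320 - d) == d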
Example #3
    def handle_raw_stereo(self, msg):
        print "incoming picture  ", msg.header.stamp.to_seconds()

        size = (msg.left_info.width, msg.left_info.height)
        cam = camera.StereoCamera(msg.right_info)
        if self.vo is None:
            self.vo = VisualOdometer(cam,
                                     scavenge=False,
                                     inlier_error_threshold=3.0,
                                     sba=None,
                                     inlier_thresh=100,
                                     position_keypoint_thresh=0.2,
                                     angle_keypoint_thresh=0.15)
            self.keys = set()
            self.v = Vis('raw stereo')

        #pair = [Image.fromstring("L", size, i.uint8_data.data) for i in [ msg.left_image, msg.right_image ]]
        pair = [dcamImage(i) for i in [msg.left_image, msg.right_image]]
        af = SparseStereoFrame(pair[0],
                               pair[1],
                               feature_detector=self.fd,
                               descriptor_scheme=self.ds)
        af.t = msg.header.stamp.to_seconds()

        self.vo.handle_frame(af)

        self.v.show(msg.left_image.uint8_data.data, [])
        k = self.vo.keyframe
        if k.id not in self.keys:
            self.keys.add(k.id)
            picture = Picture(k.features(), k.descriptors())
            self.pm.newpic(k.t, cam, picture, True)

        if 0:  # disabled: publish every frame, not just keyframes
            picture = Picture(af.features(), af.descriptors())
            self.pm.newpic(af.t, cam, picture, False)

        while True:
            if self.tfq.qsize() == 0:
                break
            tfmsg = self.tfq.get()
            if min([t.header.stamp.to_seconds() for t in tfmsg.transforms
                    ]) > (msg.header.stamp.to_seconds() + 1.0):
                break
            self.handle_tf(tfmsg)

        self.pmlock.acquire()
        self.pm.resolve(self.transformer)
        self.pmlock.release()
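The drain loop at the end keeps tf consumption roughly in step with the image stream: it pulls queued transform messages until one arrives whose earliest transform is more than a second newer than the current image. A toy version with plain floats as timestamps (hypothetical values; the real loop passes each tf message to handle_tf):

import Queue

tfq = Queue.Queue()
for t in [0.1, 0.5, 2.0]:          # pretend transform timestamps
    tfq.put(t)

image_time = 0.6
while tfq.qsize() > 0:
    t = tfq.get()
    if t > image_time + 1.0:
        break                      # 2.0 is too far ahead of the image; stop draining
    # handle_tf(t) would run here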
Example #4
    def xtest_smoke(self):
        cam = camera.Camera((389.0, 389.0, 89.23, 323.42, 323.42, 274.95))
        vo = VisualOdometer(cam)
        vo.reset_timers()
        dir = "/u/konolige/vslam/data/indoor1/"

        trail = []
        prev_scale = None

        # Several schedules were tried; only the last assignment takes effect.
        schedule = [(f + 1000)
                    for f in (range(0, 100) + range(100, 0, -1) + [0] * 10)]
        schedule = range(1507)
        schedule = range(30)
        for f in schedule:
            lf = Image.open("%s/left-%04d.ppm" % (dir, f))
            rf = Image.open("%s/right-%04d.ppm" % (dir, f))
            lf.load()
            rf.load()
            af = SparseStereoFrame(lf, rf)

            vo.handle_frame(af)
            print f, vo.inl
            trail.append(numpy.array(vo.pose.M[0:3, 3].T)[0])

        def d(a, b):
            diff = a - b
            return numpy.sqrt(numpy.dot(diff, diff.conj()))

        print "covered   ", sum([d(a, b) for (a, b) in zip(trail, trail[1:])])
        print "from start", d(trail[0], trail[-1]), trail[0] - trail[-1]

        vo.summarize_timers()
        print vo.log_keyframes
Example #5
    def xtest_smoke_bag(self):
        import rosrecord
        import visualize

        class imgAdapted:
            def __init__(self, i):
                self.i = i
                self.size = (i.width, i.height)

            def tostring(self):
                return self.i.data

        cam = None
        filename = "/u/prdata/videre-bags/loop1-mono.bag"
        filename = "/u/prdata/videre-bags/vo1.bag"
        framecounter = 0
        for topic, msg in rosrecord.logplayer(filename):
            print framecounter
            if rospy.is_shutdown():
                break
            #print topic,msg
            if topic == "/videre/cal_params" and not cam:
                cam = camera.VidereCamera(msg.data)
                vo = VisualOdometer(cam)
            if cam and topic == "/videre/images":
                if -1 <= framecounter < 360:
                    imgR = imgAdapted(msg.images[0])  # this bag stores the right image first
                    imgL = imgAdapted(msg.images[1])
                    af = SparseStereoFrame(imgL, imgR)
                    pose = vo.handle_frame(af)
                    visualize.viz(vo, af)
                framecounter += 1
        print "distance from start:", vo.pose.distance()
        vo.summarize_timers()
Example #6
    def xtest_image_pan(self):
        cam = camera.Camera((1.0, 1.0, 89.23, 320., 320., 240.0))
        vo = VisualOdometer(cam)
        prev_af = None
        pose = None
        im = Image.open("img1.pgm")
        for x in [0,
                  5]:  # range(0,100,10) + list(reversed(range(0, 100, 10))):
            lf = im.crop((x, 0, x + 640, 480))
            rf = im.crop((x, 0, x + 640, 480))
            af = SparseStereoFrame(lf, rf)

            if prev_af:
                pairs = vo.temporal_match(prev_af, af)
                pose = vo.solve(prev_af.kp, af.kp, pairs)
                for i in range(10):
                    old = prev_af.kp[pairs[i][0]]
                    new = af.kp[pairs[i][1]]
                    print old, new, new[0] - old[0]
            prev_af = af
            print "frame", x, "has", len(af.kp), "keypoints", pose

        if 0:  # disabled visualization; af0 and af1 date from an older revision of this test
            scribble = Image.merge("RGB",
                                   (lf, rf, Image.new("L", lf.size))).resize(
                                       (1280, 960))
            #scribble = Image.merge("RGB", (Image.fromstring("L", lf.size, af0.lgrad),Image.fromstring("L", lf.size, af0.rgrad),Image.new("L", lf.size))).resize((1280,960))
            draw = ImageDraw.Draw(scribble)
            for (x, y, d) in af0.kp:
                draw.line([(2 * x, 2 * y), (2 * x - 2 * d, 2 * y)],
                          fill=(255, 255, 255))
            for (x, y, d) in af1.kp:
                draw.line([(2 * x, 2 * y + 1), (2 * x - 2 * d, 2 * y + 1)],
                          fill=(0, 0, 255))
Example #7
    def handle_raw_stereo(self, msg):
        size = (msg.left_info.width, msg.left_info.height)
        if self.vo is None:
            cam = camera.StereoCamera(msg.right_info)
            self.fd = FeatureDetectorFast(300)
            self.ds = DescriptorSchemeCalonder()
            self.vo = VisualOdometer(cam,
                                     scavenge=False,
                                     inlier_error_threshold=3.0,
                                     sba=None,
                                     inlier_thresh=100,
                                     position_keypoint_thresh=0.2,
                                     angle_keypoint_thresh=0.15)
            self.vo.num_frames = self.startframe
        pair = [Image.fromstring("L", size, i.uint8_data.data)
                for i in [msg.left_image, msg.right_image]]
        af = SparseStereoFrame(pair[0],
                               pair[1],
                               feature_detector=self.fd,
                               descriptor_scheme=self.ds)
        self.vo.handle_frame(af)
        self.frame_timestamps[af.id] = msg.header.stamp

        if self.skel.add(self.vo.keyframe):
            print "====>", self.vo.keyframe.id, self.frame_timestamps[self.vo.keyframe.id]
            #print self.tf.getFrameStrings()
            #assert self.tf.frameExists("wide_stereo_optical_frame")
            #assert self.tf.frameExists("odom_combined")
            N = sorted(self.skel.nodes)
            for a, b in zip(N, N[1:]):
                if (a, b) in self.skel.edges:
                    t0 = self.frame_timestamps[a]
                    t1 = self.frame_timestamps[b]
                    if self.tf.canTransformFull("wide_stereo_optical_frame", t0,
                                                "wide_stereo_optical_frame", t1,
                                                "odom_combined"):
                        t, r = self.tf.lookupTransformFull("wide_stereo_optical_frame", t0,
                                                           "wide_stereo_optical_frame", t1,
                                                           "odom_combined")
                        relpose = Pose()
                        relpose.fromlist(self.tf.fromTranslationRotation(t, r))
                        self.wheel_odom_edges.add((a, b, relpose, 1.0))
            self.send_map(msg.header.stamp)
Example #8
    def test_stereo_accuracy(self):
        fd = FeatureDetectorStar(300)
        ds = DescriptorSchemeSAD()
        for offset in [1, 10, 10.25, 10.5, 10.75, 11, 63]:
            lf = self.img640x480
            rf = self.img640x480
            # Sub-pixel shift: widen the right image 16x, shift by an integer
            # number of 1/16-pixel steps, then shrink back down.
            rf = rf.resize((16 * 640, 480))
            rf = ImageChops.offset(rf, -int(offset * 16), 0)
            rf = rf.resize((640, 480), Image.ANTIALIAS)
            for gradient in [False, True]:
                af = SparseStereoFrame(lf,
                                       rf,
                                       gradient,
                                       feature_detector=fd,
                                       descriptor_scheme=ds)
                kp = [(x, y, d) for (x, y, d) in af.features() if (x > 64)]
                error = offset - sum([d for (x, y, d) in kp]) / float(len(kp))
                print error
                self.assert_(abs(error) < 0.25)
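The fractional offsets rely on that resize-shift-resize trick; the same idea as a standalone helper (hypothetical function, plain PIL):

import Image
import ImageChops

def subpixel_shift(im, offset):
    # Widen 16x so an integer shift equals 1/16 pixel at full size,
    # shift left by int(offset * 16) steps, then shrink back down.
    w, h = im.size
    wide = im.resize((16 * w, h))
    wide = ImageChops.offset(wide, -int(offset * 16), 0)
    return wide.resize((w, h), Image.ANTIALIAS)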
Example #9
    def test_feature_detectors(self):
        L = self.img640x480
        R = ImageChops.offset(L, -3, 0)
        for fdt in [
                FeatureDetectorFast, FeatureDetectorStar, FeatureDetectorHarris
        ]:
            feat = []
            for count in range(30, 300, 5):
                fd = fdt(count)
                frame = SparseStereoFrame(L,
                                          R,
                                          feature_detector=fd,
                                          descriptor_scheme=None)
                prev_feat, feat = feat, frame.features()
                # At least half the number of features requested
                self.assert_((count / 2) < len(feat))
                # Should never return too many features - too few is OK because of stereo
                self.assert_(len(feat) <= count)
                # Should have same or more features with greater count
                self.assert_(len(prev_feat) <= len(feat))
                # Previous feature call is always a sublist of this feature call
                self.assert_(feat[:len(prev_feat)] == prev_feat)
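The last two assertions capture a useful detector contract: raising the requested count should extend the previous result, never reorder it. On plain lists the checks look like this (toy data, independent of the detector API):

feat_30 = [(10, 12, 3), (40, 7, 2)]           # features returned at count=30
feat_35 = feat_30 + [(99, 3, 1)]              # features returned at count=35
assert len(feat_30) <= len(feat_35)           # monotone in the requested count
assert feat_35[:len(feat_30)] == feat_30      # prefix-stable ordering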
Example #10
    def test_pe(self):
        random.seed(0)
        cloud = [(4 * (random.random() - 0.5), 4 * (random.random() - 0.5),
                  4 * (random.random() - 0.5)) for i in range(20)]
        vo = VisualOdometer(
            camera.Camera(
                (389.0, 389.0, 89.23 * 1e-3, 323.42, 323.42, 274.95)))
        stereo_cam = {}
        af = {}
        fd = FeatureDetectorStar(300)
        ds = DescriptorSchemeSAD()
        for i in range(5):
            stereo_cam[i] = camera.Camera(
                (389.0, 389.0, .080 + .010 * i, 323.42, 323.42, 274.95))
            desired_pose = Pose()
            cam = ray_camera(desired_pose, stereo_cam[i])
            imL = Image.new("RGB", (640, 480))
            imR = Image.new("RGB", (640, 480))
            scene = []
            scene += [
                object(isphere(vec3(0, 0, 0), 1000), shadeLitCloud,
                       {'scale': 0.001})
            ]
            scene += [
                object(isphere(vec3(x, y, z + 6), 0.3), shadeLitCloud,
                       {'scale': 3}) for (x, y, z) in cloud
            ]
            imL, imR = [im.convert("L") for im in [imL, imR]]
            render_stereo_scene(imL, imR, None, cam, scene, 0)
            af[i] = SparseStereoFrame(imL,
                                      imR,
                                      feature_detector=fd,
                                      descriptor_scheme=ds)
            vo.process_frame(af[i])
        pe = PoseEstimator()
        for i1 in range(5):
            for i0 in range(5):
                pairs = vo.temporal_match(af[i1], af[i0])
                res = pe.estimateC(stereo_cam[i0], af[i0].features(),
                                   stereo_cam[i1], af[i1].features(), pairs,
                                   False)
                x, y, z = res[2]
                self.assertAlmostEqual(x, 0, 0)
                self.assertAlmostEqual(y, 0, 0)
                self.assertAlmostEqual(z, 0, 0)
Example #11
# Camera parameters (Fx and Fy are assumed here; 389.0 matches the focal
# lengths used in the other examples on this page)
Fx = 389.0
Fy = 389.0
Tx = 0.08923
Clx = 323.42
Crx = 323.42
Cy = 274.95

# Camera
cam = camera.Camera((Fx, Fy, Tx, Clx, Crx, Cy))

# Feature Detector
fd = FeatureDetectorFast(300)

# Descriptor Scheme
ds = DescriptorSchemeCalonder()

# f0 is a stereo pair
f0 = SparseStereoFrame(Image.open("f0-left.png"), Image.open("f0-right.png"),
                       feature_detector = fd, descriptor_scheme = ds)

# f1 is also a stereo pair
f1 = SparseStereoFrame(Image.open("f1-left.png"), Image.open("f1-right.png"),
                       feature_detector = fd, descriptor_scheme = ds)

# Create a pose estimator, and set it to do 500 RANSAC iterations
pe = PoseEstimator()
pe.setNumRansacIterations(500)

# Find the correspondences between keypoints in f0 and f1.  Discard the strength component.
pairs = [(a, b) for (a, b, _) in f1.match(f0)]

inliers, rotation, translation = pe.estimateC(cam, f0.features(),
                                              cam, f1.features(), pairs)

# Report the estimate
print inliers, rotation, translation
Example #12
skel = Skeleton(stereo_cam, descriptor_scheme=ds)
vo = VisualOdometer(stereo_cam,
                    scavenge=False,
                    sba=None,
                    inlier_error_threshold=3.0)
vo.num_frames = 0

ground_truth = []
started = time.time()

connected = False
for i in range(0, 800):
    print i
    (desired_pose, imL, imR) = getImage(i)
    ground_truth.append(desired_pose.xform(0, 0, 0))
    f = SparseStereoFrame(imL, imR, feature_detector=fd, descriptor_scheme=ds)
    vo.handle_frame(f)
    skel.add(vo.keyframe, connected)
    connected = True

if 0:  # disabled: one-shot localization check against frame 408
    (desired_pose, imL, imR) = getImage(408)
    ground_truth.append(desired_pose.xform(0, 0, 0))
    qf = SparseStereoFrame(imL, imR)
    skel.localization(qf)
    sys.exit(0)

print "nodes", skel.nodes

ended = time.time()
took = ended - started
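The excerpt stops after computing took; a closing report in the script's own print style (the exact format is an assumption) might be:

print "processed %d frames in %.1fs (%.1f fps)" % (800, took, 800 / took)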
Example #13
# Stereo camera (assumed parameters, matching the Videre calibration used in
# the other examples on this page)
cam = camera.Camera((389.0, 389.0, 0.08923, 323.42, 323.42, 274.95))

# Feature Detector
fd = FeatureDetectorFast(300)

# Descriptor Scheme
ds = DescriptorSchemeCalonder()

# Visual Odometer
vo = VisualOdometer(cam)

for i in range(1000):
    # Left image
    lim = Image.open("%s/%06dL.png" % (sys.argv[1], i))

    # Right image
    rim = Image.open("%s/%06dR.png" % (sys.argv[1], i))

    # Combine the images to make a stereo frame
    frame = SparseStereoFrame(lim,
                              rim,
                              feature_detector=fd,
                              descriptor_scheme=ds)

    # Supply the stereo frame to the visual odometer
    pose = vo.handle_frame(frame)
    print i, pose
    print

fd.summarize_timers()
ds.summarize_timers()
vo.summarize_timers()
Example #14
    def xtest_sim(self):
        # Test process with one 'ideal' camera, one real-world Videre
        camera_param_list = [
            # (200.0, 200.0, 3.00,  320.0, 320.0, 240.0),
            (389.0, 389.0, 1e-3 * 89.23, 323.42, 323.42, 274.95)
        ]

        def move_forward(i, prev):
            """ Forward 1 meter, turn around, Back 1 meter """
            if i == 0:
                return Pose(rotation(0, 0, 1, 0), (0, 0, 0))
            elif i < 10:
                return prev * Pose(rotation(0, 0, 1, 0), (0, 0, .1))
            elif i < 40:
                return prev * Pose(rotation(math.pi / 30, 0, 1, 0), (0, 0, 0))
            elif i < 50:
                return prev * Pose(rotation(0, 0, 1, 0), (0, 0, .1))

        for movement in [move_forward]:  # move_combo, move_Yrot ]:
            for cam_params in camera_param_list:
                cam = camera.Camera(cam_params)

                random.seed(0)

                def rr():
                    return 2 * random.random() - 1.0

                model = [(3 * rr(), 1 * rr(), 3 * rr()) for i in range(300)]

                def rndimg():
                    b = "".join(random.sample([chr(c) for c in range(256)],
                                              64))
                    return Image.fromstring("L", (8, 8), b)

                def sprite(dst, x, y, src):
                    try:
                        dst.paste(src, (int(x) - 4, int(y) - 4))
                    except Exception:
                        print "paste failed", x, y

                palette = [rndimg() for i in model]
                expected = []
                afs = []
                P = None
                for i in range(50):
                    print "render", i
                    P = movement(i, P)
                    li = Image.new("L", (640, 480))
                    ri = Image.new("L", (640, 480))
                    q = 0
                    for (mx, my, mz) in model:
                        pp = None
                        pt_camera = (numpy.dot(P.M.I,
                                               numpy.array([mx, my, mz, 1]).T))
                        (cx, cy, cz, cw) = numpy.array(pt_camera).ravel()
                        if cz > .100:
                            ((xl, yl), (xr, yr)) = cam.cam2pixLR(cx, cy, cz)
                            if 0 <= xl < 640 and 0 <= yl < 480:
                                sprite(li, xl, yl, palette[q])
                                sprite(ri, xr, yr, palette[q])
                        q += 1
                    expected.append(P)
                    afs.append(SparseStereoFrame(imgStereo(li), imgStereo(ri)))

            vo = VisualOdometer(cam)
            for i, (af, ep) in enumerate(zip(afs, expected)):
                vo.handle_frame(af)
                if 0:
                    print vo.pose.xform(0, 0, 0)
                    print "expected", ep.M
                    print "vo.pose", vo.pose.M
                    print numpy.abs((ep.M - vo.pose.M))
                self.assert_(
                    numpy.alltrue(numpy.abs((ep.M - vo.pose.M)) < 0.2))

            return  # NOTE: everything below is disabled by this early return

            def run(vos):
                for af in afs:
                    for vo in vos:
                        vo.handle_frame(af)

            # Check that the pose estimators are truly independent

            v1 = VisualOdometer(cam,
                                feature_detector=FeatureDetectorFast(),
                                descriptor_scheme=DescriptorSchemeSAD(),
                                inlier_error_threshold=1.0)
            v2 = VisualOdometer(cam,
                                feature_detector=FeatureDetectorFast(),
                                descriptor_scheme=DescriptorSchemeSAD(),
                                inlier_error_threshold=2.0)
            v8 = VisualOdometer(cam,
                                feature_detector=FeatureDetectorFast(),
                                descriptor_scheme=DescriptorSchemeSAD(),
                                inlier_error_threshold=8.0)
            v1a = VisualOdometer(cam,
                                 feature_detector=FeatureDetectorFast(),
                                 descriptor_scheme=DescriptorSchemeSAD(),
                                 inlier_error_threshold=1.0)
            run([v1])
            run([v2, v8, v1a])
            self.assert_(v1.pose.xform(0, 0, 0) == v1a.pose.xform(0, 0, 0))
            for a, b in [(v1, v2), (v2, v8), (v1, v8)]:
                self.assert_(a.pose.xform(0, 0, 0) != b.pose.xform(0, 0, 0))

            return
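The render loop in this test takes each world point into the camera frame with the inverse of the current pose matrix before projecting it. A standalone numpy sketch of just that step (identity pose, one point; the assertion mirrors the cz > .100 visibility check above):

import numpy

M = numpy.matrix(numpy.eye(4))                # camera pose (identity here)
world_pt = numpy.array([0.0, 0.0, 6.0, 1.0])  # homogeneous world point, 6 m ahead
pt_camera = numpy.dot(M.I, world_pt.T)
(cx, cy, cz, cw) = numpy.array(pt_camera).ravel()
assert cz > .100                              # in front of the camera, so it renders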
Example #15
class Demo:
  def __init__(self, source):
    self.cvim = None

    # These variables are internal:

    self.stereo_cam = source.cam()
    print "Camera is", self.stereo_cam
    self.connected = False
    self.snail_trail = []
    self.source = source
    self.inlier_history = []
    self.label = 0
    self.skel_dist_thresh = 0.1        # in meters

    # These variables can be tweaked:

    self.fd = FeatureDetectorFast(300)
#    self.fd = FeatureDetectorStar(500)
    self.ds = DescriptorSchemeCalonder()
    self.camera_preview = False
    self.vo = VisualOdometer(self.stereo_cam,
                             scavenge = False,
                             position_keypoint_thresh = 0.1,
                             angle_keypoint_thresh = 3*pi/180,
                             sba=None,
                             num_ransac_iters=500,
                             inlier_error_threshold = 3.0)
    self.skel = Skeleton(self.stereo_cam,
                         link_thresh = 100,
                         descriptor_scheme = self.ds,
                         optimize_after_addition = False)
    self.skel.node_vdist = 0
    self.running = -1                   # run forever

    if self.camera_preview:
      cv.NamedWindow("Camera")
      cv.MoveWindow("Camera", 0, 700)

  def handleFrame(self):
    r = False

    (w,h,li,ri) = self.source.getImage()

    self.f = SparseStereoFrame(li, ri,
                               disparity_range = 96,
                               feature_detector = self.fd,
                               descriptor_scheme = self.ds)

    self.vo.handle_frame(self.f)
    print self.vo.inl
    self.inlier_history = self.inlier_history[-20:] + [self.vo.inl]
    # If the VO inlier count falls below 20, we're not well-connected for this link
    if self.vo.inl < 20:
      self.connected = False
      self.f.pose = Pose()
      self.snail_trail = []

    # Add a frame to graph if:
    #   Frame zero, or
    #   There have been N successful preceding frames, and either
    #   Not connected,
    #   or if connected, the distance from the youngest graph frame to this frame is > thresh

    add_to_graph = None
    if self.vo.keyframe.id == 0 and len(self.skel.nodes) == 0:
      add_to_graph = "Zero keyframe"
    elif min(self.inlier_history) > 20:
      if not self.connected:
        add_to_graph = "Unconnected - but now recovering good poses"
        # This is a new graph, so generate a new label
        self.label += 1
      else:
        relpose = ~self.last_added_pose * self.f.pose
        if relpose.distance() > self.skel_dist_thresh:
          add_to_graph = "Connected and distance thresh passed"

    if add_to_graph:
      self.skel.setlabel(self.label)
      self.skel.add(self.vo.keyframe, self.connected)
      print "ADD %d %s" % (self.f.id, add_to_graph)
      self.connected = True
      self.last_added_pose = self.f.pose
      self.snail_trail = []
#      self.ioptimize(10)


    self.snail_trail.append(self.f.pose)

    # Render the camera view using OpenCV, then load it into an OpenGL texture
    if True:
      if not self.cvim:
        self.cvim = cv.CreateImage((w,h/2), cv.IPL_DEPTH_8U, 3)

      im = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 1)
      im2 = cv.CreateImage((w/2,h/2), cv.IPL_DEPTH_8U, 1)
      for i,src in enumerate([li, ri]):
        cv.SetData(im, str(src.tostring()), w)
        cv.Resize(im, im2)
        cv.SetImageROI(self.cvim, (i * w / 2, 0, w / 2, h / 2))
        cv.CvtColor(im2, self.cvim, cv.CV_GRAY2BGR)

      cv.ResetImageROI(self.cvim)

      def annotate(cvim):
        green = cv.RGB(0,255,0)
        red = cv.RGB(0,0,255)
        def half(p):
          return (p[0] / 2, p[1] / 2)
        for (x,y,d) in self.f.features():
          cv.Circle(cvim, half((x, y)), 2, green)
          d = int(d)
          cv.Line(cvim, ((w+x-d)/2, y/2 - 2), ((w+x-d)/2, y/2 + 2), green)
        a_features = self.vo.keyframe.features()
        b_features = self.f.features()
        inliers = set([ (b,a) for (a,b) in self.vo.pe.inl])
        outliers = set(self.vo.pairs) - inliers
        for (a,b) in inliers:
          cv.Line(cvim, half(b_features[b]), half(a_features[a]), green)
        for (a,b) in outliers:
          cv.Line(cvim, half(b_features[b]), half(a_features[a]), red)

      annotate(self.cvim)
      glBindTexture(GL_TEXTURE_2D, demo.textures[2])
      glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, 640, 240, 0, GL_RGB, GL_UNSIGNED_BYTE, self.cvim.tostring())

    if self.camera_preview:
      cv.ShowImage("Camera", self.cvim)
      if cv.WaitKey(10) == 27:
        r = True

    return r

  def anchor(self):
    """ Return the current pose of the most recently added skeleton node """
    id = max(self.skel.nodes)
    return self.skel.newpose(id)

  def optimize(self):
    self.skel.optimize(1000)

  def ioptimize(self, iters=30):
    self.skel.ioptimize(iters)

  def report(self):
    print
    print
    print
    print "-" * 80
    print self.skel.edges
    print "-" * 80

  def pose(self):
    """ Return the current camera pose in the skeleton frame """
    if len(self.skel.nodes) == 0:
      return self.vo.pose
    else:
      return self.anchor() * (~self.snail_trail[0] * self.snail_trail[-1])

  def map_size(self):
    xyz = [demo.skel.newpose(i).xform(0, 0, 0) for i in demo.skel.nodes]
    x_extent = max([x for (x, y, z) in xyz]) - min([x for (x, y, z) in xyz])
    z_extent = max([z for (x, y, z) in xyz]) - min([z for (x, y, z) in xyz])
    return max(x_extent, z_extent)
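The add-to-graph rule spelled out in handleFrame reduces to a small decision function. Below is a distilled sketch (the function and its arguments are hypothetical; the thresholds mirror the demo's defaults of 20 inliers and 0.1 m):

def should_add_keyframe(keyframe_id, num_nodes, inlier_history, connected,
                        relpose_distance, dist_thresh=0.1, min_inliers=20):
    # Frame zero always seeds the graph.
    if keyframe_id == 0 and num_nodes == 0:
        return "Zero keyframe"
    # Otherwise require a run of well-tracked frames first.
    if inlier_history and min(inlier_history) > min_inliers:
        if not connected:
            return "Unconnected - but now recovering good poses"
        if relpose_distance > dist_thresh:
            return "Connected and distance thresh passed"
    return None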
Example #16
    skel = Skeleton(cam)
    if skel_load_filename:
        skel.load(skel_load_filename)
        vos[0].num_frames = max(skel.nodes) + 1
        framecounter = max(skel.nodes) + 1
    # skel.fill("/u/prdata/vslam_data/FtCarson/2007.08/2007.08.29/course3-DTED4-run1/traj.txt", 118016)
    # skel.fill("/u/prdata/vslam_data/FtCarson/2007.08/2007.08.29/course3-DTED5-run1/traj.txt", 13916)
    if skel_fill_filename:
        skel.fill(skel_fill_filename)
    skel.node_vdist = 1
    oe_x = []
    oe_y = []
    oe_home = None
for i, vo in enumerate(vos):
    af = SparseStereoFrame(l_image,
                           r_image,
                           feature_detector=fd,
                           descriptor_scheme=ds)
    vopose = vo.handle_frame(af)
    log_dead_reckon.append(vopose)
    inl_history.append(vo.inl)
    inl_history = inl_history[-2:]
    if cold:
        af.connected = -1
    else:
        af.connected = vo.inl > 7
    # Log keyframes into "pool_loop"
    # Image.fromstring("L", af.lf.size, af.lf.tostring()).save("dump/%06dL.png" % af.id)
    if False and vo.keyframe.id not in keys:
        k = vo.keyframe
        Image.fromstring("L", (640, 480), k.lf.tostring()).save(
            "dump/%06dL.png" % len(keys))