Example #1
0
 def handle_raw_stereo(self, msg):
     size = (msg.left_info.width, msg.left_info.height)
     if self.vo == None:
       cam = camera.StereoCamera(msg.right_info)
       self.fd = FeatureDetectorFast(300)
       self.ds = DescriptorSchemeCalonder()
       self.vo = VisualOdometer(cam, scavenge = False,
                           inlier_error_threshold = 3.0, sba = None,
                           inlier_thresh = 100,
                           position_keypoint_thresh = 0.2, angle_keypoint_thresh = 0.15)
       self.vo.num_frames = self.startframe
     pair = [Image.fromstring("L", size, i.uint8_data.data) for i in [ msg.left_image, msg.right_image ]]
     af = SparseStereoFrame(pair[0], pair[1], feature_detector = self.fd, descriptor_scheme = self.ds)
     self.vo.handle_frame(af)
     self.frame_timestamps[af.id] = msg.header.stamp
     
     if self.skel.add(self.vo.keyframe):
       print "====>", self.vo.keyframe.id, self.frame_timestamps[self.vo.keyframe.id]
       #print self.tf.getFrameStrings()
       #assert self.tf.frameExists("wide_stereo_optical_frame")
       #assert self.tf.frameExists("odom_combined")
       N = sorted(self.skel.nodes)
       for a,b in zip(N, N[1:]):
         if (a,b) in self.skel.edges:
           t0 = self.frame_timestamps[a]
           t1 = self.frame_timestamps[b]
           if self.tf.canTransformFull("wide_stereo_optical_frame", t0, "wide_stereo_optical_frame", t1, "odom_combined"):
             t,r = self.tf.lookupTransformFull("wide_stereo_optical_frame", t0, "wide_stereo_optical_frame", t1, "odom_combined")
             relpose = Pose()
             relpose.fromlist(self.tf.fromTranslationRotation(t,r))
             self.wheel_odom_edges.add((a, b, relpose, 1.0))
       self.send_map(msg.header.stamp)
Example #2
0
  def __init__(self, source):
    self.cvim = None

    # These variables are internal:

    self.stereo_cam = source.cam()
    print "Camera is", self.stereo_cam
    self.connected = False
    self.snail_trail = []
    self.source = source
    self.inlier_history = []
    self.label = 0
    self.skel_dist_thresh = 0.5;        # in meters

    # These variables can be tweaked:

    self.fd = FeatureDetectorFast(300)
#    self.fd = FeatureDetectorStar(300)
    self.ds = DescriptorSchemeCalonder(32,16) # set up lower sequential search window
    self.camera_preview = False
    self.vo = VisualOdometer(self.stereo_cam,
                             scavenge = False,
                             position_keypoint_thresh = 0.3,
                             angle_keypoint_thresh = 10*pi/180,
                             inlier_thresh = 100,
                             sba=None,
                             num_ransac_iters=500,
                             inlier_error_threshold = 3.0)
    self.skel = Skeleton(self.stereo_cam,
                         link_thresh = 100,
                         descriptor_scheme = self.ds,
                         optimize_after_addition = False)
    self.skel.node_vdist = 0
    self.running = -1                   # run forever

    if self.camera_preview:
      cv.NamedWindow("Camera")
      cv.MoveWindow("Camera", 0, 700)
Example #3
0
from stereo_utils.descriptor_schemes import DescriptorSchemeCalonder, DescriptorSchemeSAD
from stereo_utils.feature_detectors import FeatureDetectorFast, FeatureDetector4x4, FeatureDetectorStar, FeatureDetectorHarris
from visual_odometry.pe import PoseEstimator

# Videre stereo camera intrinsics
Fx, Fy = 389.0, 389.0         # focal lengths
Tx = 0.08923                  # stereo baseline (meters)
Clx, Crx = 323.42, 323.42     # left/right image centers (x)
Cy = 274.95                   # image center (y)

# Camera model built from the intrinsics above
cam = camera.Camera((Fx, Fy, Tx, Clx, Crx, Cy))

# FAST feature detector with a budget of 300 keypoints
fd = FeatureDetectorFast(300)

# Calonder descriptor scheme
ds = DescriptorSchemeCalonder()

# First stereo pair
f0 = SparseStereoFrame(Image.open("f0-left.png"), Image.open("f0-right.png"),
                       feature_detector = fd, descriptor_scheme = ds)

# Second stereo pair
f1 = SparseStereoFrame(Image.open("f1-left.png"), Image.open("f1-right.png"),
                       feature_detector = fd, descriptor_scheme = ds)

# Pose estimator configured for 500 RANSAC iterations
pe = PoseEstimator()
pe.setNumRansacIterations(500)
Example #4
0
from stereo_utils.descriptor_schemes import DescriptorSchemeCalonder, DescriptorSchemeSAD
from stereo_utils.feature_detectors import FeatureDetectorFast, FeatureDetector4x4, FeatureDetectorStar, FeatureDetectorHarris
from visual_odometry.visualodometer import VisualOdometer, Pose, from_xyz_euler

# Camera intrinsics
Fx, Fy = 100, 100       # focal lengths (x, y)
Tx = 0.090              # stereo baseline in meters
Clx, Crx = 320, 320     # left/right image centers (x)
Cy = 240                # image center (y), shared by both images

# Camera model
cam = camera.Camera((Fx, Fy, Tx, Clx, Crx, Cy))

# FAST feature detector with a budget of 300 keypoints
fd = FeatureDetectorFast(300)

# Calonder descriptor scheme
ds = DescriptorSchemeCalonder()

# Visual odometer driven by the camera model
vo = VisualOdometer(cam)

# Stream up to 1000 numbered stereo pairs from the directory given as the
# first command-line argument (files named 000000L.png / 000000R.png, ...).
for i in range(1000):
    # Left image
    lim = Image.open("%s/%06dL.png" % (sys.argv[1], i))

    # Right image
    rim = Image.open("%s/%06dR.png" % (sys.argv[1], i))

    # Combine the images to make a stereo frame
    # NOTE(review): the example appears truncated here -- the frame
    # construction and the vo.handle_frame() call are missing.
Example #5
0
    def xtest_sim(self):
        """Render a synthetic stereo sequence and check VO pose recovery.

        Builds a scene of 300 random 8x8 sprites, renders left/right views
        while moving the camera (forward, turn around, back), then runs a
        VisualOdometer over the rendered frames and asserts the estimated
        pose stays within 0.2 of the ground-truth pose.  The 'xtest_'
        prefix keeps the unittest runner from collecting it by default.
        """
        # Test process with one 'ideal' camera, one real-world Videre
        camera_param_list = [
            # (200.0, 200.0, 3.00,  320.0, 320.0, 240.0),
            (389.0, 389.0, 1e-3 * 89.23, 323.42, 323.42, 274.95)
        ]

        def move_forward(i, prev):
            """ Forward 1 meter, turn around, Back 1 meter """
            if i == 0:
                return Pose(rotation(0, 0, 1, 0), (0, 0, 0))
            elif i < 10:
                return prev * Pose(rotation(0, 0, 1, 0), (0, 0, .1))
            elif i < 40:
                return prev * Pose(rotation(math.pi / 30, 0, 1, 0), (0, 0, 0))
            elif i < 50:
                return prev * Pose(rotation(0, 0, 1, 0), (0, 0, .1))

        for movement in [move_forward]:  # move_combo, move_Yrot ]:
            for cam_params in camera_param_list:
                cam = camera.Camera(cam_params)

                # Fixed seed keeps the scene and sprites deterministic.
                random.seed(0)

                def rr():
                    # Uniform random value in [-1, 1).
                    return 2 * random.random() - 1.0

                # 300 random 3D points spread over a 6m x 2m x 6m box.
                model = [(3 * rr(), 1 * rr(), 3 * rr()) for i in range(300)]

                def rndimg():
                    # Random 8x8 grayscale patch (64 distinct byte values).
                    b = "".join(random.sample([chr(c) for c in range(256)],
                                              64))
                    return Image.fromstring("L", (8, 8), b)

                def sprite(dst, x, y, src):
                    # Paste an 8x8 patch centered at (x, y); best-effort.
                    try:
                        dst.paste(src, (int(x) - 4, int(y) - 4))
                    except:
                        # NOTE(review): bare except deliberately swallows
                        # out-of-range pastes; narrow the exception if possible.
                        print "paste failed", x, y

                palette = [rndimg() for i in model]
                expected = []   # ground-truth Pose per rendered frame
                afs = []        # rendered SparseStereoFrames
                P = None
                for i in range(50):
                    print "render", i
                    P = movement(i, P)
                    li = Image.new("L", (640, 480))
                    ri = Image.new("L", (640, 480))
                    q = 0
                    for (mx, my, mz) in model:
                        pp = None
                        # World point into camera coordinates via inverse pose.
                        pt_camera = (numpy.dot(P.M.I,
                                               numpy.array([mx, my, mz, 1]).T))
                        (cx, cy, cz, cw) = numpy.array(pt_camera).ravel()
                        # Only draw points in front of the camera (z > 10cm).
                        if cz > .100:
                            ((xl, yl), (xr, yr)) = cam.cam2pixLR(cx, cy, cz)
                            if 0 <= xl and xl < 640 and 0 <= yl and yl < 480:
                                sprite(li, xl, yl, palette[q])
                                sprite(ri, xr, yr, palette[q])
                        q += 1
                    expected.append(P)
                    afs.append(SparseStereoFrame(imgStereo(li), imgStereo(ri)))

            # NOTE(review): this check sits outside the cam_params loop, so it
            # only uses the last camera -- harmless with a single-entry list,
            # but it looks like an indentation slip.
            vo = VisualOdometer(cam)
            for i, (af, ep) in enumerate(zip(afs, expected)):
                vo.handle_frame(af)
                if 0:
                    print vo.pose.xform(0, 0, 0)
                    print "expected", ep.M
                    print "vo.pose", vo.pose.M
                    print numpy.abs((ep.M - vo.pose.M))
                # Every matrix entry of the estimated pose within 0.2 of truth.
                self.assert_(
                    numpy.alltrue(numpy.abs((ep.M - vo.pose.M)) < 0.2))

            # NOTE(review): everything below this return is unreachable --
            # disabled independence checks kept for reference.
            return

            def run(vos):
                for af in afs:
                    for vo in vos:
                        vo.handle_frame(af)

            # Check that the pose estimators are truly independent

            v1 = VisualOdometer(cam,
                                feature_detector=FeatureDetectorFast(),
                                descriptor_scheme=DescriptorSchemeSAD(),
                                inlier_error_threshold=1.0)
            v2 = VisualOdometer(cam,
                                feature_detector=FeatureDetectorFast(),
                                descriptor_scheme=DescriptorSchemeSAD(),
                                inlier_error_threshold=2.0)
            v8 = VisualOdometer(cam,
                                feature_detector=FeatureDetectorFast(),
                                descriptor_scheme=DescriptorSchemeSAD(),
                                inlier_error_threshold=8.0)
            v1a = VisualOdometer(cam,
                                 feature_detector=FeatureDetectorFast(),
                                 descriptor_scheme=DescriptorSchemeSAD(),
                                 inlier_error_threshold=1.0)
            run([v1])
            run([v2, v8, v1a])
            self.assert_(v1.pose.xform(0, 0, 0) == v1a.pose.xform(0, 0, 0))
            for a, b in [(v1, v2), (v2, v8), (v1, v8)]:
                self.assert_(a.pose.xform(0, 0, 0) != b.pose.xform(0, 0, 0))

            return