Example #1
 def test_computed_dense_stereo(self):
     fd = FeatureDetectorStar(300)
     ds = DescriptorSchemeSAD()
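     # Load the frame-0 left/right images as 8-bit grayscale and build a dense stereo frame from them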
     lf = Image.open("f0-left.png").convert("L")
     rf = Image.open("f0-right.png").convert("L")
     af = ComputedDenseStereoFrame(lf,
                                   rf,
                                   feature_detector=fd,
                                   descriptor_scheme=ds)
Example #2
 def test_sad(self):
     im = self.img640x480
     fd = FeatureDetectorStar(300)
     ds = DescriptorSchemeSAD()
     af = SparseStereoFrame(im,
                            im,
                            feature_detector=fd,
                            descriptor_scheme=ds)
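     # Matching a frame against itself should pair every feature with itself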
     for (a, b) in af.match(af):
         self.assert_(a == b)
Example #3
    def test_sparse_stereo(self):
        left = Image.new("L", (640, 480))
        circle(left, 320, 200, 4, 255)

        fd = FeatureDetectorStar(300)
        ds = DescriptorSchemeSAD()
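        # The circle in the right image sits `disparity` pixels to the left, so the
        # disparity looked up at the left-image location should recover that shift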
        for disparity in range(20):
            right = Image.new("L", (640, 480))
            circle(right, 320 - disparity, 200, 4, 255)
            sf = SparseStereoFrame(left,
                                   right,
                                   feature_detector=fd,
                                   descriptor_scheme=ds)
            self.assertAlmostEqual(sf.lookup_disparity(320, 200), disparity, 0)
Example #4
 def test_pe(self):
     random.seed(0)
     cloud = [(4 * (random.random() - 0.5), 4 * (random.random() - 0.5),
               4 * (random.random() - 0.5)) for i in range(20)]
     vo = VisualOdometer(
         camera.Camera(
             (389.0, 389.0, 89.23 * 1e-3, 323.42, 323.42, 274.95)))
     stereo_cam = {}
     af = {}
     fd = FeatureDetectorStar(300)
     ds = DescriptorSchemeSAD()
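     # Render the same static point cloud through five stereo rigs whose baseline grows by 0.010 per rig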
     for i in range(5):
         stereo_cam[i] = camera.Camera(
             (389.0, 389.0, .080 + .010 * i, 323.42, 323.42, 274.95))
         desired_pose = Pose()
         cam = ray_camera(desired_pose, stereo_cam[i])
         imL = Image.new("RGB", (640, 480))
         imR = Image.new("RGB", (640, 480))
         scene = []
         scene += [
             object(isphere(vec3(0, 0, 0), 1000), shadeLitCloud,
                    {'scale': 0.001})
         ]
         scene += [
             object(isphere(vec3(x, y, z + 6), 0.3), shadeLitCloud,
                    {'scale': 3}) for (x, y, z) in cloud
         ]
         imL, imR = [im.convert("L") for im in [imL, imR]]
         render_stereo_scene(imL, imR, None, cam, scene, 0)
         af[i] = SparseStereoFrame(imL,
                                   imR,
                                   feature_detector=fd,
                                   descriptor_scheme=ds)
         vo.process_frame(af[i])
     pe = PoseEstimator()
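     # The camera pose is identical for all five frames, so the estimate between any pair should be near zero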
     for i1 in range(5):
         for i0 in range(5):
             pairs = vo.temporal_match(af[i1], af[i0])
             res = pe.estimateC(stereo_cam[i0], af[i0].features(),
                                stereo_cam[i1], af[i1].features(), pairs,
                                False)
             x, y, z = res[2]
             self.assertAlmostEqual(x, 0, 0)
             self.assertAlmostEqual(y, 0, 0)
             self.assertAlmostEqual(z, 0, 0)
Example #5
 def test_stereo_accuracy(self):
     fd = FeatureDetectorStar(300)
     ds = DescriptorSchemeSAD()
     for offset in [1, 10, 10.25, 10.5, 10.75, 11, 63]:
         lf = self.img640x480
         rf = self.img640x480
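         # Synthesize the right image by shifting the left image `offset` pixels
         # (a 16x horizontal upsample gives 1/16-pixel precision)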
         rf = rf.resize((16 * 640, 480))
         rf = ImageChops.offset(rf, -int(offset * 16), 0)
         rf = rf.resize((640, 480), Image.ANTIALIAS)
         for gradient in [False, True]:
             af = SparseStereoFrame(lf,
                                    rf,
                                    gradient,
                                    feature_detector=fd,
                                    descriptor_scheme=ds)
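             # Drop features near the left border, then compare the mean measured disparity to the known offset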
             kp = [(x, y, d) for (x, y, d) in af.features() if (x > 64)]
             error = offset - sum([d for (x, y, d) in kp]) / len(kp)
             print error
             self.assert_(abs(error) < 0.25)
Example #6
import Image
import math

import os

from stereo_utils import camera
from stereo_utils.stereo import SparseStereoFrame, DenseStereoFrame
from stereo_utils.descriptor_schemes import DescriptorSchemeCalonder, DescriptorSchemeSAD
from stereo_utils.feature_detectors import FeatureDetectorFast, FeatureDetector4x4, FeatureDetectorStar, FeatureDetectorHarris
from visual_odometry.visualodometer import VisualOdometer, Pose
from visual_odometry.pe import PoseEstimator
import vop
import cv

fd = FeatureDetectorStar(300)
ds = DescriptorSchemeSAD()

from stereo_utils.reader import reader
#r = reader("/u/jamesb/sequences/kk_2009-02-24-17-24-55-topic")
r = reader("/u/jamesb/sequences/kk_2009-02-24-17-22-41-topic")

#(i0,i1) = [ (Image.open("f%d-left.png" % i), Image.open("f%d-right.png" % i)) for i in [0, 1] ]


def xform(M, x, y, z):
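    # Apply the row-major 3x4 transform M to vectorized (x, y, z) coordinates
    # using vop multiply-adds; returns the transformed (nx, ny, nz)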
    nx = vop.mad(M[0], x, vop.mad(M[1], y, vop.mad(M[2], z, M[3])))
    ny = vop.mad(M[4], x, vop.mad(M[5], y, vop.mad(M[6], z, M[7])))
    nz = vop.mad(M[8], x, vop.mad(M[9], y, vop.mad(M[10], z, M[11])))
    return (nx, ny, nz)

Example #7
    def xtest_sim(self):
        # Test process with one 'ideal' camera, one real-world Videre
        camera_param_list = [
            # (200.0, 200.0, 3.00,  320.0, 320.0, 240.0),
            (389.0, 389.0, 1e-3 * 89.23, 323.42, 323.42, 274.95)
        ]

        def move_forward(i, prev):
            """ Forward 1 meter, turn around, Back 1 meter """
            if i == 0:
                return Pose(rotation(0, 0, 1, 0), (0, 0, 0))
            elif i < 10:
                return prev * Pose(rotation(0, 0, 1, 0), (0, 0, .1))
            elif i < 40:
                return prev * Pose(rotation(math.pi / 30, 0, 1, 0), (0, 0, 0))
            elif i < 50:
                return prev * Pose(rotation(0, 0, 1, 0), (0, 0, .1))

        for movement in [move_forward]:  # move_combo, move_Yrot ]:
            for cam_params in camera_param_list:
                cam = camera.Camera(cam_params)

                random.seed(0)

                def rr():
                    return 2 * random.random() - 1.0

                model = [(3 * rr(), 1 * rr(), 3 * rr()) for i in range(300)]

                def rndimg():
                    b = "".join(random.sample([chr(c) for c in range(256)],
                                              64))
                    return Image.fromstring("L", (8, 8), b)

                def sprite(dst, x, y, src):
                    try:
                        dst.paste(src, (int(x) - 4, int(y) - 4))
                    except:
                        print "paste failed", x, y

                palette = [rndimg() for i in model]
                expected = []
                afs = []
                P = None
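                # Synthesize 50 stereo frames: step the camera along the scripted
                # path and paste an 8x8 random sprite at each visible model point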
                for i in range(50):
                    print "render", i
                    P = movement(i, P)
                    li = Image.new("L", (640, 480))
                    ri = Image.new("L", (640, 480))
                    q = 0
                    for (mx, my, mz) in model:
                        pp = None
                        pt_camera = (numpy.dot(P.M.I,
                                               numpy.array([mx, my, mz, 1]).T))
                        (cx, cy, cz, cw) = numpy.array(pt_camera).ravel()
                        if cz > .100:
                            ((xl, yl), (xr, yr)) = cam.cam2pixLR(cx, cy, cz)
                            if 0 <= xl and xl < 640 and 0 <= yl and yl < 480:
                                sprite(li, xl, yl, palette[q])
                                sprite(ri, xr, yr, palette[q])
                        q += 1
                    expected.append(P)
                    afs.append(SparseStereoFrame(imgStereo(li), imgStereo(ri)))

            vo = VisualOdometer(cam)
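            # Replay the synthesized frames through the odometer and check each
            # recovered pose against the ground-truth trajectory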
            for i, (af, ep) in enumerate(zip(afs, expected)):
                vo.handle_frame(af)
                if 0:
                    print vo.pose.xform(0, 0, 0)
                    print "expected", ep.M
                    print "vo.pose", vo.pose.M
                    print numpy.abs((ep.M - vo.pose.M))
                self.assert_(
                    numpy.alltrue(numpy.abs((ep.M - vo.pose.M)) < 0.2))

            return
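            # NOTE: the early return above disables the pose-estimator independence checks below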

            def run(vos):
                for af in afs:
                    for vo in vos:
                        vo.handle_frame(af)

            # Check that the pose estimators are truly independent

            v1 = VisualOdometer(cam,
                                feature_detector=FeatureDetectorFast(),
                                descriptor_scheme=DescriptorSchemeSAD(),
                                inlier_error_threshold=1.0)
            v2 = VisualOdometer(cam,
                                feature_detector=FeatureDetectorFast(),
                                descriptor_scheme=DescriptorSchemeSAD(),
                                inlier_error_threshold=2.0)
            v8 = VisualOdometer(cam,
                                feature_detector=FeatureDetectorFast(),
                                descriptor_scheme=DescriptorSchemeSAD(),
                                inlier_error_threshold=8.0)
            v1a = VisualOdometer(cam,
                                 feature_detector=FeatureDetectorFast(),
                                 descriptor_scheme=DescriptorSchemeSAD(),
                                 inlier_error_threshold=1.0)
            run([v1])
            run([v2, v8, v1a])
            self.assert_(v1.pose.xform(0, 0, 0) == v1a.pose.xform(0, 0, 0))
            for a, b in [(v1, v2), (v2, v8), (v1, v8)]:
                self.assert_(a.pose.xform(0, 0, 0) != b.pose.xform(0, 0, 0))

            return