Example #1
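Builds a camera from a 3x4 projection matrix (with optional distortion coefficients), re-orients it with get_view_camera(), and checks that get_flipped_camera() changes the look-at point while producing identical 3D-to-2D projections.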
def check_flip(distortion=False):
    if distortion:
        d = [0.1, 0.2, 0.3, 0.4, 0.5]
    else:
        d = None

    # build camera
    center_expected = np.array([10, 5, 20])
    lookat_expected = center_expected + np.array([1, 2, 0])
    up_expected = np.array([0, 0, 1])

    width, height = 640, 480

    M = np.array([[300.0, 0, 321, 0], [0, 298.0, 240, 0], [0, 0, 1, 0]])
    cam1 = CameraModel.load_camera_from_M(M,
                                          width=width,
                                          height=height,
                                          distortion_coefficients=d)
    cam = cam1.get_view_camera(center_expected, lookat_expected, up_expected)
    del cam1

    pts = np.array(
        [lookat_expected, lookat_expected + up_expected, [1, 2, 3], [4, 5, 6]])
    pix_actual = cam.project_3d_to_pixel(pts)

    # Flipped camera gives same 3D->2D transform but different look direction.
    cf = cam.get_flipped_camera()
    assert not np.allclose(cam.get_lookat(), cf.get_lookat())

    pix_actual_flipped = cf.project_3d_to_pixel(pts)
    assert np.allclose(pix_actual, pix_actual_flipped)
Example #2
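Checks that the projection matrix M survives a round trip through the load_camera_from_M() factory, skipping cameras whose intrinsic matrix is distorted and skewed.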
def check_built_from_M(cam_opts):
    """check that M is preserved in load_camera_from_M() factory"""
    cam_orig = _build_test_camera(**cam_opts)
    if cam_orig.is_distorted_and_skewed():
        raise SkipTest('do not expect that skewed camera passes this test')
    M_orig = cam_orig.M
    cam = CameraModel.load_camera_from_M( M_orig )
    assert np.allclose( cam.M, M_orig)
Example #3
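Projects a set of 3D points two ways, by direct multiplication with the 3x4 matrix M and through a CameraModel loaded from the same M, and asserts both give the same pixel coordinates (optionally plotting the results).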
def test_simple_projection():

    # get some 3D points
    pts_3d = _build_points_3d()

    if DRAW:
        fig = plt.figure(figsize=(8, 12))
        ax1 = fig.add_subplot(3, 1, 1, projection='3d')
        ax1.scatter(pts_3d[:, 0], pts_3d[:, 1], pts_3d[:, 2])
        ax1.set_xlabel('X')
        ax1.set_ylabel('Y')
        ax1.set_zlabel('Z')

    # build a camera calibration matrix
    focal_length = 1200
    width, height = 640, 480
    R = np.eye(3)  # look at +Z
    c = np.array((9.99, 19.99, 20))
    M = make_M(focal_length, width, height, R, c)['M']

    # now, project these 3D points into our image plane
    pts_3d_H = np.vstack((pts_3d.T, np.ones((1, len(pts_3d)))))  # make homog.
    undist_rst_simple = np.dot(M, pts_3d_H)  # multiply
    undist_simple = undist_rst_simple[:2, :] / undist_rst_simple[
        2, :]  # project

    if DRAW:
        ax2 = fig.add_subplot(3, 1, 2)
        ax2.plot(undist_simple[0, :], undist_simple[1, :], 'b.')
        ax2.set_xlim(0, width)
        ax2.set_ylim(height, 0)
        ax2.set_title('matrix multiply')

    # build a camera model from our M and project onto image plane
    cam = CameraModel.load_camera_from_M(M, width=width, height=height)
    undist_full = cam.project_3d_to_pixel(pts_3d).T

    if DRAW:
        plot_camera(ax1, cam, scale=10, axes_size=5.0)
        sz = 20
        x = 5
        y = 8
        z = 19
        ax1.auto_scale_xyz([x, x + sz], [y, y + sz], [z, z + sz])

        ax3 = fig.add_subplot(3, 1, 3)
        ax3.plot(undist_full[0, :], undist_full[1, :], 'b.')
        ax3.set_xlim(0, width)
        ax3.set_ylim(height, 0)
        ax3.set_title('pymvg')

    if DRAW:
        plt.show()

    assert np.allclose(undist_full, undist_simple)
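The make_M helper used above comes from the surrounding test module and is not shown here. As a rough sketch (an assumption, not the pymvg implementation), it presumably builds the standard pinhole decomposition M = K [R | -R c] with the principal point at the image center:

import numpy as np

def make_M_sketch(focal_length, width, height, R, c):
    # Hypothetical stand-in for make_M: intrinsic matrix K with the principal
    # point at the image center, then M = K [R | t] where t = -R c.
    K = np.array([[focal_length, 0.0, width / 2.0],
                  [0.0, focal_length, height / 2.0],
                  [0.0, 0.0, 1.0]])
    t = -np.dot(R, np.asarray(c, dtype=float))
    Rt = np.hstack((R, t.reshape(3, 1)))
    return {'M': np.dot(K, Rt), 'K': K, 'R': R, 't': t}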
Example #4
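Verifies that get_aligned_camera(s, R, t) agrees with a camera rebuilt from align_M(s, R, t, M) for several combinations of scale, rotation, and translation.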
def check_align(cam_opts):

    cam_orig = _build_test_camera(**cam_opts)
    M_orig = cam_orig.M
    cam_orig = CameraModel.load_camera_from_M( M_orig )
    R1 = np.eye(3)
    R2 = np.zeros((3,3))
    R2[0,1] = 1
    R2[1,0] = 1
    R2[2,2] = -1
    t1 = np.array( (0.0, 0.0, 0.0) )
    t2 = np.array( (0.0, 0.0, 0.1) )
    t3 = np.array( (0.1, 0.0, 0.0) )
    for s in [1.0, 2.0]:
        for R in [R1, R2]:
            for t in [t1, t2, t3]:
                cam_actual = cam_orig.get_aligned_camera( s, R, t )
                M_expected = mcsc_align.align_M( s,R,t, M_orig )
                cam_expected = CameraModel.load_camera_from_M( M_expected )
                assert cam_actual==cam_expected
Example #5
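Builds a camera looking along +X via get_view_camera(), then checks the recovered view parameters, the camera-frame coordinates of points on the optical axis, and the corresponding pixel projections.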
def test_lookat():

    dist = 5.0

    # build camera
    center_expected = np.array([10, 5, 20])
    lookat_expected = center_expected + np.array([dist, 0, 0])  # looking in +X
    up_expected = np.array([0, 0, 1])

    f = 300.0  # focal length
    width, height = 640, 480
    cx, cy = width / 2.0, height / 2.0

    M = np.array([[f, 0, cx, 0], [0, f, cy, 0], [0, 0, 1, 0]])
    cam1 = CameraModel.load_camera_from_M(M, width=width, height=height)
    cam = cam1.get_view_camera(center_expected, lookat_expected, up_expected)
    del cam1

    # check that the extrinsic parameters were what we expected
    (center_actual, lookat_actual, up_actual) = cam.get_view()

    lookdir_expected = normalize(lookat_expected - center_expected)
    lookdir_actual = normalize(lookat_actual - center_actual)

    assert np.allclose(center_actual, center_expected)
    assert np.allclose(lookdir_actual, lookdir_expected)
    assert np.allclose(up_actual, up_expected)

    # check that the extrinsics work as expected
    pts = np.array([lookat_expected, lookat_expected + up_expected])
    eye_actual = cam.project_3d_to_camera_frame(pts)

    eye_expected = [
        [0, 0, dist],  # camera looks at +Z
        [0, -1, dist],  # with -Y as up
    ]
    assert np.allclose(eye_actual, eye_expected)

    # now check some basics of the projection
    pix_actual = cam.project_3d_to_pixel(pts)

    pix_expected = [
        [cx, cy],  # center pixel on the camera
        [cx, cy - (f / dist)]
    ]
    assert np.allclose(pix_actual, pix_expected)
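The normalize helper used in test_lookat is also not part of the listing; presumably it just rescales a vector to unit length, e.g.:

def normalize(v):
    # assumed helper (not shown in the original module): return v scaled to unit length
    v = np.asarray(v, dtype=float)
    return v / np.linalg.norm(v)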
Example #6
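Test-fixture method that builds a rotated, translated camera from a make_M() result, with or without distortion coefficients, and stores it on the test instance.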
    def generate_camera(self):
        (width,height)=(self.width,self.height)=(640,480)
        center = 1,2,3
        rot_axis = np.array((4,5,6.7))
        rot_axis = rot_axis / np.sum(rot_axis**2)
        rquat = tf.transformations.quaternion_about_axis(0.1, (rot_axis.tolist()))
        rmat,_ = get_rotation_matrix_and_quaternion(rquat)

        parts = make_M( 1234.56, width, height,
                        rmat, center)

        if self.use_distortion:
            dist = [-0.4, .2, 0, 0, 0]
        else:
            dist = [0, 0, 0, 0, 0]

        self.cam = CameraModel.load_camera_from_M(parts['M'],
                                                  width=width,height=height,
                                                  distortion_coefficients=dist)
Example #7
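Regression test for a DLT-derived M that previously caused problems: the loaded camera must reproduce the same pixel projections as the raw matrix, even though the stored M may differ by an overall scale.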
def test_problem_M():
    """check a particular M which previously caused problems"""
    # This M (found by the DLT method) was causing me problems.
    d = {'width': 848,
         'name': 'camera',
         'height': 480}
    M =  np.array([[ -1.70677031e+03,  -4.10373295e+03,  -3.88568028e+02, 6.89034515e+02],
                   [ -6.19019195e+02,  -1.01292091e+03,  -2.67534989e+03, 4.51847857e+02],
                   [ -4.52548832e+00,  -3.78900498e+00,  -7.35860226e-01, 1.00000000e+00]])
    cam = CameraModel.load_camera_from_M( M, **d)

    #assert np.allclose( cam.M, M) # we don't expect this since the intrinsic matrix may not be scaled

    verts = np.array([[ 0.042306,  0.015338,  0.036328, 1.0],
                      [ 0.03323,   0.030344,  0.041542, 1.0],
                      [ 0.036396,  0.026464,  0.052408, 1.0]])

    actual = cam.project_3d_to_pixel(verts[:,:3])

    expectedh = np.dot( M, verts.T )
    expected = (expectedh[:2]/expectedh[2]).T
    assert np.allclose( expected, actual )
Example #8
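Turns a list of (name, parameters) camera descriptions into CameraModel objects via load_camera_from_M() and wraps them in a MultiCameraSystem for triangulation.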
def build_multi_camera_system(cameras, no_distortion=False):
    """
    Build a multi-camera system with pymvg package for triangulation

    Args:
        cameras: list of camera parameters
    Returns:
        cams_system: a multi-cameras system
    """
    pymvg_cameras = []
    for (name, camera) in cameras:
        R, T, f, c, k, p = unfold_camera_param(camera, avg_f=False)
        camera_matrix = np.array(
            [[f[0], 0, c[0]], [0, f[1], c[1]], [0, 0, 1]], dtype=float)
        proj_matrix = np.zeros((3, 4))
        proj_matrix[:3, :3] = camera_matrix
        distortion = np.array([k[0], k[1], p[0], p[1], k[2]])
        distortion.shape = (5,)
        T = -np.matmul(R, T)
        M = camera_matrix.dot(np.concatenate((R, T), axis=1))
        camera = CameraModel.load_camera_from_M(
            M, name=name, distortion_coefficients=None if no_distortion else distortion)
        pymvg_cameras.append(camera)
    return MultiCameraSystem(pymvg_cameras)
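A possible usage sketch for the resulting system, assuming MultiCameraSystem.find3d() triangulates from a list of (camera_name, pixel) pairs as described in the pymvg documentation; the camera list and pixel values below are made up for illustration:

# hypothetical input: 'cameras' must be a list of (name, parameter-dict) pairs
# in whatever format unfold_camera_param() from the surrounding project expects
system = build_multi_camera_system(cameras)
observations = [('cam_1', np.array([320.0, 240.0])),
                ('cam_2', np.array([315.5, 250.2]))]
point_3d = system.find3d(observations)  # triangulated 3D point (assumed pymvg API)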
Example #9
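Script fragment that loads per-camera projection matrices and distortion coefficients, builds a MultiCameraSystem, writes it to a pymvg JSON file, and plots the cameras.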
    for idx in range(len(calib_id)):
        print ""
        print "~~~~~~~~~~~~~~~~~~~~~~~~~~"
        print "Importing {0}".format(calib_id[idx])

        pmat = calib[idx][0]
        distortion = calib[idx][1]
        name = calib_id[idx]
        width = cam_settings[name]["f7"]["width"]
        height = cam_settings[name]["f7"]["height"]

        print(pmat)
        print(distortion)

        camera = CameraModel.load_camera_from_M(pmat, width=width, height=height, name=name,
                                                distortion_coefficients=distortion)

        cameras.append(camera)

    system = MultiCameraSystem(cameras)
    system.save_to_pymvg_file(join(CALIB, "camera_system.json"))

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    # plot_system(ax, system)
    for name in system.get_names():
        plot_camera(ax, system.get_camera(name))

    ax.set_xlabel('x')
    ax.set_ylabel('y')
Example #10
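GTK calibration handler: depending on the chosen method it either fits a camera with (RANSAC) DLT through load_camera_from_M() or fits extrinsics against an existing intrinsic calibration, falls back to the flipped camera when no texture coordinates project into view, and records the mean reprojection error per virtual display.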
    def launch_calibration(self, method, vdisp):
        def _pump_ui():
            if os.environ.get('RUNNING_NOSE') != '1':
                Gtk.main_iteration_do(False)  #run once, no block
                while Gtk.events_pending():  #run remaining
                    Gtk.main_iteration()

        orig_data = []
        for row in self.point_store:
            if row[VDISP] == vdisp:
                orig_data.append(
                    [row[TEXU], row[TEXV], row[DISPLAYX], row[DISPLAYY]])
        orig_data = np.array(orig_data)
        uv = orig_data[:, :2]
        XYZ = self.geom.model.texcoord2worldcoord(uv)
        xy = orig_data[:, 2:4]

        assert method in EXTRINSIC_CALIBRATION_METHODS

        if method in ('DLT', 'RANSAC DLT'):
            ransac = method.startswith('RANSAC')
            r = dlt.dlt(XYZ, xy, ransac=ransac)
            c1 = CameraModel.load_camera_from_M(
                r['pmat'],
                width=self.dsc.width,
                height=self.dsc.height,
            )

            _pump_ui()

            if 0:
                c2 = c1.get_flipped_camera()

                # slightly hacky way to find best camera direction
                obj = self.geom.model.get_center()

                d1 = np.sqrt(np.sum((c1.get_lookat() - obj)**2))
                d2 = np.sqrt(np.sum((c2.get_lookat() - obj)**2))
                if d1 < d2:
                    # print('using normal camera')
                    camera = c1
                else:
                    print('using flipped camera')
                    camera = c2
            elif 1:
                farr = self.geom.compute_for_camera_view(c1,
                                                         what='texture_coords')

                _pump_ui()

                u = farr[:, :, 0]
                good = ~np.isnan(u)
                npix = np.sum(np.nonzero(good))
                if npix == 0:
                    print('using flipped camera, otherwise npix = 0')
                    camera = c1.get_flipped_camera()
                else:
                    camera = c1
            else:
                camera = c1
        elif method in ['extrinsic only', 'iterative extrinsic only']:
            assert self.display_intrinsic_cam is not None, 'need intrinsic calibration'

            di = self.dsc.get_display_info()

            mirror = None
            if 'virtualDisplays' in di:
                found = False
                for d in di['virtualDisplays']:
                    if d['id'] == vdisp:
                        found = True
                        mirror = d.get('mirror', None)
                        break
                assert found

            if mirror is not None:
                cami = self.display_intrinsic_cam.get_mirror_camera(
                    axis=mirror)
            else:
                cami = self.display_intrinsic_cam

            _pump_ui()

            if method == 'iterative extrinsic only':
                result = fit_extrinsics_iterative(cami, XYZ, xy)
            else:
                result = fit_extrinsics(cami, XYZ, xy)

            _pump_ui()

            c1 = result['cam']
            if 1:
                farr = self.geom.compute_for_camera_view(c1,
                                                         what='texture_coords')

                _pump_ui()

                u = farr[:, :, 0]
                good = ~np.isnan(u)
                npix = np.sum(np.nonzero(good))
                if npix == 0:
                    print('using flipped camera, otherwise npix = 0')
                    camera = c1.get_flipped_camera()
                else:
                    camera = c1
            else:
                camera = c1
            del result
        else:
            raise ValueError('unknown calibration method %r' % method)

        _pump_ui()

        projected_points = camera.project_3d_to_pixel(XYZ)
        reproj_error = np.sum((projected_points - xy)**2, axis=1)
        mre = np.mean(reproj_error)

        for row in self.vdisp_store:
            if row[VS_VDISP] == vdisp:
                row[VS_MRE] = mre
                row[VS_CAMERA_OBJECT] = camera

        _pump_ui()

        self.update_bg_image()
Example #11
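Same round-trip check as Example #2, but using the get_M() accessor instead of the M attribute.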
def check_built_from_M(cam_opts):
    """check that M is preserved in load_camera_from_M() factory"""
    cam_orig = _build_test_camera(**cam_opts)
    M_orig = cam_orig.get_M()
    cam = CameraModel.load_camera_from_M( M_orig )
    assert np.allclose( cam.get_M(), M_orig)
Example #12
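Runs a plain (non-RANSAC) DLT fit, asserts the mean reprojection error is below 6.0, and loads a camera from the resulting projection matrix.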
def test_basic_dlt():
    results = dlt.dlt(XYZ, xy, ransac=False)
    assert results['mean_reprojection_error'] < 6.0
    c1 = CameraModel.load_camera_from_M(results['pmat'])