Example #1
def make_default_system():
    '''helper function to generate an instance of MultiCameraSystem'''
    lookat = np.array( (0.0, 0.0, 0.0))

    center1 = np.array( (0.0, 0.0, 0.9) )
    distortion1 = np.array( [0.2, 0.3, 0.1, 0.1, 0.1] )
    cam1 = CameraModel.load_camera_simple(name='cam1',
                                          fov_x_degrees=90,
                                          eye=center1,
                                          lookat=lookat,
                                          distortion_coefficients=distortion1,
                                          )

    center2 = np.array( (0.5, -0.8, 0.0) )
    cam2 = CameraModel.load_camera_simple(name='cam2',
                                          fov_x_degrees=90,
                                          eye=center2,
                                          lookat=lookat,
                                          )

    center3 = np.array( (0.5, 0.5, 0.0) )
    cam3 = CameraModel.load_camera_simple(name='cam3',
                                          fov_x_degrees=90,
                                          eye=center3,
                                          lookat=lookat,
                                          )

    system = MultiCameraSystem([cam1,cam2,cam3])
    return system
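
A system built this way is typically checked by projecting a known 3D point into every camera and triangulating it back. A minimal sketch, reusing numpy and the helper above, and assuming MultiCameraSystem exposes the find2d/find3d API used in the later examples:

def check_triangulation_roundtrip():
    system = make_default_system()
    pt_3d = np.array((0.1, -0.2, 0.3))
    # project into each camera, then triangulate back (find3d is assumed to
    # accept a list of (camera_name, xy) pairs)
    data = [(name, system.find2d(name, pt_3d)) for name in system.get_names()]
    pt_recovered = system.find3d(data)
    # loose tolerance: cam1 has distortion and undistortion is iterative
    assert np.allclose(pt_3d, pt_recovered, atol=1e-4)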
Example #2
def make_default_system(with_separate_distortions=False):
    """helper function to generate an instance of MultiCameraSystem"""
    if with_separate_distortions:
        raise NotImplementedError
    camxs = np.linspace(-2, 2, 3)
    camzs = np.linspace(-2, 2, 3).tolist()
    camzs.pop(1)
    cameras = []
    lookat = np.array((0.0, 0, 0))
    up = np.array((0.0, 0, 1))

    for enum, camx in enumerate(camxs):
        center = np.array((camx, -5, 0))
        name = "camx_%d" % (enum + 1,)
        cam = CameraModel.load_camera_simple(name=name, fov_x_degrees=45, eye=center, lookat=lookat, up=up)
        cameras.append(cam)

    for enum, camz in enumerate(camzs):
        center = np.array((0, -5, camz))
        name = "camz_%d" % (enum + 1,)
        cam = CameraModel.load_camera_simple(name=name, fov_x_degrees=45, eye=center, lookat=lookat, up=up)
        cameras.append(cam)

    system = MultiCameraSystem(cameras)
    return system
Example #3
    def make_cam_from_params(self, params):
        if 1:
            t = params[:3]
            rod = params[3:]
            rmat = rodrigues2matrix( rod )
            d = self.intrinsic_dict.copy()
            d['translation'] = t
            d['Q'] = rmat
            cam_model = CameraModel.from_dict(d)
            return cam_model

        C = params[:3]
        quat = params[3:]
        qmag = np.sqrt(np.sum(quat**2))
        quat = quat/qmag

        R,rquat=get_rotation_matrix_and_quaternion(quat)

        t = -np.dot(R, C)

        d = self.intrinsic_dict.copy()
        d['translation'] = t
        d['Q'] = R
        cam_model = CameraModel.from_dict(d)
        return cam_model
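
The pose here is parameterised as a translation plus a Rodrigues rotation vector. If a rodrigues2matrix helper is not at hand, a hypothetical stand-in built on OpenCV's cv2.Rodrigues could look like this:

import numpy as np
import cv2

def rodrigues2matrix_cv(rod):
    # cv2.Rodrigues converts a 3-element rotation vector into a 3x3 rotation
    # matrix (it also returns the Jacobian, which is ignored here)
    rod = np.asarray(rod, dtype=np.float64).reshape(3, 1)
    rmat, _jac = cv2.Rodrigues(rod)
    return rmat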
Example #4
def test_equals():
    system = make_default_system()
    assert system != 1234

    system2 = MultiCameraSystem([CameraModel.load_camera_simple(name='cam%d'%i) for i in range(2)])
    system3 = MultiCameraSystem([CameraModel.load_camera_simple(name='cam%d'%i) for i in range(3)])
    assert system2 != system3

    system4 = make_default_system()
    d = system4.to_dict()
    d['camera_system'][0]['width'] += 1
    system5 = MultiCameraSystem.from_dict( d )
    assert system4 != system5
Example #5
def check_flip(distortion=False):
    if distortion:
        d = [0.1, 0.2, 0.3, 0.4, 0.5]
    else:
        d = None

    # build camera
    center_expected = np.array( [10, 5, 20] )
    lookat_expected = center_expected + np.array( [1, 2, 0] )
    up_expected     = np.array( [0,  0,  1] )

    width, height = 640, 480

    M = np.array( [[ 300.0,     0, 321, 0],
                   [     0, 298.0, 240, 0],
                   [     0,     0,   1, 0]])
    cam1 = CameraModel.load_camera_from_M( M, width=width, height=height,
                                              distortion_coefficients=d )
    cam = cam1.get_view_camera(center_expected, lookat_expected, up_expected)
    del cam1

    pts = np.array([lookat_expected,
                    lookat_expected+up_expected,
                    [1,2,3],
                    [4,5,6]])
    pix_actual = cam.project_3d_to_pixel( pts )

    # Flipped camera gives same 3D->2D transform but different look direction.
    cf = cam.get_flipped_camera()
    assert not np.allclose( cam.get_lookat(), cf.get_lookat() )

    pix_actual_flipped = cf.project_3d_to_pixel( pts )
    assert np.allclose( pix_actual,      pix_actual_flipped )
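
check_flip takes a flag, so a small driver is needed to run both variants; a minimal sketch of such a driver:

def test_flip():
    # exercise check_flip both with and without distortion coefficients
    for distortion in (False, True):
        check_flip(distortion=distortion)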
Example #6
def test_align():
    system1 = make_default_system()
    system2 = system1.get_aligned_copy( system1 ) # This should be a no-op.
    assert system1==system2

    system3 = MultiCameraSystem([CameraModel.load_camera_simple(name='cam%d'%i) for i in range(2)])
    nose.tools.assert_raises(ValueError, system3.get_aligned_copy, system1)
Example #7
def check_built_from_M(cam_opts):
    """check that M is preserved in load_camera_from_M() factory"""
    cam_orig = _build_test_camera(**cam_opts)
    if cam_orig.is_distorted_and_skewed():
        raise SkipTest('do not expect that skewed camera passes this test')
    M_orig = cam_orig.M
    cam = CameraModel.load_camera_from_M( M_orig )
    assert np.allclose( cam.M, M_orig)
Example #8
def get_camera_for_boards(rows,width=0,height=0):
    info_dict = {}
    for row in rows:
        r = row[0]
        info_str = '%d %d %f'%(r['rows'], r['columns'], r['size'])
        if info_str not in info_dict:
            # create entry
            info = camera_calibration.calibrator.ChessboardInfo()
            info.dim = r['size']
            info.n_cols = r['columns']
            info.n_rows = r['rows']
            info_dict[info_str] = {'info':info,
                                   'corners':[]}
        this_list = info_dict[info_str]['corners']
        this_corners = r['points']
        assert len(this_corners)==r['rows']*r['columns']
        this_list.append( this_corners )

    boards = []
    goodcorners = []
    mpl_debug = False
    mpl_lines = []
    for k in info_dict:
        info = info_dict[k]['info']
        for xys in info_dict[k]['corners']:
            goodcorners.append( (xys,info) )

        if mpl_debug:
            N = info.n_cols
            xs = []; ys=[]
            for (x,y) in xys:
                xs.append(x); ys.append(y)
            while len(xs):
                this_xs = xs[:N]
                this_ys = ys[:N]

                del xs[:N]
                del ys[:N]

                mpl_lines.append( (this_xs, this_ys ) )

    if mpl_debug:
        import matplotlib.pyplot as plt
        for (this_xs, this_ys) in mpl_lines:
            plt.plot( this_xs, this_ys )
        plt.show()

    cal = camera_calibration.calibrator.MonoCalibrator(boards)
    cal.size = (width,height)
    r = cal.cal_fromcorners(goodcorners)
    msg = cal.as_message()

    buf = roslib.message.strify_message(msg)
    obj = yaml.safe_load(buf)
    cam = CameraModel.from_dict(obj,
                                      extrinsics_required=False)
    return cam
Example #9
def test_equality():
    center = np.array( (0, 0.0, 5) )
    lookat = center + np.array( (0,1,0))
    cam_apple1 = CameraModel.load_camera_simple(fov_x_degrees=90,
                                                eye=center,
                                                lookat=lookat,
                                                name='apple')
    cam_apple2 = CameraModel.load_camera_simple(fov_x_degrees=90,
                                                eye=center,
                                                lookat=lookat,
                                                name='apple')
    cam_orange = CameraModel.load_camera_simple(fov_x_degrees=30,
                                                eye=center,
                                                lookat=lookat,
                                                name='orange')
    assert cam_apple1==cam_apple2
    assert cam_apple1!=cam_orange
    assert not cam_apple1==cam_orange
Example #10
def check_roundtrip_ros_tf(cam_opts):
    cam1 = _build_test_camera(**cam_opts)
    translation, rotation = cam1.get_ROS_tf()
    i = cam1.get_intrinsics_as_bunch()
    cam2 = CameraModel.load_camera_from_ROS_tf( translation=translation,
                                                rotation=rotation,
                                                intrinsics = i,
                                                name = cam1.name)
    assert cam1==cam2
Example #11
def test_getters():
    system1 = make_default_system()
    d = system1.to_dict()
    names1 = list(system1.get_camera_dict().keys())
    names2 = [cd['name'] for cd in d['camera_system']]
    assert set(names1)==set(names2)

    for idx in range(len(names1)):
        cam1 = system1.get_camera( names1[idx] )
        cam2 = CameraModel.from_dict(d['camera_system'][idx])
        assert cam1==cam2
Example #12
    def calc_mean_reproj_error(self, msg):
        ros_cam = CameraModel._from_parts(intrinsics=msg)
        all_ims = []
        for imd in self.db:
            ros_pix = ros_cam.project_3d_to_pixel(imd['cc'], distorted=True)
            d = (ros_pix - imd['pix'])**2
            drows = np.sqrt(np.sum(d, axis=1))
            mean_d = np.mean(drows)
            all_ims.append(mean_d)
        mean_err = np.mean(all_ims)
        return mean_err
Example #13
def check_align(cam_opts):

    cam_orig = _build_test_camera(**cam_opts)
    M_orig = cam_orig.M
    cam_orig = CameraModel.load_camera_from_M( M_orig )
    R1 = np.eye(3)
    R2 = np.zeros((3,3))
    R2[0,1] = 1
    R2[1,0] = 1
    R2[2,2] = -1
    t1 = np.array( (0.0, 0.0, 0.0) )
    t2 = np.array( (0.0, 0.0, 0.1) )
    t3 = np.array( (0.1, 0.0, 0.0) )
    for s in [1.0, 2.0]:
        for R in [R1, R2]:
            for t in [t1, t2, t3]:
                cam_actual = cam_orig.get_aligned_camera( s, R, t )
                M_expected = mcsc_align.align_M( s,R,t, M_orig )
                cam_expected = CameraModel.load_camera_from_M( M_expected )
                assert cam_actual==cam_expected
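
The reference value comes from mcsc_align.align_M, which is not shown here. As a rough sketch of the relationship being tested (an assumption, not the library code): if world points are remapped by the similarity x' = s*R*x + t, the projection matrix must absorb the inverse of that transform.

def align_M_sketch(s, R, t, M):
    # hypothetical stand-in for mcsc_align.align_M
    T = np.eye(4)
    T[:3, :3] = s * np.asarray(R)
    T[:3, 3] = np.asarray(t)
    # pixels are unchanged when M is composed with the inverse world transform
    return np.dot(M, np.linalg.inv(T))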
Example #14
def test_simple_projection():

    # get some 3D points
    pts_3d = _build_points_3d()

    if DRAW:
        fig = plt.figure(figsize=(8,12))
        ax1 = fig.add_subplot(3,1,1, projection='3d')
        ax1.scatter( pts_3d[:,0], pts_3d[:,1], pts_3d[:,2])
        ax1.set_xlabel('X')
        ax1.set_ylabel('Y')
        ax1.set_zlabel('Z')

    # build a camera calibration matrix
    focal_length = 1200
    width, height = 640,480
    R = np.eye(3) # look at +Z
    c = np.array( (9.99, 19.99, 20) )
    M = make_M( focal_length, width, height, R, c)['M']

    # now, project these 3D points into our image plane
    pts_3d_H = np.vstack( (pts_3d.T, np.ones( (1,len(pts_3d))))) # make homog.
    undist_rst_simple = np.dot(M, pts_3d_H) # multiply
    undist_simple = undist_rst_simple[:2,:]/undist_rst_simple[2,:] # project

    if DRAW:
        ax2 = fig.add_subplot(3,1,2)
        ax2.plot( undist_simple[0,:], undist_simple[1,:], 'b.')
        ax2.set_xlim(0,width)
        ax2.set_ylim(height,0)
        ax2.set_title('matrix multiply')

    # build a camera model from our M and project onto image plane
    cam = CameraModel.load_camera_from_M( M, width=width, height=height )
    undist_full = cam.project_3d_to_pixel(pts_3d).T

    if DRAW:
        plot_camera( ax1, cam, scale=10, axes_size=5.0 )
        sz = 20
        x = 5
        y = 8
        z = 19
        ax1.auto_scale_xyz( [x,x+sz], [y,y+sz], [z,z+sz] )

        ax3 = fig.add_subplot(3,1,3)
        ax3.plot( undist_full[0,:], undist_full[1,:], 'b.')
        ax3.set_xlim(0,width)
        ax3.set_ylim(height,0)
        ax3.set_title('pymvg')

    if DRAW:
        plt.show()

    assert np.allclose( undist_full, undist_simple )
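
make_M is a test helper that is not shown here. A minimal sketch of how such a matrix could be assembled (an assumption about the helper, following the standard pinhole model M = K [R | -R c]):

def make_M_sketch(focal_length, width, height, R, center):
    # K puts the principal point at the image center
    K = np.array([[focal_length, 0.0, width / 2.0],
                  [0.0, focal_length, height / 2.0],
                  [0.0, 0.0, 1.0]])
    t = -np.dot(R, center)  # camera center c expressed as a translation
    Rt = np.hstack((R, t[:, np.newaxis]))
    return {'M': np.dot(K, Rt), 'K': K, 'R': R, 't': t}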
Example #15
    def new_data(self):
        with self._lock:
            if (self.translation is None or
                self.rotation is None or
                self.intrinsics is None):
                return
            newcam = CameraModel.load_camera_from_ROS_tf( translation=self.translation,
                                                          rotation=self.rotation,
                                                          intrinsics=self.intrinsics,
                                                          name=self.get_frame_id(),
                                                          )
        self.cam = newcam

        self.draw()
Example #16
def check_distortion_yamlfile_roundtrip(cam_opts):
    """check that roundtrip of camera model to/from a yaml file works"""
    cam = _build_test_camera(**cam_opts)
    fname = tempfile.mktemp(suffix='.yaml')
    cam.save_intrinsics_to_yamlfile(fname)
    try:
        cam2 = CameraModel.load_camera_from_file( fname, extrinsics_required=False )
    finally:
        os.unlink(fname)

    distorted = np.array( [[100.0,100],
                           [100,200],
                           [100,300],
                           [100,400]] )
    orig_undistorted = cam.undistort( distorted )
    reloaded_undistorted = cam2.undistort( distorted )
    assert np.allclose( orig_undistorted, reloaded_undistorted )
Example #17
def test_lookat():

    dist = 5.0

    # build camera
    center_expected = np.array( [10, 5, 20] )
    lookat_expected = center_expected + np.array( [dist, 0, 0] ) # looking in +X
    up_expected     = np.array( [0,  0,  1] )

    f = 300.0 # focal length
    width, height = 640, 480
    cx, cy = width/2.0, height/2.0

    M = np.array( [[ f, 0, cx, 0],
                   [ 0, f, cy, 0],
                   [ 0, 0,  1, 0]])
    cam1 = CameraModel.load_camera_from_M( M, width=width, height=height)
    cam = cam1.get_view_camera(center_expected, lookat_expected, up_expected)
    del cam1

    # check that the extrinsic parameters were what we expected
    (center_actual,lookat_actual,up_actual) = cam.get_view()

    lookdir_expected = normalize( lookat_expected - center_expected )
    lookdir_actual   = normalize( lookat_actual   - center_actual   )

    assert np.allclose( center_actual,  center_expected  )
    assert np.allclose( lookdir_actual, lookdir_expected )
    assert np.allclose( up_actual,      up_expected      )

    # check that the extrinsics work as expected
    pts = np.array([lookat_expected,
                    lookat_expected+up_expected])
    eye_actual = cam.project_3d_to_camera_frame( pts )

    eye_expected = [[0, 0, dist], # camera looks at +Z
                    [0,-1, dist], # with -Y as up
                    ]
    assert np.allclose( eye_actual,      eye_expected      )

    # now check some basics of the projection
    pix_actual = cam.project_3d_to_pixel( pts )

    pix_expected = [[cx,cy], # center pixel on the camera
                    [cx,cy-(f/dist)]]
    assert np.allclose( pix_actual,      pix_expected      )
Example #18
    def generate_camera(self):
        (width,height)=(self.width,self.height)=(640,480)
        center = 1,2,3
        rot_axis = np.array((4,5,6.7))
        rot_axis = rot_axis / np.sum(rot_axis**2)
        rquat = tf.transformations.quaternion_about_axis(0.1, (rot_axis.tolist()))
        rmat,_ = get_rotation_matrix_and_quaternion(rquat)

        parts = make_M( 1234.56, width, height,
                        rmat, center)

        if self.use_distortion:
            dist = [-0.4, .2, 0, 0, 0]
        else:
            dist = [0, 0, 0, 0, 0]

        self.cam = CameraModel.load_camera_from_M(parts['M'],
                                                  width=width,height=height,
                                                  distortion_coefficients=dist)
Example #19
def build_example_system(n=6,z=5.0):
    base = CameraModel.load_camera_default()

    x = np.linspace(0, 2*n, n)
    theta = np.linspace(0, 2*np.pi, n)
    cams = []
    for i in range(n):
        # cameras are spaced parallel to the x axis
        center = np.array( (x[i], 0.0, z) )

        # cameras are looking at +y
        lookat = center + np.array( (0,1,0))

        # camera up direction rotates around the y axis
        up = -np.sin(theta[i]), 0, np.cos(theta[i])

        cam = base.get_view_camera(center,lookat,up)
        cam.name = 'theta: %.0f'%( np.degrees(theta[i]) )
        cams.append(cam)

    system = MultiCameraSystem(cams)
    return system
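
Such a system can be visualised in the same way as Example #27 below; a short sketch, assuming plot_camera is importable from pymvg.plot_utils:

import matplotlib.pyplot as plt
from pymvg.plot_utils import plot_camera  # import path assumed

def show_example_system():
    system = build_example_system(n=6, z=5.0)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    for name in system.get_names():
        plot_camera(ax, system.get_camera(name), scale=0.5)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    plt.show()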
Example #20
def test_problem_M():
    """check a particular M which previously caused problems"""
    # This M (found by the DLT method) was causing me problems.
    d = {'width': 848,
         'name': 'camera',
         'height': 480}
    M =  np.array([[ -1.70677031e+03,  -4.10373295e+03,  -3.88568028e+02, 6.89034515e+02],
                   [ -6.19019195e+02,  -1.01292091e+03,  -2.67534989e+03, 4.51847857e+02],
                   [ -4.52548832e+00,  -3.78900498e+00,  -7.35860226e-01, 1.00000000e+00]])
    cam = CameraModel.load_camera_from_M( M, **d)

    #assert np.allclose( cam.M, M) # we don't expect this since the intrinsic matrix may not be scaled

    verts = np.array([[ 0.042306,  0.015338,  0.036328, 1.0],
                      [ 0.03323,   0.030344,  0.041542, 1.0],
                      [ 0.036396,  0.026464,  0.052408, 1.0]])

    actual = cam.project_3d_to_pixel(verts[:,:3])

    expectedh = np.dot( M, verts.T )
    expected = (expectedh[:2]/expectedh[2]).T
    assert np.allclose( expected, actual )
Example #21
def check_bagfile_roundtrip(cam_opts):
    """check that roundtrip of camera model to/from a bagfile works"""
    cam = _build_test_camera(**cam_opts)
    fname = tempfile.mktemp(suffix='.bag')
    try:
        with open(fname,mode='wb') as fd:
            cam.save_to_bagfile(fd, roslib)

        with open(fname,mode='r') as fd:
            bag = rosbag.Bag(fd, 'r')
            cam2 = CameraModel.load_camera_from_opened_bagfile( bag )
    finally:
        os.unlink(fname)

    verts = np.array([[ 0.042306,  0.015338,  0.036328],
                      [ 0.03323,   0.030344,  0.041542],
                      [ 0.03323,   0.030344,  0.041542],
                      [ 0.03323,   0.030344,  0.041542],
                      [ 0.036396,  0.026464,  0.052408]])

    expected =  cam.project_3d_to_pixel(verts)
    actual   = cam2.project_3d_to_pixel(verts)
    assert np.allclose( expected, actual )
Example #22
    def from_dict(cls, d):
        cam_dict_list = d['camera_system']
        cams = [CameraModel.from_dict(cd) for cd in cam_dict_list]
        return MultiCameraSystem(cameras=cams)
Example #23
    def from_dict(cls, d):
        cam_dict_list = d['camera_system']
        cams = [CameraModel.from_dict(cd) for cd in cam_dict_list]
        return MultiCameraSystem( cameras=cams )
Example #24
    def from_mcsc(cls, dirname):
        '''create MultiCameraSystem from output directory of MultiCamSelfCal'''

        # FIXME: This is a bit convoluted because it's been converted
        # from multiple layers of internal code. It should really be
        # simplified and cleaned up.

        do_normalize_pmat = True

        all_Pmat = {}
        all_Res = {}
        all_K = {}
        all_distortion = {}

        opj = os.path.join

        with open(opj(dirname, 'camera_order.txt'), mode='r') as fd:
            cam_ids = fd.read().strip().split('\n')

        with open(os.path.join(dirname, 'Res.dat'), 'r') as res_fd:
            for i, cam_id in enumerate(cam_ids):
                fname = 'camera%d.Pmat.cal' % (i + 1)
                pmat = np.loadtxt(opj(dirname, fname))  # 3 rows x 4 columns
                if do_normalize_pmat:
                    pmat_orig = pmat
                    pmat = normalize_M(pmat)
                all_Pmat[cam_id] = pmat
                all_Res[cam_id] = map(int, res_fd.readline().split())

        # load non linear parameters
        rad_files = [f for f in os.listdir(dirname) if f.endswith('.rad')]
        for cam_id_enum, cam_id in enumerate(cam_ids):
            filename = os.path.join(dirname,
                                    'basename%d.rad' % (cam_id_enum + 1, ))
            if os.path.exists(filename):
                K, distortion = parse_radfile(filename)
                all_K[cam_id] = K
                all_distortion[cam_id] = distortion
            else:
                if len(rad_files):
                    raise RuntimeError(
                        '.rad files present but none named "%s"' % filename)
                warnings.warn('no non-linear data (e.g. radial distortion) '
                              'in calibration for %s' % cam_id)
                all_K[cam_id] = None
                all_distortion[cam_id] = None

        cameras = []
        for cam_id in cam_ids:
            w, h = all_Res[cam_id]
            Pmat = all_Pmat[cam_id]
            M = Pmat[:, :3]
            K, R = my_rq(M)
            if not is_rotation_matrix(R):
                # RQ may return left-handed rotation matrix. Make right-handed.
                R2 = -R
                K2 = -K
                assert np.allclose(np.dot(K2, R2), np.dot(K, R))
                K, R = K2, R2

            P = np.zeros((3, 4))
            P[:3, :3] = K
            KK = all_K[cam_id]  # from rad file or None
            distortion = all_distortion[cam_id]

            # (ab)use PyMVG's rectification to do coordinate transform
            # for MCSC's undistortion.

            # The intrinsic parameters used for 3D -> 2D.
            ex = P[0, 0]
            bx = P[0, 2]
            Sx = P[0, 3]
            ey = P[1, 1]
            by = P[1, 2]
            Sy = P[1, 3]

            if KK is None:
                rect = np.eye(3)
                KK = P[:, :3]
            else:
                # Parameters used to define undistortion coordinates.
                fx = KK[0, 0]
                fy = KK[1, 1]
                cx = KK[0, 2]
                cy = KK[1, 2]

                rect = np.array([[ex / fx, 0, (bx + Sx - cx) / fx],
                                 [0, ey / fy, (by + Sy - cy) / fy],
                                 [0, 0, 1]]).T

            if distortion is None:
                distortion = np.zeros((5, ))

            C = center(Pmat)
            rot = R
            t = -np.dot(rot, C)[:, 0]

            d = {
                'width': w,
                'height': h,
                'P': P,
                'K': KK,
                'R': rect,
                'translation': t,
                'Q': rot,
                'D': distortion,
                'name': cam_id,
            }
            cam = CameraModel.from_dict(d)
            cameras.append(cam)
        return MultiCameraSystem(cameras=cameras)
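
The center() helper used above is not shown. A sketch of what it presumably computes (the camera center is the null space of P, or equivalently C = -M^{-1} p4 for P = [M | p4]):

def center_sketch(P):
    # hypothetical stand-in for center(): returns the camera center as a
    # column vector, matching the usage t = -np.dot(rot, C)[:, 0] above
    M = P[:, :3]
    p4 = P[:, 3]
    C = -np.dot(np.linalg.inv(M), p4)
    return C[:, np.newaxis]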
Example #25
    def from_pymvg_str(cls, buf):
        d = json.loads(buf)
        assert d['__pymvg_file_version__'] == '1.0'
        cam_dict_list = d['camera_system']
        cams = [CameraModel.from_dict(cd) for cd in cam_dict_list]
        return MultiCameraSystem(cameras=cams)
Example #26
def get_sample_camera():
    yaml_str = """header:
  seq: 0
  stamp:
    secs: 0
    nsecs: 0
  frame_id: ''
height: 494
width: 659
distortion_model: plumb_bob
D:
  - -0.331416226762
  - 0.143584747016
  - 0.00314558656669
  - -0.00393597842852
  - 0.0
K:
  - 516.385667641
  - 0.0
  - 339.167079537
  - 0.0
  - 516.125799368
  - 227.379935241
  - 0.0
  - 0.0
  - 1.0
R:
  - 1.0
  - 0.0
  - 0.0
  - 0.0
  - 1.0
  - 0.0
  - 0.0
  - 0.0
  - 1.0
P:
  - 444.369750977
  - 0.0
  - 337.107817516
  - 0.0
  - 0.0
  - 474.186859131
  - 225.062742824
  - 0.0
  - 0.0
  - 0.0
  - 1.0
  - 0.0
binning_x: 0
binning_y: 0
roi:
  x_offset: 0
  y_offset: 0
  height: 0
  width: 0
  do_rectify: False"""
    d = yaml.safe_load(yaml_str)
    cam1 = CameraModel.from_dict(d,extrinsics_required=False)

    eye = (10,20,30)
    lookat = (11,20,30)
    up = (0,-1,0)
    cam = cam1.get_view_camera(eye, lookat, up)

    return cam
Example #27
    for idx in range(len(calib_id)):
        print ""
        print "~~~~~~~~~~~~~~~~~~~~~~~~~~"
        print "Importing {0}".format(calib_id[idx])

        pmat = calib[idx][0]
        distortion = calib[idx][1]
        name = calib_id[idx]
        width = cam_settings[name]["f7"]["width"]
        height = cam_settings[name]["f7"]["height"]

        print(pmat)
        print(distortion)

        camera = CameraModel.load_camera_from_M(pmat, width=width, height=height, name=name,
                                                distortion_coefficients=distortion)

        cameras.append(camera)

    system = MultiCameraSystem(cameras)
    system.save_to_pymvg_file(join(CALIB, "camera_system.json"))

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    # plot_system(ax, system)
    for name in system.get_names():
        plot_camera(ax, system.get_camera(name))

    ax.set_xlabel('x')
    ax.set_ylabel('y')
Example #28
    def launch_calibration(self, method, vdisp):
        def _pump_ui():
            if os.environ.get('RUNNING_NOSE') != '1':
                Gtk.main_iteration_do(False)  #run once, no block
                while Gtk.events_pending():  #run remaining
                    Gtk.main_iteration()

        orig_data = []
        for row in self.point_store:
            if row[VDISP] == vdisp:
                orig_data.append(
                    [row[TEXU], row[TEXV], row[DISPLAYX], row[DISPLAYY]])
        orig_data = np.array(orig_data)
        uv = orig_data[:, :2]
        XYZ = self.geom.model.texcoord2worldcoord(uv)
        xy = orig_data[:, 2:4]

        assert method in EXTRINSIC_CALIBRATION_METHODS

        if method in ('DLT', 'RANSAC DLT'):
            ransac = method.startswith('RANSAC')
            r = dlt.dlt(XYZ, xy, ransac=ransac)
            c1 = CameraModel.load_camera_from_M(
                r['pmat'],
                width=self.dsc.width,
                height=self.dsc.height,
            )

            _pump_ui()

            if 0:
                c2 = c1.get_flipped_camera()

                # slightly hacky way to find best camera direction
                obj = self.geom.model.get_center()

                d1 = np.sqrt(np.sum((c1.get_lookat() - obj)**2))
                d2 = np.sqrt(np.sum((c2.get_lookat() - obj)**2))
                if d1 < d2:
                    #print 'using normal camera'
                    camera = c1
                else:
                    print('using flipped camera')
                    camera = c2
            elif 1:
                farr = self.geom.compute_for_camera_view(c1,
                                                         what='texture_coords')

                _pump_ui()

                u = farr[:, :, 0]
                good = ~np.isnan(u)
                npix = np.sum(np.nonzero(good))
                if npix == 0:
                    print('using flipped camera, otherwise npix = 0')
                    camera = c1.get_flipped_camera()
                else:
                    camera = c1
            else:
                camera = c1
        elif method in ['extrinsic only', 'iterative extrinsic only']:
            assert self.display_intrinsic_cam is not None, 'need intrinsic calibration'

            di = self.dsc.get_display_info()

            mirror = None
            if 'virtualDisplays' in di:
                found = False
                for d in di['virtualDisplays']:
                    if d['id'] == vdisp:
                        found = True
                        mirror = d.get('mirror', None)
                        break
                assert found

            if mirror is not None:
                cami = self.display_intrinsic_cam.get_mirror_camera(
                    axis=mirror)
            else:
                cami = self.display_intrinsic_cam

            _pump_ui()

            if method == 'iterative extrinsic only':
                result = fit_extrinsics_iterative(cami, XYZ, xy)
            else:
                result = fit_extrinsics(cami, XYZ, xy)

            _pump_ui()

            c1 = result['cam']
            if 1:
                farr = self.geom.compute_for_camera_view(c1,
                                                         what='texture_coords')

                _pump_ui()

                u = farr[:, :, 0]
                good = ~np.isnan(u)
                npix = np.sum(np.nonzero(good))
                if npix == 0:
                    print('using flipped camera, otherwise npix = 0')
                    camera = c1.get_flipped_camera()
                else:
                    camera = c1
            else:
                camera = c1
            del result
        else:
            raise ValueError('unknown calibration method %r' % method)

        _pump_ui()

        projected_points = camera.project_3d_to_pixel(XYZ)
        reproj_error = np.sum((projected_points - xy)**2, axis=1)
        mre = np.mean(reproj_error)

        for row in self.vdisp_store:
            if row[VS_VDISP] == vdisp:
                row[VS_MRE] = mre
                row[VS_CAMERA_OBJECT] = camera

        _pump_ui()

        self.update_bg_image()
Example #29
def fit_extrinsics(base_cam,X3d,x2d,geom=None):
    assert x2d.ndim==2
    assert x2d.shape[1]==2

    assert X3d.ndim==2
    assert X3d.shape[1]==3

    if 0:
        fname = 'x2d_'+base_cam.name + '.png'
        fname = fname.replace('/','-')
        save_point_image(fname, (base_cam.width, base_cam.height), x2d )
        #print 'saved pt debug image to',fname

    ipts = np.array(x2d,dtype=np.float64)
    opts = np.array(X3d,dtype=np.float64)

    K = np.array(base_cam.get_K(), dtype=np.float64)
    dist_coeffs = np.array( base_cam.get_D(), dtype=np.float64)

    retval, rvec, tvec = cv2.solvePnP( opts, ipts,
                                       K,
                                       dist_coeffs)
    assert retval

    # we get two possible cameras back, figure out which one has objects in front
    rmata = rodrigues2matrix( rvec )
    intrinsics = base_cam.to_dict()
    del intrinsics['Q']
    del intrinsics['translation']
    del intrinsics['name']
    d = intrinsics.copy()
    d['translation'] = tvec
    d['Q'] = rmata
    d['name'] = base_cam.name
    cam_model_a = CameraModel.from_dict(d)
    mza = np.mean(cam_model_a.project_3d_to_camera_frame(X3d)[:,2])

    # don't bother with second - it does not have a valid rotation matrix

    if 1:
        founda = cam_model_a.project_3d_to_pixel(X3d)
        erra = np.mean(np.sqrt(np.sum((founda-x2d)**2, axis=1)))

        cam_model = cam_model_a

    if 1:
        found = cam_model.project_3d_to_pixel(X3d)
        orig = x2d
        reproj_error = np.sqrt(np.sum((found-orig)**2, axis=1))
        cum = np.mean(reproj_error)

        mean_cam_z = np.mean(cam_model.project_3d_to_camera_frame(X3d)[:,2])

    if (mean_cam_z < 0 or cum > 20) and 0:
        # hmm, we have a flipped view of the camera.
        print('-'*80, 'HACK ON')
        center, lookat, up = cam_model.get_view()
        #cam2 = cam_model.get_view_camera( -center, lookat, -up )
        cam2 = cam_model.get_view_camera( center, lookat, up )
        cam2.name=base_cam.name
        return fit_extrinsics_iterative(cam2,X3d,x2d, geom=geom)

    result = dict(cam=cam_model,
                  mean_err=cum,
                  mean_cam_z = mean_cam_z,
                  )
    return result
Example #30
def test_simple_camera():
    center = np.array( (0, 0.0, 5) )
    lookat = center + np.array( (0,1,0))
    cam = CameraModel.load_camera_simple(fov_x_degrees=90,
                                         eye=center,
                                         lookat=lookat)
Example #31
def test_pymvg_roundtrip():
    from pymvg.camera_model import CameraModel
    from pymvg.multi_camera_system import MultiCameraSystem
    from flydra_core.reconstruct import Reconstructor

    # ----------- with no distortion ------------------------
    center1 = np.array( (0, 0.0, 5) )
    center2 = np.array( (1, 0.0, 5) )

    lookat = np.array( (0,1,0))

    cam1 = CameraModel.load_camera_simple(fov_x_degrees=90,
                                          name='cam1',
                                          eye=center1,
                                          lookat=lookat)
    cam2 = CameraModel.load_camera_simple(fov_x_degrees=90,
                                          name='cam2',
                                          eye=center2,
                                          lookat=lookat)
    mvg = MultiCameraSystem( cameras=[cam1, cam2] )
    R = Reconstructor.from_pymvg(mvg)
    mvg2 = R.convert_to_pymvg()

    cam_ids = ['cam1','cam2']
    for distorted in [True,False]:
        for cam_id in cam_ids:
            v1 = mvg.find2d(  cam_id, lookat, distorted=distorted )
            v2 = R.find2d(    cam_id, lookat, distorted=distorted )
            v3 = mvg2.find2d( cam_id, lookat, distorted=distorted )
            assert np.allclose(v1,v2)
            assert np.allclose(v1,v3)

    # ----------- with distortion ------------------------
    cam1dd = cam1.to_dict()
    cam1dd['D'] = (0.1, 0.2, 0.3, 0.4, 0.0)
    cam1d = CameraModel.from_dict(cam1dd)

    cam2dd = cam2.to_dict()
    cam2dd['D'] = (0.11, 0.21, 0.31, 0.41, 0.0)
    cam2d = CameraModel.from_dict(cam2dd)

    mvgd = MultiCameraSystem( cameras=[cam1d, cam2d] )
    Rd = Reconstructor.from_pymvg(mvgd)
    mvg2d = Rd.convert_to_pymvg()
    cam_ids = ['cam1','cam2']
    for distorted in [True,False]:
        for cam_id in cam_ids:
            v1 = mvgd.find2d(  cam_id, lookat, distorted=distorted )
            v2 = Rd.find2d(    cam_id, lookat, distorted=distorted )
            v3 = mvg2d.find2d( cam_id, lookat, distorted=distorted )
            assert np.allclose(v1,v2)
            assert np.allclose(v1,v3)

    # ------------ with distortion at different focal length ------
    mydir = os.path.dirname(__file__)
    caldir = os.path.join(mydir,'sample_calibration')
    print(mydir)
    print(caldir)
    R3 = Reconstructor(caldir)
    mvg3 = R3.convert_to_pymvg()
    #R4 = Reconstructor.from_pymvg(mvg3)
    mvg3b = MultiCameraSystem.from_mcsc( caldir )

    for distorted in [True,False]:
        for cam_id in R3.cam_ids:
            v1 = R3.find2d(   cam_id, lookat, distorted=distorted )
            v2 = mvg3.find2d( cam_id, lookat, distorted=distorted )
            #v3 = R4.find2d(   cam_id, lookat, distorted=distorted )
            v4 = mvg3b.find2d( cam_id, lookat, distorted=distorted )
            assert np.allclose(v1,v2)
            #assert np.allclose(v1,v3)
            assert np.allclose(v1,v4)
Example #32
def test_no_duplicate_names():
    cam1a = CameraModel.load_camera_simple(name='cam1')
    cam1b = CameraModel.load_camera_simple(name='cam1')
    cams = [cam1a,cam1b]
    nose.tools.assert_raises(ValueError, MultiCameraSystem, cams)
Example #33
def get_sample_camera():
    yaml_str = """header:
  seq: 0
  stamp:
    secs: 0
    nsecs: 0
  frame_id: ''
height: 494
width: 659
distortion_model: plumb_bob
D:
  - -0.331416226762
  - 0.143584747016
  - 0.00314558656669
  - -0.00393597842852
  - 0.0
K:
  - 516.385667641
  - 0.0
  - 339.167079537
  - 0.0
  - 516.125799368
  - 227.379935241
  - 0.0
  - 0.0
  - 1.0
R:
  - 1.0
  - 0.0
  - 0.0
  - 0.0
  - 1.0
  - 0.0
  - 0.0
  - 0.0
  - 1.0
P:
  - 444.369750977
  - 0.0
  - 337.107817516
  - 0.0
  - 0.0
  - 474.186859131
  - 225.062742824
  - 0.0
  - 0.0
  - 0.0
  - 1.0
  - 0.0
binning_x: 0
binning_y: 0
roi:
  x_offset: 0
  y_offset: 0
  height: 0
  width: 0
  do_rectify: False"""
    d = yaml.safe_load(yaml_str)
    cam1 = CameraModel.from_dict(d,extrinsics_required=False)

    eye = (10,20,30)
    lookat = (11,20,30)
    up = (0,-1,0)
    cam = cam1.get_view_camera(eye, lookat, up)

    return cam
Example #34
    def launch_calibration(self, method, vdisp ):

        def _pump_ui():
            if os.environ.get('RUNNING_NOSE') != '1':
                Gtk.main_iteration_do(False) #run once, no block
                while Gtk.events_pending():  #run remaining
                    Gtk.main_iteration()

        orig_data = []
        for row in self.point_store:
            if row[VDISP]==vdisp:
                orig_data.append( [ row[TEXU], row[TEXV], row[DISPLAYX], row[DISPLAYY] ] )
        orig_data = np.array(orig_data)
        uv = orig_data[:,:2]
        XYZ = self.geom.model.texcoord2worldcoord(uv)
        xy = orig_data[:,2:4]

        assert method in EXTRINSIC_CALIBRATION_METHODS

        if method in ('DLT','RANSAC DLT'):
            ransac = method.startswith('RANSAC')
            r = dlt.dlt(XYZ, xy, ransac=ransac )
            c1 = CameraModel.load_camera_from_M( r['pmat'],
                                                       width=self.dsc.width,
                                                       height=self.dsc.height,
                                                       )

            _pump_ui()

            if 0:
                c2 = c1.get_flipped_camera()

                # slightly hacky way to find best camera direction
                obj = self.geom.model.get_center()

                d1 = np.sqrt( np.sum( (c1.get_lookat() - obj)**2 ))
                d2 = np.sqrt( np.sum( (c2.get_lookat() - obj)**2 ))
                if d1 < d2:
                    #print 'using normal camera'
                    camera = c1
                else:
                    print('using flipped camera')
                    camera = c2
            elif 1:
                farr = self.geom.compute_for_camera_view( c1,
                                                          what='texture_coords' )

                _pump_ui()

                u = farr[:,:,0]
                good = ~np.isnan( u )
                npix=np.sum( np.nonzero( good ) )
                if npix==0:
                    print('using flipped camera, otherwise npix = 0')
                    camera = c1.get_flipped_camera()
                else:
                    camera = c1
            else:
                camera = c1
        elif method in ['extrinsic only','iterative extrinsic only']:
            assert self.display_intrinsic_cam is not None, 'need intrinsic calibration'

            di = self.dsc.get_display_info()

            mirror = None
            if 'virtualDisplays' in di:
                found = False
                for d in di['virtualDisplays']:
                    if d['id'] == vdisp:
                        found = True
                        mirror = d.get('mirror',None)
                        break
                assert found


            if mirror is not None:
                cami = self.display_intrinsic_cam.get_mirror_camera(axis=mirror)
            else:
                cami = self.display_intrinsic_cam

            _pump_ui()

            if method == 'iterative extrinsic only':
                result = fit_extrinsics_iterative(cami,XYZ,xy)
            else:
                result = fit_extrinsics(cami,XYZ,xy)

            _pump_ui()

            c1 = result['cam']
            if 1:
                farr = self.geom.compute_for_camera_view( c1,
                                                          what='texture_coords' )

                _pump_ui()

                u = farr[:,:,0]
                good = ~np.isnan( u )
                npix=np.sum( np.nonzero( good ) )
                if npix==0:
                    print('using flipped camera, otherwise npix = 0')
                    camera = c1.get_flipped_camera()
                else:
                    camera = c1
            else:
                camera = c1
            del result
        else:
            raise ValueError('unknown calibration method %r'%method)

        _pump_ui()

        projected_points = camera.project_3d_to_pixel( XYZ )
        reproj_error = np.sum( (projected_points - xy)**2, axis=1)
        mre = np.mean(reproj_error)

        for row in self.vdisp_store:
            if row[VS_VDISP]==vdisp:
                row[VS_MRE] = mre
                row[VS_CAMERA_OBJECT] = camera

        _pump_ui()

        self.update_bg_image()
Example #35
def test_basic_dlt():
    results = dlt.dlt(XYZ, xy, ransac=False)
    assert results['mean_reprojection_error'] < 6.0
    c1 = CameraModel.load_camera_from_M(  results['pmat']  )
Example #36
    def from_mcsc(cls, dirname ):
        '''create MultiCameraSystem from output directory of MultiCamSelfCal'''

        # FIXME: This is a bit convoluted because it's been converted
        # from multiple layers of internal code. It should really be
        # simplified and cleaned up.

        do_normalize_pmat=True

        all_Pmat = {}
        all_Res = {}
        all_K = {}
        all_distortion = {}

        opj = os.path.join

        with open(opj(dirname,'camera_order.txt'),mode='r') as fd:
            cam_ids = fd.read().strip().split('\n')

        with open(os.path.join(dirname,'Res.dat'),'r') as res_fd:
            for i, cam_id in enumerate(cam_ids):
                fname = 'camera%d.Pmat.cal'%(i+1)
                pmat = np.loadtxt(opj(dirname,fname)) # 3 rows x 4 columns
                if do_normalize_pmat:
                    pmat_orig = pmat
                    pmat = normalize_M(pmat)
                all_Pmat[cam_id] = pmat
                all_Res[cam_id] = map(int,res_fd.readline().split())

        # load non linear parameters
        rad_files = [ f for f in os.listdir(dirname) if f.endswith('.rad') ]
        for cam_id_enum, cam_id in enumerate(cam_ids):
            filename = os.path.join(dirname,
                                    'basename%d.rad'%(cam_id_enum+1,))
            if os.path.exists(filename):
                K, distortion = parse_radfile(filename)
                all_K[cam_id] = K
                all_distortion[cam_id] = distortion
            else:
                if len(rad_files):
                    raise RuntimeError(
                        '.rad files present but none named "%s"'%filename)
                warnings.warn('no non-linear data (e.g. radial distortion) '
                              'in calibration for %s'%cam_id)
                all_K[cam_id] = None
                all_distortion[cam_id] = None

        cameras = []
        for cam_id in cam_ids:
            w,h = all_Res[cam_id]
            Pmat = all_Pmat[cam_id]
            M = Pmat[:,:3]
            K,R = my_rq(M)
            if not is_rotation_matrix(R):
                # RQ may return left-handed rotation matrix. Make right-handed.
                R2 = -R
                K2 = -K
                assert np.allclose(np.dot(K2,R2), np.dot(K,R))
                K,R = K2,R2

            P = np.zeros((3,4))
            P[:3,:3] = K
            KK = all_K[cam_id] # from rad file or None
            distortion = all_distortion[cam_id]

            # (ab)use PyMVG's rectification to do coordinate transform
            # for MCSC's undistortion.

            # The intrinsic parameters used for 3D -> 2D.
            ex = P[0,0]
            bx = P[0,2]
            Sx = P[0,3]
            ey = P[1,1]
            by = P[1,2]
            Sy = P[1,3]

            if KK is None:
                rect = np.eye(3)
                KK = P[:,:3]
            else:
                # Parameters used to define undistortion coordinates.
                fx = KK[0,0]
                fy = KK[1,1]
                cx = KK[0,2]
                cy = KK[1,2]

                rect = np.array([[ ex/fx,     0, (bx+Sx-cx)/fx ],
                                 [     0, ey/fy, (by+Sy-cy)/fy ],
                                 [     0,     0,       1       ]]).T

            if distortion is None:
                distortion = np.zeros((5,))

            C = center(Pmat)
            rot = R
            t = -np.dot(rot, C)[:,0]

            d = {'width':w,
                 'height':h,
                 'P':P,
                 'K':KK,
                 'R':rect,
                 'translation':t,
                 'Q':rot,
                 'D':distortion,
                 'name':cam_id,
                 }
            cam = CameraModel.from_dict(d)
            cameras.append( cam )
        return MultiCameraSystem( cameras=cameras )
Example #37
    def from_pymvg_str(cls, buf):
        d = json.loads(buf)
        assert d['__pymvg_file_version__']=='1.0'
        cam_dict_list = d['camera_system']
        cams = [CameraModel.from_dict(cd) for cd in cam_dict_list]
        return MultiCameraSystem( cameras=cams )
Example #38
def fit_extrinsics(base_cam,X3d,x2d,geom=None):
    assert x2d.ndim==2
    assert x2d.shape[1]==2

    assert X3d.ndim==2
    assert X3d.shape[1]==3

    if 0:
        fname = 'x2d_'+base_cam.name + '.png'
        fname = fname.replace('/','-')
        save_point_image(fname, (base_cam.width, base_cam.height), x2d )
        #print 'saved pt debug image to',fname

    ipts = np.array(x2d,dtype=np.float64)
    opts = np.array(X3d,dtype=np.float64)

    K = np.array(base_cam.get_K(), dtype=np.float64)
    dist_coeffs = np.array( base_cam.get_D(), dtype=np.float64)

    retval, rvec, tvec = cv2.solvePnP( opts, ipts,
                                       K,
                                       dist_coeffs)
    assert retval

    # we get two possible cameras back, figure out which one has objects in front
    rmata = rodrigues2matrix( rvec )
    intrinsics = base_cam.to_dict()
    del intrinsics['Q']
    del intrinsics['translation']
    del intrinsics['name']
    d = intrinsics.copy()
    d['translation'] = tvec
    d['Q'] = rmata
    d['name'] = base_cam.name
    cam_model_a = CameraModel.from_dict(d)
    mza = np.mean(cam_model_a.project_3d_to_camera_frame(X3d)[:,2])

    # don't bother with second - it does not have a valid rotation matrix

    if 1:
        founda = cam_model_a.project_3d_to_pixel(X3d)
        erra = np.mean(np.sqrt(np.sum((founda-x2d)**2, axis=1)))

        cam_model = cam_model_a

    if 1:
        found = cam_model.project_3d_to_pixel(X3d)
        orig = x2d
        reproj_error = np.sqrt(np.sum((found-orig)**2, axis=1))
        cum = np.mean(reproj_error)

        mean_cam_z = np.mean(cam_model.project_3d_to_camera_frame(X3d)[:,2])

    if (mean_cam_z < 0 or cum > 20) and 0:
        # hmm, we have a flipped view of the camera.
        print('-'*80, 'HACK ON')
        center, lookat, up = cam_model.get_view()
        #cam2 = cam_model.get_view_camera( -center, lookat, -up )
        cam2 = cam_model.get_view_camera( center, lookat, up )
        cam2.name=base_cam.name
        return fit_extrinsics_iterative(cam2,X3d,x2d, geom=geom)

    result = dict(cam=cam_model,
                  mean_err=cum,
                  mean_cam_z = mean_cam_z,
                  )
    return result