Code Example #1
File: robot.py Project: rongrong1314/Visual_SLAM
(The snippets on this page assume import numpy as np, import math as m, import pickle as pkl, plus the project's quat and util helper modules.)
    def wld2cam(self, head, neck, dist, pw):
        # Robot pose
        Rr2w = quat.qua_to_rot(self.q)
        tr2w = self.pos

        # Transformation from robot base to world
        Tr2w = np.hstack((Rr2w, tr2w.reshape(3, 1)))
        Tr2w = np.vstack((Tr2w, np.array([0., 0., 0., 1.])))

        # Transformation from the robot head to the base
        Th2r = np.array([[1., 0., 0., 0.], [0., 1., 0., 0.],
                         [0., 0., 1., dist['g2com'] + dist['com2h']],
                         [0., 0., 0., 1.]])

        # Transformation from camera to head = R_yaw * R_pitch * R_trans
        Tcam2h = np.dot(
            np.dot(
                np.array([[m.cos(neck), -m.sin(neck), 0., 0.],
                          [m.sin(neck), m.cos(neck), 0., 0.], [0., 0., 1., 0.],
                          [0., 0., 0., 1.]]),
                np.array([[m.cos(head), 0., m.sin(head), 0.], [0., 1., 0., 0.],
                          [-m.sin(head), 0., m.cos(head), 0.],
                          [0., 0., 0., 1.]])),
            np.array([[1., 0., 0., 0.], [0., 1., 0., 0.],
                      [0., 0., 1., dist['h2cam']], [0., 0., 0., 1.]]))

        # Compose the camera-to-world transform, then map the world point into the camera frame
        Tcam2w = np.dot(Tr2w, np.dot(Th2r, Tcam2h))
        pcam = np.dot(np.linalg.inv(Tcam2w), pw)

        return pcam
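A hypothetical call of the method above (not repo code): robot is an assumed instance, the dist keys come straight from the snippet, and the numeric values are the hard-coded heights that appear in Code Example #3. pw must be a homogeneous 4-vector, since Tcam2w is 4x4.

# Placeholder values; g2com/com2h/h2cam are the keys wld2cam reads
dist = {'g2com': 0.93, 'com2h': 0.33, 'h2cam': 0.07}
pw = np.array([2.0, 0.5, 1.0, 1.0])  # world point in homogeneous coordinates
pcam = robot.wld2cam(head=0.0, neck=0.0, dist=dist, pw=pw)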
Code Example #2
def fun(params,
        camera_params,
        n_cameras,
        n_points,
        camera_indices,
        points_indices,
        points_2d,
        K=None):
    points_3d = params.reshape((n_points, 3))
    points_proj = np.zeros(points_2d.shape)
    for i in range(len(points_indices)):
        cam_ind = camera_indices[i]
        p_ind = points_indices[i]
        p3d = points_3d[p_ind]
        # homogeneous 3-D point
        homo_X = np.hstack((p3d, np.ones(1)))
        # rotation
        vecw2c = camera_params[cam_ind, 0:3]
        quatw2c = quat.exp_qua(
            np.array([0, vecw2c[0], vecw2c[1], vecw2c[2]]).reshape(-1, 4))
        Rw2c = quat.qua_to_rot(quatw2c)
        Tw2c = camera_params[cam_ind, 3:6]
        Hw2c = util.rot_trans_to_homo(Rw2c, Tw2c)
        pred = np.dot(np.dot(K, Hw2c[0:3, :]), homo_X)
        pred = pred / pred[2]
        points_proj[i, :] = pred[:2]
    # Vectorized alternative:
    # points_proj = project(points_3d[points_indices], camera_params[camera_indices], K)
    return (points_proj - points_2d).ravel()
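fun returns a flat residual vector, which is the shape SciPy's least-squares driver expects. A minimal sketch of wiring it up (an assumption, not repo code; K must be the 3x3 intrinsics matrix, since the None default would crash inside the loop):

from scipy.optimize import least_squares

# Only the 3-D points are packed into the optimization variable; the camera
# parameters are held fixed and passed through args.
x0 = points_3d.ravel()
res = least_squares(fun, x0,
                    args=(camera_params, n_cameras, n_points,
                          camera_indices, points_indices, points_2d),
                    kwargs={'K': K}, verbose=2)
points_3d_refined = res.x.reshape((n_points, 3))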
Code Example #3
def Tcam2w(rob, head, neck):
    # Robot pose
    Rr2w = quat.qua_to_rot(rob.q)
    tr2w = (rob.pos).reshape(-1, 3)

    # Transformation from robot base to world
    Tr2w = np.hstack((Rr2w, tr2w.T))
    Tr2w = np.vstack((Tr2w, np.array([0, 0, 0, 1]).reshape(-1, 4)))

    # Transformation from the robot head to the base
    Th2r = np.array([[1., 0., 0., 0.], [0., 1., 0., 0.],
                     [0., 0., 1., 0.93 + 0.33],  # g2com + com2h, hard-coded
                     [0., 0., 0., 1.]])

    # Transformation from camera to head = R_yaw * R_pitch * R_trans
    Tcam2h = np.dot(
        np.dot(
            np.array([[m.cos(neck), -m.sin(neck), 0., 0.],
                      [m.sin(neck), m.cos(neck), 0., 0.], [0., 0., 1., 0.],
                      [0., 0., 0., 1.]]),
            np.array([[m.cos(head), 0., m.sin(head), 0.], [0., 1., 0., 0.],
                      [-m.sin(head), 0., m.cos(head), 0.], [0., 0., 0., 1.]])),
        np.array([[1., 0., 0., 0.], [0., 1., 0., 0.],
                  [0., 0., 1., 0.07],  # h2cam, hard-coded
                  [0., 0., 0., 1.]]))

    # Compose local-to-global: camera -> head -> base -> world
    T_cam2w = np.dot(Tr2w, np.dot(Th2r, Tcam2h))
    # Left-multiply by a fixed axis permutation (note the transpose; the
    # untransposed variant is kept below, commented out)
    T_cam2w = np.dot(
        np.array([[0, -1, 0, 0], [0, 0, -1, 0], [1, 0, 0, 0], [0, 0, 0, 1]]).T,
        T_cam2w)
    # T_cam2w = np.dot(np.array([[0, -1, 0, 0],[0, 0, -1, 0],[1, 0, 0, 0], [0, 0, 0, 1]]), T_cam2w)
    return T_cam2w
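The final left-multiplication looks like the usual axis swap between an optical frame (z forward, x right, y down) and a body frame (x forward, y left, z up); that reading is an inference, not stated in the repo. A standalone sanity check that the 3x3 block is at least a proper rotation:

import numpy as np

# 3x3 block of the permutation matrix used in T_cam2w above
P = np.array([[0., -1., 0.],
              [0., 0., -1.],
              [1., 0., 0.]])
assert np.allclose(P @ P.T, np.eye(3))    # rows are orthonormal
assert np.isclose(np.linalg.det(P), 1.0)  # proper rotation, not a reflection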
Code Example #4
File: linearT.py Project: rongrong1314/Visual_SLAM
def main():
    '''Test the linearT() function with the original data from the Bundle Adjustment website.'''
    # We will read the given dataset from the bundle sample code and mimic the transformation
    with open('gt_orig_data.pkl', 'rb') as f:
        data = pkl.load(f)

    with open('gt_data.pkl', 'rb') as f:
        gt_data = pkl.load(f)
    gt_points_3d = gt_data['3d']
    # camera_params, shape (n_cameras, 9): initial estimates of the parameters
    # for all cameras. The first 3 components of each row form a rotation
    # vector (https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula),
    # the next 3 a translation vector, then a focal distance and two
    # distortion parameters.
    # points_3d, shape (n_points, 3): initial estimates of point coordinates
    # in the world frame.
    # camera_ind, shape (n_observations,): indices of cameras (from 0 to
    # n_cameras - 1) involved in each observation.
    # point_ind, shape (n_observations,): indices of points (from 0 to
    # n_points - 1) involved in each observation.
    # points_2d, shape (n_observations, 2): measured 2-D coordinates of points
    # projected on images in each observation.
    camera_params = data['params']
    camera_indices = data['cam_indices']
    points_indices = data['points_indices']
    points_2d = data['2d']
    points_3d = data['3d']
    n_cameras = camera_params.shape[0]
    n_points = points_3d.shape[0]

    n = 9 * n_cameras + 3 * n_points
    m = 2 * points_2d.shape[0]

    print("n_cameras: {}".format(n_cameras))
    print("n_points: {}".format(n_points))
    print("Total number of parameters: {}".format(n))
    print("Total number of residuals: {}".format(m))


    Hw2c_set = []
    for i in range(points_indices.shape[0]):
        cam_ind = camera_indices[i]
        vecw2c = camera_params[cam_ind, 0:3]
        quatw2c = quat.exp_qua(
            np.array([0, vecw2c[0], vecw2c[1], vecw2c[2]]).reshape(-1, 4))
        Rw2c = quat.qua_to_rot(quatw2c)
        Tw2c = camera_params[cam_ind, 3:6]
        # append Hw2c
        Hw2c_set.append(rot_trans_to_homo(Rw2c, Tw2c))

    Errors = test_linearT(Hw2c_set, points_2d, points_3d, points_indices,
                          camera_indices, camera_params, gt_points_3d)
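rot_trans_to_homo is project code imported from util. Judging by qp2homo in Code Example #6, it presumably packs a rotation matrix and a translation into a 4x4 homogeneous transform; a sketch of that presumed behaviour:

import numpy as np

def rot_trans_to_homo(R, t):
    # Presumed layout, matching qp2homo below: [[R, t], [0, 0, 0, 1]]
    H = np.eye(4)
    H[:3, :3] = R
    H[:3, 3] = np.asarray(t).ravel()
    return H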
Code Example #5
File: robot.py Project: rongrong1314/Visual_SLAM
    def predict(self, q_act, pos_act):
        # pos_act is (3,); q_act is a quaternion giving the rotation of the
        # next frame relative to the current one
        # Use current orientation
        q_cur = self.q

        # Find rotation matrix to convert from local to global
        R = quat.qua_to_rot(q_cur)

        # Convert the displacement from the local to the global frame
        pos_pred = self.pos + np.dot(R, pos_act)
        self.pos = pos_pred
        self.q = quat.multi_quas(q_cur, q_act)
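The update is plain dead reckoning: rotate the body-frame displacement into the world frame, then compose orientations. The same step written with rotation matrices (a sketch for comparison, not repo code):

import numpy as np

def predict_with_matrices(pos, R_cur, pos_act, R_act):
    # Mirrors predict(): displacement into the world frame, then compose rotations
    pos_new = pos + R_cur @ pos_act
    R_new = R_cur @ R_act
    return pos_new, R_new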
Code Example #6
def qp2homo(q, pos):
    # Pack a quaternion orientation and a position into a 4x4 homogeneous transform
    rot = quat.qua_to_rot(q)
    t = pos.reshape(-1, 3)
    T = np.hstack((rot, t.T))
    T = np.vstack((T, np.array([0, 0, 0, 1]).reshape(-1, 4)))
    return T
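A hypothetical call (the [w, x, y, z] quaternion convention is an assumption, inferred from the [0, vx, vy, vz] pure quaternions built in Code Example #2):

q = np.array([1., 0., 0., 0.])   # identity rotation (assumed [w, x, y, z])
pos = np.array([1.0, 2.0, 0.5])
T = qp2homo(q, pos)              # 4x4 with R = I and last column [1, 2, 0.5, 1]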