コード例 #1
0
    def compute_target(self, idx, target_idx, source_idx):
        """Compute relative-pose training targets between two frames of a sample.

        Args:
            idx       : sample index into the stacked data arrays.
            target_idx: frame index of the target within the sample.
            source_idx: frame index of the source within the sample.

        Returns:
            (gt_lie_alg, vo_lie_alg, gt_correction, dt) where the first three
            are se(3) vectors and dt is the (signed) timestamp difference.
        """
        # Signed time delta between the target and source samples.
        dt = self.ts_samples[idx, target_idx] - self.ts_samples[idx,
                                                                source_idx]

        # Ground-truth pose change from source to target
        # (for reconstructing the source from the target).
        T2 = SE3.from_matrix(self.gt_samples[idx, target_idx, :, :],
                             normalize=True).inv()
        T1 = SE3.from_matrix(self.gt_samples[idx, source_idx, :, :],
                             normalize=True)
        dT_gt = T2.dot(T1)
        gt_lie_alg = dT_gt.log()

        # Same relative pose as estimated by visual odometry.
        T2 = SE3.from_matrix(self.vo_samples[idx, target_idx, :, :],
                             normalize=True).inv()
        T1 = SE3.from_matrix(self.vo_samples[idx, source_idx, :, :],
                             normalize=True)
        dT_vo = T2.dot(T1)
        vo_lie_alg = dT_vo.log()

        if self.config['estimator_type'] == 'mono':
            # Only rescale when translation is non-negligible.
            if np.linalg.norm(vo_lie_alg[0:3]) >= 1e-8:
                # BUGFIX(dead code): the original computed a GT-based scale
                # (norm(gt trans) / norm(vo trans)) and then immediately
                # overwrote it with 1, so rescaling was effectively disabled.
                # The dead computation is removed; scale stays 1 to preserve
                # the existing behavior. Re-enable GT scaling here if intended.
                scale = 1
                vo_lie_alg[0:3] = scale * vo_lie_alg[0:3]
                dT_vo = SE3.exp(vo_lie_alg)

        # Correction that maps the VO estimate onto the ground truth.
        gt_correction = (dT_gt.dot(dT_vo.inv())).log()
        return gt_lie_alg, vo_lie_alg, gt_correction, dt
コード例 #2
0
ファイル: __main__.py プロジェクト: yxw027/tractor
 def __init__(self):
     """Set up the kinematic model, goal controller, and initial state."""
     self.model = TractorKinematics()
     self.controller = TractorMoveToGoalController(self.model)
     # Current and goal poses both start at the identity transform.
     self.world_pose_tractor = SE3.identity()
     self.world_pose_tractor_goal = SE3.identity()
     # Wheel velocities start at rest.
     self.v_left = 0.0
     self.v_right = 0.0
コード例 #3
0
    def test_se3(self, options):
        """Covariance propagation through a two-pose SE3 problem.

        A prior on T0 plus an odometry constraint T0->T1 should give T1 the
        odometry covariance plus the prior covariance transported by the
        adjoint of the odometry transform.
        """
        from liegroups import SE3
        from pyslam.residuals import PoseResidual, PoseToPoseResidual

        problem = Problem(options)

        odom = SE3.exp(0.1 * np.ones(6))

        odom_stiffness = invsqrt(1e-3 * np.eye(SE3.dof))
        T0_stiffness = invsqrt(1e-6 * np.eye(SE3.dof))

        # Covariance is the inverse of the squared stiffness.
        odom_covar = np.linalg.inv(odom_stiffness.dot(odom_stiffness))
        T0_covar = np.linalg.inv(T0_stiffness.dot(T0_stiffness))

        problem.add_residual_block(
            PoseResidual(SE3.identity(), T0_stiffness), 'T0')
        problem.add_residual_block(
            PoseToPoseResidual(odom, odom_stiffness), ['T0', 'T1'])
        problem.initialize_params({'T0': SE3.identity(),
                                   'T1': SE3.identity()})

        problem.solve()
        problem.compute_covariance()
        estimated_covar = problem.get_covariance_block('T1', 'T1')

        Ad = odom.adjoint()
        expected_covar = odom_covar + Ad.dot(T0_covar).dot(Ad.T)

        assert np.allclose(estimated_covar, expected_covar)
コード例 #4
0
def load_brookshire_data(data_filename):
    """Load the Brookshire dataset: relative vehicle/camera pose lists
    and the extrinsic calibration from a .mat file.

    Returns:
        (T_v_rel, T_c_rel, extcal) — two lists of SE3 transforms and the
        raw extrinsic calibration array.
    """
    data = sio.loadmat(data_filename)

    def _se3_list(key):
        # Poses are stacked along the third axis of a 4x4xN array.
        arr = data[key]
        return [SE3.from_matrix(arr[:, :, k]) for k in range(arr.shape[2])]

    T_v_rel = _se3_list('T_v_rel_list')
    T_c_rel = _se3_list('T_c_rel_list')
    return T_v_rel, T_c_rel, data['extcal']
コード例 #5
0
def load_data_1(data_filename):
    """Load Emmett's synthetic dataset from a .mat file.

    Returns:
        (T_vki, T_ci, extcal) — two lists of SE3 transforms and the raw
        extrinsic calibration array.
    """
    data = sio.loadmat(data_filename)

    def _se3_list(key):
        # Poses are stacked along the first axis of an Nx4x4 array.
        arr = data[key]
        return [SE3.from_matrix(arr[k, :, :]) for k in range(arr.shape[0])]

    return _se3_list('T_vki_list'), _se3_list('T_ci_list'), data['extcal']
コード例 #6
0
 def poses(self):
     """Return a short sequence of ground-truth camera poses for testing."""
     from liegroups import SE3
     # Identity followed by increasingly large twists along a fixed direction.
     return [SE3.identity()] + [
         SE3.exp(scale * np.ones(6)) for scale in (0.1, 0.2, 0.3)
     ]
コード例 #7
0
ファイル: kinematics.py プロジェクト: yxw027/tractor
 def evolve_world_pose_tractor(
     self,
     world_pose_tractor: SE3,
     v_left: float,
     v_right: float,
     delta_t: float,
 ) -> SE3:
     """Advance the tractor's world pose by integrating wheel speeds.

     Converts the wheel speeds to a unicycle (v, w), builds the relative
     pose change over delta_t, and composes it onto the current pose.
     """
     linear, angular = self.wheel_velocity_to_unicycle(v_left, v_right)
     # Twist: forward translation along x, rotation about z.
     pose_delta = SE3.exp([linear * delta_t, 0, 0, 0, 0, angular * delta_t])
     return world_pose_tractor.dot(pose_delta)
コード例 #8
0
ファイル: dense.py プロジェクト: raphajaner/gp_lie
    def track(self, trackframe, guess=None):
        """ Track an image against the active keyframe.

            Estimates the relative pose from the active keyframe to
            trackframe, appends the new camera pose to self.T_c_w, and drops
            a new keyframe when the motion exceeds the configured thresholds.

            Args:
                trackframe  : frame to track
                guess       : optional initial guess of the motion
        """
        if len(self.keyframes) == 0:
            # First frame, so don't track anything yet
            trackframe.compute_pyramids()
            self.keyframes.append(trackframe)
            self.active_keyframe_idx = 0
            active_keyframe = self.keyframes[0]
        else:
            # Default behaviour for second frame and beyond
            active_keyframe = self.keyframes[self.active_keyframe_idx]

            if guess is None:
                # Default initial guess is previous pose relative to keyframe
                if len(self.T_c_w) == 0:
                    # We just started relocalizing
                    guess = SE3.identity()
                else:
                    guess = self.T_c_w[-1].dot(active_keyframe.T_c_w.inv())
                # Better initial guess is previous pose + previous motion
                if self.use_motion_model_guess and len(self.T_c_w) > 1:
                    guess = self.T_c_w[-1].dot(self.T_c_w[-2].inv().dot(guess))
            else:
                guess = guess.dot(active_keyframe.T_c_w.inv())

            # Estimate pose change from keyframe to tracking frame
            T_track_ref = self._compute_frame_to_frame_motion(
                active_keyframe, trackframe, guess)
            T_track_ref.normalize()  # Numerical instability problems otherwise
            self.T_c_w.append(T_track_ref.dot(active_keyframe.T_c_w))

            # Threshold the distance from the active keyframe to drop a new one
            se3_vec = SE3.log(T_track_ref)
            trans_dist = np.linalg.norm(se3_vec[0:3])
            rot_dist = np.linalg.norm(se3_vec[3:6])

            if trans_dist > self.keyframe_trans_thresh or \
                    rot_dist > self.keyframe_rot_thresh:
                # BUGFIX: use '==' for string comparison; 'is' relies on
                # CPython string interning and raises a SyntaxWarning on 3.8+.
                if self.mode == 'map':
                    trackframe.T_c_w = self.T_c_w[-1]
                    trackframe.compute_pyramids()
                    self.keyframes.append(trackframe)

                    print('Dropped new keyframe. '
                          'Trans dist was {:.3f}. Rot dist was {:.3f}.'.format(
                              trans_dist, rot_dist))

                self.active_keyframe_idx += 1
                print('Active keyframe idx: {}'.format(
                    self.active_keyframe_idx))
コード例 #9
0
    def compute_target(self, poses, target_idx, source_idx):
        """Compute the relative pose from a source frame to a target frame.

        Here the "VO" estimate is just a copy of the ground truth, so the
        correction term is zero and dt is unused (returned as 0).

        Returns:
            (gt_lie_alg, vo_lie_alg, gt_correction, dt)
        """
        T_target = SE3.from_matrix(poses[target_idx], normalize=True)
        T_source = SE3.from_matrix(poses[source_idx], normalize=True)

        # Pose change taking source-frame coordinates into the target frame.
        gt_lie_alg = T_target.dot(T_source.inv()).log()

        vo_lie_alg = np.copy(gt_lie_alg)
        gt_correction = np.zeros(gt_lie_alg.shape)
        dt = 0

        return gt_lie_alg, vo_lie_alg, gt_correction, dt
コード例 #10
0
def run_vo_kitti(basedir, date, drive, frames, outfile=None):
    """Run dense stereo VO on a KITTI raw sequence and score it.

    Args:
        basedir: KITTI raw data root directory.
        date   : recording date string (e.g. '2011_09_26').
        drive  : drive identifier string.
        frames : frame indices to load.
        outfile: optional .mat path; metrics are saved there if given.

    Returns:
        TrajectoryMetrics comparing ground truth against the VO estimate.
    """
    # Load KITTI data
    dataset = pykitti.raw(basedir, date, drive, frames=frames, imformat='cv2')

    # First pose: map the world frame (IMU at t0) into the cam0 frame.
    first_oxts = dataset.oxts[0]
    T_cam0_imu = SE3.from_matrix(dataset.calib.T_cam0_imu)
    T_cam0_imu.normalize()
    T_0_w = T_cam0_imu.dot(SE3.from_matrix(first_oxts.T_w_imu).inv())
    T_0_w.normalize()

    # Create the camera from the left gray camera intrinsics and baseline.
    test_im = np.array(next(dataset.cam0))
    fu = dataset.calib.K_cam0[0, 0]
    fv = dataset.calib.K_cam0[1, 1]
    cu = dataset.calib.K_cam0[0, 2]
    cv = dataset.calib.K_cam0[1, 2]
    b = dataset.calib.b_gray
    h, w = test_im.shape
    camera = StereoCamera(cu, cv, fu, fv, b, w, h)

    # Ground truth: world pose of cam0 for every OXTS packet.
    T_w_c_gt = [
        SE3.from_matrix(o.T_w_imu).dot(T_cam0_imu.inv()) for o in dataset.oxts
    ]

    # Pipeline
    vo = DenseStereoPipeline(camera, first_pose=T_0_w)
    # Skip the highest resolution level
    vo.pyrlevel_sequence.pop()
    vo.pyr_cameras.pop()

    # Track every stereo pair, reporting per-frame timing.
    start = time.perf_counter()
    for c_idx, impair in enumerate(dataset.gray):
        vo.track(np.array(impair[0]), np.array(impair[1]))
        # vo.track(impair[0], impair[1], guess=T_w_c_gt[c_idx].inv())
        end = time.perf_counter()
        print('Image {}/{} | {:.3f} s'.format(c_idx, len(dataset),
                                              end - start))
        start = end

    # Compute errors
    T_w_c_est = [T.inv() for T in vo.T_c_w]
    tm = TrajectoryMetrics(T_w_c_gt, T_w_c_est)

    # Save to file
    if outfile is not None:
        print('Saving to {}'.format(outfile))
        tm.savemat(outfile)

    # Clean up (the pipeline holds large image pyramids)
    del vo

    return tm
コード例 #11
0
ファイル: dense.py プロジェクト: raphajaner/gp_lie
    def _compute_frame_to_frame_motion(self,
                                       ref_frame,
                                       track_frame,
                                       guess=SE3.identity()):
        """Estimate the SE3 motion from ref_frame to track_frame.

        Optimizes a photometric residual coarse-to-fine over the image
        pyramid, with the pose split into rotation ('R_1_0') and
        translation ('t_1_0_1') parameter blocks.

        Args:
            ref_frame   : keyframe with image pyramid, depth and jacobian.
            track_frame : frame to align against the keyframe.
            guess       : initial SE3 estimate of the relative motion.
                          NOTE(review): evaluated once at definition time as a
                          default; safe here only because it is never mutated.

        Returns:
            SE3 transform taking ref_frame coordinates to track_frame.
        """
        params = {'R_1_0': guess.rot, 't_1_0_1': guess.trans}

        for (pyrlevel, pyr_camera) in zip(self.pyrlevel_sequence,
                                          self.pyr_cameras):
            pyrfactor = 2**-pyrlevel

            im_jacobian = ref_frame.jacobian[pyrlevel]

            # BUGFIX: '==' instead of 'is' for string comparison ('is'
            # depends on CPython interning and is a SyntaxWarning on 3.8+).
            if self.depth_map_type == 'disparity':
                depth_ref = ref_frame.disparity[pyrlevel]
                # Disparity is in pixels, so we need to scale it according to the pyramid level
                depth_stiffness = self.depth_stiffness / pyrfactor
            else:
                depth_ref = ref_frame.depth[pyrlevel]
                depth_stiffness = self.depth_stiffness

            residual = PhotometricResidualSE3(
                pyr_camera, ref_frame.im_pyr[pyrlevel], depth_ref,
                track_frame.im_pyr[pyrlevel], im_jacobian,
                self.intensity_stiffness, depth_stiffness, self.min_grad)

            problem = Problem(self.motion_options)
            problem.add_residual_block(residual, ['R_1_0', 't_1_0_1'],
                                       loss=self.loss)
            problem.initialize_params(params)

            # Keep translation fixed at the coarsest pyramid levels.
            if pyrlevel > 2:
                problem.set_parameters_constant('t_1_0_1')

            params = problem.solve()

        return SE3(params['R_1_0'], params['t_1_0_1'])
コード例 #12
0
ファイル: test_costs.py プロジェクト: raphajaner/gp_lie
 def test_jacobians_se3(self, se3_residual):
     """Jacobians must be (6, 6) when requested and None otherwise."""
     T1 = SE3.exp([1, 2, 3, 4, 5, 6])
     T2 = SE3.exp([7, 8, 9, 10, 11, 12])
     # Exercise every combination of requested jacobians.
     for flags in ([True, True], [True, False], [False, True]):
         _, jac = se3_residual.evaluate([T1, T2], compute_jacobians=flags)
         assert len(jac) == 2
         for j, requested in zip(jac, flags):
             if requested:
                 assert j.shape == (6, 6)
             else:
                 assert j is None
コード例 #13
0
def generate_trajectory_metrics(gt_traj, est_traj, name='', seq='', mode=''):
    """Build TrajectoryMetrics for a trajectory pair and report its errors.

    Prints mean ATE and KITTI-style segment errors, and returns the metrics
    object plus a summary tuple suitable for tabulation.
    """
    gt_se3 = [SE3.from_matrix(T, normalize=True) for T in gt_traj]
    est_se3 = [SE3.from_matrix(T, normalize=True) for T in est_traj]
    tm = TrajectoryMetrics(gt_se3, est_se3, convention='Twv')

    # Mean absolute trajectory error; rotation converted to degrees.
    est_mATE_trans, est_mATE_rot = tm.mean_err()
    est_mATE_rot = est_mATE_rot * 180 / np.pi
    print("{} ({}) mean trans. error: {} | mean rot. error: {}".format(name, mode, est_mATE_trans, est_mATE_rot))

    # Segment errors over 100 m .. 800 m lengths.
    seg_lengths = list(range(100, 801, 100))
    _, est_seg_errs = tm.segment_errors(seg_lengths, rot_unit='rad')
    est_seg_err_trans = np.mean(est_seg_errs[:, 1]) * 100
    est_seg_err_rot = 100 * np.mean(est_seg_errs[:, 2]) * 180 / np.pi
    print("{} ({}) mean Segment Errors: {} (trans, %) | {} (rot, deg/100m)".format(name, mode, est_seg_err_trans, est_seg_err_rot) )

    summary = (seq, name, mode,
               est_mATE_trans.round(3), est_mATE_rot.round(3),
               est_seg_err_trans.round(3), est_seg_err_rot.round(3))
    return tm, summary
コード例 #14
0
def add_noise(pose_list, trans_noise, rot_noise):
    """Add noise to pose measurements, scaled by each pose's magnitude.

    Args:
        pose_list  : list of SE3 poses (each exposing .trans and .rot).
        trans_noise: translation noise level as a fraction of each pose's
                     translation norm.
        rot_noise  : rotation noise level as a fraction of each pose's
                     rotation-vector norm.

    Returns:
        List of SE3 poses with Gaussian translation noise added and a random
        rotation perturbation composed on the right of each rotation.
    """

    #Create the translation noise vectors
    # Per-pose sigma: a fraction of that pose's translation magnitude.
    trans_noise_sigma = trans_noise * np.linalg.norm(
        np.array([pose.trans for pose in pose_list]), axis=1)
    trans_noise_vec = [
        np.random.randn((3)) * trans_noise_sigma[i]
        for i in range(len(pose_list))
    ]

    #Create the rotation noise vectors
    rot_noise_sigma = rot_noise * np.linalg.norm(
        np.array([pose.rot.log() for pose in pose_list]), axis=1)
    # Random axes, normalized to unit length column-wise.
    rot_noise_unit_vecs = np.random.rand(3, len(pose_list))
    rot_noise_unit_vecs = rot_noise_unit_vecs / np.linalg.norm(
        rot_noise_unit_vecs, axis=0)
    # NOTE(review): the extra np.random.rand() factor makes the rotation
    # noise magnitude uniform in [0, sigma) rather than exactly sigma —
    # confirm this is intended.
    rot_noise_vec = [
        rot_noise_sigma[i] * np.random.rand() * rot_noise_unit_vecs[:, i]
        for i in range(len(pose_list))
    ]

    #Add the noise to the poses
    noisy_poses = [
        SE3(rot=pose.rot.dot(SO3.exp(rot_noise_vec[i])),
            trans=pose.trans + trans_noise_vec[i])
        for i, pose in enumerate(pose_list)
    ]
    return noisy_poses
コード例 #15
0
    def evaluate(self, params, compute_jacobians=None):
        """Evaluate the reprojection residual (and optionally its jacobians).

        Args:
            params           : [T_cam_w, pt_w] — camera pose and world point.
            compute_jacobians: optional list of booleans selecting which
                               jacobians to compute.

        Returns:
            residual, or (residual, jacobians) when jacobians are requested.
        """
        T_cam_w = params[0]
        pt_w = params[1]
        # Transform the world point into the camera frame.
        pt_cam = T_cam_w.dot(pt_w)

        if compute_jacobians:
            jacobians = [None] * len(params)

            predicted_obs, cam_jacobian = self.camera.project(
                pt_cam, compute_jacobians=True)
            residual = np.dot(self.stiffness, predicted_obs - self.obs)

            if compute_jacobians[0]:
                # Chain rule through the camera model and the SE3 action.
                jacobians[0] = np.dot(self.stiffness,
                                      cam_jacobian.dot(SE3.odot(pt_cam)))

            if compute_jacobians[1]:
                # Point jacobian: rotate into the camera frame, then project.
                jacobians[1] = np.dot(
                    self.stiffness, cam_jacobian.dot(T_cam_w.rot.as_matrix()))

            return residual, jacobians

        return np.dot(self.stiffness,
                      self.camera.project(pt_cam) - self.obs)
コード例 #16
0
def find_loop_closures(traj, cum_dist):
    """Find loop-closure candidates in a trajectory.

    Args:
        traj    : (N, 4, 4) array of pose matrices.
        cum_dist: (N,) cumulative travelled distance per frame.

    Returns:
        (num_loop_closures, filtered_loop_closures, idx_list) — raw candidate
        count, count surviving the yaw filter, and their frame indices.
    """
    num_loop_closures = 0
    filtered_loop_closures = 0
    idx_list = []
    # Compare every 8th frame against all future frames.
    for i in range(0, traj.shape[0], 8):
        current_pose = traj[i]
        current_trans = current_pose[0:3, 3]
        current_rot = SO3.from_matrix(current_pose[0:3, 0:3],
                                      normalize=True).to_rpy()
        current_yaw = current_rot[2]

        # Candidates: within 7 m of the current position, but at least 10 m
        # farther along the travelled path (excludes temporally-near frames).
        current_cum_dist = cum_dist[i]
        near = np.linalg.norm(
            current_trans[0:3] - traj[i + 1:, 0:3, 3], axis=1) <= 7
        far_enough = (cum_dist[i + 1:] - current_cum_dist) >= 10
        loop_closure_idx = near & far_enough

        # BUGFIX: the original tested `idx != np.array([])`, a meaningless
        # tuple-vs-array comparison; iterate the index array directly.
        for pose_idx in np.where(loop_closure_idx)[0]:
            T = traj[i + 1:][pose_idx]
            yaw = SE3.from_matrix(T, normalize=True).rot.to_rpy()[2]
            yaw_diff = np.abs(np.abs(current_yaw) - np.abs(yaw))
            # Accept same-direction (yaw_diff ~ 0) or opposite-direction
            # (yaw_diff ~ pi) revisits.
            # BUGFIX: original wrote np.abs((np.pi - yaw_diff) <= 0.15),
            # taking abs of a boolean; the intended test is on the yaw gap.
            # Equivalent here since yaw_diff is in [0, pi].
            in_range = (yaw_diff <= 0.15) or (np.abs(np.pi - yaw_diff) <= 0.15)
            filtered_loop_closures += in_range
            if in_range:
                # NOTE(review): candidates come from traj[i+1:], so the
                # absolute frame index is arguably pose_idx + i + 1; the
                # original's pose_idx + i is preserved — confirm intent.
                idx_list.append(pose_idx + i)

        num_loop_closures += np.sum(loop_closure_idx)

    return num_loop_closures, filtered_loop_closures, idx_list
コード例 #17
0
ファイル: proto_utils.py プロジェクト: ROAR-ROBOTICS/tractor
def proto_to_se3(proto: geometry_pb2.SE3Pose) -> SE3:
    """Convert an SE3Pose protobuf message into a liegroups SE3."""
    # Quaternion is stored with scalar first (w, x, y, z).
    quat_wxyz = np.array([
        proto.rotation.w,
        proto.rotation.x,
        proto.rotation.y,
        proto.rotation.z,
    ])
    rotation = SE3.RotationType.from_quaternion(quat_wxyz, ordering='wxyz')
    translation = np.array([
        proto.position.x,
        proto.position.y,
        proto.position.z,
    ])
    return SE3(rot=rotation, trans=translation)
コード例 #18
0
ファイル: keyframes.py プロジェクト: wpfhtl/pyslam
    def __init__(self, data, pyrimage, pyrlevels, T_c_w=None):
        """Keyframe with a precomputed image pyramid.

        Args:
            data     : frame data forwarded to the base class.
            pyrimage : image used to build the pyramid.
            pyrlevels: number of pyramid levels to downsample.
            T_c_w    : optional camera pose (defaults to SE3 identity).
        """
        # BUGFIX: avoid a mutable default argument — SE3.identity() in the
        # signature would be created once and shared by every instance.
        if T_c_w is None:
            T_c_w = SE3.identity()
        super().__init__(data, T_c_w)

        self.pyrlevels = pyrlevels
        """Number of pyramid levels to downsample"""

        self.compute_image_pyramid(pyrimage)
        """Image pyramid"""
コード例 #19
0
def compute_trajectory(pose_vec, gt_traj, method='odom'):
    """Integrate per-frame pose changes into a trajectory and score it.

    Args:
        pose_vec: array of per-step se(3) pose-change vectors; each row is
                  passed to SE3.exp (assumed 6-dof — TODO confirm caller).
        gt_traj : sequence of 4x4 ground-truth pose matrices.
        method  : label used only in the printed reports.

    Returns:
        (est_traj, gt_traj, errors, cum_dist) as numpy arrays, where errors
        is (mean trans, mean rot deg, trans segment %, rot segment deg/100m).
    """
    est_traj = [gt_traj[0]]
    cum_dist = [0]
    for i in range(0, pose_vec.shape[0]):
        #classically estimated traj
        dT = SE3.exp(pose_vec[i])
        # NOTE(review): composes dT with the inverse of the previous pose,
        # then inverts the result — verify this matches the 'Twv' pose
        # convention used by TrajectoryMetrics below.
        new_est = SE3.as_matrix(
            (dT.dot(SE3.from_matrix(est_traj[i], normalize=True).inv())).inv())
        est_traj.append(new_est)
        # Accumulate travelled distance from the step's translation norm.
        cum_dist.append(cum_dist[i] + np.linalg.norm(dT.trans))

    gt_traj_se3 = [SE3.from_matrix(T, normalize=True) for T in gt_traj]
    est_traj_se3 = [SE3.from_matrix(T, normalize=True) for T in est_traj]

    tm_est = TrajectoryMetrics(gt_traj_se3, est_traj_se3, convention='Twv')
    est_mean_trans, est_mean_rot = tm_est.mean_err()
    # Convert rotation error from radians to degrees.
    est_mean_rot = (est_mean_rot * 180 / np.pi).round(3)
    est_mean_trans = est_mean_trans.round(3)

    # KITTI-style segment lengths (100 m .. 800 m).
    seg_lengths = list(range(100, 801, 100))
    _, seg_errs_est = tm_est.segment_errors(seg_lengths, rot_unit='rad')

    print('trans. rel. err: {}, rot. rel. err: {}'.format(
        np.mean(tm_est.rel_errors()[0]), np.mean(tm_est.rel_errors()[1])))

    rot_seg_err = (100 * np.mean(seg_errs_est[:, 2]) * 180 / np.pi).round(3)
    trans_seg_err = (np.mean(seg_errs_est[:, 1]) * 100).round(3)

    # Short sequences may not contain a full 800 m; retry with segment
    # lengths that fit the actual travelled distance.
    if np.isnan(trans_seg_err):
        max_dist = cum_dist[-1] - cum_dist[-1] % 100 + 1 - 100
        print('max dist', max_dist)
        seg_lengths = list(range(100, int(max_dist), 100))
        _, seg_errs_est = tm_est.segment_errors(seg_lengths, rot_unit='rad')

        rot_seg_err = (100 * np.mean(seg_errs_est[:, 2]) * 180 /
                       np.pi).round(3)
        trans_seg_err = (np.mean(seg_errs_est[:, 1]) * 100).round(3)

    print("{} mean trans. error: {} | mean rot. error: {}".format(
        method, est_mean_trans, est_mean_rot))
    print("{} mean Segment Errors: {} (trans, %) | {} (rot, deg/100m)".format(
        method, trans_seg_err, rot_seg_err))

    errors = (est_mean_trans, est_mean_rot, trans_seg_err, rot_seg_err)

    return np.array(est_traj), np.array(gt_traj), errors, np.array(cum_dist)
コード例 #20
0
def do_vo_mapping(basepath,
                  seq,
                  ref_cond,
                  frames=None,
                  outfile=None,
                  rgb_dir='rgb'):
    """Build a keyframe map by running dense RGB-D VO over a reference run.

    Args:
        basepath: dataset root directory.
        seq     : sequence name.
        ref_cond: condition/variant of the sequence used for mapping.
        frames  : optional frame subset.
        outfile : optional .mat path; metrics are saved there if given.
        rgb_dir : name of the RGB image directory.

    Returns:
        (TrajectoryMetrics, vo) — metrics vs. ground truth and the pipeline
        (holding the keyframe map) for subsequent tracking.
    """
    ref_data = vkitti.LocalizationDataset(basepath,
                                          seq,
                                          ref_cond,
                                          frames=frames,
                                          rgb_dir=rgb_dir)

    test_im = ref_data.get_gray(0)
    camera = get_camera(seq, test_im)
    camera.maxdepth = 200.

    # Ground truth
    T_w_c_gt = [SE3.from_matrix(p, normalize=True) for p in ref_data.poses]
    T_0_w = T_w_c_gt[0].inv()

    # Pipeline tuning — thresholds and stiffnesses chosen empirically.
    vo = DenseRGBDPipeline(camera, first_pose=T_0_w)
    # vo.keyframe_trans_thresh = 3.  # meters
    vo.keyframe_trans_thresh = 2.  # meters
    vo.keyframe_rot_thresh = 15. * np.pi / 180.  # rad
    vo.depth_stiffness = 1. / 0.01  # 1/meters
    vo.intensity_stiffness = 1. / 0.2  # 1/ (intensity is in [0,1] )
    # vo.intensity_stiffness = 1. / 0.1
    vo.use_motion_model_guess = True
    # vo.min_grad = 0.2
    # vo.loss = HuberLoss(5.0)

    print('Mapping using {}/{}'.format(seq, ref_cond))
    vo.set_mode('map')

    start = time.perf_counter()
    for c_idx in range(len(ref_data)):
        image = ref_data.get_gray(c_idx)
        depth = ref_data.get_depth(c_idx)

        # Mark out-of-range depths invalid so the tracker ignores them.
        depth[depth >= camera.maxdepth] = -1.
        vo.track(image, depth)
        # vo.track(image, depth, guess=T_w_c_gt[c_idx].inv()) # debug
        end = time.perf_counter()
        print('Image {}/{} ({:.2f} %) | {:.3f} s'.format(
            c_idx, len(ref_data), 100. * c_idx / len(ref_data), end - start),
              end='\r')
        start = end

    # Compute errors
    T_w_c_est = [T.inv() for T in vo.T_c_w]
    tm = TrajectoryMetrics(T_w_c_gt, T_w_c_est)

    # Save to file
    if outfile is not None:
        print('Saving to {}'.format(outfile))
        tm.savemat(outfile)

    return tm, vo
コード例 #21
0
ファイル: test_costs.py プロジェクト: raphajaner/gp_lie
 def test_evaluate_stereo(self, stereo_residual):
     """Residual is zero at the triangulated point, nonzero when perturbed."""
     T_cam_w = SE3.exp([1, 2, 3, 4, 5, 6])
     # World point that reprojects exactly onto the stored observation.
     pt_exact = T_cam_w.inv().dot(
         stereo_residual.camera.triangulate(stereo_residual.obs))
     pt_perturbed = pt_exact + [1, 1, 1]
     assert np.allclose(stereo_residual.evaluate([T_cam_w, pt_exact]),
                        np.zeros(3))
     assert not np.allclose(stereo_residual.evaluate([T_cam_w, pt_perturbed]),
                            np.zeros(3))
コード例 #22
0
    def test_bundle_adjust(self, options, camera, points, poses, observations):
        """Bundle adjustment should recover points and poses from observations.

        Builds one reprojection residual per (pose, point) observation,
        initializes points from perturbed triangulations and all poses at
        identity, fixes the first camera to remove the gauge freedom, and
        checks the solution against ground truth.
        """
        from liegroups import SE3
        from pyslam.residuals import ReprojectionResidual

        problem = Problem(options)

        obs_var = [1, 1, 2]  # [u,v,d]
        obs_stiffness = invsqrt(np.diagflat(obs_var))

        # One residual per observed (pose i, point j) pair.
        for i, this_pose_obs in enumerate(observations):
            for j, o in enumerate(this_pose_obs):
                residual = ReprojectionResidual(camera, o, obs_stiffness)
                problem.add_residual_block(
                    residual, ['T_cam{}_w'.format(i), 'pt{}_w'.format(j)])

        params_true = {}
        params_init = {}

        # Points start at noisy triangulations of their first observation.
        for i, pt in enumerate(points):
            pid = 'pt{}_w'.format(i)
            params_true.update({pid: pt})
            params_init.update({pid: camera.triangulate(
                observations[0][i] + 10. * np.random.rand(3))})

        # All poses start at identity.
        for i, pose in enumerate(poses):
            pid = 'T_cam{}_w'.format(i)
            params_true.update({pid: pose})
            params_init.update({pid: SE3.identity()})

        problem.initialize_params(params_init)
        # Fix the first camera as the gauge.
        problem.set_parameters_constant('T_cam0_w')

        params_final = problem.solve()

        for key in params_true.keys():
            p_est = params_final[key]
            p_true = params_true[key]

            # Pose errors are measured in the tangent space; points directly.
            if isinstance(p_est, SE3):
                err = SE3.log(p_est.inv().dot(p_true))
            else:
                err = p_est - p_true

            assert np.linalg.norm(err) < 1e-4
コード例 #23
0
def do_tracking(basepath,
                seq,
                track_cond,
                vo,
                frames=None,
                outfile=None,
                rgb_dir='rgb'):
    """Localize a sequence against an existing map using dense RGB-D tracking.

    Args:
        basepath  : dataset root directory.
        seq       : sequence name.
        track_cond: condition/variant of the sequence to track.
        vo        : pre-built dense VO pipeline (already mapped).
        frames    : optional frame subset.
        outfile   : optional .mat path; metrics are saved there if given.
        rgb_dir   : name of the RGB image directory.

    Returns:
        (TrajectoryMetrics, vo) comparing ground truth to the estimate.
    """
    track_data = tum_rgbd.LocalizationDataset(basepath,
                                              seq,
                                              track_cond,
                                              frames=frames,
                                              rgb_dir=rgb_dir)

    # Ground truth poses from (quaternion, translation) pairs.
    T_w_c_gt = [
        SE3(SO3.from_quaternion(p[0], ordering='xyzw'), p[1])
        for p in track_data.poses
    ]
    # NOTE(review): T_0_w is computed but unused here — kept for parity with
    # the mapping routine; confirm whether it should seed the pipeline.
    T_0_w = T_w_c_gt[0].inv()

    print('Tracking using {}/{}'.format(seq, track_cond))
    vo.set_mode('track')

    start = time.perf_counter()
    for c_idx in range(len(track_data)):
        image = track_data.get_gray(c_idx)
        depth = track_data.get_depth(c_idx)
        try:
            vo.track(image, depth)
            # vo.track(image, depth, guess=T_w_c_gt[c_idx].inv()) # debug
            end = time.perf_counter()
            print('Image {}/{} ({:.2f} %) | {:.3f} s'.format(
                c_idx, len(track_data), 100. * c_idx / len(track_data),
                end - start),
                  end='\r')

        except Exception as e:
            # BUGFIX: 'end' was undefined here when vo.track() raised on the
            # first iteration, so a NameError masked the real error.
            end = time.perf_counter()
            print('Error on {}/{}'.format(seq, track_cond))
            print(e)
            print('Image {}/{} ({:.2f} %) | {:.3f} s'.format(
                c_idx, len(track_data), 100. * c_idx / len(track_data),
                end - start))
            break

        start = end

    # Compute errors
    T_w_c_est = [T.inv() for T in vo.T_c_w]
    tm = TrajectoryMetrics(T_w_c_gt, T_w_c_est)

    # Save to file
    if outfile is not None:
        print('Saving to {}'.format(outfile))
        tm.savemat(outfile)

    return tm, vo
コード例 #24
0
 def evolve_world_pose_tractor(
     self,
     world_pose_tractor: SE3,
     v_left: float,
     v_right: float,
     delta_t: float,
 ) -> SE3:
     """Advance the world pose by the tractor's motion over delta_t.

     Integrates the wheel speeds into a relative pose change and composes
     it onto the current world pose.
     """
     pose_delta = self.compute_tractor_pose_delta(v_left, v_right, delta_t)
     return world_pose_tractor.dot(pose_delta)
コード例 #25
0
ファイル: dense.py プロジェクト: raphajaner/gp_lie
    def __init__(self, camera, first_pose=None):
        """Dense VO pipeline.

        Args:
            camera    : camera model used for projection.
            first_pose: optional SE3 camera-from-world pose of the first
                        frame (defaults to identity).
        """
        # BUGFIX: avoid a mutable default argument — SE3.identity() in the
        # signature would be created once and shared by every pipeline.
        if first_pose is None:
            first_pose = SE3.identity()

        self.camera = camera
        """Camera model"""
        self.first_pose = first_pose
        """First pose"""
        self.keyframes = []
        """List of keyframes"""
        self.T_c_w = [first_pose]
        """List of camera poses"""
        self.motion_options = Options()
        """Optimizer parameters for motion estimation"""

        # Default optimizer parameters for motion estimation
        self.motion_options.allow_nondecreasing_steps = True
        self.motion_options.max_nondecreasing_steps = 5
        self.motion_options.min_cost_decrease = 0.99
        self.motion_options.max_iters = 30
        self.motion_options.num_threads = 1
        self.motion_options.linesearch_max_iters = 0

        self.pyrlevels = 4
        """Number of image pyramid levels for coarse-to-fine optimization"""
        # Optimize coarse-to-fine: coarsest level first.
        self.pyrlevel_sequence = list(range(self.pyrlevels))
        self.pyrlevel_sequence.reverse()

        self.keyframe_trans_thresh = 3.0  # meters
        """Translational distance threshold to drop new keyframes"""
        self.keyframe_rot_thresh = 0.3  # rad
        """Rotational distance threshold to drop new keyframes"""

        self.intensity_stiffness = 1. / 0.01
        """Photometric measurement stiffness"""
        self.depth_stiffness = 1. / 0.01
        """Depth or disparity measurement stiffness"""
        self.min_grad = 0.1
        """Minimum image gradient magnitude to use a given pixel"""
        self.depth_map_type = 'depth'
        """Is the depth map depth, inverse depth, disparity? ['depth','disparity'] supported"""
        self.mode = 'map'
        """Create new keyframes or localize against existing ones? ['map'|'track']"""

        self.use_motion_model_guess = True
        """Use constant motion model for initial guess."""

        # self.loss = L2Loss()
        self.loss = HuberLoss(10.0)
        # self.loss = TukeyLoss(5.0)
        # self.loss = CauchyLoss(5.0)
        # self.loss = TDistributionLoss(5.0)  # Kerl et al. ICRA 2013
        # self.loss = TDistributionLoss(3.0)
        """Loss function"""

        self._make_pyramid_cameras()
コード例 #26
0
ファイル: pose_vis_toy.py プロジェクト: ROAR-ROBOTICS/tractor
    def __init__(self, event_loop):
        """Wire up the event bus and start the periodic command loop."""
        self.event_loop = event_loop
        self.command_rate_hz = 50
        self.command_period_seconds = 1.0 / self.command_rate_hz
        self.event_bus = get_event_bus('farm_ng.pose_vis_toy')

        # Tractor starts at the odometry origin.
        self.odom_pose_tractor = SE3.identity()

        # Wheel frames: offset 1 m to either side of the base and rotated
        # by -pi/2 about x (presumably to align the wheel spin axis —
        # confirm against the visualizer's conventions).
        wheel_roll = SE3.exp((0.0, 0.0, 0, -np.pi / 2, 0, 0))
        self.tractor_pose_wheel_left = SE3.exp(
            (0.0, 1.0, 0, 0, 0, 0)).dot(wheel_roll)
        self.tractor_pose_wheel_right = SE3.exp(
            (0.0, -1.0, 0, 0, 0, 0)).dot(wheel_roll)

        self.kinematic_model = TractorKinematics(default_config())

        self.control_timer = Periodic(
            self.command_period_seconds,
            self.event_loop,
            self._command_loop,
            name='control_loop',
        )
コード例 #27
0
ファイル: pose_vis_toy.py プロジェクト: ROAR-ROBOTICS/tractor
    def _command_loop(self, n_periods):
        """Periodic tick: integrate open-loop motion and publish pose events.

        Args:
            n_periods: number of command periods elapsed since the last tick
                       (scales the integration step).
        """
        # Fixed, unequal wheel speeds drive the tractor in an arc.
        pose_msg = NamedSE3Pose()
        left_speed = 1.0
        right_speed = 0.5
        self.odom_pose_tractor = self.kinematic_model.evolve_world_pose_tractor(
            self.odom_pose_tractor,
            left_speed,
            right_speed,
            n_periods * self.command_period_seconds,
        )
        pose_msg.a_pose_b.CopyFrom(se3_to_proto(self.odom_pose_tractor))
        pose_msg.frame_a = 'odometry/wheel'
        pose_msg.frame_b = 'tractor/base'
        self.event_bus.send(make_event('pose/tractor/base', pose_msg))

        # Spin the left wheel about its z axis by (speed / radius) * dt.
        pose_msg = NamedSE3Pose()
        radius = self.kinematic_model.wheel_radius
        self.tractor_pose_wheel_left = self.tractor_pose_wheel_left.dot(
            SE3.exp((0, 0, 0, 0, 0, left_speed / radius *
                     self.command_period_seconds * n_periods)))
        pose_msg.a_pose_b.CopyFrom(se3_to_proto(self.tractor_pose_wheel_left))
        pose_msg.frame_a = 'tractor/base'
        pose_msg.frame_b = 'tractor/wheel/left'
        self.event_bus.send(make_event('pose/tractor/wheel/left', pose_msg))

        # Same for the right wheel, at its own speed.
        pose_msg = NamedSE3Pose()
        self.tractor_pose_wheel_right = self.tractor_pose_wheel_right.dot(
            SE3.exp((0, 0, 0, 0, 0, right_speed / radius *
                     self.command_period_seconds * n_periods)))
        pose_msg.a_pose_b.CopyFrom(se3_to_proto(self.tractor_pose_wheel_right))
        pose_msg.frame_a = 'tractor/base'
        pose_msg.frame_b = 'tractor/wheel/right'
        self.event_bus.send(make_event('pose/tractor/wheel/right', pose_msg))

        # Static camera mount: fixed offset from the base, identity rotation.
        pose_msg = NamedSE3Pose()
        pose_msg.a_pose_b.position.z = 1.0
        pose_msg.a_pose_b.position.y = 0.5
        pose_msg.frame_a = 'tractor/base'
        pose_msg.frame_b = 'tractor/camera'
        self.event_bus.send(make_event('pose/tractor/camera', pose_msg))
コード例 #28
0
    def perform_ransac(self):
        """Main RANSAC routine.

        Samples minimal point sets, fits a rigid transform to each sample in
        parallel, scores all hypotheses, and keeps the one with the most
        inliers.

        Returns:
            (T_21_best, obs_1_inliers, obs_2_inliers, inlier_indices_best)

        Raises:
            ValueError: if the best hypothesis has fewer than 5 inliers.
        """
        max_inliers = 0

        # Select random ids for minimal sets
        rand_idx = np.random.randint(self.num_pts,
                                     size=(self.ransac_iters,
                                           self.num_min_set_pts))
        pts_1_sample_stacked = self.pts_1[rand_idx]
        pts_2_sample_stacked = self.pts_2[rand_idx]

        # Parallel transform computation
        # Compute transforms in parallel
        # start = time.perf_counter()
        T_21_stacked = compute_transform_fast(pts_1_sample_stacked,
                                              pts_2_sample_stacked, SE3_SHAPE)
        # end = time.perf_counter()
        # print('comp, transform | {}'.format(end - start))

        # Parallel cost computation: one inlier mask per hypothesis.
        # start = time.perf_counter()
        # inlier_masks_stacked = compute_ransac_cost_fast(
        #     T_21_stacked, self.pts_1, self.obs_2, cam_params, inlier_thresh)
        inlier_masks_stacked = self.compute_ransac_cost(
            T_21_stacked, self.pts_1, self.obs_2, self.camera,
            self.ransac_thresh)

        # end = time.perf_counter()
        # print('comp, masks | {}'.format(end - start))

        # Pick the hypothesis with the largest inlier count.
        # start = time.perf_counter()
        inlier_nums = np.sum(inlier_masks_stacked, axis=1)
        most_inliers_idx = np.argmax(inlier_nums)
        T_21_best = SE3.from_matrix(T_21_stacked[most_inliers_idx, :, :])
        max_inliers = inlier_nums[most_inliers_idx]
        inlier_indices_best = np.where(
            inlier_masks_stacked[most_inliers_idx, :])[0]
        # end = time.perf_counter()
        # print('comp, rest | {}'.format(end - start))

        if max_inliers < 5:
            raise ValueError(
                " RANSAC failed to find more than 5 inliers. Try adjusting the thresholds."
            )

        # print('After {} RANSAC iters, found best transform with {} / {} inliers.'.format(
        #     self.ransac_iters, max_inliers, self.num_pts))

        obs_1_inliers = self.obs_1[inlier_indices_best]
        obs_2_inliers = self.obs_2[inlier_indices_best]

        return (T_21_best, obs_1_inliers, obs_2_inliers, inlier_indices_best)
コード例 #29
0
def matlab_comparison():
    """Run the dual and relaxed solvers on 'simple_case.mat' and print results.

    Loads two pairs of relative transforms (vehicle and camera), feeds them
    to the solver, and prints both solvers' outputs for comparison against
    the MATLAB implementation.
    """
    data = sio.loadmat('simple_case.mat')
    T1 = data['T1']
    T2 = data['T2']
    # Two relative poses each, stacked along the third axis.
    T_v_rel = [SE3.from_matrix(T1[:, :, k]) for k in range(2)]
    T_c_rel = [SE3.from_matrix(T2[:, :, k]) for k in range(2)]

    my_solver = solver()
    my_solver.set_T_v_rel(T_v_rel)
    my_solver.set_T_c_rel(T_c_rel)

    dual_time, dual_gt, dual_primal, dual_gap, dual_opt, dual_solution, dual_flag = my_solver.dual_solve(
        'R', verbose=True)
    rel_time, rel_gt, rel_primal, rel_gap, relax_opt, relax_solution, rel_flag = my_solver.relax_solve(
        'R')

    # Print the reciprocal of the 4th optimization variable from each solver.
    print(1 / dual_opt[3])
    print(1 / relax_opt[3])

    print(dual_solution)
    print(relax_solution)
    return
コード例 #30
0
ファイル: test_costs.py プロジェクト: raphajaner/gp_lie
 def test_jacobians_stereo(self, stereo_residual):
     """Jacobians must have the right shapes when requested, None otherwise."""
     T_cam_w = SE3.exp([1, 2, 3, 4, 5, 6])
     pt_w = T_cam_w.inv().dot(
         stereo_residual.camera.triangulate(stereo_residual.obs))
     # Pose jacobian is (3, 6); point jacobian is (3, 3).
     expected_shapes = ((3, 6), (3, 3))
     for flags in ([True, True], [True, False], [False, True]):
         _, jac = stereo_residual.evaluate([T_cam_w, pt_w],
                                           compute_jacobians=flags)
         assert len(jac) == 2
         for j, requested, shape in zip(jac, flags, expected_shapes):
             if requested:
                 assert j.shape == shape
             else:
                 assert j is None