Example No. 1
def shift_and_visualize_gt(self, path_to_gt_file):
    """ Read the associated groundtruth file and shift the origin of the pose list
    so that it matches the estimated trajectory """
    # Assumed context: numpy is imported as np; Quaternion comes from a library
    # such as minieigen, with a (w, x, y, z) constructor and toRotationMatrix();
    # Pose is a simple container with fields R (3x3 rotation) and t (translation).
    gt_data = np.loadtxt(path_to_gt_file,
                         usecols=(0, 1, 2, 3, 4, 5, 6, 7),
                         dtype=float)
    gt_pose_list = [None] * len(gt_data)
    gt_pose_list[0] = Pose(R=np.eye(3), t=np.zeros(3))
    gt_timestamps = gt_data[:, 0]
    t_np = gt_data[:, 1:4]
    # Reorder quaternions from (x, y, z, w) in the file to (w, x, y, z).
    q_np = np.roll(gt_data[:, 4:], 1, axis=1)
    # Store the initial groundtruth pose as a 3D rigid-body transform, assuming
    # it corresponds to the origin of the estimated trajectory.
    R_origin = np.asarray(Quaternion(*(q_np[0, :])).toRotationMatrix())
    T_gt_to_est = np.eye(4)
    T_gt_to_est[:3, :3] = R_origin
    T_gt_to_est[:3, 3] = t_np[0, :]
    T_gt_to_est = np.linalg.inv(T_gt_to_est)
    for ii in range(1, len(t_np)):
        T_current = np.eye(4)
        T_current[:3, 3] = t_np[ii, :]
        mini_q = Quaternion(*(q_np[ii, :]))
        T_current[:3, :3] = np.asarray(mini_q.toRotationMatrix())
        # Shift the origin of the current pose by left-multiplying with the
        # inverse of the initial transform.
        T_shifted = np.dot(T_gt_to_est, T_current)
        T_shifted = np.linalg.inv(T_shifted)
        shifted_pose = Pose(R=T_shifted[:3, :3], t=T_shifted[:3, 3])
        gt_pose_list[ii] = shifted_pose
    return gt_pose_list, gt_timestamps
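
The core of the method is a rigid-body change of reference frame: every groundtruth pose is left-multiplied by the inverse of the initial pose so the trajectory starts at the origin. A minimal, self-contained numpy sketch of just that step (plain 4x4 matrices instead of the Pose/Quaternion types above; it omits the extra inversion the snippet applies afterwards):

import numpy as np

def shift_to_origin(T_list):
    """Re-express each 4x4 pose relative to the first pose in the list."""
    T0_inv = np.linalg.inv(T_list[0])
    # Left-multiplying makes the first pose the identity; every other pose
    # is then measured from that common origin.
    return [T0_inv @ T for T in T_list]

# Toy check: two poses translated along x by 1 and 3 units.
T_a = np.eye(4); T_a[0, 3] = 1.0
T_b = np.eye(4); T_b[0, 3] = 3.0
shifted = shift_to_origin([T_a, T_b])
print(shifted[1][0, 3])  # 2.0: pose b sits 2 units from pose a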
Example No. 2
def transform_poses_to_base(config, poses):
    """
    Transform the poses to the base frame with settings specified in the config
    :param config:
    :param poses:
    :return:
    """
    ## initialize base to cam transformation
    tf_t_BC = Vector3(config['base_to_cam_pose']['translation']['x'],
                      config['base_to_cam_pose']['translation']['y'],
                      config['base_to_cam_pose']['translation']['z'])
    tf_q_BC = Quaternion(config['base_to_cam_pose']['orientation']['w'],
                         config['base_to_cam_pose']['orientation']['x'],
                         config['base_to_cam_pose']['orientation']['y'],
                         config['base_to_cam_pose']['orientation']['z'])
    tf_R_BC = tf_q_BC.toRotationMatrix()

    ## transformation of poses
    tf_poses = []
    for pose in poses:
        t = pose.t - tf_t_BC
        R = pose.R * tf_R_BC.inverse()
        tf_pose = Pose(R=R, t=t)
        tf_poses.append(tf_pose)

    return tf_poses
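
The function only reads the 'base_to_cam_pose' entry of the config. A hypothetical config dict with exactly the keys accessed above (the numeric values are placeholders):

config = {
    'base_to_cam_pose': {
        'translation': {'x': 0.0, 'y': 0.0, 'z': 0.1},            # placeholder values
        'orientation': {'w': 1.0, 'x': 0.0, 'y': 0.0, 'z': 0.0},  # identity rotation
    }
}
# tf_poses = transform_poses_to_base(config, poses)  # poses: a list of Pose objects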
Example No. 3
def quaternion_to_rotation_matrix(q_np):
    """Convert a quaternion given as a numpy array in (x, y, z, w) order to a 3x3 rotation matrix."""
    # np.roll shifts the scalar part to the front: (x, y, z, w) -> (w, x, y, z).
    q_mini = Quaternion(*(np.roll(q_np, 1)))
    R = q_mini.toRotationMatrix()
    return np.asarray(R, dtype=np.float32)
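
For reference, the same conversion can be written without a quaternion library; a sketch using the standard unit-quaternion-to-rotation-matrix formula, keeping the snippet's (x, y, z, w) input order:

import numpy as np

def quaternion_to_rotation_matrix_np(q_np):
    # Same input convention as above: q_np = (x, y, z, w).
    x, y, z, w = np.asarray(q_np, dtype=np.float64) / np.linalg.norm(q_np)
    return np.array([
        [1 - 2*(y*y + z*z), 2*(x*y - w*z),     2*(x*z + w*y)],
        [2*(x*y + w*z),     1 - 2*(x*x + z*z), 2*(y*z - w*x)],
        [2*(x*z - w*y),     2*(y*z + w*x),     1 - 2*(x*x + y*y)],
    ], dtype=np.float32)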
Example No. 4
class SingleCamTracker:

    # TODO: require_depth, require_pose
    def __init__(self,
                 config,
                 tracking_module_path,
                 checkpoint,
                 require_depth=False,
                 require_pose=False):
        """
        Creates an object for a single camera tracking instance. It wraps around the DeepTAM Sequence and Tracker
        classes. This is done to make it easier to extend the setup for a multi-camera tracking class.

        :param config: (dict) comprising of the camera settings
        :param tracking_module_path: (str) path to file containing the Tracker network class
        :param checkpoint: (str) path to the pre-trained network weights
        :param require_depth: (bool) whether depth images must be available in the sequence
        :param require_pose: (bool) whether groundtruth poses must be available in the sequence
        """
        assert isinstance(config, dict)

        self._startup = False
        self.name = config['cam_name']

        # base to camera transformation
        self.tf_t_BC = Vector3(config['base_to_cam_pose']['translation']['x'],
                               config['base_to_cam_pose']['translation']['y'],
                               config['base_to_cam_pose']['translation']['z'])
        self.tf_q_BC = Quaternion(
            config['base_to_cam_pose']['orientation']['w'],
            config['base_to_cam_pose']['orientation']['x'],
            config['base_to_cam_pose']['orientation']['y'],
            config['base_to_cam_pose']['orientation']['z'])
        self.tf_R_BC = self.tf_q_BC.toRotationMatrix()

        self.sequence = RGBDSequence(
            config['cam_dir'],
            rgb_parameters=config['rgb_parameters'],
            depth_parameters=config['depth_parameters'],
            time_syncing_parameters=config['time_syncing_parameters'],
            cam_name=config['cam_name'],
            require_depth=require_depth,
            require_pose=require_pose)

        self.intrinsics = self.sequence.get_original_normalized_intrinsics()

        self.tracker = Tracker(
            tracking_module_path,
            checkpoint,
            self.intrinsics,
            tracking_parameters=config['tracking_parameters'])

    def __del__(self):
        self.delete_tracker()

    def startup(self):
        """
        Clears the tracker and sets the initial pose of the camera
        :return:
        """
        frame = self.sequence.get_dict(0, self.intrinsics,
                                       self.tracker.image_width,
                                       self.tracker.image_height)

        pose0_gt = frame['pose']

        self.tracker.clear()
        # WIP: If gt_poses is aligned such that it starts from the identity pose, you may comment out this line
        # TODO: @Rohit, should we make this base-to-cam transformation?
        self.tracker.set_init_pose(pose0_gt)
        self._startup = True

    def delete_tracker(self):
        del self.tracker

    def update(self, frame_idx):
        """
        Performs the tracking for an input image marked by the frame_idx
        :param frame_idx: Frame number in the data to perform tracking on
        :return:
            (dict) frame: contains the RGB image, depth image, and groundtruth pose for that frame index
            (dict) result: contains the pose, warped image and keyframe information
            (float) timestamp: timestamp corresponding to that frame index
            (list) poses: list of all the poses computed so far
        """
        if not self._startup:
            raise Exception(
                PRINT_PREFIX +
                'Tracker "%s" has not been initialized. Please call startup() first.'
                % self.name)

        frame = self.sequence.get_dict(frame_idx, self.intrinsics,
                                       self.tracker.image_width,
                                       self.tracker.image_height)

        result = self.tracker.feed_frame(frame['image'], frame['depth'])

        timestamp = self.sequence.get_timestamp(frame_idx)
        return frame, result, timestamp, self.tracker.poses

    def get_sequence_length(self):
        return self.sequence.get_sequence_length()

    def get_transformed_pose(self):
        """
        Transforms the poses estimated from the tracker to a base frame
        :return:
        """
        ## transformation of poses
        tf_poses = []
        for pose in self.tracker.poses:
            t = pose.t - self.tf_t_BC
            R = pose.R * self.tf_R_BC.inverse()
            tf_pose = Pose(R=R, t=t)
            tf_poses.append(tf_pose)

        return tf_poses
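
A hypothetical end-to-end use of the class; the config dict must have the shape read in __init__, and the module/checkpoint paths below are placeholders:

tracker = SingleCamTracker(config,
                           tracking_module_path='path/to/tracking_module.py',  # placeholder
                           checkpoint='path/to/snapshot.ckpt')                 # placeholder
tracker.startup()  # required before update()

for idx in range(tracker.get_sequence_length()):
    frame, result, timestamp, poses = tracker.update(idx)

# Re-express the estimated trajectory in the base frame.
base_frame_poses = tracker.get_transformed_pose()
tracker.delete_tracker()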