Example #1
    def __init__(self, dataset_root):
        # Load intrinsics, distortion coefficients, and
        # body-to-camera extrinsics for both cameras
        intrinsics0, dist_coeffs0, self.T_bc0 =\
            load_camera_params(dataset_root, 0)
        intrinsics1, dist_coeffs1, self.T_bc1 =\
            load_camera_params(dataset_root, 1)

        # intrinsics = [fx, fy, cx, cy]; RadTan applies a
        # radial-tangential distortion model
        self.camera_model0 = CameraModel(
            CameraParameters(focal_length=intrinsics0[0:2],
                             offset=intrinsics0[2:4]), RadTan(dist_coeffs0))
        self.camera_model1 = CameraModel(
            CameraParameters(focal_length=intrinsics1[0:2],
                             offset=intrinsics1[2:4]), RadTan(dist_coeffs1))

        timestamps0, image_paths0 = load_image_paths(dataset_root, 0)
        timestamps1, image_paths1 = load_image_paths(dataset_root, 1)
        timestamps_body, rotations_wb, t_wb = load_body_poses(dataset_root)

        # Synchronize the three timestamp streams; each row of
        # `matches` holds (body, camera0, camera1) indices
        matches = tum.synchronize(timestamps_body,
                                  timestamps0,
                                  timestamps_ref=timestamps1)
        indices_wb = matches[:, 0]
        indices0 = matches[:, 1]
        indices1 = matches[:, 2]
        self.rotations_wb = value_list(rotations_wb, indices_wb)
        self.t_wb = value_list(t_wb, indices_wb)
        self.image_paths0 = value_list(image_paths0, indices0)
        self.image_paths1 = value_list(image_paths1, indices1)
        self.length = matches.shape[0]
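
All of these examples index a container with a list of keys through value_list. A minimal sketch of what such a helper could look like (an assumption for illustration, not necessarily the library's actual implementation):

def value_list(container, keys):
    # Collect container[key] for every key, in order.
    # Works for dicts keyed by point hashes as well as for
    # sequences indexed by integer positions.
    return [container[key] for key in keys]
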
Example #2

    def export_points(self):
        assert len(self.point_dict) == len(self.point_colors)
        point_hashes = self.point_dict.keys()
        point_array = np.array(value_list(self.point_dict, point_hashes))
        point_colors = np.array(value_list(self.point_colors, point_hashes))
        # Scale colors from 8-bit integers to floats in [0, 1]
        point_colors = point_colors.astype(np.float64) / 255.
        return point_array, point_colors
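
The returned colors are floats in [0, 1], the range matplotlib expects, so the map can be visualized directly (a hypothetical usage, assuming a populated instance named mapper):

import matplotlib.pyplot as plt

points, colors = mapper.export_points()  # `mapper` is hypothetical
ax = plt.figure().add_subplot(projection='3d')
ax.scatter(points[:, 0], points[:, 1], points[:, 2], c=colors, s=1)
plt.show()
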
Example #3
    def __init__(self, dataset_root, which_freiburg):
        # The dataset provides three camera models;
        # select one by setting 'which_freiburg'

        self.depth_factor = DEPTH_FACTOR * get_depth_scale(which_freiburg)
        self.camera_model = get_camera_model_rgb(which_freiburg)
        self.camera_model_depth = get_camera_model_depth(which_freiburg)

        timestamps_gt, rotations, positions =\
            load_ground_truth_poses(dataset_root)

        timestamps_rgb, paths_rgb = load_rgb_image_paths(dataset_root)
        timestamps_depth, paths_depth = load_depth_image_paths(dataset_root)

        # Each row of `matches` pairs a ground-truth pose with the
        # temporally closest RGB and depth frames
        matches = synchronize(timestamps_gt,
                              timestamps_rgb,
                              timestamps_ref=timestamps_depth)

        indices_gt = matches[:, 0]
        indices_rgb = matches[:, 1]
        indices_depth = matches[:, 2]

        self.length = matches.shape[0]

        self.timestamps = timestamps_gt[indices_gt]
        self.rotations = rotations[indices_gt]
        self.positions = positions[indices_gt]

        self.paths_rgb = value_list(paths_rgb, indices_rgb)
        self.paths_depth = value_list(paths_depth, indices_depth)
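
Both constructors lean on synchronize to associate three timestamp streams and get back an N x 3 array of index triples. A minimal sketch of the nearest-neighbour matching it presumably performs (an illustrative assumption; max_diff and the exact matching policy are not from the library):

import numpy as np

def synchronize(timestamps_a, timestamps_b, timestamps_ref,
                max_diff=0.01):
    # For each reference timestamp, pick the closest timestamp in
    # the other two streams; keep the triple only if both lie
    # within max_diff seconds.
    matches = []
    for k, t in enumerate(timestamps_ref):
        i = int(np.argmin(np.abs(timestamps_a - t)))
        j = int(np.argmin(np.abs(timestamps_b - t)))
        if (abs(timestamps_a[i] - t) <= max_diff and
                abs(timestamps_b[j] - t) <= max_diff):
            matches.append((i, j, k))
    return np.array(matches)
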
    def estimate_pose(self, features1, viewpoints, matches):
        assert len(viewpoints) == len(matches)

        point_hashes = []
        keypoint_indices = []
        for viewpoint, matches01 in zip(viewpoints, matches):
            correspondences = self.correspondences[viewpoint]
            # Collect the 3D point hash and keypoint index for
            # every match in this viewpoint
            hashes_, indices_ = get_indices(correspondences, matches01)
            point_hashes += hashes_
            keypoint_indices += indices_
        assert len(point_hashes) == len(keypoint_indices)
        point_array = np.array(value_list(self.point_dict, point_hashes))
        # Recover the new frame's pose from 3D-2D correspondences
        return solve_pnp(point_array, features1.keypoints[keypoint_indices])
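
solve_pnp solves the Perspective-n-Point problem: recovering a camera pose from 3D points and their 2D observations. For reference, the analogous computation with OpenCV (not this library's API) on synthetic data:

import cv2
import numpy as np

# Project known 3D points with a known pose, then recover that pose.
K = np.array([[500., 0., 320.], [0., 500., 240.], [0., 0., 1.]])
object_points = np.random.uniform(-1., 1., (10, 3)) + [0., 0., 5.]
rvec_true = np.array([0.1, -0.2, 0.05])
tvec_true = np.array([0.3, -0.1, 0.2])
image_points, _ = cv2.projectPoints(object_points, rvec_true,
                                    tvec_true, K, None)
ok, rvec, tvec = cv2.solvePnP(object_points, image_points, K, None)
R, _ = cv2.Rodrigues(rvec)  # axis-angle -> rotation matrix
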
    def run_ba(self, viewpoints):
        correspondences = value_list(self.correspondences, viewpoints)
        poses = value_list(self.poses, viewpoints)
        features = value_list(self.features, viewpoints)

        # Gather every 3D point observed from these viewpoints
        point_hashes = unique_point_hashes(correspondences)

        point_array = np.array(value_list(self.point_dict, point_hashes))

        # Flatten the observations into per-residual index arrays
        viewpoint_indices, point_indices, keypoints = get_ba_indices(
            correspondences, features, point_hashes)

        poses, point_array = try_run_ba(viewpoint_indices, point_indices,
                                        poses, point_array, keypoints)

        # Write the refined points back into the map
        for point_hash, point in zip(point_hashes, point_array):
            self.point_dict[point_hash] = point

        for viewpoint, pose in zip(viewpoints, poses):
            self.poses[viewpoint] = pose
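
try_run_ba refines poses and points jointly by minimizing reprojection error over the (viewpoint, point) pairs produced by get_ba_indices. A sketch of the residual such a solver minimizes (illustrative only, assuming keypoints in normalized image coordinates and poses given as (R, t) pairs):

import numpy as np

def reprojection_residuals(poses, points, viewpoint_indices,
                           point_indices, keypoints):
    # Residual k compares the projection of points[point_indices[k]]
    # into view viewpoint_indices[k] against keypoints[k].
    residuals = []
    for i, j, keypoint in zip(viewpoint_indices, point_indices,
                              keypoints):
        R, t = poses[i]
        p = R @ points[j] + t  # world -> camera frame
        residuals.append(p[0:2] / p[2] - keypoint)  # pinhole projection
    return np.array(residuals)
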
    def match_(self, features1, viewpoints):
        features = value_list(self.features, viewpoints)
        # Match the new frame's features against each stored viewpoint
        return [self.matcher(features0, features1) for features0 in features]
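
Put together, one tracking step would match the incoming features against the stored viewpoints, estimate the new pose, and then refine the map (hypothetical calls on a hypothetical instance vo, using the methods defined above):

matches = vo.match_(features1, viewpoints)
pose1 = vo.estimate_pose(features1, viewpoints, matches)
vo.run_ba(viewpoints)
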