Example No. 1
def make_pose_error(estimated_pose: tf.Transform, reference_pose: tf.Transform) -> PoseError:
    """
    Make a pose error object from an estimated
    :param estimated_pose:
    :param reference_pose:
    :return:
    """
    trans_error = estimated_pose.location - reference_pose.location
    trans_error_length = np.linalg.norm(trans_error)

    trans_error_direction = np.nan  # No direction if the vectors are the same
    if trans_error_length > 0:
        # Get the unit vector in the direction of the true location
        reference_norm = np.linalg.norm(reference_pose.location)
        if reference_norm > 0:
            unit_reference = reference_pose.location / reference_norm
            # Find the angle between the trans error and the true location
            dot_product = np.dot(trans_error / trans_error_length, unit_reference)
            trans_error_direction = np.arccos(
                # Clamp to the arccos domain [-1, 1] to avoid numerical errors
                min(1.0, max(-1.0, dot_product))
            )
    # Different to the trans_direction, this is the angle between the estimated orientation and true orientation
    rot_error = tf.quat_diff(estimated_pose.rotation_quat(w_first=True), reference_pose.rotation_quat(w_first=True))
    return PoseError(
        x=trans_error[0],
        y=trans_error[1],
        z=trans_error[2],
        length=trans_error_length,
        direction=trans_error_direction,
        rot=rot_error
    )
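For reference, here is a minimal NumPy-only sketch of the direction calculation above, using made-up vectors rather than any particular Transform or PoseError implementation: the direction is the arccos of the dot product of the two unit vectors, clamped to the arccos domain.
import numpy as np

trans_error = np.array([1.0, 0.0, 0.0])          # hypothetical translation error
reference_location = np.array([0.0, 2.0, 0.0])   # hypothetical reference location

unit_error = trans_error / np.linalg.norm(trans_error)
unit_reference = reference_location / np.linalg.norm(reference_location)

# Clamp the dot product to [-1, 1] before arccos to guard against floating point drift
dot_product = float(np.dot(unit_error, unit_reference))
direction = np.arccos(min(1.0, max(-1.0, dot_product)))
print(direction)  # pi / 2: the error is perpendicular to the reference direction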
Example No. 2
    def test_normalises_to_first_pose(self):
        first_pose = Transform(location=(15.2, -1167.9, -1.2),
                               rotation=(0.535, 0.2525, 0.11, 0.2876))
        relative_trajectory = {}
        absolute_trajectory = {}
        for time in range(0, 10):
            timestamp = time * 4999.936 + 1403638128.940097024
            pose = Transform(location=(0.122 * time, -0.53112 * time,
                                       1.893 * time),
                             rotation=(0.772 * time, -0.8627 * time,
                                       -0.68782 * time))
            relative_trajectory[timestamp] = pose
            absolute_trajectory[timestamp] = first_pose.find_independent(pose)

        builder = TrajectoryBuilder(relative_trajectory.keys())
        for time in sorted(absolute_trajectory.keys()):
            builder.add_trajectory_point(time, absolute_trajectory[time])
        result = builder.get_interpolated_trajectory()
        self.assertEqual(set(relative_trajectory.keys()), set(result.keys()))
        for time in relative_trajectory.keys():
            self.assertNPClose(relative_trajectory[time].location,
                               result[time].location,
                               atol=1e-13,
                               rtol=0)
            self.assertNPClose(relative_trajectory[time].rotation_quat(True),
                               result[time].rotation_quat(True),
                               atol=1e-14,
                               rtol=0)
Example No. 3
    def test_profile_constructor_infer_motions(self):
        import cProfile as profile

        repeats = 3
        stats_file = "create_slam_result_motions.prof"
        random = np.random.RandomState(13)

        frame_results = [
            FrameResult(
                timestamp=idx + random.normal(0, 0.01),
                image=image,
                processing_time=random.uniform(0.01, 1),
                pose=image.camera_pose,
                estimated_motion=Transform(
                    (14 + random.normal(0, 1), 0.9 + random.normal(0, 0.05),
                     0.1 + random.normal(0, 0.05)),
                    tf3d.quaternions.axangle2quat(
                        (1, 2, 4), 5 * np.pi / (2 * self.num_images) +
                        random.normal(0, np.pi / 64)),
                    w_first=True) if idx > (self.num_images // 10) else None,
                tracking_state=TrackingState.OK,
                num_features=random.randint(10, 1000),
                num_matches=random.randint(10, 1000))
            for idx, image in enumerate(self.images)
        ]
        # Set an initial estimated pose so that the poses will be inferred
        frame_results[(self.num_images // 10)].estimated_pose = Transform()

        profile.runctx(
            "make_trial_result(self.system, self.image_source, frame_results)",
            locals=locals(),
            globals=globals(),
            filename=stats_file)
def get_error_from_motion(motion: tf.Transform, gt_motion: tf.Transform, avg_motion: tf.Transform = None) \
        -> typing.Tuple[float, float, float, float, float, float, float, float, float, float, float, float]:
    """
    Given a motion, ground truth motion, and average estimated motion, extract 12 different error statistics
    :param motion:
    :param gt_motion:
    :param avg_motion:
    :return:
    """
    # Error
    trans_error = motion.location - gt_motion.location
    trans_error_length = np.linalg.norm(trans_error)
    trans_error_direction = np.arccos(
        min(
            1.0,
            max(
                -1.0,
                np.dot(trans_error / trans_error_length, gt_motion.location /
                       np.linalg.norm(gt_motion.location))))
    ) if trans_error_length > 0 else 0  # No error direction when there is no error
    rot_error = tf.quat_diff(motion.rotation_quat(w_first=True),
                             gt_motion.rotation_quat(w_first=True))

    # Noise
    if avg_motion is None:
        return (
            trans_error[0],
            trans_error[1],
            trans_error[2],
            trans_error_length,
            trans_error_direction,
            rot_error,

            # No average estimate, so the noise statistics are NaN
            np.nan,
            np.nan,
            np.nan,
            np.nan,
            np.nan,
            np.nan)
    else:
        trans_noise = motion.location - avg_motion.location
        trans_noise_length = np.linalg.norm(trans_noise)
        trans_noise_direction = np.arccos(
            min(
                1.0,
                max(
                    -1.0,
                    np.dot(
                        trans_noise / trans_noise_length, gt_motion.location /
                        np.linalg.norm(gt_motion.location))))
        ) if trans_noise_length > 0 else 0  # No noise direction for 0 noise
        rot_noise = tf.quat_diff(motion.rotation_quat(w_first=True),
                                 avg_motion.rotation_quat(w_first=True))

        return (trans_error[0], trans_error[1], trans_error[2],
                trans_error_length, trans_error_direction, rot_error,
                trans_noise[0], trans_noise[1], trans_noise[2],
                trans_noise_length, trans_noise_direction, rot_noise)
Example No. 5
    def test_will_not_use_two_identical_timestamps(self):
        point = PointEstimate(0.5)
        point.add_sample(0, Transform())
        point.add_sample(0, Transform())
        with self.assertRaises(TypeError):
            point.get_estimate()
        # even though we have 2 estimates closer than this, they are identical
        point.add_sample(10, Transform((10, -5, 0)))
        result = point.get_estimate()
        self.assertNPEqual((0.5, -0.25, 0), result.location)
Example No. 6
    def test_raises_exception_if_only_given_one_estimate(self):
        point = PointEstimate(0.5)
        with self.assertRaises(TypeError):
            point.get_estimate()
        point.add_sample(0, Transform())
        with self.assertRaises(TypeError):
            point.get_estimate()
        point.add_sample(1, Transform((1, 0, 0)))
        result = point.get_estimate()
        self.assertNPEqual((0.5, 0, 0), result.location)
    def setUpClass(cls):
        cls.system = mock_types.MockSystem()
        cls.image_source = mock_types.MockImageSource()

        image_data = [
            np.random.normal(128, 20, size=(10, 10)).astype(np.uint8)
            for _ in range(cls.num_images)
        ]
        cls.images = [
            Image(
                pixels=pixels,
                metadata=make_metadata(
                    pixels=pixels,
                    source_type=ImageSourceType.SYNTHETIC,
                    camera_pose=Transform(
                        (idx * 15, idx, 0),
                        tf3d.quaternions.axangle2quat((1, 2, 3), 5 * idx * np.pi / (2 * cls.num_images)), w_first=True
                    ),
                    intrinsics=CameraIntrinsics(
                        width=10, height=10,
                        fx=5, fy=5,
                        cx=5, cy=5
                    )
                )
            )
            for idx, pixels in enumerate(image_data)
        ]
Example No. 8
    def get_interpolated_trajectory(self) -> typing.Mapping[float, Transform]:
        """
        Get the resulting
        :return:
        """
        # get the estimated pose for the first timestamp in the trajectory
        first_time = min(self._point_estimators.keys())
        if not self._point_estimators[first_time].can_estimate():
            raise RuntimeError(
                "Not enough timestamps read to determine pose for time {0}".
                format(first_time))
        first_pose = self._point_estimators[first_time].get_estimate()

        # Build the rest of the trajectory relative to the first pose
        result = {first_time: Transform()}
        for time, point_estimator in self._point_estimators.items():
            if time != first_time:
                if not point_estimator.can_estimate():
                    raise RuntimeError(
                        "Not enough trajectory points read to determine pose for time {0}"
                        .format(time))
                result[time] = first_pose.find_relative(
                    point_estimator.get_estimate())

        return result
Example No. 9
    def clean(self):
        """
        Custom validation. Checks that estimated motions and trajectories match.
        Raises validation errors if the stored motion does not match the change in pose,
        or the estimated motion does not match the change in estimated pose
        Uses np.isclose to tolerate minor floating-point variation.
        :return:
        """
        if self.results[0].motion != Transform():
            raise ValidationError(
                "The true motion for the first frame must be zero")
        if self.results[0].estimated_motion is not None:
            raise ValidationError(
                "The estimated motion for the first frame must be None")

        for idx in range(1, len(self.results)):
            # Check for 0 change in timestamp, will cause divide by zeros when calculating speed
            if self.results[idx].timestamp - self.results[idx -
                                                          1].timestamp <= 0:
                raise ValidationError(
                    "Frame {0} has timestamp less than or equal to the frame before it"
                    .format(idx))

            # Validate the true motion
            pose_motion = self.results[idx - 1].pose.find_relative(
                self.results[idx].pose)
            if not np.all(np.isclose(pose_motion.location, self.results[idx].motion.location)) or not \
                    np.all(np.isclose(pose_motion.rotation_quat(), self.results[idx].motion.rotation_quat())):
                raise ValidationError(
                    "Ground truth motion does not match change in position at frame {0} ({1} != {2})"
                    .format(idx, pose_motion, self.results[idx].motion))

            # Validate the estimated motion
            # First off, we can only check consistency if the previous frame has an estimated pose
            if self.results[idx - 1].estimated_pose is not None:
                if self.results[
                        idx].estimated_pose is not None and self.results[
                            idx].estimated_motion is None:
                    raise ValidationError(
                        "Estimated motion for frame {0} can be inferred, but is missing"
                        .format(idx))
                elif self.results[idx].estimated_pose is None and self.results[
                        idx].estimated_motion is not None:
                    raise ValidationError(
                        "Estimated pose for frame {0} can be inferred, but is missing"
                        .format(idx))
                elif self.results[
                        idx].estimated_pose is not None and self.results[
                            idx].estimated_motion is not None:
                    pose_motion = self.results[
                        idx - 1].estimated_pose.find_relative(
                            self.results[idx].estimated_pose)
                    if not np.all(np.isclose(pose_motion.location, self.results[idx].estimated_motion.location)) or \
                            not np.all(np.isclose(pose_motion.rotation_quat(),
                                                  self.results[idx].estimated_motion.rotation_quat())):
                        raise ValidationError(
                            "Ground truth motion does not match change in position at frame {0} ({1} != {2})"
                            .format(idx, pose_motion,
                                    self.results[idx].estimated_motion))
Example No. 10
    def make_pose(time):
        # Make poses that depend non-linearly on time so that linear interpolation is inaccurate
        return Transform(location=(10 * time - 0.1 * time * time,
                                   10 * np.cos(time * np.pi / 50), 0),
                         rotation=tf3d.quaternions.axangle2quat(
                             (-2, -3, 2),
                             np.log(time + 1) * np.pi / (4 * np.log(10))),
                         w_first=True)
Example No. 11
    def test_raises_exception_if_only_one_sample_point(self):
        builder = TrajectoryBuilder(time + 0.4311 for time in range(0, 10))
        builder.add_trajectory_point(5 + 0.5, Transform((5 * 10, -5, 0)))
        with self.assertRaises(RuntimeError) as ctx:
            builder.get_interpolated_trajectory()
        message = str(ctx.exception)
        # Check that the error message includes the timestamp
        self.assertIn('0.4311', message)
Example No. 12
def make_trials(system: VisionSystem, image_collection: ImageCollection,
                repeats: int, random: np.random.RandomState):
    # Get the true motions, for making trials
    true_motions = [
        image_collection.images[frame_idx - 1].camera_pose.find_relative(
            image_collection.images[frame_idx].camera_pose)
        if frame_idx > 0 else None
        for frame_idx in range(len(image_collection))
    ]

    # Make some plausible trial results
    trial_results = []
    for repeat in range(repeats):
        start_idx = random.randint(0, len(image_collection) - 2)
        frame_results = [
            FrameResult(
                timestamp=timestamp,
                image=image,
                pose=image.camera_pose,
                processing_time=random.uniform(0.001, 1.0),
                estimated_motion=true_motions[frame_idx].find_independent(
                    Transform(location=random.normal(0, 1, 3),
                              rotation=t3.quaternions.axangle2quat(
                                  random.uniform(-1, 1, 3),
                                  random.normal(0, np.pi / 2)),
                              w_first=True))
                if frame_idx > start_idx else None,
                tracking_state=TrackingState.OK
                if frame_idx > start_idx else TrackingState.NOT_INITIALIZED,
                num_matches=random.randint(10, 100))
            for frame_idx, (timestamp, image) in enumerate(image_collection)
        ]
        frame_results[start_idx].estimated_pose = Transform()
        trial_settings = {'random': random.randint(0, 10), 'repeat': repeat}
        trial_result = SLAMTrialResult(system=system,
                                       image_source=image_collection,
                                       success=True,
                                       results=frame_results,
                                       has_scale=False,
                                       settings=trial_settings)
        trial_result.save()
        trial_results.append(trial_result)
    return trial_results
Example No. 13
def fix_coordinates(trans: tf.Transform) -> tf.Transform:
    """
    Exchange the coordinates on a transform from camera frame to world frame.
    Used for the camera extrinsics
    :param trans:
    :return:
    """
    x, y, z = trans.location
    qw, qx, qy, qz = trans.rotation_quat(w_first=True)
    return make_camera_pose(x, y, z, qw, qx, qy, qz)
Example No. 14
def compute_loop_distances_and_angles(current_pose: tf.Transform, linked_poses: typing.Iterable[tf.Transform]) -> \
        typing.Tuple[typing.List[float], typing.List[float]]:
    relative_poses = [
        current_pose.find_relative(linked_pose) for linked_pose in linked_poses
    ]
    return [
        np.linalg.norm(relative_pose.location)
        for relative_pose in relative_poses
    ], [
        tf.quat_angle(relative_pose.rotation_quat(w_first=True))
        for relative_pose in relative_poses
    ]
Example No. 15
    def test_interpolates_pose(self):
        time_offset = 0.332
        times = [time + time_offset for time in range(0, 10)]
        builder = TrajectoryBuilder(times)
        for time in range(
                0, 11
        ):  # includes 10, to make sure we get either end of the range
            # Create sample points at the given times
            builder.add_trajectory_point(
                time,
                Transform(location=(10 * time, -time, 0),
                          rotation=tf3d.quaternions.axangle2quat(
                              (4, -3, 2), time * np.pi / 20),
                          w_first=True))
        result = builder.get_interpolated_trajectory()
        self.assertEqual(set(times), set(result.keys()))

        first_pose = Transform(location=(10 * time_offset, -time_offset, 0),
                               rotation=tf3d.quaternions.axangle2quat(
                                   (4, -3, 2), time_offset * np.pi / 20),
                               w_first=True)
        for time in times:
            # This is the true pose at that time, relative to the true pose at the first time.
            # need to do it this way, because of the rotation, which shifts things
            # the builder has never seen this transform, the timestamps are different
            expected_pose = first_pose.find_relative(
                Transform(location=(10 * time, -time, 0),
                          rotation=tf3d.quaternions.axangle2quat(
                              (4, -3, 2), time * np.pi / 20),
                          w_first=True))
            interpolated_pose = result[time]
            self.assertNPClose(expected_pose.location,
                               interpolated_pose.location,
                               atol=1e-13,
                               rtol=0)
            self.assertNPClose(expected_pose.rotation_quat(True),
                               interpolated_pose.rotation_quat(True),
                               atol=1e-14,
                               rtol=0)
Example No. 16
    def test_simple_trial_run_generated(self):
        sequence_folder, left_path, right_path = ndds_loader.find_files(
            NDDS_SEQUENCE)
        camera_intrinsics = ndds_loader.read_camera_intrinsics(
            left_path / '_camera_settings.json')
        max_img_id = ndds_loader.find_max_img_id(
            lambda idx: left_path / ndds_loader.IMG_TEMPLATE.format(idx))
        with (NDDS_SEQUENCE / 'timestamps.json').open('r') as fp:
            timestamps = json.load(fp)

        subject = LibVisOMonoSystem(
            matcher_nms_n=10,
            matcher_nms_tau=66,
            matcher_match_binsize=50,
            matcher_match_radius=245,
            matcher_match_disp_tolerance=2,
            matcher_outlier_disp_tolerance=5,
            matcher_outlier_flow_tolerance=2,
            matcher_multi_stage=False,
            matcher_half_resolution=False,
            matcher_refinement=MatcherRefinement.SUBPIXEL,
            bucketing_max_features=6,
            bucketing_bucket_width=136,
            bucketing_bucket_height=102,
            height=1.0,
            pitch=0.0,
            ransac_iters=439,
            inlier_threshold=4.921875,
            motion_threshold=609.375)
        subject.set_camera_intrinsics(camera_intrinsics, 0.1)

        subject.start_trial(ImageSequenceType.SEQUENTIAL, seed=0)
        image_group = 'test'
        with image_manager.get().get_group(image_group, allow_write=True):
            for img_idx in range(max_img_id + 1):
                pixels = image_utils.read_colour(
                    left_path / ndds_loader.IMG_TEMPLATE.format(img_idx))
                image = Image(
                    _id=bson.ObjectId(),
                    pixels=pixels,
                    image_group=image_group,
                    metadata=imeta.ImageMetadata(camera_pose=Transform()))
                subject.process_image(image, timestamps[img_idx])
        result = subject.finish_trial()

        self.assertIsInstance(result, SLAMTrialResult)
        self.assertEqual(subject, result.system)
        self.assertTrue(result.success)
        self.assertFalse(result.has_scale)
        self.assertIsNotNone(result.run_time)
        self.assertEqual(max_img_id + 1, len(result.results))
Example No. 17
def make_camera_pose(pose):
    """
    KITTI uses a different coordinate frame to the one I'm using, which is the same as the Libviso2 frame.
    This function is to convert dataset ground-truth poses to transform objects.
    Thankfully, it's still a right-handed coordinate frame, which makes this easier.
    Frame is: z forward, x right, y down

    :param pose: The raw pose as loaded by pykitti, a 4x4 homogeneous transform matrix.
    :return: A Transform object representing the world pose of the current frame
    """
    pose = np.asmatrix(pose)
    coordinate_exchange = np.array([[0, 0, 1, 0],
                                    [-1, 0, 0, 0],
                                    [0, -1, 0, 0],
                                    [0, 0, 0, 1]])
    pose = np.dot(np.dot(coordinate_exchange, pose), coordinate_exchange.T)
    return Transform(pose)
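To see what the conjugation by coordinate_exchange does, here is a small self-contained check with a hypothetical pose (NumPy only): a translation along KITTI's z axis (forward) becomes a translation along x (forward) in the converted frame.
import numpy as np

coordinate_exchange = np.array([[0, 0, 1, 0],
                                [-1, 0, 0, 0],
                                [0, -1, 0, 0],
                                [0, 0, 0, 1]])

# A pure translation of 1 unit along KITTI's z axis (forward in the KITTI frame)
kitti_pose = np.eye(4)
kitti_pose[2, 3] = 1.0

converted = coordinate_exchange @ kitti_pose @ coordinate_exchange.T
print(converted[0:3, 3])  # [1. 0. 0.]: forward is now +x, with +y left and +z up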
Example No. 18
def align_point(pose: tf.Transform,
                shift: np.ndarray,
                rotation: np.ndarray,
                scale: float = 1.0) -> tf.Transform:
    """
    Transform an estimated point
    :param pose:
    :param shift:
    :param rotation:
    :param scale:
    :return:
    """
    return tf.Transform(
        location=shift +
        scale * t3.quaternions.rotate_vector(pose.location, rotation),
        rotation=t3.quaternions.qmult(rotation,
                                      pose.rotation_quat(w_first=True)),
        w_first=True)
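As a sanity check of the location arithmetic in align_point, the sketch below reproduces shift + scale * rotate_vector(...) with transforms3d and arbitrary, made-up values (the quaternion helpers are the same ones used above).
import numpy as np
import transforms3d as t3

rotation = t3.quaternions.axangle2quat((0, 0, 1), np.pi / 2)  # 90 degrees about z
shift = np.array([1.0, 0.0, 0.0])
scale = 2.0
point = np.array([2.0, 0.0, 0.0])  # hypothetical estimated location

# Rotate, then scale, then shift, exactly as align_point does for the location
aligned = shift + scale * t3.quaternions.rotate_vector(point, rotation)
print(aligned)  # approximately [1, 4, 0]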
Example No. 19
    def get_scaled_motion(self, index: int) -> typing.Union[Transform, None]:
        """
        Get the motion of the camera from a particular frame, re-scaled to the ground truth motion
        if the system didn't have scale available when it ran.
        May return None if there is no available estimate
        :param index: The index of the frame to get the motion for
        :return: The (re-scaled) estimated motion as a Transform, or None if there is no estimate
        """
        if not 0 <= index < len(self.results):
            return None
        base_motion = self.results[index].estimated_motion
        if self.has_scale or base_motion is None:
            return base_motion
        # Handle the system producing 0 motion estimates for every frame, so scale is 0
        scale = (self.ground_truth_scale /
                 self.estimated_scale) if self.estimated_scale != 0 else 1
        return Transform(location=scale * base_motion.location,
                         rotation=base_motion.rotation_quat(w_first=True),
                         w_first=True)
    def test_profile(self, _):
        import cProfile as profile

        repeats = 3
        random = np.random.RandomState(13)

        # Make some number of trials results to measure
        trial_results = []
        for repeat in range(repeats):
            frame_results = [
                FrameResult(
                    timestamp=idx + random.normal(0, 0.01),
                    image=image,
                    processing_time=random.uniform(0.01, 1),
                    pose=image.camera_pose,
                    estimated_motion=Transform(
                        (14 + random.normal(0, 1), 0.9 + random.normal(0, 0.05), 0.1 + random.normal(0, 0.05)),
                        tf3d.quaternions.axangle2quat(
                            (1, 2, 4), 5 * np.pi / (2 * self.num_images) + random.normal(0, np.pi / 64)),
                        w_first=True
                    ) if idx > 0 else None,
                    tracking_state=TrackingState.OK,
                    num_features=random.randint(10, 1000),
                    num_matches=random.randint(10, 1000)
                )
                for idx, image in enumerate(self.images)
            ]
            trial_result = SLAMTrialResult(
                system=self.system,
                image_source=self.image_source,
                success=True,
                results=frame_results,
                has_scale=False
            )
            trial_results.append(trial_result)

        metric = FrameErrorMetric()

        stats_file = "measure_trials.prof"

        profile.runctx("metric.measure_results(trial_results)",
                       locals=locals(), globals=globals(), filename=stats_file)
Example No. 21
    def get_scaled_pose(self, index: int) -> typing.Union[Transform, None]:
        """
        Get the estimated pose of the camera.
        If the system couldn't infer scale (i.e., it was monocular), then the estimated poses will be re-scaled
        so that the average distance between poses is the same as the average distance between poses
        in the ground truth.
        That is an affine transformation, so it doesn't change the shape of the estimated trajectory.
        :param index: The index of the frame to get the pose for
        :return: The (re-scaled) estimated pose as a Transform, or None if there is no estimate
        """
        if not 0 <= index < len(self.results):
            return None
        base_pose = self.results[index].estimated_pose
        if self.has_scale or base_pose is None:
            return base_pose
        # Handle the system producing 0 motion estimates for every frame, so scale is 0
        scale = (self.ground_truth_scale /
                 self.estimated_scale) if self.estimated_scale != 0 else 1
        return Transform(location=scale * base_pose.location,
                         rotation=base_pose.rotation_quat(w_first=True),
                         w_first=True)
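A small illustration of the rescaling described in the docstring, under the assumption (not shown in this snippet) that ground_truth_scale and estimated_scale are the average distances between consecutive poses; only NumPy and made-up trajectories are used.
import numpy as np

# Hypothetical trajectories as Nx3 arrays of camera locations
ground_truth = np.array([[0, 0, 0], [1, 0, 0], [2, 0, 0]], dtype=float)
estimated = np.array([[0, 0, 0], [0.5, 0, 0], [1.0, 0, 0]], dtype=float)

# Assumed definition: average distance between consecutive poses
ground_truth_scale = np.mean(np.linalg.norm(np.diff(ground_truth, axis=0), axis=1))
estimated_scale = np.mean(np.linalg.norm(np.diff(estimated, axis=0), axis=1))

scale = ground_truth_scale / estimated_scale if estimated_scale != 0 else 1
print(scale)                  # 2.0
print(scale * estimated[-1])  # [2. 0. 0.], now on the same scale as the ground truth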
Example No. 22
    def test_extrapolates_if_not_bounded_below(self):
        def make_pose(time):
            # Make poses that depend non-linearly on time so that linear interpolation is inaccurate
            return Transform(location=(10 * time - 0.1 * time * time,
                                       10 * np.cos(time * np.pi / 50), 0),
                             rotation=tf3d.quaternions.axangle2quat(
                                 (-2, -3, 2),
                                 np.log(time + 1) * np.pi / (4 * np.log(10))),
                             w_first=True)

        builder = TrajectoryBuilder(time + 0.4311 for time in range(0, 10))
        for time in range(
                0, 10
        ):  # No times before 0.5, but min desired timestamp is 0.4311
            builder.add_trajectory_point(time + 0.5, make_pose(time + 0.5))
        result = builder.get_interpolated_trajectory()
        self.assertEqual({time + 0.4311
                          for time in range(0, 10)}, set(result.keys()))

        # Because this is the first pose, we expect it to be the origin, and all the other poses to be relative to it
        self.assertEqual(Transform(), result[0.4311])
        first_pose = linear_interpolate(make_pose(0.5), make_pose(1.5),
                                        (0.4311 - 0.5) / (1.5 - 0.5))
        for time in range(1, 10):
            expected_pose = linear_interpolate(make_pose(time - 0.5),
                                               make_pose(time + 0.5),
                                               (time + 0.4311 -
                                                (time - 0.5)) / (0.5 + 0.5))
            expected_pose = first_pose.find_relative(expected_pose)
            self.assertNPClose(expected_pose.location,
                               result[time + 0.4311].location,
                               atol=1e-13,
                               rtol=0)
            self.assertNPClose(expected_pose.rotation_quat(True),
                               result[time + 0.4311].rotation_quat(True),
                               atol=1e-13,
                               rtol=0)
Example No. 23
    def test_interpolates_pose_over_long_range(self):
        builder = TrajectoryBuilder(range(1, 10))
        # Only add the start and end times, rely on the LERP for all other poses
        builder.add_trajectory_point(0, Transform())
        builder.add_trajectory_point(
            10,
            Transform(  # the pose at time 10
                location=(10 * 10, -10, 0),
                rotation=tf3d.quaternions.axangle2quat((4, -3, 2),
                                                       10 * np.pi / 20),
                w_first=True))
        result = builder.get_interpolated_trajectory()
        self.assertEqual(set(range(1, 10)), set(result.keys()))

        first_pose = Transform(location=(10 * 1, -1, 0),
                               rotation=tf3d.quaternions.axangle2quat(
                                   (4, -3, 2), 1 * np.pi / 20),
                               w_first=True)
        for time in range(1, 10):
            # This is the true pose at that time, relative to the true pose at the first time.
            # need to do it this way, because of the rotation, which shifts things
            # the builder has never seen this transform, the timestamps are different
            expected_pose = first_pose.find_relative(
                Transform(location=(10 * time, -time, 0),
                          rotation=tf3d.quaternions.axangle2quat(
                              (4, -3, 2), time * np.pi / 20),
                          w_first=True))
            interpolated_pose = result[time]
            self.assertNPClose(expected_pose.location,
                               interpolated_pose.location,
                               atol=1e-13,
                               rtol=0)
            self.assertNPClose(expected_pose.rotation_quat(True),
                               interpolated_pose.rotation_quat(True),
                               atol=1e-14,
                               rtol=0)
Example No. 24
def rectify(left_extrinsics: tf.Transform, left_intrinsics: CameraIntrinsics,
            right_extrinsics: tf.Transform, right_intrinsics: CameraIntrinsics) -> \
        typing.Tuple[np.ndarray, np.ndarray, CameraIntrinsics,
                     np.ndarray, np.ndarray, CameraIntrinsics]:
    """
    Compute mapping matrices for performing stereo rectification, from the camera properties.
    Applying the returned transformation to the images using cv2.remap gives us undistorted stereo rectified images

    :param left_extrinsics:
    :param left_intrinsics:
    :param right_extrinsics:
    :param right_intrinsics:
    :return: The left x and y remapping matrices and rectified left intrinsics, then the same for the right camera
    """
    shape = (left_intrinsics.width, left_intrinsics.height)
    left_distortion = np.array([
        left_intrinsics.k1, left_intrinsics.k2, left_intrinsics.p1,
        left_intrinsics.p2, left_intrinsics.k3
    ])
    right_distortion = np.array([
        right_intrinsics.k1, right_intrinsics.k2, right_intrinsics.p1,
        right_intrinsics.p2, right_intrinsics.k3
    ])

    # We want the transform from the right to the left, which is the position of the left relative to the right
    relative_transform = right_extrinsics.find_relative(left_extrinsics)

    r_left = np.zeros((3, 3))
    r_right = np.zeros((3, 3))
    p_left = np.zeros((3, 4))
    p_right = np.zeros((3, 4))
    cv2.stereoRectify(cameraMatrix1=left_intrinsics.intrinsic_matrix(),
                      distCoeffs1=left_distortion,
                      cameraMatrix2=right_intrinsics.intrinsic_matrix(),
                      distCoeffs2=right_distortion,
                      imageSize=shape,
                      R=relative_transform.rotation_matrix,
                      T=relative_transform.location,
                      alpha=0,
                      flags=cv2.CALIB_ZERO_DISPARITY,
                      newImageSize=shape,
                      R1=r_left,
                      R2=r_right,
                      P1=p_left,
                      P2=p_right)

    m1l, m2l = cv2.initUndistortRectifyMap(left_intrinsics.intrinsic_matrix(),
                                           left_distortion, r_left,
                                           p_left[0:3, 0:3], shape, cv2.CV_32F)
    m1r, m2r = cv2.initUndistortRectifyMap(right_intrinsics.intrinsic_matrix(),
                                           right_distortion, r_right,
                                           p_right[0:3,
                                                   0:3], shape, cv2.CV_32F)

    # Rectification has changed the camera intrinsics, return the new ones
    rectified_left_intrinsics = CameraIntrinsics(width=shape[0],
                                                 height=shape[1],
                                                 fx=p_left[0, 0],
                                                 fy=p_left[1, 1],
                                                 cx=p_left[0, 2],
                                                 cy=p_left[1, 2],
                                                 s=p_left[0, 1])
    rectified_right_intrinsics = CameraIntrinsics(width=shape[0],
                                                  height=shape[1],
                                                  fx=p_right[0, 0],
                                                  fy=p_right[1, 1],
                                                  cx=p_right[0, 2],
                                                  cy=p_right[1, 2],
                                                  s=p_right[0, 1])

    return m1l, m2l, rectified_left_intrinsics, m1r, m2r, rectified_right_intrinsics
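A typical follow-up (a sketch, not taken from this codebase) is to feed the returned maps to cv2.remap; the input images and camera parameter objects are assumed to have been loaded elsewhere.
import cv2

# left_image, right_image, and the four camera parameter objects are assumed to exist already
m1l, m2l, new_left_intrinsics, m1r, m2r, new_right_intrinsics = rectify(
    left_extrinsics, left_intrinsics, right_extrinsics, right_intrinsics)
rectified_left = cv2.remap(left_image, m1l, m2l, cv2.INTER_LINEAR)
rectified_right = cv2.remap(right_image, m1r, m2r, cv2.INTER_LINEAR)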
Example No. 25
    def create_frame(self, time: float) -> Image:
        img_shape = (self.height, self.width,
                     3) if self.colour else (self.height, self.width)
        frame = np.zeros(img_shape, dtype=np.uint8)
        depth = None
        if self.mode is ImageMode.RGBD:
            depth = (1000 + 2 * len(self.stars)) * np.ones(
                (self.height, self.width), dtype=np.float64)
        f = self.focal_length
        cx = frame.shape[1] / 2
        cy = frame.shape[0] / 2

        for star in self.stars:
            x, y, z = star['pos']
            x -= self.speed * time
            if z <= 0:
                break  # Stars are sorted by z value, so once they're past the camera, stop.

            left = int(np.round(f * ((x - star['width'] / 2) / z) + cx))
            right = int(np.round(f * ((x + star['width'] / 2) / z) + cx))

            top = int(np.round(f * ((y - star['height'] / 2) / z) + cy))
            bottom = int(np.round(f * ((y + star['height'] / 2) / z) + cy))

            left = max(0, min(frame.shape[1], left))
            right = max(0, min(frame.shape[1], right))
            top = max(0, min(frame.shape[0], top))
            bottom = max(0, min(frame.shape[0], bottom))

            frame[top:bottom, left:right] = star['colour']
            if depth is not None:
                depth[top:bottom, left:right] = z

        metadata = imeta.make_metadata(
            pixels=frame,
            depth=depth,
            source_type=imeta.ImageSourceType.SYNTHETIC,
            camera_pose=Transform(location=[time * self.speed, 0, 0],
                                  rotation=[0, 0, 0, 1]),
            intrinsics=CameraIntrinsics(width=frame.shape[1],
                                        height=frame.shape[0],
                                        fx=f,
                                        fy=f,
                                        cx=cx,
                                        cy=cy))

        # If we're building a stereo image, make the right image
        if self.mode is ImageMode.STEREO:
            right_frame = np.zeros(img_shape, dtype=np.uint8)
            for star in self.stars:
                x, y, z = star['pos']
                x -= self.stereo_offset + self.speed * time
                if z <= 0:
                    break

                left = int(np.round(f * ((x - star['width'] / 2) / z) + cx))
                right = int(np.round(f * ((x + star['width'] / 2) / z) + cx))

                top = int(np.round(f * ((y - star['height'] / 2) / z) + cy))
                bottom = int(np.round(f * ((y + star['height'] / 2) / z) + cy))

                left = max(0, min(frame.shape[1], left))
                right = max(0, min(frame.shape[1], right))
                top = max(0, min(frame.shape[0], top))
                bottom = max(0, min(frame.shape[0], bottom))

                right_frame[top:bottom, left:right] = star['colour']
            right_metadata = imeta.make_right_metadata(
                pixels=right_frame,
                left_metadata=metadata,
                source_type=imeta.ImageSourceType.SYNTHETIC,
                camera_pose=Transform(
                    location=[time * self.speed, -1 * self.stereo_offset, 0],
                    rotation=[0, 0, 0, 1]),
                intrinsics=CameraIntrinsics(width=frame.shape[1],
                                            height=frame.shape[0],
                                            fx=f,
                                            fy=f,
                                            cx=cx,
                                            cy=cy))
            return StereoImage(pixels=frame,
                               image_group='test',
                               metadata=metadata,
                               right_pixels=right_frame,
                               right_metadata=right_metadata)

        if depth is not None:
            return Image(pixels=frame,
                         image_group='test',
                         depth=depth,
                         metadata=metadata)
        return Image(pixels=frame, image_group='test', metadata=metadata)
Example No. 26
    def get_stereo_offset(self) -> Transform:
        return Transform([0, -1 * self.stereo_offset, 0])
Example No. 27
    def infer_missing_poses_and_motions(self):
        """
        Based on the current results, infer the values of motions from poses and vice versa.
        Basically, both the true and estimated trajectories are stored twice, once as absolute poses
        and once as relative motions. One can be inferred from the other, but to save calculation time later,
        we store both.

        In practice, construct with either a set of motions or a set of poses for each, and let this function
        handle building the other.

        This runs automatically on construction; you can call it manually afterwards if the results change.
        The trial will be invalid if values that could be inferred are missing, or if they are inconsistent.
        :return:
        """
        if len(self.results) > 0:
            # Fill in missing pose and motions for the first frame
            # Defaults are usually 0
            self.results[0].motion = Transform()
            if self.results[0].pose is None:
                self.results[0].pose = Transform()

            # Fill in missing pose or motion
            to_backpropagate_motions = []
            for idx in range(1, len(self.results)):
                if self.results[idx].pose is None and self.results[
                        idx].motion is not None:
                    # We have motion but no pose, compute pose
                    self.results[idx].pose = self.results[
                        idx - 1].pose.find_independent(
                            self.results[idx].motion)
                if self.results[idx].pose is not None and self.results[
                        idx].motion is None:
                    # We have pose but no motion, compute motion
                    self.results[idx].motion = self.results[
                        idx - 1].pose.find_relative(self.results[idx].pose)

                if self.results[idx - 1].estimated_pose is None:
                    if self.results[
                            idx].estimated_pose is not None and self.results[
                                idx].estimated_motion is not None:
                        # We have a pose and a motion for this frame, but no pose for the previous frame, we can go back
                        to_backpropagate_motions.append(idx)
                else:
                    if self.results[
                            idx].estimated_pose is not None and self.results[
                                idx].estimated_motion is None:
                        # We have estimated poses, but no estimated motion, infer estimated motion
                        self.results[idx].estimated_motion = self.results[
                            idx - 1].estimated_pose.find_relative(
                                self.results[idx].estimated_pose)
                    if self.results[
                            idx].estimated_pose is None and self.results[
                                idx].estimated_motion is not None:
                        # We have the previous estimated pose and this frame's estimated motion,
                        # so we can combine them into this frame's estimated pose
                        self.results[idx].estimated_pose = self.results[
                            idx - 1].estimated_pose.find_independent(
                                self.results[idx].estimated_motion)

            # Go back and infer earlier estimated poses from later ones and motions
            for start_idx in reversed(to_backpropagate_motions):
                for idx in range(start_idx, 0, -1):
                    if self.results[idx].estimated_pose is not None and self.results[idx].estimated_motion is not None \
                            and self.results[idx - 1].estimated_pose is None:
                        self.results[idx - 1].estimated_pose = self.results[
                            idx].estimated_pose.find_independent(
                                self.results[idx].estimated_motion.inverse())
                    else:
                        break
Example No. 28
def import_dataset(root_folder, sequence_number, **_):
    """
    Load a KITTI image sequence into the database.
    :return:
    """
    sequence_number = int(sequence_number)
    if not 0 <= sequence_number < 11:
        raise ValueError("Cannot import sequence {0}, it is invalid".format(sequence_number))
    root_folder = find_root(root_folder, sequence_number)
    data = pykitti.odometry(root_folder, sequence="{0:02}".format(sequence_number))

    # dataset.calib:      Calibration data are accessible as a named tuple
    # dataset.timestamps: Timestamps are parsed into a list of timedelta objects
    # dataset.poses:      Generator to load ground truth poses T_w_cam0
    # dataset.camN:       Generator to load individual images from camera N
    # dataset.gray:       Generator to load monochrome stereo pairs (cam0, cam1)
    # dataset.rgb:        Generator to load RGB stereo pairs (cam2, cam3)
    # dataset.velo:       Generator to load velodyne scans as [x,y,z,reflectance]
    image_group = f"KITTI_{sequence_number:06}"
    images = []
    timestamps = []
    with arvet.database.image_manager.get().get_group(image_group, allow_write=True):
        for left_image, right_image, timestamp, pose in zip(data.cam2, data.cam3, data.timestamps, data.poses):
            left_image = np.array(left_image)
            right_image = np.array(right_image)
            camera_pose = make_camera_pose(pose)
            # camera pose is for cam0, we want cam2, which is 6cm (0.06m) to the left
            # Except that we don't need to control for that, since we want to be relative to the first pose anyway
            # camera_pose = camera_pose.find_independent(tf.Transform(location=(0, 0.06, 0), rotation=(0, 0, 0, 1),
            #                                                         w_first=False))
            # Stereo offset is 0.54m (http://www.cvlibs.net/datasets/kitti/setup.php)
            right_camera_pose = camera_pose.find_independent(Transform(location=(0, -0.54, 0), rotation=(0, 0, 0, 1),
                                                                       w_first=False))
            camera_intrinsics = CameraIntrinsics(
                height=left_image.shape[0],
                width=left_image.shape[1],
                fx=data.calib.K_cam2[0, 0],
                fy=data.calib.K_cam2[1, 1],
                cx=data.calib.K_cam2[0, 2],
                cy=data.calib.K_cam2[1, 2])
            right_camera_intrinsics = CameraIntrinsics(
                height=right_image.shape[0],
                width=right_image.shape[1],
                fx=data.calib.K_cam3[0, 0],
                fy=data.calib.K_cam3[1, 1],
                cx=data.calib.K_cam3[0, 2],
                cy=data.calib.K_cam3[1, 2])
            left_metadata = imeta.make_metadata(
                pixels=left_image,
                camera_pose=camera_pose,
                intrinsics=camera_intrinsics,
                source_type=imeta.ImageSourceType.REAL_WORLD,
                environment_type=imeta.EnvironmentType.OUTDOOR_URBAN,
                light_level=imeta.LightingLevel.WELL_LIT,
                time_of_day=imeta.TimeOfDay.AFTERNOON,
            )
            right_metadata = imeta.make_right_metadata(
                pixels=right_image,
                left_metadata=left_metadata,
                camera_pose=right_camera_pose,
                intrinsics=right_camera_intrinsics
            )
            image = StereoImage(
                pixels=left_image,
                right_pixels=right_image,
                image_group=image_group,
                metadata=left_metadata,
                right_metadata=right_metadata
            )
            image.save()
            images.append(image)
            timestamps.append(timestamp.total_seconds())

    # Create and save the image collection
    collection = ImageCollection(
        images=images,
        timestamps=timestamps,
        sequence_type=ImageSequenceType.SEQUENTIAL,
        dataset='KITTI',
        sequence_name="{0:02}".format(sequence_number),
        trajectory_id="KITTI_{0:02}".format(sequence_number)
    )
    collection.save()
    return collection
Example No. 29
    def test_doesnt_ignore_other_timestamps_with_identical_diffs(self):
        point = PointEstimate(0.5)
        point.add_sample(0, Transform())
        point.add_sample(1, Transform((1, -0.5, 0)))
        result = point.get_estimate()
        self.assertNPEqual((0.5, -0.25, 0), result.location)