Example 1
    def test_removes_comments_from_the_end_of_lines(self):
        """Trailing '# ...' comments on data lines must be stripped by read_trajectory."""
        origin = tf.Transform(location=(15.2, -1167.9, -1.2),
                              rotation=(0.535, 0.2525, 0.11, 0.2876))
        expected_poses = {}
        # Start the file with comment-only and blank lines, then add data lines
        # that each carry a trailing comment.
        lines = ["# Starting with a comment\n    # Another comment\n\n"]
        template = "{time} {x} {y} {z} {qx} {qy} {qz} {qw} # This is a comment\n"
        for idx in range(10):
            stamp = idx * 4999936 + 1403638128940097024
            rel_pose = tf.Transform(
                location=(0.122 * idx, -0.53112 * idx, 1.893 * idx),
                rotation=(0.772 * idx, -0.8627 * idx, -0.68782 * idx))
            expected_poses[stamp] = rel_pose
            # The file holds absolute poses; the loader should return them
            # relative to the first pose.
            abs_pose = origin.find_independent(rel_pose)
            lines.append(self.format_line(stamp, abs_pose, template))
        trajectory_text = "".join(lines)

        mock_open = mock.mock_open(read_data=trajectory_text)
        extend_mock_open(mock_open)
        with mock.patch('arvet_slam.dataset.tum.tum_loader.open',
                        mock_open,
                        create=True):
            trajectory = tum_loader.read_trajectory('test_filepath',
                                                    expected_poses.keys())

        self.assertEqual(len(expected_poses), len(trajectory))
        for stamp, rel_pose in expected_poses.items():
            self.assertIn(stamp, trajectory)
            self.assertNPClose(rel_pose.location, trajectory[stamp].location)
            self.assertNPClose(rel_pose.rotation_quat(True),
                               trajectory[stamp].rotation_quat(True))
Example 2
    def test_skips_comments_and_blank_lines(self):
        """Whole lines that are blank or start with '#' must be ignored by read_trajectory."""
        origin = tf.Transform(location=(15.2, -1167.9, -1.2),
                              rotation=(0.535, 0.2525, 0.11, 0.2876))
        expected_poses = {}
        lines = ["# Starting with a comment\n    # Another comment\n\n"]
        for idx in range(10):
            stamp = idx * 4999936 + 1403638128940097024
            rel_pose = tf.Transform(
                location=(0.122 * idx, -0.53112 * idx, 1.893 * idx),
                rotation=(0.772 * idx, -0.8627 * idx, -0.68782 * idx))
            expected_poses[stamp] = rel_pose
            abs_pose = origin.find_independent(rel_pose)
            lines.append(self.format_line(stamp, abs_pose))
            # Also add a line of *wrong* trajectory data, preceded by a hash:
            # it must be skipped as a comment, not parsed.
            lines.append("# " + self.format_line(stamp, rel_pose))
        trajectory_text = "".join(lines)

        mock_open = mock.mock_open(read_data=trajectory_text)
        extend_mock_open(mock_open)
        with mock.patch('arvet_slam.dataset.tum.tum_loader.open',
                        mock_open,
                        create=True):
            trajectory = tum_loader.read_trajectory('test_filepath',
                                                    expected_poses.keys())

        self.assertEqual(len(expected_poses), len(trajectory))
        for stamp, rel_pose in expected_poses.items():
            self.assertIn(stamp, trajectory)
            self.assertNPClose(rel_pose.location, trajectory[stamp].location)
            self.assertNPClose(rel_pose.rotation_quat(True),
                               trajectory[stamp].rotation_quat(True))
Example 3
    def test_reads_trajectory_relative_to_first_pose(self):
        """Poses returned by read_trajectory must be expressed relative to the first pose."""
        origin = tf.Transform(location=(15.2, -1167.9, -1.2),
                              rotation=(0.535, 0.2525, 0.11, 0.2876))
        expected_poses = {}
        lines = []
        for idx in range(10):
            # Float timestamps, roughly matching real TUM sequence times
            stamp = idx * 4999.936 + 1403638128.940097024
            rel_pose = tf.Transform(
                location=(0.122 * idx, -0.53112 * idx, 1.893 * idx),
                rotation=(0.772 * idx, -0.8627 * idx, -0.68782 * idx))
            expected_poses[stamp] = rel_pose
            # Write the absolute pose to the file; expect the relative one back
            abs_pose = origin.find_independent(rel_pose)
            lines.append(self.format_line(stamp, abs_pose))
        trajectory_text = "".join(lines)

        mock_open = mock.mock_open(read_data=trajectory_text)
        extend_mock_open(mock_open)
        with mock.patch('arvet_slam.dataset.tum.tum_loader.open',
                        mock_open,
                        create=True):
            trajectory = tum_loader.read_trajectory('test_filepath',
                                                    expected_poses.keys())

        self.assertEqual(len(expected_poses), len(trajectory))
        for stamp, rel_pose in expected_poses.items():
            self.assertIn(stamp, trajectory)
            self.assertNPClose(rel_pose.location, trajectory[stamp].location)
            self.assertNPClose(rel_pose.rotation_quat(True),
                               trajectory[stamp].rotation_quat(True))
Example 4
    def test_interpolates_multiple_times_within_the_same_interval(self):
        """Many desired times falling between sparse samples must all be interpolated."""
        def pose_at(t):
            # Linear motion with a rotation about a fixed axis, so the true
            # pose at any time is known analytically.
            return tf.Transform(location=(t, -10 * t, 0),
                                rotation=tf3d.quaternions.axangle2quat(
                                    (1, 2, 3), (t / 10) * np.pi / 2),
                                w_first=True)

        # Encode samples only at even timestamps 0, 2, ..., 10
        sample_times = range(0, 11, 2)
        trajectory_text = "".join(
            self.format_line(t, pose_at(t)) for t in sample_times)

        # Lots of desired times for widely spaced samples
        desired_times = np.linspace(0, 10, num=50, endpoint=True)
        first_pose = pose_at(min(desired_times))

        mock_open = mock.mock_open(read_data=trajectory_text)
        extend_mock_open(mock_open)
        with mock.patch('arvet_slam.dataset.tum.tum_loader.open',
                        mock_open,
                        create=True):
            trajectory = tum_loader.read_trajectory('test_filepath',
                                                    desired_times)

        self.assertEqual(set(desired_times), set(trajectory.keys()))
        for t in desired_times:
            expected_pose = first_pose.find_relative(pose_at(t))
            self.assertIn(t, trajectory)
            self.assertNPClose(expected_pose.location,
                               trajectory[t].location,
                               atol=1e-14,
                               rtol=0)
            self.assertNPClose(expected_pose.rotation_quat(True),
                               trajectory[t].rotation_quat(True),
                               atol=1e-14,
                               rtol=0)
Example 5
    def test_reads_from_given_file(self):
        """read_trajectory must open the path it is given, in read mode."""
        lines = []
        stamps = []
        for idx in range(10):
            stamp = idx * 1.01 + 100
            pose = tf.Transform(
                location=(0.122 * idx, -0.53112 * idx, 1.893 * idx),
                rotation=(0.772 * idx, -0.8627 * idx, -0.68782 * idx))
            lines.append(self.format_line(stamp, pose))
            stamps.append(stamp)
        trajectory_text = "".join(lines)

        mock_open = mock.mock_open(read_data=trajectory_text)
        extend_mock_open(mock_open)
        with mock.patch('arvet_slam.dataset.tum.tum_loader.open',
                        mock_open,
                        create=True):
            trajectory = tum_loader.read_trajectory('test_filepath', stamps)

        # The loader should have opened exactly the file we named, read-only
        self.assertTrue(mock_open.called)
        self.assertEqual('test_filepath', mock_open.call_args[0][0])
        self.assertEqual('r', mock_open.call_args[0][1])
        self.assertEqual(len(trajectory), len(stamps))
Example 6
def verify_dataset(image_collection: ImageCollection,
                   root_folder: typing.Union[str, Path],
                   dataset_name: str,
                   repair: bool = False):
    """
    Verify an imported TUM RGB-D sequence against the data on disk.

    Re-reads the sequence from ``root_folder`` (first extracting a sibling
    ``<dataset_name>.tgz`` archive if the folder does not exist) and compares
    every stored image's group, pixels, pixel hash, and depth map to what is
    read from the files. With ``repair=True``, mismatching database records
    are corrected and re-saved instead of only being reported.

    :param image_collection: The already-imported image collection to check.
    :param root_folder: The sequence directory, or the location beside which a
        tarball of the sequence can be found and extracted.
    :param dataset_name: The sequence name; also used as the image group name.
    :param repair: If True, fix repairable mismatches in the database.
    :return: True if everything matched; False if any mismatch was found,
        even one that was subsequently repaired.
    :raises NotADirectoryError: If neither the directory nor a tarball exists.
    """
    log = logging.getLogger(__name__)
    root_folder = Path(root_folder)
    dataset_name = str(dataset_name)
    repair = bool(repair)
    valid = True
    # NOTE(review): nothing in this function ever sets irreparable to True;
    # the branch at the bottom is currently dead -- confirm intended behaviour.
    irreparable = False
    image_group = dataset_name

    # Check the root folder to see if it needs to be extracted from a tarfile
    delete_when_done = None
    if not root_folder.is_dir():
        if (root_folder.parent / dataset_name).is_dir():
            # The root was a tarball, but the extracted data already exists,
            # just use that as the root
            root_folder = root_folder.parent / dataset_name
        else:
            candidate_tar_file = root_folder.parent / (dataset_name + '.tgz')
            if candidate_tar_file.is_file() and tarfile.is_tarfile(
                    candidate_tar_file):
                # Root is actually a tarfile, extract it; find_files below
                # handles whatever folder structure comes out.
                # SECURITY NOTE: extractall trusts member paths in the
                # archive; on Python >= 3.12 pass filter='data' to block
                # path-traversal entries.
                with tarfile.open(candidate_tar_file) as tar_fp:
                    tar_fp.extractall(root_folder.parent / dataset_name)
                root_folder = root_folder.parent / dataset_name
                # We created this tree ourselves, so clean it up afterwards
                delete_when_done = root_folder
            else:
                # Could find neither a dir nor a tarfile to extract from
                raise NotADirectoryError(
                    "'{0}' is not a directory".format(root_folder))

    # Check the image group on the image collection itself
    if image_collection.image_group != image_group:
        if repair:
            image_collection.image_group = image_group
            image_collection.save()
            log.info(
                f"Fixed incorrect image group for {image_collection.sequence_name}"
            )
        else:
            log.warning(
                f"{image_collection.sequence_name} has incorrect image group {image_collection.image_group}"
            )
            valid = False

    # Step 1: Find the relevant metadata files
    root_folder, rgb_path, depth_path, trajectory_path = tum_loader.find_files(
        root_folder)

    # Step 2: Read the metadata from them
    image_files = tum_loader.read_image_filenames(rgb_path)
    trajectory = tum_loader.read_trajectory(trajectory_path,
                                            image_files.keys())
    depth_files = tum_loader.read_image_filenames(depth_path)

    # Step 3: Associate the different data types by timestamp
    all_metadata = tum_loader.associate_data(image_files, trajectory,
                                             depth_files)

    # Step 4: Compare each database image against the files on disk
    total_invalid_images = 0
    total_fixed_images = 0
    with arvet.database.image_manager.get().get_group(image_group,
                                                      allow_write=repair):
        for img_idx, (timestamp, image_file, camera_pose,
                      depth_file) in enumerate(all_metadata):
            changed = False
            img_valid = True
            img_path = root_folder / image_file
            # Named distinctly so it does not clobber the depth metadata file
            # path returned by find_files above
            depth_image_path = root_folder / depth_file
            rgb_data = image_utils.read_colour(img_path)
            depth_data = image_utils.read_depth(depth_image_path)
            depth_data = depth_data / 5000  # Re-scale depth to meters
            img_hash = bytes(xxhash.xxh64(rgb_data).digest())

            # Load the image object from the database; if that fails, count
            # the image invalid and move on to the next one
            try:
                _, image = image_collection[img_idx]
            except (KeyError, IOError, RuntimeError):
                log.exception(f"Error loading image object {img_idx}")
                valid = False
                total_invalid_images += 1
                continue

            # First, check the image group
            if image.image_group != image_group:
                if repair:
                    image.image_group = image_group
                    changed = True
                log.warning(
                    f"Image {img_idx} has incorrect group {image.image_group}")
                valid = False
                img_valid = False

            # Read the stored pixel and depth data, tolerating read failures
            # (None will fail the comparisons below)
            try:
                actual_pixels = image.pixels
            except (KeyError, IOError, RuntimeError):
                actual_pixels = None
            try:
                actual_depth = image.depth
            except (KeyError, IOError, RuntimeError):
                actual_depth = None

            # Compare the loaded image data to the data read from disk
            if actual_pixels is None or not np.array_equal(
                    rgb_data, actual_pixels):
                if repair:
                    image.store_pixels(rgb_data)
                    changed = True
                else:
                    log.error(
                        f"Image {img_idx}: Pixels do not match data read from {img_path}"
                    )
                valid = False
                img_valid = False
            if img_hash != bytes(image.metadata.img_hash):
                if repair:
                    image.metadata.img_hash = img_hash
                    changed = True
                else:
                    log.error(
                        f"Image {img_idx}: Image hash does not match metadata")
                valid = False
                img_valid = False
            if actual_depth is None or not np.array_equal(
                    depth_data, actual_depth):
                if repair:
                    image.store_depth(depth_data)
                    changed = True
                else:
                    log.error(
                        f"Image {img_idx}: Depth does not match data read from {depth_image_path}"
                    )
                valid = False
                img_valid = False
            if changed and repair:
                log.warning(f"Image {img_idx}: repaired")
                image.save()
                total_fixed_images += 1
            if not img_valid:
                total_invalid_images += 1

    if irreparable:
        # Images are missing entirely, needs re-import
        log.error(
            f"Image Collection {image_collection.pk} for sequence {dataset_name} "
            "is IRREPARABLE, invalidate and re-import")
    elif repair:
        # Repair mode always reports success here, even when nothing needed
        # fixing; the fixed-file count disambiguates
        log.info(
            f"{image_collection.sequence_name} repaired successfully "
            f"({total_fixed_images} image files fixed).")
    elif valid:
        log.info(
            f"Verification of {image_collection.sequence_name} successful.")
    else:
        log.error(
            f"Verification of {image_collection.sequence_name} ({image_collection.pk}) "
            f"FAILED, ({total_invalid_images} images failed)")

    if delete_when_done is not None and delete_when_done.exists():
        # We're done and need to clean up after ourselves
        shutil.rmtree(delete_when_done)

    return valid