def create_mock_image(width=128, height=128):
    """
    Make an image by drawing a bunch of semi-transparent squares
    This should actually have discernible features, in contrast to pure noise
    :param width:
    :param height:
    :return:
    """
    im = np.zeros((height, width, 3), dtype='uint8')  # rows index y, columns index x
    for _ in range(10):
        shape = (np.random.randint(width), np.random.randint(height))
        position = (np.random.randint(1 - shape[0], width),
                    np.random.randint(1 - shape[1], height))
        colour = np.random.randint(0, 256, 3, dtype='uint8')
        # uint8 addition wraps modulo 256, so overlapping squares produce
        # distinct colour regions rather than saturating to white
        im[max(0, position[1]):min(height, position[1] + shape[1]),
           max(0, position[0]):min(width, position[0] + shape[0]), :] += colour

    metadata = imeta.ImageMetadata(
        source_type=imeta.ImageSourceType.SYNTHETIC,
        hash_=b'\x1f`\xa8\x8aR\xed\x9f\x0b',
        environment_type=imeta.EnvironmentType.INDOOR_CLOSE,
        light_level=imeta.LightingLevel.WELL_LIT,
        time_of_day=imeta.TimeOfDay.DAY,
        lens_focal_distance=100,
        aperture=22,
        camera_pose=tf.Transform())
    return core.image_entity.ImageEntity(data=im,
                                         metadata=metadata,
                                         id_=bson.objectid.ObjectId())
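
A quick way to eyeball the generated features (matplotlib is an assumption here, not part of the original snippet; the entity is assumed to expose the same data property used in the tests below):

import matplotlib.pyplot as plt
entity = create_mock_image(64, 48)
plt.imshow(entity.data)  # shows the overlapping coloured squares
plt.show()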
Code Example #2
    def test_repeated_applications_returns_to_original(self):
        data = np.array([list(range(i, i + 100)) for i in range(100)])
        image = core.image.Image(
            data=data,
            metadata=imeta.ImageMetadata(
                source_type=imeta.ImageSourceType.SYNTHETIC,
                hash_=b'\xa5\xc9\x08\xaf$\x0b\x116',
                labelled_objects=[
                    imeta.LabelledObject(class_names={'cup'},
                                         bounding_box=(80, 20, 10, 20))
                ]))
        # Apply 4 times to rotate back to the start
        result = self.do_augment(
            self.do_augment(self.do_augment(self.do_augment(image))))
        self.assertNPEqual(result.data, data)
        self.assertNPClose(result.metadata.affine_transformation_matrix,
                           np.identity(3))
        self.assertEqual((80, 20, 10, 20),
                         result.metadata.labelled_objects[0].bounding_box)

        for _ in range(10):
            result = self.do_augment(
                self.do_augment(self.do_augment(self.do_augment(result))))
            self.assertNPEqual(result.data, data)
            self.assertNPClose(result.metadata.affine_transformation_matrix,
                               np.identity(3))
            self.assertEqual((80, 20, 10, 20),
                             result.metadata.labelled_objects[0].bounding_box)
Code Example #3
def import_dataset(root_folder, db_client):
    """
    Load a TUM image sequences into the database.
    :return:
    """
    if not os.path.isdir(root_folder):
        return None

    # Step 1: Read the meta-information from the files
    left_rgb_path = os.path.join(root_folder, 'cam0', 'data.csv')
    left_camera_intrinsics_path = os.path.join(root_folder, 'cam0', 'sensor.yaml')
    right_rgb_path = os.path.join(root_folder, 'cam1', 'data.csv')
    right_camera_intrinsics_path = os.path.join(root_folder, 'cam1', 'sensor.yaml')
    trajectory_path = os.path.join(root_folder, 'state_groundtruth_estimate0', 'data.csv')

    if (not os.path.isfile(left_rgb_path) or not os.path.isfile(left_camera_intrinsics_path) or
            not os.path.isfile(right_rgb_path) or not os.path.isfile(right_camera_intrinsics_path) or
            not os.path.isfile(trajectory_path)):
        # Stop if we can't find the metadata files within the directory
        return None

    left_image_files = read_image_filenames(left_rgb_path)
    left_extrinsics, left_intrinsics = get_camera_calibration(left_camera_intrinsics_path)
    right_image_files = read_image_filenames(right_rgb_path)
    right_extrinsics, right_intrinsics = get_camera_calibration(right_camera_intrinsics_path)
    trajectory = read_trajectory(trajectory_path)

    # Step 2: Associate the different data types by timestamp.
    # The trajectory goes last because it has far more entries than the image lists.
    all_metadata = associate_data(left_image_files, right_image_files, trajectory)

    # Step 3: Load the images from the metadata
    builder = dataset.image_collection_builder.ImageCollectionBuilder(db_client)
    for timestamp, left_image_file, right_image_file, robot_pose in all_metadata:
        left_data = cv2.imread(os.path.join(root_folder, 'cam0', 'data', left_image_file), cv2.IMREAD_COLOR)
        right_data = cv2.imread(os.path.join(root_folder, 'cam1', 'data', right_image_file), cv2.IMREAD_COLOR)
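        # OpenCV loads images as BGR; reverse the channel axis to convert to RGB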
        left_data = np.ascontiguousarray(left_data[:, :, ::-1])
        right_data = np.ascontiguousarray(right_data[:, :, ::-1])

        left_pose = robot_pose.find_independent(left_extrinsics)
        right_pose = robot_pose.find_independent(right_extrinsics)

        builder.add_image(image=core.image_entity.StereoImageEntity(
            left_data=left_data,
            right_data=right_data,
            metadata=imeta.ImageMetadata(
                hash_=xxhash.xxh64(left_data).digest(),
                camera_pose=left_pose,
                right_camera_pose=right_pose,
                intrinsics=left_intrinsics,
                right_intrinsics=right_intrinsics,
                source_type=imeta.ImageSourceType.REAL_WORLD,
                environment_type=imeta.EnvironmentType.INDOOR_CLOSE,
                light_level=imeta.LightingLevel.WELL_LIT,
                time_of_day=imeta.TimeOfDay.DAY,
            )
        ), timestamp=timestamp)
    return builder.save()
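
The helpers read_image_filenames, get_camera_calibration, read_trajectory and associate_data are defined elsewhere in the module. As an illustration of the timestamp association in Step 2, a minimal nearest-timestamp matcher might look like the following (a sketch assuming each input maps timestamp to value; the real associate_data may differ):

def associate_data_sketch(*sensor_maps, tolerance=0.02):
    """Match several {timestamp: value} dicts by nearest timestamp.
    Hypothetical sketch only; yields (timestamp, value0, value1, ...) tuples."""
    base, *others = sensor_maps
    for stamp in sorted(base.keys()):
        row = [stamp, base[stamp]]
        for other in others:
            # Take the entry whose timestamp is closest to the base stamp
            best = min(other.keys(), key=lambda t: abs(t - stamp))
            if abs(best - stamp) > tolerance:
                row = None  # No match within tolerance; drop this stamp
                break
            row.append(other[best])
        if row is not None:
            yield tuple(row)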
Code Example #4
def make_image(*args, **kwargs):
    du.defaults(kwargs, {
        'data': np.random.randint(0, 255, (32, 32, 3), dtype='uint8'),
        'data_id': 0,
        'metadata': imeta.ImageMetadata(
            source_type=imeta.ImageSourceType.SYNTHETIC,
            hash_=b'\x1f`\xa8\x8aR\xed\x9f\x0b',
            camera_pose=tf.Transform()
        )
    })
    return core.image_entity.ImageEntity(*args, **kwargs)
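
du.defaults is not shown in these examples; judging from its use (here it is relied on to mutate kwargs in place, while other snippets also use its return value), it fills in missing keys from the defaults dict. A minimal sketch under that assumption:

def defaults_sketch(target, *sources):
    # Fill keys missing from target using each source in turn,
    # never overwriting keys the caller already set (assumed behaviour)
    for source in sources:
        for key, value in source.items():
            target.setdefault(key, value)
    return target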
Code Example #5
def make_image(**kwargs):
    if 'data' in kwargs:
        data = kwargs['data']
    else:
        data = np.array([list(range(i, i + 100)) for i in range(100)])
    metadata_kwargs = {
        'source_type': imeta.ImageSourceType.SYNTHETIC,
        'hash_': b'\xa5\xc9\x08\xaf$\x0b\x116'
    }
    if 'metadata' in kwargs and isinstance(kwargs['metadata'], dict):
        metadata_kwargs = du.defaults(kwargs['metadata'], metadata_kwargs)
        del kwargs['metadata']
    kwargs = du.defaults(kwargs, {
        'data': data,
        'metadata': imeta.ImageMetadata(**metadata_kwargs)
    })
    return core.image.Image(**kwargs)
Code Example #6
 def test_inverts_itself(self):
     data = np.array([list(range(i, i + 100)) for i in range(100)])
     image = core.image.Image(
         data=data,
         metadata=imeta.ImageMetadata(
             source_type=imeta.ImageSourceType.SYNTHETIC,
             hash_=b'\xa5\xc9\x08\xaf$\x0b\x116',
             labelled_objects=[imeta.LabelledObject(
                 class_names={'cup'},
                 bounding_box=(80, 20, 10, 20)
             )]
         )
     )
     result = self.do_augment(self.do_augment(image))
     self.assertNPEqual(result.data, data)
     self.assertNPEqual(result.metadata.affine_transformation_matrix, np.identity(3))
     self.assertEqual((80, 20, 10, 20), result.metadata.labelled_objects[0].bounding_box)
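
Any augmentation that is an involution, such as a horizontal flip, should pass this test. For illustration, a flip that also updates an (x, y, w, h) bounding box (a sketch; the do_augment under test is supplied by each concrete test class):

def horizontal_flip_sketch(image_data, bounding_box):
    # Mirror the image about the vertical axis
    flipped = image_data[:, ::-1]
    x, y, w, h = bounding_box
    # The box's left edge maps to width - (x + w)
    new_box = (image_data.shape[1] - x - w, y, w, h)
    return flipped, new_box  # applying this twice returns the original inputs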
Code Example #7
    def test_serialize_and_deserialize_works_with_minimal_parameters(self):
        entity1 = imeta.ImageMetadata(
            source_type=imeta.ImageSourceType.SYNTHETIC,
            hash_=b'\x1f`\xa8\x8aR\xed\x9f\x0b',
            intrinsics=cam_intr.CameraIntrinsics(800, 600, 652.2, 291, 142.2,
                                                 614.4))
        s_entity1 = entity1.serialize()

        entity2 = imeta.ImageMetadata.deserialize(s_entity1)
        s_entity2 = entity2.serialize()

        self.assert_metadata_equal(entity1, entity2)
        self.assertEqual(s_entity1, s_entity2)

        for idx in range(100):
            # Test that repeated serialization and deserialization does not degrade the information
            entity2 = imeta.ImageMetadata.deserialize(s_entity2)
            s_entity2 = entity2.serialize()
            self.assert_metadata_equal(entity1, entity2)
            self.assertEqual(s_entity1, s_entity2)
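
The property being exercised here (deserialize followed by serialize reproduces the serialized form exactly) can be factored into a reusable helper; a hypothetical sketch mirroring the loop above:

def assert_serialization_stable(test_case, entity, entity_class, iterations=100):
    # Repeatedly deserialize and re-serialize, checking that the
    # serialized form never drifts from the original
    s_entity = entity.serialize()
    for _ in range(iterations):
        entity = entity_class.deserialize(s_entity)
        test_case.assertEqual(s_entity, entity.serialize())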
Code Example #8
 def test_is_labels_available_is_true_iff_all_images_have_bounding_boxes(self):
     subject = ic.ImageCollection(
         images=self.images,
         type_=core.sequence_type.ImageSequenceType.SEQUENTIAL,
         db_client_=self.create_mock_db_client())
     self.assertTrue(subject.is_labels_available)
     subject = ic.ImageCollection(
         type_=core.sequence_type.ImageSequenceType.SEQUENTIAL,
         images=du.defaults(
             {
                 1.7: make_image(metadata=imeta.ImageMetadata(
                     hash_=b'\xf1\x9a\xe2|' +
                     np.random.randint(0, 0xFFFFFFFF).to_bytes(4, 'big'),
                     source_type=imeta.ImageSourceType.SYNTHETIC,
                     camera_pose=tf.Transform(
                         location=(800, 2 + np.random.uniform(-1, 1), 3),
                         rotation=(4, 5, 6, 7 + np.random.uniform(-4, 4))),
                     intrinsics=cam_intr.CameraIntrinsics(
                         800, 600, 550.2, 750.2, 400, 300),
                     environment_type=imeta.EnvironmentType.INDOOR_CLOSE,
                     light_level=imeta.LightingLevel.WELL_LIT,
                     time_of_day=imeta.TimeOfDay.DAY,
                     lens_focal_distance=5,
                     aperture=22,
                     simulation_world='TestSimulationWorld',
                     lighting_model=imeta.LightingModel.LIT,
                     texture_mipmap_bias=1,
                     normal_maps_enabled=2,
                     roughness_enabled=True,
                     geometry_decimation=0.8,
                     procedural_generation_seed=16234,
                     labelled_objects=[],
                     average_scene_depth=90.12))
             }, self.images),
         db_client_=self.create_mock_db_client())
     self.assertFalse(subject.is_labels_available)
Code Example #9
def make_image(**kwargs):
    """
    Make a mock image with randomized data and metadata.
    :param kwargs: Fixed kwargs to the constructor; everything else is randomized
    :return: a new image object
    """
    kwargs = du.defaults(kwargs, {
        'id_': bson.objectid.ObjectId(),
        'data': np.random.randint(0, 255, (32, 32, 3), dtype='uint8'),
        'metadata': imeta.ImageMetadata(
            hash_=b'\x1f`\xa8\x8aR\xed\x9f\x0b',
            source_type=imeta.ImageSourceType.SYNTHETIC,
            camera_pose=tf.Transform(
                location=np.random.uniform(-1000, 1000, 3),
                rotation=np.random.uniform(-1, 1, 4)),
            intrinsics=cam_intr.CameraIntrinsics(800, 600, 782.5, 781.3, 320, 300),
            environment_type=imeta.EnvironmentType.INDOOR_CLOSE,
            light_level=imeta.LightingLevel.WELL_LIT,
            time_of_day=imeta.TimeOfDay.DAY,
            lens_focal_distance=np.random.uniform(10, 10000),
            aperture=np.random.uniform(1, 22),
            simulation_world='TestSimulationWorld',
            lighting_model=imeta.LightingModel.LIT,
            texture_mipmap_bias=np.random.randint(0, 8),
            normal_maps_enabled=bool(np.random.randint(0, 2)),
            roughness_enabled=bool(np.random.randint(0, 2)),
            geometry_decimation=np.random.uniform(0, 1),
            procedural_generation_seed=np.random.randint(10000),
            labelled_objects=(
                imeta.LabelledObject(
                    class_names=('car',),
                    bounding_box=tuple(np.random.randint(0, 100, 4)),
                    label_color=tuple(np.random.randint(0, 255, 3)),
                    relative_pose=tf.Transform(
                        np.random.uniform(-1000, 1000, 3),
                        np.random.uniform(-1, 1, 4)),
                    object_id='Car-002'),
                imeta.LabelledObject(
                    class_names=('cat',),
                    # Colours have 3 channels; the original passed 4 random values here
                    bounding_box=tuple(np.random.randint(0, 100, 4)),
                    label_color=tuple(np.random.randint(0, 255, 3)),
                    relative_pose=tf.Transform(
                        np.random.uniform(-1000, 1000, 3),
                        np.random.uniform(-1, 1, 4)),
                    object_id='cat-090')),
            average_scene_depth=np.random.uniform(0, 10000)),
        'additional_metadata': {
            'Source': 'Generated',
            'Resolution': {'width': 32, 'height': 32},
            'Material Properties': {
                'BaseMipMapBias': 0,
                'RoughnessQuality': True
            }
        },
        'depth_data': np.random.uniform(0, 1, (32, 32)),
        'labels_data': np.random.uniform(0, 1, (32, 32, 3)),
        'world_normals_data': np.random.uniform(0, 1, (32, 32, 3))
    })
    return ie.ImageEntity(**kwargs)
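
Because du.defaults only fills in missing keys, any field can be pinned while the rest stay randomized, for example (assuming ImageEntity exposes the data property used in the other examples):

image = make_image(data=np.zeros((32, 32, 3), dtype='uint8'))
assert image.data.max() == 0  # the explicit data overrides the random default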
Code Example #10
 def make_metadata(self, **kwargs):
     kwargs = du.defaults(kwargs, {
         'hash_': b'\xa5\xc9\x08\xaf$\x0b\x116',
         'source_type': imeta.ImageSourceType.SYNTHETIC,
         'environment_type': imeta.EnvironmentType.INDOOR_CLOSE,
         'light_level': imeta.LightingLevel.WELL_LIT,
         'time_of_day': imeta.TimeOfDay.DAY,
         'camera_pose': tf.Transform((1, 3, 4), (0.2, 0.8, 0.2, -0.7)),
         'right_camera_pose': tf.Transform((-10, -20, -30), (0.9, -0.7, 0.5, -0.3)),
         'intrinsics': cam_intr.CameraIntrinsics(700, 700, 654.2, 753.3, 400, 300),
         'right_intrinsics': cam_intr.CameraIntrinsics(700, 710, 732.1, 612.3, 400, 300),
         'lens_focal_distance': 5,
         'aperture': 22,
         'simulator': bson.ObjectId('5a14cf0e36ed1e17a55f1e35'),
         'simulation_world': 'TestSimulationWorld',
         'lighting_model': imeta.LightingModel.LIT,
         'texture_mipmap_bias': 1,
         'normal_maps_enabled': True,
         'roughness_enabled': True,
         'geometry_decimation': 0.8,
         'procedural_generation_seed': 16234,
         'labelled_objects': [
             imeta.LabelledObject(
                 class_names=('cup',),
                 bounding_box=(142, 280, 54, 78),
                 label_color=(2, 227, 34),
                 relative_pose=tf.Transform(location=(-246, 468, 4), rotation=(0.2, 0.3, 0.4)),
                 object_id='LabelledObject-68478'),
             imeta.LabelledObject(
                 class_names=('car',),
                 bounding_box=(542, 83, 63, 123),
                 label_color=(26, 12, 212),
                 relative_pose=tf.Transform(location=(61, -717, 161), rotation=(0.7, 0.6, 0.3)),
                 object_id='LabelledObject-8246'),
             imeta.LabelledObject(
                 class_names=('cow',),
                 bounding_box=(349, 672, 124, 208),
                 label_color=(162, 134, 163),
                 relative_pose=tf.Transform(location=(286, -465, -165), rotation=(0.9, 0.1, 0.5)),
                 object_id='LabelledObject-56485')
         ],
         'average_scene_depth': 90.12,
         'base_image': self.parent_image,
         'transformation_matrix': np.array([
             [0.19882871, 0.58747441, 0.90084303],
             [0.6955363, 0.48193339, 0.09503605],
             [0.20549805, 0.6110534, 0.61145574]])
     })
     return imeta.ImageMetadata(**kwargs)
Code Example #11
 def test_preserves_other_metadata(self):
     data = np.array([list(range(i, i + 100)) for i in range(100)])
     image = core.image.Image(
         data=data,
         metadata=imeta.ImageMetadata(
             hash_=b'\x04\xe2\x1f\x3d$\x7c\x116',
             source_type=imeta.ImageSourceType.SYNTHETIC,
             environment_type=imeta.EnvironmentType.INDOOR_CLOSE,
             light_level=imeta.LightingLevel.WELL_LIT,
             time_of_day=imeta.TimeOfDay.DAY,
             camera_pose=tf.Transform((1, 3, 4), (0.2, 0.8, 0.2, -0.7)),
             right_camera_pose=tf.Transform((-10, -20, -30),
                                            (0.9, -0.7, 0.5, -0.3)),
             intrinsics=cam_intr.CameraIntrinsics(data.shape[1],
                                                  data.shape[0], 147.2,
                                                  123.3, 420, 215),
             right_intrinsics=cam_intr.CameraIntrinsics(
                 data.shape[1], data.shape[0], 168.2, 123.3, 420, 251),
             lens_focal_distance=5,
             aperture=22,
             simulation_world='TestSimulationWorld',
             lighting_model=imeta.LightingModel.LIT,
             texture_mipmap_bias=1,
             normal_maps_enabled=True,
             roughness_enabled=True,
             geometry_decimation=0.8,
             procedural_generation_seed=16234,
             labelled_objects=[
                 imeta.LabelledObject(class_names=('cup', ),
                                      bounding_box=(142, 280, 54, 78),
                                      label_color=(2, 227, 34),
                                      relative_pose=tf.Transform(
                                          location=(-246, 468, 4),
                                          rotation=(0.2, 0.3, 0.4)),
                                      object_id='LabelledObject-68478'),
                 imeta.LabelledObject(class_names=('car', ),
                                      bounding_box=(542, 83, 63, 123),
                                      label_color=(26, 12, 212),
                                      relative_pose=tf.Transform(
                                          location=(61, -717, 161),
                                          rotation=(0.7, 0.6, 0.3)),
                                      object_id='LabelledObject-8246'),
                 imeta.LabelledObject(class_names=('cow', ),
                                      bounding_box=(349, 672, 124, 208),
                                      label_color=(162, 134, 163),
                                      relative_pose=tf.Transform(
                                          location=(286, -465, -165),
                                          rotation=(0.9, 0.1, 0.5)),
                                      object_id='LabelledObject-56485')
             ],
             average_scene_depth=90.12))
     result = self.do_augment(image)
     self.assertEqual(result.metadata.source_type,
                      image.metadata.source_type)
     self.assertEqual(result.metadata.environment_type,
                      image.metadata.environment_type)
     self.assertEqual(result.metadata.light_level,
                      image.metadata.light_level)
     self.assertEqual(result.metadata.time_of_day,
                      image.metadata.time_of_day)
     self.assertEqual(result.metadata.height, image.metadata.height)
     self.assertEqual(result.metadata.width, image.metadata.width)
     self.assertEqual(result.metadata.camera_pose,
                      image.metadata.camera_pose)
     self.assertEqual(result.metadata.right_camera_pose,
                      image.metadata.right_camera_pose)
     self.assertEqual(result.metadata.lens_focal_distance,
                      image.metadata.lens_focal_distance)
     self.assertEqual(result.metadata.aperture, image.metadata.aperture)
     self.assertEqual(result.metadata.simulation_world,
                      image.metadata.simulation_world)
     self.assertEqual(result.metadata.lighting_model,
                      image.metadata.lighting_model)
     self.assertEqual(result.metadata.texture_mipmap_bias,
                      image.metadata.texture_mipmap_bias)
     self.assertEqual(result.metadata.normal_maps_enabled,
                      image.metadata.normal_maps_enabled)
     self.assertEqual(result.metadata.roughness_enabled,
                      image.metadata.roughness_enabled)
     self.assertEqual(result.metadata.geometry_decimation,
                      image.metadata.geometry_decimation)
     self.assertEqual(result.metadata.procedural_generation_seed,
                      image.metadata.procedural_generation_seed)
     self.assertEqual(result.metadata.average_scene_depth,
                      image.metadata.average_scene_depth)
Code Example #12
    def _make_metadata(self,
                       im_data,
                       depth_data,
                       label_data,
                       camera_pose,
                       right_camera_pose=None):
        focus_length = self._focus_distance
        aperture = self._aperture
        # if self._client is not None:
        #     fov = self._client.request('vget /camera/0/fov')
        #     focus_length = self._client.request('vget /camera/0/focus-distance')
        #     aperture = self._client.request('vget /camera/0/fstop')
        camera_intrinsics = self.get_camera_intrinsics()

        labelled_objects = []
        if label_data is not None:
            label_colors = set(
                tuple(color) for m2d in label_data for color in m2d)
            for color in label_colors:
                if color != (0, 0, 0):
                    name = self._client.request(
                        "vget /object/name {0} {1} {2}".format(
                            color[0], color[1], color[2]))
                    class_names = self._client.request(
                        "vget /object/{0}/labels".format(name))
                    class_names = set(class_names.lower().split(','))

                    label_points = cv2.findNonZero(
                        np.asarray(np.all(label_data == color, axis=2),
                                   dtype='uint8'))
                    # TODO: Other ground-truth bounding boxes could be useful, and are trivial to calculate here
                    # E.g.: Oriented bounding boxes, or fit ellipses. see:
                    # http://docs.opencv.org/2.4/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html
                    labelled_objects.append(
                        imeta.LabelledObject(
                            class_names=class_names,
                            bounding_box=cv2.boundingRect(label_points),
                            label_color=color,
                            relative_pose=self.get_object_pose(name),
                            object_id=name))

        return imeta.ImageMetadata(
            hash_=xxhash.xxh64(im_data).digest(),
            source_type=imeta.ImageSourceType.SYNTHETIC,
            camera_pose=camera_pose,
            right_camera_pose=right_camera_pose,
            intrinsics=camera_intrinsics,
            right_intrinsics=camera_intrinsics,
            environment_type=self._environment_type,
            light_level=self._light_level,
            time_of_day=self._time_of_day,
            lens_focal_distance=focus_length,
            aperture=aperture,
            simulator=self.identifier,
            simulation_world=self._world_name,
            lighting_model=imeta.LightingModel.LIT
            if self._lit_mode else imeta.LightingModel.UNLIT,
            texture_mipmap_bias=None,
            normal_maps_enabled=None,
            roughness_enabled=None,
            geometry_decimation=None,
            procedural_generation_seed=None,
            labelled_objects=labelled_objects,
            average_scene_depth=np.mean(depth_data)
            if depth_data is not None else None)
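
As the TODO above notes, other ground-truth shapes are easy to derive from the same non-zero points; for instance, an oriented bounding box via cv2.minAreaRect (a sketch, not part of the original pipeline):

# label_points as produced by cv2.findNonZero above
rotated_rect = cv2.minAreaRect(label_points)   # ((cx, cy), (w, h), angle)
corners = cv2.boxPoints(rotated_rect)          # 4x2 array of corner coordinates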
Code Example #13
def import_dataset(labels_path, db_client, **kwargs):
    """
    Import a real-world dataset with labelled images.
    :param labels_path:
    :param db_client:
    :param kwargs: Additional arguments passed to the image metadata
    :return:
    """
    if os.path.isdir(labels_path):
        # Look in the given folder for possible labels files
        candidates = glob.glob(os.path.join(labels_path, '*.txt'))
        if len(candidates) >= 1:
            labels_path = candidates[0]
        else:
            # Cannot find the labels file, return None
            return None
    builder = dataset.image_collection_builder.ImageCollectionBuilder(
        db_client)
    builder.set_non_sequential()
    with open(labels_path, 'r') as labels_file:
        base_dir = os.path.dirname(labels_path)
        for line in labels_file:
            split = re.split('[, ]', line)
            if len(split) != 6:
                continue
            imfile, x1, y1, x2, y2, label = split
            label = label.rstrip()
            im = cv2.imread(os.path.join(base_dir, imfile))
            im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)

            # Focal length in pixels: f = (size / 2) / tan(fov / 2).
            # The FOV is 90 degrees, so fov / 2 = pi / 4 and tan(pi / 4) = 1.
            focal_length = 1 / (2 * np.tan(np.pi / 4))
            # The FOV applies to the larger image dimension
            if im.shape[1] > im.shape[0]:
                focal_length = focal_length * im.shape[1]
            else:
                focal_length = focal_length * im.shape[0]

            labelled_object = imeta.LabelledObject(
                class_names=(label.lower(),),
                bounding_box=(int(x1), int(y1), int(x2) - int(x1), int(y2) - int(y1)),
                # The id matches the Unreal object name, so the object can be referenced later
                object_id='StarbucksCup_170'
            )
            image_entity = core.image_entity.ImageEntity(
                data=im,
                metadata=imeta.ImageMetadata(
                    hash_=xxhash.xxh64(im).digest(),
                    source_type=imeta.ImageSourceType.REAL_WORLD,
                    intrinsics=cam_intr.CameraIntrinsics(width=im.shape[1],
                                                         height=im.shape[0],
                                                         fx=focal_length,
                                                         fy=focal_length,
                                                         cx=0.5 * im.shape[1],
                                                         cy=0.5 * im.shape[0]),
                    camera_pose=tf.Transform(),
                    labelled_objects=(labelled_object, ),
                    **kwargs),
                additional_metadata=None)
            builder.add_image(image_entity)
    return builder.save()
Code Example #14
def build_image_metadata(im_data,
                         ground_truth_depth_data,
                         camera_pose,
                         metadata,
                         right_camera_pose=None):
    """
    Construct an image metadata object from the reference images and a metadata dict.
    Should delete the keys it uses from the metadata, so that the remaining values are  'additional metadata'
    :param im_data: The image data
    :param ground_truth_depth_data: Ground-truth depth, if available.
    :param metadata: The metadata dict
    :param camera_pose: The camera pose
    :param right_camera_pose: The pose of the right stereo camera, if available
    :return:
    """

    # Focal length in pixels from the field of view: f = (size / 2) / tan(fov / 2).
    # The FOV here is 90 degrees, so fov / 2 = np.pi / 4 radians and tan(fov / 2) = 1.
    focal_length = 1 / (2 * np.tan(np.pi / 4))

    # In Unreal 4, the field of view applies to whichever dimension is larger
    # See: https://answers.unrealengine.com/questions/36550/perspective-camera-and-field-of-view.html
    if im_data.shape[1] > im_data.shape[0]:  # Wider than tall: horizontal FOV
        focal_length = focal_length * im_data.shape[1]
    else:  # Taller than wide: vertical FOV
        focal_length = focal_length * im_data.shape[0]
    camera_intrinsics = intrins.CameraIntrinsics(width=im_data.shape[1],
                                                 height=im_data.shape[0],
                                                 fx=focal_length,
                                                 fy=focal_length,
                                                 cx=0.5 * im_data.shape[1],
                                                 cy=0.5 * im_data.shape[0])
    image_metadata = imeta.ImageMetadata(
        hash_=xxhash.xxh64(np.ascontiguousarray(im_data)).digest(),
        source_type=imeta.ImageSourceType.SYNTHETIC,
        camera_pose=camera_pose,
        right_camera_pose=right_camera_pose,
        environment_type=imeta.EnvironmentType.INDOOR,
        light_level=imeta.LightingLevel.EVENLY_LIT,
        time_of_day=imeta.TimeOfDay.DAY,
        intrinsics=camera_intrinsics,
        right_intrinsics=camera_intrinsics
        if right_camera_pose is not None else None,
        lens_focal_distance=None,
        aperture=None,
        simulation_world=metadata['World Name'],
        lighting_model=imeta.LightingModel.LIT,
        texture_mipmap_bias=int(
            metadata['Material Properties']['BaseMipMapBias']),
        normal_maps_enabled=int(
            metadata['Material Properties']['NormalQuality']) != 0,
        roughness_enabled=int(
            metadata['Material Properties']['RoughnessQuality']) != 0,
        geometry_decimation=int(
            metadata['Geometry Detail']['Forced LOD level']),
        procedural_generation_seed=int(
            metadata['World Information']['Camera Path']['Path Generation']
            ['Random Seed']),
        labelled_objects=[],
        average_scene_depth=np.mean(ground_truth_depth_data)
        if ground_truth_depth_data is not None else None)
    for key in {'World Name', 'Material Properties', 'Geometry Detail'}:
        del metadata[key]
    return image_metadata
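
As a quick numeric check of the focal length formula used above, a 90-degree FOV on a 1280-pixel-wide image gives f = (1280 / 2) / tan(45°) = 640 pixels:

import numpy as np
focal_length = 1 / (2 * np.tan(np.pi / 4)) * 1280
assert abs(focal_length - 640.0) < 1e-9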
Code Example #15
def import_dataset(root_folder, db_client, sequence_number=0):
    """
    Load a KITTI image sequences into the database.
    :return:
    """
    sequence_name = "{0:02}".format(sequence_number)
    if not os.path.isdir(root_folder) or not os.path.isdir(
            os.path.join(root_folder, sequence_name)):
        return None

    data = pykitti.odometry(root_folder, sequence=sequence_name)
    builder = dataset.image_collection_builder.ImageCollectionBuilder(
        db_client)

    # dataset.calib:      Calibration data are accessible as a named tuple
    # dataset.timestamps: Timestamps are parsed into a list of timedelta objects
    # dataset.poses:      Generator to load ground truth poses T_w_cam0
    # dataset.camN:       Generator to load individual images from camera N
    # dataset.gray:       Generator to load monochrome stereo pairs (cam0, cam1)
    # dataset.rgb:        Generator to load RGB stereo pairs (cam2, cam3)
    # dataset.velo:       Generator to load velodyne scans as [x,y,z,reflectance]
    for left_image, right_image, timestamp, pose in zip(
            data.cam2, data.cam3, data.timestamps, data.poses):
        camera_pose = make_camera_pose(pose)
        # camera pose is for cam0, we want cam2, which is 6cm (0.06m) to the left
        camera_pose = camera_pose.find_independent(
            tf.Transform(location=(0, 0.06, 0),
                         rotation=(0, 0, 0, 1),
                         w_first=False))
        # Stereo offset is 0.54m (http://www.cvlibs.net/datasets/kitti/setup.php)
        right_camera_pose = camera_pose.find_independent(
            tf.Transform(location=(0, -0.54, 0),
                         rotation=(0, 0, 0, 1),
                         w_first=False))
        camera_intrinsics = intrins.CameraIntrinsics(
            height=left_image.shape[0],
            width=left_image.shape[1],
            fx=data.calib.K_cam2[0, 0],
            fy=data.calib.K_cam2[1, 1],
            cx=data.calib.K_cam2[0, 2],
            cy=data.calib.K_cam2[1, 2])
        right_camera_intrinsics = intrins.CameraIntrinsics(
            height=right_image.shape[0],
            width=right_image.shape[1],
            fx=data.calib.K_cam3[0, 0],
            fy=data.calib.K_cam3[1, 1],
            cx=data.calib.K_cam3[0, 2],
            cy=data.calib.K_cam3[1, 2])
        builder.add_image(image=core.image_entity.StereoImageEntity(
            left_data=left_image,
            right_data=right_image,
            metadata=imeta.ImageMetadata(
                hash_=xxhash.xxh64(left_image).digest(),
                camera_pose=camera_pose,  # the adjusted cam2 pose computed above, not the raw cam0 pose
                right_camera_pose=right_camera_pose,
                intrinsics=camera_intrinsics,
                right_intrinsics=right_camera_intrinsics,
                source_type=imeta.ImageSourceType.REAL_WORLD,
                environment_type=imeta.EnvironmentType.OUTDOOR_URBAN,
                light_level=imeta.LightingLevel.WELL_LIT,
                time_of_day=imeta.TimeOfDay.AFTERNOON,
            ),
            additional_metadata={
                'dataset': 'KITTI',
                'sequence': sequence_number
            }),
                          timestamp=timestamp.total_seconds())
    return builder.save()
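
make_camera_pose is defined elsewhere; assuming KITTI ground-truth poses arrive as 4x4 homogeneous matrices T_w_cam0, a minimal conversion to a location plus w-first quaternion might look like this (ignoring any axis remapping from KITTI's camera frame that the real helper presumably performs):

def make_camera_pose_sketch(pose_matrix):
    location = pose_matrix[0:3, 3]
    rot = pose_matrix[0:3, 0:3]
    # Standard rotation-matrix-to-quaternion conversion, valid when the trace > -1
    qw = np.sqrt(max(0.0, 1.0 + rot[0, 0] + rot[1, 1] + rot[2, 2])) / 2
    qx = (rot[2, 1] - rot[1, 2]) / (4 * qw)
    qy = (rot[0, 2] - rot[2, 0]) / (4 * qw)
    qz = (rot[1, 0] - rot[0, 1]) / (4 * qw)
    return tf.Transform(location=location, rotation=(qw, qx, qy, qz), w_first=True)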
Code Example #16
    def test_process_image_sends_image_and_depth_to_subprocess(self, mock_multiprocessing):
        mock_process = mock.create_autospec(multiprocessing.Process)
        mock_queue = mock.create_autospec(multiprocessing.queues.Queue)     # Have to be specific to get the class
        mock_queue.qsize.return_value = 0
        mock_multiprocessing.Process.return_value = mock_process
        mock_multiprocessing.Queue.return_value = mock_queue

        mock_image_data = np.random.randint(0, 255, (32, 32, 3), dtype='uint8')
        mock_depth_data = np.random.randint(0, 255, (32, 32, 3), dtype='uint8')
        image = core.image.Image(data=mock_image_data, depth_data=mock_depth_data, metadata=imeta.ImageMetadata(
            source_type=imeta.ImageSourceType.SYNTHETIC,
            hash_=b'\x00\x00\x00\x00\x00\x00\x00\x01'
        ))

        subject = self.make_instance()
        with mock.patch('systems.slam.orbslam2.open', mock.mock_open(), create=True):
            subject.start_trial(core.sequence_type.ImageSequenceType.SEQUENTIAL)
        self.assertTrue(mock_multiprocessing.Process.called)
        self.assertIn(mock_queue, mock_multiprocessing.Process.call_args[1]['args'])

        subject.process_image(image, 12)
        self.assertTrue(mock_queue.put.called)
Code Example #17
    def test_make_from_images(self):
        left_pose = tf.Transform((1, 2, 3), (0.5, 0.5, -0.5, -0.5))
        right_pose = tf.Transform(location=left_pose.find_independent((0, 0, 15)),
                                  rotation=left_pose.rotation_quat(w_first=False),
                                  w_first=False)
        metadata = imeta.ImageMetadata(
            hash_=b'\x1f`\xa8\x8aR\xed\x9f\x0b',
            source_type=imeta.ImageSourceType.SYNTHETIC,
            camera_pose=left_pose, right_camera_pose=right_pose,
            intrinsics=cam_intr.CameraIntrinsics(32, 32, 15, 21, 16, 16),
            right_intrinsics=cam_intr.CameraIntrinsics(32, 32, 13, 7, 16, 16),
            environment_type=imeta.EnvironmentType.INDOOR_CLOSE,
            light_level=imeta.LightingLevel.WELL_LIT, time_of_day=imeta.TimeOfDay.DAY,
            lens_focal_distance=5, aperture=22, simulation_world='TestSimulationWorld',
            lighting_model=imeta.LightingModel.LIT, texture_mipmap_bias=1,
            normal_maps_enabled=2, roughness_enabled=True, geometry_decimation=0.8,
            procedural_generation_seed=16234, labelled_objects=(
                imeta.LabelledObject(class_names=('car',), bounding_box=(12, 144, 67, 43), label_color=(123, 127, 112),
                                     relative_pose=tf.Transform((12, 3, 4), (0.5, 0.1, 1, 1.7)), object_id='Car-002'),
                imeta.LabelledObject(class_names=('cat',), bounding_box=(125, 244, 117, 67), label_color=(27, 89, 62),
                                     relative_pose=tf.Transform((378, -1890, 38), (0.3, 1.12, 1.1, 0.2)),
                                     object_id='cat-090')
            ), average_scene_depth=90.12)
        left_image = im.Image(
            data=self.left_data,
            depth_data=self.left_depth,
            labels_data=self.left_labels,
            world_normals_data=self.left_normals,
            metadata=metadata,
            additional_metadata={
                'Source': 'Generated',
                'Resolution': {'width': 1280, 'height': 720},
                'Material Properties': {
                    'BaseMipMapBias': 0,
                    'RoughnessQuality': True
                }
            }
        )
        right_image = im.Image(
            data=self.right_data,
            depth_data=self.right_depth,
            labels_data=self.right_labels,
            world_normals_data=self.right_normals,
            metadata=metadata,
            additional_metadata={
                'Source': 'Generated',
                'Resolution': {'width': 1280, 'height': 720},
                'Material Properties': {
                    'BaseMipMapBias': 1,
                    'RoughnessQuality': False
                },
                'skeletons': 'There is already one inside you'
            }
        )

        stereo_image = im.StereoImage.make_from_images(left_image, right_image)
        self.assertEqual(stereo_image.additional_metadata,
                         du.defaults(left_image.additional_metadata, right_image.additional_metadata))
        self.assertNPEqual(stereo_image.left_camera_location, left_image.camera_location)
        self.assertNPEqual(stereo_image.left_camera_orientation, left_image.camera_orientation)
        self.assertNPEqual(stereo_image.left_data, left_image.data)
        self.assertNPEqual(stereo_image.left_depth_data, left_image.depth_data)
        self.assertNPEqual(stereo_image.left_labels_data, left_image.labels_data)
        self.assertNPEqual(stereo_image.left_world_normals_data, left_image.world_normals_data)

        self.assertNPEqual(stereo_image.right_camera_location, right_image.camera_location)
        self.assertNPEqual(stereo_image.right_camera_orientation, right_image.camera_orientation)
        self.assertNPEqual(stereo_image.right_data, right_image.data)
        self.assertNPEqual(stereo_image.right_depth_data, right_image.depth_data)
        self.assertNPEqual(stereo_image.right_labels_data, right_image.labels_data)
        self.assertNPEqual(stereo_image.right_world_normals_data, right_image.world_normals_data)
Code Example #18
def make_stereo_image(index=1, **kwargs):
    kwargs = du.defaults(kwargs, {
        'id_': bson.objectid.ObjectId(),
        'left_data': np.random.uniform(0, 255, (32, 32, 3)),
        'right_data': np.random.uniform(0, 255, (32, 32, 3)),
        'metadata': imeta.ImageMetadata(
            hash_=b'\xf1\x9a\xe2|' + np.random.randint(0, 0xFFFFFFFF).to_bytes(4, 'big'),
            source_type=imeta.ImageSourceType.SYNTHETIC,
            camera_pose=tf.Transform(
                location=(1 + 100 * index, 2 + np.random.uniform(-1, 1), 3),
                rotation=(4, 5, 6, 7 + np.random.uniform(-4, 4))),
            right_camera_pose=tf.Transform(
                location=(1 + 100 * index, 12 + np.random.uniform(-1, 1), 3),
                rotation=(4, 5, 6, 7 + np.random.uniform(-4, 4))),
            intrinsics=cam_intr.CameraIntrinsics(800, 600, 550.2, 750.2, 400, 300),
            right_intrinsics=cam_intr.CameraIntrinsics(800, 600, 550.2, 750.2, 400, 300),
            environment_type=imeta.EnvironmentType.INDOOR_CLOSE,
            light_level=imeta.LightingLevel.WELL_LIT,
            time_of_day=imeta.TimeOfDay.DAY,
            lens_focal_distance=5,
            aperture=22,
            simulation_world='TestSimulationWorld',
            lighting_model=imeta.LightingModel.LIT,
            texture_mipmap_bias=1,
            normal_maps_enabled=2,
            roughness_enabled=True,
            geometry_decimation=0.8,
            procedural_generation_seed=16234,
            labelled_objects=(
                imeta.LabelledObject(
                    class_names=('car',),
                    bounding_box=(12, 144, 67, 43),
                    label_color=(123, 127, 112),
                    relative_pose=tf.Transform((12, 3, 4), (0.5, 0.1, 1, 1.7)),
                    object_id='Car-002'),
                imeta.LabelledObject(
                    class_names=('cat',),
                    bounding_box=(125, 244, 117, 67),
                    label_color=(27, 89, 62),
                    relative_pose=tf.Transform((378, -1890, 38), (0.3, 1.12, 1.1, 0.2)),
                    object_id='cat-090')),
            average_scene_depth=90.12),
        'additional_metadata': {
            'Source': 'Generated',
            'Resolution': {'width': 1280, 'height': 720},
            'Material Properties': {
                'BaseMipMapBias': 0,
                'RoughnessQuality': True
            }
        },
        'left_depth_data': np.random.uniform(0, 1, (32, 32)),
        'right_depth_data': np.random.uniform(0, 1, (32, 32)),
        'left_labels_data': np.random.uniform(0, 1, (32, 32, 3)),
        'right_labels_data': np.random.uniform(0, 1, (32, 32, 3)),
        'left_world_normals_data': np.random.uniform(0, 1, (32, 32, 3)),
        'right_world_normals_data': np.random.uniform(0, 1, (32, 32, 3))
    })
    return ie.StereoImageEntity(**kwargs)
Code Example #19
class TestImageMetadata(unittest.TestCase):

    parent_image = core.image.Image(
        data=np.random.randint(0, 255, (32, 32, 3), dtype='uint8'),
        metadata=imeta.ImageMetadata(
            source_type=imeta.ImageSourceType.SYNTHETIC,
            hash_=b'\x1f`\xa8\x8aR\xed\x9f\x0b'))

    def make_metadata(self, **kwargs):
        kwargs = du.defaults(kwargs, {
            'hash_': b'\xa5\xc9\x08\xaf$\x0b\x116',
            'source_type': imeta.ImageSourceType.SYNTHETIC,
            'environment_type': imeta.EnvironmentType.INDOOR_CLOSE,
            'light_level': imeta.LightingLevel.WELL_LIT,
            'time_of_day': imeta.TimeOfDay.DAY,
            'camera_pose': tf.Transform((1, 3, 4), (0.2, 0.8, 0.2, -0.7)),
            'right_camera_pose': tf.Transform((-10, -20, -30), (0.9, -0.7, 0.5, -0.3)),
            'intrinsics': cam_intr.CameraIntrinsics(700, 700, 654.2, 753.3, 400, 300),
            'right_intrinsics': cam_intr.CameraIntrinsics(700, 710, 732.1, 612.3, 400, 300),
            'lens_focal_distance': 5,
            'aperture': 22,
            'simulator': bson.ObjectId('5a14cf0e36ed1e17a55f1e35'),
            'simulation_world': 'TestSimulationWorld',
            'lighting_model': imeta.LightingModel.LIT,
            'texture_mipmap_bias': 1,
            'normal_maps_enabled': True,
            'roughness_enabled': True,
            'geometry_decimation': 0.8,
            'procedural_generation_seed': 16234,
            'labelled_objects': [
                imeta.LabelledObject(
                    class_names=('cup',),
                    bounding_box=(142, 280, 54, 78),
                    label_color=(2, 227, 34),
                    relative_pose=tf.Transform(location=(-246, 468, 4), rotation=(0.2, 0.3, 0.4)),
                    object_id='LabelledObject-68478'),
                imeta.LabelledObject(
                    class_names=('car',),
                    bounding_box=(542, 83, 63, 123),
                    label_color=(26, 12, 212),
                    relative_pose=tf.Transform(location=(61, -717, 161), rotation=(0.7, 0.6, 0.3)),
                    object_id='LabelledObject-8246'),
                imeta.LabelledObject(
                    class_names=('cow',),
                    bounding_box=(349, 672, 124, 208),
                    label_color=(162, 134, 163),
                    relative_pose=tf.Transform(location=(286, -465, -165), rotation=(0.9, 0.1, 0.5)),
                    object_id='LabelledObject-56485')
            ],
            'average_scene_depth': 90.12,
            'base_image': self.parent_image,
            'transformation_matrix': np.array([
                [0.19882871, 0.58747441, 0.90084303],
                [0.6955363, 0.48193339, 0.09503605],
                [0.20549805, 0.6110534, 0.61145574]])
        })
        return imeta.ImageMetadata(**kwargs)

    def test_constructor_works_with_minimal_parameters(self):
        imeta.ImageMetadata(source_type=imeta.ImageSourceType.SYNTHETIC,
                            hash_=b'\x1f`\xa8\x8aR\xed\x9f\x0b')

    def test_serialize_and_deserialise(self):
        entity1 = self.make_metadata()
        s_entity1 = entity1.serialize()

        entity2 = imeta.ImageMetadata.deserialize(s_entity1)
        s_entity2 = entity2.serialize()

        self.assert_metadata_equal(entity1, entity2)
        self.assertEqual(s_entity1, s_entity2)

        for idx in range(100):
            # Test that repeated serialization and deserialization does not degrade the information
            entity2 = imeta.ImageMetadata.deserialize(s_entity2)
            s_entity2 = entity2.serialize()
            self.assert_metadata_equal(entity1, entity2)
            self.assertEqual(s_entity1, s_entity2)

    def test_serialize_and_deserialize_works_with_minimal_parameters(self):
        entity1 = imeta.ImageMetadata(
            source_type=imeta.ImageSourceType.SYNTHETIC,
            hash_=b'\x1f`\xa8\x8aR\xed\x9f\x0b',
            intrinsics=cam_intr.CameraIntrinsics(800, 600, 652.2, 291, 142.2,
                                                 614.4))
        s_entity1 = entity1.serialize()

        entity2 = imeta.ImageMetadata.deserialize(s_entity1)
        s_entity2 = entity2.serialize()

        self.assert_metadata_equal(entity1, entity2)
        self.assertEqual(s_entity1, s_entity2)

        for idx in range(100):
            # Test that repeated serialization and deserialization does not degrade the information
            entity2 = imeta.ImageMetadata.deserialize(s_entity2)
            s_entity2 = entity2.serialize()
            self.assert_metadata_equal(entity1, entity2)
            self.assertEqual(s_entity1, s_entity2)

    def test_equals(self):
        alt_metadata = {
            'hash_': [b'\x1f`\xa8\x8aR\xed\x9f\x0b'],
            'source_type': [imeta.ImageSourceType.REAL_WORLD],
            'environment_type': [
                imeta.EnvironmentType.INDOOR,
                imeta.EnvironmentType.OUTDOOR_URBAN,
                imeta.EnvironmentType.OUTDOOR_LANDSCAPE
            ],
            'light_level': [
                imeta.LightingLevel.PITCH_BLACK, imeta.LightingLevel.DIM,
                imeta.LightingLevel.EVENLY_LIT, imeta.LightingLevel.BRIGHT
            ],
            'time_of_day': [
                imeta.TimeOfDay.DAWN, imeta.TimeOfDay.MORNING,
                imeta.TimeOfDay.AFTERNOON, imeta.TimeOfDay.TWILIGHT,
                imeta.TimeOfDay.NIGHT
            ],
            'camera_pose': [tf.Transform((12, 13, 14), (-0.5, 0.3, 0.8, -0.9))],
            'right_camera_pose': [tf.Transform((11, 15, 19), (-0.2, 0.4, 0.6, -0.8))],
            'intrinsics': [cam_intr.CameraIntrinsics(900, 910, 124.8, 285.7, 640, 360)],
            'right_intrinsics': [cam_intr.CameraIntrinsics(900, 890, 257.9, 670.12, 640, 360)],
            'lens_focal_distance': [22],
            'aperture': [1.2],
            'simulator': [bson.ObjectId()],
            'simulation_world': ['TestSimulationWorld2'],
            'lighting_model': [imeta.LightingModel.UNLIT],
            'texture_mipmap_bias': [2],
            'normal_maps_enabled': [False],
            'roughness_enabled': [False],
            'geometry_decimation': [0.3],
            'procedural_generation_seed': [7329],
            'average_scene_depth': [102.33],
            'base_image': [mock.create_autospec(core.image.Image)],
            'transformation_matrix': [np.random.uniform(0, 1, (3, 3))],
            'labelled_objects': [
                tuple(),
                (imeta.LabelledObject(class_names=('cup', ),
                                      bounding_box=(142, 280, 54, 78),
                                      label_color=(2, 227, 34),
                                      relative_pose=tf.Transform(
                                          location=(-246, 468, 4),
                                          rotation=(0.2, 0.3, 0.4)),
                                      object_id='LabelledObject-68478'),
                 imeta.LabelledObject(class_names=('cat', ),
                                      bounding_box=(542, 83, 63, 123),
                                      label_color=(26, 12, 212),
                                      relative_pose=tf.Transform(
                                          location=(61, -717, 161),
                                          rotation=(0.7, 0.6, 0.3)),
                                      object_id='LabelledObject-8246'),
                 imeta.LabelledObject(class_names=('cow', ),
                                      bounding_box=(349, 672, 124, 208),
                                      label_color=(162, 134, 163),
                                      relative_pose=tf.Transform(
                                          location=(286, -465, -165),
                                          rotation=(0.9, 0.1, 0.5)),
                                      object_id='LabelledObject-56485')),
                (imeta.LabelledObject(class_names=('cup', ),
                                      bounding_box=(142, 12, 54, 78),
                                      label_color=(2, 227, 34),
                                      relative_pose=tf.Transform(
                                          location=(-246, 468, 4),
                                          rotation=(0.2, 0.3, 0.4)),
                                      object_id='LabelledObject-68478'),
                 imeta.LabelledObject(class_names=('car', ),
                                      bounding_box=(542, 83, 63, 123),
                                      label_color=(26, 12, 212),
                                      relative_pose=tf.Transform(
                                          location=(61, -717, 161),
                                          rotation=(0.7, 0.6, 0.3)),
                                      object_id='LabelledObject-8246'),
                 imeta.LabelledObject(class_names=('cow', ),
                                      bounding_box=(349, 672, 124, 208),
                                      label_color=(162, 134, 163),
                                      relative_pose=tf.Transform(
                                          location=(286, -465, -165),
                                          rotation=(0.9, 0.1, 0.5)),
                                      object_id='LabelledObject-56485')),
                (imeta.LabelledObject(class_names=('cup', ),
                                      bounding_box=(142, 280, 54, 78),
                                      label_color=(2, 227, 34),
                                      relative_pose=tf.Transform(
                                          location=(-246, 468, 4),
                                          rotation=(0.2, 0.3, 0.4)),
                                      object_id='LabelledObject-68478'),
                 imeta.LabelledObject(class_names=('car', ),
                                      bounding_box=(542, 83, 63, 123),
                                      label_color=(26, 12, 212),
                                      relative_pose=tf.Transform(
                                          location=(61, -717, 161),
                                          rotation=(0.7, 0.6, 0.3)),
                                      object_id='LabelledObject-8246'),
                 imeta.LabelledObject(class_names=('cow', ),
                                      bounding_box=(349, 672, 124, 208),
                                      label_color=(162, 134, 255),
                                      relative_pose=tf.Transform(
                                          location=(286, -465, -165),
                                          rotation=(0.9, 0.1, 0.5)),
                                      object_id='LabelledObject-56485'))
            ]
        }
        a = self.make_metadata()
        b = self.make_metadata()
        self.assertEqual(a, b)

        # Change single keys, and make sure it is no longer equal
        for key, values in alt_metadata.items():
            for val in values:
                b = self.make_metadata(**{key: val})
                self.assertNotEqual(
                    a, b,
                    "Changing key {0} to {1} did not change equality".format(
                        key, str(val)))

    def test_hash(self):
        alt_metadata = {
            'hash_': [b'\x1f`\xa8\x8aR\xed\x9f\x0b'],
            'source_type': [imeta.ImageSourceType.REAL_WORLD],
            'environment_type': [
                imeta.EnvironmentType.INDOOR,
                imeta.EnvironmentType.OUTDOOR_URBAN,
                imeta.EnvironmentType.OUTDOOR_LANDSCAPE
            ],
            'light_level': [
                imeta.LightingLevel.PITCH_BLACK, imeta.LightingLevel.DIM,
                imeta.LightingLevel.EVENLY_LIT, imeta.LightingLevel.BRIGHT
            ],
            'time_of_day': [
                imeta.TimeOfDay.DAWN, imeta.TimeOfDay.MORNING,
                imeta.TimeOfDay.AFTERNOON, imeta.TimeOfDay.TWILIGHT,
                imeta.TimeOfDay.NIGHT
            ],
            'camera_pose': [tf.Transform((12, 13, 14), (-0.5, 0.3, 0.8, -0.9))],
            'right_camera_pose': [tf.Transform((11, 15, 19), (-0.2, 0.4, 0.6, -0.8))],
            'intrinsics': [cam_intr.CameraIntrinsics(900, 910, 184.9, 892.5, 640, 360)],
            'right_intrinsics': [cam_intr.CameraIntrinsics(900, 890, 963.1, 816.2, 640, 360)],
            'lens_focal_distance': [22],
            'aperture': [1.2],
            'simulator': [bson.ObjectId()],
            'simulation_world': ['TestSimulationWorld2'],
            'lighting_model': [imeta.LightingModel.UNLIT],
            'texture_mipmap_bias': [2],
            'normal_maps_enabled': [False],
            'roughness_enabled': [False],
            'geometry_decimation': [0.3],
            'procedural_generation_seed': [7329],
            'average_scene_depth': [102.33],
            'base_image': [mock.create_autospec(core.image.Image)],
            'transformation_matrix': [np.random.uniform(0, 1, (3, 3))],
            'labelled_objects': [
                tuple(),
                (imeta.LabelledObject(class_names=('cup', ),
                                      bounding_box=(142, 280, 54, 78),
                                      label_color=(2, 227, 34),
                                      relative_pose=tf.Transform(
                                          location=(-246, 468, 4),
                                          rotation=(0.2, 0.3, 0.4)),
                                      object_id='LabelledObject-68478'),
                 imeta.LabelledObject(class_names=('cat', ),
                                      bounding_box=(542, 83, 63, 123),
                                      label_color=(26, 12, 212),
                                      relative_pose=tf.Transform(
                                          location=(61, -717, 161),
                                          rotation=(0.7, 0.6, 0.3)),
                                      object_id='LabelledObject-8246'),
                 imeta.LabelledObject(class_names=('cow', ),
                                      bounding_box=(349, 672, 124, 208),
                                      label_color=(162, 134, 163),
                                      relative_pose=tf.Transform(
                                          location=(286, -465, -165),
                                          rotation=(0.9, 0.1, 0.5)),
                                      object_id='LabelledObject-56485')),
                (imeta.LabelledObject(class_names=('cup', ),
                                      bounding_box=(142, 12, 54, 78),
                                      label_color=(2, 227, 34),
                                      relative_pose=tf.Transform(
                                          location=(-246, 468, 4),
                                          rotation=(0.2, 0.3, 0.4)),
                                      object_id='LabelledObject-68478'),
                 imeta.LabelledObject(class_names=('car', ),
                                      bounding_box=(542, 83, 63, 123),
                                      label_color=(26, 12, 212),
                                      relative_pose=tf.Transform(
                                          location=(61, -717, 161),
                                          rotation=(0.7, 0.6, 0.3)),
                                      object_id='LabelledObject-8246'),
                 imeta.LabelledObject(class_names=('cow', ),
                                      bounding_box=(349, 672, 124, 208),
                                      label_color=(162, 134, 163),
                                      relative_pose=tf.Transform(
                                          location=(286, -465, -165),
                                          rotation=(0.9, 0.1, 0.5)),
                                      object_id='LabelledObject-56485')),
                (imeta.LabelledObject(class_names=('cup', ),
                                      bounding_box=(142, 280, 54, 78),
                                      label_color=(2, 227, 34),
                                      relative_pose=tf.Transform(
                                          location=(-246, 468, 4),
                                          rotation=(0.2, 0.3, 0.4)),
                                      object_id='LabelledObject-68478'),
                 imeta.LabelledObject(class_names=('car', ),
                                      bounding_box=(542, 83, 63, 123),
                                      label_color=(26, 12, 212),
                                      relative_pose=tf.Transform(
                                          location=(61, -717, 161),
                                          rotation=(0.7, 0.6, 0.3)),
                                      object_id='LabelledObject-8246'),
                 imeta.LabelledObject(class_names=('cow', ),
                                      bounding_box=(349, 672, 124, 208),
                                      label_color=(162, 134, 255),
                                      relative_pose=tf.Transform(
                                          location=(286, -465, -165),
                                          rotation=(0.9, 0.1, 0.5)),
                                      object_id='LabelledObject-56485'))
            ]
        }
        a = self.make_metadata()
        b = self.make_metadata()
        self.assertEqual(hash(a), hash(b))

        # Change single keys, and make sure the hash is no longer equal
        for key, values in alt_metadata.items():
            for val in values:
                b = self.make_metadata(**{key: val})
                self.assertNotEqual(
                    hash(a), hash(b),
                    "Changing key {0} to {1} did not change the hash".format(
                        key, str(val)))

    def test_clone(self):
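        # clone() with no arguments should produce an identical copy;
        # clone(**{key: val}) should change only the overridden field.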
        alt_metadata = {
            'hash_': [b'\x1f`\xa8\x8aR\xed\x9f\x0b'],
            'source_type': [imeta.ImageSourceType.REAL_WORLD],
            'environment_type': [
                imeta.EnvironmentType.INDOOR,
                imeta.EnvironmentType.OUTDOOR_URBAN,
                imeta.EnvironmentType.OUTDOOR_LANDSCAPE
            ],
            'light_level': [
                imeta.LightingLevel.PITCH_BLACK, imeta.LightingLevel.DIM,
                imeta.LightingLevel.EVENLY_LIT, imeta.LightingLevel.BRIGHT
            ],
            'time_of_day': [
                imeta.TimeOfDay.DAWN, imeta.TimeOfDay.MORNING,
                imeta.TimeOfDay.AFTERNOON, imeta.TimeOfDay.TWILIGHT,
                imeta.TimeOfDay.NIGHT
            ],
            'camera_pose':
            [tf.Transform((12, 13, 14), (-0.5, 0.3, 0.8, -0.9))],
            'right_camera_pose':
            [tf.Transform((11, 15, 19), (-0.2, 0.4, 0.6, -0.8))],
            'intrinsics':
            [cam_intr.CameraIntrinsics(900, 910, 894.7, 861.2, 640, 360)],
            'right_intrinsics':
            [cam_intr.CameraIntrinsics(900, 890, 760.45, 405.1, 640, 360)],
            'lens_focal_distance': [22],
            'aperture': [1.2],
            'simulator': [bson.ObjectId()],
            'simulation_world': ['TestSimulationWorld2'],
            'lighting_model': [imeta.LightingModel.UNLIT],
            'texture_mipmap_bias': [2],
            'normal_maps_enabled': [False],
            'roughness_enabled': [False],
            'geometry_decimation': [0.3],
            'procedural_generation_seed': [7329],
            'average_scene_depth': [102.33],
            'base_image': [mock.create_autospec(core.image.Image)],
            'transformation_matrix': [np.random.uniform(0, 1, (3, 3))],
            'labelled_objects': [
                tuple(),
                (imeta.LabelledObject(class_names=('cup', ),
                                      bounding_box=(142, 280, 54, 78),
                                      label_color=(2, 227, 34),
                                      relative_pose=tf.Transform(
                                          location=(-246, 468, 4),
                                          rotation=(0.2, 0.3, 0.4)),
                                      object_id='LabelledObject-68478'),
                 imeta.LabelledObject(class_names=('cat', ),
                                      bounding_box=(542, 83, 63, 123),
                                      label_color=(26, 12, 212),
                                      relative_pose=tf.Transform(
                                          location=(61, -717, 161),
                                          rotation=(0.7, 0.6, 0.3)),
                                      object_id='LabelledObject-8246'),
                 imeta.LabelledObject(class_names=('cow', ),
                                      bounding_box=(349, 672, 124, 208),
                                      label_color=(162, 134, 163),
                                      relative_pose=tf.Transform(
                                          location=(286, -465, -165),
                                          rotation=(0.9, 0.1, 0.5)),
                                      object_id='LabelledObject-56485')),
                (imeta.LabelledObject(class_names=('cup', ),
                                      bounding_box=(142, 12, 54, 78),
                                      label_color=(2, 227, 34),
                                      relative_pose=tf.Transform(
                                          location=(-246, 468, 4),
                                          rotation=(0.2, 0.3, 0.4)),
                                      object_id='LabelledObject-68478'),
                 imeta.LabelledObject(class_names=('car', ),
                                      bounding_box=(542, 83, 63, 123),
                                      label_color=(26, 12, 212),
                                      relative_pose=tf.Transform(
                                          location=(61, -717, 161),
                                          rotation=(0.7, 0.6, 0.3)),
                                      object_id='LabelledObject-8246'),
                 imeta.LabelledObject(class_names=('cow', ),
                                      bounding_box=(349, 672, 124, 208),
                                      label_color=(162, 134, 163),
                                      relative_pose=tf.Transform(
                                          location=(286, -465, -165),
                                          rotation=(0.9, 0.1, 0.5)),
                                      object_id='LabelledObject-56485')),
                (imeta.LabelledObject(class_names=('cup', ),
                                      bounding_box=(142, 280, 54, 78),
                                      label_color=(2, 227, 34),
                                      relative_pose=tf.Transform(
                                          location=(-246, 468, 4),
                                          rotation=(0.2, 0.3, 0.4)),
                                      object_id='LabelledObject-68478'),
                 imeta.LabelledObject(class_names=('car', ),
                                      bounding_box=(542, 83, 63, 123),
                                      label_color=(26, 12, 212),
                                      relative_pose=tf.Transform(
                                          location=(61, -717, 161),
                                          rotation=(0.7, 0.6, 0.3)),
                                      object_id='LabelledObject-8246'),
                 imeta.LabelledObject(class_names=('cow', ),
                                      bounding_box=(349, 672, 124, 208),
                                      label_color=(162, 134, 255),
                                      relative_pose=tf.Transform(
                                          location=(286, -465, -165),
                                          rotation=(0.9, 0.1, 0.5)),
                                      object_id='LabelledObject-56485'))
            ]
        }
        a = self.make_metadata()
        b = a.clone()
        self.assert_metadata_equal(a, b)

        # Change single keys, and check that only the overridden field
        # differs in the clone
        for key, values in alt_metadata.items():
            for val in values:
                b = a.clone(**{key: val})
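                # The overridden key should take the new value; every other
                # property should carry over from the original unchanged.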

                if key == 'hash_':
                    self.assertEqual(val, b.hash)
                    self.assertNotEqual(a.hash, b.hash)
                else:
                    self.assertEqual(a.hash, b.hash)
                if key == 'source_type':
                    self.assertEqual(val, b.source_type)
                    self.assertNotEqual(a.source_type, b.source_type)
                else:
                    self.assertEqual(a.source_type, b.source_type)
                if key == 'environment_type':
                    self.assertEqual(val, b.environment_type)
                    self.assertNotEqual(a.environment_type, b.environment_type)
                else:
                    self.assertEqual(a.environment_type, b.environment_type)
                if key == 'light_level':
                    self.assertEqual(val, b.light_level)
                    self.assertNotEqual(a.light_level, b.light_level)
                else:
                    self.assertEqual(a.light_level, b.light_level)
                if key == 'time_of_day':
                    self.assertEqual(val, b.time_of_day)
                    self.assertNotEqual(a.time_of_day, b.time_of_day)
                else:
                    self.assertEqual(a.time_of_day, b.time_of_day)
                if key == 'camera_pose':
                    self.assertEqual(val, b.camera_pose)
                    self.assertNotEqual(a.camera_pose, b.camera_pose)
                else:
                    self.assertEqual(a.camera_pose, b.camera_pose)
                if key == 'right_camera_pose':
                    self.assertEqual(val, b.right_camera_pose)
                    self.assertNotEqual(a.right_camera_pose,
                                        b.right_camera_pose)
                else:
                    self.assertEqual(a.right_camera_pose, b.right_camera_pose)
                if key == 'intrinsics':
                    self.assertEqual(val, b.camera_intrinsics)
                    self.assertNotEqual(a.camera_intrinsics,
                                        b.camera_intrinsics)
                else:
                    self.assertEqual(a.camera_intrinsics, b.camera_intrinsics)
                    self.assertEqual(a.width, b.width)
                    self.assertEqual(a.height, b.height)
                if key == 'right_intrinsics':
                    self.assertEqual(val, b.right_camera_intrinsics)
                    self.assertNotEqual(a.right_camera_intrinsics,
                                        b.right_camera_intrinsics)
                else:
                    self.assertEqual(a.right_camera_intrinsics,
                                     b.right_camera_intrinsics)
                if key == 'lens_focal_distance':
                    self.assertEqual(val, b.lens_focal_distance)
                    self.assertNotEqual(a.lens_focal_distance,
                                        b.lens_focal_distance)
                else:
                    self.assertEqual(a.lens_focal_distance,
                                     b.lens_focal_distance)
                if key == 'aperture':
                    self.assertEqual(val, b.aperture)
                    self.assertNotEqual(a.aperture, b.aperture)
                else:
                    self.assertEqual(a.aperture, b.aperture)
                if key == 'simulation_world':
                    self.assertEqual(val, b.simulation_world)
                    self.assertNotEqual(a.simulation_world, b.simulation_world)
                else:
                    self.assertEqual(a.simulation_world, b.simulation_world)
                if key == 'lighting_model':
                    self.assertEqual(val, b.lighting_model)
                    self.assertNotEqual(a.lighting_model, b.lighting_model)
                else:
                    self.assertEqual(a.lighting_model, b.lighting_model)
                if key == 'texture_mipmap_bias':
                    self.assertEqual(val, b.texture_mipmap_bias)
                    self.assertNotEqual(a.texture_mipmap_bias,
                                        b.texture_mipmap_bias)
                else:
                    self.assertEqual(a.texture_mipmap_bias,
                                     b.texture_mipmap_bias)
                if key == 'normal_maps_enabled':
                    self.assertEqual(val, b.normal_maps_enabled)
                    self.assertNotEqual(a.normal_maps_enabled,
                                        b.normal_maps_enabled)
                else:
                    self.assertEqual(a.normal_maps_enabled,
                                     b.normal_maps_enabled)
                if key == 'roughness_enabled':
                    self.assertEqual(val, b.roughness_enabled)
                    self.assertNotEqual(a.roughness_enabled,
                                        b.roughness_enabled)
                else:
                    self.assertEqual(a.roughness_enabled, b.roughness_enabled)
                if key == 'geometry_decimation':
                    self.assertEqual(val, b.geometry_decimation)
                    self.assertNotEqual(a.geometry_decimation,
                                        b.geometry_decimation)
                else:
                    self.assertEqual(a.geometry_decimation,
                                     b.geometry_decimation)
                if key == 'procedural_generation_seed':
                    self.assertEqual(val, b.procedural_generation_seed)
                    self.assertNotEqual(a.procedural_generation_seed,
                                        b.procedural_generation_seed)
                else:
                    self.assertEqual(a.procedural_generation_seed,
                                     b.procedural_generation_seed)
                if key == 'labelled_objects':
                    self.assertEqual(val, b.labelled_objects)
                    self.assertNotEqual(a.labelled_objects, b.labelled_objects)
                else:
                    self.assertEqual(a.labelled_objects, b.labelled_objects)
                if key == 'average_scene_depth':
                    self.assertEqual(val, b.average_scene_depth)
                    self.assertNotEqual(a.average_scene_depth,
                                        b.average_scene_depth)
                else:
                    self.assertEqual(a.average_scene_depth,
                                     b.average_scene_depth)

    def assert_metadata_equal(self, metadata1, metadata2):
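        # Compare two ImageMetadata objects property by property, so a
        # failure identifies exactly which field differs.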
        if not isinstance(metadata1, imeta.ImageMetadata):
            self.fail("metadata 1 is not an image metadata")
        if not isinstance(metadata2, imeta.ImageMetadata):
            self.fail("metadata 1 is not an image metadata")
        self.assertEqual(metadata1.hash, metadata2.hash)
        self.assertEqual(metadata1.source_type, metadata2.source_type)
        self.assertEqual(metadata1.environment_type,
                         metadata2.environment_type)
        self.assertEqual(metadata1.light_level, metadata2.light_level)
        self.assertEqual(metadata1.time_of_day, metadata2.time_of_day)
        self.assertEqual(metadata1.height, metadata2.height)
        self.assertEqual(metadata1.width, metadata2.width)
        self.assertEqual(metadata1.camera_pose, metadata2.camera_pose)
        self.assertEqual(metadata1.right_camera_pose,
                         metadata2.right_camera_pose)
        self.assertEqual(metadata1.camera_intrinsics,
                         metadata2.camera_intrinsics)
        self.assertEqual(metadata1.right_camera_intrinsics,
                         metadata2.right_camera_intrinsics)
        self.assertEqual(metadata1.lens_focal_distance,
                         metadata2.lens_focal_distance)
        self.assertEqual(metadata1.aperture, metadata2.aperture)
        self.assertEqual(metadata1.simulator, metadata2.simulator)
        self.assertEqual(metadata1.simulation_world,
                         metadata2.simulation_world)
        self.assertEqual(metadata1.lighting_model, metadata2.lighting_model)
        self.assertEqual(metadata1.texture_mipmap_bias,
                         metadata2.texture_mipmap_bias)
        self.assertEqual(metadata1.normal_maps_enabled,
                         metadata2.normal_maps_enabled)
        self.assertEqual(metadata1.roughness_enabled,
                         metadata2.roughness_enabled)
        self.assertEqual(metadata1.geometry_decimation,
                         metadata2.geometry_decimation)
        self.assertEqual(metadata1.procedural_generation_seed,
                         metadata2.procedural_generation_seed)
        self.assertEqual(metadata1.labelled_objects,
                         metadata2.labelled_objects)
        self.assertEqual(metadata1.average_scene_depth,
                         metadata2.average_scene_depth)
        self.assertEqual(metadata1.base_image, metadata2.base_image)
        self.assertTrue(
            np.array_equal(metadata1.affine_transformation_matrix,
                           metadata2.affine_transformation_matrix))
Code Example #20
    def test_constructor_works_with_minimal_parameters(self):
        imeta.ImageMetadata(source_type=imeta.ImageSourceType.SYNTHETIC,
                            hash_=b'\x1f`\xa8\x8aR\xed\x9f\x0b')
Code Example #21
    def setUp(self):
        self.left_pose = tf.Transform((1, 2, 3), (0.5, 0.5, -0.5, -0.5))
        self.right_pose = tf.Transform(location=self.left_pose.find_independent((0, 0, 15)),
                                       rotation=self.left_pose.rotation_quat(w_first=False),
                                       w_first=False)
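        # The right camera shares the left camera's orientation and sits at
        # the point (0, 0, 15) expressed in the left camera's local frame,
        # forming the stereo baseline.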
        self.metadata = imeta.ImageMetadata(
            hash_=b'\x1f`\xa8\x8aR\xed\x9f\x0b',
            source_type=imeta.ImageSourceType.SYNTHETIC,
            camera_pose=self.left_pose, right_camera_pose=self.right_pose,
            intrinsics=cam_intr.CameraIntrinsics(32, 32, 17, 22, 16, 16),
            right_intrinsics=cam_intr.CameraIntrinsics(32, 32, 8, 12, 16, 16),
            environment_type=imeta.EnvironmentType.INDOOR_CLOSE,
            light_level=imeta.LightingLevel.WELL_LIT, time_of_day=imeta.TimeOfDay.DAY,
            lens_focal_distance=5, aperture=22, simulation_world='TestSimulationWorld',
            lighting_model=imeta.LightingModel.LIT, texture_mipmap_bias=1,
            normal_maps_enabled=True, roughness_enabled=True, geometry_decimation=0.8,
            procedural_generation_seed=16234, labelled_objects=(
                imeta.LabelledObject(class_names=('car',), bounding_box=(12, 144, 67, 43), label_color=(123, 127, 112),
                                     relative_pose=tf.Transform((12, 3, 4), (0.5, 0.1, 1, 1.7)), object_id='Car-002'),
                imeta.LabelledObject(class_names=('cat',), bounding_box=(125, 244, 117, 67), label_color=(27, 89, 62),
                                     relative_pose=tf.Transform((378, -1890, 38), (0.3, 1.12, 1.1, 0.2)),
                                     object_id='cat-090')
            ), average_scene_depth=90.12)

        self.left_data = np.asarray(np.random.uniform(0, 255, (32, 32, 3)), dtype='uint8')
        self.right_data = np.asarray(np.random.uniform(0, 255, (32, 32, 3)), dtype='uint8')
        self.image = im.StereoImage(left_data=self.left_data,
                                    right_data=self.right_data,
                                    metadata=self.metadata)

        self.full_left_pose = tf.Transform((4, 5, 6), (-0.5, 0.5, -0.5, 0.5))
        self.full_right_pose = tf.Transform(location=self.full_left_pose.find_independent((0, 0, 15)),
                                            rotation=self.full_left_pose.rotation_quat(w_first=False),
                                            w_first=False)
        self.full_metadata = imeta.ImageMetadata(
            hash_=b'\x1f`\xa8\x8aR\xed\x9f\x0b',
            source_type=imeta.ImageSourceType.SYNTHETIC,
            camera_pose=self.full_left_pose, right_camera_pose=self.full_right_pose,
            intrinsics=cam_intr.CameraIntrinsics(32, 32, 17, 22, 16, 16),
            right_intrinsics=cam_intr.CameraIntrinsics(32, 32, 8, 12, 16, 16),
            environment_type=imeta.EnvironmentType.INDOOR_CLOSE,
            light_level=imeta.LightingLevel.WELL_LIT, time_of_day=imeta.TimeOfDay.DAY,
            lens_focal_distance=5, aperture=22, simulation_world='TestSimulationWorld',
            lighting_model=imeta.LightingModel.LIT, texture_mipmap_bias=1,
            normal_maps_enabled=True, roughness_enabled=True, geometry_decimation=0.8,
            procedural_generation_seed=16234, labelled_objects=(
                imeta.LabelledObject(class_names=('car',), bounding_box=(12, 144, 67, 43), label_color=(123, 127, 112),
                                     relative_pose=tf.Transform((12, 3, 4), (0.5, 0.1, 1, 1.7)), object_id='Car-002'),
                imeta.LabelledObject(class_names=('cat',), bounding_box=(125, 244, 117, 67), label_color=(27, 89, 62),
                                     relative_pose=tf.Transform((378, -1890, 38), (0.3, 1.12, 1.1, 0.2)),
                                     object_id='cat-090')
            ), average_scene_depth=90.12)
        self.full_left_data = np.asarray(np.random.uniform(0, 255, (32, 32, 3)), dtype='uint8')
        self.full_right_data = np.asarray(np.random.uniform(0, 255, (32, 32, 3)), dtype='uint8')
        self.left_gt_depth = np.asarray(np.random.uniform(0, 255, (32, 32)), dtype='uint8')
        self.right_gt_depth = np.asarray(np.random.uniform(0, 255, (32, 32)), dtype='uint8')
        self.left_depth = np.asarray(np.random.uniform(0, 255, (32, 32)), dtype='uint8')
        self.right_depth = np.asarray(np.random.uniform(0, 255, (32, 32)), dtype='uint8')
        self.left_labels = np.asarray(np.random.uniform(0, 255, (32, 32, 3)), dtype='uint8')
        self.right_labels = np.asarray(np.random.uniform(0, 255, (32, 32, 3)), dtype='uint8')
        self.left_normals = np.asarray(np.random.uniform(0, 255, (32, 32, 3)), dtype='uint8')
        self.right_normals = np.asarray(np.random.uniform(0, 255, (32, 32, 3)), dtype='uint8')
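        # A fully-populated stereo image: RGB, depth, ground-truth depth,
        # per-pixel class labels, and world normals for both cameras.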
        self.full_image = im.StereoImage(
            left_data=self.full_left_data,
            right_data=self.full_right_data,
            left_depth_data=self.left_depth,
            right_depth_data=self.right_depth,
            left_ground_truth_depth_data=self.left_gt_depth,
            right_ground_truth_depth_data=self.right_gt_depth,
            left_labels_data=self.left_labels,
            right_labels_data=self.right_labels,
            left_world_normals_data=self.left_normals,
            right_world_normals_data=self.right_normals,
            metadata=self.full_metadata,
            additional_metadata={
                'Source': 'Generated',
                'Resolution': {'width': 1280, 'height': 720},
                'Material Properties': {
                    'BaseMipMapBias': 0,
                    'RoughnessQuality': True
                }
            }
        )