def __init__(
    self,
    scene_json,
    datum_names=None,
    requested_annotations=None,
    requested_autolabels=None,
    backward_context=0,
    forward_context=0,
    generate_depth_from_datum=None,
    only_annotated_datums=False
):
    """Build a synchronized dataset from a single scene JSON.

    The scene (and its calibration table) is extracted from ``scene_json``
    and wrapped in a one-element scene list before delegating to the
    parent constructor with ``is_scene_dataset=True``.

    Parameters
    ----------
    scene_json: str
        Scene JSON to extract the single scene from.
    datum_names: list, optional
        Datum names to load; forwarded to the parent constructor.
    requested_annotations: list, optional
        Annotations to load; forwarded to the parent constructor.
    requested_autolabels: list, optional
        Autolabels to load; also used during scene extraction.
    backward_context: int, optional
        Backward context size. Default: 0.
    forward_context: int, optional
        Forward context size. Default: 0.
    generate_depth_from_datum: str, optional
        Datum to generate depth from, if any. Default: None.
    only_annotated_datums: bool, optional
        If True, restrict to annotated datums. Default: False.
    """
    # Pull the single scene plus its calibration out of the scene JSON.
    single_scene, calibration = BaseSceneDataset._extract_scene_from_scene_json(
        scene_json, requested_autolabels=requested_autolabels
    )
    # Aggregate dataset-level metadata for the one-scene container.
    metadata = DatasetMetadata.from_scene_containers([single_scene])
    # Delegate to the parent SynchronizedDataset-style constructor.
    super().__init__(
        metadata,
        scenes=[single_scene],
        calibration_table=calibration,
        datum_names=datum_names,
        requested_annotations=requested_annotations,
        requested_autolabels=requested_autolabels,
        backward_context=backward_context,
        forward_context=forward_context,
        generate_depth_from_datum=generate_depth_from_datum,
        is_scene_dataset=True,
        only_annotated_datums=only_annotated_datums
    )
def __init__(
    self,
    scene_dataset_json,
    split='train',
    datum_names=None,
    requested_annotations=None,
    requested_autolabels=None,
    backward_context=0,
    forward_context=0,
    generate_depth_from_datum=None,
    only_annotated_datums=False
):
    """Build a synchronized dataset from a scene dataset JSON and split.

    All scenes for the requested ``split`` (and their calibration table)
    are extracted from ``scene_dataset_json`` before delegating to the
    parent constructor with ``is_scene_dataset=True``.

    Parameters
    ----------
    scene_dataset_json: str
        Scene dataset JSON to extract scenes from.
    split: str, optional
        Dataset split to load. Default: 'train'.
    datum_names: list, optional
        Datum names to load; forwarded to the parent constructor.
    requested_annotations: list, optional
        Annotations to load; forwarded to the parent constructor.
    requested_autolabels: list, optional
        Autolabels to load; also used during scene extraction.
    backward_context: int, optional
        Backward context size. Default: 0.
    forward_context: int, optional
        Forward context size. Default: 0.
    generate_depth_from_datum: str, optional
        Datum to generate depth from, if any. Default: None.
    only_annotated_datums: bool, optional
        If True, restrict to annotated datums. Default: False.
    """
    # Extract every scene for this split, along with the calibration table.
    split_scenes, calibration = BaseSceneDataset._extract_scenes_from_scene_dataset_json(
        scene_dataset_json, split=split, requested_autolabels=requested_autolabels
    )
    # Aggregate dataset-level metadata across the extracted scenes.
    metadata = DatasetMetadata.from_scene_containers(split_scenes)
    # Delegate to the parent SynchronizedDataset-style constructor.
    super().__init__(
        metadata,
        scenes=split_scenes,
        calibration_table=calibration,
        datum_names=datum_names,
        requested_annotations=requested_annotations,
        requested_autolabels=requested_autolabels,
        backward_context=backward_context,
        forward_context=forward_context,
        generate_depth_from_datum=generate_depth_from_datum,
        is_scene_dataset=True,
        only_annotated_datums=only_annotated_datums
    )
def __init__(
    self,
    scene_dataset_json,
    split='train',
    datum_names=None,
    requested_annotations=None,
    requested_autolabels=None,
    backward_context=0,
    forward_context=0,
    generate_depth_from_datum=None,
    only_annotated_datums=False,
    use_virtual_camera_datums=True,
    skip_missing_data=False,
    accumulation_context=None,
    dataset_root=None,
    transform_accumulated_box_points=False,
    use_diskcache=True,
    autolabel_root=None,
):
    """Build a synchronized dataset from a scene dataset JSON and split.

    Scenes for the requested ``split`` are extracted with synchronized
    datums (``is_datums_synchronized=True``), metadata is aggregated
    across them, and construction is delegated to the parent class.

    Parameters
    ----------
    scene_dataset_json: str
        Scene dataset JSON to extract scenes from.
    split: str, optional
        Dataset split to load. Default: 'train'.
    datum_names: list, optional
        Datum names to load; forwarded to the parent constructor.
    requested_annotations: list, optional
        Annotations to load; forwarded to the parent constructor.
    requested_autolabels: list, optional
        Autolabels to load; also used during scene extraction and
        metadata aggregation.
    backward_context: int, optional
        Backward context size. Default: 0.
    forward_context: int, optional
        Forward context size. Default: 0.
    generate_depth_from_datum: str, optional
        Datum to generate depth from, if any. Default: None.
    only_annotated_datums: bool, optional
        If True, restrict to annotated datums. Default: False.
    use_virtual_camera_datums: bool, optional
        Forwarded to the parent constructor. Default: True.
    skip_missing_data: bool, optional
        If True, tolerate missing data during scene extraction.
        Default: False.
    accumulation_context: optional
        Accumulation context; forwarded to the parent constructor.
        Default: None.
    dataset_root: str, optional
        Root directory used during scene extraction. Default: None.
    transform_accumulated_box_points: bool, optional
        Forwarded to the parent constructor. Default: False.
    use_diskcache: bool, optional
        If False, scenes are kept in memory (a warning is emitted since
        this may exhaust memory on large datasets). Default: True.
    autolabel_root: str, optional
        Root directory for autolabels; used for extraction, metadata
        aggregation, and the parent constructor. Default: None.
    """
    if not use_diskcache:
        logging.warning('Instantiating a dataset with use_diskcache=False may exhaust memory with a large dataset.')

    # Extract all synchronized scenes for this split from the dataset JSON.
    split_scenes = BaseDataset._extract_scenes_from_scene_dataset_json(
        scene_dataset_json,
        split,
        requested_autolabels,
        is_datums_synchronized=True,
        skip_missing_data=skip_missing_data,
        dataset_root=dataset_root,
        use_diskcache=use_diskcache,
        autolabel_root=autolabel_root,
    )

    # Aggregate dataset-level metadata across the extracted scenes.
    metadata = DatasetMetadata.from_scene_containers(
        split_scenes,
        requested_annotations,
        requested_autolabels,
        autolabel_root=autolabel_root,
    )

    # Delegate to the parent SynchronizedDataset-style constructor.
    super().__init__(
        metadata,
        scenes=split_scenes,
        datum_names=datum_names,
        requested_annotations=requested_annotations,
        requested_autolabels=requested_autolabels,
        backward_context=backward_context,
        forward_context=forward_context,
        generate_depth_from_datum=generate_depth_from_datum,
        only_annotated_datums=only_annotated_datums,
        use_virtual_camera_datums=use_virtual_camera_datums,
        accumulation_context=accumulation_context,
        transform_accumulated_box_points=transform_accumulated_box_points,
        autolabel_root=autolabel_root,
    )
def get_dataset_split(dataset_path):
    """Return the list of splits declared in a dataset JSON.

    Thin wrapper around ``DatasetMetadata.get_dataset_splits``.

    Parameters
    ----------
    dataset_path: str
        Full path to the dataset json holding dataset metadata,
        ontology, and image and annotation paths.

    Returns
    -------
    dataset_splits: list of str
        List of dataset splits (train | val | test | train_overfit).
    """
    splits = DatasetMetadata.get_dataset_splits(dataset_path)
    return splits