def test_load_rgbd_dataset_freiburg1_360(self):
    """Import the freiburg1_360 TUM sequence and check every frame has pixels, depth, and pose.

    Requires a real (non-mock) image manager and a test database connection.
    """
    dbconn.connect_to_test_db()
    dbconn.setup_image_manager(mock=False)
    logging.disable(logging.CRITICAL)
    try:
        # Make sure there is nothing in the database
        ImageCollection.objects.all().delete()
        Image.objects.all().delete()

        result = tum_loader.import_dataset(
            dataset_root / 'rgbd_dataset_freiburg1_360',
            'rgbd_dataset_freiburg1_360')
        self.assertIsInstance(result, ImageCollection)
        self.assertIsNotNone(result.pk)
        self.assertIsNotNone(result.image_group)
        self.assertEqual(1, ImageCollection.objects.all().count())
        # Make sure we got all the images (there are 756 RGB images but only 755 depth maps)
        self.assertEqual(755, Image.objects.all().count())

        # Make sure we got the depth and position data
        with image_manager.get().get_group(result.get_image_group()):
            for timestamp, image in result:
                self.assertIsNotNone(image.pixels)
                self.assertIsNotNone(image.depth)
                self.assertIsNotNone(image.camera_pose)
    finally:
        # Always drop the model collections, tear down the image manager, and
        # re-enable logging, even if an assertion above failed -- otherwise a
        # failure leaks state into every subsequent test.
        ImageCollection._mongometa.collection.drop()
        Image._mongometa.collection.drop()
        dbconn.tear_down_image_manager()
        logging.disable(logging.NOTSET)
def setUpClass(cls):
    """Build a synthetic sequential stereo image collection for the tests to consume."""
    dbconn.setup_image_manager()
    cls.temp_folder.mkdir(parents=True, exist_ok=True)
    cls.path_manager = PathManager([Path(__file__).parent], cls.temp_folder)
    builder = DemoImageBuilder(
        mode=ImageMode.STEREO,
        stereo_offset=0.15,
        width=320,
        height=240,
        num_stars=500,
        length=cls.max_time * cls.speed,
        speed=cls.speed,
        min_size=4,
        max_size=50,
    )
    # Render one frame per timestep and wrap them as a sequential collection
    frames = [builder.create_frame(frame_time) for frame_time in range(cls.num_frames)]
    cls.image_collection = ImageCollection(
        images=frames,
        timestamps=list(range(len(frames))),
        sequence_type=ImageSequenceType.SEQUENTIAL,
    )
def setUpClass(cls):
    """Connect to the test database and save a mock system and mock image source."""
    dbconn.connect_to_test_db()
    dbconn.setup_image_manager()
    cls.path_manager = PathManager(['~'], '~/tmp')
    cls.system = mock_types.MockSystem()
    cls.image_source = mock_types.MockImageSource()
    cls.image_source.build_images()
    cls.system.save()
    cls.image_source.save()
def setUpClass(cls):
    """Save a single-image sequential collection for the tests to read back."""
    dbconn.connect_to_test_db()
    dbconn.setup_image_manager()
    cls.image = mock_types.make_image()
    cls.image.save()
    cls.image_collection = ImageCollection(
        images=[cls.image],
        timestamps=[1.2],
        sequence_type=ImageSequenceType.SEQUENTIAL,
    )
    cls.image_collection.save()
def setUpClass(cls):
    """Create and persist the image sources, systems, and metrics shared by the tests."""
    dbconn.connect_to_test_db()
    dbconn.setup_image_manager()
    # Create the basic image sources, systems, and metrics.
    cls.image_collections = [make_image_collection() for _ in range(2)]
    cls.systems = [mock_types.MockSystem() for _ in range(2)]
    for sys_obj in cls.systems:
        sys_obj.save()
    cls.metrics = [mock_types.MockMetric() for _ in range(2)]
    for metric_obj in cls.metrics:
        metric_obj.save()
def test_preload_image_data_loads_pixels(self):
    """preload_image_data should pull the pixel data back through the image manager."""
    # Mock the image manager
    group_name = 'test'
    dbconn.setup_image_manager()
    try:
        image_group = im_manager.get().get_group(group_name, allow_write=True)
        # Wrap get_image so we can detect when pixel data is actually read
        image_group.get_image = mock.Mock(wraps=image_group.get_image)

        # Make an image, and then let it go out of scope, so the data is not in memory
        image_id = make_and_store_image(image_group=group_name)
        system = MockSystem.get_instance()
        self.assertFalse(image_group.get_image.called)

        image = Image.objects.get({'_id': image_id})
        system.preload_image_data(image)
        self.assertTrue(image_group.get_image.called)
    finally:
        # Drop the Image collection and tear down the image manager even if an
        # assertion above failed, so state does not leak into other tests.
        Image._mongometa.collection.drop()
        dbconn.tear_down_image_manager()
def setUpClass(cls):
    """Create image sources, systems, metrics, and one successful trial per pairing."""
    dbconn.connect_to_test_db()
    dbconn.setup_image_manager()
    # Create the basic image sources, systems, and metrics.
    cls.image_collections = [make_image_collection() for _ in range(2)]
    cls.systems = [mock_types.MockSystem() for _ in range(2)]
    for sys_obj in cls.systems:
        sys_obj.save()
    cls.metrics = [mock_types.MockMetric() for _ in range(2)]
    for metric_obj in cls.metrics:
        metric_obj.save()
    # One successful trial result for every (image source, system) pair
    for source in cls.image_collections:
        for sys_obj in cls.systems:
            trial = mock_types.MockTrialResult(
                system=sys_obj,
                image_source=source,
                success=True,
            )
            trial.save()
            cls.trials.append(trial)
def test_load_configured_sequence(self):
    """Import the configured KITTI sequence and check stereo pixels and poses are present."""
    dbconn.connect_to_test_db()
    dbconn.setup_image_manager(mock=False)
    logging.disable(logging.CRITICAL)
    try:
        # count the number of images we expect to import
        left_images = dataset_root / 'dataset' / 'sequences' / '{0:02}'.format(sequence) / 'image_2'
        right_images = dataset_root / 'dataset' / 'sequences' / '{0:02}'.format(sequence) / 'image_3'
        num_images = sum(
            1 for file in left_images.iterdir()
            if file.is_file() and file.suffix == '.png' and (right_images / file.name).exists()
        )

        # Make sure there is nothing in the database
        ImageCollection.objects.all().delete()
        StereoImage.objects.all().delete()

        result = kitti_loader.import_dataset(dataset_root, sequence)
        self.assertIsInstance(result, ImageCollection)
        self.assertIsNotNone(result.pk)
        self.assertIsNotNone(result.image_group)
        self.assertEqual(1, ImageCollection.objects.all().count())
        # Make sure we got all the images
        self.assertEqual(num_images, StereoImage.objects.all().count())

        # Make sure we got the pixel and position data for both cameras
        with image_manager.get().get_group(result.get_image_group()):
            for timestamp, image in result:
                self.assertIsNotNone(image.pixels)
                self.assertIsNotNone(image.camera_pose)
                self.assertIsNotNone(image.right_pixels)
                self.assertIsNotNone(image.right_camera_pose)
    finally:
        # Always drop the model collections, tear down the image manager, and
        # re-enable logging, even if an assertion above failed.
        ImageCollection._mongometa.collection.drop()
        StereoImage._mongometa.collection.drop()
        dbconn.tear_down_image_manager()
        logging.disable(logging.NOTSET)
def test_load_zipped_sequence(self):
    """Import an NDDS sequence from its archive and check depth, stereo, and cleanup."""
    dbconn.connect_to_test_db()
    dbconn.setup_image_manager(mock=False)
    logging.disable(logging.CRITICAL)
    try:
        # Make sure there is nothing in the database
        ImageCollection.objects.all().delete()
        StereoImage.objects.all().delete()

        # Make sure the un-tarred folder does not exist
        sequence_name = ZIPPED_SEQUENCE.split('.')[0]
        extracted_folder = DATASET_ROOT / sequence_name
        if extracted_folder.exists():
            shutil.rmtree(extracted_folder)

        result = ndds_loader.import_dataset(
            DATASET_ROOT, sequence_name, DepthNoiseQuality.KINECT_NOISE.name)
        self.assertIsInstance(result, ImageCollection)
        self.assertIsNotNone(result.pk)
        self.assertTrue(result.is_depth_available)
        self.assertTrue(result.is_stereo_available)
        self.assertEqual(1, ImageCollection.objects.all().count())
        # Make sure we got some number of images
        self.assertGreater(StereoImage.objects.all().count(), 0)

        # Make sure we got position data and depth for all frames
        for timestamp, image in result:
            self.assertIsNotNone(image.camera_pose)
            self.assertIsNotNone(image.right_camera_pose)
            self.assertIsNotNone(image.depth)

        # Make sure the extracted folder is cleaned up
        self.assertFalse(extracted_folder.exists())
    finally:
        # Always drop the model collections, tear down the image manager, and
        # re-enable logging, even if an assertion above failed.
        ImageCollection._mongometa.collection.drop()
        StereoImage._mongometa.collection.drop()
        dbconn.tear_down_image_manager()
        logging.disable(logging.NOTSET)
def test_load_configured_sequence(self):
    """Import the configured NDDS sequence and check every frame has poses and depth."""
    dbconn.connect_to_test_db()
    dbconn.setup_image_manager(mock=False)
    logging.disable(logging.CRITICAL)
    try:
        # count the number of images we expect to import
        left_folder = DATASET_ROOT / SEQUENCE / 'left'
        right_folder = DATASET_ROOT / SEQUENCE / 'right'
        num_images = sum(
            1 for file in left_folder.iterdir()
            if file.is_file() and file.suffix == '.png'
            and '.' not in file.stem
            and (right_folder / file.name).exists()
        )

        # Make sure there is nothing in the database
        ImageCollection.objects.all().delete()
        StereoImage.objects.all().delete()

        result = ndds_loader.import_dataset(
            DATASET_ROOT, SEQUENCE, DepthNoiseQuality.KINECT_NOISE.name)
        self.assertIsInstance(result, ImageCollection)
        self.assertIsNotNone(result.pk)
        self.assertTrue(result.is_depth_available)
        self.assertTrue(result.is_stereo_available)
        self.assertEqual(1, ImageCollection.objects.all().count())
        # Make sure we got all the images
        self.assertEqual(num_images, StereoImage.objects.all().count())

        # Make sure we got the position data
        for timestamp, image in result:
            self.assertIsNotNone(image.camera_pose)
            self.assertIsNotNone(image.right_camera_pose)
            self.assertIsNotNone(image.depth)
    finally:
        # Always drop the model collections, tear down the image manager, and
        # re-enable logging, even if an assertion above failed.
        ImageCollection._mongometa.collection.drop()
        StereoImage._mongometa.collection.drop()
        dbconn.tear_down_image_manager()
        logging.disable(logging.NOTSET)
def test_load_rgbd_dataset_freiburg1_desk_from_tarball(self):
    """Import the freiburg1_desk sequence directly from its .tgz archive.

    Also checks the loader removes the extracted folder when it is done.
    """
    # Ensure the uncompressed dataset doesn't exist, so we can be sure the
    # loader actually reads from the tarball rather than a leftover folder
    if (dataset_root / 'rgbd_dataset_freiburg1_desk').is_dir():
        shutil.rmtree(dataset_root / 'rgbd_dataset_freiburg1_desk')

    dbconn.connect_to_test_db()
    dbconn.setup_image_manager(mock=False)
    logging.disable(logging.CRITICAL)
    try:
        # Make sure there is nothing in the database
        ImageCollection.objects.all().delete()
        Image.objects.all().delete()

        result = tum_loader.import_dataset(
            dataset_root / 'rgbd_dataset_freiburg1_desk.tgz',
            'rgbd_dataset_freiburg1_desk')
        self.assertIsInstance(result, ImageCollection)
        self.assertIsNotNone(result.pk)
        self.assertIsNotNone(result.image_group)
        self.assertEqual(1, ImageCollection.objects.all().count())
        # Make sure we got all the images in the sequence
        self.assertEqual(595, Image.objects.all().count())

        # Make sure we got the depth and position data
        with image_manager.get().get_group(result.get_image_group()):
            for timestamp, image in result:
                self.assertIsNotNone(image.pixels)
                self.assertIsNotNone(image.depth)
                self.assertIsNotNone(image.camera_pose)

        # Make sure the loader cleaned up after itself by removing the extracted data
        self.assertFalse((dataset_root / 'rgbd_dataset_freiburg1_desk').exists())
    finally:
        # Always drop the model collections, tear down the image manager, and
        # re-enable logging, even if an assertion above failed.
        ImageCollection._mongometa.collection.drop()
        Image._mongometa.collection.drop()
        dbconn.tear_down_image_manager()
        logging.disable(logging.NOTSET)
def test_load_configured_sequence(self):
    """Import the configured TUM sequence and check every frame has pixels, depth, and pose."""
    dbconn.connect_to_test_db()
    dbconn.setup_image_manager(mock=False)
    logging.disable(logging.CRITICAL)
    try:
        # Make sure there is nothing in the database
        ImageCollection.objects.all().delete()
        Image.objects.all().delete()

        # count the number of images we expect to import -- one frame per
        # matched RGB/depth pair, so the smaller of the two folder counts
        rgb_images = dataset_root / sequence / 'rgb'
        depth_images = dataset_root / sequence / 'depth'
        num_images = min(
            sum(1 for file in rgb_images.iterdir() if file.is_file() and file.suffix == '.png'),
            sum(1 for file in depth_images.iterdir() if file.is_file() and file.suffix == '.png'),
        )

        result = tum_loader.import_dataset(dataset_root / sequence, sequence)
        self.assertIsInstance(result, ImageCollection)
        self.assertIsNotNone(result.pk)
        self.assertIsNotNone(result.image_group)
        self.assertEqual(1, ImageCollection.objects.all().count())
        # Make sure we got all the images
        self.assertEqual(num_images, Image.objects.all().count())

        # Make sure we got the depth and position data
        with image_manager.get().get_group(result.get_image_group()):
            for timestamp, image in result:
                self.assertIsNotNone(image.pixels)
                self.assertIsNotNone(image.depth)
                self.assertIsNotNone(image.camera_pose)
    finally:
        # Always drop the model collections, tear down the image manager, and
        # re-enable logging, even if an assertion above failed.
        ImageCollection._mongometa.collection.drop()
        Image._mongometa.collection.drop()
        dbconn.tear_down_image_manager()
        logging.disable(logging.NOTSET)
def setUpClass(cls):
    """Set up the image manager, the scratch folder, and ensure a vocab file exists."""
    dbconn.setup_image_manager()
    os.makedirs(cls.temp_folder, exist_ok=True)
    if not cls.vocab_path.exists():
        # If there is no vocab file, make one
        print("Creating vocab file, this may take a while...")
        create_vocab(cls.vocab_path)
def setUpClass(cls):
    """Silence logging output and set up the image manager for the test class."""
    logging.disable(logging.CRITICAL)
    dbconn.setup_image_manager()
def setUpClass(cls) -> None:
    """Configure INFO-level logging and set up the image manager for the test class."""
    # logging.INFO == 20; use the named constant rather than the magic number
    logging.basicConfig(level=logging.INFO)
    dbconn.setup_image_manager()
def setUpClass(cls):
    """Connect to the test database and set up the image manager."""
    dbconn.connect_to_test_db()
    dbconn.setup_image_manager()
def setUpClass(cls) -> None:
    """Set up the image manager shared by the tests in this class."""
    dbconn.setup_image_manager()