def test_load_rgbd_dataset_freiburg1_360(self):
    """Import the freiburg1_360 TUM sequence and validate the stored collection."""
    dbconn.connect_to_test_db()
    dbconn.setup_image_manager(mock=False)
    logging.disable(logging.CRITICAL)

    # Start from an empty database so the counts asserted below are meaningful
    ImageCollection.objects.all().delete()
    Image.objects.all().delete()

    result = tum_loader.import_dataset(
        dataset_root / 'rgbd_dataset_freiburg1_360',
        'rgbd_dataset_freiburg1_360')
    self.assertIsInstance(result, ImageCollection)
    self.assertIsNotNone(result.pk)
    self.assertIsNotNone(result.image_group)
    self.assertEqual(1, ImageCollection.objects.all().count())
    # The sequence has 756 RGB images but only 755 depth maps, so 755 frames import
    self.assertEqual(755, Image.objects.all().count())

    # Every imported frame should carry pixels, a depth map, and a camera pose
    with image_manager.get().get_group(result.get_image_group()):
        for _, frame in result:
            self.assertIsNotNone(frame.pixels)
            self.assertIsNotNone(frame.depth)
            self.assertIsNotNone(frame.camera_pose)

    # Clean up after ourselves by dropping the collections for the models
    ImageCollection._mongometa.collection.drop()
    Image._mongometa.collection.drop()
    dbconn.tear_down_image_manager()
    logging.disable(logging.NOTSET)
def tearDownClass(cls):
    """Remove the temporary working folder, the vocabulary folder, and the image manager."""
    if cls.temp_folder.exists():
        shutil.rmtree(cls.temp_folder)
    vocab_folder = Path(__file__).parent / VOCABULARY_FOLDER
    if vocab_folder.exists():
        shutil.rmtree(vocab_folder)
    dbconn.tear_down_image_manager()
def tearDownClass(cls):
    """Drop the Mongo collections for every model this test class touched."""
    for model in (Task, TrialResult, mock_types.MockSystem, ImageCollection, Image):
        model._mongometa.collection.drop()
    dbconn.tear_down_image_manager()
def tearDownClass(cls):
    """Delete all documents created by this test class and release the image manager."""
    # Delete via the query API for most models, then drop the Image collection outright
    for model in (FrameError, FrameErrorResult, TrialResult,
                  VisionSystem, ImageCollection, FrameErrorMetric):
        model.objects.all().delete()
    Image._mongometa.collection.drop()
    dbconn.tear_down_image_manager()
def test_preload_image_data_loads_pixels(self):
    """preload_image_data should pull pixel data from the image group on demand."""
    # Set up a writable image group and spy on its get_image calls
    group_name = 'test'
    dbconn.setup_image_manager()
    group = im_manager.get().get_group(group_name, allow_write=True)
    group.get_image = mock.Mock(wraps=group.get_image)

    # Store an image and let the object go out of scope so no pixels stay in memory
    image_id = make_and_store_image(image_group=group_name)

    system = MockSystem.get_instance()
    self.assertFalse(group.get_image.called)
    stored_image = Image.objects.get({'_id': image_id})
    system.preload_image_data(stored_image)
    self.assertTrue(group.get_image.called)

    # Clean up
    Image._mongometa.collection.drop()
    dbconn.tear_down_image_manager()
def test_load_zipped_sequence(self):
    """Import an NDDS sequence from its archive and verify extraction is cleaned up."""
    dbconn.connect_to_test_db()
    dbconn.setup_image_manager(mock=False)
    logging.disable(logging.CRITICAL)

    # Start from an empty database so the counts asserted below are meaningful
    ImageCollection.objects.all().delete()
    StereoImage.objects.all().delete()

    # Remove any leftover extracted copy so the loader must un-tar the archive
    sequence_name = ZIPPED_SEQUENCE.split('.')[0]
    extracted_folder = DATASET_ROOT / sequence_name
    if extracted_folder.exists():
        shutil.rmtree(extracted_folder)

    result = ndds_loader.import_dataset(
        DATASET_ROOT, sequence_name, DepthNoiseQuality.KINECT_NOISE.name)
    self.assertIsInstance(result, ImageCollection)
    self.assertIsNotNone(result.pk)
    self.assertTrue(result.is_depth_available)
    self.assertTrue(result.is_stereo_available)
    self.assertEqual(1, ImageCollection.objects.all().count())
    # Make sure we got some number of images
    self.assertGreater(StereoImage.objects.all().count(), 0)

    # Every frame should carry both camera poses and a depth map
    for _, frame in result:
        self.assertIsNotNone(frame.camera_pose)
        self.assertIsNotNone(frame.right_camera_pose)
        self.assertIsNotNone(frame.depth)

    # The loader should have removed its extracted working folder
    self.assertFalse(extracted_folder.exists())

    # Clean up after ourselves by dropping the collections for the models
    ImageCollection._mongometa.collection.drop()
    StereoImage._mongometa.collection.drop()
    dbconn.tear_down_image_manager()
    logging.disable(logging.NOTSET)
def test_load_configured_sequence(self):
    """Import the configured NDDS stereo sequence and verify the frame count and poses."""
    dbconn.connect_to_test_db()
    dbconn.setup_image_manager(mock=False)
    logging.disable(logging.CRITICAL)

    # Count the stereo pairs on disk: left PNGs (no extra dot in the stem)
    # that have a matching right image
    left_folder = DATASET_ROOT / SEQUENCE / 'left'
    right_folder = DATASET_ROOT / SEQUENCE / 'right'
    num_images = sum(
        1
        for file in left_folder.iterdir()
        if file.is_file()
        and file.suffix == '.png'
        and '.' not in file.stem
        and (right_folder / file.name).exists()
    )

    # Start from an empty database so the counts asserted below are meaningful
    ImageCollection.objects.all().delete()
    StereoImage.objects.all().delete()

    result = ndds_loader.import_dataset(
        DATASET_ROOT, SEQUENCE, DepthNoiseQuality.KINECT_NOISE.name)
    self.assertIsInstance(result, ImageCollection)
    self.assertIsNotNone(result.pk)
    self.assertTrue(result.is_depth_available)
    self.assertTrue(result.is_stereo_available)
    self.assertEqual(1, ImageCollection.objects.all().count())
    # Make sure we got all the images
    self.assertEqual(num_images, StereoImage.objects.all().count())

    # Every frame should carry both camera poses and a depth map
    for _, frame in result:
        self.assertIsNotNone(frame.camera_pose)
        self.assertIsNotNone(frame.right_camera_pose)
        self.assertIsNotNone(frame.depth)

    # Clean up after ourselves by dropping the collections for the models
    ImageCollection._mongometa.collection.drop()
    StereoImage._mongometa.collection.drop()
    dbconn.tear_down_image_manager()
    logging.disable(logging.NOTSET)
def test_load_configured_sequence(self):
    """Import the configured KITTI stereo sequence and verify images and poses."""
    dbconn.connect_to_test_db()
    dbconn.setup_image_manager(mock=False)
    logging.disable(logging.CRITICAL)

    # Count the stereo pairs on disk: left PNGs that have a matching right image
    sequence_folder = dataset_root / 'dataset' / 'sequences' / '{0:02}'.format(sequence)
    left_images = sequence_folder / 'image_2'
    right_images = sequence_folder / 'image_3'
    num_images = sum(
        1
        for file in left_images.iterdir()
        if file.is_file()
        and file.suffix == '.png'
        and (right_images / file.name).exists()
    )

    # Start from an empty database so the counts asserted below are meaningful
    ImageCollection.objects.all().delete()
    StereoImage.objects.all().delete()

    result = kitti_loader.import_dataset(dataset_root, sequence)
    self.assertIsInstance(result, ImageCollection)
    self.assertIsNotNone(result.pk)
    self.assertIsNotNone(result.image_group)
    self.assertEqual(1, ImageCollection.objects.all().count())
    # Make sure we got all the images
    self.assertEqual(num_images, StereoImage.objects.all().count())

    # Every frame should carry pixels and a pose for both cameras
    with image_manager.get().get_group(result.get_image_group()):
        for _, frame in result:
            self.assertIsNotNone(frame.pixels)
            self.assertIsNotNone(frame.camera_pose)
            self.assertIsNotNone(frame.right_pixels)
            self.assertIsNotNone(frame.right_camera_pose)

    # Clean up after ourselves by dropping the collections for the models
    ImageCollection._mongometa.collection.drop()
    StereoImage._mongometa.collection.drop()
    dbconn.tear_down_image_manager()
    logging.disable(logging.NOTSET)
def test_load_rgbd_dataset_freiburg1_desk_from_tarball(self):
    """Import the freiburg1_desk sequence directly from its .tgz archive."""
    # Ensure the uncompressed dataset doesn't exist, so the loader has to
    # extract it from the tarball
    if (dataset_root / 'rgbd_dataset_freiburg1_desk').is_dir():
        shutil.rmtree(dataset_root / 'rgbd_dataset_freiburg1_desk')

    dbconn.connect_to_test_db()
    dbconn.setup_image_manager(mock=False)
    logging.disable(logging.CRITICAL)

    # Start from an empty database so the counts asserted below are meaningful
    ImageCollection.objects.all().delete()
    Image.objects.all().delete()

    result = tum_loader.import_dataset(
        dataset_root / 'rgbd_dataset_freiburg1_desk.tgz',
        'rgbd_dataset_freiburg1_desk')
    self.assertIsInstance(result, ImageCollection)
    self.assertIsNotNone(result.pk)
    self.assertIsNotNone(result.image_group)
    self.assertEqual(1, ImageCollection.objects.all().count())
    # The desk sequence imports 595 frames (RGB images with matching depth maps)
    self.assertEqual(595, Image.objects.all().count())

    # Every imported frame should carry pixels, a depth map, and a camera pose
    with image_manager.get().get_group(result.get_image_group()):
        for _, frame in result:
            self.assertIsNotNone(frame.pixels)
            self.assertIsNotNone(frame.depth)
            self.assertIsNotNone(frame.camera_pose)

    # The loader should have removed the data it extracted from the tarball
    self.assertFalse((dataset_root / 'rgbd_dataset_freiburg1_desk').exists())

    # Clean up after ourselves by dropping the collections for the models
    ImageCollection._mongometa.collection.drop()
    Image._mongometa.collection.drop()
    dbconn.tear_down_image_manager()
    logging.disable(logging.NOTSET)
def test_load_configured_sequence(self):
    """Import the configured TUM RGB-D sequence and verify the frame count and data."""
    dbconn.connect_to_test_db()
    dbconn.setup_image_manager(mock=False)
    logging.disable(logging.CRITICAL)

    # Start from an empty database so the counts asserted below are meaningful
    ImageCollection.objects.all().delete()
    Image.objects.all().delete()

    # A frame needs both an RGB image and a depth map, so the expected count
    # is the smaller of the two folders' PNG counts
    rgb_images = dataset_root / sequence / 'rgb'
    depth_images = dataset_root / sequence / 'depth'
    num_images = min(
        sum(1 for file in rgb_images.iterdir()
            if file.is_file() and file.suffix == '.png'),
        sum(1 for file in depth_images.iterdir()
            if file.is_file() and file.suffix == '.png'),
    )

    result = tum_loader.import_dataset(dataset_root / sequence, sequence)
    self.assertIsInstance(result, ImageCollection)
    self.assertIsNotNone(result.pk)
    self.assertIsNotNone(result.image_group)
    self.assertEqual(1, ImageCollection.objects.all().count())
    # Make sure we got all the frames that have both RGB and depth data
    self.assertEqual(num_images, Image.objects.all().count())

    # Every imported frame should carry pixels, a depth map, and a camera pose
    with image_manager.get().get_group(result.get_image_group()):
        for _, frame in result:
            self.assertIsNotNone(frame.pixels)
            self.assertIsNotNone(frame.depth)
            self.assertIsNotNone(frame.camera_pose)

    # Clean up after ourselves by dropping the collections for the models
    ImageCollection._mongometa.collection.drop()
    Image._mongometa.collection.drop()
    dbconn.tear_down_image_manager()
    logging.disable(logging.NOTSET)
def tearDownClass(cls):
    """Remove the temporary folder and release the image manager.

    Guards the rmtree with an existence check so teardown does not raise
    FileNotFoundError when the folder was never created (e.g. setUpClass
    failed part-way) — consistent with the other tearDownClass implementations
    in this codebase.
    """
    if cls.temp_folder.exists():
        shutil.rmtree(cls.temp_folder)
    dbconn.tear_down_image_manager()
def tearDownClass(cls) -> None:
    """Release the image manager, then remove the temporary folder if it exists."""
    dbconn.tear_down_image_manager()
    if cls.temp_folder.exists():
        shutil.rmtree(cls.temp_folder)
def tearDownClass(cls) -> None:
    """Release the image manager set up for this test class."""
    dbconn.tear_down_image_manager()
def tearDownClass(cls):
    """Drop the VisionSystem collection used by this test class."""
    VisionSystem._mongometa.collection.drop()
    dbconn.tear_down_image_manager()
def tearDownClass(cls):
    """Drop the Mongo collections for every model this test class used."""
    for model in (Task, Image, ImageCollection):
        model._mongometa.collection.drop()
    dbconn.tear_down_image_manager()