Example #1
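Three unit tests exercising the DensePose data-loading utilities: ImageResizeTransform, ImageListDataset, and VideoKeyframeDataset. The snippets below assume the following imports; the densepose module paths follow the DensePose project layout under detectron2 and are an assumption here, not part of the original listing.

import random
import unittest

import torch

# Assumed module paths (DensePose project under detectron2):
from densepose.data.image_list_dataset import ImageListDataset
from densepose.data.transform import ImageResizeTransform
from densepose.data.video import RandomKFramesSelector, VideoKeyframeDataset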
class TestImageResizeTransform(unittest.TestCase):
    def test_image_resize_1(self):
        # A batch of 3 uniform gray 100x100 RGB images in HWC uint8 layout.
        images_batch = torch.ones((3, 100, 100, 3), dtype=torch.uint8) * 100
        transform = ImageResizeTransform()
        images_transformed = transform(images_batch)
        # Expected output: NCHW float, with the square images resized to
        # 800x800 and pixel values preserved exactly.
        IMAGES_GT = torch.ones((3, 3, 800, 800), dtype=torch.float) * 100
        self.assertEqual(images_transformed.size(), IMAGES_GT.size())
        self.assertAlmostEqual(torch.abs(IMAGES_GT - images_transformed).max().item(), 0.0)
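The next test relies on a temp_image helper that the listing does not show. A minimal sketch, assuming the fixture only needs to write a readable image file and yield its path (the actual helper in the original test suite may differ):

import contextlib
import os
import tempfile

import torch
from torchvision.utils import save_image


@contextlib.contextmanager
def temp_image(height, width):
    # Hypothetical reconstruction of the unshown fixture: write a random
    # RGB image to a temporary JPEG and yield its path; the file is
    # removed when the context exits.
    with tempfile.TemporaryDirectory() as tmp_dir:
        fpath = os.path.join(tmp_dir, "image.jpg")
        save_image(torch.rand(3, height, width), fpath)
        yield fpath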
class TestImageListDataset(unittest.TestCase):
    def test_image_list_dataset_with_transform(self):
        height, width = 720, 1280
        with temp_image(height, width) as image_fpath:
            image_list = [image_fpath]
            category_list = [None]
            transform = ImageResizeTransform()
            dataset = ImageListDataset(image_list, category_list, transform)
            self.assertEqual(len(dataset), 1)
            data1, categories1 = dataset[0]["images"], dataset[0]["categories"]
            # 720x1280 is scaled by min(800 / 720, 1333 / 1280) = 1333 / 1280,
            # so the resized image is 749x1333.
            self.assertEqual(data1.shape, torch.Size((1, 3, 749, 1333)))
            self.assertEqual(data1.dtype, torch.float32)
            self.assertIsNone(categories1[0])
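Similarly, temp_video is an unshown fixture. A minimal sketch, assuming it encodes random frames with torchvision.io.write_video and yields the file path together with the raw frames (the signature is inferred from the call site below; the original helper may accept more options):

import contextlib
import os
import tempfile

import torch
import torchvision.io as io


@contextlib.contextmanager
def temp_video(num_frames, height, width, fps, video_codec="libx264"):
    # Hypothetical reconstruction of the unshown fixture: encode random
    # THWC uint8 frames into a temporary video file and yield
    # (path, frames); the file is removed when the context exits.
    with tempfile.TemporaryDirectory() as tmp_dir:
        fpath = os.path.join(tmp_dir, "video.mp4")
        data = torch.randint(0, 256, (num_frames, height, width, 3), dtype=torch.uint8)
        io.write_video(fpath, data, fps=fps, video_codec=video_codec)
        yield fpath, data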
class TestVideoKeyframeDataset(unittest.TestCase):
    def test_read_keyframes_with_selector_with_transform(self):
        with temp_video(60, 300, 300, 5, video_codec="mpeg4") as (fname, data):
            video_list = [fname]
            random.seed(0)
            # Select 1 keyframe at random from each video.
            frame_selector = RandomKFramesSelector(1)
            transform = ImageResizeTransform()
            dataset = VideoKeyframeDataset(video_list, frame_selector, transform)
            self.assertEqual(len(dataset), 1)
            data1 = dataset[0]
            # One 300x300 keyframe resized to 800x800, NCHW float.
            self.assertEqual(data1.shape, torch.Size((1, 3, 800, 800)))
            self.assertEqual(data1.dtype, torch.float32)
            return
        # Defensive guard from the original test: fail loudly if the block
        # above is ever exited without running the assertions.
        self.assertTrue(False)
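Each class above is a standard unittest.TestCase, so the whole listing can be run with the stock runner, e.g. python -m unittest test_densepose_data.py (the file name here is hypothetical).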