class CORe50Test(unittest.TestCase):
    @unittest.skipIf(
        FAST_TEST or is_github_action(),
        "We don't want to download large datasets in github actions.",
    )
    def test_core50_ni_benchmark(self):
        benchmark = CORe50(scenario="ni")
        # Smoke test: just make sure every experience can be loaded.
        for experience in benchmark.train_stream:
            pass

    @unittest.skipIf(
        FAST_TEST or is_github_action(),
        "We don't want to download large datasets in github actions.",
    )
    def test_core50_nc_benchmark(self):
        benchmark_instance = CORe50(scenario="nc")

        # The NC scenario has a single test experience covering all
        # 50 CORe50 classes.
        self.assertEqual(1, len(benchmark_instance.test_stream))
        classes_in_test = benchmark_instance.classes_in_experience["test"][0]
        self.assertSetEqual(set(range(50)), set(classes_in_test))
class TinyImagenetTest(unittest.TestCase):
    @unittest.skipIf(
        FAST_TEST or is_github_action(),
        "We don't want to download large datasets in github actions.",
    )
    def test_tinyimagenet_default_loader(self):
        logger = logging.getLogger("avalanche")
        logger.setLevel(logging.INFO)
        logger.addHandler(logging.StreamHandler())

        benchmark = SplitTinyImageNet()
        for task_info in benchmark.train_stream:
            self.assertIsInstance(task_info, Experience)
        for task_info in benchmark.test_stream:
            self.assertIsInstance(task_info, Experience)
class EndlessCLSimTest(unittest.TestCase):
    SCENARIO_NAMES = ("Classes", "Illumination", "Weather")

    @unittest.skipIf(
        FAST_TEST or is_github_action(),
        "We don't want to download large datasets in github actions.",
    )
    def test_endless_cl_classification(self):
        if "FAST_TEST" in os.environ:
            return

        for scenario_name in self.SCENARIO_NAMES:
            with self.subTest(scenario=scenario_name):
                scenario = EndlessCLSim(
                    scenario=scenario_name,
                    sequence_order=None,
                    task_order=None,
                    semseg=False,
                    dataset_root=None,
                )
                # Smoke test: make sure every experience can be loaded.
                for experience in scenario.train_stream:
                    pass

    @unittest.skipIf(
        FAST_TEST or is_github_action(),
        "We don't want to download large datasets in github actions.",
    )
    def test_endless_cl_video(self):
        if "FAST_TEST" in os.environ:
            return

        for scenario_name in self.SCENARIO_NAMES:
            with self.subTest(scenario=scenario_name):
                scenario = EndlessCLSim(
                    scenario=scenario_name,
                    sequence_order=None,
                    task_order=None,
                    semseg=True,
                    dataset_root=None,
                )
                # Smoke test: make sure every experience can be loaded.
                for experience in scenario.train_stream:
                    pass
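# The determinism test in CTrLTests below compares raw samples, which may
# be tuples containing tensors. This is a minimal sketch of the
# `custom_equals` helper it relies on; the original test module ships its
# own implementation, so treat this as an assumption about its behavior
# rather than the canonical code.
def custom_equals(a, b):
    import torch

    if isinstance(a, torch.Tensor) and isinstance(b, torch.Tensor):
        # Tensors need an explicit element-wise comparison.
        return torch.equal(a, b)
    if isinstance(a, (tuple, list)) and isinstance(b, (tuple, list)):
        return len(a) == len(b) and all(
            custom_equals(x, y) for x, y in zip(a, b)
        )
    return a == b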
class CTrLTests(unittest.TestCase):
    stream_lengths = dict(
        s_plus=6,
        s_minus=6,
        s_in=6,
        s_out=6,
        s_pl=5,
    )
    long_stream_lengths = [8, 15]

    @unittest.skipIf(
        FAST_TEST or is_github_action(),
        "We don't want to download large datasets in github actions.",
    )
    def test_length(self):
        for stream, length in self.stream_lengths.items():
            with self.subTest(stream=stream, length=length):
                bench = CTrL(stream)
                self.assertEqual(length, bench.n_experiences)

    @unittest.skipIf(
        FAST_TEST or is_github_action(),
        "We don't want to download large datasets in github actions.",
    )
    def test_length_long(self):
        for n_tasks in self.long_stream_lengths:
            with self.subTest(n_tasks=n_tasks), TemporaryDirectory() as tmp:
                bench = CTrL(
                    "s_long", save_to_disk=True, path=Path(tmp), n_tasks=n_tasks
                )
                self.assertEqual(n_tasks, bench.n_experiences)

    @unittest.skipIf(
        FAST_TEST or is_github_action(),
        "We don't want to download large datasets in github actions.",
    )
    def test_n_tasks_param(self):
        for stream in self.stream_lengths.keys():
            with self.subTest(stream=stream):
                with self.assertRaises(ValueError):
                    CTrL(stream, n_tasks=3)

        with self.subTest(stream="s_long"):
            CTrL("s_long", n_tasks=3)

    @unittest.skipIf(
        FAST_TEST or is_github_action(),
        "We don't want to download large datasets in github actions.",
    )
    def test_determinism(self):
        for stream in self.stream_lengths.keys():
            with self.subTest(stream=stream):
                bench_1 = CTrL(stream, seed=1)
                bench_2 = CTrL(stream, seed=1)

                for exp1, exp2 in zip(bench_1.train_stream, bench_2.train_stream):
                    for sample1, sample2 in zip(exp1.dataset, exp2.dataset):
                        self.assertTrue(custom_equals(sample1, sample2))
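# The MNIST tests below rely on two pieces of module-level state and on a
# `load_experience_train_eval` helper. The counters are required for the
# monkeypatched loader to work at all (the first `MNIST_DOWNLOADS += 1`
# would otherwise raise a NameError); the helper is sketched here under
# the assumption that it simply loads every sample in both train and eval
# mode -- the original test utilities define the real version.
MNIST_DOWNLOAD_METHOD = None
MNIST_DOWNLOADS = 0


def load_experience_train_eval(experience, batch_size=32):
    # Iterate the experience dataset once per transform group, making sure
    # every sample can actually be materialized (regression check for
    # lazily-built datasets).
    from torch.utils.data import DataLoader

    for subset in (experience.dataset.train(), experience.dataset.eval()):
        for _ in DataLoader(subset, batch_size=batch_size):
            pass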
class MNISTBenchmarksTests(unittest.TestCase):
    def setUp(self):
        # Wrap the MNIST dataset loader so that each call is counted.
        # This lets the *_download_once tests check that multi-experience
        # benchmarks load the dataset only once.
        import avalanche.benchmarks.classic.cmnist as cmnist

        global MNIST_DOWNLOAD_METHOD
        MNIST_DOWNLOAD_METHOD = cmnist._get_mnist_dataset

        def count_downloads(*args, **kwargs):
            global MNIST_DOWNLOADS
            MNIST_DOWNLOADS += 1
            return MNIST_DOWNLOAD_METHOD(*args, **kwargs)

        cmnist._get_mnist_dataset = count_downloads

    def tearDown(self):
        # Restore the original loader.
        global MNIST_DOWNLOAD_METHOD
        if MNIST_DOWNLOAD_METHOD is not None:
            import avalanche.benchmarks.classic.cmnist as cmnist

            cmnist._get_mnist_dataset = MNIST_DOWNLOAD_METHOD
            MNIST_DOWNLOAD_METHOD = None

    @unittest.skipIf(
        FAST_TEST or is_github_action(),
        "We don't want to download large datasets in github actions.",
    )
    def test_SplitMNIST_benchmark(self):
        benchmark = SplitMNIST(5)
        self.assertEqual(5, len(benchmark.train_stream))
        self.assertEqual(5, len(benchmark.test_stream))

        train_sz = 0
        for experience in benchmark.train_stream:
            self.assertIsInstance(experience, Experience)
            train_sz += len(experience.dataset)

            # Regression test for #572
            load_experience_train_eval(experience)

        self.assertEqual(60000, train_sz)

        test_sz = 0
        for experience in benchmark.test_stream:
            self.assertIsInstance(experience, Experience)
            test_sz += len(experience.dataset)

            # Regression test for #572
            load_experience_train_eval(experience)

        self.assertEqual(10000, test_sz)

    @unittest.skipIf(
        FAST_TEST or is_github_action(),
        "We don't want to download large datasets in github actions.",
    )
    def test_PermutedMNIST_benchmark(self):
        benchmark = PermutedMNIST(3)
        self.assertEqual(3, len(benchmark.train_stream))
        self.assertEqual(3, len(benchmark.test_stream))

        for experience in benchmark.train_stream:
            self.assertIsInstance(experience, Experience)
            self.assertEqual(60000, len(experience.dataset))
            load_experience_train_eval(experience)

        for experience in benchmark.test_stream:
            self.assertIsInstance(experience, Experience)
            self.assertEqual(10000, len(experience.dataset))
            load_experience_train_eval(experience)

    @unittest.skipIf(
        FAST_TEST or is_github_action(),
        "We don't want to download large datasets in github actions.",
    )
    def test_RotatedMNIST_benchmark(self):
        benchmark = RotatedMNIST(3)
        self.assertEqual(3, len(benchmark.train_stream))
        self.assertEqual(3, len(benchmark.test_stream))

        for experience in benchmark.train_stream:
            self.assertIsInstance(experience, Experience)
            self.assertEqual(60000, len(experience.dataset))
            load_experience_train_eval(experience)

        for experience in benchmark.test_stream:
            self.assertIsInstance(experience, Experience)
            self.assertEqual(10000, len(experience.dataset))
            load_experience_train_eval(experience)

    @unittest.skipIf(
        FAST_TEST or is_github_action(),
        "We don't want to download large datasets in github actions.",
    )
    def test_PermutedMNIST_benchmark_download_once(self):
        global MNIST_DOWNLOADS
        MNIST_DOWNLOADS = 0

        benchmark = PermutedMNIST(3)
        self.assertEqual(3, len(benchmark.train_stream))
        self.assertEqual(3, len(benchmark.test_stream))

        # All 3 permutations must reuse a single download of MNIST.
        self.assertEqual(1, MNIST_DOWNLOADS)

    @unittest.skipIf(
        FAST_TEST or is_github_action(),
        "We don't want to download large datasets in github actions.",
    )
    def test_RotatedMNIST_benchmark_download_once(self):
        global MNIST_DOWNLOADS
        MNIST_DOWNLOADS = 0

        benchmark = RotatedMNIST(3)
        self.assertEqual(3, len(benchmark.train_stream))
        self.assertEqual(3, len(benchmark.test_stream))

        # All 3 rotations must reuse a single download of MNIST.
        self.assertEqual(1, MNIST_DOWNLOADS)
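# Same assumed module-level state for the CIFAR-10 download counter used
# by the tests below; as above, the real definitions presumably live near
# the top of the original test module.
CIFAR10_DOWNLOAD_METHOD = None
CIFAR10_DOWNLOADS = 0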
class CIFAR10BenchmarksTests(unittest.TestCase):
    def setUp(self):
        # Wrap the CIFAR-10 dataset loader so that each call is counted
        # (same pattern as MNISTBenchmarksTests.setUp).
        import avalanche.benchmarks.classic.ccifar10 as ccifar10

        global CIFAR10_DOWNLOAD_METHOD
        CIFAR10_DOWNLOAD_METHOD = ccifar10._get_cifar10_dataset

        def count_downloads(*args, **kwargs):
            global CIFAR10_DOWNLOADS
            CIFAR10_DOWNLOADS += 1
            return CIFAR10_DOWNLOAD_METHOD(*args, **kwargs)

        ccifar10._get_cifar10_dataset = count_downloads

    def tearDown(self):
        # Restore the original loader.
        global CIFAR10_DOWNLOAD_METHOD
        if CIFAR10_DOWNLOAD_METHOD is not None:
            import avalanche.benchmarks.classic.ccifar10 as ccifar10

            ccifar10._get_cifar10_dataset = CIFAR10_DOWNLOAD_METHOD
            CIFAR10_DOWNLOAD_METHOD = None

    @unittest.skipIf(
        FAST_TEST or is_github_action(),
        "We don't want to download large datasets in github actions.",
    )
    def test_SplitCifar10_benchmark(self):
        benchmark = SplitCIFAR10(5)
        self.assertEqual(5, len(benchmark.train_stream))
        self.assertEqual(5, len(benchmark.test_stream))

        train_sz = 0
        for experience in benchmark.train_stream:
            self.assertIsInstance(experience, ClassificationExperience)
            train_sz += len(experience.dataset)

            # Regression test for #575
            load_experience_train_eval(experience)

        self.assertEqual(50000, train_sz)

        test_sz = 0
        for experience in benchmark.test_stream:
            self.assertIsInstance(experience, ClassificationExperience)
            test_sz += len(experience.dataset)

            # Regression test for #575
            load_experience_train_eval(experience)

        self.assertEqual(10000, test_sz)

    @unittest.skipIf(
        FAST_TEST or is_github_action(),
        "We don't want to download large datasets in github actions.",
    )
    def test_SplitCifar10_benchmark_download_once(self):
        global CIFAR10_DOWNLOADS
        CIFAR10_DOWNLOADS = 0

        benchmark = SplitCIFAR10(5)
        self.assertEqual(5, len(benchmark.train_stream))
        self.assertEqual(5, len(benchmark.test_stream))

        # All 5 experiences must reuse a single download of CIFAR-10.
        self.assertEqual(1, CIFAR10_DOWNLOADS)