def test_nc_benchmark_classes_in_exp_range(self):
    """Check ``classes_in_exp_range`` for both the natural class order
    and a fixed (shuffled) class order, on a 5-experience CIFAR-100
    benchmark (20 classes per experience)."""
    train_set = CIFAR100(
        default_dataset_location("cifar100"), train=True, download=True
    )
    test_set = CIFAR100(
        default_dataset_location("cifar100"), train=False, download=True
    )
    benchmark_instance = nc_benchmark(
        train_dataset=train_set,
        test_dataset=test_set,
        n_experiences=5,
        task_labels=False,
        seed=1234,
        shuffle=False,
    )

    # Full range (0, None): all 5 experiences, classes in natural order.
    cie_data = benchmark_instance.classes_in_exp_range(0, None)
    self.assertEqual(5, len(cie_data))
    for i in range(5):
        expected = set(range(i * 20, (i + 1) * 20))
        self.assertSetEqual(expected, set(cie_data[i]))

    # Sub-range [1, 4): experiences 1, 2 and 3.
    cie_data = benchmark_instance.classes_in_exp_range(1, 4)
    self.assertEqual(3, len(cie_data))
    # Fix: the loop previously ran ``range(1, 3)``, so the last returned
    # experience (cie_data[2], classes 60-79) was never verified despite
    # the length assertion above expecting 3 entries.
    for i in range(1, 4):
        expected = set(range(i * 20, (i + 1) * 20))
        self.assertSetEqual(expected, set(cie_data[i - 1]))

    # Same check with a random fixed class order: each experience must
    # contain the corresponding 20-class slice of that order.
    random_class_order = list(range(100))
    random.shuffle(random_class_order)
    benchmark_instance = nc_benchmark(
        train_dataset=train_set,
        test_dataset=test_set,
        n_experiences=5,
        task_labels=False,
        seed=1234,
        fixed_class_order=random_class_order,
        shuffle=False,
    )
    cie_data = benchmark_instance.classes_in_exp_range(0, None)
    self.assertEqual(5, len(cie_data))
    for i in range(5):
        expected = set(random_class_order[i * 20 : (i + 1) * 20])
        self.assertSetEqual(expected, set(cie_data[i]))
def test_nc_benchmark_transformations_advanced(self):
    # Regression for #577: train/eval transform groups must be applied
    # (and switchable) independently on both streams.
    dataset = CIFAR100(
        root=default_dataset_location("cifar100"),
        train=True,
        download=True,
    )
    benchmark = nc_benchmark(
        dataset,
        dataset,
        n_experiences=10,
        shuffle=True,
        seed=1234,
        task_labels=False,
        train_transform=ToTensor(),
        eval_transform=None,
    )

    # Train stream defaults to the train group (ToTensor applied);
    # switching to the eval group must yield the raw PIL image.
    first_train_ds = benchmark.train_stream[0].dataset
    self.assertIsInstance(first_train_ds[0][0], Tensor)
    self.assertIsInstance(first_train_ds.eval()[0][0], Image)

    # Test stream defaults to the eval group (no transform);
    # switching to the train group must apply ToTensor.
    first_test_ds = benchmark.test_stream[0].dataset
    self.assertIsInstance(first_test_ds[0][0], Image)
    self.assertIsInstance(first_test_ds.train()[0][0], Tensor)
def test_nc_benchmark_transformations_advanced(self):
    # Regression for #577: train/eval transform groups must be applied
    # (and switchable) independently on both streams.
    cifar_root = expanduser("~") + "/.avalanche/data/cifar100/"
    dataset = CIFAR100(root=cifar_root, train=True, download=True)
    scenario = nc_benchmark(
        dataset,
        dataset,
        n_experiences=10,
        shuffle=True,
        seed=1234,
        task_labels=False,
        train_transform=ToTensor(),
        eval_transform=None,
    )

    # Train stream: train group applies ToTensor, eval group does not.
    train_exp_ds = scenario.train_stream[0].dataset
    self.assertIsInstance(train_exp_ds[0][0], Tensor)
    self.assertIsInstance(train_exp_ds.eval()[0][0], Image)

    # Test stream: eval group leaves the PIL image, train group tensorizes.
    test_exp_ds = scenario.test_stream[0].dataset
    self.assertIsInstance(test_exp_ds[0][0], Image)
    self.assertIsInstance(test_exp_ds.train()[0][0], Tensor)
def test_nc_benchmark_transformations_basic(self):
    # Regression for #577: a transform attached directly to the wrapped
    # AvalancheDataset must survive benchmark creation.
    raw_dataset = CIFAR100(
        root=expanduser("~") + "/.avalanche/data/cifar100/",
        train=True,
        download=True,
    )
    wrapped = AvalancheDataset(raw_dataset, transform=ToTensor())
    benchmark = nc_benchmark(
        wrapped,
        wrapped,
        n_experiences=10,
        shuffle=True,
        seed=1234,
        task_labels=False,
    )
    first_sample = benchmark.train_stream[0].dataset[0]
    self.assertIsInstance(first_sample[0], Tensor)
def run_experiment(config):
    """Run an iCaRL class-incremental experiment on CIFAR-100.

    Builds mean-centered train/eval transform groups, creates an
    ``nc_benchmark`` with the class order fixed by ``config``, trains an
    ``ICaRL`` strategy experience by experience, and after each training
    experience evaluates on all test experiences seen so far.

    :param config: experiment configuration; the attributes read here are
        ``seed``, ``nb_exp``, ``fixed_class_order``, ``lr_base``,
        ``wght_decay``, ``lr_milestones``, ``lr_factor``, ``memory_size``,
        ``batch_size`` and ``epochs``.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Seed every RNG source and force deterministic cuDNN behavior so the
    # experiment is reproducible for a given config.seed.
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed(config.seed)
    np.random.seed(config.seed)
    random.seed(config.seed)
    torch.backends.cudnn.enabled = False
    torch.backends.cudnn.deterministic = True

    # Per-pixel mean of the CIFAR-100 train split (as tensors), subtracted
    # below to mean-center every image.
    per_pixel_mean = get_dataset_per_pixel_mean(
        CIFAR100(
            expanduser("~") + "/.avalanche/data/cifar100/",
            train=True,
            download=True,
            transform=transforms.Compose([transforms.ToTensor()]),
        )
    )

    # Transform groups: "eval" only tensorizes and mean-centers; "train"
    # additionally applies iCaRL's data augmentation. The second element of
    # each tuple is the target transform (none here). The lambdas close
    # over per_pixel_mean computed above.
    transforms_group = dict(
        eval=(
            transforms.Compose(
                [
                    transforms.ToTensor(),
                    lambda img_pattern: img_pattern - per_pixel_mean,
                ]
            ),
            None,
        ),
        train=(
            transforms.Compose(
                [
                    transforms.ToTensor(),
                    lambda img_pattern: img_pattern - per_pixel_mean,
                    icarl_cifar100_augment_data,
                ]
            ),
            None,
        ),
    )

    train_set = CIFAR100(
        expanduser("~") + "/.avalanche/data/cifar100/",
        train=True,
        download=True,
    )

    test_set = CIFAR100(
        expanduser("~") + "/.avalanche/data/cifar100/",
        train=False,
        download=True,
    )

    # Wrap the raw datasets so each starts from the appropriate transform
    # group (train set augmented, test set eval-only).
    train_set = AvalancheDataset(
        train_set,
        transform_groups=transforms_group,
        initial_transform_group="train",
    )

    test_set = AvalancheDataset(
        test_set,
        transform_groups=transforms_group,
        initial_transform_group="eval",
    )

    # Class-incremental benchmark; shuffle=False because the class order is
    # fully determined by config.fixed_class_order.
    scenario = nc_benchmark(
        train_dataset=train_set,
        test_dataset=test_set,
        n_experiences=config.nb_exp,
        task_labels=False,
        seed=config.seed,
        shuffle=False,
        fixed_class_order=config.fixed_class_order,
    )

    # Track accuracy at epoch, experience and stream granularity.
    evaluator = EvaluationPlugin(
        EpochAccuracy(),
        ExperienceAccuracy(),
        StreamAccuracy(),
        loggers=[InteractiveLogger()],
    )

    model: IcarlNet = make_icarl_net(num_classes=100)
    model.apply(initialize_icarl_net)

    optim = SGD(
        model.parameters(),
        lr=config.lr_base,
        weight_decay=config.wght_decay,
        momentum=0.9,
    )
    # Step decay: LR is divided by lr_factor at each configured milestone.
    sched = LRSchedulerPlugin(
        MultiStepLR(optim, config.lr_milestones, gamma=1.0 / config.lr_factor)
    )

    # iCaRL with a fixed-size exemplar memory; buffer samples are
    # re-augmented with the same iCaRL augmentation used for training.
    strategy = ICaRL(
        model.feature_extractor,
        model.classifier,
        optim,
        config.memory_size,
        buffer_transform=transforms.Compose([icarl_cifar100_augment_data]),
        fixed_memory=True,
        train_mb_size=config.batch_size,
        train_epochs=config.epochs,
        eval_mb_size=config.batch_size,
        plugins=[sched],
        device=device,
        evaluator=evaluator,
    )

    # Incremental loop: after training on experience i, evaluate on test
    # experiences 0..i (the ones whose classes have been seen so far).
    for i, exp in enumerate(scenario.train_stream):
        eval_exps = [e for e in scenario.test_stream][: i + 1]
        strategy.train(exp, num_workers=4)
        strategy.eval(eval_exps, num_workers=4)