def test_mt_single_dataset_reproducibility_data(self):
        mnist_train = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                            train=True,
                            download=True)
        mnist_test = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                           train=False,
                           download=True)
        nc_benchmark_ref = nc_benchmark(mnist_train,
                                        mnist_test,
                                        5,
                                        task_labels=True,
                                        shuffle=True,
                                        seed=5678)

        my_nc_benchmark = nc_benchmark(
            mnist_train,
            mnist_test,
            -1,
            task_labels=True,
            reproducibility_data=nc_benchmark_ref.get_reproducibility_data())

        self.assertEqual(nc_benchmark_ref.train_exps_patterns_assignment,
                         my_nc_benchmark.train_exps_patterns_assignment)

        self.assertEqual(nc_benchmark_ref.test_exps_patterns_assignment,
                         my_nc_benchmark.test_exps_patterns_assignment)
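
The same mechanism can persist a benchmark layout across runs. A sketch, assuming the dict returned by get_reproducibility_data() holds only JSON-serializable values (the file name is illustrative):

        import json

        rep_data = nc_benchmark_ref.get_reproducibility_data()
        with open("nc_benchmark_rep.json", "w") as f:
            json.dump(rep_data, f)

        # A later run can rebuild the exact same benchmark:
        with open("nc_benchmark_rep.json") as f:
            rebuilt_benchmark = nc_benchmark(
                mnist_train,
                mnist_test,
                -1,  # the experience count comes from the reproducibility data
                task_labels=True,
                reproducibility_data=json.load(f),
            )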
    def test_sit_single_dataset_remap_indexes_each_exp(self):
        order = [2, 3, 5, 8, 9, 1, 4, 6]
        mnist_train = MNIST(
            root=default_dataset_location("mnist"),
            train=True,
            download=True,
        )
        mnist_test = MNIST(
            root=default_dataset_location("mnist"),
            train=False,
            download=True,
        )

        with self.assertRaises(ValueError):
            # class_ids_from_zero_* are mutually exclusive
            nc_benchmark(
                mnist_train,
                mnist_test,
                4,
                task_labels=False,
                fixed_class_order=order,
                class_ids_from_zero_from_first_exp=True,
                class_ids_from_zero_in_each_exp=True,
            )

        my_nc_benchmark = nc_benchmark(
            mnist_train,
            mnist_test,
            4,
            task_labels=False,
            fixed_class_order=order,
            class_ids_from_zero_in_each_exp=True,
        )

        self.assertEqual(4, len(my_nc_benchmark.classes_in_experience["train"]))

        all_classes = []
        for batch_id in range(4):
            self.assertEqual(
                2, len(my_nc_benchmark.classes_in_experience["train"][batch_id])
            )
            all_classes.extend(
                my_nc_benchmark.classes_in_experience["train"][batch_id]
            )
        self.assertEqual(8, len(all_classes))
        self.assertListEqual([0, 1], sorted(set(all_classes)))

        # Regression test for issue #258
        for i, experience in enumerate(my_nc_benchmark.train_stream):
            unique_dataset_classes = sorted(set(experience.dataset.targets))
            expected_dataset_classes = [0, 1]
            self.assertListEqual(
                expected_dataset_classes, unique_dataset_classes
            )
            self.assertListEqual(
                sorted(order[2 * i : 2 * (i + 1)]),
                sorted(my_nc_benchmark.original_classes_in_exp[i]),
            )
    def test_nc_benchmark_classes_in_exp_range(self):
        train_set = CIFAR100(
            default_dataset_location("cifar100"), train=True, download=True
        )

        test_set = CIFAR100(
            default_dataset_location("cifar100"), train=False, download=True
        )

        benchmark_instance = nc_benchmark(
            train_dataset=train_set,
            test_dataset=test_set,
            n_experiences=5,
            task_labels=False,
            seed=1234,
            shuffle=False,
        )

        cie_data = benchmark_instance.classes_in_exp_range(0, None)
        self.assertEqual(5, len(cie_data))

        for i in range(5):
            expected = set(range(i * 20, (i + 1) * 20))
            self.assertSetEqual(expected, set(cie_data[i]))

        cie_data = benchmark_instance.classes_in_exp_range(1, 4)
        self.assertEqual(3, len(cie_data))

        for i in range(1, 4):
            expected = set(range(i * 20, (i + 1) * 20))
            self.assertSetEqual(expected, set(cie_data[i - 1]))

        random_class_order = list(range(100))
        random.shuffle(random_class_order)
        benchmark_instance = nc_benchmark(
            train_dataset=train_set,
            test_dataset=test_set,
            n_experiences=5,
            task_labels=False,
            seed=1234,
            fixed_class_order=random_class_order,
            shuffle=False,
        )

        cie_data = benchmark_instance.classes_in_exp_range(0, None)
        self.assertEqual(5, len(cie_data))

        for i in range(5):
            expected = set(random_class_order[i * 20 : (i + 1) * 20])
            self.assertSetEqual(expected, set(cie_data[i]))
Example #4
    def create_benchmark(task_labels=False, seed=None):
        n_samples_per_class = 20

        dataset = make_classification(
            n_samples=10 * n_samples_per_class,
            n_classes=10,
            n_features=6,
            n_informative=6,
            n_redundant=0,
            random_state=seed,
        )

        X = torch.from_numpy(dataset[0]).float()
        y = torch.from_numpy(dataset[1]).long()

        train_X, test_X, train_y, test_y = train_test_split(
            X, y, train_size=0.6, shuffle=True, stratify=y, random_state=seed
        )

        train_dataset = TensorDataset(train_X, train_y)
        test_dataset = TensorDataset(test_X, test_y)
        return nc_benchmark(
            train_dataset,
            test_dataset,
            5,
            task_labels=task_labels,
            fixed_class_order=list(range(10)),
        )
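
Because the seed is threaded through both make_classification and train_test_split, two calls with the same seed should yield identical benchmarks. A minimal determinism check (a sketch, assuming create_benchmark is callable as shown, e.g. as a module-level helper or staticmethod):

bench_a = create_benchmark(task_labels=False, seed=42)
bench_b = create_benchmark(task_labels=False, seed=42)

assert bench_a.n_experiences == bench_b.n_experiences == 5
# With fixed_class_order=list(range(10)), experience i holds classes 2i, 2i+1.
for i, exp in enumerate(bench_a.train_stream):
    classes = sorted(set(int(t) for t in exp.dataset.targets))
    assert classes == [2 * i, 2 * i + 1]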
    def test_sit_single_dataset_fixed_order_subset(self):
        order = [2, 5, 7, 8, 9, 0, 1, 4]
        mnist_train = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                            train=True,
                            download=True)
        mnist_test = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                           train=False,
                           download=True)
        my_nc_benchmark = nc_benchmark(mnist_train,
                                       mnist_test,
                                       4,
                                       task_labels=True,
                                       fixed_class_order=order,
                                       class_ids_from_zero_in_each_exp=True)

        self.assertEqual(4,
                         len(my_nc_benchmark.classes_in_experience['train']))

        all_classes = []
        for task_id in range(4):
            self.assertEqual(
                2,
                len(my_nc_benchmark.classes_in_experience['train'][task_id]))
            self.assertEqual(set(order[task_id * 2:(task_id + 1) * 2]),
                             my_nc_benchmark.original_classes_in_exp[task_id])
            all_classes.extend(
                my_nc_benchmark.classes_in_experience['train'][task_id])

        self.assertEqual([0, 1] * 4, all_classes)

    def test_mt_single_dataset_task_size(self):
        mnist_train = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                            train=True,
                            download=True)
        mnist_test = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                           train=False,
                           download=True)
        my_nc_benchmark = nc_benchmark(mnist_train,
                                       mnist_test,
                                       3,
                                       task_labels=True,
                                       per_exp_classes={
                                           0: 5,
                                           2: 2
                                       },
                                       class_ids_from_zero_in_each_exp=True)

        self.assertEqual(3, my_nc_benchmark.n_experiences)
        self.assertEqual(10, my_nc_benchmark.n_classes)

        all_classes = set()
        for task_id in range(3):
            all_classes.update(
                my_nc_benchmark.classes_in_experience['train'][task_id])
        self.assertEqual(5, len(all_classes))

        self.assertEqual(
            5, len(my_nc_benchmark.classes_in_experience['train'][0]))
        self.assertEqual(
            3, len(my_nc_benchmark.classes_in_experience['train'][1]))
        self.assertEqual(
            2, len(my_nc_benchmark.classes_in_experience['train'][2]))
Example #7
def load_benchmark(use_task_labels=False, fast_test=True):
    """
    Returns an NC benchmark from a fake dataset of 10 classes, 5 experiences,
    2 classes per experience.
    """
    if fast_test:
        my_nc_benchmark = get_fast_benchmark(use_task_labels)
    else:
        mnist_train = MNIST(
            root=expanduser("~") + "/.avalanche/data/mnist/",
            train=True,
            download=True,
            transform=Compose([ToTensor()]),
        )

        mnist_test = MNIST(
            root=expanduser("~") + "/.avalanche/data/mnist/",
            train=False,
            download=True,
            transform=Compose([ToTensor()]),
        )
        my_nc_benchmark = nc_benchmark(mnist_train,
                                       mnist_test,
                                       5,
                                       task_labels=use_task_labels,
                                       seed=1234)

    return my_nc_benchmark
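
A typical use of this helper in a test; the task_label access assumes each experience carries a single task label, which holds when task labels are enabled:

benchmark = load_benchmark(use_task_labels=True)

for experience in benchmark.train_stream:
    print(experience.current_experience,
          experience.task_label,
          experience.classes_in_this_experience)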
Example #8
    def test_mt_single_dataset_fixed_order(self):
        order = [2, 3, 5, 7, 8, 9, 0, 1, 4, 6]
        mnist_train = MNIST(
            root=expanduser("~") + "/.avalanche/data/mnist/",
            train=True,
            download=True,
        )
        mnist_test = MNIST(
            root=expanduser("~") + "/.avalanche/data/mnist/",
            train=False,
            download=True,
        )
        my_nc_benchmark = nc_benchmark(
            mnist_train,
            mnist_test,
            5,
            task_labels=True,
            fixed_class_order=order,
            class_ids_from_zero_in_each_exp=False,
        )

        all_classes = []
        for task_id in range(5):
            all_classes.extend(
                my_nc_benchmark.classes_in_experience["train"][task_id])

        self.assertEqual(order, all_classes)

    def test_sit_single_dataset_fixed_order_subset(self):
        order = [2, 3, 5, 8, 9, 1, 4, 6]
        mnist_train = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                            train=True,
                            download=True)
        mnist_test = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                           train=False,
                           download=True)
        my_nc_benchmark = nc_benchmark(mnist_train,
                                       mnist_test,
                                       4,
                                       task_labels=False,
                                       fixed_class_order=order)

        self.assertEqual(4,
                         len(my_nc_benchmark.classes_in_experience['train']))

        all_classes = set()
        for batch_id in range(4):
            self.assertEqual(
                2,
                len(my_nc_benchmark.classes_in_experience['train'][batch_id]))
            all_classes.update(
                my_nc_benchmark.classes_in_experience['train'][batch_id])

        self.assertEqual(set(order), all_classes)

def main(args):

    # Model getter: specify dataset and depth of the network.
    model = pytorchcv_wrapper.resnet('cifar10', depth=20, pretrained=False)

    # Or get a more specific model. E.g., a Wide ResNet with depth 40 and
    # widening factor 8 for CIFAR-10.
    # model = pytorchcv_wrapper.get_model("wrn40_8_cifar10", pretrained=False)

    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}"
                          if torch.cuda.is_available() and
                          args.cuda >= 0 else "cpu")

    # --- TRANSFORMATIONS
    transform = transforms.Compose([
        ToTensor(),
        transforms.Normalize((0.491, 0.482, 0.446), (0.247, 0.243, 0.261))
    ])

    # --- SCENARIO CREATION
    cifar_train = CIFAR10(root=expanduser("~") + "/.avalanche/data/cifar10/",
                          train=True, download=True, transform=transform)
    cifar_test = CIFAR10(root=expanduser("~") + "/.avalanche/data/cifar10/",
                         train=False, download=True, transform=transform)
    scenario = nc_benchmark(
        cifar_train, cifar_test, 5, task_labels=False, seed=1234,
        fixed_class_order=list(range(10)))

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(
            minibatch=True, epoch=True, experience=True, stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        forgetting_metrics(experience=True),
        loggers=[interactive_logger])

    # CREATE THE STRATEGY INSTANCE (Naive, with Replay)
    cl_strategy = Naive(model, torch.optim.SGD(model.parameters(), lr=0.01),
                        CrossEntropyLoss(),
                        train_mb_size=100, train_epochs=1, eval_mb_size=100,
                        device=device,
                        plugins=[ReplayPlugin(mem_size=1000)],
                        evaluator=eval_plugin
                        )

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start of experience ", experience.current_experience)
        cl_strategy.train(experience)
        print('Training completed')

        print('Computing accuracy on the whole test set')
        results.append(cl_strategy.eval(scenario.test_stream))
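
As written, main expects an object with a cuda attribute; a minimal, hypothetical driver for running the script standalone:

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--cuda", type=int, default=0,
                        help="Index of the CUDA device to use; "
                             "-1 forces CPU.")
    main(parser.parse_args())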
Example #11
def fast_scenario(use_task_labels=False, shuffle=True):
    """ Copied directly from Avalanche in "tests/unit_tests_utils.py".

    Not used anywhere at the moment, but could serve as inspiration for
    writing quicker tests in Sequoia.
    """
    n_samples_per_class = 100
    dataset = make_classification(
        n_samples=10 * n_samples_per_class,
        n_classes=10,
        n_features=6,
        n_informative=6,
        n_redundant=0,
    )

    X = torch.from_numpy(dataset[0]).float()
    y = torch.from_numpy(dataset[1]).long()

    train_X, test_X, train_y, test_y = train_test_split(
        X, y, train_size=0.6, shuffle=True, stratify=y
    )

    train_dataset = TensorDataset(train_X, train_y)
    test_dataset = TensorDataset(test_X, test_y)
    my_nc_benchmark = nc_benchmark(
        train_dataset, test_dataset, 5, task_labels=use_task_labels, shuffle=shuffle
    )
    return my_nc_benchmark

    def test_nc_benchmark_transformations_advanced(self):
        # Regression for #577
        ds = CIFAR100(root=expanduser("~") + "/.avalanche/data/cifar100/",
                      train=True,
                      download=True)
        scenario = nc_benchmark(ds,
                                ds,
                                n_experiences=10,
                                shuffle=True,
                                seed=1234,
                                task_labels=False,
                                train_transform=ToTensor(),
                                eval_transform=None)

        ds_train_train = scenario.train_stream[0].dataset
        self.assertIsInstance(ds_train_train[0][0], Tensor)

        ds_train_eval = scenario.train_stream[0].dataset.eval()
        self.assertIsInstance(ds_train_eval[0][0], Image)

        ds_test_eval = scenario.test_stream[0].dataset
        self.assertIsInstance(ds_test_eval[0][0], Image)

        ds_test_train = scenario.test_stream[0].dataset.train()
        self.assertIsInstance(ds_test_train[0][0], Tensor)
Example #13
    def load_ar1_benchmark(self):
        """
        Returns a NC benchmark from a fake dataset of 10 classes, 5 experiences,
        2 classes per experience. This toy benchmark is intended
        """
        n_samples_per_class = 5
        dataset = make_classification(
            n_samples=10 * n_samples_per_class,
            n_classes=9,
            n_features=224 * 224 * 3,
            n_informative=6,
            n_redundant=0,
        )

        X = torch.from_numpy(dataset[0]).reshape(-1, 3, 224, 224).float()
        y = torch.from_numpy(dataset[1]).long()

        train_X, test_X, train_y, test_y = train_test_split(
            X, y, train_size=0.6, shuffle=True, stratify=y
        )

        train_dataset = TensorDataset(train_X, train_y)
        test_dataset = TensorDataset(test_X, test_y)
        my_nc_benchmark = nc_benchmark(
            train_dataset, test_dataset, 3, task_labels=False
        )
        return my_nc_benchmark
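
The reshape to 3x224x224 is presumably there because AR1's default MobileNet backbone expects ImageNet-sized inputs. A sanity check for the generated tensors could sit inside a test method of the same class (a sketch; the dataset[0][0] indexing follows the pattern used in the other tests):

        my_nc_benchmark = self.load_ar1_benchmark()
        first_x = my_nc_benchmark.train_stream[0].dataset[0][0]
        self.assertEqual((3, 224, 224), tuple(first_x.shape))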
Example #14
    def test_sit_single_dataset_remap_indexes(self):
        order = [2, 3, 5, 8, 9, 1, 4, 6]
        mnist_train = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                            train=True, download=True)
        mnist_test = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                           train=False, download=True)
        my_nc_benchmark = nc_benchmark(
            mnist_train, mnist_test, 4, task_labels=False,
            fixed_class_order=order, class_ids_from_zero_from_first_exp=True)

        self.assertEqual(4, len(my_nc_benchmark.classes_in_experience))

        all_classes = []
        for batch_id in range(4):
            self.assertEqual(
                2, len(my_nc_benchmark.classes_in_experience[batch_id]))
            all_classes.extend(my_nc_benchmark.classes_in_experience[batch_id])
        self.assertEqual(list(range(8)), all_classes)

        # Regression test for issue #258
        for i, experience in enumerate(my_nc_benchmark.train_stream):
            unique_dataset_classes = sorted(set(experience.dataset.targets))
            expected_dataset_classes = list(range(2 * i, 2 * (i+1)))

            self.assertListEqual(expected_dataset_classes,
                                 unique_dataset_classes)
            self.assertListEqual(
                sorted(order[2 * i:2 * (i+1)]),
                sorted(my_nc_benchmark.original_classes_in_exp[i]))
Example #15
    def test_sit_multi_dataset_merge(self):
        split_mapping = [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
        mnist_train = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                            train=True, download=True)
        mnist_test = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                           train=False, download=True)

        train_part1 = make_nc_transformation_subset(
            mnist_train, None, None, range(5))
        train_part2 = make_nc_transformation_subset(
            mnist_train, None, None, range(5, 10))
        train_part2 = AvalancheSubset(
            train_part2, class_mapping=split_mapping)

        test_part1 = make_nc_transformation_subset(
            mnist_test, None, None, range(5))
        test_part2 = make_nc_transformation_subset(
            mnist_test, None, None, range(5, 10))
        test_part2 = AvalancheSubset(test_part2,
                                     class_mapping=split_mapping)
        my_nc_benchmark = nc_benchmark(
            [train_part1, train_part2], [test_part1, test_part2], 5,
            task_labels=False, shuffle=True, seed=1234)

        self.assertEqual(5, my_nc_benchmark.n_experiences)
        self.assertEqual(10, my_nc_benchmark.n_classes)
        for batch_id in range(5):
            self.assertEqual(
                2, len(my_nc_benchmark.classes_in_experience[batch_id]))

        all_classes = set()
        for batch_id in range(5):
            all_classes.update(my_nc_benchmark.classes_in_experience[batch_id])

        self.assertEqual(10, len(all_classes))

    def test_sit_single_dataset(self):
        mnist_train = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                            train=True,
                            download=True)
        mnist_test = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                           train=False,
                           download=True)
        my_nc_benchmark = nc_benchmark(mnist_train,
                                       mnist_test,
                                       5,
                                       task_labels=False,
                                       shuffle=True,
                                       seed=1234)

        self.assertEqual(5, my_nc_benchmark.n_experiences)
        self.assertEqual(10, my_nc_benchmark.n_classes)
        for batch_id in range(my_nc_benchmark.n_experiences):
            self.assertEqual(
                2, len(my_nc_benchmark.classes_in_experience[batch_id]))

        all_classes = set()
        for batch_id in range(5):
            all_classes.update(my_nc_benchmark.classes_in_experience[batch_id])

        self.assertEqual(10, len(all_classes))

    def test_mt_single_dataset_without_class_id_remap(self):
        mnist_train = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                            train=True,
                            download=True)
        mnist_test = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                           train=False,
                           download=True)
        my_nc_benchmark = nc_benchmark(mnist_train,
                                       mnist_test,
                                       5,
                                       task_labels=True,
                                       shuffle=True,
                                       seed=1234,
                                       class_ids_from_zero_in_each_exp=False)

        self.assertEqual(5, my_nc_benchmark.n_experiences)
        self.assertEqual(10, my_nc_benchmark.n_classes)
        for task_id in range(5):
            self.assertEqual(
                2,
                len(my_nc_benchmark.classes_in_experience['train'][task_id]))

        all_classes = set()
        for task_id in range(my_nc_benchmark.n_experiences):
            all_classes.update(
                my_nc_benchmark.classes_in_experience['train'][task_id])

        self.assertEqual(10, len(all_classes))

    def test_sit_single_dataset_fixed_order_subset(self):
        order = [2, 3, 5, 8, 9, 1, 4, 6]
        mnist_train = MNIST(
            root=default_dataset_location("mnist"),
            train=True,
            download=True,
        )
        mnist_test = MNIST(
            root=default_dataset_location("mnist"),
            train=False,
            download=True,
        )
        my_nc_benchmark = nc_benchmark(
            mnist_train,
            mnist_test,
            4,
            task_labels=False,
            fixed_class_order=order,
        )

        self.assertEqual(4, len(my_nc_benchmark.classes_in_experience["train"]))

        all_classes = set()
        for batch_id in range(4):
            self.assertEqual(
                2, len(my_nc_benchmark.classes_in_experience["train"][batch_id])
            )
            all_classes.update(
                my_nc_benchmark.classes_in_experience["train"][batch_id]
            )

        self.assertEqual(set(order), all_classes)

    def test_sit_single_dataset(self):
        mnist_train = MNIST(
            root=default_dataset_location("mnist"),
            train=True,
            download=True,
        )
        mnist_test = MNIST(
            root=default_dataset_location("mnist"),
            train=False,
            download=True,
        )
        my_nc_benchmark = nc_benchmark(
            mnist_train,
            mnist_test,
            5,
            task_labels=False,
            shuffle=True,
            seed=1234,
        )

        self.assertEqual(5, my_nc_benchmark.n_experiences)
        self.assertEqual(10, my_nc_benchmark.n_classes)
        for batch_id in range(my_nc_benchmark.n_experiences):
            self.assertEqual(
                2, len(my_nc_benchmark.classes_in_experience["train"][batch_id])
            )

        all_classes = set()
        for batch_id in range(5):
            all_classes.update(
                my_nc_benchmark.classes_in_experience["train"][batch_id]
            )

        self.assertEqual(10, len(all_classes))

    def test_sit_single_dataset_fixed_order(self):
        order = [2, 3, 5, 7, 8, 9, 0, 1, 4, 6]
        mnist_train = MNIST(
            root=default_dataset_location("mnist"),
            train=True,
            download=True,
        )
        mnist_test = MNIST(
            root=default_dataset_location("mnist"),
            train=False,
            download=True,
        )
        my_nc_benchmark = nc_benchmark(
            mnist_train,
            mnist_test,
            5,
            task_labels=False,
            fixed_class_order=order,
        )

        all_classes = []
        for batch_id in range(5):
            all_classes.extend(
                my_nc_benchmark.classes_in_experience["train"][batch_id]
            )

        self.assertEqual(order, all_classes)

    def test_nc_benchmark_transformations_advanced(self):
        # Regression for #577
        ds = CIFAR100(
            root=default_dataset_location("cifar100"),
            train=True,
            download=True,
        )
        benchmark = nc_benchmark(
            ds,
            ds,
            n_experiences=10,
            shuffle=True,
            seed=1234,
            task_labels=False,
            train_transform=ToTensor(),
            eval_transform=None,
        )

        ds_train_train = benchmark.train_stream[0].dataset
        self.assertIsInstance(ds_train_train[0][0], Tensor)

        ds_train_eval = benchmark.train_stream[0].dataset.eval()
        self.assertIsInstance(ds_train_eval[0][0], Image)

        ds_test_eval = benchmark.test_stream[0].dataset
        self.assertIsInstance(ds_test_eval[0][0], Image)

        ds_test_train = benchmark.test_stream[0].dataset.train()
        self.assertIsInstance(ds_test_train[0][0], Tensor)
Example #22
def get_fast_benchmark(use_task_labels=False,
                       shuffle=True,
                       n_samples_per_class=100):
    dataset = make_classification(
        n_samples=10 * n_samples_per_class,
        n_classes=10,
        n_features=6,
        n_informative=6,
        n_redundant=0,
    )

    X = torch.from_numpy(dataset[0]).float()
    y = torch.from_numpy(dataset[1]).long()

    train_X, test_X, train_y, test_y = train_test_split(X,
                                                        y,
                                                        train_size=0.6,
                                                        shuffle=True,
                                                        stratify=y)

    train_dataset = TensorDataset(train_X, train_y)
    test_dataset = TensorDataset(test_X, test_y)
    my_nc_benchmark = nc_benchmark(
        train_dataset,
        test_dataset,
        5,
        task_labels=use_task_labels,
        shuffle=shuffle,
    )
    return my_nc_benchmark
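
Such a helper keeps strategy smoke tests fast. A sketch of one, assuming SimpleMLP accepts an input_size argument matching the 6 synthetic features (import locations vary across Avalanche versions):

from torch.nn import CrossEntropyLoss
from torch.optim import SGD

from avalanche.models import SimpleMLP
from avalanche.training import Naive  # or avalanche.training.supervised

benchmark = get_fast_benchmark()
model = SimpleMLP(num_classes=10, input_size=6)
strategy = Naive(
    model, SGD(model.parameters(), lr=0.01), CrossEntropyLoss(),
    train_mb_size=32, train_epochs=1, eval_mb_size=32, device="cpu",
)

for experience in benchmark.train_stream:
    strategy.train(experience)
strategy.eval(benchmark.test_stream)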
Example #23
    def __init__(self, scenario="ni", run_id=0):
        """Init.

        :param scenario: The desired CORe50 scenario. Supports 'nc', 'ni', and
            'joint' (a single-experience scenario).
        :param run_id: an integer in [0, 4]. Each run uses a different set of
            expert models and data splits.
        """

        assert scenario in {
            "ni",
            "joint",
            "nc",
        }, "`scenario` argument must be one of {'ni', 'joint', 'nc'}."

        core50_normalization = Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
        core50_train_transforms = Compose([
            RandomHorizontalFlip(p=0.5),
            RandomCrop(size=128, padding=1),
            RandomRotation(15),
            ToTensor(),
            core50_normalization,
        ])
        core50_eval_transforms = Compose([
            CenterCrop(size=128),
            ToTensor(),
            core50_normalization,
        ])

        if scenario == "ni":
            benchmark = CORe50(
                scenario="ni",
                train_transform=core50_train_transforms,
                eval_transform=core50_eval_transforms,
                run=run_id,
            )
        elif scenario == "nc":
            benchmark = CORe50(
                scenario="nc",
                train_transform=core50_train_transforms,
                eval_transform=core50_eval_transforms,
                run=run_id,
            )
        elif scenario == "joint":
            core50nc = CORe50(scenario="nc")
            train_cat = AvalancheConcatDataset(
                [e.dataset for e in core50nc.train_stream])
            test_cat = AvalancheConcatDataset(
                [e.dataset for e in core50nc.test_stream])
            benchmark = nc_benchmark(train_cat,
                                     test_cat,
                                     n_experiences=1,
                                     task_labels=False)
        else:
            assert False, "Should never get here."

        n_experiences = len(benchmark.train_stream)
        experts = _load_expert_models(f"{scenario}_core50", run_id,
                                      n_experiences)
        super().__init__(benchmark, experts)

def main(args):
    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")
    # ---------

    # --- TRANSFORMATIONS
    train_transform = transforms.Compose([
        RandomCrop(28, padding=4),
        ToTensor(),
        transforms.Normalize((0.1307, ), (0.3081, ))
    ])
    test_transform = transforms.Compose(
        [ToTensor(), transforms.Normalize((0.1307, ), (0.3081, ))])
    # ---------

    # --- SCENARIO CREATION
    mnist_train = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                        train=True,
                        download=True,
                        transform=train_transform)
    mnist_test = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                       train=False,
                       download=True,
                       transform=test_transform)
    scenario = nc_benchmark(mnist_train,
                            mnist_test,
                            5,
                            task_labels=False,
                            seed=1234)
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=scenario.n_classes)

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(model,
                        SGD(model.parameters(), lr=0.001, momentum=0.9),
                        CrossEntropyLoss(),
                        train_mb_size=100,
                        train_epochs=4,
                        eval_mb_size=100,
                        device=device)

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start of experience: ", experience.current_experience)
        print("Current Classes: ", experience.classes_in_this_experience)

        cl_strategy.train(experience)
        print('Training completed')

        print('Computing accuracy on the whole test set')
        results.append(cl_strategy.eval(scenario.test_stream))
Example #25
    def test_nc_benchmark_transformations_basic(self):
        # Regression for #577
        ds = CIFAR100(root=expanduser("~") + "/.avalanche/data/cifar100/",
                      train=True, download=True)
        ds = AvalancheDataset(ds, transform=ToTensor())

        benchmark = nc_benchmark(
            ds, ds, n_experiences=10, shuffle=True, seed=1234,
            task_labels=False
        )

        exp_0_dataset = benchmark.train_stream[0].dataset
        self.assertIsInstance(exp_0_dataset[0][0], Tensor)
Example #26
    def test_sit_multi_dataset_one_batch_per_set(self):
        split_mapping = [0, 1, 2, 0, 1, 2, 3, 4, 5, 6]
        mnist_train = MNIST(
            root=expanduser("~") + "/.avalanche/data/mnist/",
            train=True,
            download=True,
        )
        mnist_test = MNIST(
            root=expanduser("~") + "/.avalanche/data/mnist/",
            train=False,
            download=True,
        )

        train_part1 = make_nc_transformation_subset(mnist_train, None, None,
                                                    range(3))
        train_part2 = make_nc_transformation_subset(mnist_train, None, None,
                                                    range(3, 10))
        train_part2 = AvalancheSubset(train_part2, class_mapping=split_mapping)

        test_part1 = make_nc_transformation_subset(mnist_test, None, None,
                                                   range(3))
        test_part2 = make_nc_transformation_subset(mnist_test, None, None,
                                                   range(3, 10))
        test_part2 = AvalancheSubset(test_part2, class_mapping=split_mapping)
        my_nc_benchmark = nc_benchmark(
            [train_part1, train_part2],
            [test_part1, test_part2],
            2,
            task_labels=False,
            shuffle=True,
            seed=1234,
            one_dataset_per_exp=True,
        )

        self.assertEqual(2, my_nc_benchmark.n_experiences)
        self.assertEqual(10, my_nc_benchmark.n_classes)

        all_classes = set()
        for batch_id in range(2):
            all_classes.update(
                my_nc_benchmark.classes_in_experience["train"][batch_id])

        self.assertEqual(10, len(all_classes))

        self.assertTrue(
            (my_nc_benchmark.classes_in_experience["train"][0] == {0, 1, 2}
             and my_nc_benchmark.classes_in_experience["train"][1] == set(
                 range(3, 10))) or
            (my_nc_benchmark.classes_in_experience["train"][0] == set(
                range(3, 10)) and
             my_nc_benchmark.classes_in_experience["train"][1] == {0, 1, 2}))

    def test_nc_sit_slicing(self):
        mnist_train = MNIST(
            root=default_dataset_location("mnist"),
            train=True,
            download=True,
        )
        mnist_test = MNIST(
            root=default_dataset_location("mnist"),
            train=False,
            download=True,
        )
        my_nc_benchmark = nc_benchmark(
            mnist_train,
            mnist_test,
            5,
            task_labels=False,
            shuffle=True,
            seed=1234,
        )

        experience: NCExperience
        for batch_id, experience in enumerate(my_nc_benchmark.train_stream):
            self.assertEqual(batch_id, experience.current_experience)
            self.assertIsInstance(experience, NCExperience)

        for batch_id, experience in enumerate(my_nc_benchmark.test_stream):
            self.assertEqual(batch_id, experience.current_experience)
            self.assertIsInstance(experience, NCExperience)

        iterable_slice = [3, 4, 1]
        sliced_stream = my_nc_benchmark.train_stream[iterable_slice]
        self.assertIsInstance(sliced_stream, ClassificationStream)
        self.assertEqual(len(iterable_slice), len(sliced_stream))
        self.assertEqual("train", sliced_stream.name)

        for batch_id, experience in enumerate(sliced_stream):
            self.assertEqual(
                iterable_slice[batch_id], experience.current_experience
            )
            self.assertIsInstance(experience, NCExperience)

        sliced_stream = my_nc_benchmark.test_stream[iterable_slice]
        self.assertIsInstance(sliced_stream, ClassificationStream)
        self.assertEqual(len(iterable_slice), len(sliced_stream))
        self.assertEqual("test", sliced_stream.name)

        for batch_id, experience in enumerate(sliced_stream):
            self.assertEqual(
                iterable_slice[batch_id], experience.current_experience
            )
            self.assertIsInstance(experience, NCExperience)
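
Streams should also accept plain slice objects, not just lists of indices; the test above could be extended with the following (a sketch, assuming slice support in this Avalanche version):

        first_three = my_nc_benchmark.train_stream[:3]
        self.assertEqual(3, len(first_three))
        for batch_id, experience in enumerate(first_three):
            self.assertEqual(batch_id, experience.current_experience)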
    def test_sit_single_dataset_reproducibility_data(self):
        mnist_train = MNIST(
            root=default_dataset_location("mnist"),
            train=True,
            download=True,
        )
        mnist_test = MNIST(
            root=default_dataset_location("mnist"),
            train=False,
            download=True,
        )
        nc_benchmark_ref = nc_benchmark(
            mnist_train,
            mnist_test,
            5,
            task_labels=False,
            shuffle=True,
            seed=5678,
        )

        my_nc_benchmark = nc_benchmark(
            mnist_train,
            mnist_test,
            -1,
            task_labels=False,
            reproducibility_data=nc_benchmark_ref.get_reproducibility_data(),
        )

        self.assertEqual(
            nc_benchmark_ref.train_exps_patterns_assignment,
            my_nc_benchmark.train_exps_patterns_assignment,
        )

        self.assertEqual(
            nc_benchmark_ref.test_exps_patterns_assignment,
            my_nc_benchmark.test_exps_patterns_assignment,
        )
Example #29
    def test_nc_sit_slicing(self):
        mnist_train = MNIST(
            root=expanduser("~") + "/.avalanche/data/mnist/",
            train=True,
            download=True,
        )
        mnist_test = MNIST(
            root=expanduser("~") + "/.avalanche/data/mnist/",
            train=False,
            download=True,
        )
        my_nc_benchmark = nc_benchmark(
            mnist_train,
            mnist_test,
            5,
            task_labels=False,
            shuffle=True,
            seed=1234,
        )

        experience: NCExperience
        for batch_id, experience in enumerate(my_nc_benchmark.train_stream):
            self.assertEqual(batch_id, experience.current_experience)
            self.assertIsInstance(experience, NCExperience)

        for batch_id, experience in enumerate(my_nc_benchmark.test_stream):
            self.assertEqual(batch_id, experience.current_experience)
            self.assertIsInstance(experience, NCExperience)

        iterable_slice = [3, 4, 1]
        sliced_stream = my_nc_benchmark.train_stream[iterable_slice]
        self.assertIsInstance(sliced_stream, GenericScenarioStream)
        self.assertEqual(len(iterable_slice), len(sliced_stream))
        self.assertEqual("train", sliced_stream.name)

        for batch_id, experience in enumerate(sliced_stream):
            self.assertEqual(iterable_slice[batch_id],
                             experience.current_experience)
            self.assertIsInstance(experience, NCExperience)

        sliced_stream = my_nc_benchmark.test_stream[iterable_slice]
        self.assertIsInstance(sliced_stream, GenericScenarioStream)
        self.assertEqual(len(iterable_slice), len(sliced_stream))
        self.assertEqual("test", sliced_stream.name)

        for batch_id, experience in enumerate(sliced_stream):
            self.assertEqual(iterable_slice[batch_id],
                             experience.current_experience)
            self.assertIsInstance(experience, NCExperience)
Example #30
    def test_sit_single_dataset_fixed_subset_no_remap_idx(self):
        order = [2, 5, 7, 8, 9, 0, 1, 4]
        mnist_train = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                            train=True, download=True)
        mnist_test = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                           train=False, download=True)
        my_nc_benchmark = nc_benchmark(
            mnist_train, mnist_test, 2, task_labels=True,
            fixed_class_order=order, class_ids_from_zero_in_each_exp=False)

        self.assertEqual(2, len(my_nc_benchmark.classes_in_experience))

        all_classes = set()
        for task_id in range(2):
            self.assertEqual(
                4, len(my_nc_benchmark.classes_in_experience[task_id])
            )
            self.assertEqual(set(order[task_id*4:(task_id+1)*4]),
                             my_nc_benchmark.original_classes_in_exp[task_id])
            all_classes.update(my_nc_benchmark.classes_in_experience[task_id])

        self.assertEqual(set(order), all_classes)