Example #1
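Wraps the streams of the fast benchmark in an OnlineCLScenario and prints the name of each stream exposed by the resulting online scenario.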
    def test_ocl_scenario(self):
        benchmark = get_fast_benchmark()
        batch_streams = benchmark.streams.values()
        ocl_benchmark = OnlineCLScenario(batch_streams)

        for s in ocl_benchmark.streams.values():
            print(s.name)
Example #2
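Trains a Naive strategy with a MultiHeadClassifier on two task-labeled experiences, then checks that the multi-head forward pass produces the same output as calling the corresponding per-task head directly.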
    def test_multihead_head_selection(self):
        # Check if the optimizer is updated correctly
        # when heads are created and updated.
        model = MultiHeadClassifier(in_features=6)
        optimizer = SGD(model.parameters(), lr=1e-3)
        criterion = CrossEntropyLoss()
        benchmark = get_fast_benchmark(use_task_labels=True, shuffle=False)

        strategy = Naive(model, optimizer, criterion,
                         train_mb_size=100, train_epochs=1,
                         eval_mb_size=100, device='cpu')
        strategy.evaluator.loggers = [TextLogger(sys.stdout)]

        # initialize head
        strategy.train(benchmark.train_stream[0])
        strategy.train(benchmark.train_stream[4])

        # create models with fixed head
        model_t0 = model.classifiers['0']
        model_t4 = model.classifiers['4']

        # check head task0
        for x, y, t in DataLoader(benchmark.train_stream[0].dataset):
            y_mh = model(x, t)
            y_t = model_t0(x)
            assert ((y_mh - y_t) ** 2).sum() < 1.e-7
            break

        # check head task4
        for x, y, t in DataLoader(benchmark.train_stream[4].dataset):
            y_mh = model(x, t)
            y_t = model_t4(x)
            assert ((y_mh - y_t) ** 2).sum() < 1.e-7
            break
Example #3
Trains a Cumulative strategy with a multi-head MLP on a task-labeled benchmark and asserts that the average stream accuracy exceeds 0.7.
    def test_multihead_cumulative(self):
        # check that multi-head reaches high enough accuracy.
        # Ensure nothing weird is happening with the multiple heads.

        set_deterministic_run(seed=0)

        model = MHTestMLP(input_size=6, hidden_size=100)
        criterion = CrossEntropyLoss()
        optimizer = SGD(model.parameters(), lr=1)

        main_metric = StreamAccuracy()
        exp_acc = ExperienceAccuracy()
        evalp = EvaluationPlugin(main_metric, exp_acc, loggers=None)
        strategy = Cumulative(
            model,
            optimizer,
            criterion,
            train_mb_size=64,
            device=get_device(),
            eval_mb_size=512,
            train_epochs=6,
            evaluator=evalp,
        )
        benchmark = get_fast_benchmark(use_task_labels=True)

        for train_batch_info in benchmark.train_stream:
            strategy.train(train_batch_info)
        strategy.eval(benchmark.train_stream[:])
        print("TRAIN STREAM ACC: ", main_metric.result())
        assert (sum(main_metric.result().values()) /
                float(len(main_metric.result().keys())) > 0.7)
Example #4
Trains a PNNStrategy (Progressive Neural Networks) on a task-labeled benchmark and asserts that the average stream accuracy exceeds 0.5.
    def test_pnn(self):
        # check that pnn reaches high enough accuracy.
        # Ensure nothing weird is happening with the multiple heads.

        set_deterministic_run(seed=0)

        main_metric = StreamAccuracy()
        exp_acc = ExperienceAccuracy()
        evalp = EvaluationPlugin(main_metric, exp_acc, loggers=None)
        model = PNN(num_layers=1, in_features=6, hidden_features_per_column=50)
        optimizer = SGD(model.parameters(), lr=0.1)
        strategy = PNNStrategy(
            model,
            optimizer,
            train_mb_size=32,
            device=get_device(),
            eval_mb_size=512,
            train_epochs=1,
            evaluator=evalp,
        )
        benchmark = get_fast_benchmark(use_task_labels=True)

        for train_batch_info in benchmark.train_stream:
            strategy.train(train_batch_info)

        strategy.eval(benchmark.train_stream[:])
        print("TRAIN STREAM ACC: ", main_metric.result())
        assert (sum(main_metric.result().values()) /
                float(len(main_metric.result().keys())) > 0.5)
Example #5
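Registers a plugin that calls stop_training() once the clock reaches ten iterations within an epoch and verifies the resulting iteration count after each experience.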
    def test_early_stop(self):
        class EarlyStopP(SupervisedPlugin):
            def after_training_iteration(
                self, strategy: "SupervisedTemplate", **kwargs
            ):
                if strategy.clock.train_epoch_iterations == 10:
                    strategy.stop_training()

        model = SimpleMLP(input_size=6, hidden_size=100)
        criterion = CrossEntropyLoss()
        optimizer = SGD(model.parameters(), lr=1)

        strategy = Cumulative(
            model,
            optimizer,
            criterion,
            train_mb_size=1,
            device=get_device(),
            eval_mb_size=512,
            train_epochs=1,
            evaluator=None,
            plugins=[EarlyStopP()],
        )
        benchmark = get_fast_benchmark()

        for train_batch_info in benchmark.train_stream:
            strategy.train(train_batch_info)
            assert strategy.clock.train_epoch_iterations == 11
Example #6
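Trains a Naive strategy with a ReplayPlugin and checks after every experience that the storage policy's buffer datasets together hold min(mem_size, samples seen so far) examples.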
    def _test_replay_balanced_memory(self, storage_policy, mem_size):
        benchmark = get_fast_benchmark(use_task_labels=True)
        model = SimpleMLP(input_size=6, hidden_size=10)
        replayPlugin = ReplayPlugin(
            mem_size=mem_size, storage_policy=storage_policy
        )
        cl_strategy = Naive(
            model,
            SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=0.001),
            CrossEntropyLoss(),
            train_mb_size=32,
            train_epochs=1,
            eval_mb_size=100,
            plugins=[replayPlugin],
        )

        n_seen_data = 0
        for step in benchmark.train_stream:
            n_seen_data += len(step.dataset)
            mem_fill = min(mem_size, n_seen_data)
            cl_strategy.train(step)
            lengths = []
            for d in replayPlugin.storage_policy.buffer_datasets:
                lengths.append(len(d))
            self.assertEqual(sum(lengths), mem_fill)  # Always fully filled
Example #7
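Sets up a Naive strategy with an EvaluationPlugin (loss metrics at every level) and an EarlyStoppingPlugin monitoring the train stream, then trains on the first experience to exercise the plugin compatibility checks.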
    def test_plugins_compatibility_checks(self):
        model = SimpleMLP(input_size=6, hidden_size=10)
        benchmark = get_fast_benchmark()
        optimizer = SGD(model.parameters(), lr=1e-3)
        criterion = CrossEntropyLoss()

        evalp = EvaluationPlugin(
            loss_metrics(minibatch=True,
                         epoch=True,
                         experience=True,
                         stream=True),
            loggers=[InteractiveLogger()],
            strict_checks=None,
        )

        strategy = Naive(
            model,
            optimizer,
            criterion,
            train_epochs=2,
            eval_every=-1,
            evaluator=evalp,
            plugins=[
                EarlyStoppingPlugin(patience=10, val_stream_name="train")
            ],
        )
        strategy.train(benchmark.train_stream[0])
Example #8
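Trains a Naive strategy with a ReplayPlugin backed by the given storage policy and asserts that the buffer datasets together always fill the policy's maximum size.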
    def assert_balancing(self, policy):
        benchmark = get_fast_benchmark(use_task_labels=True)
        replay = ReplayPlugin(mem_size=100, storage_policy=policy)
        model = SimpleMLP(num_classes=benchmark.n_classes)

        # CREATE THE STRATEGY INSTANCE (NAIVE)
        cl_strategy = Naive(
            model,
            SGD(model.parameters(), lr=0.001),
            CrossEntropyLoss(),
            train_mb_size=100,
            train_epochs=0,
            eval_mb_size=100,
            plugins=[replay],
            evaluator=None,
        )

        for exp in benchmark.train_stream:
            cl_strategy.train(exp)

            ext_mem = policy.buffer_groups
            ext_mem_data = policy.buffer_datasets
            print(list(ext_mem.keys()), [len(el) for el in ext_mem_data])

            # buffer size should equal self.mem_size if data is large enough
            len_tot = sum([len(el) for el in ext_mem_data])
            assert len_tot == policy.max_size
Example #9
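Helper that returns the fast NC benchmark, optionally with task labels.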
    def load_benchmark(self, use_task_labels=False):
        """
        Returns an NC benchmark built from a fake dataset of 10 classes,
        5 experiences, 2 classes per experience.

        :param use_task_labels: if True, the benchmark also provides task
            labels for each experience.
        """
        return get_fast_benchmark(use_task_labels=use_task_labels)
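The docstring above describes the fast benchmark as an NC benchmark with 10 classes split across 5 experiences (2 classes each). Below is a minimal sketch of how such a benchmark is typically consumed; it reuses only names appearing in these examples, assumes get_fast_benchmark is a test helper defined alongside these tests (not a public API), and assumes the import paths of a recent Avalanche release.

    from torch.nn import CrossEntropyLoss
    from torch.optim import SGD
    from avalanche.models import SimpleMLP
    from avalanche.training.supervised import Naive

    # Small synthetic NC benchmark: 10 classes, 5 experiences, 2 classes each.
    benchmark = get_fast_benchmark(use_task_labels=False)

    model = SimpleMLP(input_size=6, hidden_size=10)
    strategy = Naive(
        model,
        SGD(model.parameters(), lr=1e-3),
        CrossEntropyLoss(),
        train_mb_size=32,
        train_epochs=1,
        eval_mb_size=100,
        device="cpu",
    )

    # Train on one experience at a time, then evaluate on the full test stream.
    for experience in benchmark.train_stream:
        strategy.train(experience)
    strategy.eval(benchmark.test_stream)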
Example #10
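Trains a Naive strategy with a multi-task MLP on two experiences and checks, via parameter data pointers, that newly created heads are added to the optimizer and that stale references to updated heads are dropped.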
    def test_multihead_head_creation(self):
        # Check if the optimizer is updated correctly
        # when heads are created and updated.
        model = MTSimpleMLP(input_size=6, hidden_size=10)
        optimizer = SGD(model.parameters(), lr=1e-3)
        criterion = CrossEntropyLoss()
        benchmark = get_fast_benchmark(use_task_labels=True, shuffle=False)

        strategy = Naive(
            model,
            optimizer,
            criterion,
            train_mb_size=100,
            train_epochs=1,
            eval_mb_size=100,
            device="cpu",
        )
        strategy.evaluator.loggers = [TextLogger(sys.stdout)]
        print(
            "Current Classes: ",
            benchmark.train_stream[4].classes_in_this_experience,
        )
        print(
            "Current Classes: ",
            benchmark.train_stream[0].classes_in_this_experience,
        )

        # head creation
        strategy.train(benchmark.train_stream[0])
        w_ptr = model.classifier.classifiers["0"].classifier.weight.data_ptr()
        b_ptr = model.classifier.classifiers["0"].classifier.bias.data_ptr()
        opt_params_ptrs = [
            w.data_ptr() for group in optimizer.param_groups
            for w in group["params"]
        ]
        assert w_ptr in opt_params_ptrs
        assert b_ptr in opt_params_ptrs

        # head update
        strategy.train(benchmark.train_stream[4])
        head0 = model.classifier.classifiers["0"].classifier
        head4 = model.classifier.classifiers["4"].classifier
        w_ptr_t0 = head0.weight.data_ptr()
        b_ptr_t0 = head0.bias.data_ptr()
        w_ptr_new = head4.weight.data_ptr()
        b_ptr_new = head4.bias.data_ptr()
        opt_params_ptrs = [
            w.data_ptr() for group in optimizer.param_groups
            for w in group["params"]
        ]

        assert w_ptr not in opt_params_ptrs  # head0 has been updated
        assert b_ptr not in opt_params_ptrs  # head0 has been updated
        assert w_ptr_t0 in opt_params_ptrs
        assert b_ptr_t0 in opt_params_ptrs
        assert w_ptr_new in opt_params_ptrs
        assert b_ptr_new in opt_params_ptrs
Example #11
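Shows how eval_every controls periodic evaluation: -1 never evaluates, 0 evaluates before training and at the end, and 1 additionally evaluates after every epoch.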
    def test_periodic_eval(self):
        model = SimpleMLP(input_size=6, hidden_size=10)
        benchmark = get_fast_benchmark()
        optimizer = SGD(model.parameters(), lr=1e-3)
        criterion = CrossEntropyLoss()
        curve_key = 'Top1_Acc_Stream/eval_phase/train_stream/Task000'

        ###################
        # Case #1: No eval
        ###################
        # we use stream acc. because it emits a single value
        # for each eval loop.
        acc = StreamAccuracy()
        strategy = Naive(model,
                         optimizer,
                         criterion,
                         train_epochs=2,
                         eval_every=-1,
                         evaluator=EvaluationPlugin(acc))
        strategy.train(benchmark.train_stream[0])
        # eval is not called in this case
        assert len(strategy.evaluator.get_all_metrics()) == 0

        ###################
        # Case #2: Eval at the end only and before training
        ###################
        acc = StreamAccuracy()
        strategy = Naive(model,
                         optimizer,
                         criterion,
                         train_epochs=2,
                         eval_every=0,
                         evaluator=EvaluationPlugin(acc))
        strategy.train(benchmark.train_stream[0])
        # eval is called once at the end of the training loop
        curve = strategy.evaluator.get_all_metrics()[curve_key][1]
        assert len(curve) == 2

        ###################
        # Case #3: Eval after every epoch and before training
        ###################
        acc = StreamAccuracy()
        strategy = Naive(model,
                         optimizer,
                         criterion,
                         train_epochs=2,
                         eval_every=1,
                         evaluator=EvaluationPlugin(acc))
        strategy.train(benchmark.train_stream[0])
        # eval is called after every epoch + the end of the training loop
        curve = strategy.evaluator.get_all_metrics()[curve_key][1]
        assert len(curve) == 4
Example #12
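Splits an experience with class_balanced_split_strategy and checks that train/validation class counts stay balanced and that the requested validation ratio is respected per class.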
    def test_dataset_benchmark(self):
        benchmark = get_fast_benchmark(n_samples_per_class=1000)
        exp = benchmark.train_stream[0]
        num_classes = len(exp.classes_in_this_experience)

        train_d, valid_d = class_balanced_split_strategy(0.5, exp)
        assert abs(len(train_d) - len(valid_d)) <= num_classes
        for cid in exp.classes_in_this_experience:
            train_cnt = (torch.as_tensor(train_d.targets) == cid).sum()
            valid_cnt = (torch.as_tensor(valid_d.targets) == cid).sum()
            assert abs(train_cnt - valid_cnt) <= 1

        ratio = 0.123
        len_data = len(exp.dataset)
        train_d, valid_d = class_balanced_split_strategy(ratio, exp)
        assert_almost_equal(len(valid_d) / len_data, ratio, decimal=2)
        for cid in exp.classes_in_this_experience:
            data_cnt = (torch.as_tensor(exp.dataset.targets) == cid).sum()
            valid_cnt = (torch.as_tensor(valid_d.targets) == cid).sum()
            assert_almost_equal(valid_cnt / data_cnt, ratio, decimal=2)
Example #13
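Fixture that builds the fast benchmark without task labels and without shuffling.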
    def setUp(self):
        common_setups()
        self.benchmark = get_fast_benchmark(
            use_task_labels=False, shuffle=False)
Example #14
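Fixture that builds the fast benchmark with task labels.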
    def setUp(self) -> None:
        self.benchmark = get_fast_benchmark(use_task_labels=True)
Example #15
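Class-level fixture that builds the benchmark, a SimpleMLP model, an SGD optimizer, and a temporary log directory.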
    @classmethod
    def setUpClass(cls) -> None:
        cls.benchmark = get_fast_benchmark()
        cls.model = SimpleMLP(input_size=6, hidden_size=10)
        cls.optimizer = SGD(cls.model.parameters(), lr=0.001)
        cls.tempdir = tempfile.TemporaryDirectory()
        cls.logdir = cls.tempdir.__enter__()
Example #16
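Fixture that builds a SimpleMLP model, an SGD optimizer, a CrossEntropyLoss criterion, and the fast benchmark.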
    def setUp(self) -> None:
        self.model = SimpleMLP(input_size=6, hidden_size=10)
        self.optimizer = SGD(self.model.parameters(), lr=1e-3)
        self.criterion = CrossEntropyLoss()

        self.benchmark = get_fast_benchmark()
Example #17
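Fixture that builds a shuffled, task-labeled fast benchmark and selects the computation device.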
    def setUp(self):
        common_setups()
        self.benchmark = get_fast_benchmark(use_task_labels=True, shuffle=True)
        self.device = get_device()