Example no. 1
    def test_early_stop(self):
        class EarlyStopP(StrategyPlugin):
            def after_training_iteration(self, strategy: 'BaseStrategy',
                                         **kwargs):
                if strategy.mb_it == 10:
                    strategy.stop_training()

        model = SimpleMLP(input_size=6, hidden_size=100)
        criterion = CrossEntropyLoss()
        optimizer = SGD(model.parameters(), lr=1)

        strategy = Cumulative(model,
                              optimizer,
                              criterion,
                              train_mb_size=1,
                              device=get_device(),
                              eval_mb_size=512,
                              train_epochs=1,
                              evaluator=None,
                              plugins=[EarlyStopP()])
        scenario = get_fast_scenario()

        for train_batch_info in scenario.train_stream:
            strategy.train(train_batch_info)
            assert strategy.mb_it == 11
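The same hook can drive other stopping conditions. Below is a minimal sketch of a loss-based variant (assumptions: the StrategyPlugin import path and the strategy.loss attribute holding the last minibatch loss, as in Avalanche's BaseStrategy; the threshold value is illustrative):

    from avalanche.training.plugins import StrategyPlugin

    class LossEarlyStopP(StrategyPlugin):
        """Stop the current training loop once the loss is low enough."""

        def __init__(self, loss_threshold=0.05):
            super().__init__()
            self.loss_threshold = loss_threshold

        def after_training_iteration(self, strategy, **kwargs):
            # strategy.loss is the loss of the last processed minibatch
            if strategy.loss.item() < self.loss_threshold:
                strategy.stop_training()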
Example no. 2

    def test_multihead_cumulative(self):
        # check that multi-head reaches high enough accuracy.
        # Ensure nothing weird is happening with the multiple heads.
        model = MHTestMLP(input_size=6, hidden_size=100)
        criterion = CrossEntropyLoss()
        optimizer = SGD(model.parameters(), lr=1)

        main_metric = StreamAccuracy()
        exp_acc = ExperienceAccuracy()
        evalp = EvaluationPlugin(main_metric, exp_acc, loggers=None)
        strategy = Cumulative(model,
                              optimizer,
                              criterion,
                              train_mb_size=32,
                              device=get_device(),
                              eval_mb_size=512,
                              train_epochs=1,
                              evaluator=evalp)
        scenario = get_fast_scenario(use_task_labels=True)

        for train_batch_info in scenario.train_stream:
            strategy.train(train_batch_info)
        strategy.eval(scenario.train_stream[:])
        print("TRAIN STREAM ACC: ", main_metric.result())
        assert sum(main_metric.result().values()) / \
               float(len(main_metric.result().keys())) > 0.7
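The per-task values returned by StreamAccuracy.result() are averaged inline above. A tiny helper names that operation (a sketch; it only assumes result() returns a mapping of floats):

    from statistics import mean

    def mean_result(metric):
        """Average a metric that reports one value per task."""
        return mean(metric.result().values())

    # usage: assert mean_result(main_metric) > 0.7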
Example no. 3
    def _test_replay_balanced_memory(self, storage_policy, mem_size):
        scenario = get_fast_scenario(use_task_labels=True)
        model = SimpleMLP(input_size=6, hidden_size=10)
        replayPlugin = ReplayPlugin(mem_size=mem_size,
                                    storage_policy=storage_policy)
        cl_strategy = Naive(model,
                            SGD(model.parameters(),
                                lr=0.001,
                                momentum=0.9,
                                weight_decay=0.001),
                            CrossEntropyLoss(),
                            train_mb_size=32,
                            train_epochs=1,
                            eval_mb_size=100,
                            plugins=[replayPlugin])

        n_seen_data = 0
        for step in scenario.train_stream:
            n_seen_data += len(step.dataset)
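            # before the buffer is full it can only hold what has been
            # seen so far; afterwards it is capped at mem_size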
            mem_fill = min(mem_size, n_seen_data)
            cl_strategy.train(step)
            ext_mem = replayPlugin.ext_mem
            lengths = []
            for task_id in ext_mem.keys():
                lengths.append(len(ext_mem[task_id]))
            self.assertEqual(sum(lengths), mem_fill)  # Always fully filled
Example no. 4
    def load_scenario(self, use_task_labels=False):
        """
        Returns a NC Scenario from a fake dataset of 10 classes, 5 experiences,
        2 classes per experience.

        :param use_task_labels: if True, each experience carries task labels.
        """
        return get_fast_scenario(use_task_labels=use_task_labels)
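get_fast_scenario is a shared fixture that is not shown in these examples. A minimal sketch of what it could look like, assuming the nc_benchmark generator (named nc_scenario in older Avalanche releases) and a fake tensor dataset exposing the targets field the generator expects:

    import torch
    from torch.utils.data import TensorDataset
    from avalanche.benchmarks.generators import nc_benchmark

    def get_fast_scenario(use_task_labels=False, shuffle=True):
        """Fake 10-class dataset split into 5 experiences of 2 classes."""
        x = torch.randn(100, 6)
        y = torch.randint(0, 10, (100,))
        train, test = TensorDataset(x, y), TensorDataset(x, y)
        train.targets = y.tolist()  # nc_benchmark reads a targets field
        test.targets = y.tolist()
        return nc_benchmark(train, test, n_experiences=5,
                            task_labels=use_task_labels, shuffle=shuffle)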
Example no. 5
    def test_periodic_eval(self):
        model = SimpleMLP(input_size=6, hidden_size=10)
        scenario = get_fast_scenario()
        optimizer = SGD(model.parameters(), lr=1e-3)
        criterion = CrossEntropyLoss()
        curve_key = 'Top1_Acc_Stream/eval_phase/train_stream/Task000'

        ###################
        # Case #1: No eval
        ###################
        # we use stream acc. because it emits a single value
        # for each eval loop.
        acc = StreamAccuracy()
        strategy = Naive(model,
                         optimizer,
                         criterion,
                         train_epochs=2,
                         eval_every=-1,
                         evaluator=EvaluationPlugin(acc))
        strategy.train(scenario.train_stream[0])
        # eval is not called in this case
        assert len(strategy.evaluator.get_all_metrics()) == 0

        ###################
        # Case #2: Eval at the end only
        ###################
        acc = StreamAccuracy()
        strategy = Naive(model,
                         optimizer,
                         criterion,
                         train_epochs=2,
                         eval_every=0,
                         evaluator=EvaluationPlugin(acc))
        strategy.train(scenario.train_stream[0])
        # eval is called once at the end of the training loop
        curve = strategy.evaluator.get_all_metrics()[curve_key][1]
        assert len(curve) == 1

        ###################
        # Case #3: Eval after every epoch
        ###################
        acc = StreamAccuracy()
        strategy = Naive(model,
                         optimizer,
                         criterion,
                         train_epochs=2,
                         eval_every=1,
                         evaluator=EvaluationPlugin(acc))
        strategy.train(scenario.train_stream[0])
        # eval is called after every epoch + the end of the training loop
        curve = strategy.evaluator.get_all_metrics()[curve_key][1]
        assert len(curve) == 3
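For reference, the indexing above relies on get_all_metrics() mapping each metric name to a pair of parallel lists, x positions and values; that is why the code takes element [1]. A short sketch of reading a curve back:

    all_metrics = strategy.evaluator.get_all_metrics()
    steps, values = all_metrics[curve_key]
    for step, value in zip(steps, values):
        print(f"step {step}: stream accuracy {value:.3f}")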
Example no. 6
    def test_multihead_head_creation(self):
        # Check if the optimizer is updated correctly
        # when heads are created and updated.
        model = MTSimpleMLP(input_size=6, hidden_size=10)
        optimizer = SGD(model.parameters(), lr=1e-3)
        criterion = CrossEntropyLoss()
        scenario = get_fast_scenario(use_task_labels=True, shuffle=False)

        strategy = Naive(model,
                         optimizer,
                         criterion,
                         train_mb_size=100,
                         train_epochs=1,
                         eval_mb_size=100,
                         device='cpu')
        strategy.evaluator.loggers = [TextLogger(sys.stdout)]
        print("Current Classes: ",
              scenario.train_stream[4].classes_in_this_experience)
        print("Current Classes: ",
              scenario.train_stream[0].classes_in_this_experience)

        # head creation
        strategy.train(scenario.train_stream[0])
        w_ptr = model.classifier.classifiers['0'].classifier.weight.data_ptr()
        b_ptr = model.classifier.classifiers['0'].classifier.bias.data_ptr()
        opt_params_ptrs = [
            w.data_ptr() for group in optimizer.param_groups
            for w in group['params']
        ]
        assert w_ptr in opt_params_ptrs
        assert b_ptr in opt_params_ptrs

        # head update
        strategy.train(scenario.train_stream[4])
        heads = model.classifier.classifiers
        w_ptr_t0 = heads['0'].classifier.weight.data_ptr()
        b_ptr_t0 = heads['0'].classifier.bias.data_ptr()
        w_ptr_new = heads['4'].classifier.weight.data_ptr()
        b_ptr_new = heads['4'].classifier.bias.data_ptr()
        opt_params_ptrs = [
            w.data_ptr() for group in optimizer.param_groups
            for w in group['params']
        ]

        assert w_ptr not in opt_params_ptrs  # head0 has been updated
        assert b_ptr not in opt_params_ptrs  # head0 has been updated
        assert w_ptr_t0 in opt_params_ptrs
        assert b_ptr_t0 in opt_params_ptrs
        assert w_ptr_new in opt_params_ptrs
        assert b_ptr_new in opt_params_ptrs
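The pointer-collection expression appears twice in this test; factoring it into a helper keeps the asserts readable (plain PyTorch, no assumptions beyond the standard optimizer API):

    def optimizer_param_ptrs(optimizer):
        """Data pointers of all tensors currently held by the optimizer."""
        return {p.data_ptr()
                for group in optimizer.param_groups
                for p in group['params']}

    # usage: assert w_ptr in optimizer_param_ptrs(optimizer)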
Example no. 7
    def assert_balancing(self, policy):
        ext_mem = policy.ext_mem
        scenario = get_fast_scenario(use_task_labels=True)
        replay = ReplayPlugin(mem_size=100, storage_policy=policy)
        model = SimpleMLP(num_classes=scenario.n_classes)

        # CREATE THE STRATEGY INSTANCE (NAIVE)
        cl_strategy = Naive(model,
                            SGD(model.parameters(), lr=0.001),
                            CrossEntropyLoss(), train_mb_size=100,
                            train_epochs=0,
                            eval_mb_size=100, plugins=[replay], evaluator=None)

        for exp in scenario.train_stream:
            cl_strategy.train(exp)
            print(list(ext_mem.keys()), [len(el) for el in ext_mem.values()])

            # the total buffer size should equal policy.mem_size once
            # enough data has been seen
            len_tot = sum([len(el) for el in ext_mem.values()])
            assert len_tot == policy.mem_size
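Note that the assert above only checks the total buffer size. To verify balance proper, one could also compare the per-task lengths, as in this sketch (assumption: the policy targets an even split, so lengths differ by at most one sample when mem_size does not divide evenly):

    lengths = [len(el) for el in ext_mem.values()]
    assert max(lengths) - min(lengths) <= 1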
Example no. 8
    def test_multihead_head_selection(self):
        # Check that the forward pass routes each batch to the head
        # matching its task label.
        model = MultiHeadClassifier(in_features=6)
        optimizer = SGD(model.parameters(), lr=1e-3)
        criterion = CrossEntropyLoss()
        scenario = get_fast_scenario(use_task_labels=True, shuffle=False)

        strategy = Naive(model,
                         optimizer,
                         criterion,
                         train_mb_size=100,
                         train_epochs=1,
                         eval_mb_size=100,
                         device='cpu')
        strategy.evaluator.loggers = [TextLogger(sys.stdout)]

        # initialize head
        strategy.train(scenario.train_stream[0])
        strategy.train(scenario.train_stream[4])

        # create models with fixed head
        model_t0 = model.classifiers['0']
        model_t4 = model.classifiers['4']

        # check head task0
        for x, y, t in DataLoader(scenario.train_stream[0].dataset):
            y_mh = model(x, t)
            y_t = model_t0(x)
            assert ((y_mh - y_t)**2).sum() < 1.e-7
            break

        # check head task4
        for x, y, t in DataLoader(scenario.train_stream[4].dataset):
            y_mh = model(x, t)
            y_t = model_t4(x)
            assert ((y_mh - y_t)**2).sum() < 1.e-7
            break
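The squared-error check above can also be written with torch.allclose, the idiomatic PyTorch tensor comparison:

    assert torch.allclose(y_mh, y_t, atol=1e-7)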
Example no. 9
    def test_pnn(self):
        # check that pnn reaches high enough accuracy.
        # Ensure nothing weird is happening with the multiple heads.
        main_metric = StreamAccuracy()
        exp_acc = ExperienceAccuracy()
        evalp = EvaluationPlugin(main_metric, exp_acc, loggers=None)
        strategy = PNNStrategy(1,    # num_layers
                               6,    # in_features
                               50,   # hidden_features_per_column
                               0.1,  # lr
                               train_mb_size=32,
                               device=get_device(),
                               eval_mb_size=512,
                               train_epochs=1,
                               evaluator=evalp)
        scenario = get_fast_scenario(use_task_labels=True)

        for train_batch_info in scenario.train_stream:
            strategy.train(train_batch_info)

        strategy.eval(scenario.train_stream[:])
        print("TRAIN STREAM ACC: ", main_metric.result())
        assert main_metric.result() > 0.5
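These examples evaluate on the train stream only; the held-out stream works the same way (a sketch, assuming the scenario exposes a test_stream as Avalanche benchmarks do):

    test_results = strategy.eval(scenario.test_stream)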
Example no. 10
    def setUp(self):
        common_setups()
        self.scenario = get_fast_scenario(use_task_labels=False,
                                          shuffle=False)
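common_setups is another shared fixture not shown in these examples. A minimal sketch, assuming its main job is fixing RNG seeds so the fake scenarios are reproducible:

    import random

    import numpy as np
    import torch

    def common_setups(seed=0):
        """Fix the relevant RNG seeds for reproducible test runs."""
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)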