Code Example #1
    def test_pnn(self):
        # Check that PNN reaches a high enough accuracy.
        # Ensure nothing weird is happening with the multiple heads.

        set_deterministic_run(seed=0)

        main_metric = StreamAccuracy()
        exp_acc = ExperienceAccuracy()
        evalp = EvaluationPlugin(main_metric, exp_acc, loggers=None)
        model = PNN(num_layers=1, in_features=6, hidden_features_per_column=50)
        optimizer = SGD(model.parameters(), lr=0.1)
        strategy = PNNStrategy(
            model,
            optimizer,
            train_mb_size=32,
            device=get_device(),
            eval_mb_size=512,
            train_epochs=1,
            evaluator=evalp,
        )
        benchmark = get_fast_benchmark(use_task_labels=True)

        for train_batch_info in benchmark.train_stream:
            strategy.train(train_batch_info)

        strategy.eval(benchmark.train_stream[:])
        print("TRAIN STREAM ACC: ", main_metric.result())
        assert (sum(main_metric.result().values()) /
                float(len(main_metric.result().keys())) > 0.5)
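The snippets on this page are test methods lifted from a larger module, so their imports live elsewhere. Below is a minimal import sketch of what they assume is in scope; the module paths are an assumption and have shifted between Avalanche releases (e.g. PNNStrategy and Cumulative have lived under both avalanche.training.strategies and avalanche.training.supervised).

    # Import sketch only; verify paths against the Avalanche version in use.
    from torch.nn import CrossEntropyLoss
    from torch.optim import SGD

    from avalanche.models import PNN
    from avalanche.training.plugins import EvaluationPlugin
    from avalanche.training.supervised import Cumulative, PNNStrategy
    # StreamAccuracy / ExperienceAccuracy are plugin-metric classes; this
    # module path is an assumption:
    from avalanche.evaluation.metrics.accuracy import (
        ExperienceAccuracy,
        StreamAccuracy,
    )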
Code Example #2
    def test_multihead_cumulative(self):
        # Check that the multi-head model reaches a high enough accuracy.
        # Ensure nothing weird is happening with the multiple heads.

        set_deterministic_run(seed=0)

        model = MHTestMLP(input_size=6, hidden_size=100)
        criterion = CrossEntropyLoss()
        optimizer = SGD(model.parameters(), lr=1)

        main_metric = StreamAccuracy()
        exp_acc = ExperienceAccuracy()
        evalp = EvaluationPlugin(main_metric, exp_acc, loggers=None)
        strategy = Cumulative(
            model,
            optimizer,
            criterion,
            train_mb_size=64,
            device=get_device(),
            eval_mb_size=512,
            train_epochs=6,
            evaluator=evalp,
        )
        benchmark = get_fast_benchmark(use_task_labels=True)

        for train_batch_info in benchmark.train_stream:
            strategy.train(train_batch_info)
        strategy.eval(benchmark.train_stream[:])
        print("TRAIN STREAM ACC: ", main_metric.result())
        assert (sum(main_metric.result().values()) /
                float(len(main_metric.result().keys())) > 0.7)
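MHTestMLP is a model defined in the test utilities rather than in the library itself. Here is a plausible sketch, assuming Avalanche's MultiTaskModule and MultiHeadClassifier; the real test class may differ in detail.

    import torch
    from torch import nn

    from avalanche.models import MultiHeadClassifier, MultiTaskModule

    class MHTestMLP(MultiTaskModule):
        """Sketch: one hidden layer, one classification head per task."""

        def __init__(self, input_size, hidden_size):
            super().__init__()
            self.fc1 = nn.Linear(input_size, hidden_size)
            self.classifier = MultiHeadClassifier(hidden_size)

        def forward(self, x, task_labels):
            x = torch.relu(self.fc1(x))
            # MultiHeadClassifier routes each sample to the head matching
            # its task label, creating new heads as new tasks appear.
            return self.classifier(x, task_labels)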
Code Example #3
    def test_multihead_cumulative(self):
        # Check that the multi-head model reaches a high enough accuracy.
        # Ensure nothing weird is happening with the multiple heads.
        model = MHTestMLP(input_size=6, hidden_size=100)
        criterion = CrossEntropyLoss()
        optimizer = SGD(model.parameters(), lr=1)

        main_metric = StreamAccuracy()
        exp_acc = ExperienceAccuracy()
        evalp = EvaluationPlugin(main_metric, exp_acc, loggers=None)
        strategy = Cumulative(model,
                              optimizer,
                              criterion,
                              train_mb_size=32,
                              device=get_device(),
                              eval_mb_size=512,
                              train_epochs=1,
                              evaluator=evalp)
        scenario = get_fast_scenario(use_task_labels=True)

        for train_batch_info in scenario.train_stream:
            strategy.train(train_batch_info)
        strategy.eval(scenario.train_stream[:])
        print("TRAIN STREAM ACC: ", main_metric.result())
        assert main_metric.result() > 0.7
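get_fast_scenario here plays the same role as get_fast_benchmark in the first two examples: a tiny synthetic class-incremental benchmark so the test finishes in seconds. A rough sketch under assumed APIs follows; the real helper's data generation may differ, and the Gaussian blobs below are just an easily learnable stand-in.

    import torch
    from torch.utils.data import TensorDataset

    from avalanche.benchmarks.generators import nc_benchmark

    def get_fast_benchmark(use_task_labels=False, n_classes=10,
                           n_features=6, samples_per_class=100):
        # Sketch: one Gaussian blob per class so the data is learnable,
        # split into 5 class-incremental experiences by nc_benchmark.
        xs, ys = [], []
        for c in range(n_classes):
            center = 5.0 * torch.randn(n_features)
            xs.append(center + torch.randn(samples_per_class, n_features))
            ys.append(torch.full((samples_per_class,), c, dtype=torch.long))
        x, y = torch.cat(xs), torch.cat(ys)
        perm = torch.randperm(len(y))
        x, y = x[perm], y[perm]
        split = int(0.8 * len(y))
        train_set = TensorDataset(x[:split], y[:split])
        test_set = TensorDataset(x[split:], y[split:])
        return nc_benchmark(train_set, test_set, n_experiences=5,
                            task_labels=use_task_labels)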
Code Example #4
    def test_pnn(self):
        # Check that PNN reaches a high enough accuracy.
        # Ensure nothing weird is happening with the multiple heads.
        main_metric = StreamAccuracy()
        exp_acc = ExperienceAccuracy()
        evalp = EvaluationPlugin(main_metric, exp_acc, loggers=None)
        strategy = PNNStrategy(1,    # num_layers
                               6,    # in_features
                               50,   # hidden_features_per_column
                               0.1,  # lr
                               train_mb_size=32,
                               device=get_device(),
                               eval_mb_size=512,
                               train_epochs=1,
                               evaluator=evalp)
        scenario = get_fast_scenario(use_task_labels=True)

        for train_batch_info in scenario.train_stream:
            strategy.train(train_batch_info)

        strategy.eval(scenario.train_stream[:])
        print("TRAIN STREAM ACC: ", main_metric.result())
        assert main_metric.result() > 0.5
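Two remaining helpers also come from the test utilities. Note the version difference visible above: examples #1 and #2 average main_metric.result() as a per-task dict, while #3 and #4 compare it against a threshold directly, which only works where result() still returns a single float. A sketch of the helpers, under assumptions:

    import random

    import numpy as np
    import torch

    def get_device():
        # Assumption: prefer CUDA when available, otherwise fall back to CPU.
        return torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def set_deterministic_run(seed=0):
        # Assumption: seed every RNG the tests touch so runs are repeatable.
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)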