def test_pnn(self):
        # Check that PNN reaches a high enough accuracy.
        # Ensure nothing weird is happening with the multiple heads.

        set_deterministic_run(seed=0)

        main_metric = StreamAccuracy()
        exp_acc = ExperienceAccuracy()
        evalp = EvaluationPlugin(main_metric, exp_acc, loggers=None)
        model = PNN(num_layers=1, in_features=6, hidden_features_per_column=50)
        optimizer = SGD(model.parameters(), lr=0.1)
        strategy = PNNStrategy(
            model,
            optimizer,
            train_mb_size=32,
            device=get_device(),
            eval_mb_size=512,
            train_epochs=1,
            evaluator=evalp,
        )
        benchmark = get_fast_benchmark(use_task_labels=True)

        for train_batch_info in benchmark.train_stream:
            strategy.train(train_batch_info)

        strategy.eval(benchmark.train_stream[:])
        print("TRAIN STREAM ACC: ", main_metric.result())
        assert (sum(main_metric.result().values()) /
                float(len(main_metric.result().keys())) > 0.5)

    def test_multihead_cumulative(self):
        # Check that the multi-head model reaches a high enough accuracy.
        # Ensure nothing weird is happening with the multiple heads.

        set_deterministic_run(seed=0)

        model = MHTestMLP(input_size=6, hidden_size=100)
        criterion = CrossEntropyLoss()
        optimizer = SGD(model.parameters(), lr=1)

        main_metric = StreamAccuracy()
        exp_acc = ExperienceAccuracy()
        evalp = EvaluationPlugin(main_metric, exp_acc, loggers=None)
        strategy = Cumulative(
            model,
            optimizer,
            criterion,
            train_mb_size=64,
            device=get_device(),
            eval_mb_size=512,
            train_epochs=6,
            evaluator=evalp,
        )
        benchmark = get_fast_benchmark(use_task_labels=True)

        for train_batch_info in benchmark.train_stream:
            strategy.train(train_batch_info)
        strategy.eval(benchmark.train_stream[:])
        print("TRAIN STREAM ACC: ", main_metric.result())
        assert (sum(main_metric.result().values()) /
                float(len(main_metric.result().keys())) > 0.7)
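
Both tests above collapse the per-task StreamAccuracy results into a single mean before asserting on it. A small helper makes that intent explicit; this is a minimal sketch (mean_task_accuracy is a hypothetical name, not part of Avalanche):

def mean_task_accuracy(metric):
    # StreamAccuracy.result() returns a dict of per-task results
    # (as the asserts above rely on); reduce it to a single mean value.
    results = metric.result()
    return sum(results.values()) / len(results)

# e.g. in the tests above: assert mean_task_accuracy(main_metric) > 0.5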
Example #3
    def test_multihead_cumulative(self):
        # Check that the multi-head model reaches a high enough accuracy.
        # Ensure nothing weird is happening with the multiple heads.
        model = MHTestMLP(input_size=6, hidden_size=100)
        criterion = CrossEntropyLoss()
        optimizer = SGD(model.parameters(), lr=1)

        main_metric = StreamAccuracy()
        exp_acc = ExperienceAccuracy()
        evalp = EvaluationPlugin(main_metric, exp_acc, loggers=None)
        strategy = Cumulative(model,
                              optimizer,
                              criterion,
                              train_mb_size=32,
                              device=get_device(),
                              eval_mb_size=512,
                              train_epochs=1,
                              evaluator=evalp)
        scenario = get_fast_scenario(use_task_labels=True)

        for train_batch_info in scenario.train_stream:
            strategy.train(train_batch_info)
        strategy.eval(scenario.train_stream[:])
        print("TRAIN STREAM ACC: ", main_metric.result())
        assert main_metric.result() > 0.7
Example #4
    def test_periodic_eval(self):
        model = SimpleMLP(input_size=6, hidden_size=10)
        scenario = get_fast_scenario()
        optimizer = SGD(model.parameters(), lr=1e-3)
        criterion = CrossEntropyLoss()
        curve_key = 'Top1_Acc_Stream/eval_phase/train_stream'

        ###################
        # Case #1: No eval
        ###################
        # we use stream acc. because it emits a single value
        # for each eval loop.
        acc = StreamAccuracy()
        strategy = Naive(model,
                         optimizer,
                         criterion,
                         train_epochs=2,
                         eval_every=-1,
                         evaluator=EvaluationPlugin(acc))
        strategy.train(scenario.train_stream[0])
        # eval is not called in this case
        assert len(strategy.evaluator.all_metrics) == 0

        ###################
        # Case #2: Eval at the end only
        ###################
        acc = StreamAccuracy()
        strategy = Naive(model,
                         optimizer,
                         criterion,
                         train_epochs=2,
                         eval_every=0,
                         evaluator=EvaluationPlugin(acc))
        strategy.train(scenario.train_stream[0])
        # eval is called once at the end of the training loop
        curve = strategy.evaluator.all_metrics[curve_key][1]
        assert len(curve) == 1

        ###################
        # Case #3: Eval after every epoch
        ###################
        acc = StreamAccuracy()
        strategy = Naive(model,
                         optimizer,
                         criterion,
                         train_epochs=2,
                         eval_every=1,
                         evaluator=EvaluationPlugin(acc))
        strategy.train(scenario.train_stream[0])
        # eval is called after every epoch + the end of the training loop
        curve = strategy.evaluator.all_metrics[curve_key][1]
        assert len(curve) == 3
Example #5
    def test_pnn(self):
        # Check that PNN reaches a high enough accuracy.
        # Ensure nothing weird is happening with the multiple heads.
        main_metric = StreamAccuracy()
        exp_acc = ExperienceAccuracy()
        evalp = EvaluationPlugin(main_metric, exp_acc, loggers=None)
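        # Positional args: num_layers, in_features, hidden_features_per_column, lr
        # (cf. the keyword form used in the newer test_pnn example above).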
        strategy = PNNStrategy(1,
                               6,
                               50,
                               0.1,
                               train_mb_size=32,
                               device=get_device(),
                               eval_mb_size=512,
                               train_epochs=1,
                               evaluator=evalp)
        scenario = get_fast_scenario(use_task_labels=True)

        for train_batch_info in scenario.train_stream:
            strategy.train(train_batch_info)

        strategy.eval(scenario.train_stream[:])
        print("TRAIN STREAM ACC: ", main_metric.result())
        assert main_metric.result() > 0.5

    def test_periodic_eval(self):
        model = SimpleMLP(input_size=6, hidden_size=10)
        model.classifier = IncrementalClassifier(model.classifier.in_features)
        benchmark = get_fast_benchmark()
        optimizer = SGD(model.parameters(), lr=1e-3)
        criterion = CrossEntropyLoss()
        curve_key = "Top1_Acc_Stream/eval_phase/train_stream/Task000"

        ###################
        # Case #1: No eval
        ###################
        # we use stream acc. because it emits a single value
        # for each eval loop.
        acc = StreamAccuracy()
        strategy = Naive(
            model,
            optimizer,
            criterion,
            train_epochs=2,
            eval_every=-1,
            evaluator=EvaluationPlugin(acc),
        )
        strategy.train(benchmark.train_stream[0])
        # eval is not called in this case
        assert len(strategy.evaluator.get_all_metrics()) == 0

        ###################
        # Case #2: Eval at the end only and before training
        ###################
        acc = StreamAccuracy()
        evalp = EvaluationPlugin(acc)
        strategy = Naive(
            model,
            optimizer,
            criterion,
            train_epochs=2,
            eval_every=0,
            evaluator=evalp,
        )
        strategy.train(benchmark.train_stream[0])
        # eval is called twice here: once before training and once at the
        # end of the training loop
        curve = strategy.evaluator.get_all_metrics()[curve_key][1]
        assert len(curve) == 2

        ###################
        # Case #3: Eval after every epoch and before training
        ###################
        acc = StreamAccuracy()
        strategy = Naive(
            model,
            optimizer,
            criterion,
            train_epochs=2,
            eval_every=1,
            evaluator=EvaluationPlugin(acc),
        )
        strategy.train(benchmark.train_stream[0])
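        # eval is called before training and after each of the 2 epochs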
        curve = strategy.evaluator.get_all_metrics()[curve_key][1]
        assert len(curve) == 3

        ###################
        # Case #4: Eval in iteration mode
        ###################
        acc = StreamAccuracy()
        strategy = Naive(
            model,
            optimizer,
            criterion,
            train_epochs=2,
            eval_every=100,
            evaluator=EvaluationPlugin(acc),
            peval_mode="iteration",
        )
        strategy.train(benchmark.train_stream[0])
        curve = strategy.evaluator.get_all_metrics()[curve_key][1]
        assert len(curve) == 5
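
For reference, a minimal sketch of how the recorded curves can be inspected, assuming the layout used by the asserts above: get_all_metrics() maps each metric name to a (steps, values) pair, which is why the tests index [1].

all_metrics = strategy.evaluator.get_all_metrics()
steps, values = all_metrics["Top1_Acc_Stream/eval_phase/train_stream/Task000"]
for step, value in zip(steps, values):
    # one entry per eval loop that was triggered during/after training
    print(f"eval at step {step}: top-1 stream accuracy {value:.3f}")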
Example #7
# Imports needed to run this example standalone. Exact module paths can vary
# across Avalanche releases; get_dataset_per_pixel_mean and
# icarl_cifar100_augment_data are helper functions defined alongside this
# function in the original iCaRL example script.
import random
from os.path import expanduser

import numpy as np
import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import MultiStepLR
from torchvision import transforms
from torchvision.datasets import CIFAR100

from avalanche.benchmarks.generators import nc_benchmark
from avalanche.benchmarks.utils import AvalancheDataset
from avalanche.evaluation.metrics import (
    EpochAccuracy,
    ExperienceAccuracy,
    StreamAccuracy,
)
from avalanche.logging import InteractiveLogger
from avalanche.models import IcarlNet, initialize_icarl_net, make_icarl_net
from avalanche.training.plugins import EvaluationPlugin, LRSchedulerPlugin
from avalanche.training.supervised import ICaRL


def run_experiment(config):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    torch.manual_seed(config.seed)
    torch.cuda.manual_seed(config.seed)
    np.random.seed(config.seed)
    random.seed(config.seed)
    torch.backends.cudnn.enabled = False
    torch.backends.cudnn.deterministic = True

    per_pixel_mean = get_dataset_per_pixel_mean(
        CIFAR100(
            expanduser("~") + "/.avalanche/data/cifar100/",
            train=True,
            download=True,
            transform=transforms.Compose([transforms.ToTensor()]),
        )
    )

    transforms_group = dict(
        eval=(
            transforms.Compose(
                [
                    transforms.ToTensor(),
                    lambda img_pattern: img_pattern - per_pixel_mean,
                ]
            ),
            None,
        ),
        train=(
            transforms.Compose(
                [
                    transforms.ToTensor(),
                    lambda img_pattern: img_pattern - per_pixel_mean,
                    icarl_cifar100_augment_data,
                ]
            ),
            None,
        ),
    )

    train_set = CIFAR100(
        expanduser("~") + "/.avalanche/data/cifar100/",
        train=True,
        download=True,
    )
    test_set = CIFAR100(
        expanduser("~") + "/.avalanche/data/cifar100/",
        train=False,
        download=True,
    )

    train_set = AvalancheDataset(
        train_set,
        transform_groups=transforms_group,
        initial_transform_group="train",
    )
    test_set = AvalancheDataset(
        test_set,
        transform_groups=transforms_group,
        initial_transform_group="eval",
    )

    scenario = nc_benchmark(
        train_dataset=train_set,
        test_dataset=test_set,
        n_experiences=config.nb_exp,
        task_labels=False,
        seed=config.seed,
        shuffle=False,
        fixed_class_order=config.fixed_class_order,
    )

    evaluator = EvaluationPlugin(
        EpochAccuracy(),
        ExperienceAccuracy(),
        StreamAccuracy(),
        loggers=[InteractiveLogger()],
    )

    model: IcarlNet = make_icarl_net(num_classes=100)
    model.apply(initialize_icarl_net)

    optim = SGD(
        model.parameters(),
        lr=config.lr_base,
        weight_decay=config.wght_decay,
        momentum=0.9,
    )
    sched = LRSchedulerPlugin(
        MultiStepLR(optim, config.lr_milestones, gamma=1.0 / config.lr_factor)
    )

    strategy = ICaRL(
        model.feature_extractor,
        model.classifier,
        optim,
        config.memory_size,
        buffer_transform=transforms.Compose([icarl_cifar100_augment_data]),
        fixed_memory=True,
        train_mb_size=config.batch_size,
        train_epochs=config.epochs,
        eval_mb_size=config.batch_size,
        plugins=[sched],
        device=device,
        evaluator=evaluator,
    )

    for i, exp in enumerate(scenario.train_stream):
        eval_exps = [e for e in scenario.test_stream][: i + 1]
        strategy.train(exp, num_workers=4)
        strategy.eval(eval_exps, num_workers=4)
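
A minimal sketch of the config object that run_experiment expects, reconstructed from the attributes accessed above. The values are placeholders for illustration, not the settings of the original iCaRL experiment.

from types import SimpleNamespace

config = SimpleNamespace(
    seed=0,                              # RNG seed for torch/numpy/random
    nb_exp=10,                           # number of experiences in nc_benchmark
    fixed_class_order=list(range(100)),  # order of the 100 CIFAR-100 classes
    lr_base=2.0,                         # initial SGD learning rate
    lr_milestones=[49, 63],              # epochs at which MultiStepLR decays the lr
    lr_factor=5.0,                       # lr is divided by this factor at each milestone
    wght_decay=1e-5,                     # SGD weight decay
    memory_size=2000,                    # iCaRL exemplar memory size
    batch_size=128,                      # train/eval mini-batch size
    epochs=70,                           # training epochs per experience
)

run_experiment(config)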