Example 1
    def test_gem(self):
        model = self.get_model(fast_test=self.fast_test)
        optimizer = SGD(model.parameters(), lr=1e-1)
        criterion = CrossEntropyLoss()

        # SIT scenario
        my_nc_scenario = self.load_scenario(fast_test=self.fast_test)
        strategy = GEM(model,
                       optimizer,
                       criterion,
                       patterns_per_exp=256,
                       train_mb_size=10,
                       eval_mb_size=50,
                       train_epochs=2)

        self.run_strategy(my_nc_scenario, strategy)

        # MT scenario
        strategy = GEM(model,
                       optimizer,
                       criterion,
                       patterns_per_exp=256,
                       train_mb_size=10,
                       eval_mb_size=50,
                       train_epochs=2)
        scenario = self.load_scenario(fast_test=self.fast_test,
                                      use_task_labels=True)
        self.run_strategy(scenario, strategy)
Example 2 (the same test after the Avalanche API renamed "scenario" to "benchmark")
    def test_gem(self):
        # SIT scenario
        model, optimizer, criterion, my_nc_benchmark = self.init_sit()
        strategy = GEM(
            model,
            optimizer,
            criterion,
            patterns_per_exp=256,
            train_mb_size=10,
            eval_mb_size=50,
            train_epochs=2,
        )

        self.run_strategy(my_nc_benchmark, strategy)

        # MT scenario
        strategy = GEM(
            model,
            optimizer,
            criterion,
            patterns_per_exp=256,
            train_mb_size=10,
            eval_mb_size=50,
            train_epochs=2,
        )
        benchmark = self.load_benchmark(use_task_labels=True)
        self.run_strategy(benchmark, strategy)
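Both variants rely on helpers defined elsewhere in the test class. A minimal sketch of what they might look like, assuming SplitMNIST from avalanche.benchmarks.classic plus the torch imports used elsewhere on this page; the names init_sit, load_benchmark, and run_strategy come from the tests above, but the bodies here are illustrative, not Avalanche's actual test code:

    def init_sit(self):
        # Model, optimizer, criterion plus a class-incremental (SIT) benchmark.
        model = SimpleMLP(num_classes=10)
        optimizer = SGD(model.parameters(), lr=1e-1)
        criterion = CrossEntropyLoss()
        return model, optimizer, criterion, self.load_benchmark()

    def load_benchmark(self, use_task_labels=False):
        # Task labels on: multi-task (MT) stream; off: single stream (SIT).
        return SplitMNIST(n_experiences=5, return_task_id=use_task_labels)

    def run_strategy(self, benchmark, strategy):
        # Train on each experience, then evaluate on the full test stream.
        for experience in benchmark.train_stream:
            strategy.train(experience)
            strategy.eval(benchmark.test_stream)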
Example 3
import torch

from avalanche.benchmarks.classic import PermutedMNIST, SplitMNIST
from avalanche.evaluation.metrics import (accuracy_metrics,
                                          forgetting_metrics, loss_metrics)
from avalanche.logging import InteractiveLogger
from avalanche.models import SimpleMLP
from avalanche.training.plugins import EvaluationPlugin
# In recent Avalanche releases the strategies live in
# avalanche.training.supervised; older releases exposed them
# from avalanche.training.strategies.
from avalanche.training.supervised import GEM, AGEM


def main(args):
    model = SimpleMLP(hidden_size=args.hs)
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
    criterion = torch.nn.CrossEntropyLoss()

    # check if selected GPU is available or use CPU
    assert args.cuda == -1 or args.cuda >= 0, "cuda must be -1 or >= 0."
    device = torch.device(f"cuda:{args.cuda}"
                          if torch.cuda.is_available() and
                          args.cuda >= 0 else "cpu")
    print(f'Using device: {device}')

    # create scenario
    if args.scenario == 'pmnist':
        scenario = PermutedMNIST(n_experiences=args.permutations)
    elif args.scenario == 'smnist':
        scenario = SplitMNIST(n_experiences=5, return_task_id=False)
    else:
        raise ValueError("Wrong scenario name. Allowed pmnist, smnist.")

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        forgetting_metrics(experience=True),
        loggers=[interactive_logger])

    # create strategy
    if args.strategy == 'gem':
        strategy = GEM(model, optimizer, criterion, args.patterns_per_exp,
                       args.memory_strength, train_epochs=args.epochs,
                       device=device, train_mb_size=10, evaluator=eval_plugin)
    elif args.strategy == 'agem':
        strategy = AGEM(model, optimizer, criterion, args.patterns_per_exp,
                        args.sample_size, train_epochs=args.epochs, device=device,
                        train_mb_size=10, evaluator=eval_plugin)
    else:
        raise ValueError("Wrong strategy name. Allowed gem, agem.")
    # train on the selected scenario with the chosen strategy
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start training on experience ", experience.current_experience)

        strategy.train(experience)
        print("End training on experience ", experience.current_experience)
        print('Computing accuracy on the test set')
        results.append(strategy.eval(scenario.test_stream[:]))
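main expects an args namespace; a minimal command-line driver, with the argument names taken from the attributes used above and the default values chosen purely for illustration:

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--scenario", type=str, default="pmnist",
                        choices=["pmnist", "smnist"])
    parser.add_argument("--strategy", type=str, default="gem",
                        choices=["gem", "agem"])
    parser.add_argument("--lr", type=float, default=0.1)
    parser.add_argument("--hs", type=int, default=256)
    parser.add_argument("--epochs", type=int, default=1)
    parser.add_argument("--permutations", type=int, default=5,
                        help="Number of experiences for PermutedMNIST.")
    parser.add_argument("--patterns_per_exp", type=int, default=256)
    parser.add_argument("--memory_strength", type=float, default=0.5)
    parser.add_argument("--sample_size", type=int, default=64,
                        help="A-GEM reference sample size.")
    parser.add_argument("--cuda", type=int, default=0,
                        help="GPU index; -1 forces CPU.")
    main(parser.parse_args())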
Example 4
eval_plugin = EvaluationPlugin(
    accuracy_metrics(minibatch=True, epoch=True, experience=True, stream=True),
    loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
    timing_metrics(epoch=True, epoch_running=True),
    ExperienceForgetting(),
    cpu_usage_metrics(experience=True),
    StreamConfusionMatrix(num_classes=2, save_image=False),
    disk_usage_metrics(minibatch=True, epoch=True, experience=True, stream=True),
    loggers=[interactive_logger, text_logger, tb_logger],
)

cl_strategy = GEM(
    model,
    optimizer=Adam(model.parameters()),
    patterns_per_exp=1470,
    criterion=CrossEntropyLoss(),
    train_mb_size=128,
    train_epochs=50,
    eval_mb_size=128,
    evaluator=eval_plugin,
    device=device,
)

# TRAINING LOOP
print("Starting experiment...")

os.makedirs(os.path.join("weights", "SimpleMLP"), exist_ok=True)

results = []
i = 1
for task_number, experience in enumerate(generic_scenario.train_stream):
    print("Start of experience: ", experience.current_experience)
Example 5
if args.cl_strategy == "LwF":
    cl_strategy = LwF(model,
                      Adam(model.parameters(), lr=0.001),
                      CrossEntropyLoss(),
                      alpha=0.5,
                      temperature=2.0,
                      train_mb_size=args.batch_size,
                      train_epochs=args.num_epochs,
                      eval_mb_size=args.batch_size * 2,
                      evaluator=eval_plugin,
                      device=device)
elif (args.cl_strategy == "GEM"):
    cl_strategy = GEM(model,
                      Adam(model.parameters(), lr=0.001),
                      CrossEntropyLoss(),
                      patterns_per_exp=150,
                      memory_strength=0.5,
                      train_mb_size=args.batch_size,
                      train_epochs=args.num_epochs,
                      eval_mb_size=args.batch_size * 2,
                      evaluator=eval_plugin,
                      device=device)
elif (args.cl_strategy == "EWC"):
    cl_strategy = EWC(model,
                      Adam(model.parameters(), lr=0.001),
                      CrossEntropyLoss(),
                      ewc_lambda=0.5,
                      mode="separate",
                      train_mb_size=args.batch_size,
                      train_epochs=args.num_epochs,
                      eval_mb_size=args.batch_size * 2,
                      evaluator=eval_plugin,
                      device=device)
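The chain presumably ends with a fallback for unrecognized strategy names; a minimal closing branch (assumed, not shown in the original snippet):

else:
    raise ValueError(f"Unknown cl_strategy: {args.cl_strategy}")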