Code Example #1
File: test_strategies.py / Project: rrmina/avalanche
    def test_lwf(self):
        model = self.get_model(fast_test=self.fast_test)
        optimizer = SGD(model.parameters(), lr=1e-3)
        criterion = CrossEntropyLoss()

        # SIT scenario: class-incremental benchmark without task labels
        my_nc_scenario = self.load_scenario(fast_test=self.fast_test)
        strategy = LwF(model,
                       optimizer,
                       criterion,
                       alpha=[0, 1 / 2, 2 * (2 / 3), 3 * (3 / 4), 4 * (4 / 5)],
                       temperature=2,
                       device=self.device,
                       train_mb_size=10,
                       eval_mb_size=50,
                       train_epochs=2)
        self.run_strategy(my_nc_scenario, strategy)

        # MT scenario: reuse the same model/optimizer with task labels enabled
        strategy = LwF(model,
                       optimizer,
                       criterion,
                       alpha=[0, 1 / 2, 2 * (2 / 3), 3 * (3 / 4), 4 * (4 / 5)],
                       temperature=2,
                       device=self.device,
                       train_mb_size=10,
                       eval_mb_size=50,
                       train_epochs=2)
        scenario = self.load_scenario(fast_test=self.fast_test,
                                      use_task_labels=True)
        self.run_strategy(scenario, strategy)
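This test method omits its imports; a minimal set that would make the snippet resolvable, assuming an Avalanche layout contemporary with rrmina/avalanche (newer releases expose LwF under avalanche.training.supervised instead), would be:

# Imports assumed by the test above (module paths are an assumption).
from torch.optim import SGD
from torch.nn import CrossEntropyLoss
from avalanche.training.strategies import LwF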
Code Example #2
    def test_lwf(self):
        # SIT scenario: init_sit() bundles model, optimizer, criterion, benchmark
        model, optimizer, criterion, my_nc_benchmark = self.init_sit()
        strategy = LwF(model,
                       optimizer,
                       criterion,
                       alpha=[0, 1 / 2, 2 * (2 / 3), 3 * (3 / 4), 4 * (4 / 5)],
                       temperature=2,
                       device=self.device,
                       train_mb_size=10,
                       eval_mb_size=50,
                       train_epochs=2)
        self.run_strategy(my_nc_benchmark, strategy)

        # MT scenario: same strategy settings on a task-labeled benchmark
        strategy = LwF(model,
                       optimizer,
                       criterion,
                       alpha=[0, 1 / 2, 2 * (2 / 3), 3 * (3 / 4), 4 * (4 / 5)],
                       temperature=2,
                       device=self.device,
                       train_mb_size=10,
                       eval_mb_size=50,
                       train_epochs=2)
        scenario = self.load_scenario(use_task_labels=True)
        self.run_strategy(scenario, strategy)
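In both tests, alpha is a per-experience list: element i equals i * i / (i + 1), so the distillation weight grows as more experiences have been seen. A hypothetical helper (not part of the test suite) that generates the same schedule:

def lwf_alpha_schedule(n_experiences):
    # Element i is i * (i / (i + 1)): [0, 1/2, 4/3, 9/4, 16/5] for 5 experiences.
    return [i * (i / (i + 1)) for i in range(n_experiences)]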
Code Example #3
def main(args):
    model = SimpleMLP(hidden_size=args.hs)
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
    criterion = torch.nn.CrossEntropyLoss()

    # check if selected GPU is available or use CPU
    assert args.cuda == -1 or args.cuda >= 0, "cuda must be -1 or >= 0."
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")
    print(f"Using device: {device}")

    # create split scenario
    scenario = SplitMNIST(n_experiences=5, return_task_id=False)

    interactive_logger = InteractiveLogger()
    eval_plugin = EvaluationPlugin(
        accuracy_metrics(minibatch=True,
                         epoch=True,
                         experience=True,
                         stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        forgetting_metrics(experience=True),
        loggers=[interactive_logger],
    )

    # create strategy
    # LwF accepts either a single alpha or one alpha per experience (5 here).
    assert len(args.lwf_alpha) in (1, 5), \
        "lwf_alpha must be a list of length 1 or 5."
    lwf_alpha = (args.lwf_alpha[0]
                 if len(args.lwf_alpha) == 1 else args.lwf_alpha)

    strategy = LwF(
        model,
        optimizer,
        criterion,
        alpha=lwf_alpha,
        temperature=args.softmax_temperature,
        train_epochs=args.epochs,
        device=device,
        train_mb_size=args.minibatch_size,
        evaluator=eval_plugin,
    )

    # train on the selected scenario with the chosen strategy
    print("Starting experiment...")
    results = []
    for train_batch_info in scenario.train_stream:
        print("Start training on experience ",
              train_batch_info.current_experience)

        strategy.train(train_batch_info, num_workers=0)
        print("End training on experience ",
              train_batch_info.current_experience)
        print("Computing accuracy on the test set")
        results.append(strategy.eval(scenario.test_stream[:]))
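main() reads its hyperparameters from an argparse namespace that is not shown. A sketch of the parser it implies, reconstructed from the attributes used above (the flag names follow directly from those attributes; the defaults are assumptions):

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--lwf_alpha", nargs="+", type=float,
                        default=[0, 0.5, 1.333, 2.25, 3.2],
                        help="one alpha, or one alpha per experience")
    parser.add_argument("--softmax_temperature", type=float, default=2.0)
    parser.add_argument("--epochs", type=int, default=2)
    parser.add_argument("--minibatch_size", type=int, default=128)
    parser.add_argument("--hs", type=int, default=256, help="MLP hidden size")
    parser.add_argument("--lr", type=float, default=1e-3)
    parser.add_argument("--cuda", type=int, default=0,
                        help="GPU index, or -1 to run on CPU")
    main(parser.parse_args())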
Code Example #4
elif (args.cl_strategy == "SI"):
    cl_strategy = SynapticIntelligence(model,
                                       Adam(model.parameters(), lr=0.001),
                                       CrossEntropyLoss(),
                                       si_lambda=1.0,
                                       train_mb_size=args.batch_size,
                                       train_epochs=args.num_epochs,
                                       eval_mb_size=args.batch_size * 2,
                                       evaluator=eval_plugin,
                                       device=device)
elif (args.cl_strategy == "LwF"):
    cl_strategy = LwF(model,
                      Adam(model.parameters(), lr=0.001),
                      CrossEntropyLoss(),
                      alpha=0.5,
                      temperature=2.0,
                      train_mb_size=args.batch_size,
                      train_epochs=args.num_epochs,
                      eval_mb_size=args.batch_size * 2,
                      evaluator=eval_plugin,
                      device=device)
elif (args.cl_strategy == "GEM"):
    cl_strategy = GEM(model,
                      Adam(model.parameters(), lr=0.001),
                      CrossEntropyLoss(),
                      patterns_per_exp=150,
                      memory_strength=0.5,
                      train_mb_size=args.batch_size,
                      train_epochs=args.num_epochs,
                      eval_mb_size=args.batch_size * 2,
                      evaluator=eval_plugin,
                      device=device)
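This excerpt is the middle of a strategy-selection chain and assumes its classes were imported earlier in the file. A plausible import block, with module paths hedged (older Avalanche versions used avalanche.training.strategies; newer ones use avalanche.training.supervised):

# Module paths are an assumption; adjust to the installed Avalanche version.
from torch.nn import CrossEntropyLoss
from torch.optim import Adam
from avalanche.training.strategies import GEM, LwF, SynapticIntelligence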