Example 1
def main(args):
    # Compute device
    device = "cuda" if args.cuda >= 0 and torch.cuda.is_available() else "cpu"
    print("Using ", device)

    # Benchmark
    benchmark = SplitMNIST(
        n_experiences=5,
        dataset_root=expanduser("~") + "/.avalanche/data/mnist/",
    )

    # Train for the first experience only
    experience_0 = benchmark.train_stream[0]

    # Run tests
    dur_simple = run_simple_online(experience_0, device)
    # dur_base_online = run_base_online(experience_0, device,
    #                                   use_interactive_logger=False)
    # dur_base_online_intlog = run_base_online(experience_0, device,
    #                                          use_interactive_logger=True)
    # dur_base = run_base(experience_0, device,
    #                     use_interactive_logger=False)
    # dur_base_intlog = run_base(experience_0, device,
    #                            use_interactive_logger=True)
    dur_ocl_lazy_stream = run_ocl_lazy_stream(experience_0, device)

    print(f"Duration for SimpleOnlineStrategy: ", dur_simple)
    # print(f"Duration for BaseOnlineStrategy: ", dur_base_online)
    # print(f"Duration for BaseOnlineStrategy+IntLogger: ",
    #       dur_base_online_intlog)
    # print(f"Duration for BaseStrategy: ", dur_base)
    # print(f"Duration for BaseStrategy+IntLogger: ",
    #       dur_base_intlog)
    print(f"Duration for dur_ocl_lazy_stream: ", dur_ocl_lazy_stream)
Example 2
def main(args):
    """
    Last Avalanche version reference performance (online):
        Top1_Acc_Stream/eval_phase/test_stream = 0.9421
    """
    # --- DEFAULT PARAMS ONLINE DATA INCREMENTAL LEARNING
    nb_tasks = 5  # Can still design the data stream based on tasks
    epochs = 1  # All data is only seen once: Online
    batch_size = 10  # Only process small amount of data at a time
    return_task_id = False  # Data incremental (task-agnostic/task-free)
    # TODO use data_incremental_generator, now experience=task

    # --- CONFIG
    device = torch.device(
        f"cuda:{args.cuda}" if torch.cuda.is_available() and args.cuda >= 0
        else "cpu")
    # ---------

    # --- SCENARIO CREATION
    scenario = SplitMNIST(nb_tasks, return_task_id=return_task_id,
                          fixed_class_order=[i for i in range(10)])
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=args.featsize,
                      hidden_size=400, hidden_layers=2)

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(experience=True, stream=True),
        loss_metrics(experience=True, stream=True),
        ExperienceForgetting(),
        loggers=[interactive_logger])

    # CoPE PLUGIN
    cope = CoPEPlugin(mem_size=2000, p_size=args.featsize,
                      n_classes=scenario.n_classes)

    # CREATE THE STRATEGY INSTANCE (NAIVE) WITH CoPE PLUGIN
    cl_strategy = Naive(model, torch.optim.SGD(model.parameters(), lr=0.01),
                        cope.ppp_loss,  # CoPE PPP-Loss
                        train_mb_size=batch_size, train_epochs=epochs,
                        eval_mb_size=100, device=device,
                        plugins=[cope],
                        evaluator=eval_plugin
                        )

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start of experience ", experience.current_experience)
        cl_strategy.train(experience)
        print('Training completed')

        print('Computing accuracy on the whole test set')
        results.append(cl_strategy.eval(scenario.test_stream))
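All of these main() excerpts omit their import headers. A plausible preamble for the Avalanche 0.1.x-era API used here (the exact module paths are an assumption and moved in later Avalanche releases):

import torch
from avalanche.benchmarks.classic import SplitMNIST
from avalanche.evaluation.metrics import (ExperienceForgetting,
                                          accuracy_metrics, loss_metrics)
from avalanche.logging import InteractiveLogger
from avalanche.models import SimpleMLP
from avalanche.training.plugins import CoPEPlugin, EvaluationPlugin
from avalanche.training.strategies import Naive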
Example 3
def main(args):
    model = SimpleMLP(hidden_size=args.hs)
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
    criterion = torch.nn.CrossEntropyLoss()

    # check if selected GPU is available or use CPU
    assert args.cuda == -1 or args.cuda >= 0, "cuda must be -1 or >= 0."
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")
    print(f'Using device: {device}')

    # create scenario
    if args.scenario == 'pmnist':
        scenario = PermutedMNIST(n_experiences=args.permutations)
    elif args.scenario == 'smnist':
        scenario = SplitMNIST(n_experiences=5, return_task_id=False)
    else:
        raise ValueError("Wrong scenario name. Allowed pmnist, smnist.")

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()
    text_logger = TextLogger(open('log.txt', 'a'))

    eval_plugin = EvaluationPlugin(accuracy_metrics(minibatch=True,
                                                    epoch=True,
                                                    experience=True,
                                                    stream=True),
                                   loss_metrics(minibatch=True,
                                                epoch=True,
                                                experience=True,
                                                stream=True),
                                   ExperienceForgetting(),
                                   loggers=[interactive_logger, text_logger])

    # create strategy
    strategy = EWC(model,
                   optimizer,
                   criterion,
                   args.ewc_lambda,
                   args.ewc_mode,
                   decay_factor=args.decay_factor,
                   train_epochs=args.epochs,
                   device=device,
                   train_mb_size=args.minibatch_size,
                   evaluator=eval_plugin)

    # train on the selected scenario with the chosen strategy
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start training on experience ", experience.current_experience)

        strategy.train(experience)
        print("End training on experience", experience.current_experience)
        print('Computing accuracy on the test set')
        results.append(strategy.eval(scenario.test_stream[:]))
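The args namespace consumed here is built elsewhere; a minimal parser supplying the fields this excerpt reads (flag names mirror the attribute names, defaults are illustrative assumptions):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--hs', type=int, default=256)           # hidden size
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--cuda', type=int, default=0)           # -1 for CPU
parser.add_argument('--scenario', type=str, default='smnist')
parser.add_argument('--permutations', type=int, default=5)   # pmnist only
parser.add_argument('--ewc_lambda', type=float, default=0.4)
parser.add_argument('--ewc_mode', type=str, default='separate')
parser.add_argument('--decay_factor', type=float, default=0.1)
parser.add_argument('--epochs', type=int, default=1)
parser.add_argument('--minibatch_size', type=int, default=128)
main(parser.parse_args())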
Example 4
def main(args):
    model = SimpleMLP(hidden_size=args.hs)
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
    criterion = torch.nn.CrossEntropyLoss()

    # check if selected GPU is available or use CPU
    assert args.cuda == -1 or args.cuda >= 0, "cuda must be -1 or >= 0."
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")
    print(f"Using device: {device}")

    # create split scenario
    scenario = SplitMNIST(n_experiences=5, return_task_id=False)

    interactive_logger = InteractiveLogger()
    eval_plugin = EvaluationPlugin(
        accuracy_metrics(minibatch=True,
                         epoch=True,
                         experience=True,
                         stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        forgetting_metrics(experience=True),
        loggers=[interactive_logger],
    )

    # create strategy
    assert (len(args.lwf_alpha) == 1
            or len(args.lwf_alpha) == 5), \
        "--lwf_alpha must be a single value or one value per experience (5)."
    lwf_alpha = (args.lwf_alpha[0]
                 if len(args.lwf_alpha) == 1 else args.lwf_alpha)

    strategy = LwF(
        model,
        optimizer,
        criterion,
        alpha=lwf_alpha,
        temperature=args.softmax_temperature,
        train_epochs=args.epochs,
        device=device,
        train_mb_size=args.minibatch_size,
        evaluator=eval_plugin,
    )

    # train on the selected scenario with the chosen strategy
    print("Starting experiment...")
    results = []
    for train_batch_info in scenario.train_stream:
        print("Start training on experience ",
              train_batch_info.current_experience)

        strategy.train(train_batch_info, num_workers=0)
        print("End training on experience ",
              train_batch_info.current_experience)
        print("Computing accuracy on the test set")
        results.append(strategy.eval(scenario.test_stream[:]))
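LwF accepts either one distillation weight for all experiences or a per-experience schedule, which is what the length-1-or-5 check above enforces. Illustrative invocations (values are examples, not tuned settings):

# --lwf_alpha 1.0              -> one alpha shared by every experience
# --lwf_alpha 0 0.5 1 1.5 2    -> one alpha per experience (5 experiences)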
Example 5
    def test_image_samples(self):
        p_metric = ImagesSamplePlugin(
            n_cols=5, n_rows=5, group=True, mode="train"
        )

        scenario = SplitMNIST(5)
        curr_exp = scenario.train_stream[0]
        curr_dataset = curr_exp.dataset
        strategy_mock = MagicMock(
            eval_mb_size=32, experience=curr_exp, adapted_dataset=curr_dataset
        )

        mval = p_metric.after_train_dataset_adaptation(strategy_mock)
        img_grid = mval[0].value.image
Example 6
    def test_samples_augmentations(self):
        scenario = SplitMNIST(5)
        curr_exp = scenario.train_stream[0]

        # we use a Resize transform because its effect is easy to detect
        # from the grid shape alone, without inspecting pixel values.
        curr_dataset = curr_exp.dataset.replace_transforms(
            transform=Compose([Resize(8), ToTensor()]), target_transform=None
        )

        ##########################################
        # WITH AUGMENTATIONS
        ##########################################
        p_metric = ImagesSamplePlugin(
            n_cols=5,
            n_rows=5,
            group=True,
            mode="train",
            disable_augmentations=False,
        )

        strategy_mock = MagicMock(
            eval_mb_size=32, experience=curr_exp, adapted_dataset=curr_dataset
        )

        mval = p_metric.after_train_dataset_adaptation(strategy_mock)
        img_grid = mval[0].value.image
        assert img_grid.shape == (3, 52, 52)
        # save_image(img_grid, './logs/test_image_with_aug.png')

        ##########################################
        # WITHOUT AUGMENTATIONS
        ##########################################
        p_metric = ImagesSamplePlugin(
            n_cols=5,
            n_rows=5,
            group=True,
            mode="train",
            disable_augmentations=True,
        )

        strategy_mock = MagicMock(
            eval_mb_size=32, experience=curr_exp, adapted_dataset=curr_dataset
        )

        mval = p_metric.after_train_dataset_adaptation(strategy_mock)
        img_grid = mval[0].value.image
        assert img_grid.shape == (3, 152, 152)
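The asserted shapes follow from torchvision's make_grid layout, assuming the plugin keeps the default padding of 2 pixels: an n-by-n grid of square images of side s measures n*s + (n+1)*2 per side.

# with augmentations, Resize(8):    5 * 8  + 6 * 2 = 52   -> (3, 52, 52)
# without augmentations, raw 28px:  5 * 28 + 6 * 2 = 152  -> (3, 152, 152)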
Example 7
    def test_SplitMNIST_scenario(self):
        scenario = SplitMNIST(5)
        self.assertEqual(5, len(scenario.train_stream))
        self.assertEqual(5, len(scenario.test_stream))

        train_sz = 0
        for experience in scenario.train_stream:
            self.assertIsInstance(experience, Experience)
            train_sz += len(experience.dataset)
        self.assertEqual(60000, train_sz)

        test_sz = 0
        for experience in scenario.test_stream:
            self.assertIsInstance(experience, Experience)
            test_sz += len(experience.dataset)
        self.assertEqual(10000, test_sz)
Example 8
def main(args):
    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")

    # --- SCENARIO CREATION
    scenario = SplitMNIST(n_experiences=10, seed=1234)
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=scenario.n_classes)

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(minibatch=True,
                         epoch=True,
                         experience=True,
                         stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        forgetting_metrics(experience=True),
        loggers=[interactive_logger],
    )

    # CREATE THE STRATEGY INSTANCE (GenerativeReplay)
    cl_strategy = GenerativeReplay(
        model,
        torch.optim.Adam(model.parameters(), lr=0.001),
        CrossEntropyLoss(),
        train_mb_size=100,
        train_epochs=4,
        eval_mb_size=100,
        device=device,
        evaluator=eval_plugin,
    )

    # TRAINING LOOP
    print("Starting experiment...")
    results = []
    for experience in scenario.train_stream:
        print("Start of experience ", experience.current_experience)
        cl_strategy.train(experience)
        print("Training completed")

        print("Computing accuracy on the whole test set")
        results.append(cl_strategy.eval(scenario.test_stream))
Example 9
    def test_tensor_samples(self):
        p_metric = ImagesSamplePlugin(
            n_cols=5, n_rows=5, group=True, mode="train"
        )

        scenario = SplitMNIST(5)
        curr_exp = scenario.train_stream[0]
        for mb in DataLoader(curr_exp.dataset, batch_size=32):
            break
        curr_dataset = AvalancheTensorDataset(*mb[:2], targets=mb[1])

        strategy_mock = MagicMock(
            eval_mb_size=32, experience=curr_exp, adapted_dataset=curr_dataset
        )

        mval = p_metric.after_train_dataset_adaptation(strategy_mock)
        img_grid = mval[0].value.image
Example 10
def main(args):
    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")

    # --- SCENARIO CREATION
    scenario = SplitMNIST(n_experiences=10, seed=1234)
    # ---------

    # MODEL CREATION
    model = MlpVAE((1, 28, 28), nhid=2, device=device)

    # CREATE THE STRATEGY INSTANCE (VAETraining with generative replay)
    cl_strategy = VAETraining(
        model,
        torch.optim.Adam(model.parameters(), lr=0.001),
        train_mb_size=100,
        train_epochs=4,
        device=device,
        plugins=[GenerativeReplayPlugin()],
    )

    # TRAINING LOOP
    print("Starting experiment...")
    f, axarr = plt.subplots(scenario.n_experiences, 10)
    k = 0
    for experience in scenario.train_stream:
        print("Start of experience ", experience.current_experience)
        cl_strategy.train(experience)
        print("Training completed")

        samples = model.generate(10)
        samples = samples.detach().cpu().numpy()

        for j in range(10):
            axarr[k, j].imshow(samples[j, 0], cmap="gray")
        # set the row title once, outside the column loop
        axarr[k, 4].set_title("Generated images for experience " + str(k))
        np.vectorize(lambda ax: ax.axis("off"))(axarr)
        k += 1

    f.subplots_adjust(hspace=1.2)
    plt.savefig("VAE_output_per_exp")
    plt.show()
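For a file-per-experience dump, torchvision's save_image could replace the matplotlib grid; a sketch assuming samples is kept as the tensor returned by model.generate (before the .numpy() conversion above):

from torchvision.utils import save_image

# one row of 10 generated digits per experience
save_image(samples, f"VAE_samples_exp{k}.png", nrow=10, normalize=True)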
Example 11
def main(cuda: int):
    # --- CONFIG
    device = torch.device(
        f"cuda:{cuda}" if torch.cuda.is_available() and cuda >= 0 else "cpu")

    # --- SCENARIO CREATION
    scenario = SplitMNIST(n_experiences=5, seed=42)
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=scenario.n_classes)

    # choose some metrics and evaluation method
    eval_plugin = EvaluationPlugin(
        accuracy_metrics(stream=True, experience=True),
        mean_scores_metrics(on_train=True, on_eval=True),
        loggers=[
            TensorboardLogger(f"tb_data/{datetime.now()}"),
            InteractiveLogger(),
        ],
    )

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(
        model,
        Adam(model.parameters()),
        train_mb_size=128,
        train_epochs=2,
        eval_mb_size=128,
        device=device,
        plugins=[ReplayPlugin(mem_size=100)],
        evaluator=eval_plugin,
    )

    # TRAINING LOOP
    for i, experience in enumerate(scenario.train_stream, 1):
        cl_strategy.train(experience)
        cl_strategy.eval(scenario.test_stream[:i])
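Because enumerate starts at 1, the eval slice grows with training, so each evaluation covers exactly the experiences seen so far:

# i = 1 -> eval on test_stream[:1]  (experience 0 only)
# i = 3 -> eval on test_stream[:3]  (experiences 0-2)
# i = 5 -> eval on test_stream[:5]  (the full test stream)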
Example 12
    def test_SplitMNIST_benchmark(self):
        benchmark = SplitMNIST(5)
        self.assertEqual(5, len(benchmark.train_stream))
        self.assertEqual(5, len(benchmark.test_stream))

        train_sz = 0
        for experience in benchmark.train_stream:
            self.assertIsInstance(experience, Experience)
            train_sz += len(experience.dataset)

            # Regression test for 572
            load_experience_train_eval(experience)

        self.assertEqual(60000, train_sz)

        test_sz = 0
        for experience in benchmark.test_stream:
            self.assertIsInstance(experience, Experience)
            test_sz += len(experience.dataset)

            # Regression test for 572
            load_experience_train_eval(experience)

        self.assertEqual(10000, test_sz)
Example 13
from torch.nn import CrossEntropyLoss
from torch.optim import SGD

from avalanche.benchmarks.classic import SplitMNIST
from avalanche.benchmarks.utils.data_loader import TaskBalancedDataLoader
from avalanche.models import SimpleMLP
from avalanche.training.strategies import Naive, Cumulative


class MyCumulativeStrategy(Cumulative):
    def make_train_dataloader(self, shuffle=True, **kwargs):
        # you can override make_train_dataloader to change the
        # strategy's dataloader
        # remember to iterate over self.adapted_dataset
        self.dataloader = TaskBalancedDataLoader(self.adapted_dataset,
                                                 batch_size=self.train_mb_size)


if __name__ == "__main__":
    benchmark = SplitMNIST(n_experiences=5)

    model = SimpleMLP(input_size=784, hidden_size=10)
    opt = SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=0.001)

    # we use our custom strategy to change the dataloading policy.
    cl_strategy = MyCumulativeStrategy(
        model,
        opt,
        CrossEntropyLoss(),
        train_epochs=1,
        train_mb_size=512,
        eval_mb_size=512,
    )

    for step in benchmark.train_stream:
        cl_strategy.train(step)
        cl_strategy.eval(benchmark.test_stream)
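For contrast, the behavior being overridden is, roughly, a single shuffled DataLoader over the adapted dataset; a simplified stand-in (the real base method also forwards options such as num_workers):

from torch.utils.data import DataLoader

def make_train_dataloader(self, shuffle=True, **kwargs):
    # simplified sketch of the default dataloader creation
    self.dataloader = DataLoader(self.adapted_dataset,
                                 batch_size=self.train_mb_size,
                                 shuffle=shuffle)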
Example 14
def main(args):
    """
    Last Avalanche version reference performance (online = 1 epoch):

    Class-incremental (online):
        Top1_Acc_Stream/eval_phase/test_stream = 0.9421
    Data-incremental (online):
        Top1_Acc_Stream/eval_phase/test_stream = 0.9309

    These are reference results for a single run.
    """
    # --- DEFAULT PARAMS ONLINE DATA INCREMENTAL LEARNING
    nb_tasks = 5  # Can still design the data stream based on tasks
    batch_size = 10  # Learning agent only has small amount of data available
    epochs = 1  # How many times to process each mini-batch
    return_task_id = False  # Data incremental (task-agnostic/task-free)

    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")
    # ---------

    # --- SCENARIO CREATION
    n_classes = 10
    task_scenario = SplitMNIST(
        nb_tasks,
        return_task_id=return_task_id,
        fixed_class_order=[i for i in range(n_classes)],
    )

    # Make data incremental (one batch = one experience)
    scenario = data_incremental_benchmark(task_scenario,
                                          experience_size=batch_size)
    print(
        f"{scenario.n_experiences} batches in online data incremental setup.")
    # 6002 batches for SplitMNIST with batch size 10
    # ---------
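    # Why 6002: each of the 5 experiences is split into size-10 batches
    # independently, so each contributes ceil(n_i / 10) batches (assuming the
    # generator keeps the partial last batch of every experience). With the
    # MNIST class counts per two-class experience:
    #   ceil(12665/10) + ceil(12089/10) + ceil(11263/10)
    #     + ceil(12183/10) + ceil(11800/10)
    #   = 1267 + 1209 + 1127 + 1219 + 1180 = 6002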

    # MODEL CREATION
    model = SimpleMLP(num_classes=args.featsize,
                      hidden_size=400,
                      hidden_layers=2,
                      drop_rate=0)

    # choose some metrics and evaluation method
    logger = TextLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(experience=True, stream=True),
        loss_metrics(experience=False, stream=True),
        StreamForgetting(),
        loggers=[logger],
        benchmark=scenario,
    )

    # CoPE PLUGIN
    cope = CoPEPlugin(mem_size=2000,
                      alpha=0.99,
                      p_size=args.featsize,
                      n_classes=n_classes)

    # CREATE THE STRATEGY INSTANCE (NAIVE) WITH CoPE PLUGIN
    cl_strategy = Naive(
        model,
        torch.optim.SGD(model.parameters(), lr=0.01),
        cope.ppp_loss,  # CoPE PPP-Loss
        train_mb_size=batch_size,
        train_epochs=epochs,
        eval_mb_size=100,
        device=device,
        plugins=[cope],
        evaluator=eval_plugin,
    )

    # TRAINING LOOP
    print("Starting experiment...")
    results = []
    cl_strategy.train(scenario.train_stream)

    print("Computing accuracy on the whole test set")
    results.append(cl_strategy.eval(scenario.test_stream))
Example 15
def main():
    args = parser.parse_args()
    args.cuda = args.cuda == 'yes'
    args.disable_pbar = args.disable_pbar == 'yes'
    args.stable_sgd = args.stable_sgd == 'yes'
    print(f"args={vars(args)}")

    device = torch.device("cuda:0" if torch.cuda.is_available() and args.cuda else "cpu")
    print(f'Using device: {device}')

    # unique identifier
    uid = uuid.uuid4().hex if args.uid is None else args.uid
    now = str(datetime.datetime.now().date()) + "_" + ':'.join(str(datetime.datetime.now().time()).split(':')[:-1])
    runname = 'T={}_id={}'.format(now, uid) if not args.resume else args.resume

    # Paths
    setupname = [args.strategy, args.exp_name, args.model, args.scenario]
    parentdir = os.path.join(args.save_path, '_'.join(setupname))
    results_path = Path(os.path.join(parentdir, runname))
    results_path.mkdir(parents=True, exist_ok=True)
    tb_log_dir = os.path.join(results_path, 'tb_run')  # Group all runs

    # Eval results
    eval_metric = 'Top1_Acc_Stream/eval_phase/test_stream'
    eval_results_dir = results_path / eval_metric.split('/')[0]
    eval_results_dir.mkdir(parents=True, exist_ok=True)

    eval_result_files = []  # To avg over seeds
    seeds = [args.seed] if args.seed is not None else list(range(args.n_seeds))
    for seed in seeds:
        # initialize seeds
        print("STARTING SEED {}/{}".format(seed, len(seeds) - 1))

        set_seed(seed)

        # create scenario
        if args.scenario == 'smnist':
            inputsize = 28 * 28
            scenario = SplitMNIST(n_experiences=5, return_task_id=False, seed=seed,
                                  fixed_class_order=[i for i in range(10)])
        elif args.scenario == 'CIFAR10':
            scenario = SplitCIFAR10(n_experiences=5, return_task_id=False, seed=seed,
                                    fixed_class_order=[i for i in range(10)])
            inputsize = (3, 32, 32)
        elif args.scenario == 'miniimgnet':
            scenario = SplitMiniImageNet(args.dset_rootpath, n_experiences=20, return_task_id=False, seed=seed,
                                         fixed_class_order=[i for i in range(100)])
            inputsize = (3, 84, 84)
        else:
            raise ValueError("Wrong scenario name.")
        print(f"Scenario = {args.scenario}")

        if args.model == 'simple_mlp':
            model = MyMLP(input_size=inputsize, hidden_size=args.hs)
        elif args.model == 'resnet18':
            if not args.stable_sgd:
                assert args.drop_prob == 0
            model = ResNet18(inputsize, scenario.n_classes, drop_prob=args.drop_prob)
        else:
            raise ValueError("Wrong model name. Allowed simple_mlp, resnet18.")

        criterion = torch.nn.CrossEntropyLoss()
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)

        # Paths
        eval_results_file = eval_results_dir / f'seed={seed}.csv'

        # LOGGING
        tb_logger = TensorboardLogger(tb_log_dir=tb_log_dir, tb_log_exp_name=f'seed={seed}.pt')  # log to Tensorboard
        print_logger = TextLogger() if args.disable_pbar else InteractiveLogger()  # print to stdout
        eval_logger = EvalTextLogger(metric_filter=eval_metric, file=open(eval_results_file, 'a'))
        eval_result_files.append(eval_results_file)

        # METRICS
        eval_plugin = EvaluationPlugin(
            accuracy_metrics(experience=True, stream=True),
            loss_metrics(minibatch=True, experience=True),
            ExperienceForgetting(),  # Test only
            StreamConfusionMatrix(num_classes=scenario.n_classes, save_image=True),

            # LOG OTHER STATS
            # timing_metrics(epoch=True, experience=False),
            # cpu_usage_metrics(experience=True),
            # DiskUsageMonitor(),
            # MinibatchMaxRAM(),
            # GpuUsageMonitor(0),
            loggers=[print_logger, tb_logger, eval_logger])

        plugins = None
        if args.strategy == 'replay':
            plugins = [RehRevPlugin(n_total_memories=args.mem_size,
                                    mode=args.replay_mode,  # STEP-BACK
                                    aversion_steps=args.aversion_steps,
                                    aversion_lr=args.aversion_lr,
                                    stable_sgd=args.stable_sgd,  # Stable SGD
                                    lr_decay=args.lr_decay,
                                    init_epochs=args.init_epochs  # First task epochs
                                    )]

        # CREATE THE STRATEGY INSTANCE (NAIVE)
        strategy = Naive(model, optimizer, criterion,
                         train_epochs=args.epochs, device=device,
                         train_mb_size=args.bs, evaluator=eval_plugin,
                         plugins=plugins
                         )

        # train on the selected scenario with the chosen strategy
        print('Starting experiment...')
        for experience in scenario.train_stream:
            if experience.current_experience == args.until_task:
                print("CUTTING OF TRAINING AT TASK ", experience.current_experience)
                break
            else:
                print("Start training on step ", experience.current_experience)

            strategy.train(experience)
            print("End training on step ", experience.current_experience)
            print('Computing accuracy on the test set')
            res = strategy.eval(scenario.test_stream[:args.until_task])  # Gathered by EvalLogger

    final_results_file = eval_results_dir / 'seed_summary.pt'
    stat_summarize(eval_result_files, final_results_file)
    print(f"[FILE:TB-RESULTS]: {tb_log_dir}")
    print(f"[FILE:FINAL-RESULTS]: {final_results_file}")
    print("FINISHED SCRIPT")
Example 16
def main(args):
    model = SimpleMLP(hidden_size=args.hs)
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
    criterion = torch.nn.CrossEntropyLoss()

    # check if selected GPU is available or use CPU
    assert args.cuda == -1 or args.cuda >= 0, "cuda must be -1 or >= 0."
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")
    print(f"Using device: {device}")

    # create scenario
    if args.scenario == "pmnist":
        scenario = PermutedMNIST(n_experiences=args.permutations)
    elif args.scenario == "smnist":
        scenario = SplitMNIST(n_experiences=5, return_task_id=False)
    else:
        raise ValueError("Wrong scenario name. Allowed pmnist, smnist.")

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(minibatch=True,
                         epoch=True,
                         experience=True,
                         stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        forgetting_metrics(experience=True),
        loggers=[interactive_logger],
    )

    # create strategy
    if args.strategy == "gem":
        strategy = GEM(
            model,
            optimizer,
            criterion,
            args.patterns_per_exp,
            args.memory_strength,
            train_epochs=args.epochs,
            device=device,
            train_mb_size=10,
            evaluator=eval_plugin,
        )
    elif args.strategy == "agem":
        strategy = AGEM(
            model,
            optimizer,
            criterion,
            args.patterns_per_exp,
            args.sample_size,
            train_epochs=args.epochs,
            device=device,
            train_mb_size=10,
            evaluator=eval_plugin,
        )
    else:
        raise ValueError("Wrong strategy name. Allowed gem, agem.")
    # train on the selected scenario with the chosen strategy
    print("Starting experiment...")
    results = []
    for experience in scenario.train_stream:
        print("Start training on experience ", experience.current_experience)

        strategy.train(experience)
        print("End training on experience ", experience.current_experience)
        print("Computing accuracy on the test set")
        results.append(strategy.eval(scenario.test_stream[:]))
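The two strategies share every other argument; the memory-specific positionals are where they differ (a summary for orientation, not code from the original):

# GEM(model, optimizer, criterion, patterns_per_exp, memory_strength, ...)
#   projects the gradient via a quadratic program so the loss on every
#   stored experience's reference gradients cannot increase
# AGEM(model, optimizer, criterion, patterns_per_exp, sample_size, ...)
#   projects only against the average gradient of one batch sampled from
#   memory, trading GEM's per-task guarantees for speed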