Code example #1
File: replay.py  Project: ryanlindeborg/avalanche
def main(args):
    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}"
                          if torch.cuda.is_available() and
                          args.cuda >= 0 else "cpu")
    n_batches = 5
    # ---------

    # --- TRANSFORMATIONS
    train_transform = transforms.Compose([
        RandomCrop(28, padding=4),
        ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    test_transform = transforms.Compose([
        ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    # ---------

    # --- SCENARIO CREATION
    mnist_train = MNIST('./data/mnist', train=True,
                        download=True, transform=train_transform)
    mnist_test = MNIST('./data/mnist', train=False,
                       download=True, transform=test_transform)
    scenario = nc_scenario(
        mnist_train, mnist_test, n_batches, task_labels=False, seed=1234)
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=scenario.n_classes)

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(
            minibatch=True, epoch=True, experience=True, stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        ExperienceForgetting(),
        loggers=[interactive_logger])

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(model, torch.optim.Adam(model.parameters(), lr=0.001),
                        CrossEntropyLoss(),
                        train_mb_size=100, train_epochs=4, eval_mb_size=100, device=device,
                        plugins=[ReplayPlugin(mem_size=10000)],
                        evaluator=eval_plugin
                        )

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start of experience ", experience.current_experience)
        cl_strategy.train(experience)
        print('Training completed')

        print('Computing accuracy on the whole test set')
        results.append(cl_strategy.eval(scenario.test_stream))
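These main(args) snippets assume an argparse entry point that is not shown. A minimal sketch of such a runner, assuming a --cuda flag that selects a GPU id (a negative value falls back to CPU, matching the device-selection logic above):

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    # GPU id to run on; pass a negative value to force CPU
    parser.add_argument("--cuda", type=int, default=0)
    main(parser.parse_args())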
Code example #2
def main(args):

    # Model getter: specify dataset and depth of the network.
    model = pytorchcv_wrapper.resnet('cifar10', depth=20, pretrained=False)

    # Or get a more specific model, e.g. a wide ResNet with depth 40 and
    # widening factor 8 for CIFAR-10.
    # model = pytorchcv_wrapper.get_model("wrn40_8_cifar10", pretrained=False)

    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}"
                          if torch.cuda.is_available() and
                          args.cuda >= 0 else "cpu")

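    # NOTE: the next line overrides the device selected above and forces CPU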
    device = "cpu"

    # --- TRANSFORMATIONS
    transform = transforms.Compose([
        ToTensor(),
        transforms.Normalize((0.491, 0.482, 0.446), (0.247, 0.243, 0.261))
    ])

    # --- SCENARIO CREATION
    cifar_train = CIFAR10(root=expanduser("~") + "/.avalanche/data/cifar10/",
                          train=True, download=True, transform=transform)
    cifar_test = CIFAR10(root=expanduser("~") + "/.avalanche/data/cifar10/",
                         train=False, download=True, transform=transform)
    scenario = nc_benchmark(
        cifar_train, cifar_test, 5, task_labels=False, seed=1234,
        fixed_class_order=[i for i in range(10)])

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(
            minibatch=True, epoch=True, experience=True, stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        forgetting_metrics(experience=True),
        loggers=[interactive_logger])

    # CREATE THE STRATEGY INSTANCE (Naive, with Replay)
    cl_strategy = Naive(model, torch.optim.SGD(model.parameters(), lr=0.01),
                        CrossEntropyLoss(),
                        train_mb_size=100, train_epochs=1, eval_mb_size=100,
                        device=device,
                        plugins=[ReplayPlugin(mem_size=1000)],
                        evaluator=eval_plugin
                        )

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start of experience ", experience.current_experience)
        cl_strategy.train(experience)
        print('Training completed')

        print('Computing accuracy on the whole test set')
        results.append(cl_strategy.eval(scenario.test_stream))
Code example #3
    def test_replay_balanced_memory(self):
        scenario = self.create_scenario(task_labels=True)
        mem_size = 25
        model = SimpleMLP(input_size=6, hidden_size=10)
        replayPlugin = ReplayPlugin(mem_size=mem_size)
        cl_strategy = Naive(model,
                            SGD(model.parameters(),
                                lr=0.001,
                                momentum=0.9,
                                weight_decay=0.001),
                            CrossEntropyLoss(),
                            train_mb_size=32,
                            train_epochs=1,
                            eval_mb_size=100,
                            plugins=[replayPlugin])

        for step in scenario.train_stream:
            curr_mem_size = min(mem_size, len(step.dataset))
            cl_strategy.train(step)
            ext_mem = replayPlugin.ext_mem
            lengths = []
            for task_id in ext_mem.keys():
                lengths.append(len(ext_mem[task_id]))
            self.assertEqual(sum(lengths), curr_mem_size)
            difference = max(lengths) - min(lengths)
            self.assertLessEqual(difference, 1)
Code example #4
    def test_dataload_batch_balancing(self):
        scenario = get_fast_scenario()
        model = SimpleMLP(input_size=6, hidden_size=10)
        batch_size = 32
        replayPlugin = ReplayPlugin(mem_size=20)
        cl_strategy = Naive(model,
                            SGD(model.parameters(),
                                lr=0.001,
                                momentum=0.9,
                                weight_decay=0.001),
                            CrossEntropyLoss(),
                            train_mb_size=batch_size,
                            train_epochs=1,
                            eval_mb_size=100,
                            plugins=[replayPlugin])

        for step in scenario.train_stream:
            adapted_dataset = step.dataset
            dataloader = MultiTaskJoinedBatchDataLoader(
                adapted_dataset,
                AvalancheConcatDataset(replayPlugin.ext_mem.values()),
                oversample_small_tasks=True,
                num_workers=0,
                batch_size=batch_size,
                shuffle=True)

            for mini_batch in dataloader:
                lengths = []
                for task_id in mini_batch.keys():
                    lengths.append(len(mini_batch[task_id][1]))
                if sum(lengths) == batch_size:
                    difference = max(lengths) - min(lengths)
                    self.assertLessEqual(difference, 1)
                self.assertLessEqual(sum(lengths), batch_size)
            cl_strategy.train(step)
Code example #5
File: test_models.py  Project: Mattdl/avalanche-1
    def test_multihead_head_selection(self):
        # Check that the correct head is selected for each task
        # label during the forward pass.
        model = MultiHeadClassifier(in_features=6)
        optimizer = SGD(model.parameters(), lr=1e-3)
        criterion = CrossEntropyLoss()
        benchmark = get_fast_benchmark(use_task_labels=True, shuffle=False)

        strategy = Naive(model, optimizer, criterion,
                         train_mb_size=100, train_epochs=1,
                         eval_mb_size=100, device='cpu')
        strategy.evaluator.loggers = [TextLogger(sys.stdout)]

        # initialize head
        strategy.train(benchmark.train_stream[0])
        strategy.train(benchmark.train_stream[4])

        # create models with fixed head
        model_t0 = model.classifiers['0']
        model_t4 = model.classifiers['4']

        # check head task0
        for x, y, t in DataLoader(benchmark.train_stream[0].dataset):
            y_mh = model(x, t)
            y_t = model_t0(x)
            assert ((y_mh - y_t) ** 2).sum() < 1.e-7
            break

        # check head task4
        for x, y, t in DataLoader(benchmark.train_stream[4].dataset):
            y_mh = model(x, t)
            y_t = model_t4(x)
            assert ((y_mh - y_t) ** 2).sum() < 1.e-7
            break
Code example #6
def main(args):

    # Config
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")
    # model
    model = SimpleMLP(input_size=32 * 32 * 3, num_classes=10)

    # CL Benchmark Creation
    scenario = SplitCIFAR10(n_experiences=5, return_task_id=True)
    train_stream = scenario.train_stream
    test_stream = scenario.test_stream

    # Prepare for training & testing
    optimizer = Adam(model.parameters(), lr=0.01)
    criterion = CrossEntropyLoss()

    # Choose a CL strategy
    strategy = Naive(model=model,
                     optimizer=optimizer,
                     criterion=criterion,
                     train_mb_size=128,
                     train_epochs=3,
                     eval_mb_size=128,
                     device=device)

    # train and test loop
    for train_task in train_stream:
        strategy.train(train_task, num_workers=0)
        strategy.eval(test_stream)
Code example #7
    def _test_replay_balanced_memory(self, storage_policy, mem_size):
        benchmark = get_fast_benchmark(use_task_labels=True)
        model = SimpleMLP(input_size=6, hidden_size=10)
        replayPlugin = ReplayPlugin(
            mem_size=mem_size, storage_policy=storage_policy
        )
        cl_strategy = Naive(
            model,
            SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=0.001),
            CrossEntropyLoss(),
            train_mb_size=32,
            train_epochs=1,
            eval_mb_size=100,
            plugins=[replayPlugin],
        )

        n_seen_data = 0
        for step in benchmark.train_stream:
            n_seen_data += len(step.dataset)
            mem_fill = min(mem_size, n_seen_data)
            cl_strategy.train(step)
            lengths = []
            for d in replayPlugin.storage_policy.buffer_datasets:
                lengths.append(len(d))
            self.assertEqual(sum(lengths), mem_fill)  # Always fully filled
Code example #8
    def _test_scheduler_plugin(self, gamma, milestones, base_lr, epochs,
                               reset_lr, reset_scheduler, expected):
        class TestPlugin(StrategyPlugin):
            def __init__(self, expected_lrs):
                super().__init__()
                self.expected_lrs = expected_lrs

            def after_training_epoch(self, strategy, **kwargs):
                exp_id = strategy.training_exp_counter

                expected_lr = self.expected_lrs[exp_id][strategy.epoch]
                for group in strategy.optimizer.param_groups:
                    assert group['lr'] == expected_lr

        scenario = self.create_scenario()
        model = SimpleMLP(input_size=6, hidden_size=10)

        optim = SGD(model.parameters(), lr=base_lr)
        lrSchedulerPlugin = LRSchedulerPlugin(
            MultiStepLR(optim, milestones=milestones, gamma=gamma),
            reset_lr=reset_lr,
            reset_scheduler=reset_scheduler)

        cl_strategy = Naive(model,
                            optim,
                            CrossEntropyLoss(),
                            train_mb_size=32,
                            train_epochs=epochs,
                            eval_mb_size=100,
                            plugins=[lrSchedulerPlugin,
                                     TestPlugin(expected)])

        cl_strategy.train(scenario.train_stream[0])
        cl_strategy.train(scenario.train_stream[1])
Code example #9
    def assert_balancing(self, policy):
        benchmark = get_fast_benchmark(use_task_labels=True)
        replay = ReplayPlugin(mem_size=100, storage_policy=policy)
        model = SimpleMLP(num_classes=benchmark.n_classes)

        # CREATE THE STRATEGY INSTANCE (NAIVE)
        cl_strategy = Naive(
            model,
            SGD(model.parameters(), lr=0.001),
            CrossEntropyLoss(),
            train_mb_size=100,
            train_epochs=0,
            eval_mb_size=100,
            plugins=[replay],
            evaluator=None,
        )

        for exp in benchmark.train_stream:
            cl_strategy.train(exp)

            ext_mem = policy.buffer_groups
            ext_mem_data = policy.buffer_datasets
            print(list(ext_mem.keys()), [len(el) for el in ext_mem_data])

            # buffer size should equal policy.max_size once enough data is seen
            len_tot = sum([len(el) for el in ext_mem_data])
            assert len_tot == policy.max_size
Code example #10
def main(args):
    """
    Last Avalanche version reference performance (online):
        Top1_Acc_Stream/eval_phase/test_stream = 0.9421
    """
    # --- DEFAULT PARAMS ONLINE DATA INCREMENTAL LEARNING
    nb_tasks = 5  # Can still design the data stream based on tasks
    epochs = 1  # All data is only seen once: Online
    batch_size = 10  # Only process small amount of data at a time
    return_task_id = False  # Data incremental (task-agnostic/task-free)
    # TODO use data_incremental_generator, now experience=task

    # --- CONFIG
    device = torch.device(
        f"cuda:{args.cuda}" if torch.cuda.is_available() and args.cuda >= 0
        else "cpu")
    # ---------

    # --- SCENARIO CREATION
    scenario = SplitMNIST(nb_tasks, return_task_id=return_task_id,
                          fixed_class_order=[i for i in range(10)])
    # ---------

    # MODEL CREATION
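    # Note: with CoPE the model output is used as a feature embedding, so its
    # size is args.featsize, matching the prototype size (p_size) set below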
    model = SimpleMLP(num_classes=args.featsize,
                      hidden_size=400, hidden_layers=2)

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(experience=True, stream=True),
        loss_metrics(experience=True, stream=True),
        ExperienceForgetting(),
        loggers=[interactive_logger])

    # CoPE PLUGIN
    cope = CoPEPlugin(mem_size=2000, p_size=args.featsize,
                      n_classes=scenario.n_classes)

    # CREATE THE STRATEGY INSTANCE (NAIVE) WITH CoPE PLUGIN
    cl_strategy = Naive(model, torch.optim.SGD(model.parameters(), lr=0.01),
                        cope.loss,  # CoPE PPP-Loss
                        train_mb_size=batch_size, train_epochs=epochs,
                        eval_mb_size=100, device=device,
                        plugins=[cope],
                        evaluator=eval_plugin
                        )

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start of experience ", experience.current_experience)
        cl_strategy.train(experience)
        print('Training completed')

        print('Computing accuracy on the whole test set')
        results.append(cl_strategy.eval(scenario.test_stream))
Code example #11
def main(args):
    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}"
                          if torch.cuda.is_available() and
                          args.cuda >= 0 else "cpu")
    # ---------

    # --- TRANSFORMATIONS
    train_transform = transforms.Compose([
        RandomCrop(28, padding=4),
        ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    test_transform = transforms.Compose([
        ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    # ---------

    # --- SCENARIO CREATION
    mnist_train = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                        train=True, download=True, transform=train_transform)
    mnist_test = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                       train=False, download=True, transform=test_transform)
    scenario = nc_scenario(
        mnist_train, mnist_test, 5, task_labels=False, seed=1234)
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=scenario.n_classes)

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(epoch=True, experience=True, stream=True),
        loss_metrics(epoch=True, experience=True, stream=True),
        # save_image should be False to view results properly in the
        # InteractiveLogger: the confusion matrix is printed as a tensor
        StreamConfusionMatrix(save_image=False, normalize='all'),
        loggers=InteractiveLogger()
    )

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(
        model, SGD(model.parameters(), lr=0.001, momentum=0.9),
        CrossEntropyLoss(), train_mb_size=100, train_epochs=4, eval_mb_size=100,
        device=device, evaluator=eval_plugin, plugins=[ReplayPlugin(5000)])

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start of experience: ", experience.current_experience)
        print("Current Classes: ", experience.classes_in_this_experience)

        cl_strategy.train(experience)
        print('Training completed')

        print('Computing accuracy on the whole test set')
        results.append(cl_strategy.eval(scenario.test_stream))
Code example #12
File: test_models.py  Project: pkraison/avalanche
    def test_multihead_head_creation(self):
        # Check if the optimizer is updated correctly
        # when heads are created and updated.
        model = MTSimpleMLP(input_size=6, hidden_size=10)
        optimizer = SGD(model.parameters(), lr=1e-3)
        criterion = CrossEntropyLoss()
        benchmark = get_fast_benchmark(use_task_labels=True, shuffle=False)

        strategy = Naive(
            model,
            optimizer,
            criterion,
            train_mb_size=100,
            train_epochs=1,
            eval_mb_size=100,
            device="cpu",
        )
        strategy.evaluator.loggers = [TextLogger(sys.stdout)]
        print(
            "Current Classes: ",
            benchmark.train_stream[4].classes_in_this_experience,
        )
        print(
            "Current Classes: ",
            benchmark.train_stream[0].classes_in_this_experience,
        )

        # head creation
        strategy.train(benchmark.train_stream[0])
        w_ptr = model.classifier.classifiers["0"].classifier.weight.data_ptr()
        b_ptr = model.classifier.classifiers["0"].classifier.bias.data_ptr()
        opt_params_ptrs = [
            w.data_ptr() for group in optimizer.param_groups
            for w in group["params"]
        ]
        assert w_ptr in opt_params_ptrs
        assert b_ptr in opt_params_ptrs

        # head update
        strategy.train(benchmark.train_stream[4])
        w_ptr_t0 = model.classifier.classifiers["0"].classifier.weight.data_ptr()
        b_ptr_t0 = model.classifier.classifiers["0"].classifier.bias.data_ptr()
        w_ptr_new = model.classifier.classifiers["4"].classifier.weight.data_ptr()
        b_ptr_new = model.classifier.classifiers["4"].classifier.bias.data_ptr()
        opt_params_ptrs = [
            w.data_ptr() for group in optimizer.param_groups
            for w in group["params"]
        ]

        assert w_ptr not in opt_params_ptrs  # head0 has been updated
        assert b_ptr not in opt_params_ptrs  # head0 has been updated
        assert w_ptr_t0 in opt_params_ptrs
        assert b_ptr_t0 in opt_params_ptrs
        assert w_ptr_new in opt_params_ptrs
        assert b_ptr_new in opt_params_ptrs
Code example #13
def main(args):
    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")
    # ---------

    # --- TRANSFORMATIONS
    train_transform = transforms.Compose([
        RandomCrop(28, padding=4),
        ToTensor(),
        transforms.Normalize((0.1307, ), (0.3081, ))
    ])
    test_transform = transforms.Compose(
        [ToTensor(), transforms.Normalize((0.1307, ), (0.3081, ))])
    # ---------

    # --- SCENARIO CREATION
    mnist_train = MNIST('./data/mnist',
                        train=True,
                        download=True,
                        transform=train_transform)
    mnist_test = MNIST('./data/mnist',
                       train=False,
                       download=True,
                       transform=test_transform)
    scenario = nc_scenario(mnist_train,
                           mnist_test,
                           5,
                           task_labels=False,
                           seed=1234)
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=scenario.n_classes)

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(model,
                        SGD(model.parameters(), lr=0.001, momentum=0.9),
                        CrossEntropyLoss(),
                        train_mb_size=100,
                        train_epochs=4,
                        eval_mb_size=100,
                        device=device)

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start of experience: ", experience.current_experience)
        print("Current Classes: ", experience.classes_in_this_experience)

        cl_strategy.train(experience)
        print('Training completed')

        print('Computing accuracy on the whole test set')
        results.append(cl_strategy.eval(scenario.test_stream))
Code example #14
File: demo_cifar_100.py  Project: Mathieu4141/TriCICL
def evaluate_on_cifar_100(
    *,
    method_name: str,
    plugins: List[StrategyPlugin],
    tb_dir: str = str(TB_DIR),
    seed: int = 42,
    verbose: bool = False,
    train_epochs: int = 70,
    n_classes_per_batch: int = 10,
    start_lr: float = 2.0,
    lr_milestones: List[int] = None,
    lr_gamma: float = 0.2,
):
    assert not N_CLASSES % n_classes_per_batch, \
        "N_CLASSES must be a multiple of n_classes_per_batch"

    scenario = SplitCIFAR100(n_experiences=N_CLASSES // n_classes_per_batch)
    model = ResNet32(n_classes=N_CLASSES)

    tb_logger = TensorboardLogger(tb_dir + f"/cifar100_{n_classes_per_batch}/{method_name}/{seed}_{create_time_id()}")

    loggers = [tb_logger]
    if verbose:
        loggers.append(InteractiveLogger())

    strategy = Naive(
        model=model,
        optimizer=SGD(model.parameters(), lr=start_lr, weight_decay=0.00001),
        criterion=CrossEntropyLoss(),
        train_epochs=train_epochs,
        train_mb_size=128,
        device=device,
        plugins=plugins + [LRSchedulerPlugin(
            start_lr=start_lr, milestones=lr_milestones, gamma=lr_gamma)],
        evaluator=EvaluationPlugin(
            [
                NormalizedStreamAccuracy(),
                NormalizedExperienceAccuracy(),
                ExperienceMeanRepresentationShift(MeanL2RepresentationShift()),
                ExperienceMeanRepresentationShift(MeanCosineRepresentationShift()),
            ],
            StreamConfusionMatrix(
                num_classes=N_CLASSES,
                image_creator=SortedCMImageCreator(scenario.classes_order),
            ),
            loggers=loggers,
        ),
    )

    for i, train_task in enumerate(scenario.train_stream, 1):
        strategy.train(train_task, num_workers=0)
        strategy.eval(scenario.test_stream[:i])

    tb_logger.writer.flush()
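No call site for evaluate_on_cifar_100 is included above. A hedged usage sketch; the method name, plugin list, and milestones below are placeholders, not values from the source:

evaluate_on_cifar_100(
    method_name="naive",     # hypothetical run label
    plugins=[],              # continual-learning plugins under test
    lr_milestones=[49, 63],  # assumed schedule; keep milestones < train_epochs
)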
Code example #15
    def test_no_errors(self):
        eval_plugin = EvaluationPlugin(accuracy_metrics(stream=True),
                                       loggers=None,
                                       benchmark=self.benchmark,
                                       strict_checks=True)
        strategy = Naive(self.model,
                         self.optimizer,
                         self.criterion,
                         train_epochs=2,
                         eval_every=0,
                         evaluator=eval_plugin)
        for exp in self.benchmark.train_stream:
            strategy.train(exp, eval_streams=[self.benchmark.test_stream])
            strategy.eval(self.benchmark.test_stream)
Code example #16
    def test_dataload_reinit(self):
        scenario = get_fast_scenario()
        model = SimpleMLP(input_size=6, hidden_size=10)

        replayPlugin = ReplayPlugin(mem_size=5)
        cl_strategy = Naive(
            model,
            SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=0.001),
            CrossEntropyLoss(), train_mb_size=16, train_epochs=1,
            eval_mb_size=16,
            plugins=[replayPlugin]
        )
        for step in scenario.train_stream[:2]:
            cl_strategy.train(step)
Code example #17
def evaluate_split_mnist(
    name: str,
    plugins: List[StrategyPlugin],
    seed: int,
    tensorboard_logs_dir: Union[str, Path] = str(TB_DIR),
    verbose: bool = False,
    criterion: Any = CrossEntropyLoss(),
):

    split_mnist = SplitMNIST(n_experiences=5, seed=seed)

    model = SimpleMLP(n_classes=split_mnist.n_classes, input_size=28 * 28)
    # model = SimpleCNN(n_channels=1, n_classes=split_mnist.n_classes)

    tb_logger = TensorboardLogger(tensorboard_logs_dir + f"/split_mnist/{name}/{seed}_{create_time_id()}")

    loggers = [tb_logger]
    if verbose:
        loggers.append(InteractiveLogger())

    cl_strategy = Naive(
        model=model,
        optimizer=SGD(model.parameters(), lr=0.001, momentum=0.9),
        criterion=criterion,
        train_mb_size=32,
        train_epochs=2,
        eval_mb_size=32,
        device=device,
        plugins=plugins,
        evaluator=EvaluationPlugin(
            [
                NormalizedStreamAccuracy(),
                NormalizedExperienceAccuracy(),
                ExperienceMeanRepresentationShift(MeanL2RepresentationShift()),
                ExperienceMeanRepresentationShift(MeanCosineRepresentationShift()),
            ],
            StreamConfusionMatrix(
                num_classes=split_mnist.n_classes,
                image_creator=SortedCMImageCreator(split_mnist.classes_order),
            ),
            loggers=loggers,
        ),
    )

    for i, train_task in enumerate(split_mnist.train_stream, 1):
        cl_strategy.train(train_task, num_workers=0)
        cl_strategy.eval(split_mnist.test_stream[:i])

    tb_logger.writer.flush()
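As with the CIFAR-100 helper, the call site is omitted. A hypothetical invocation; the name and seed are placeholders:

evaluate_split_mnist(
    name="naive-baseline",  # hypothetical experiment label
    plugins=[],             # continual-learning plugins under test
    seed=42,
    verbose=True,
)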
Code example #18
def main(cuda: int):
    # --- CONFIG
    device = torch.device(
        f"cuda:{cuda}" if torch.cuda.is_available() else "cpu"
    )
    # --- SCENARIO CREATION
    scenario = SplitCIFAR10(n_experiences=2, seed=42)
    # ---------

    # MODEL CREATION
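    # 196608 // 64 == 3072 == 32 * 32 * 3, i.e. one flattened CIFAR-10 image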
    model = SimpleMLP(num_classes=scenario.n_classes, input_size=196608 // 64)

    # choose some metrics and evaluation method
    eval_plugin = EvaluationPlugin(
        accuracy_metrics(stream=True, experience=True),
        images_samples_metrics(
            on_train=True,
            on_eval=True,
            n_cols=10,
            n_rows=10,
        ),
        labels_repartition_metrics(
            # image_creator=repartition_bar_chart_image_creator,
            on_train=True,
            on_eval=True,
        ),
        loggers=[
            TensorboardLogger(f"tb_data/{datetime.now()}"),
            InteractiveLogger(),
        ],
    )

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(
        model,
        Adam(model.parameters()),
        train_mb_size=128,
        train_epochs=1,
        eval_mb_size=128,
        device=device,
        plugins=[ReplayPlugin(mem_size=1_000)],
        evaluator=eval_plugin,
    )

    # TRAINING LOOP
    for i, experience in enumerate(scenario.train_stream, 1):
        cl_strategy.train(experience)
        cl_strategy.eval(scenario.test_stream[:i])
Code example #19
    def test_raise_warning(self):
        eval_plugin = EvaluationPlugin(accuracy_metrics(stream=True),
                                       loggers=None,
                                       benchmark=self.benchmark,
                                       strict_checks=False)
        strategy = Naive(self.model,
                         self.optimizer,
                         self.criterion,
                         train_epochs=2,
                         eval_every=-1,
                         evaluator=eval_plugin)
        for exp in self.benchmark.train_stream:
            strategy.train(exp)
            strategy.eval(self.benchmark.test_stream)
        with self.assertWarns(UserWarning):
            strategy.eval(self.benchmark.test_stream[:2])
Code example #20
File: test_plugins.py  Project: bbeatrix/avalanche
    def test_callback_reachability(self):
        # Check that all the callbacks are called during
        # training and test loops.
        model = SimpleMLP(input_size=6, hidden_size=10)
        optimizer = SGD(model.parameters(), lr=1e-3)
        criterion = CrossEntropyLoss()
        scenario = self.create_scenario()

        plug = MockPlugin()
        strategy = Naive(model, optimizer, criterion,
                         train_mb_size=100, train_epochs=1, eval_mb_size=100,
                         device='cpu', plugins=[plug]
                         )
        strategy.evaluator.loggers = [TextLogger(sys.stdout)]
        strategy.train(scenario.train_stream[0], num_workers=4)
        strategy.eval([scenario.test_stream[0]], num_workers=4)
        assert all(plug.activated)
Code example #21
    def test_multihead_optimizer_update(self):
        # Check if the optimizer is updated correctly
        # when heads are created and updated.
        model = SimpleMLP(input_size=6, hidden_size=10)
        optimizer = SGD(model.parameters(), lr=1e-3)
        criterion = CrossEntropyLoss()
        scenario = self.create_scenario()

        plug = MultiHeadPlugin(model, 'classifier')
        strategy = Naive(model,
                         optimizer,
                         criterion,
                         train_mb_size=100,
                         train_epochs=1,
                         eval_mb_size=100,
                         device='cpu',
                         plugins=[plug])
        strategy.evaluator.loggers = [TextLogger(sys.stdout)]
        print("Current Classes: ",
              scenario.train_stream[0].classes_in_this_experience)
        print("Current Classes: ",
              scenario.train_stream[4].classes_in_this_experience)

        # head creation
        strategy.train(scenario.train_stream[0])
        w_ptr = model.classifier.weight.data_ptr()
        b_ptr = model.classifier.bias.data_ptr()
        opt_params_ptrs = [
            w.data_ptr() for group in optimizer.param_groups
            for w in group['params']
        ]
        assert w_ptr in opt_params_ptrs
        assert b_ptr in opt_params_ptrs

        # head update
        strategy.train(scenario.train_stream[4])
        w_ptr_new = model.classifier.weight.data_ptr()
        b_ptr_new = model.classifier.bias.data_ptr()
        opt_params_ptrs = [
            w.data_ptr() for group in optimizer.param_groups
            for w in group['params']
        ]
        assert w_ptr not in opt_params_ptrs
        assert b_ptr not in opt_params_ptrs
        assert w_ptr_new in opt_params_ptrs
        assert b_ptr_new in opt_params_ptrs
Code example #22
def main(args):
    # Device config
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")

    # model
    model = SimpleMLP(num_classes=10)

    # Here we show all the MNIST variations we offer in the "classic" benchmarks
    if args.mnist_type == "permuted":
        scenario = PermutedMNIST(n_experiences=5, seed=1)
    elif args.mnist_type == "rotated":
        scenario = RotatedMNIST(n_experiences=5,
                                rotations_list=[30, 60, 90, 120, 150],
                                seed=1)
    else:
        scenario = SplitMNIST(n_experiences=5, seed=1)

    # Then we can extract the parallel train and test streams
    train_stream = scenario.train_stream
    test_stream = scenario.test_stream

    # Prepare for training & testing
    optimizer = SGD(model.parameters(), lr=0.001, momentum=0.9)
    criterion = CrossEntropyLoss()

    # Continual learning strategy with default logger
    cl_strategy = Naive(
        model,
        optimizer,
        criterion,
        train_mb_size=32,
        train_epochs=100,
        eval_mb_size=32,
        device=device,
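        # evaluate after every epoch so EarlyStoppingPlugin can monitor the test stream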
        eval_every=1,
        plugins=[EarlyStoppingPlugin(args.patience, "test_stream")],
    )

    # train and test loop
    results = []
    for train_task, test_task in zip(train_stream, test_stream):
        print("Current Classes: ", train_task.classes_in_this_experience)
        cl_strategy.train(train_task, eval_streams=[test_task])
        results.append(cl_strategy.eval(test_stream))
Code example #23
    def _test_integration(self, module, clf_name, plugins=[]):
        module = as_multitask(module, clf_name)
        module = module.to(self.device)
        optimizer = SGD(
            module.parameters(), lr=0.05, momentum=0.9, weight_decay=0.0002
        )

        strategy = Naive(
            module,
            optimizer,
            train_mb_size=32,
            eval_mb_size=32,
            device=self.device,
            plugins=plugins,
        )

        for t, experience in enumerate(self.benchmark.train_stream):
            strategy.train(experience)
            strategy.eval(self.benchmark.test_stream[: t + 1])
Code example #24
File: endless_cl_sim.py  Project: pkraison/avalanche
def main(args):
    # Config
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")

    # Model
    model = SimpleCNN(num_classes=5)

    # CL Benchmark Creation
    scenario = EndlessCLSim(
        scenario=args.scenario,  # "Classes", "Illumination", "Weather"
        sequence_order=None,
        task_order=None,
        semseg=args.semseg,
        dataset_root=args.dataset_root,
    )

    train_stream = scenario.train_stream
    test_stream = scenario.test_stream

    # Prepare for training & testing
    optimizer = Adam(model.parameters(), lr=0.001)
    criterion = CrossEntropyLoss()

    # Choose a CL strategy
    strategy = Naive(
        model=model,
        optimizer=optimizer,
        criterion=criterion,
        train_mb_size=64,
        train_epochs=3,
        eval_mb_size=128,
        device=device,
    )

    # Train and test loop
    for train_task in train_stream:
        strategy.train(train_task, num_workers=0)
        strategy.eval(test_stream)

    return
Code example #25
File: test_plugins.py  Project: bbeatrix/avalanche
    def _test_replay_balanced_memory(self, storage_policy, mem_size):
        scenario = self.create_scenario(task_labels=True)
        model = SimpleMLP(input_size=6, hidden_size=10)
        replayPlugin = ReplayPlugin(mem_size=mem_size,
                                    storage_policy=storage_policy)
        cl_strategy = Naive(
            model,
            SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=0.001),
            CrossEntropyLoss(), train_mb_size=32, train_epochs=1,
            eval_mb_size=100, plugins=[replayPlugin]
        )

        n_seen_data = 0
        for step in scenario.train_stream:
            n_seen_data += len(step.dataset)
            mem_fill = min(mem_size, n_seen_data)
            cl_strategy.train(step)
            ext_mem = replayPlugin.ext_mem
            lengths = []
            for task_id in ext_mem.keys():
                lengths.append(len(ext_mem[task_id]))
            self.assertEqual(sum(lengths), mem_fill)  # Always fully filled
Code example #26
    def test_dataload_batch_balancing(self):
        benchmark = get_fast_benchmark()
        batch_size = 32
        replayPlugin = ReplayPlugin(mem_size=20)

        model = SimpleMLP(input_size=6, hidden_size=10)
        cl_strategy = Naive(
            model,
            SGD(model.parameters(), lr=0.001, momentum=0.9,
                weight_decay=0.001),
            CrossEntropyLoss(),
            train_mb_size=batch_size,
            train_epochs=1,
            eval_mb_size=100,
            plugins=[replayPlugin],
        )
        for step in benchmark.train_stream:
            adapted_dataset = step.dataset
            dataloader = ReplayDataLoader(
                adapted_dataset,
                replayPlugin.storage_policy.buffer,
                oversample_small_tasks=True,
                num_workers=0,
                batch_size=batch_size,
                shuffle=True,
            )

            for mini_batch in dataloader:
                mb_task_labels = mini_batch[-1]
                lengths = []
                for task_id in adapted_dataset.task_set:
                    len_task = (mb_task_labels == task_id).sum()
                    lengths.append(len_task)
                if sum(lengths) == batch_size:
                    difference = max(lengths) - min(lengths)
                    self.assertLessEqual(difference, 1)
                self.assertLessEqual(sum(lengths), batch_size)
            cl_strategy.train(step)
Code example #27
    def test_incremental_classifier(self):
        model = SimpleMLP(input_size=6, hidden_size=10)
        model.classifier = IncrementalClassifier(in_features=10)
        optimizer = SGD(model.parameters(), lr=1e-3)
        criterion = CrossEntropyLoss()
        scenario = self.scenario

        strategy = Naive(model,
                         optimizer,
                         criterion,
                         train_mb_size=100,
                         train_epochs=1,
                         eval_mb_size=100,
                         device='cpu')
        strategy.evaluator.loggers = [TextLogger(sys.stdout)]
        print("Current Classes: ",
              scenario.train_stream[0].classes_in_this_experience)
        print("Current Classes: ",
              scenario.train_stream[4].classes_in_this_experience)

        # train on first task
        strategy.train(scenario.train_stream[0])
        w_ptr = model.classifier.classifier.weight.data_ptr()
        b_ptr = model.classifier.classifier.bias.data_ptr()
        opt_params_ptrs = [
            w.data_ptr() for group in optimizer.param_groups
            for w in group['params']
        ]
        # classifier params should be optimized
        assert w_ptr in opt_params_ptrs
        assert b_ptr in opt_params_ptrs

        # train again on the same task.
        strategy.train(scenario.train_stream[0])
        # parameters should not change.
        assert w_ptr == model.classifier.classifier.weight.data_ptr()
        assert b_ptr == model.classifier.classifier.bias.data_ptr()
        # the same classifier params should still be optimized
        assert w_ptr in opt_params_ptrs
        assert b_ptr in opt_params_ptrs

        # update classifier with new classes.
        old_w_ptr, old_b_ptr = w_ptr, b_ptr
        strategy.train(scenario.train_stream[4])
        opt_params_ptrs = [
            w.data_ptr() for group in optimizer.param_groups
            for w in group['params']
        ]
        new_w_ptr = model.classifier.classifier.weight.data_ptr()
        new_b_ptr = model.classifier.classifier.bias.data_ptr()
        # weights should change.
        assert old_w_ptr != new_w_ptr
        assert old_b_ptr != new_b_ptr
        # Old params should not be optimized. New params should be optimized.
        assert old_w_ptr not in opt_params_ptrs
        assert old_b_ptr not in opt_params_ptrs
        assert new_w_ptr in opt_params_ptrs
        assert new_b_ptr in opt_params_ptrs
Code example #28
File: test_strategies.py  Project: rrmina/avalanche
    def test_periodic_eval(self):
        model = SimpleMLP(input_size=6, hidden_size=10)
        scenario = get_fast_scenario()
        optimizer = SGD(model.parameters(), lr=1e-3)
        criterion = CrossEntropyLoss()
        curve_key = 'Top1_Acc_Stream/eval_phase/train_stream'

        ###################
        # Case #1: No eval
        ###################
        # we use stream acc. because it emits a single value
        # for each eval loop.
        acc = StreamAccuracy()
        strategy = Naive(model,
                         optimizer,
                         criterion,
                         train_epochs=2,
                         eval_every=-1,
                         evaluator=EvaluationPlugin(acc))
        strategy.train(scenario.train_stream[0])
        # eval is not called in this case
        assert len(strategy.evaluator.all_metrics) == 0

        ###################
        # Case #2: Eval at the end only
        ###################
        acc = StreamAccuracy()
        strategy = Naive(model,
                         optimizer,
                         criterion,
                         train_epochs=2,
                         eval_every=0,
                         evaluator=EvaluationPlugin(acc))
        strategy.train(scenario.train_stream[0])
        # eval is called once at the end of the training loop
        curve = strategy.evaluator.all_metrics[curve_key][1]
        assert len(curve) == 1

        ###################
        # Case #3: Eval after every epoch
        ###################
        acc = StreamAccuracy()
        strategy = Naive(model,
                         optimizer,
                         criterion,
                         train_epochs=2,
                         eval_every=1,
                         evaluator=EvaluationPlugin(acc))
        strategy.train(scenario.train_stream[0])
        # eval is called after every epoch + the end of the training loop
        curve = strategy.evaluator.all_metrics[curve_key][1]
        assert len(curve) == 3
Code example #29
File: test_plugins.py  Project: pkraison/avalanche
    def _test_scheduler_plugin(
        benchmark,
        model,
        optim,
        scheduler,
        epochs,
        reset_lr,
        reset_scheduler,
        expected,
        criterion=None,
        metric=None,
        eval_on_valid_stream=False,
    ):
        lr_scheduler_plugin = LRSchedulerPlugin(
            scheduler,
            reset_lr=reset_lr,
            reset_scheduler=reset_scheduler,
            metric=metric,
        )

        verifier_plugin = SchedulerPluginTestPlugin(expected)

        if criterion is None:
            criterion = CrossEntropyLoss()
        if eval_on_valid_stream:
            cl_strategy = Naive(
                model,
                optim,
                criterion,
                train_mb_size=32,
                train_epochs=epochs,
                eval_mb_size=100,
                plugins=[lr_scheduler_plugin, verifier_plugin],
                eval_every=1,
                evaluator=None,
            )

            cl_strategy.train(
                benchmark.train_stream[0],
                shuffle=False,
                eval_streams=[benchmark.valid_stream[0]],
            )
            cl_strategy.train(
                benchmark.train_stream[1],
                shuffle=False,
                eval_streams=[benchmark.valid_stream[1]],
            )
        else:
            cl_strategy = Naive(
                model,
                optim,
                criterion,
                train_mb_size=32,
                train_epochs=epochs,
                eval_mb_size=100,
                plugins=[lr_scheduler_plugin, verifier_plugin],
                evaluator=None,
            )

            cl_strategy.train(benchmark.train_stream[0], shuffle=False)
            cl_strategy.train(benchmark.train_stream[1], shuffle=False)
Code example #30
def main(args):
    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")
    # ---------

    # --- TRANSFORMATIONS
    train_transform = transforms.Compose([
        RandomCrop(28, padding=4),
        ToTensor(),
        transforms.Normalize((0.1307, ), (0.3081, ))
    ])
    test_transform = transforms.Compose(
        [ToTensor(), transforms.Normalize((0.1307, ), (0.3081, ))])
    # ---------

    # --- SCENARIO CREATION
    mnist_train = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                        train=True,
                        download=True,
                        transform=train_transform)
    mnist_test = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                       train=False,
                       download=True,
                       transform=test_transform)
    scenario = nc_benchmark(mnist_train,
                            mnist_test,
                            5,
                            task_labels=False,
                            seed=1234)
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=scenario.n_classes)

    interactive_logger = InteractiveLogger()
    tensorboard_logger = TensorboardLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(minibatch=True,
                         epoch=True,
                         epoch_running=True,
                         experience=True,
                         stream=True),
        loss_metrics(minibatch=True,
                     epoch=True,
                     epoch_running=True,
                     experience=True,
                     stream=True),
        forgetting_metrics(experience=True, stream=True),
        StreamConfusionMatrix(),
        cpu_usage_metrics(minibatch=True,
                          epoch=True,
                          experience=True,
                          stream=True),
        timing_metrics(minibatch=True,
                       epoch=True,
                       experience=True,
                       stream=True),
        ram_usage_metrics(every=0.5,
                          minibatch=True,
                          epoch=True,
                          experience=True,
                          stream=True),
        gpu_usage_metrics(args.cuda,
                          every=0.5,
                          minibatch=True,
                          epoch=True,
                          experience=True,
                          stream=True),
        disk_usage_metrics(minibatch=True,
                           epoch=True,
                           experience=True,
                           stream=True),
        MAC_metrics(minibatch=True, epoch=True, experience=True),
        loggers=[interactive_logger, tensorboard_logger])

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(model,
                        SGD(model.parameters(), lr=0.001, momentum=0.9),
                        CrossEntropyLoss(),
                        train_mb_size=100,
                        train_epochs=4,
                        eval_mb_size=100,
                        device=device,
                        evaluator=eval_plugin)

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start of experience: ", experience.current_experience)
        print("Current Classes: ", experience.classes_in_this_experience)

        cl_strategy.train(experience)
        print('Training completed')

        print('Computing accuracy on the whole test set')
        results.append(cl_strategy.eval(scenario.test_stream))