Example #1
def main(args):

    # Model getter: specify dataset and depth of the network.
    model = pytorchcv_wrapper.resnet('cifar10', depth=20, pretrained=False)

    # Or get a more specific model, e.g. a Wide ResNet with depth 40 and
    # widening factor 8 for CIFAR-10.
    # model = pytorchcv_wrapper.get_model("wrn40_8_cifar10", pretrained=False)

    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}"
                          if torch.cuda.is_available() and
                          args.cuda >= 0 else "cpu")

    device = "cpu"

    # --- TRANSFORMATIONS
    transform = transforms.Compose([
        ToTensor(),
        transforms.Normalize((0.491, 0.482, 0.446), (0.247, 0.243, 0.261))
    ])

    # --- SCENARIO CREATION
    cifar_train = CIFAR10(root=expanduser("~") + "/.avalanche/data/cifar10/",
                          train=True, download=True, transform=transform)
    cifar_test = CIFAR10(root=expanduser("~") + "/.avalanche/data/cifar10/",
                         train=False, download=True, transform=transform)
    scenario = nc_benchmark(
        cifar_train, cifar_test, 5, task_labels=False, seed=1234,
        fixed_class_order=[i for i in range(10)])

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(
            minibatch=True, epoch=True, experience=True, stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        forgetting_metrics(experience=True),
        loggers=[interactive_logger])

    # CREATE THE STRATEGY INSTANCE (Naive, with Replay)
    cl_strategy = Naive(model, torch.optim.SGD(model.parameters(), lr=0.01),
                        CrossEntropyLoss(),
                        train_mb_size=100, train_epochs=1, eval_mb_size=100,
                        device=device,
                        plugins=[ReplayPlugin(mem_size=1000)],
                        evaluator=eval_plugin
                        )

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start of experience ", experience.current_experience)
        cl_strategy.train(experience)
        print('Training completed')

        print('Computing accuracy on the whole test set')
        results.append(cl_strategy.eval(scenario.test_stream))
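
Each example defines a main(args) entry point but omits the argument parsing. Below is a minimal sketch of the driver these scripts assume; the --cuda flag mirrors the args.cuda usage above, while the exact parser in the original scripts may differ.

if __name__ == "__main__":
    import argparse

    # Hypothetical driver: only the GPU-selection flag used by main() above.
    parser = argparse.ArgumentParser()
    parser.add_argument("--cuda", type=int, default=0,
                        help="GPU id to use; -1 forces the CPU.")
    main(parser.parse_args())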
Example #2
def main(args):
    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}"
                          if torch.cuda.is_available() and
                          args.cuda >= 0 else "cpu")
    n_batches = 5
    # ---------

    # --- TRANSFORMATIONS
    train_transform = transforms.Compose([
        RandomCrop(28, padding=4),
        ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    test_transform = transforms.Compose([
        ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    # ---------

    # --- SCENARIO CREATION
    mnist_train = MNIST('./data/mnist', train=True,
                        download=True, transform=train_transform)
    mnist_test = MNIST('./data/mnist', train=False,
                       download=True, transform=test_transform)
    scenario = nc_scenario(
        mnist_train, mnist_test, n_batches, task_labels=False, seed=1234)
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=scenario.n_classes)

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(
            minibatch=True, epoch=True, experience=True, stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        ExperienceForgetting(),
        loggers=[interactive_logger])

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(model, torch.optim.Adam(model.parameters(), lr=0.001),
                        CrossEntropyLoss(),
                        train_mb_size=100, train_epochs=4, eval_mb_size=100, device=device,
                        plugins=[ReplayPlugin(mem_size=10000)],
                        evaluator=eval_plugin
                        )

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start of experience ", experience.current_experience)
        cl_strategy.train(experience)
        print('Training completed')

        print('Computing accuracy on the whole test set')
        results.append(cl_strategy.eval(scenario.test_stream))
Example #3
def main(args):
    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}"
                          if torch.cuda.is_available() and
                          args.cuda >= 0 else "cpu")
    # ---------

    # --- TRANSFORMATIONS
    train_transform = transforms.Compose([
        RandomCrop(28, padding=4),
        ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    test_transform = transforms.Compose([
        ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    # ---------

    # --- SCENARIO CREATION
    mnist_train = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                        train=True, download=True, transform=train_transform)
    mnist_test = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                       train=False, download=True, transform=test_transform)
    scenario = nc_scenario(
        mnist_train, mnist_test, 5, task_labels=False, seed=1234)
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=scenario.n_classes)

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(epoch=True, experience=True, stream=True),
        loss_metrics(epoch=True, experience=True, stream=True),
        # save_image should be False to view the results properly in the
        # InteractiveLogger: the confusion matrix will be printed as a tensor.
        StreamConfusionMatrix(save_image=False, normalize='all'),
        loggers=InteractiveLogger()
    )

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(
        model, SGD(model.parameters(), lr=0.001, momentum=0.9),
        CrossEntropyLoss(), train_mb_size=100, train_epochs=4, eval_mb_size=100,
        device=device, evaluator=eval_plugin, plugins=[ReplayPlugin(5000)])

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start of experience: ", experience.current_experience)
        print("Current Classes: ", experience.classes_in_this_experience)

        cl_strategy.train(experience)
        print('Training completed')

        print('Computing accuracy on the whole test set')
        results.append(cl_strategy.eval(scenario.test_stream))
Example #4
def main(args):
    """
    Last Avalanche version reference performance (online):
        Top1_Acc_Stream/eval_phase/test_stream = 0.9421
    """
    # --- DEFAULT PARAMS ONLINE DATA INCREMENTAL LEARNING
    nb_tasks = 5  # Can still design the data stream based on tasks
    epochs = 1  # All data is only seen once: Online
    batch_size = 10  # Only process a small amount of data at a time
    return_task_id = False  # Data incremental (task-agnostic/task-free)
    # TODO use data_incremental_generator, now experience=task

    # --- CONFIG
    device = torch.device(
        f"cuda:{args.cuda}" if torch.cuda.is_available() and args.cuda >= 0
        else "cpu")
    # ---------

    # --- SCENARIO CREATION
    scenario = SplitMNIST(nb_tasks, return_task_id=return_task_id,
                          fixed_class_order=[i for i in range(10)])
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=args.featsize,
                      hidden_size=400, hidden_layers=2)

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(experience=True, stream=True),
        loss_metrics(experience=True, stream=True),
        ExperienceForgetting(),
        loggers=[interactive_logger])

    # CoPE PLUGIN
    cope = CoPEPlugin(mem_size=2000, p_size=args.featsize,
                      n_classes=scenario.n_classes)

    # CREATE THE STRATEGY INSTANCE (NAIVE) WITH CoPE PLUGIN
    cl_strategy = Naive(model, torch.optim.SGD(model.parameters(), lr=0.01),
                        cope.loss,  # CoPE PPP-Loss
                        train_mb_size=batch_size, train_epochs=epochs,
                        eval_mb_size=100, device=device,
                        plugins=[cope],
                        evaluator=eval_plugin
                        )

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start of experience ", experience.current_experience)
        cl_strategy.train(experience)
        print('Training completed')

        print('Computing accuracy on the whole test set')
        results.append(cl_strategy.eval(scenario.test_stream))
Example #5
def main(args):
    model = SimpleMLP(hidden_size=args.hs)
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
    criterion = torch.nn.CrossEntropyLoss()

    # check if selected GPU is available or use CPU
    assert args.cuda == -1 or args.cuda >= 0, "cuda must be -1 or >= 0."
    device = torch.device(
        f"cuda:{args.cuda}"
        if torch.cuda.is_available() and args.cuda >= 0
        else "cpu"
    )
    print(f"Using device: {device}")

    # create Permuted MNIST scenario
    scenario = PermutedMNIST(n_experiences=4)

    interactive_logger = InteractiveLogger()
    eval_plugin = EvaluationPlugin(
        accuracy_metrics(
            minibatch=True, epoch=True, experience=True, stream=True
        ),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        forgetting_metrics(experience=True),
        loggers=[interactive_logger],
    )

    # create strategy
    assert (
        len(args.lambda_e) == 1 or len(args.lambda_e) == 5
    ), "lambda_e must contain either 1 or 5 values."
    lambda_e = args.lambda_e[0] if len(args.lambda_e) == 1 else args.lambda_e

    strategy = LFL(
        model,
        optimizer,
        criterion,
        lambda_e=lambda_e,
        train_epochs=args.epochs,
        device=device,
        train_mb_size=args.minibatch_size,
        evaluator=eval_plugin,
    )

    # train on the selected scenario with the chosen strategy
    print("Starting experiment...")
    results = []
    for train_batch_info in scenario.train_stream:
        print(
            "Start training on experience ", train_batch_info.current_experience
        )

        strategy.train(train_batch_info, num_workers=0)
        print(
            "End training on experience ", train_batch_info.current_experience
        )
        print("Computing accuracy on the test set")
        results.append(strategy.eval(scenario.test_stream[:]))
Example #6
def main(args):
    model = SimpleMLP(hidden_size=args.hs)
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
    criterion = torch.nn.CrossEntropyLoss()

    # check if selected GPU is available or use CPU
    assert args.cuda == -1 or args.cuda >= 0, "cuda must be -1 or >= 0."
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")
    print(f'Using device: {device}')

    # create scenario
    if args.scenario == 'pmnist':
        scenario = PermutedMNIST(n_experiences=args.permutations)
    elif args.scenario == 'smnist':
        scenario = SplitMNIST(n_experiences=5, return_task_id=False)
    else:
        raise ValueError("Wrong scenario name. Allowed pmnist, smnist.")

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()
    text_logger = TextLogger(open('log.txt', 'a'))

    eval_plugin = EvaluationPlugin(accuracy_metrics(minibatch=True,
                                                    epoch=True,
                                                    experience=True,
                                                    stream=True),
                                   loss_metrics(minibatch=True,
                                                epoch=True,
                                                experience=True,
                                                stream=True),
                                   ExperienceForgetting(),
                                   loggers=[interactive_logger])

    # create strategy
    strategy = EWC(model,
                   optimizer,
                   criterion,
                   args.ewc_lambda,
                   args.ewc_mode,
                   decay_factor=args.decay_factor,
                   train_epochs=args.epochs,
                   device=device,
                   train_mb_size=args.minibatch_size,
                   evaluator=eval_plugin)

    # train on the selected scenario with the chosen strategy
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start training on experience ", experience.current_experience)

        strategy.train(experience)
        print("End training on experience", experience.current_experience)
        print('Computing accuracy on the test set')
        results.append(strategy.eval(scenario.test_stream[:]))
Example #7
def main(args):
    model = SimpleMLP(hidden_size=args.hs)
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
    criterion = torch.nn.CrossEntropyLoss()

    # check if selected GPU is available or use CPU
    assert args.cuda == -1 or args.cuda >= 0, "cuda must be -1 or >= 0."
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")
    print(f'Using device: {device}')

    # create split scenario
    scenario = SplitMNIST(n_experiences=5, return_task_id=False)

    interactive_logger = InteractiveLogger()
    eval_plugin = EvaluationPlugin(accuracy_metrics(minibatch=True,
                                                    epoch=True,
                                                    experience=True,
                                                    stream=True),
                                   loss_metrics(minibatch=True,
                                                epoch=True,
                                                experience=True,
                                                stream=True),
                                   ExperienceForgetting(),
                                   loggers=[interactive_logger])

    # create strategy
    assert len(args.lwf_alpha) == 1 or len(args.lwf_alpha) == 5,\
        'lwf_alpha must contain either 1 or 5 values.'
    lwf_alpha = args.lwf_alpha[0] if len(
        args.lwf_alpha) == 1 else args.lwf_alpha

    strategy = LwF(model,
                   optimizer,
                   criterion,
                   alpha=lwf_alpha,
                   temperature=args.softmax_temperature,
                   train_epochs=args.epochs,
                   device=device,
                   train_mb_size=args.minibatch_size,
                   evaluator=eval_plugin)

    # train on the selected scenario with the chosen strategy
    print('Starting experiment...')
    results = []
    for train_batch_info in scenario.train_stream:
        print("Start training on experience ",
              train_batch_info.current_experience)

        strategy.train(train_batch_info, num_workers=4)
        print("End training on experience ",
              train_batch_info.current_experience)
        print('Computing accuracy on the test set')
        results.append(strategy.eval(scenario.test_stream[:]))
Example #8
 def test_no_errors(self):
     eval_plugin = EvaluationPlugin(accuracy_metrics(stream=True),
                                    loggers=None,
                                    benchmark=self.benchmark,
                                    strict_checks=True)
     strategy = Naive(self.model,
                      self.optimizer,
                      self.criterion,
                      train_epochs=2,
                      eval_every=0,
                      evaluator=eval_plugin)
     for exp in self.benchmark.train_stream:
         strategy.train(exp, eval_streams=[self.benchmark.test_stream])
         strategy.eval(self.benchmark.test_stream)
Example #9
def main(cuda: int):
    # --- CONFIG
    device = torch.device(
        f"cuda:{cuda}" if torch.cuda.is_available() else "cpu"
    )
    # --- SCENARIO CREATION
    scenario = SplitCIFAR10(n_experiences=2, seed=42)
    # ---------

    # MODEL CREATION
    # 196608 // 64 == 3072, the size of a flattened 3x32x32 CIFAR-10 image
    model = SimpleMLP(num_classes=scenario.n_classes, input_size=196608 // 64)

    # choose some metrics and evaluation method
    eval_plugin = EvaluationPlugin(
        accuracy_metrics(stream=True, experience=True),
        images_samples_metrics(
            on_train=True,
            on_eval=True,
            n_cols=10,
            n_rows=10,
        ),
        labels_repartition_metrics(
            # image_creator=repartition_bar_chart_image_creator,
            on_train=True,
            on_eval=True,
        ),
        loggers=[
            TensorboardLogger(f"tb_data/{datetime.now()}"),
            InteractiveLogger(),
        ],
    )

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(
        model,
        Adam(model.parameters()),
        train_mb_size=128,
        train_epochs=1,
        eval_mb_size=128,
        device=device,
        plugins=[ReplayPlugin(mem_size=1_000)],
        evaluator=eval_plugin,
    )

    # TRAINING LOOP
    for i, experience in enumerate(scenario.train_stream, 1):
        cl_strategy.train(experience)
        cl_strategy.eval(scenario.test_stream[:i])
Example #10
 def test_raise_warning(self):
     eval_plugin = EvaluationPlugin(accuracy_metrics(stream=True),
                                    loggers=None,
                                    benchmark=self.benchmark,
                                    strict_checks=False)
     strategy = Naive(self.model,
                      self.optimizer,
                      self.criterion,
                      train_epochs=2,
                      eval_every=-1,
                      evaluator=eval_plugin)
     for exp in self.benchmark.train_stream:
         strategy.train(exp)
         strategy.eval(self.benchmark.test_stream)
     with self.assertWarns(UserWarning):
         strategy.eval(self.benchmark.test_stream[:2])
Example #11
def main(args):
    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")

    # --- SCENARIO CREATION
    scenario = SplitMNIST(n_experiences=10, seed=1234)
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=scenario.n_classes)

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(minibatch=True,
                         epoch=True,
                         experience=True,
                         stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        forgetting_metrics(experience=True),
        loggers=[interactive_logger],
    )

    # CREATE THE STRATEGY INSTANCE (GenerativeReplay)
    cl_strategy = GenerativeReplay(
        model,
        torch.optim.Adam(model.parameters(), lr=0.001),
        CrossEntropyLoss(),
        train_mb_size=100,
        train_epochs=4,
        eval_mb_size=100,
        device=device,
        evaluator=eval_plugin,
    )

    # TRAINING LOOP
    print("Starting experiment...")
    results = []
    for experience in scenario.train_stream:
        print("Start of experience ", experience.current_experience)
        cl_strategy.train(experience)
        print("Training completed")

        print("Computing accuracy on the whole test set")
        results.append(cl_strategy.eval(scenario.test_stream))
Example #12
def main(args):

    # Config
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")
    # model
    model = MTSimpleMLP()

    # CL Benchmark Creation
    scenario = SplitMNIST(n_experiences=5, return_task_id=True)
    train_stream = scenario.train_stream
    test_stream = scenario.test_stream

    # Prepare for training & testing
    optimizer = Adam(model.parameters(), lr=0.01)
    criterion = CrossEntropyLoss()

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(minibatch=False,
                         epoch=True,
                         experience=True,
                         stream=True),
        forgetting_metrics(experience=True),
        loggers=[interactive_logger],
    )

    # Choose a CL strategy
    strategy = EWC(
        model=model,
        optimizer=optimizer,
        criterion=criterion,
        train_mb_size=128,
        train_epochs=3,
        eval_mb_size=128,
        device=device,
        evaluator=eval_plugin,
        ewc_lambda=0.4,
    )

    # train and test loop
    for train_task in train_stream:
        strategy.train(train_task)
        strategy.eval(test_stream)
Example #13
def run_base_online(experience, device, use_interactive_logger: bool = False):
    """
    Runs OnlineNaive for one experience.
    """

    # Create list of loggers to be used
    loggers = []
    if use_interactive_logger:
        interactive_logger = InteractiveLogger()
        loggers.append(interactive_logger)

    # Evaluation plugin
    eval_plugin = EvaluationPlugin(
        accuracy_metrics(minibatch=True,
                         epoch=True,
                         experience=True,
                         stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        forgetting_metrics(experience=True),
        loggers=loggers,
    )

    # Model
    model = SimpleMLP(num_classes=10)

    # Create OnlineNaive strategy
    cl_strategy = OnlineNaive(
        model,
        torch.optim.SGD(model.parameters(), lr=0.01),
        CrossEntropyLoss(),
        num_passes=1,
        train_mb_size=1,
        device=device,
        evaluator=eval_plugin,
    )

    start = time.time()
    print("Running OnlineNaive ...")
    cl_strategy.train(experience)
    end = time.time()
    duration = end - start

    return duration
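
A possible invocation of this helper, sketched under the assumption that the experience comes from a SplitMNIST benchmark; the benchmark choice is illustrative and not part of the original snippet.

if __name__ == "__main__":
    # Illustrative only: SplitMNIST is an assumption, any benchmark works.
    benchmark = SplitMNIST(n_experiences=5)
    duration = run_base_online(benchmark.train_stream[0],
                               device=torch.device("cpu"))
    print(f"OnlineNaive took {duration:.2f} s")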
Example #14
def main(args):
    # --- CONFIG
    device = torch.device(
        f"cuda:{args.cuda}"
        if torch.cuda.is_available() and args.cuda >= 0
        else "cpu"
    )

    # --- SCENARIO CREATION
    scenario = SplitCIFAR100(n_experiences=20, return_task_id=True)
    config = {"scenario": "SplitCIFAR100"}

    # MODEL CREATION
    model = MTSimpleCNN()

    # choose some metrics and evaluation method
    loggers = [InteractiveLogger()]
    if args.wandb_project != "":
        wandb_logger = WandBLogger(
            project_name=args.wandb_project,
            run_name="LaMAML_" + config["scenario"],
            config=config,
        )
        loggers.append(wandb_logger)

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(
            minibatch=True, epoch=True, experience=True, stream=True
        ),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        forgetting_metrics(experience=True),
        loggers=loggers,
    )

    # LAMAML STRATEGY
    rs_buffer = ReservoirSamplingBuffer(max_size=200)
    replay_plugin = ReplayPlugin(
        mem_size=200,
        batch_size=10,
        batch_size_mem=10,
        task_balanced_dataloader=False,
        storage_policy=rs_buffer,
    )

    cl_strategy = LaMAML(
        model,
        torch.optim.SGD(model.parameters(), lr=0.1),
        CrossEntropyLoss(),
        n_inner_updates=5,
        second_order=True,
        grad_clip_norm=1.0,
        learn_lr=True,
        lr_alpha=0.25,
        sync_update=False,
        train_mb_size=10,
        train_epochs=10,
        eval_mb_size=100,
        device=device,
        plugins=[replay_plugin],
        evaluator=eval_plugin,
    )

    # TRAINING LOOP
    print("Starting experiment...")
    results = []
    for experience in scenario.train_stream:
        print("Start of experience ", experience.current_experience)
        cl_strategy.train(experience)
        print("Training completed")

        print("Computing accuracy on the whole test set")
        results.append(cl_strategy.eval(scenario.test_stream))

    if args.wandb_project != "":
        wandb.finish()
Example #15
def main(args):
    # --- CONFIG
    device = torch.device(
        f"cuda:{args.cuda}"
        if torch.cuda.is_available() and args.cuda >= 0
        else "cpu"
    )
    # ---------

    tr_ds = [
        AvalancheTensorDataset(
            torch.randn(10, 3),
            torch.randint(0, 3, (10,)).tolist(),
            task_labels=torch.randint(0, 5, (10,)).tolist(),
        )
        for _ in range(3)
    ]
    ts_ds = [
        AvalancheTensorDataset(
            torch.randn(10, 3),
            torch.randint(0, 3, (10,)).tolist(),
            task_labels=torch.randint(0, 5, (10,)).tolist(),
        )
        for _ in range(3)
    ]
    scenario = create_multi_dataset_generic_benchmark(
        train_datasets=tr_ds, test_datasets=ts_ds
    )
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=3, input_size=3)

    # DEFINE THE EVALUATION PLUGIN AND LOGGER
    # The evaluation plugin manages the metrics computation.
    # It takes as argument a list of metrics and a list of loggers.
    # The evaluation plugin calls the loggers to serialize the metrics
    # and save them in persistent memory or print them in the standard output.

    # log to text file
    text_logger = TextLogger(open("log.txt", "a"))

    # print to stdout
    interactive_logger = InteractiveLogger()

    csv_logger = CSVLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(
            minibatch=True,
            epoch=True,
            epoch_running=True,
            experience=True,
            stream=True,
        ),
        loss_metrics(
            minibatch=True,
            epoch=True,
            epoch_running=True,
            experience=True,
            stream=True,
        ),
        forgetting_metrics(experience=True, stream=True),
        bwt_metrics(experience=True, stream=True),
        cpu_usage_metrics(
            minibatch=True,
            epoch=True,
            epoch_running=True,
            experience=True,
            stream=True,
        ),
        timing_metrics(
            minibatch=True,
            epoch=True,
            epoch_running=True,
            experience=True,
            stream=True,
        ),
        ram_usage_metrics(
            every=0.5, minibatch=True, epoch=True, experience=True, stream=True
        ),
        gpu_usage_metrics(
            args.cuda,
            every=0.5,
            minibatch=True,
            epoch=True,
            experience=True,
            stream=True,
        ),
        disk_usage_metrics(
            minibatch=True, epoch=True, experience=True, stream=True
        ),
        MAC_metrics(minibatch=True, epoch=True, experience=True),
        loggers=[interactive_logger, text_logger, csv_logger],
        collect_all=True,
    )  # collect all metrics (set to True by default)

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(
        model,
        SGD(model.parameters(), lr=0.001, momentum=0.9),
        CrossEntropyLoss(),
        train_mb_size=500,
        train_epochs=1,
        eval_mb_size=100,
        device=device,
        evaluator=eval_plugin,
        eval_every=1,
    )

    # TRAINING LOOP
    print("Starting experiment...")
    results = []
    for i, experience in enumerate(scenario.train_stream):
        print("Start of experience: ", experience.current_experience)
        print("Current Classes: ", experience.classes_in_this_experience)

        # train returns a dictionary containing last recorded value
        # for each metric.
        res = cl_strategy.train(experience, eval_streams=[scenario.test_stream])
        print("Training completed")

        print("Computing accuracy on the whole test set")
        # test returns a dictionary with the last metric collected during
        # evaluation on that stream
        results.append(cl_strategy.eval(scenario.test_stream))

    print(f"Test metrics:\n{results}")

    # Dict with all the metric curves,
    # only available when `collect_all` is True.
    # Each entry is a (x, metric value) tuple.
    # You can use this dictionary to manipulate the
    # metrics without avalanche.
    all_metrics = cl_strategy.evaluator.get_all_metrics()
    print(f"Stored metrics: {list(all_metrics.keys())}")
Example #16
 def test_accuracy_helper(self):
     metrics = accuracy_metrics(minibatch=True, epoch=True)
     self.assertEqual(2, len(metrics))
     self.assertIsInstance(metrics, List)
     self.assertIsInstance(metrics[0], PluginMetric)
     self.assertIsInstance(metrics[1], PluginMetric)
Example #17
                    strat, item)
            raise

    def before_eval(self, strategy: "SupervisedTemplate", **kwargs):
        self._update_metrics_and_loggers(strategy, "before_eval")
        msgw = (
            "Evaluation stream is not equal to the complete test stream. "
            "This may result in inconsistent metrics. Use at your own risk.")
        msge = ("Stream provided to `eval` must be the same of the entire "
                "evaluation stream.")
        curr_stream = strategy.current_eval_stream[0].origin_stream
        benchmark = curr_stream[0].origin_stream.benchmark
        full_stream = benchmark.streams[curr_stream.name]

        if not self.suppress_warnings and len(curr_stream) != len(full_stream):
            if self.strict_checks:
                raise ValueError(msge)
            else:
                warnings.warn(msgw)


default_evaluator = EvaluationPlugin(
    accuracy_metrics(minibatch=False, epoch=True, experience=True,
                     stream=True),
    loss_metrics(minibatch=False, epoch=True, experience=True, stream=True),
    loggers=[InteractiveLogger()],
    suppress_warnings=True,
)

__all__ = ["EvaluationPlugin", "default_evaluator"]
Example #18
def main(args):
    # Device config
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")

    # Initialize the model, stream and training strategy
    model = SimpleCNN(num_classes=10)
    if args.stream != "s_long":
        model_init = deepcopy(model)

    scenario = CTrL(stream_name=args.stream,
                    save_to_disk=args.save,
                    path=args.path,
                    seed=10)

    train_stream = scenario.train_stream
    test_stream = scenario.test_stream
    val_stream = scenario.val_stream

    optimizer = SGD(model.parameters(), lr=0.001, momentum=0.9)
    criterion = CrossEntropyLoss()

    logger = EvaluationPlugin(
        accuracy_metrics(minibatch=False,
                         epoch=False,
                         experience=True,
                         stream=True),
        loggers=[InteractiveLogger()],
    )

    cl_strategy = Naive(
        model,
        optimizer,
        criterion,
        train_mb_size=32,
        device=device,
        train_epochs=args.max_epochs,
        eval_mb_size=128,
        evaluator=logger,
        plugins=[EarlyStoppingPlugin(50, "val_stream")],
        eval_every=5,
    )

    # train and test loop
    for train_task, val_task in zip(train_stream, val_stream):
        cl_strategy.train(train_task, eval_streams=[val_task])
        cl_strategy.eval(test_stream)

    transfer_mat = []
    for tid in range(len(train_stream)):
        transfer_mat.append(
            logger.all_metric_results["Top1_Acc_Exp/eval_phase/test_stream/"
                                      f"Task00{tid}/Exp00{tid}"][1])

    if args.stream == "s_long":
        res = logger.last_metric_results["Top1_Acc_Stream/eval_phase/"
                                         "test_stream"]
        print(f"Average accuracy on S_long : {res}")
    else:
        optimizer = SGD(model_init.parameters(), lr=0.001, momentum=0.9)
        cl_strategy = Naive(
            model_init,
            optimizer,
            criterion,
            train_mb_size=32,
            device=device,
            train_epochs=args.max_epochs,
            eval_mb_size=128,
            plugins=[EarlyStoppingPlugin(50, "val_stream")],
            eval_every=5,
        )

        cl_strategy.train(train_stream[-1])
        res = cl_strategy.eval([test_stream[-1]])

        acc_last_stream = transfer_mat[-1][-1]
        acc_last_only = res["Top1_Acc_Exp/eval_phase/test_stream/"
                            "Task005/Exp-01"]
        transfer_value = acc_last_stream - acc_last_only

        print(f"Accuracy on probe task after training on the whole "
              f"stream: {acc_last_stream}")
        print(f"Accuracy on probe task after trained "
              f"independently: {acc_last_only}")
        print(f"T({args.stream})={transfer_value}")
Example #19
def main(args):
    # Device config
    device = torch.device(f"cuda:{args.cuda}"
                          if torch.cuda.is_available() and
                          args.cuda >= 0 else "cpu")
    print('device ', device)
    # ---------

    # --- TRANSFORMATIONS
    _mu = [0.485, 0.456, 0.406]  # imagenet normalization
    _std = [0.229, 0.224, 0.225]
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=_mu,
                             std=_std)
    ])
    # ---------

    # --- SCENARIO CREATION
    scenario = CORe50(scenario=args.scenario, train_transform=transform,
                      eval_transform=transform)

    # ---------

    eval_plugin = EvaluationPlugin(
        loss_metrics(epoch=True, experience=True, stream=True),
        accuracy_metrics(epoch=True, experience=True, stream=True),
        forgetting_metrics(experience=True, stream=True),
        loggers=[InteractiveLogger()]
    )

    criterion = torch.nn.CrossEntropyLoss()
    model = SLDAResNetModel(device=device, arch='resnet18',
                            imagenet_pretrained=args.imagenet_pretrained)

    # CREATE THE STRATEGY INSTANCE
    cl_strategy = StreamingLDA(model, criterion,
                               args.feature_size, args.n_classes,
                               eval_mb_size=args.batch_size,
                               train_mb_size=args.batch_size,
                               train_epochs=1,
                               shrinkage_param=args.shrinkage,
                               streaming_update_sigma=args.plastic_cov,
                               device=device, evaluator=eval_plugin)

    warnings.warn(
        "The Deep SLDA example is not perfectly aligned with "
        "the paper implementation since it does not use a base "
        "initialization phase and instead starts streming from "
        "pre-trained weights.")

    # TRAINING LOOP
    print('Starting experiment...')
    for i, exp in enumerate(scenario.train_stream):

        # fit SLDA model to batch (one sample at a time)
        cl_strategy.train(exp)

        # evaluate model on test data
        cl_strategy.eval(scenario.test_stream)
Example #20
        self._update_metrics(strategy, 'after_eval_exp')

    def after_eval(self, strategy: 'BaseStrategy', **kwargs):
        self._update_metrics(strategy, 'after_eval')

    def before_eval_iteration(self, strategy: 'BaseStrategy', **kwargs):
        self._update_metrics(strategy, 'before_eval_iteration')

    def before_eval_forward(self, strategy: 'BaseStrategy', **kwargs):
        self._update_metrics(strategy, 'before_eval_forward')

    def after_eval_forward(self, strategy: 'BaseStrategy', **kwargs):
        self._update_metrics(strategy, 'after_eval_forward')

    def after_eval_iteration(self, strategy: 'BaseStrategy', **kwargs):
        self._update_metrics(strategy, 'after_eval_iteration')


default_logger = EvaluationPlugin(accuracy_metrics(minibatch=False,
                                                   epoch=True,
                                                   experience=True,
                                                   stream=True),
                                  loss_metrics(minibatch=False,
                                               epoch=True,
                                               experience=True,
                                               stream=True),
                                  loggers=[InteractiveLogger()],
                                  suppress_warnings=True)

__all__ = ['EvaluationPlugin', 'default_logger']
Example #21
def main(args):
    model = SimpleMLP(hidden_size=args.hs)
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
    criterion = torch.nn.CrossEntropyLoss()

    # check if selected GPU is available or use CPU
    assert args.cuda == -1 or args.cuda >= 0, "cuda must be -1 or >= 0."
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")
    print(f"Using device: {device}")

    # create scenario
    if args.scenario == "pmnist":
        scenario = PermutedMNIST(n_experiences=args.permutations)
    elif args.scenario == "smnist":
        mnist_train = MNIST(
            root=expanduser("~") + "/.avalanche/data/mnist/",
            train=True,
            download=True,
            transform=ToTensor(),
        )
        mnist_test = MNIST(
            root=expanduser("~") + "/.avalanche/data/mnist/",
            train=False,
            download=True,
            transform=ToTensor(),
        )
        scenario = nc_benchmark(mnist_train,
                                mnist_test,
                                5,
                                task_labels=False,
                                seed=1234)
    else:
        raise ValueError("Wrong scenario name. Allowed pmnist, smnist.")

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()
    tensorboard_logger = TensorboardLogger()
    eval_plugin = EvaluationPlugin(
        accuracy_metrics(minibatch=True,
                         epoch=True,
                         experience=True,
                         stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        forgetting_metrics(experience=True, stream=True),
        bwt_metrics(experience=True, stream=True),
        loggers=[interactive_logger, tensorboard_logger],
    )

    # create strategy
    strategy = EWC(
        model,
        optimizer,
        criterion,
        args.ewc_lambda,
        args.ewc_mode,
        decay_factor=args.decay_factor,
        train_epochs=args.epochs,
        device=device,
        train_mb_size=args.minibatch_size,
        evaluator=eval_plugin,
    )

    # train on the selected scenario with the chosen strategy
    print("Starting experiment...")
    results = []
    for experience in scenario.train_stream:
        print("Start training on experience ", experience.current_experience)

        strategy.train(experience)
        print("End training on experience", experience.current_experience)
        print("Computing accuracy on the test set")
        results.append(strategy.eval(scenario.test_stream[:]))
Example #22
File: cope.py  Project: pkraison/avalanche
def main(args):
    """
    Last Avalanche version reference performance (online = 1 epoch):

    Class-incremental (online):
        Top1_Acc_Stream/eval_phase/test_stream = 0.9421
    Data-incremental (online):
        Top1_Acc_Stream/eval_phase/test_stream = 0.9309

    These are reference results for a single run.
    """
    # --- DEFAULT PARAMS ONLINE DATA INCREMENTAL LEARNING
    nb_tasks = 5  # Can still design the data stream based on tasks
    batch_size = 10  # Learning agent only has a small amount of data available
    epochs = 1  # How many times to process each mini-batch
    return_task_id = False  # Data incremental (task-agnostic/task-free)

    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")
    # ---------

    # --- SCENARIO CREATION
    n_classes = 10
    task_scenario = SplitMNIST(
        nb_tasks,
        return_task_id=return_task_id,
        fixed_class_order=[i for i in range(n_classes)],
    )

    # Make data incremental (one batch = one experience)
    scenario = data_incremental_benchmark(task_scenario,
                                          experience_size=batch_size)
    print(
        f"{scenario.n_experiences} batches in online data incremental setup.")
    # 6002 batches for SplitMNIST with batch size 10
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=args.featsize,
                      hidden_size=400,
                      hidden_layers=2,
                      drop_rate=0)

    # choose some metrics and evaluation method
    logger = TextLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(experience=True, stream=True),
        loss_metrics(experience=False, stream=True),
        StreamForgetting(),
        loggers=[logger],
        benchmark=scenario,
    )

    # CoPE PLUGIN
    cope = CoPEPlugin(mem_size=2000,
                      alpha=0.99,
                      p_size=args.featsize,
                      n_classes=n_classes)

    # CREATE THE STRATEGY INSTANCE (NAIVE) WITH CoPE PLUGIN
    cl_strategy = Naive(
        model,
        torch.optim.SGD(model.parameters(), lr=0.01),
        cope.ppp_loss,  # CoPE PPP-Loss
        train_mb_size=batch_size,
        train_epochs=epochs,
        eval_mb_size=100,
        device=device,
        plugins=[cope],
        evaluator=eval_plugin,
    )

    # TRAINING LOOP
    print("Starting experiment...")
    results = []
    cl_strategy.train(scenario.train_stream)

    print("Computing accuracy on the whole test set")
    results.append(cl_strategy.eval(scenario.test_stream))
Example #23
def main(args):
    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")
    # ---------

    # --- TRANSFORMATIONS
    train_transform = transforms.Compose([
        RandomCrop(28, padding=4),
        ToTensor(),
        transforms.Normalize((0.1307, ), (0.3081, ))
    ])
    test_transform = transforms.Compose(
        [ToTensor(), transforms.Normalize((0.1307, ), (0.3081, ))])
    # ---------

    # --- SCENARIO CREATION
    mnist_train = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                        train=True,
                        download=True,
                        transform=train_transform)
    mnist_test = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                       train=False,
                       download=True,
                       transform=test_transform)
    scenario = nc_benchmark(mnist_train,
                            mnist_test,
                            5,
                            task_labels=False,
                            seed=1234)
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=scenario.n_classes)

    # DEFINE THE EVALUATION PLUGIN AND LOGGER
    # The evaluation plugin manages the metrics computation.
    # It takes as argument a list of metrics and a list of loggers.
    # The evaluation plugin calls the loggers to serialize the metrics
    # and save them in persistent memory or print them in the standard output.

    # log to text file
    text_logger = TextLogger(open('log.txt', 'a'))

    # print to stdout
    interactive_logger = InteractiveLogger()

    csv_logger = CSVLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(minibatch=True,
                         epoch=True,
                         epoch_running=True,
                         experience=True,
                         stream=True),
        loss_metrics(minibatch=True,
                     epoch=True,
                     epoch_running=True,
                     experience=True,
                     stream=True),
        forgetting_metrics(experience=True, stream=True),
        bwt_metrics(experience=True, stream=True),
        forward_transfer_metrics(experience=True, stream=True),
        cpu_usage_metrics(minibatch=True,
                          epoch=True,
                          epoch_running=True,
                          experience=True,
                          stream=True),
        timing_metrics(minibatch=True,
                       epoch=True,
                       epoch_running=True,
                       experience=True,
                       stream=True),
        ram_usage_metrics(every=0.5,
                          minibatch=True,
                          epoch=True,
                          experience=True,
                          stream=True),
        gpu_usage_metrics(args.cuda,
                          every=0.5,
                          minibatch=True,
                          epoch=True,
                          experience=True,
                          stream=True),
        disk_usage_metrics(minibatch=True,
                           epoch=True,
                           experience=True,
                           stream=True),
        MAC_metrics(minibatch=True, epoch=True, experience=True),
        loggers=[interactive_logger, text_logger, csv_logger],
        collect_all=True)  # collect all metrics (set to True by default)

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(model,
                        SGD(model.parameters(), lr=0.001, momentum=0.9),
                        CrossEntropyLoss(),
                        train_mb_size=500,
                        train_epochs=1,
                        eval_mb_size=100,
                        device=device,
                        evaluator=eval_plugin,
                        eval_every=1)

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for i, experience in enumerate(scenario.train_stream):
        print("Start of experience: ", experience.current_experience)
        print("Current Classes: ", experience.classes_in_this_experience)

        # train returns a dictionary containing last recorded value
        # for each metric.
        res = cl_strategy.train(experience,
                                eval_streams=[scenario.test_stream])
        print('Training completed')

        print('Computing accuracy on the whole test set')
        # test returns a dictionary with the last metric collected during
        # evaluation on that stream
        results.append(cl_strategy.eval(scenario.test_stream))

    print(f"Test metrics:\n{results}")

    # Dict with all the metric curves,
    # only available when `collect_all` is True.
    # Each entry is a (x, metric value) tuple.
    # You can use this dictionary to manipulate the
    # metrics without avalanche.
    all_metrics = cl_strategy.evaluator.get_all_metrics()
    print(f"Stored metrics: {list(all_metrics.keys())}")
Example #24
    def setUpClass(cls) -> None:
        torch.manual_seed(0)
        np.random.seed(0)
        random.seed(0)

        n_samples_per_class = 100
        dataset = make_classification(n_samples=6 * n_samples_per_class,
                                      n_classes=6,
                                      n_features=4,
                                      n_informative=4,
                                      n_redundant=0)
        X = torch.from_numpy(dataset[0]).float()
        y = torch.from_numpy(dataset[1]).long()
        train_X, test_X, train_y, test_y = train_test_split(X,
                                                            y,
                                                            train_size=0.5,
                                                            shuffle=True,
                                                            stratify=y)
        tr_d = TensorDataset(train_X, train_y)
        ts_d = TensorDataset(test_X, test_y)
        benchmark = nc_benchmark(train_dataset=tr_d,
                                 test_dataset=ts_d,
                                 n_experiences=3,
                                 task_labels=True,
                                 shuffle=False,
                                 seed=0)
        model = SimpleMLP(input_size=4, num_classes=benchmark.n_classes)

        f = open('log.txt', 'w')
        text_logger = TextLogger(f)
        eval_plugin = EvaluationPlugin(
            accuracy_metrics(minibatch=True,
                             epoch=True,
                             epoch_running=True,
                             experience=True,
                             stream=True),
            loss_metrics(minibatch=True,
                         epoch=True,
                         epoch_running=True,
                         experience=True,
                         stream=True),
            forgetting_metrics(experience=True, stream=True, task=True),
            confusion_matrix_metrics(num_classes=6,
                                     save_image=False,
                                     normalize='all',
                                     stream=True),
            bwt_metrics(experience=True, stream=True, task=True),
            cpu_usage_metrics(minibatch=True,
                              epoch=True,
                              epoch_running=True,
                              experience=True,
                              stream=True),
            timing_metrics(minibatch=True,
                           epoch=True,
                           epoch_running=True,
                           experience=True,
                           stream=True),
            ram_usage_metrics(every=0.5,
                              minibatch=True,
                              epoch=True,
                              experience=True,
                              stream=True),
            disk_usage_metrics(minibatch=True,
                               epoch=True,
                               experience=True,
                               stream=True),
            MAC_metrics(minibatch=True, epoch=True, experience=True),
            loggers=[text_logger],
            collect_all=True)  # collect all metrics (set to True by default)
        cl_strategy = BaseStrategy(model,
                                   SGD(model.parameters(),
                                       lr=0.001,
                                       momentum=0.9),
                                   CrossEntropyLoss(),
                                   train_mb_size=10,
                                   train_epochs=2,
                                   eval_mb_size=10,
                                   device=DEVICE,
                                   evaluator=eval_plugin,
                                   eval_every=1)
        for i, experience in enumerate(benchmark.train_stream):
            cl_strategy.train(experience,
                              eval_streams=[benchmark.test_stream[i]],
                              shuffle=False)
            cl_strategy.eval(benchmark.test_stream)
        cls.all_metrics = cl_strategy.evaluator.get_all_metrics()
        f.close()
        # with open(os.path.join(pathlib.Path(__file__).parent.absolute(),
        #                        'target_metrics',
        #                        'mt.pickle'), 'wb') as f:
        #     pickle.dump(dict(cls.all_metrics), f,
        #                 protocol=pickle.HIGHEST_PROTOCOL)
        with open(
                os.path.join(
                    pathlib.Path(__file__).parent.absolute(), 'target_metrics',
                    'mt.pickle'), 'rb') as f:
            cls.ref = pickle.load(f)
Example #25
def run_base(experience, device, use_interactive_logger: bool = False):
    """
        Runs Naive (from BaseStrategy) for one experience.
    """
    def create_sub_experience_list(experience):
        """Creates a list of sub-experiences from an experience.
        It returns a list of experiences, where each experience is
        a subset of the original experience.

        :param experience: single Experience.

        :return: list of Experience.
        """

        # Shuffle the indices
        indices = torch.randperm(len(experience.dataset))
        num_sub_exps = len(indices)
        mb_size = 1
        sub_experience_list = []
        for subexp_id in range(num_sub_exps):
            subexp_indices = indices[subexp_id * mb_size:(subexp_id + 1) *
                                     mb_size]
            sub_experience = copy.copy(experience)
            subexp_ds = AvalancheSubset(sub_experience.dataset,
                                        indices=subexp_indices)
            sub_experience.dataset = subexp_ds
            sub_experience_list.append(sub_experience)

        return sub_experience_list

    # Create list of loggers to be used
    loggers = []
    if use_interactive_logger:
        interactive_logger = InteractiveLogger()
        loggers.append(interactive_logger)

    # Evaluation plugin
    eval_plugin = EvaluationPlugin(
        accuracy_metrics(minibatch=True,
                         epoch=True,
                         experience=True,
                         stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        forgetting_metrics(experience=True),
        loggers=loggers,
    )

    # Model
    model = SimpleMLP(num_classes=10)

    # Create Naive strategy
    cl_strategy = Naive(
        model,
        torch.optim.SGD(model.parameters(), lr=0.01),
        CrossEntropyLoss(),
        train_mb_size=1,
        device=device,
        evaluator=eval_plugin,
    )

    start = time.time()
    sub_experience_list = create_sub_experience_list(experience)

    # !!! This is only for profiling purposes. This method may not work
    # in practice for dynamic modules since the model adaptation step
    # can go wrong.

    # Train for each sub-experience
    print("Running OnlineNaive ...")
    for i, sub_experience in enumerate(sub_experience_list):
        experience = sub_experience
        cl_strategy.train(experience)
    end = time.time()
    duration = end - start

    return duration
Example #26
    def setUpClass(cls) -> None:
        torch.manual_seed(0)
        np.random.seed(0)
        random.seed(0)

        n_samples_per_class = 100
        datasets = []
        for i in range(3):
            dataset = make_classification(n_samples=3 * n_samples_per_class,
                                          n_classes=3,
                                          n_features=3,
                                          n_informative=3,
                                          n_redundant=0)
            X = torch.from_numpy(dataset[0]).float()
            y = torch.from_numpy(dataset[1]).long()
            train_X, test_X, train_y, test_y = train_test_split(X,
                                                                y,
                                                                train_size=0.5,
                                                                shuffle=True,
                                                                stratify=y)
            datasets.append((train_X, train_y, test_X, test_y))

        tr_ds = [
            AvalancheTensorDataset(
                tr_X,
                tr_y,
                dataset_type=AvalancheDatasetType.CLASSIFICATION,
                task_labels=torch.randint(0, 3, (150, )).tolist())
            for tr_X, tr_y, _, _ in datasets
        ]
        ts_ds = [
            AvalancheTensorDataset(
                ts_X,
                ts_y,
                dataset_type=AvalancheDatasetType.CLASSIFICATION,
                task_labels=torch.randint(0, 3, (150, )).tolist())
            for _, _, ts_X, ts_y in datasets
        ]
        benchmark = dataset_benchmark(train_datasets=tr_ds,
                                      test_datasets=ts_ds)
        model = SimpleMLP(num_classes=3, input_size=3)

        f = open('log.txt', 'w')
        text_logger = TextLogger(f)
        eval_plugin = EvaluationPlugin(
            accuracy_metrics(minibatch=True,
                             epoch=True,
                             epoch_running=True,
                             experience=True,
                             stream=True,
                             trained_experience=True),
            loss_metrics(minibatch=True,
                         epoch=True,
                         epoch_running=True,
                         experience=True,
                         stream=True),
            forgetting_metrics(experience=True, stream=True),
            confusion_matrix_metrics(num_classes=3,
                                     save_image=False,
                                     normalize='all',
                                     stream=True),
            bwt_metrics(experience=True, stream=True),
            forward_transfer_metrics(experience=True, stream=True),
            cpu_usage_metrics(minibatch=True,
                              epoch=True,
                              epoch_running=True,
                              experience=True,
                              stream=True),
            timing_metrics(minibatch=True,
                           epoch=True,
                           epoch_running=True,
                           experience=True,
                           stream=True),
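            # ram_usage_metrics samples RAM usage every 0.5 seconds while the
            # corresponding phase (epoch / experience / stream) is running.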
            ram_usage_metrics(every=0.5,
                              minibatch=True,
                              epoch=True,
                              experience=True,
                              stream=True),
            disk_usage_metrics(minibatch=True,
                               epoch=True,
                               experience=True,
                               stream=True),
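            # MAC_metrics estimates the multiply-and-accumulate operations of
            # a forward pass, as a rough measure of computational cost.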
            MAC_metrics(minibatch=True, epoch=True, experience=True),
            loggers=[text_logger],
            collect_all=True)  # collect all metrics (set to True by default)
        cl_strategy = BaseStrategy(model,
                                   SGD(model.parameters(),
                                       lr=0.001,
                                       momentum=0.9),
                                   CrossEntropyLoss(),
                                   train_mb_size=2,
                                   train_epochs=2,
                                   eval_mb_size=2,
                                   device=DEVICE,
                                   evaluator=eval_plugin,
                                   eval_every=1)
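        # With eval_every=1, the strategy also runs periodic evaluation on the
        # streams passed via eval_streams (here, after every training epoch),
        # in addition to the explicit eval() calls below.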
        for i, experience in enumerate(benchmark.train_stream):
            cl_strategy.train(experience,
                              eval_streams=[benchmark.test_stream],
                              shuffle=False)
            cl_strategy.eval(benchmark.test_stream)
        cls.all_metrics = cl_strategy.evaluator.get_all_metrics()
        f.close()
        # # Uncomment me to regenerate the reference metrics. Make sure
        # # the old tests were passing for all unchanged metrics
        # with open(os.path.join(pathlib.Path(__file__).parent.absolute(),
        #                        'target_metrics',
        #                        'tpp.pickle'), 'wb') as f:
        #     pickle.dump(dict(cls.all_metrics), f,
        #                 protocol=4)
        with open(
                os.path.join(
                    pathlib.Path(__file__).parent.absolute(), 'target_metrics',
                    'tpp.pickle'), 'rb') as f:
            cls.ref = pickle.load(f)
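    # A minimal sketch (not part of the original test class) of how the
    # collected metrics could be compared against the pickled reference
    # values. It assumes both dicts map metric names to (x_values, y_values)
    # pairs; in practice, time- and usage-based metrics would need to be
    # skipped or compared with a looser tolerance.
    def test_metrics_match_reference(self):
        for name, (ref_x, ref_y) in self.ref.items():
            self.assertIn(name, self.all_metrics)
            x, y = self.all_metrics[name]
            self.assertSequenceEqual(list(ref_x), list(x))
            for expected, actual in zip(ref_y, y):
                if isinstance(expected, float):
                    self.assertAlmostEqual(expected, actual, places=4)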
Example #27
0
def main(args):
    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")
    # ---------

    # --- TRANSFORMATIONS
    train_transform = transforms.Compose([
        Resize(224),
        ToTensor(),
        transforms.Normalize((0.1307, ), (0.3081, ))
    ])
    test_transform = transforms.Compose([
        Resize(224),
        ToTensor(),
        transforms.Normalize((0.1307, ), (0.3081, ))
    ])
    # ---------

    # --- SCENARIO CREATION
    scenario = SplitCIFAR10(5,
                            train_transform=train_transform,
                            eval_transform=test_transform)
    # ---------

    # MODEL CREATION
    model = MobilenetV1()
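    # adapt_classification_layer is a helper used by this example; it appears
    # to replace the model's final classifier with a linear layer sized to
    # scenario.n_classes (here without a bias term).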
    adapt_classification_layer(model, scenario.n_classes, bias=False)

    # DEFINE THE EVALUATION PLUGIN AND LOGGER

    my_logger = TensorboardLogger(tb_log_dir="logs",
                                  tb_log_exp_name="logging_example")

    # print to stdout
    interactive_logger = InteractiveLogger()

    evaluation_plugin = EvaluationPlugin(
        accuracy_metrics(minibatch=True,
                         epoch=True,
                         experience=True,
                         stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        ExperienceForgetting(),
        loggers=[my_logger, interactive_logger])

    # CREATE THE STRATEGY INSTANCE (NAIVE with the Synaptic Intelligence plugin)
    cl_strategy = SynapticIntelligence(model,
                                       Adam(model.parameters(), lr=0.001),
                                       CrossEntropyLoss(),
                                       si_lambda=0.0001,
                                       train_mb_size=128,
                                       train_epochs=4,
                                       eval_mb_size=128,
                                       device=device,
                                       evaluator=evaluation_plugin)

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start of experience: ", experience.current_experience)
        print("Current Classes: ", experience.classes_in_this_experience)

        cl_strategy.train(experience)
        print('Training completed')

        print('Computing accuracy on the whole test set')
        results.append(cl_strategy.eval(scenario.test_stream))
Example #28
0
def main():
    args = parser.parse_args()
    args.cuda = args.cuda == 'yes'
    args.disable_pbar = args.disable_pbar == 'yes'
    args.stable_sgd = args.stable_sgd == 'yes'
    print(f"args={vars(args)}")

    device = torch.device("cuda:0" if torch.cuda.is_available() and args.cuda else "cpu")
    print(f'Using device: {device}')

    # unique identifier
    uid = uuid.uuid4().hex if args.uid is None else args.uid
    now = str(datetime.datetime.now().date()) + "_" + ':'.join(str(datetime.datetime.now().time()).split(':')[:-1])
    runname = 'T={}_id={}'.format(now, uid) if not args.resume else args.resume

    # Paths
    setupname = [args.strategy, args.exp_name, args.model, args.scenario]
    parentdir = os.path.join(args.save_path, '_'.join(setupname))
    results_path = Path(os.path.join(parentdir, runname))
    results_path.mkdir(parents=True, exist_ok=True)
    tb_log_dir = os.path.join(results_path, 'tb_run')  # Group all runs

    # Eval results
    eval_metric = 'Top1_Acc_Stream/eval_phase/test_stream'
    eval_results_dir = results_path / eval_metric.split('/')[0]
    eval_results_dir.mkdir(parents=True, exist_ok=True)

    eval_result_files = []  # To avg over seeds
    seeds = [args.seed] if args.seed is not None else list(range(args.n_seeds))
    for seed in seeds:
        # initialize seeds
        print("STARTING SEED {}/{}".format(seed, len(seeds) - 1))

        set_seed(seed)

        # create scenario
        if args.scenario == 'smnist':
            inputsize = 28 * 28
            scenario = SplitMNIST(n_experiences=5, return_task_id=False, seed=seed,
                                  fixed_class_order=[i for i in range(10)])
        elif args.scenario == 'CIFAR10':
            scenario = SplitCIFAR10(n_experiences=5, return_task_id=False, seed=seed,
                                    fixed_class_order=[i for i in range(10)])
            inputsize = (3, 32, 32)
        elif args.scenario == 'miniimgnet':
            scenario = SplitMiniImageNet(args.dset_rootpath, n_experiences=20, return_task_id=False, seed=seed,
                                         fixed_class_order=[i for i in range(100)])
            inputsize = (3, 84, 84)
        else:
            raise ValueError("Wrong scenario name.")
        print(f"Scenario = {args.scenario}")

        if args.model == 'simple_mlp':
            model = MyMLP(input_size=inputsize, hidden_size=args.hs)
        elif args.model == 'resnet18':
            if not args.stable_sgd:
                assert args.drop_prob == 0
            model = ResNet18(inputsize, scenario.n_classes, drop_prob=args.drop_prob)
        else:
            raise ValueError("Wrong model name.")

        criterion = torch.nn.CrossEntropyLoss()
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)

        # Paths
        eval_results_file = eval_results_dir / f'seed={seed}.csv'

        # LOGGING
        tb_logger = TensorboardLogger(tb_log_dir=tb_log_dir, tb_log_exp_name=f'seed={seed}.pt')  # log to Tensorboard
        print_logger = TextLogger() if args.disable_pbar else InteractiveLogger()  # print to stdout
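        # EvalTextLogger appears to be a custom logger defined in this script:
        # it presumably writes only the metric matching `metric_filter` to the
        # opened CSV file, so per-seed results can be aggregated later.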
        eval_logger = EvalTextLogger(metric_filter=eval_metric, file=open(eval_results_file, 'a'))
        eval_result_files.append(eval_results_file)

        # METRICS
        eval_plugin = EvaluationPlugin(
            accuracy_metrics(experience=True, stream=True),
            loss_metrics(minibatch=True, experience=True),
            ExperienceForgetting(),  # Test only
            StreamConfusionMatrix(num_classes=scenario.n_classes, save_image=True),

            # LOG OTHER STATS
            # timing_metrics(epoch=True, experience=False),
            # cpu_usage_metrics(experience=True),
            # DiskUsageMonitor(),
            # MinibatchMaxRAM(),
            # GpuUsageMonitor(0),
            loggers=[print_logger, tb_logger, eval_logger])

        plugins = None
        if args.strategy == 'replay':
            plugins = [RehRevPlugin(n_total_memories=args.mem_size,
                                    mode=args.replay_mode,  # STEP-BACK
                                    aversion_steps=args.aversion_steps,
                                    aversion_lr=args.aversion_lr,
                                    stable_sgd=args.stable_sgd,  # Stable SGD
                                    lr_decay=args.lr_decay,
                                    init_epochs=args.init_epochs  # First task epochs
                                    )]

        # CREATE THE STRATEGY INSTANCE (NAIVE)
        strategy = Naive(model, optimizer, criterion,
                         train_epochs=args.epochs, device=device,
                         train_mb_size=args.bs, evaluator=eval_plugin,
                         plugins=plugins
                         )

        # train on the selected scenario with the chosen strategy
        print('Starting experiment...')
        for experience in scenario.train_stream:
            if experience.current_experience == args.until_task:
                print("CUTTING OF TRAINING AT TASK ", experience.current_experience)
                break
            else:
                print("Start training on step ", experience.current_experience)

            strategy.train(experience)
            print("End training on step ", experience.current_experience)
            print('Computing accuracy on the test set')
            res = strategy.eval(scenario.test_stream[:args.until_task])  # Gathered by EvalLogger

    final_results_file = eval_results_dir / 'seed_summary.pt'
    stat_summarize(eval_result_files, final_results_file)
    print(f"[FILE:TB-RESULTS]: {tb_log_dir}")
    print(f"[FILE:FINAL-RESULTS]: {final_results_file}")
    print("FINISHED SCRIPT")
Example #29
0
def main(args):
    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")
    # ---------

    # --- TRANSFORMATIONS
    train_transform = transforms.Compose([
        RandomCrop(28, padding=4),
        ToTensor(),
        transforms.Normalize((0.1307, ), (0.3081, )),
    ])
    test_transform = transforms.Compose(
        [ToTensor(), transforms.Normalize((0.1307, ), (0.3081, ))])
    # ---------

    # --- SCENARIO CREATION
    mnist_train = MNIST(
        root=expanduser("~") + "/.avalanche/data/mnist/",
        train=True,
        download=True,
        transform=train_transform,
    )
    mnist_test = MNIST(
        root=expanduser("~") + "/.avalanche/data/mnist/",
        train=False,
        download=True,
        transform=test_transform,
    )
    scenario = nc_benchmark(mnist_train,
                            mnist_test,
                            5,
                            task_labels=False,
                            seed=1234)
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=scenario.n_classes)

    interactive_logger = InteractiveLogger()
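    # WandBLogger forwards every metric emitted by the EvaluationPlugin to a
    # Weights & Biases run; vars(args) is stored as the run configuration.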
    wandb_logger = WandBLogger(project_name=args.project,
                               run_name=args.run,
                               config=vars(args))

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(
            minibatch=True,
            epoch=True,
            epoch_running=True,
            experience=True,
            stream=True,
        ),
        loss_metrics(
            minibatch=True,
            epoch=True,
            epoch_running=True,
            experience=True,
            stream=True,
        ),
        forgetting_metrics(experience=True, stream=True),
        confusion_matrix_metrics(stream=True,
                                 wandb=True,
                                 class_names=[str(i) for i in range(10)]),
        cpu_usage_metrics(minibatch=True,
                          epoch=True,
                          experience=True,
                          stream=True),
        timing_metrics(minibatch=True,
                       epoch=True,
                       experience=True,
                       stream=True),
        ram_usage_metrics(every=0.5,
                          minibatch=True,
                          epoch=True,
                          experience=True,
                          stream=True),
        gpu_usage_metrics(
            args.cuda,
            every=0.5,
            minibatch=True,
            epoch=True,
            experience=True,
            stream=True,
        ),
        disk_usage_metrics(minibatch=True,
                           epoch=True,
                           experience=True,
                           stream=True),
        MAC_metrics(minibatch=True, epoch=True, experience=True),
        loggers=[interactive_logger, wandb_logger],
    )

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(
        model,
        SGD(model.parameters(), lr=0.001, momentum=0.9),
        CrossEntropyLoss(),
        train_mb_size=100,
        train_epochs=4,
        eval_mb_size=100,
        device=device,
        evaluator=eval_plugin,
    )

    # TRAINING LOOP
    print("Starting experiment...")
    results = []
    for experience in scenario.train_stream:
        print("Start of experience: ", experience.current_experience)
        print("Current Classes: ", experience.classes_in_this_experience)

        cl_strategy.train(experience)
        print("Training completed")

        print("Computing accuracy on the whole test set")
        results.append(cl_strategy.eval(scenario.test_stream))
Example #30
0
def main(args):
    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")
    # ---------

    # --- TRANSFORMATIONS
    train_transform = transforms.Compose([
        RandomCrop(28, padding=4),
        ToTensor(),
        transforms.Normalize((0.1307, ), (0.3081, ))
    ])
    test_transform = transforms.Compose(
        [ToTensor(), transforms.Normalize((0.1307, ), (0.3081, ))])
    # ---------

    # --- SCENARIO CREATION
    mnist_train = MNIST('./data/mnist',
                        train=True,
                        download=True,
                        transform=train_transform)
    mnist_test = MNIST('./data/mnist',
                       train=False,
                       download=True,
                       transform=test_transform)
    scenario = nc_scenario(mnist_train,
                           mnist_test,
                           5,
                           task_labels=False,
                           seed=1234)
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=scenario.n_classes)

    # DEFINE THE EVALUATION PLUGIN AND LOGGER
    # The evaluation plugin manages the metrics computation.
    # It takes as argument a list of metrics and a list of loggers.
    # The evaluation plugin calls the loggers to serialize the metrics
    # and save them in persistent memory or print them in the standard output.

    # log to text file
    text_logger = TextLogger(open('log.txt', 'a'))

    # print to stdout
    interactive_logger = InteractiveLogger()

    eval_plugin = EvaluationPlugin(accuracy_metrics(minibatch=True,
                                                    epoch=True,
                                                    experience=True,
                                                    stream=True),
                                   loss_metrics(minibatch=True,
                                                epoch=True,
                                                experience=True,
                                                stream=True),
                                   cpu_usage_metrics(minibatch=True,
                                                     epoch=True,
                                                     experience=True,
                                                     stream=True),
                                   timing_metrics(minibatch=True,
                                                  epoch=True,
                                                  experience=True,
                                                  stream=True),
                                   ExperienceForgetting(),
                                   loggers=[interactive_logger, text_logger])

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(model,
                        SGD(model.parameters(), lr=0.001, momentum=0.9),
                        CrossEntropyLoss(),
                        train_mb_size=500,
                        train_epochs=1,
                        eval_mb_size=100,
                        device=device,
                        evaluator=eval_plugin)

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start of experience: ", experience.current_experience)
        print("Current Classes: ", experience.classes_in_this_experience)

        # train returns a list of dictionaries (one for each experience). Each
        # dictionary stores the last value of each metric curve emitted
        # during training.
        res = cl_strategy.train(experience)
        print('Training completed')

        print('Computing accuracy on the whole test set')
        # test also returns a dictionary
        results.append(cl_strategy.eval(scenario.test_stream))

    print(f"Test metrics:\n{results}")

    # All the metric curves (x,y values) are stored inside the evaluator
    # (can be disabled). You can use this dictionary to manipulate the
    # metrics without avalanche.
    all_metrics = cl_strategy.evaluator.get_all_metrics()
    print(f"Stored metrics: {list(all_metrics.keys())}")
    mname = 'Top1_Acc_Task/Task000'
    print(f"{mname}: {all_metrics[mname]}")