Code Example #1
File: test_plugins.py Project: pkraison/avalanche
    def test_publish_metric(self):
        ep = EvaluationPlugin()
        mval = MetricValue(self, "metric", 1.0, 0)
        ep.publish_metric_value(mval)

        # check key exists
        assert len(ep.get_all_metrics()["metric"][1]) == 1
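Note: `get_all_metrics()` returns a dict that maps each metric name to an `(x_values, metric_values)` pair, which is why the assertion above indexes `[1]`. A minimal sketch of reading both axes (same plugin and metric name as above):

        # x axis: the step counters at which values were emitted,
        # y axis: the metric values themselves.
        xs, ys = ep.get_all_metrics()["metric"]
        assert len(xs) == len(ys) == 1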
Code Example #2
File: test_strategies.py Project: rrmina/avalanche
    def test_periodic_eval(self):
        model = SimpleMLP(input_size=6, hidden_size=10)
        scenario = get_fast_scenario()
        optimizer = SGD(model.parameters(), lr=1e-3)
        criterion = CrossEntropyLoss()
        curve_key = 'Top1_Acc_Stream/eval_phase/train_stream'

        ###################
        # Case #1: No eval
        ###################
        # we use stream acc. because it emits a single value
        # for each eval loop.
        acc = StreamAccuracy()
        strategy = Naive(model,
                         optimizer,
                         criterion,
                         train_epochs=2,
                         eval_every=-1,
                         evaluator=EvaluationPlugin(acc))
        strategy.train(scenario.train_stream[0])
        # eval is not called in this case
        assert len(strategy.evaluator.all_metrics) == 0

        ###################
        # Case #2: Eval at the end only
        ###################
        acc = StreamAccuracy()
        strategy = Naive(model,
                         optimizer,
                         criterion,
                         train_epochs=2,
                         eval_every=0,
                         evaluator=EvaluationPlugin(acc))
        strategy.train(scenario.train_stream[0])
        # eval is called once at the end of the training loop
        curve = strategy.evaluator.all_metrics[curve_key][1]
        assert len(curve) == 1

        ###################
        # Case #3: Eval after every epoch
        ###################
        acc = StreamAccuracy()
        strategy = Naive(model,
                         optimizer,
                         criterion,
                         train_epochs=2,
                         eval_every=1,
                         evaluator=EvaluationPlugin(acc))
        strategy.train(scenario.train_stream[0])
        # eval is called after every epoch + the end of the training loop
        curve = strategy.evaluator.all_metrics[curve_key][1]
        assert len(curve) == 3
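Taken together, the three cases pin down how `eval_every` maps to the number of recorded values: -1 records nothing, 0 records a single value at the end of training, and 1 records one value per epoch plus one for the final eval. A compact restatement of the last two assertions (a sketch reusing the objects defined in the test above):

        for eval_every, expected_len in [(0, 1), (1, 3)]:
            acc = StreamAccuracy()
            strategy = Naive(model, optimizer, criterion,
                             train_epochs=2, eval_every=eval_every,
                             evaluator=EvaluationPlugin(acc))
            strategy.train(scenario.train_stream[0])
            curve = strategy.evaluator.all_metrics[curve_key][1]
            assert len(curve) == expected_len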
Code Example #3
    def test_pnn(self):
        # check that pnn reaches high enough accuracy.
        # Ensure nothing weird is happening with the multiple heads.

        set_deterministic_run(seed=0)

        main_metric = StreamAccuracy()
        exp_acc = ExperienceAccuracy()
        evalp = EvaluationPlugin(main_metric, exp_acc, loggers=None)
        model = PNN(num_layers=1, in_features=6, hidden_features_per_column=50)
        optimizer = SGD(model.parameters(), lr=0.1)
        strategy = PNNStrategy(
            model,
            optimizer,
            train_mb_size=32,
            device=get_device(),
            eval_mb_size=512,
            train_epochs=1,
            evaluator=evalp,
        )
        benchmark = get_fast_benchmark(use_task_labels=True)

        for train_batch_info in benchmark.train_stream:
            strategy.train(train_batch_info)

        strategy.eval(benchmark.train_stream[:])
        print("TRAIN STREAM ACC: ", main_metric.result())
        assert (sum(main_metric.result().values()) /
                float(len(main_metric.result().keys())) > 0.5)
Code Example #4
def main(args):

    # Model getter: specify dataset and depth of the network.
    model = pytorchcv_wrapper.resnet('cifar10', depth=20, pretrained=False)

    # Or get a more specific model, e.g. a Wide ResNet with depth 40 and
    # widening factor 8 for CIFAR-10:
    # model = pytorchcv_wrapper.get_model("wrn40_8_cifar10", pretrained=False)

    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}"
                          if torch.cuda.is_available() and
                          args.cuda >= 0 else "cpu")

    device = "cpu"

    # --- TRANSFORMATIONS
    transform = transforms.Compose([
        ToTensor(),
        transforms.Normalize((0.491, 0.482, 0.446), (0.247, 0.243, 0.261))
    ])

    # --- SCENARIO CREATION
    cifar_train = CIFAR10(root=expanduser("~") + "/.avalanche/data/cifar10/",
                          train=True, download=True, transform=transform)
    cifar_test = CIFAR10(root=expanduser("~") + "/.avalanche/data/cifar10/",
                         train=False, download=True, transform=transform)
    scenario = nc_benchmark(
        cifar_train, cifar_test, 5, task_labels=False, seed=1234,
        fixed_class_order=[i for i in range(10)])

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(
            minibatch=True, epoch=True, experience=True, stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        forgetting_metrics(experience=True),
        loggers=[interactive_logger])

    # CREATE THE STRATEGY INSTANCE (Naive, with Replay)
    cl_strategy = Naive(model, torch.optim.SGD(model.parameters(), lr=0.01),
                        CrossEntropyLoss(),
                        train_mb_size=100, train_epochs=1, eval_mb_size=100,
                        device=device,
                        plugins=[ReplayPlugin(mem_size=1000)],
                        evaluator=eval_plugin
                        )

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start of experience ", experience.current_experience)
        cl_strategy.train(experience)
        print('Training completed')

        print('Computing accuracy on the whole test set')
        results.append(cl_strategy.eval(scenario.test_stream))
Code Example #5
    def test_plugins_compatibility_checks(self):
        model = SimpleMLP(input_size=6, hidden_size=10)
        benchmark = get_fast_benchmark()
        optimizer = SGD(model.parameters(), lr=1e-3)
        criterion = CrossEntropyLoss()

        evalp = EvaluationPlugin(
            loss_metrics(minibatch=True,
                         epoch=True,
                         experience=True,
                         stream=True),
            loggers=[InteractiveLogger()],
            strict_checks=None,
        )

        strategy = Naive(
            model,
            optimizer,
            criterion,
            train_epochs=2,
            eval_every=-1,
            evaluator=evalp,
            plugins=[
                EarlyStoppingPlugin(patience=10, val_stream_name="train")
            ],
        )
        strategy.train(benchmark.train_stream[0])
Code Example #6
File: replay.py Project: ryanlindeborg/avalanche
def main(args):
    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}"
                          if torch.cuda.is_available() and
                          args.cuda >= 0 else "cpu")
    n_batches = 5
    # ---------

    # --- TRANSFORMATIONS
    train_transform = transforms.Compose([
        RandomCrop(28, padding=4),
        ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    test_transform = transforms.Compose([
        ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    # ---------

    # --- SCENARIO CREATION
    mnist_train = MNIST('./data/mnist', train=True,
                        download=True, transform=train_transform)
    mnist_test = MNIST('./data/mnist', train=False,
                       download=True, transform=test_transform)
    scenario = nc_scenario(
        mnist_train, mnist_test, n_batches, task_labels=False, seed=1234)
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=scenario.n_classes)

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(
            minibatch=True, epoch=True, experience=True, stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        ExperienceForgetting(),
        loggers=[interactive_logger])

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(model, torch.optim.Adam(model.parameters(), lr=0.001),
                        CrossEntropyLoss(),
                        train_mb_size=100, train_epochs=4, eval_mb_size=100, device=device,
                        plugins=[ReplayPlugin(mem_size=10000)],
                        evaluator=eval_plugin
                        )

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start of experience ", experience.current_experience)
        cl_strategy.train(experience)
        print('Training completed')

        print('Computing accuracy on the whole test set')
        results.append(cl_strategy.eval(scenario.test_stream))
Code Example #7
    def test_multihead_cumulative(self):
        # check that multi-head reaches high enough accuracy.
        # Ensure nothing weird is happening with the multiple heads.

        set_deterministic_run(seed=0)

        model = MHTestMLP(input_size=6, hidden_size=100)
        criterion = CrossEntropyLoss()
        optimizer = SGD(model.parameters(), lr=1)

        main_metric = StreamAccuracy()
        exp_acc = ExperienceAccuracy()
        evalp = EvaluationPlugin(main_metric, exp_acc, loggers=None)
        strategy = Cumulative(
            model,
            optimizer,
            criterion,
            train_mb_size=64,
            device=get_device(),
            eval_mb_size=512,
            train_epochs=6,
            evaluator=evalp,
        )
        benchmark = get_fast_benchmark(use_task_labels=True)

        for train_batch_info in benchmark.train_stream:
            strategy.train(train_batch_info)
        strategy.eval(benchmark.train_stream[:])
        print("TRAIN STREAM ACC: ", main_metric.result())
        assert (sum(main_metric.result().values()) /
                float(len(main_metric.result().keys())) > 0.7)
Code Example #8
    def test_multihead_cumulative(self):
        # check that multi-head reaches high enough accuracy.
        # Ensure nothing weird is happening with the multiple heads.
        model = MHTestMLP(input_size=6, hidden_size=100)
        criterion = CrossEntropyLoss()
        optimizer = SGD(model.parameters(), lr=1)

        main_metric = StreamAccuracy()
        exp_acc = ExperienceAccuracy()
        evalp = EvaluationPlugin(main_metric, exp_acc, loggers=None)
        strategy = Cumulative(model,
                              optimizer,
                              criterion,
                              train_mb_size=32,
                              device=get_device(),
                              eval_mb_size=512,
                              train_epochs=1,
                              evaluator=evalp)
        scenario = get_fast_scenario(use_task_labels=True)

        for train_batch_info in scenario.train_stream:
            strategy.train(train_batch_info)
        strategy.eval(scenario.train_stream[:])
        print("TRAIN STREAM ACC: ", main_metric.result())
        assert main_metric.result() > 0.7
Code Example #9
def main(args):
    """
    Last Avalanche version reference performance (online):
        Top1_Acc_Stream/eval_phase/test_stream = 0.9421
    """
    # --- DEFAULT PARAMS ONLINE DATA INCREMENTAL LEARNING
    nb_tasks = 5  # Can still design the data stream based on tasks
    epochs = 1  # All data is only seen once: Online
    batch_size = 10  # Only process small amount of data at a time
    return_task_id = False  # Data incremental (task-agnostic/task-free)
    # TODO use data_incremental_generator, now experience=task

    # --- CONFIG
    device = torch.device(
        f"cuda:{args.cuda}" if torch.cuda.is_available() and args.cuda >= 0
        else "cpu")
    # ---------

    # --- SCENARIO CREATION
    scenario = SplitMNIST(nb_tasks, return_task_id=return_task_id,
                          fixed_class_order=[i for i in range(10)])
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=args.featsize,
                      hidden_size=400, hidden_layers=2)
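    # Note: the output size is args.featsize rather than the number of
    # classes, presumably because CoPE's PPP-loss operates on feature
    # embeddings matched against prototypes of size p_size (set below).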

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(experience=True, stream=True),
        loss_metrics(experience=True, stream=True),
        ExperienceForgetting(),
        loggers=[interactive_logger])

    # CoPE PLUGIN
    cope = CoPEPlugin(mem_size=2000, p_size=args.featsize,
                      n_classes=scenario.n_classes)

    # CREATE THE STRATEGY INSTANCE (NAIVE) WITH CoPE PLUGIN
    cl_strategy = Naive(model, torch.optim.SGD(model.parameters(), lr=0.01),
                        cope.loss,  # CoPE PPP-Loss
                        train_mb_size=batch_size, train_epochs=epochs,
                        eval_mb_size=100, device=device,
                        plugins=[cope],
                        evaluator=eval_plugin
                        )

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start of experience ", experience.current_experience)
        cl_strategy.train(experience)
        print('Training completed')

        print('Computing accuracy on the whole test set')
        results.append(cl_strategy.eval(scenario.test_stream))
Code Example #10
def main(args):
    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}"
                          if torch.cuda.is_available() and
                          args.cuda >= 0 else "cpu")
    # ---------

    # --- TRANSFORMATIONS
    train_transform = transforms.Compose([
        RandomCrop(28, padding=4),
        ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    test_transform = transforms.Compose([
        ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    # ---------

    # --- SCENARIO CREATION
    mnist_train = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                        train=True, download=True, transform=train_transform)
    mnist_test = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                       train=False, download=True, transform=test_transform)
    scenario = nc_scenario(
        mnist_train, mnist_test, 5, task_labels=False, seed=1234)
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=scenario.n_classes)

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(epoch=True, experience=True, stream=True),
        loss_metrics(epoch=True, experience=True, stream=True),
        # save_image should be False to view results properly in the
        # InteractiveLogger: the confusion matrix is printed as a tensor.
        StreamConfusionMatrix(save_image=False, normalize='all'),
        loggers=InteractiveLogger()
    )

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(
        model, SGD(model.parameters(), lr=0.001, momentum=0.9),
        CrossEntropyLoss(), train_mb_size=100, train_epochs=4, eval_mb_size=100,
        device=device, evaluator=eval_plugin, plugins=[ReplayPlugin(5000)])

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start of experience: ", experience.current_experience)
        print("Current Classes: ", experience.classes_in_this_experience)

        cl_strategy.train(experience)
        print('Training completed')

        print('Computing accuracy on the whole test set')
        results.append(cl_strategy.eval(scenario.test_stream))
Code Example #11
def main(args):
    model = SimpleMLP(hidden_size=args.hs)
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
    criterion = torch.nn.CrossEntropyLoss()

    # check if selected GPU is available or use CPU
    assert args.cuda == -1 or args.cuda >= 0, "cuda must be -1 or >= 0."
    device = torch.device(
        f"cuda:{args.cuda}"
        if torch.cuda.is_available() and args.cuda >= 0
        else "cpu"
    )
    print(f"Using device: {device}")

    # create Permuted MNIST scenario
    scenario = PermutedMNIST(n_experiences=4)

    interactive_logger = InteractiveLogger()
    eval_plugin = EvaluationPlugin(
        accuracy_metrics(
            minibatch=True, epoch=True, experience=True, stream=True
        ),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        forgetting_metrics(experience=True),
        loggers=[interactive_logger],
    )

    # create strategy
    assert (
        len(args.lambda_e) == 1 or len(args.lambda_e) == 5
    ), "Lambda_e must be a non-empty list."
    lambda_e = args.lambda_e[0] if len(args.lambda_e) == 1 else args.lambda_e

    strategy = LFL(
        model,
        optimizer,
        criterion,
        lambda_e=lambda_e,
        train_epochs=args.epochs,
        device=device,
        train_mb_size=args.minibatch_size,
        evaluator=eval_plugin,
    )

    # train on the selected scenario with the chosen strategy
    print("Starting experiment...")
    results = []
    for train_batch_info in scenario.train_stream:
        print(
            "Start training on experience ", train_batch_info.current_experience
        )

        strategy.train(train_batch_info, num_workers=0)
        print(
            "End training on experience ", train_batch_info.current_experience
        )
        print("Computing accuracy on the test set")
        results.append(strategy.eval(scenario.test_stream[:]))
Code Example #12
def main(args):
    model = SimpleMLP(hidden_size=args.hs)
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
    criterion = torch.nn.CrossEntropyLoss()

    # check if selected GPU is available or use CPU
    assert args.cuda == -1 or args.cuda >= 0, "cuda must be -1 or >= 0."
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")
    print(f'Using device: {device}')

    # create scenario
    if args.scenario == 'pmnist':
        scenario = PermutedMNIST(n_experiences=args.permutations)
    elif args.scenario == 'smnist':
        scenario = SplitMNIST(n_experiences=5, return_task_id=False)
    else:
        raise ValueError("Wrong scenario name. Allowed pmnist, smnist.")

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()
    text_logger = TextLogger(open('log.txt', 'a'))

    eval_plugin = EvaluationPlugin(accuracy_metrics(minibatch=True,
                                                    epoch=True,
                                                    experience=True,
                                                    stream=True),
                                   loss_metrics(minibatch=True,
                                                epoch=True,
                                                experience=True,
                                                stream=True),
                                   ExperienceForgetting(),
                                   loggers=[interactive_logger])

    # create strategy
    strategy = EWC(model,
                   optimizer,
                   criterion,
                   args.ewc_lambda,
                   args.ewc_mode,
                   decay_factor=args.decay_factor,
                   train_epochs=args.epochs,
                   device=device,
                   train_mb_size=args.minibatch_size,
                   evaluator=eval_plugin)

    # train on the selected scenario with the chosen strategy
    print('Starting experiment...')
    results = []
    for experience in scenario.train_stream:
        print("Start training on experience ", experience.current_experience)

        strategy.train(experience)
        print("End training on experience", experience.current_experience)
        print('Computing accuracy on the test set')
        results.append(strategy.eval(scenario.test_stream[:]))
Code Example #13
    def __init__(
        self,
        model: Module,
        optimizer,
        criterion=CrossEntropyLoss(),
        train_mb_size: int = 1,
        train_epochs: int = 1,
        eval_mb_size: int = 1,
        device="cpu",
        plugins=None,
        evaluator=None,
        eval_every=-1,
        peval_mode="epoch",
    ):
        self._criterion = criterion
        self.model: Module = model
        self.optimizer = optimizer
        self.train_epochs: int = train_epochs
        self.train_mb_size: int = train_mb_size
        self.eval_mb_size: int = (
            train_mb_size if eval_mb_size is None else eval_mb_size
        )
        self.device = device
        self.plugins = [] if plugins is None else plugins

        if evaluator is None:
            evaluator = EvaluationPlugin()
        self.plugins.append(evaluator)
        self.evaluator = evaluator
        """ EvaluationPlugin used for logging and metric computations. """

        # Configure periodic evaluation.
        assert peval_mode in {"epoch", "iteration"}
        self.eval_every = eval_every
        peval = PeriodicEval(eval_every, peval_mode)
        self.plugins.append(peval)

        self.clock = Clock()
        """ Incremental counters for strategy events. """
        # WARNING: Clock needs to be the last plugin, otherwise
        # counters will be wrong for plugins called after it.
        self.plugins.append(self.clock)

        ###################################################################
        # State variables. These are updated during the train/eval loops. #
        ###################################################################
        self.experience = None
        self.adapted_dataset = None
        self.dataloader = None
        self.mbatch = None
        self.mb_output = None
        self.loss = None
        self.is_training: bool = False
        self.current_eval_stream = None
        self._stop_training = False
Code Example #14
File: lwf_mnist.py Project: ryanlindeborg/avalanche
def main(args):
    model = SimpleMLP(hidden_size=args.hs)
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
    criterion = torch.nn.CrossEntropyLoss()

    # check if selected GPU is available or use CPU
    assert args.cuda == -1 or args.cuda >= 0, "cuda must be -1 or >= 0."
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")
    print(f'Using device: {device}')

    # create split scenario
    scenario = SplitMNIST(n_experiences=5, return_task_id=False)

    interactive_logger = InteractiveLogger()
    eval_plugin = EvaluationPlugin(accuracy_metrics(minibatch=True,
                                                    epoch=True,
                                                    experience=True,
                                                    stream=True),
                                   loss_metrics(minibatch=True,
                                                epoch=True,
                                                experience=True,
                                                stream=True),
                                   ExperienceForgetting(),
                                   loggers=[interactive_logger])

    # create strategy
    assert len(args.lwf_alpha) == 1 or len(args.lwf_alpha) == 5,\
        'lwf_alpha must be a list of length 1 or 5.'
    lwf_alpha = args.lwf_alpha[0] if len(
        args.lwf_alpha) == 1 else args.lwf_alpha

    strategy = LwF(model,
                   optimizer,
                   criterion,
                   alpha=lwf_alpha,
                   temperature=args.softmax_temperature,
                   train_epochs=args.epochs,
                   device=device,
                   train_mb_size=args.minibatch_size,
                   evaluator=eval_plugin)

    # train on the selected scenario with the chosen strategy
    print('Starting experiment...')
    results = []
    for train_batch_info in scenario.train_stream:
        print("Start training on experience ",
              train_batch_info.current_experience)

        strategy.train(train_batch_info, num_workers=4)
        print("End training on experience ",
              train_batch_info.current_experience)
        print('Computing accuracy on the test set')
        results.append(strategy.eval(scenario.test_stream[:]))
Code Example #15
    def _test_logger(self, logp):
        evalp = EvaluationPlugin(
            loss_metrics(minibatch=True, epoch=True,
                         experience=True, stream=True),
            loggers=[logp]
        )
        strat = Naive(self.model, self.optimizer, evaluator=evalp,
                      train_mb_size=32)
        for e in self.benchmark.train_stream:
            strat.train(e)
        strat.eval(self.benchmark.train_stream)
Code Example #16
    def __init__(
        self,
        model: Module,
        optimizer: Optimizer,
        criterion=VAE_loss,
        train_mb_size: int = 1,
        train_epochs: int = 1,
        eval_mb_size: Optional[int] = None,
        device=None,
        plugins: Optional[List[SupervisedPlugin]] = None,
        evaluator: EvaluationPlugin = EvaluationPlugin(
            loggers=[InteractiveLogger()],
            suppress_warnings=True,
        ),
        eval_every=-1,
        **base_kwargs
    ):
        """
        Creates an instance of the Naive strategy.

        :param model: The model.
        :param optimizer: The optimizer to use.
        :param criterion: The loss criterion to use.
        :param train_mb_size: The train minibatch size. Defaults to 1.
        :param train_epochs: The number of training epochs. Defaults to 1.
        :param eval_mb_size: The eval minibatch size. Defaults to None
            (falls back to `train_mb_size`).
        :param device: The device to use. Defaults to None (cpu).
        :param plugins: Plugins to be added. Defaults to None.
        :param evaluator: (optional) instance of EvaluationPlugin for logging
            and metric computations.
        :param eval_every: the frequency of the calls to `eval` inside the
            training loop. -1 disables the evaluation. 0 means `eval` is called
            only at the end of the learning experience. Values >0 mean that
            `eval` is called every `eval_every` epochs and at the end of the
            learning experience.
        :param **base_kwargs: any additional
            :class:`~avalanche.training.BaseTemplate` constructor arguments.
        """

        super().__init__(
            model,
            optimizer,
            criterion,
            train_mb_size=train_mb_size,
            train_epochs=train_epochs,
            eval_mb_size=eval_mb_size,
            device=device,
            plugins=plugins,
            evaluator=evaluator,
            eval_every=eval_every,
            **base_kwargs
        )
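Note: the default `evaluator` above is a single `EvaluationPlugin` instance created once at class-definition time, so every strategy constructed without an explicit `evaluator` would share it (the classic mutable-default-argument pitfall). A sketch of the safer pattern used by the base classes in Code Examples #13 and #25, assuming the same constructor:

        # Default evaluator=None in the signature, then build a fresh
        # plugin per instance so metric state is not shared:
        if evaluator is None:
            evaluator = EvaluationPlugin(
                loggers=[InteractiveLogger()],
                suppress_warnings=True,
            )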
Code Example #17
    def test_no_errors(self):
        eval_plugin = EvaluationPlugin(accuracy_metrics(stream=True),
                                       loggers=None,
                                       benchmark=self.benchmark,
                                       strict_checks=True)
        strategy = Naive(self.model,
                         self.optimizer,
                         self.criterion,
                         train_epochs=2,
                         eval_every=0,
                         evaluator=eval_plugin)
        for exp in self.benchmark.train_stream:
            strategy.train(exp, eval_streams=[self.benchmark.test_stream])
            strategy.eval(self.benchmark.test_stream)
Code Example #18
def main(cuda: int):
    # --- CONFIG
    device = torch.device(
        f"cuda:{cuda}" if torch.cuda.is_available() else "cpu"
    )
    # --- SCENARIO CREATION
    scenario = SplitCIFAR10(n_experiences=2, seed=42)
    # ---------

    # MODEL CREATION
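    # 196608 // 64 = 3072 = 3 * 32 * 32, i.e. one flattened CIFAR-10 image.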
    model = SimpleMLP(num_classes=scenario.n_classes, input_size=196608 // 64)

    # choose some metrics and evaluation method
    eval_plugin = EvaluationPlugin(
        accuracy_metrics(stream=True, experience=True),
        images_samples_metrics(
            on_train=True,
            on_eval=True,
            n_cols=10,
            n_rows=10,
        ),
        labels_repartition_metrics(
            # image_creator=repartition_bar_chart_image_creator,
            on_train=True,
            on_eval=True,
        ),
        loggers=[
            TensorboardLogger(f"tb_data/{datetime.now()}"),
            InteractiveLogger(),
        ],
    )

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(
        model,
        Adam(model.parameters()),
        train_mb_size=128,
        train_epochs=1,
        eval_mb_size=128,
        device=device,
        plugins=[ReplayPlugin(mem_size=1_000)],
        evaluator=eval_plugin,
    )

    # TRAINING LOOP
    for i, experience in enumerate(scenario.train_stream, 1):
        cl_strategy.train(experience)
        cl_strategy.eval(scenario.test_stream[:i])
Code Example #19
    def test_raise_warning(self):
        eval_plugin = EvaluationPlugin(accuracy_metrics(stream=True),
                                       loggers=None,
                                       benchmark=self.benchmark,
                                       strict_checks=False)
        strategy = Naive(self.model,
                         self.optimizer,
                         self.criterion,
                         train_epochs=2,
                         eval_every=-1,
                         evaluator=eval_plugin)
        for exp in self.benchmark.train_stream:
            strategy.train(exp)
            strategy.eval(self.benchmark.test_stream)
        with self.assertWarns(UserWarning):
            strategy.eval(self.benchmark.test_stream[:2])
Code Example #20
def main(args):
    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")

    # --- SCENARIO CREATION
    scenario = SplitMNIST(n_experiences=10, seed=1234)
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=scenario.n_classes)

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(minibatch=True,
                         epoch=True,
                         experience=True,
                         stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        forgetting_metrics(experience=True),
        loggers=[interactive_logger],
    )

    # CREATE THE STRATEGY INSTANCE (GenerativeReplay)
    cl_strategy = GenerativeReplay(
        model,
        torch.optim.Adam(model.parameters(), lr=0.001),
        CrossEntropyLoss(),
        train_mb_size=100,
        train_epochs=4,
        eval_mb_size=100,
        device=device,
        evaluator=eval_plugin,
    )

    # TRAINING LOOP
    print("Starting experiment...")
    results = []
    for experience in scenario.train_stream:
        print("Start of experience ", experience.current_experience)
        cl_strategy.train(experience)
        print("Training completed")

        print("Computing accuracy on the whole test set")
        results.append(cl_strategy.eval(scenario.test_stream))
Code Example #21
File: multihead.py Project: pkraison/avalanche
def main(args):

    # Config
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")
    # model
    model = MTSimpleMLP()

    # CL Benchmark Creation
    scenario = SplitMNIST(n_experiences=5, return_task_id=True)
    train_stream = scenario.train_stream
    test_stream = scenario.test_stream

    # Prepare for training & testing
    optimizer = Adam(model.parameters(), lr=0.01)
    criterion = CrossEntropyLoss()

    # choose some metrics and evaluation method
    interactive_logger = InteractiveLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(minibatch=False,
                         epoch=True,
                         experience=True,
                         stream=True),
        forgetting_metrics(experience=True),
        loggers=[interactive_logger],
    )

    # Choose a CL strategy
    strategy = EWC(
        model=model,
        optimizer=optimizer,
        criterion=criterion,
        train_mb_size=128,
        train_epochs=3,
        eval_mb_size=128,
        device=device,
        evaluator=eval_plugin,
        ewc_lambda=0.4,
    )

    # train and test loop
    for train_task in train_stream:
        strategy.train(train_task)
        strategy.eval(test_stream)
Code Example #22
    def test_forward_callbacks(self):
        # The EvaluationPlugin should forward all the callbacks to metrics,
        # even those that are unused by the EvaluationPlugin itself.
        class MetricMock:
            def __init__(self):
                self.x = 0

            def before_blabla(self, strategy):
                self.x += 1

        met = MetricMock()
        evalp = EvaluationPlugin(met)
        evalp.before_blabla(None)

        # it should ignore undefined callbacks
        evalp.after_blabla(None)

        # it should raise error for other undefined attributes
        with self.assertRaises(AttributeError):
            evalp.asd(None)
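The test relies on `EvaluationPlugin` forwarding unknown `before_*`/`after_*` callbacks to its metrics while still raising `AttributeError` for arbitrary attribute names. A simplified sketch of that forwarding pattern (not Avalanche's actual implementation, only the behavior the test exercises):

class ForwardingPlugin:
    def __init__(self, *metrics):
        self.metrics = metrics

    def __getattr__(self, name):
        # __getattr__ is called only for attributes not found normally.
        if name.startswith(("before_", "after_")):
            def forward(strategy):
                # Forward to every metric that defines this callback;
                # silently skip metrics that do not.
                for metric in self.metrics:
                    callback = getattr(metric, name, None)
                    if callback is not None:
                        callback(strategy)
            return forward
        raise AttributeError(name)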
Code Example #23
def run_base_online(experience, device, use_interactive_logger: bool = False):
    """
    Runs OnlineNaive for one experience.
    """

    # Create list of loggers to be used
    loggers = []
    if use_interactive_logger:
        interactive_logger = InteractiveLogger()
        loggers.append(interactive_logger)

    # Evaluation plugin
    eval_plugin = EvaluationPlugin(
        accuracy_metrics(minibatch=True,
                         epoch=True,
                         experience=True,
                         stream=True),
        loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
        forgetting_metrics(experience=True),
        loggers=loggers,
    )

    # Model
    model = SimpleMLP(num_classes=10)

    # Create OnlineNaive strategy
    cl_strategy = OnlineNaive(
        model,
        torch.optim.SGD(model.parameters(), lr=0.01),
        CrossEntropyLoss(),
        num_passes=1,
        train_mb_size=1,
        device=device,
        evaluator=eval_plugin,
    )

    start = time.time()
    print("Running OnlineNaive ...")
    cl_strategy.train(experience)
    end = time.time()
    duration = end - start

    return duration
Code Example #24
    def test_pnn(self):
        # check that pnn reaches high enough accuracy.
        # Ensure nothing weird is happening with the multiple heads.
        main_metric = StreamAccuracy()
        exp_acc = ExperienceAccuracy()
        evalp = EvaluationPlugin(main_metric, exp_acc, loggers=None)
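        # Positional arguments for the older PNNStrategy API; they presumably
        # mirror the keyword form in Code Example #3: num_layers=1,
        # in_features=6, hidden_features_per_column=50, lr=0.1.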
        strategy = PNNStrategy(1,
                               6,
                               50,
                               0.1,
                               train_mb_size=32,
                               device=get_device(),
                               eval_mb_size=512,
                               train_epochs=1,
                               evaluator=evalp)
        scenario = get_fast_scenario(use_task_labels=True)

        for train_batch_info in scenario.train_stream:
            strategy.train(train_batch_info)

        strategy.eval(scenario.train_stream[:])
        print("TRAIN STREAM ACC: ", main_metric.result())
        assert main_metric.result() > 0.5
Code Example #25
    def __init__(
        self,
        model: Module,
        optimizer: Optimizer,
        train_mb_size: int = 1,
        train_epochs: int = 1,
        eval_mb_size: Optional[int] = 1,
        device="cpu",
        plugins: Optional[List["SupervisedPlugin"]] = None,
        evaluator: EvaluationPlugin = default_evaluator,
        eval_every=-1,
        peval_mode="epoch",
    ):
        """Init.

        :param model: PyTorch model.
        :param optimizer: PyTorch optimizer.
        :param train_mb_size: mini-batch size for training.
        :param train_epochs: number of training epochs.
        :param eval_mb_size: mini-batch size for eval.
        :param evaluator: (optional) instance of EvaluationPlugin for logging
            and metric computations. None to remove logging.
        :param eval_every: the frequency of the calls to `eval` inside the
            training loop. -1 disables the evaluation. 0 means `eval` is called
            only at the end of the learning experience. Values >0 mean that
            `eval` is called every `eval_every` epochs and at the end of the
            learning experience.
        :param peval_mode: one of {'epoch', 'iteration'}. Decides whether the
            periodic evaluation during training should execute every
            `eval_every` epochs or iterations (Default='epoch').
        """
        super().__init__(model=model, device=device, plugins=plugins)

        self.optimizer: Optimizer = optimizer
        """ PyTorch optimizer. """

        self.train_epochs: int = train_epochs
        """ Number of training epochs. """

        self.train_mb_size: int = train_mb_size
        """ Training mini-batch size. """

        self.eval_mb_size: int = (train_mb_size
                                  if eval_mb_size is None else eval_mb_size)
        """ Eval mini-batch size. """

        if evaluator is None:
            evaluator = EvaluationPlugin()
        self.plugins.append(evaluator)
        self.evaluator = evaluator
        """ EvaluationPlugin used for logging and metric computations. """

        # Configure periodic evaluation.
        assert peval_mode in {"epoch", "iteration"}
        self.eval_every = eval_every
        peval = PeriodicEval(eval_every, peval_mode)
        self.plugins.append(peval)

        self.clock = Clock()
        """ Incremental counters for strategy events. """
        # WARNING: Clock needs to be the last plugin, otherwise
        # counters will be wrong for plugins called after it.
        self.plugins.append(self.clock)

        ###################################################################
        # State variables. These are updated during the train/eval loops. #
        ###################################################################

        self.dataloader = None
        """ Dataloader. """

        self.mbatch = None
        """ Current mini-batch. """

        self.mb_output = None
        """ Model's output computed on the current mini-batch. """

        self.loss = None
        """ Loss of the current mini-batch. """

        self._stop_training = False
Code Example #26
File: base_strategy.py Project: msrocean/avalanche
    def __init__(self,
                 model: Module,
                 optimizer: Optimizer,
                 criterion,
                 train_mb_size: int = 1,
                 train_epochs: int = 1,
                 eval_mb_size: int = 1,
                 device='cpu',
                 plugins: Optional[Sequence['StrategyPlugin']] = None,
                 evaluator=default_logger):
        """
        BaseStrategy is the super class of all task-based continual learning
        strategies. It implements a basic training loop and callback system
        that allows executing code at each experience of the training loop.
        Plugins can be used to implement callbacks to augment the training
        loop with additional behavior (e.g. a memory buffer for replay).

        **Scenarios**
        This strategy supports several continual learning scenarios:

        * class-incremental scenarios (no task labels)
        * multi-task scenarios (where task labels are provided)
        * multi-incremental scenarios, where the same task may be revisited

        The exact scenario depends on the data stream and whether it provides
        the task labels.

        **Training loop**
        The training loop and its callbacks are organized as follows::
            train
                before_training
                before_training_exp
                adapt_train_dataset
                make_train_dataloader
                before_training_epoch
                    before_training_iteration
                        before_forward
                        after_forward
                        before_backward
                        after_backward
                    after_training_iteration
                    before_update
                    after_update
                after_training_epoch
                after_training_exp
                after_training

        **Evaluation loop**
        The evaluation loop and its callbacks are organized as follows::
            eval
                before_eval
                adapt_eval_dataset
                make_eval_dataloader
                before_eval_exp
                    eval_epoch
                        before_eval_iteration
                        before_eval_forward
                        after_eval_forward
                        after_eval_iteration
                after_eval_exp
                after_eval

        :param model: PyTorch model.
        :param optimizer: PyTorch optimizer.
        :param criterion: loss function.
        :param train_mb_size: mini-batch size for training.
        :param train_epochs: number of training epochs.
        :param eval_mb_size: mini-batch size for eval.
        :param device: PyTorch device where the model will be allocated.
        :param plugins: (optional) list of StrategyPlugins.
        :param evaluator: (optional) instance of EvaluationPlugin for logging
            and metric computations. None to remove logging.
        """
        self.model: Module = model
        """ PyTorch model. """

        self.criterion = criterion
        """ Loss function. """

        self.optimizer = optimizer
        """ PyTorch optimizer. """

        self.train_epochs: int = train_epochs
        """ Number of training epochs. """

        self.train_mb_size: int = train_mb_size
        """ Training mini-batch size. """

        self.eval_mb_size: int = train_mb_size if eval_mb_size is None \
            else eval_mb_size
        """ Eval mini-batch size. """

        self.device = device
        """ PyTorch device where the model will be allocated. """

        self.plugins = [] if plugins is None else plugins
        """ List of `StrategyPlugin`s. """

        if evaluator is None:
            evaluator = EvaluationPlugin()
        self.plugins.append(evaluator)
        self.evaluator = evaluator
        """ EvaluationPlugin used for logging and metric computations. """

        # Flow state variables
        self.training_exp_counter = 0
        """ Counts the number of training steps. +1 at the end of each 
        experience. """

        self.eval_exp_id = None  # eval-flow only
        """ Label of the currently evaluated experience. Only at eval time. """

        self.epoch: Optional[int] = None
        """ Epoch counter. """

        self.experience = None
        """ Current experience. """

        self.adapted_dataset = None
        """ Data used to train. It may be modified by plugins. Plugins can 
        append data to it (e.g. for replay). 
         
        .. note:: 
            This dataset may contain samples from different experiences. If you 
            want the original data for the current experience  
            use :attr:`.BaseStrategy.experience`.
        """

        self.current_dataloader = None
        """ Dataloader. """

        self.mb_it = None
        """ Iteration counter. Reset at the start of a new epoch. """

        self.mb_x = None
        """ Current mini-batch input. """

        self.mb_y = None
        """ Current mini-batch target. """

        self.loss = None
        """ Loss of the current mini-batch. """

        self.logits = None
        """ Logits computed on the current mini-batch. """

        self.train_task_label: Optional[int] = None
        """ Label of the current experience (train time). """

        self.eval_task_label: Optional[int] = None
        """ Label of the current experience (eval time). """

        self.is_training: bool = False
        """ True if the strategy is in training mode. """
Code Example #27
    def test_periodic_eval(self):
        model = SimpleMLP(input_size=6, hidden_size=10)
        model.classifier = IncrementalClassifier(model.classifier.in_features)
        benchmark = get_fast_benchmark()
        optimizer = SGD(model.parameters(), lr=1e-3)
        criterion = CrossEntropyLoss()
        curve_key = "Top1_Acc_Stream/eval_phase/train_stream/Task000"

        ###################
        # Case #1: No eval
        ###################
        # we use stream acc. because it emits a single value
        # for each eval loop.
        acc = StreamAccuracy()
        strategy = Naive(
            model,
            optimizer,
            criterion,
            train_epochs=2,
            eval_every=-1,
            evaluator=EvaluationPlugin(acc),
        )
        strategy.train(benchmark.train_stream[0])
        # eval is not called in this case
        assert len(strategy.evaluator.get_all_metrics()) == 0

        ###################
        # Case #2: Eval at the end only and before training
        ###################
        acc = StreamAccuracy()
        evalp = EvaluationPlugin(acc)
        strategy = Naive(
            model,
            optimizer,
            criterion,
            train_epochs=2,
            eval_every=0,
            evaluator=evalp,
        )
        strategy.train(benchmark.train_stream[0])
        # eval is called once at the end of the training loop
        curve = strategy.evaluator.get_all_metrics()[curve_key][1]
        assert len(curve) == 2

        ###################
        # Case #3: Eval after every epoch and before training
        ###################
        acc = StreamAccuracy()
        strategy = Naive(
            model,
            optimizer,
            criterion,
            train_epochs=2,
            eval_every=1,
            evaluator=EvaluationPlugin(acc),
        )
        strategy.train(benchmark.train_stream[0])
        curve = strategy.evaluator.get_all_metrics()[curve_key][1]
        assert len(curve) == 3

        ###################
        # Case #4: Eval in iteration mode
        ###################
        acc = StreamAccuracy()
        strategy = Naive(
            model,
            optimizer,
            criterion,
            train_epochs=2,
            eval_every=100,
            evaluator=EvaluationPlugin(acc),
            peval_mode="iteration",
        )
        strategy.train(benchmark.train_stream[0])
        curve = strategy.evaluator.get_all_metrics()[curve_key][1]
        assert len(curve) == 5
Code Example #28
def main(args):
    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")
    # ---------

    # --- TRANSFORMATIONS
    train_transform = transforms.Compose([
        RandomCrop(28, padding=4),
        ToTensor(),
        transforms.Normalize((0.1307, ), (0.3081, )),
    ])
    test_transform = transforms.Compose(
        [ToTensor(), transforms.Normalize((0.1307, ), (0.3081, ))])
    # ---------

    # --- SCENARIO CREATION
    mnist_train = MNIST(
        root=expanduser("~") + "/.avalanche/data/mnist/",
        train=True,
        download=True,
        transform=train_transform,
    )
    mnist_test = MNIST(
        root=expanduser("~") + "/.avalanche/data/mnist/",
        train=False,
        download=True,
        transform=test_transform,
    )
    scenario = nc_benchmark(mnist_train,
                            mnist_test,
                            5,
                            task_labels=False,
                            seed=1234)
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=scenario.n_classes)

    interactive_logger = InteractiveLogger()
    wandb_logger = WandBLogger(project_name=args.project,
                               run_name=args.run,
                               config=vars(args))

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(
            minibatch=True,
            epoch=True,
            epoch_running=True,
            experience=True,
            stream=True,
        ),
        loss_metrics(
            minibatch=True,
            epoch=True,
            epoch_running=True,
            experience=True,
            stream=True,
        ),
        forgetting_metrics(experience=True, stream=True),
        confusion_matrix_metrics(stream=True,
                                 wandb=True,
                                 class_names=[str(i) for i in range(10)]),
        cpu_usage_metrics(minibatch=True,
                          epoch=True,
                          experience=True,
                          stream=True),
        timing_metrics(minibatch=True,
                       epoch=True,
                       experience=True,
                       stream=True),
        ram_usage_metrics(every=0.5,
                          minibatch=True,
                          epoch=True,
                          experience=True,
                          stream=True),
        gpu_usage_metrics(
            args.cuda,
            every=0.5,
            minibatch=True,
            epoch=True,
            experience=True,
            stream=True,
        ),
        disk_usage_metrics(minibatch=True,
                           epoch=True,
                           experience=True,
                           stream=True),
        MAC_metrics(minibatch=True, epoch=True, experience=True),
        loggers=[interactive_logger, wandb_logger],
    )

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(
        model,
        SGD(model.parameters(), lr=0.001, momentum=0.9),
        CrossEntropyLoss(),
        train_mb_size=100,
        train_epochs=4,
        eval_mb_size=100,
        device=device,
        evaluator=eval_plugin,
    )

    # TRAINING LOOP
    print("Starting experiment...")
    results = []
    for experience in scenario.train_stream:
        print("Start of experience: ", experience.current_experience)
        print("Current Classes: ", experience.classes_in_this_experience)

        cl_strategy.train(experience)
        print("Training completed")

        print("Computing accuracy on the whole test set")
        results.append(cl_strategy.eval(scenario.test_stream))
Code Example #29
def main(args):
    # Device config
    device = torch.device(f"cuda:{args.cuda}"
                          if torch.cuda.is_available() and
                          args.cuda >= 0 else "cpu")
    print('device ', device)
    # ---------

    # --- TRANSFORMATIONS
    _mu = [0.485, 0.456, 0.406]  # imagenet normalization
    _std = [0.229, 0.224, 0.225]
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=_mu,
                             std=_std)
    ])
    # ---------

    # --- SCENARIO CREATION
    scenario = CORe50(scenario=args.scenario, train_transform=transform,
                      eval_transform=transform)

    # ---------

    eval_plugin = EvaluationPlugin(
        loss_metrics(epoch=True, experience=True, stream=True),
        accuracy_metrics(epoch=True, experience=True, stream=True),
        forgetting_metrics(experience=True, stream=True),
        loggers=[InteractiveLogger()]
    )

    criterion = torch.nn.CrossEntropyLoss()
    model = SLDAResNetModel(device=device, arch='resnet18',
                            imagenet_pretrained=args.imagenet_pretrained)

    # CREATE THE STRATEGY INSTANCE
    cl_strategy = StreamingLDA(model, criterion,
                               args.feature_size, args.n_classes,
                               eval_mb_size=args.batch_size,
                               train_mb_size=args.batch_size,
                               train_epochs=1,
                               shrinkage_param=args.shrinkage,
                               streaming_update_sigma=args.plastic_cov,
                               device=device, evaluator=eval_plugin)

    warnings.warn(
        "The Deep SLDA example is not perfectly aligned with "
        "the paper implementation since it does not use a base "
        "initialization phase and instead starts streming from "
        "pre-trained weights.")

    # TRAINING LOOP
    print('Starting experiment...')
    for i, exp in enumerate(scenario.train_stream):

        # fit SLDA model to batch (one sample at a time)
        cl_strategy.train(exp)

        # evaluate model on test data
        cl_strategy.eval(scenario.test_stream)
Code Example #30
def main(args):
    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")
    # ---------

    # --- TRANSFORMATIONS
    train_transform = ToTensor()
    test_transform = ToTensor()
    # ---------

    # --- SCENARIO CREATION
    torch.random.manual_seed(1234)
    n_exps = 100  # keep this high so each experience (and each training pass) stays short
    benchmark = split_lvis(n_experiences=n_exps,
                           train_transform=train_transform,
                           eval_transform=test_transform)
    # ---------

    # MODEL CREATION
    # load a model pre-trained on COCO
    model = torchvision.models.detection.fasterrcnn_resnet50_fpn(
        pretrained=True)

    # Just tune the box predictor
    for p in model.parameters():
        p.requires_grad = False

    # Replace the classifier with a new one, that has "num_classes" outputs
    num_classes = benchmark.n_classes + 1  # N classes + background
    # Get number of input features for the classifier
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    # Replace the pre-trained head with a new one
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)

    model = model.to(device)

    # Define the optimizer and the scheduler
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params,
                                lr=0.005,
                                momentum=0.9,
                                weight_decay=0.0005)

    train_mb_size = 5
    warmup_factor = 1.0 / 1000
    warmup_iters = min(
        1000,
        len(benchmark.train_stream[0].dataset) // train_mb_size - 1)
    lr_scheduler = torch.optim.lr_scheduler.LinearLR(
        optimizer, start_factor=warmup_factor, total_iters=warmup_iters)

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = ObjectDetectionTemplate(
        model=model,
        optimizer=optimizer,
        train_mb_size=train_mb_size,
        train_epochs=1,
        eval_mb_size=train_mb_size,
        device=device,
        plugins=[
            LRSchedulerPlugin(lr_scheduler,
                              step_granularity='iteration',
                              first_exp_only=True,
                              first_epoch_only=True)
        ],
        evaluator=EvaluationPlugin(timing_metrics(epoch=True),
                                   loss_metrics(epoch_running=True),
                                   make_lvis_metrics(),
                                   loggers=[InteractiveLogger()]))

    # TRAINING LOOP
    print("Starting experiment...")
    for i, experience in enumerate(benchmark.train_stream):
        print("Start of experience: ", experience.current_experience)
        print('Train dataset contains', len(experience.dataset), 'instances')

        cl_strategy.train(experience, num_workers=8)
        print("Training completed")

        cl_strategy.eval(benchmark.test_stream, num_workers=8)
        print('Evaluation completed')