Example #1
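
The snippet below is the setUpClass body of a metrics regression test (the method lives in a unittest.TestCase subclass, not shown): it builds three synthetic classification tasks, trains a BaseStrategy with every standard metric family enabled, and loads a pickled reference for the test methods to compare against. A minimal sketch of the imports and module-level globals the snippet assumes; the module paths follow the Avalanche 0.1.x-era API and may have moved in newer releases, and the DEVICE definition is an assumption, since it is not shown in the snippet:

import os
import pathlib
import pickle
import random

import numpy as np
import torch
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from torch.nn import CrossEntropyLoss
from torch.optim import SGD

from avalanche.benchmarks.generators import dataset_benchmark
from avalanche.benchmarks.utils import (AvalancheDatasetType,
                                        AvalancheTensorDataset)
from avalanche.evaluation.metrics import (
    MAC_metrics, accuracy_metrics, bwt_metrics, confusion_matrix_metrics,
    cpu_usage_metrics, disk_usage_metrics, forgetting_metrics,
    forward_transfer_metrics, loss_metrics, ram_usage_metrics, timing_metrics)
from avalanche.logging import TextLogger
from avalanche.models import SimpleMLP
from avalanche.training.plugins import EvaluationPlugin
from avalanche.training.strategies import BaseStrategy

# Assumption: the test module defines the target device at module level.
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
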
    @classmethod
    def setUpClass(cls) -> None:
        torch.manual_seed(0)
        np.random.seed(0)
        random.seed(0)

        n_samples_per_class = 100
        datasets = []
        for i in range(3):
            dataset = make_classification(n_samples=3 * n_samples_per_class,
                                          n_classes=3,
                                          n_features=3,
                                          n_informative=3,
                                          n_redundant=0)
            X = torch.from_numpy(dataset[0]).float()
            y = torch.from_numpy(dataset[1]).long()
            train_X, test_X, train_y, test_y = train_test_split(X,
                                                                y,
                                                                train_size=0.5,
                                                                shuffle=True,
                                                                stratify=y)
            datasets.append((train_X, train_y, test_X, test_y))

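        # Wrap each split as a classification AvalancheTensorDataset and
        # attach one random task label per pattern (150 patterns per split).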
        tr_ds = [
            AvalancheTensorDataset(
                tr_X,
                tr_y,
                dataset_type=AvalancheDatasetType.CLASSIFICATION,
                task_labels=torch.randint(0, 3, (150, )).tolist())
            for tr_X, tr_y, _, _ in datasets
        ]
        ts_ds = [
            AvalancheTensorDataset(
                ts_X,
                ts_y,
                dataset_type=AvalancheDatasetType.CLASSIFICATION,
                task_labels=torch.randint(0, 3, (150, )).tolist())
            for _, _, ts_X, ts_y in datasets
        ]
        benchmark = dataset_benchmark(train_datasets=tr_ds,
                                      test_datasets=ts_ds)
        model = SimpleMLP(num_classes=3, input_size=3)

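        # Log every metric family to a text file and keep all recorded
        # curves in memory (collect_all=True) for later comparison.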
        f = open('log.txt', 'w')
        text_logger = TextLogger(f)
        eval_plugin = EvaluationPlugin(
            accuracy_metrics(minibatch=True,
                             epoch=True,
                             epoch_running=True,
                             experience=True,
                             stream=True,
                             trained_experience=True),
            loss_metrics(minibatch=True,
                         epoch=True,
                         epoch_running=True,
                         experience=True,
                         stream=True),
            forgetting_metrics(experience=True, stream=True),
            confusion_matrix_metrics(num_classes=3,
                                     save_image=False,
                                     normalize='all',
                                     stream=True),
            bwt_metrics(experience=True, stream=True),
            forward_transfer_metrics(experience=True, stream=True),
            cpu_usage_metrics(minibatch=True,
                              epoch=True,
                              epoch_running=True,
                              experience=True,
                              stream=True),
            timing_metrics(minibatch=True,
                           epoch=True,
                           epoch_running=True,
                           experience=True,
                           stream=True),
            ram_usage_metrics(every=0.5,
                              minibatch=True,
                              epoch=True,
                              experience=True,
                              stream=True),
            disk_usage_metrics(minibatch=True,
                               epoch=True,
                               experience=True,
                               stream=True),
            MAC_metrics(minibatch=True, epoch=True, experience=True),
            loggers=[text_logger],
            collect_all=True)  # collect all metrics (set to True by default)
        cl_strategy = BaseStrategy(model,
                                   SGD(model.parameters(),
                                       lr=0.001,
                                       momentum=0.9),
                                   CrossEntropyLoss(),
                                   train_mb_size=2,
                                   train_epochs=2,
                                   eval_mb_size=2,
                                   device=DEVICE,
                                   evaluator=eval_plugin,
                                   eval_every=1)
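        # Train on each experience; eval_every=1 also runs evaluation on
        # the given eval stream after every training epoch.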
        for i, experience in enumerate(benchmark.train_stream):
            cl_strategy.train(experience,
                              eval_streams=[benchmark.test_stream],
                              shuffle=False)
            cl_strategy.eval(benchmark.test_stream)
        cls.all_metrics = cl_strategy.evaluator.get_all_metrics()
        f.close()
        # # Uncomment me to regenerate the reference metrics. Make sure
        # # the old tests pass for all unchanged metrics first.
        # with open(os.path.join(pathlib.Path(__file__).parent.absolute(),
        #                        'target_metrics',
        #                        'tpp.pickle'), 'wb') as f:
        #     pickle.dump(dict(cls.all_metrics), f,
        #                 protocol=4)
        with open(
                os.path.join(
                    pathlib.Path(__file__).parent.absolute(), 'target_metrics',
                    'tpp.pickle'), 'rb') as f:
            cls.ref = pickle.load(f)
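
The fixture stores every recorded curve in cls.all_metrics so individual test methods can compare each metric against the pickled reference; Example #3 below follows the same pattern with an explicit regeneration flag.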
Example #2
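
A complete main function from an evaluation-plugin walkthrough: it builds a 5-experience class-incremental MNIST benchmark, attaches interactive, text-file, and CSV loggers, and trains a Naive strategy with every metric family enabled. A sketch of the imports the function assumes, using the same 0.1.x-era Avalanche paths as above:

from os.path import expanduser

import torch
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from torchvision import transforms
from torchvision.datasets import MNIST
from torchvision.transforms import RandomCrop, ToTensor

from avalanche.benchmarks.generators import nc_benchmark
from avalanche.evaluation.metrics import (
    MAC_metrics, accuracy_metrics, bwt_metrics, cpu_usage_metrics,
    disk_usage_metrics, forgetting_metrics, forward_transfer_metrics,
    gpu_usage_metrics, loss_metrics, ram_usage_metrics, timing_metrics)
from avalanche.logging import CSVLogger, InteractiveLogger, TextLogger
from avalanche.models import SimpleMLP
from avalanche.training.plugins import EvaluationPlugin
from avalanche.training.strategies import Naive
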
def main(args):
    # --- CONFIG
    device = torch.device(f"cuda:{args.cuda}" if torch.cuda.is_available()
                          and args.cuda >= 0 else "cpu")
    # ---------

    # --- TRANSFORMATIONS
    train_transform = transforms.Compose([
        RandomCrop(28, padding=4),
        ToTensor(),
        transforms.Normalize((0.1307, ), (0.3081, ))
    ])
    test_transform = transforms.Compose(
        [ToTensor(), transforms.Normalize((0.1307, ), (0.3081, ))])
    # ---------

    # --- SCENARIO CREATION
    mnist_train = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                        train=True,
                        download=True,
                        transform=train_transform)
    mnist_test = MNIST(root=expanduser("~") + "/.avalanche/data/mnist/",
                       train=False,
                       download=True,
                       transform=test_transform)
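    # Split MNIST into 5 class-incremental experiences; the third
    # positional argument of nc_benchmark is n_experiences.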
    scenario = nc_benchmark(mnist_train,
                            mnist_test,
                            5,
                            task_labels=False,
                            seed=1234)
    # ---------

    # MODEL CREATION
    model = SimpleMLP(num_classes=scenario.n_classes)

    # DEFINE THE EVALUATION PLUGIN AND LOGGER
    # The evaluation plugin manages the metrics computation.
    # It takes as argument a list of metrics and a list of loggers.
    # The evaluation plugin calls the loggers to serialize the metrics
    # and save them in persistent memory or print them in the standard output.

    # log to text file
    text_logger = TextLogger(open('log.txt', 'a'))

    # print to stdout
    interactive_logger = InteractiveLogger()

    csv_logger = CSVLogger()

    eval_plugin = EvaluationPlugin(
        accuracy_metrics(minibatch=True,
                         epoch=True,
                         epoch_running=True,
                         experience=True,
                         stream=True),
        loss_metrics(minibatch=True,
                     epoch=True,
                     epoch_running=True,
                     experience=True,
                     stream=True),
        forgetting_metrics(experience=True, stream=True),
        bwt_metrics(experience=True, stream=True),
        forward_transfer_metrics(experience=True, stream=True),
        cpu_usage_metrics(minibatch=True,
                          epoch=True,
                          epoch_running=True,
                          experience=True,
                          stream=True),
        timing_metrics(minibatch=True,
                       epoch=True,
                       epoch_running=True,
                       experience=True,
                       stream=True),
        ram_usage_metrics(every=0.5,
                          minibatch=True,
                          epoch=True,
                          experience=True,
                          stream=True),
        gpu_usage_metrics(args.cuda,
                          every=0.5,
                          minibatch=True,
                          epoch=True,
                          experience=True,
                          stream=True),
        disk_usage_metrics(minibatch=True,
                           epoch=True,
                           experience=True,
                           stream=True),
        MAC_metrics(minibatch=True, epoch=True, experience=True),
        loggers=[interactive_logger, text_logger, csv_logger],
        collect_all=True)  # collect all metrics (set to True by default)

    # CREATE THE STRATEGY INSTANCE (NAIVE)
    cl_strategy = Naive(model,
                        SGD(model.parameters(), lr=0.001, momentum=0.9),
                        CrossEntropyLoss(),
                        train_mb_size=500,
                        train_epochs=1,
                        eval_mb_size=100,
                        device=device,
                        evaluator=eval_plugin,
                        eval_every=1)

    # TRAINING LOOP
    print('Starting experiment...')
    results = []
    for i, experience in enumerate(scenario.train_stream):
        print("Start of experience: ", experience.current_experience)
        print("Current Classes: ", experience.classes_in_this_experience)

        # train returns a dictionary containing last recorded value
        # for each metric.
        res = cl_strategy.train(experience,
                                eval_streams=[scenario.test_stream])
        print('Training completed')

        print('Computing accuracy on the whole test set')
        # test returns a dictionary with the last metric collected during
        # evaluation on that stream
        results.append(cl_strategy.eval(scenario.test_stream))

    print(f"Test metrics:\n{results}")

    # Dict with all the metric curves,
    # only available when `collect_all` is True.
    # Each entry is a (x, metric value) tuple.
    # You can use this dictionary to manipulate the
    # metrics without avalanche.
    all_metrics = cl_strategy.evaluator.get_all_metrics()
    print(f"Stored metrics: {list(all_metrics.keys())}")
Example #3
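
A multi-task variant of the fixture in Example #1: a single synthetic dataset is split by nc_benchmark into three task-labeled experiences, and the pickled reference can be regenerated through an UPDATE_METRICS flag. A sketch of the imports and globals the snippet assumes; the UPDATE_METRICS and DEVICE definitions are assumptions reconstructed from the surrounding code, not shown in the original:

import os
import pathlib
import pickle
import random

import numpy as np
import torch
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from torch.utils.data import TensorDataset

from avalanche.benchmarks.generators import nc_benchmark
from avalanche.evaluation.metrics import (
    MAC_metrics, accuracy_metrics, bwt_metrics, confusion_matrix_metrics,
    cpu_usage_metrics, disk_usage_metrics, forgetting_metrics,
    forward_transfer_metrics, loss_metrics, ram_usage_metrics, timing_metrics)
from avalanche.logging import TextLogger
from avalanche.models import SimpleMLP
from avalanche.training.plugins import EvaluationPlugin
from avalanche.training.strategies import BaseStrategy

# Assumptions: the regeneration flag is read from the environment and the
# target device is defined at module level.
UPDATE_METRICS = os.environ.get("UPDATE_METRICS", "False").lower() == "true"
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
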
    @classmethod
    def setUpClass(cls) -> None:
        torch.manual_seed(0)
        np.random.seed(0)
        random.seed(0)

        n_samples_per_class = 100
        dataset = make_classification(
            n_samples=6 * n_samples_per_class,
            n_classes=6,
            n_features=4,
            n_informative=4,
            n_redundant=0,
        )
        X = torch.from_numpy(dataset[0]).float()
        y = torch.from_numpy(dataset[1]).long()
        train_X, test_X, train_y, test_y = train_test_split(X,
                                                            y,
                                                            train_size=0.5,
                                                            shuffle=True,
                                                            stratify=y)
        tr_d = TensorDataset(train_X, train_y)
        ts_d = TensorDataset(test_X, test_y)
        benchmark = nc_benchmark(
            train_dataset=tr_d,
            test_dataset=ts_d,
            n_experiences=3,
            task_labels=True,
            shuffle=False,
            seed=0,
        )
        model = SimpleMLP(input_size=4, num_classes=benchmark.n_classes)

        f = open("log.txt", "w")
        text_logger = TextLogger(f)
        eval_plugin = EvaluationPlugin(
            accuracy_metrics(
                minibatch=True,
                epoch=True,
                epoch_running=True,
                experience=True,
                stream=True,
                trained_experience=True,
            ),
            loss_metrics(
                minibatch=True,
                epoch=True,
                epoch_running=True,
                experience=True,
                stream=True,
            ),
            forgetting_metrics(experience=True, stream=True),
            confusion_matrix_metrics(num_classes=6,
                                     save_image=False,
                                     normalize="all",
                                     stream=True),
            bwt_metrics(experience=True, stream=True),
            forward_transfer_metrics(experience=True, stream=True),
            cpu_usage_metrics(
                minibatch=True,
                epoch=True,
                epoch_running=True,
                experience=True,
                stream=True,
            ),
            timing_metrics(
                minibatch=True,
                epoch=True,
                epoch_running=True,
                experience=True,
                stream=True,
            ),
            ram_usage_metrics(
                every=0.5,
                minibatch=True,
                epoch=True,
                experience=True,
                stream=True,
            ),
            disk_usage_metrics(minibatch=True,
                               epoch=True,
                               experience=True,
                               stream=True),
            MAC_metrics(minibatch=True, epoch=True, experience=True),
            loggers=[text_logger],
            collect_all=True,
        )  # collect all metrics (set to True by default)
        cl_strategy = BaseStrategy(
            model,
            SGD(model.parameters(), lr=0.001, momentum=0.9),
            CrossEntropyLoss(),
            train_mb_size=10,
            train_epochs=2,
            eval_mb_size=10,
            device=DEVICE,
            evaluator=eval_plugin,
            eval_every=1,
        )
        for i, experience in enumerate(benchmark.train_stream):
            cl_strategy.train(experience,
                              eval_streams=[benchmark.test_stream],
                              shuffle=False)
            cl_strategy.eval(benchmark.test_stream)
        cls.all_metrics = cl_strategy.evaluator.get_all_metrics()
        f.close()
        # Set the environment variable UPDATE_METRICS to True to regenerate
        # the pickle file with target values. Make sure the old tests pass
        # for all unchanged metrics before updating the reference.
        if UPDATE_METRICS:
            with open(
                    os.path.join(
                        pathlib.Path(__file__).parent.absolute(),
                        "target_metrics",
                        "mt.pickle",
                    ),
                    "wb",
            ) as f:
                pickle.dump(dict(cls.all_metrics), f, protocol=4)
        with open(
                os.path.join(
                    pathlib.Path(__file__).parent.absolute(),
                    "target_metrics",
                    "mt.pickle",
                ),
                "rb",
        ) as f:
            cls.ref = pickle.load(f)