Example #1
    def test_SplitCifar100_benchmark_download_once(self):
        global CIFAR100_DOWNLOADS, CIFAR10_DOWNLOADS
        CIFAR100_DOWNLOADS = 0
        CIFAR10_DOWNLOADS = 0

        benchmark = SplitCIFAR100(5)
        self.assertEqual(5, len(benchmark.train_stream))
        self.assertEqual(5, len(benchmark.test_stream))

        self.assertEqual(1, CIFAR100_DOWNLOADS)
        self.assertEqual(0, CIFAR10_DOWNLOADS)

    def test_SplitCifar100_scenario_download_once(self):
        global CIFAR100_DOWNLOADS, CIFAR10_DOWNLOADS
        CIFAR100_DOWNLOADS = 0
        CIFAR10_DOWNLOADS = 0

        scenario = SplitCIFAR100(5)
        self.assertEqual(5, len(scenario.train_stream))
        self.assertEqual(5, len(scenario.test_stream))

        self.assertEqual(1, CIFAR100_DOWNLOADS)
        self.assertEqual(0, CIFAR10_DOWNLOADS)
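
The CIFAR100_DOWNLOADS and CIFAR10_DOWNLOADS globals above are maintained by instrumentation defined elsewhere in the test module and not shown here. A minimal sketch of one way such counters could be wired, assuming the benchmark builds on torchvision's CIFAR datasets, is to wrap the archive-fetching helper those datasets call internally; the patching target and setup below are assumptions, not the test suite's actual code.

from unittest import mock

import torchvision.datasets.cifar as tv_cifar

CIFAR100_DOWNLOADS = 0
CIFAR10_DOWNLOADS = 0

_real_fetch = tv_cifar.download_and_extract_archive


def _counting_fetch(url, *args, **kwargs):
    # Bump the matching counter, then delegate to the real helper.
    # Check "cifar-100" first because that URL also contains "cifar-10".
    global CIFAR100_DOWNLOADS, CIFAR10_DOWNLOADS
    if "cifar-100" in url:
        CIFAR100_DOWNLOADS += 1
    elif "cifar-10" in url:
        CIFAR10_DOWNLOADS += 1
    return _real_fetch(url, *args, **kwargs)


# Started from setUpModule() with download_counter_patch.start() and stopped
# again in tearDownModule().
download_counter_patch = mock.patch.object(
    tv_cifar, "download_and_extract_archive", _counting_fetch
)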
def evaluate_on_cifar_100(
    *,
    method_name: str,
    plugins: List[StrategyPlugin],
    tb_dir: str = str(TB_DIR),
    seed: int = 42,
    verbose: bool = False,
    train_epochs: int = 70,
    n_classes_per_batch: int = 10,
    start_lr: float = 2.0,
    lr_milestones: Optional[List[int]] = None,
    lr_gamma: float = 0.2,
):
    assert N_CLASSES % n_classes_per_batch == 0, "N_CLASSES must be a multiple of n_classes_per_batch"

    # Class-incremental benchmark: the N_CLASSES classes are split evenly across experiences.
    scenario = SplitCIFAR100(n_experiences=N_CLASSES // n_classes_per_batch)
    model = ResNet32(n_classes=N_CLASSES)

    tb_logger = TensorboardLogger(tb_dir + f"/cifar100_{n_classes_per_batch}/{method_name}/{seed}_{create_time_id()}")

    loggers = [tb_logger]
    if verbose:
        loggers.append(InteractiveLogger())

    # Naive is plain fine-tuning; any continual-learning behaviour comes from
    # the caller-supplied plugins plus the LR scheduler plugin below.
    strategy = Naive(
        model=model,
        optimizer=SGD(model.parameters(), lr=start_lr, weight_decay=0.00001),
        criterion=CrossEntropyLoss(),
        train_epochs=train_epochs,
        train_mb_size=128,
        device=device,
        plugins=plugins + [LRSchedulerPlugin(start_lr=start_lr, milestones=lr_milestones, gamma=lr_gamma)],
        evaluator=EvaluationPlugin(
            [
                NormalizedStreamAccuracy(),
                NormalizedExperienceAccuracy(),
                ExperienceMeanRepresentationShift(MeanL2RepresentationShift()),
                ExperienceMeanRepresentationShift(MeanCosineRepresentationShift()),
            ],
            StreamConfusionMatrix(
                num_classes=N_CLASSES,
                image_creator=SortedCMImageCreator(scenario.classes_order),
            ),
            loggers=loggers,
        ),
    )

    # Train on each experience in sequence, then evaluate on every test
    # experience seen so far.
    for i, train_task in enumerate(scenario.train_stream, 1):
        strategy.train(train_task, num_workers=0)
        strategy.eval(scenario.test_stream[:i])

    tb_logger.writer.flush()
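
The snippet below is a hypothetical invocation of evaluate_on_cifar_100, included only to illustrate the call signature; the method name, empty plugin list, and milestone schedule are illustrative assumptions rather than values taken from the original experiments.

if __name__ == "__main__":
    # Plain fine-tuning baseline: no continual-learning plugins, a step
    # schedule late in each 70-epoch run, and verbose console logging.
    evaluate_on_cifar_100(
        method_name="naive_baseline",
        plugins=[],
        train_epochs=70,
        n_classes_per_batch=10,
        start_lr=2.0,
        lr_milestones=[49, 63],
        lr_gamma=0.2,
        verbose=True,
    )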
Example #4
    def test_SplitCifar100_benchmark(self):
        benchmark = SplitCIFAR100(5)
        self.assertEqual(5, len(benchmark.train_stream))
        self.assertEqual(5, len(benchmark.test_stream))

        train_sz = 0
        for experience in benchmark.train_stream:
            self.assertIsInstance(experience, Experience)
            train_sz += len(experience.dataset)

            # Regression test for 575
            load_experience_train_eval(experience)

        self.assertEqual(50000, train_sz)

        test_sz = 0
        for experience in benchmark.test_stream:
            self.assertIsInstance(experience, Experience)
            test_sz += len(experience.dataset)

            # Regression test for 575
            load_experience_train_eval(experience)

        self.assertEqual(10000, test_sz)
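
load_experience_train_eval is a helper from the surrounding test utilities and is not shown here. A minimal sketch of what such a helper could do, assuming the experience dataset exposes Avalanche's train()/eval() transform groups, is to pull one mini-batch in each mode so that a broken transform pipeline fails loudly; the signature and batch size below are assumptions.

from torch.utils.data import DataLoader


def load_experience_train_eval(experience, batch_size=32):
    # Load a single mini-batch with the train transforms and again with the
    # eval transforms; either call raising is the regression being guarded.
    for as_eval in (False, True):
        dataset = experience.dataset.eval() if as_eval else experience.dataset.train()
        loader = DataLoader(dataset, batch_size=batch_size)
        next(iter(loader))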