Example #1
    def make_train_dataloader(self,
                              num_workers=0,
                              shuffle=True,
                              pin_memory=True,
                              persistent_workers=False,
                              **kwargs):
        """Data loader initialization.

        Called at the start of each learning experience after the dataset
        adaptation.

        :param num_workers: number of thread workers for the data loading.
        :param shuffle: True if the data should be shuffled, False otherwise.
        :param pin_memory: If True, the data loader will copy Tensors into CUDA
            pinned memory before returning them. Defaults to True.
        :param persistent_workers: If True, the data loader will not shutdown
            the worker processes after a dataset has been consumed once.
            Used only if `PyTorch >= 1.7.0`.
        """

        other_dataloader_args = {}

        if parse_version(torch.__version__) >= parse_version('1.7.0'):
            other_dataloader_args['persistent_workers'] = persistent_workers

        self.dataloader = TaskBalancedDataLoader(self.adapted_dataset,
                                                 oversample_small_groups=True,
                                                 num_workers=num_workers,
                                                 batch_size=self.train_mb_size,
                                                 shuffle=shuffle,
                                                 pin_memory=pin_memory,
                                                 **other_dataloader_args)
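
Example #1 is an excerpt from a strategy class, so the names it relies on are imported elsewhere in the original file. A minimal sketch of those imports follows; the exact module paths (in particular where parse_version and TaskBalancedDataLoader come from) are assumptions and may differ across Avalanche and setuptools/packaging versions.

# Sketch of the imports Example #1 assumes; module paths may vary by version.
import torch
from pkg_resources import parse_version  # packaging.version.parse is an alternative
from avalanche.benchmarks.utils.data_loader import TaskBalancedDataLoader
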
Example #2
    def make_train_dataloader(self, shuffle=True, **kwargs):
        # you can override make_train_dataloader to change the
        # strategy's dataloader
        # remember to iterate over self.adapted_dataset
        self.dataloader = TaskBalancedDataLoader(
            self.adapted_dataset, batch_size=self.train_mb_size
        )
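
As the comments note, make_train_dataloader is meant to be overridden on a strategy subclass. A minimal sketch of such a subclass follows, assuming the Naive strategy used in Example #10; the import paths are assumptions and may differ between Avalanche releases. Only the data-loading hook changes, so the rest of the training loop is inherited unchanged.

# Sketch: a strategy subclass that swaps in a task-balanced train loader.
# Import paths are assumptions and may differ between Avalanche releases.
from avalanche.benchmarks.utils.data_loader import TaskBalancedDataLoader
from avalanche.training import Naive

class TaskBalancedNaive(Naive):
    def make_train_dataloader(self, shuffle=True, **kwargs):
        # Iterate over self.adapted_dataset, as the comment above recommends.
        self.dataloader = TaskBalancedDataLoader(
            self.adapted_dataset,
            batch_size=self.train_mb_size,
            shuffle=shuffle,
        )
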
Example #3
    def make_train_dataloader(self,
                              num_workers=0,
                              shuffle=True,
                              pin_memory=True,
                              persistent_workers=False,
                              **kwargs):
        """Data loader initialization.

        Called at the start of each learning experience after the dataset
        adaptation.

        :param num_workers: number of thread workers for the data loading.
        :param shuffle: True if the data should be shuffled, False otherwise.
        :param pin_memory: If True, the data loader will copy Tensors into CUDA
            pinned memory before returning them. Defaults to True.
        :param persistent_workers: If True, the data loader will not shutdown
            the worker processes after a dataset has been consumed once.
            Used only if `PyTorch >= 1.7.0`.
        """

        other_dataloader_args = {}

        if parse_version(torch.__version__) >= parse_version("1.7.0"):
            other_dataloader_args["persistent_workers"] = persistent_workers

        self.dataloader = TaskBalancedDataLoader(
            self.adapted_dataset,
            oversample_small_groups=True,
            num_workers=num_workers,
            batch_size=self.train_mb_size,
            shuffle=shuffle,
            pin_memory=pin_memory,
            collate_mbatches=detection_collate_mbatches_fn,
            collate_fn=detection_collate_fn,
            **other_dataloader_args)
Example #4
    def make_train_dataloader(self,
                              num_workers=0,
                              shuffle=True,
                              pin_memory=True,
                              **kwargs):
        self.dataloader = TaskBalancedDataLoader(
            self.adapted_dataset,
            oversample_small_groups=True,
            num_workers=num_workers,
            batch_size=self.train_mb_size,
            shuffle=shuffle,
            pin_memory=pin_memory)
Example #5
    def make_train_dataloader(self, num_workers=0, shuffle=True, **kwargs):
        """
        Called after the dataset adaptation. Initializes the data loader.

        :param num_workers: number of thread workers for the data loading.
        :param shuffle: True if the data should be shuffled, False otherwise.
        """
        self.dataloader = TaskBalancedDataLoader(
            self.adapted_dataset,
            oversample_small_groups=True,
            num_workers=num_workers,
            batch_size=self.train_mb_size,
            shuffle=shuffle)
Example #6
    def _verify_rop_tests_reproducibility(self, init_strategy, n_epochs,
                                          criterion):
        # This doesn't actually test the support for the specific scheduler
        # (ReduceLROnPlateau), but it's only used to check if:
        # - the same model+benchmark pair can be instantiated in a
        #   deterministic way.
        # - the same results could be obtained in a standard training loop in a
        #   deterministic way.
        models_rnd = []
        benchmarks_rnd = []
        for _ in range(2):
            benchmark, model = init_strategy()
            models_rnd.append(model)
            benchmarks_rnd.append(benchmark)

        self.assert_model_equals(*models_rnd)
        self.assert_benchmark_equals(*benchmarks_rnd)

        expected_lrs_rnd = []
        for _ in range(2):
            benchmark, model = init_strategy()

            expected_lrs = []
            model.train()
            for exp in benchmark.train_stream:
                optimizer = SGD(model.parameters(), lr=0.001)
                scheduler = ReduceLROnPlateau(optimizer)
                expected_lrs.append([])
                train_loss = Mean()
                for epoch in range(n_epochs):
                    train_loss.reset()
                    for x, y, t in TaskBalancedDataLoader(
                            exp.dataset,
                            oversample_small_groups=True,
                            num_workers=0,
                            batch_size=32,
                            shuffle=False,
                            pin_memory=False,
                    ):
                        optimizer.zero_grad()
                        outputs = model(x)
                        loss = criterion(outputs, y)
                        train_loss.update(loss, weight=len(x))
                        loss.backward()
                        optimizer.step()

                        for group in optimizer.param_groups:
                            expected_lrs[-1].append(group["lr"])
                            break
                    scheduler.step(train_loss.result())

            expected_lrs_rnd.append(expected_lrs)
        self.assertEqual(expected_lrs_rnd[0], expected_lrs_rnd[1])
Example #7
    def test_basic(self):
        benchmark = get_fast_benchmark()
        ds = [el.dataset for el in benchmark.train_stream]
        data = AvalancheConcatDataset(ds)
        dl = TaskBalancedDataLoader(data)
        for el in dl:
            pass

        dl = GroupBalancedDataLoader(ds)
        for el in dl:
            pass

        dl = ReplayDataLoader(data, data)
        for el in dl:
            pass
Example #8
    def test_basic(self):
        scenario = get_fast_scenario()
        ds = [el.dataset for el in scenario.train_stream]
        data = AvalancheConcatDataset(ds)
        dl = TaskBalancedDataLoader(data)
        for el in dl:
            pass

        dl = GroupBalancedDataLoader(ds)
        for el in dl:
            pass

        dl = ReplayDataLoader(data, data)
        for el in dl:
            pass
Example #9
    def make_train_dataloader(self, num_workers=0, shuffle=True,
                              pin_memory=True, **kwargs):
        """
        Called after the dataset adaptation. Initializes the data loader.

        :param num_workers: number of thread workers for the data loading.
        :param shuffle: True if the data should be shuffled, False otherwise.
        :param pin_memory: If True, the data loader will copy Tensors into CUDA
            pinned memory before returning them. Defaults to True.
        """
        self.dataloader = TaskBalancedDataLoader(
            self.adapted_dataset,
            oversample_small_groups=True,
            num_workers=num_workers,
            batch_size=self.train_mb_size,
            shuffle=shuffle,
            pin_memory=pin_memory)
Example #10
    def test_dataload_batch_balancing(self):
        benchmark = get_fast_benchmark()
        batch_size = 32
        replayPlugin = ReplayPlugin(mem_size=20)

        model = SimpleMLP(input_size=6, hidden_size=10)
        cl_strategy = Naive(
            model,
            SGD(model.parameters(), lr=0.001, momentum=0.9,
                weight_decay=0.001),
            CrossEntropyLoss(),
            train_mb_size=batch_size,
            train_epochs=1,
            eval_mb_size=100,
            plugins=[replayPlugin],
        )
        for step in benchmark.train_stream:
            adapted_dataset = step.dataset
            if len(replayPlugin.storage_policy.buffer) > 0:
                dataloader = ReplayDataLoader(
                    adapted_dataset,
                    replayPlugin.storage_policy.buffer,
                    oversample_small_tasks=True,
                    num_workers=0,
                    batch_size=batch_size,
                    shuffle=True,
                )
            else:
                dataloader = TaskBalancedDataLoader(adapted_dataset)

            for mini_batch in dataloader:
                mb_task_labels = mini_batch[-1]
                lengths = []
                for task_id in adapted_dataset.task_set:
                    len_task = (mb_task_labels == task_id).sum()
                    lengths.append(len_task)
                if sum(lengths) == batch_size:
                    difference = max(lengths) - min(lengths)
                    self.assertLessEqual(difference, 1)
                self.assertLessEqual(sum(lengths), batch_size)
            cl_strategy.train(step)
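
The property asserted above (per-task counts within a full minibatch differ by at most one) can also be checked directly on a loader, outside the test harness. The sketch below reuses the concatenation pattern from Example #7 and the task_set-based counting from this test; it assumes benchmark, batch_size, AvalancheConcatDataset, and TaskBalancedDataLoader are available as in the surrounding examples and that the concatenated dataset exposes task_set.

# Sketch: verify that full minibatches are (almost) evenly split across tasks.
ds = [exp.dataset for exp in benchmark.train_stream]
data = AvalancheConcatDataset(ds)   # multiple task groups, as in Example #7
loader = TaskBalancedDataLoader(data, batch_size=batch_size, shuffle=True)
for mini_batch in loader:
    task_labels = mini_batch[-1]    # task labels come last in Avalanche minibatches
    lengths = [(task_labels == task_id).sum().item() for task_id in data.task_set]
    if sum(lengths) == batch_size:  # only full minibatches are expected to balance
        assert max(lengths) - min(lengths) <= 1
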
Example #11
if __name__ == "__main__":
    benchmark = SplitMNIST(n_experiences=5)

    model = SimpleMLP(input_size=784, hidden_size=10)
    opt = SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=0.001)

    # we use our custom strategy to change the dataloading policy.
    cl_strategy = MyCumulativeStrategy(
        model,
        opt,
        CrossEntropyLoss(),
        train_epochs=1,
        train_mb_size=512,
        eval_mb_size=512,
    )

    for step in benchmark.train_stream:
        cl_strategy.train(step)
        cl_strategy.eval(step)

    # If you don't use avalanche's strategies you can also use the dataloader
    # directly to iterate the data
    data = step.dataset
    dl = TaskBalancedDataLoader(data)
    for x, y, t in dl:
        # by default minibatches in Avalanche have the form <x, y, ..., t>
        # with arbitrary additional tensors between y and t.
        print(x, y, t)
        break
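
As the final comment notes, Avalanche minibatches have the form <x, y, ..., t>, with the task labels last. Positional unpacking therefore works even when extra tensors are present; a short sketch over the same loader:

# Sketch: unpack a minibatch without assuming how many extra tensors sit
# between the targets y and the task labels t.
for mbatch in dl:
    x, y, *extras, t = mbatch  # extras is empty for plain <x, y, t> minibatches
    print(x.shape, y.shape, t.shape, len(extras))
    break
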
Example #12
    def test_scheduler_reduce_on_plateau_plugin_with_val_stream(self):
        # Regression test for issue #858 (part 2)
        n_epochs = 20
        criterion = CrossEntropyLoss()

        def _prepare_rng_critical_parts(seed=1234):
            torch.random.manual_seed(seed)
            initial_benchmark = PluginTests.create_benchmark(seed=seed)
            val_benchmark = benchmark_with_validation_stream(
                initial_benchmark, 0.3, shuffle=True
            )
            return (val_benchmark, _PlainMLP(input_size=6, hidden_size=10))

        self._verify_rop_tests_reproducibility(
            _prepare_rng_critical_parts, n_epochs, criterion
        )

        # Everything is in order, now we can test the plugin support for the
        # ReduceLROnPlateau scheduler!
        for reset_lr, reset_scheduler in itertools.product(
            (True, False), (True, False)
        ):
            with self.subTest(
                reset_lr=reset_lr, reset_scheduler=reset_scheduler
            ):
                # First, obtain the reference (expected) lr timeline by running
                # a plain PyTorch training loop with ReduceLROnPlateau.
                benchmark, model = _prepare_rng_critical_parts()

                expected_lrs = []

                optimizer = SGD(model.parameters(), lr=0.001)
                scheduler = ReduceLROnPlateau(optimizer)
                for exp_idx, exp in enumerate(benchmark.train_stream):
                    expected_lrs.append([])
                    model.train()
                    if reset_lr:
                        for group in optimizer.param_groups:
                            group["lr"] = 0.001

                    if reset_scheduler:
                        scheduler = ReduceLROnPlateau(optimizer)

                    for epoch in range(n_epochs):
                        for x, y, t in TaskBalancedDataLoader(
                            exp.dataset,
                            oversample_small_groups=True,
                            num_workers=0,
                            batch_size=32,
                            shuffle=False,
                            pin_memory=False,
                        ):
                            optimizer.zero_grad()
                            outputs = model(x)
                            loss = criterion(outputs, y)
                            loss.backward()
                            optimizer.step()
                        for group in optimizer.param_groups:
                            expected_lrs[-1].append(group["lr"])
                            break

                        val_loss = Mean()
                        val_exp = benchmark.valid_stream[exp_idx]

                        model.eval()
                        with torch.no_grad():
                            for x, y, t in DataLoader(
                                val_exp.dataset,
                                num_workers=0,
                                batch_size=100,
                                pin_memory=False,
                            ):
                                outputs = model(x)
                                loss = criterion(outputs, y)
                                val_loss.update(loss, weight=len(x))

                        scheduler.step(val_loss.result())

                # Now we have the correct timeline stored in expected_lrs
                # Let's test the plugin!
                benchmark, model = _prepare_rng_critical_parts()
                optimizer = SGD(model.parameters(), lr=0.001)
                scheduler = ReduceLROnPlateau(optimizer)

                PluginTests._test_scheduler_plugin(
                    benchmark,
                    model,
                    optimizer,
                    scheduler,
                    n_epochs,
                    reset_lr,
                    reset_scheduler,
                    expected_lrs,
                    criterion=criterion,
                    metric="val_loss",
                    eval_on_valid_stream=True,
                )
Example #13
    def test_scheduler_reduce_on_plateau_plugin(self):
        # Regression test for issue #858
        n_epochs = 20
        criterion = CrossEntropyLoss()

        def _prepare_rng_critical_parts(seed=1234):
            torch.random.manual_seed(seed)
            return (
                PluginTests.create_benchmark(seed=seed),
                _PlainMLP(input_size=6, hidden_size=10),
            )

        self._verify_rop_tests_reproducibility(
            _prepare_rng_critical_parts, n_epochs, criterion
        )

        # Everything is in order, now we can test the plugin support for the
        # ReduceLROnPlateau scheduler!

        for reset_lr, reset_scheduler in itertools.product(
            (True, False), (True, False)
        ):
            with self.subTest(
                reset_lr=reset_lr, reset_scheduler=reset_scheduler
            ):
                # First, obtain the reference (expected) lr timeline by running
                # a plain PyTorch training loop with ReduceLROnPlateau.
                benchmark, model = _prepare_rng_critical_parts()
                model.train()
                expected_lrs = []

                optimizer = SGD(model.parameters(), lr=0.001)
                scheduler = ReduceLROnPlateau(optimizer)
                for exp in benchmark.train_stream:
                    if reset_lr:
                        for group in optimizer.param_groups:
                            group["lr"] = 0.001

                    if reset_scheduler:
                        scheduler = ReduceLROnPlateau(optimizer)

                    expected_lrs.append([])
                    train_loss = Mean()
                    for epoch in range(n_epochs):
                        train_loss.reset()
                        for x, y, t in TaskBalancedDataLoader(
                            exp.dataset,
                            oversample_small_groups=True,
                            num_workers=0,
                            batch_size=32,
                            shuffle=False,
                            pin_memory=False,
                        ):
                            optimizer.zero_grad()
                            outputs = model(x)
                            loss = criterion(outputs, y)
                            train_loss.update(loss, weight=len(x))
                            loss.backward()
                            optimizer.step()
                        scheduler.step(train_loss.result())
                        for group in optimizer.param_groups:
                            expected_lrs[-1].append(group["lr"])
                            break

                # Now we have the correct timeline stored in expected_lrs.
                # Let's test the plugin!
                benchmark, model = _prepare_rng_critical_parts()
                optimizer = SGD(model.parameters(), lr=0.001)
                scheduler = ReduceLROnPlateau(optimizer)

                PluginTests._test_scheduler_plugin(
                    benchmark,
                    model,
                    optimizer,
                    scheduler,
                    n_epochs,
                    reset_lr,
                    reset_scheduler,
                    expected_lrs,
                    criterion=criterion,
                    metric="train_loss",
                )

        # Other tests
        benchmark, model = _prepare_rng_critical_parts()
        optimizer = SGD(model.parameters(), lr=0.001)
        scheduler = ReduceLROnPlateau(optimizer)
        scheduler2 = MultiStepLR(optimizer, [1, 2, 3])

        # The metric must be set
        with self.assertRaises(Exception):
            LRSchedulerPlugin(scheduler, metric=None)

        # Doesn't make sense to set the metric when using a non-metric
        # based scheduler (should warn)
        with self.assertWarns(Warning):
            LRSchedulerPlugin(scheduler2, metric="train_loss")

        # Must raise an error on unsupported metric
        with self.assertRaises(Exception):
            LRSchedulerPlugin(scheduler, metric="cuteness")
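
Taken together, these checks say that a metric-driven scheduler such as ReduceLROnPlateau needs an explicit, supported metric ("train_loss" or "val_loss" in these tests), while step-based schedulers should be constructed without one. A minimal sketch of the accepted configurations, assuming LRSchedulerPlugin is importable from avalanche.training.plugins (an assumed path) and model is defined as above:

# Sketch of plugin configurations that the validation above accepts.
from torch.optim import SGD
from torch.optim.lr_scheduler import ReduceLROnPlateau, MultiStepLR
from avalanche.training.plugins import LRSchedulerPlugin  # assumed import path

optimizer = SGD(model.parameters(), lr=0.001)

# Metric-based scheduler: a supported metric must be passed explicitly.
plateau_plugin = LRSchedulerPlugin(
    ReduceLROnPlateau(optimizer), metric="train_loss"
)
# "val_loss" is also used in Example #12, together with a validation stream.

# Step-based scheduler: omit the metric, otherwise a warning is raised.
step_plugin = LRSchedulerPlugin(MultiStepLR(optimizer, milestones=[1, 2, 3]))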