    def finetuning(self, train_loader, val_loader, config, params):
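        # Select the finetuning data according to the configured sampling strategy:
        # a rebuilt exemplar memory ("undersampling"), only the new classes ("new"),
        # or the full training loader otherwise.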
        if config["sampling"] == "undersampling" or \
           (config["sampling"] == "next_undersampling" and self._task > 0):
            self._data_memory, self._targets_memory, _, _ = self.build_examplars(
                self.inc_dataset, self._herding_indexes)
            loader = self.inc_dataset.get_memory_loader(*self.get_memory())
        elif config["sampling"] == "new":
            class_ids = list(
                range(self._n_classes - self._task_size, self._n_classes))
            _, loader = self.inc_dataset.get_custom_loader([class_ids],
                                                           mode="train")
        else:
            loader = train_loader

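        # Dedicated optimizer and scheduler for the finetuning phase, built from the
        # finetuning config rather than the main training hyperparameters.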
        optimizer = factory.get_optimizer(params, config["optimizer"],
                                          config["lr"], self._weight_decay)
        scheduler = factory.get_lr_scheduler(config["scheduling"],
                                             optimizer,
                                             nb_epochs=config["epochs"],
                                             lr_decay=config.get(
                                                 "lr_decay", 0.1),
                                             task=self._task)

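        # Run the finetuning epochs through the generic training loop, using the
        # supervised forward pass and accuracy-based evaluation.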
        loops.single_loop(loader,
                          val_loader,
                          self._multiple_devices,
                          self._network,
                          config["epochs"],
                          optimizer,
                          scheduler=scheduler,
                          train_function=self._supervised_forward,
                          eval_function=self._accuracy,
                          task=self._task,
                          n_tasks=self._n_tasks,
                          disable_progressbar=self._disable_progressbar)

    def _before_task(self, train_loader, val_loader):
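        # Allocate classifier weights for the incoming classes and update the
        # class count.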
        self._gen_weights()
        self._n_classes += self._task_size
        logger.info("Now {} examplars per class.".format(
            self._memory_per_class))

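        # Optional per-group learning rates: each group returned by
        # get_group_parameters() gets lr = base_lr * factor, a factor of 0 leaves the
        # group out of the optimizer, and a second factor set can take over after the
        # first task.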
        if self._groupwise_factors and isinstance(self._groupwise_factors,
                                                  dict):
            if self._groupwise_factors_bis and self._task > 0:
                logger.info("Using second set of groupwise lr.")
                groupwise_factor = self._groupwise_factors_bis
            else:
                groupwise_factor = self._groupwise_factors

            params = []
            group_parameters = self._network.get_group_parameters()
            for group_name, group_params in group_parameters.items():
                if group_params is None or group_name == "last_block":
                    continue
                factor = groupwise_factor.get(group_name, 1.0)
                if factor == 0.:
                    continue
                params.append({"params": group_params, "lr": self._lr * factor})
                logger.info(f"Group: {group_name}, lr: {self._lr * factor}.")
        elif self._groupwise_factors == "ucir":
            params = [
                {
                    "params": self._network.convnet.parameters(),
                    "lr": self._lr
                },
                {
                    "params": self._network.classifier.new_weights,
                    "lr": self._lr
                },
            ]
        else:
            params = self._network.parameters()

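        # Optimizer and scheduler for the upcoming task, built on the parameter
        # groups selected above.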
        self._optimizer = factory.get_optimizer(params, self._opt_name,
                                                self._lr, self.weight_decay)

        self._scheduler = factory.get_lr_scheduler(self._scheduling,
                                                   self._optimizer,
                                                   nb_epochs=self._n_epochs,
                                                   lr_decay=self._lr_decay,
                                                   task=self._task)

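        # Optional per-class loss weights, computed from the training set and moved
        # to the training device.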
        if self._class_weights_config:
            class_weights = data.get_class_weights(
                train_loader.dataset, **self._class_weights_config)
            self._class_weights = torch.tensor(class_weights).to(self._device)
        else:
            self._class_weights = None

    def _before_task(self, train_loader, val_loader):
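        # Allocate weights for the new classes, then build a single optimizer over
        # all network parameters (no groupwise factors in this variant).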
        self._gen_weights()

        self._n_classes += self._task_size
        print("Now {} examplars per class.".format(self._memory_per_class))

        self._optimizer = factory.get_optimizer(
            self._network.parameters(), self._opt_name, self._lr, self._weight_decay
        )

        self._scheduler = factory.get_lr_scheduler(
            self._scheduling, self._optimizer, self._n_epochs, lr_decay=self._lr_decay
        )

    def _before_task(self, train_loader, val_loader):
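        # On later tasks, restart from a copy of the first-task model before adding
        # the new class weights; the classifier weights are then re-initialized.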
        if self._task != 0:
            self._network = self._first_model.copy()
            self._nb_inc_classes += self._task_size
            utils.add_new_weights(self._network, "basic", self._n_classes,
                                  self._nb_inc_classes, self.inc_dataset)
        else:
            utils.add_new_weights(self._network, "basic", self._n_classes,
                                  self._task_size, self.inc_dataset)
        self._network.classifier.reset_weights()

        self._n_classes += self._task_size
        logger.info("Now {} examplars per class.".format(
            self._memory_per_class))
        logger.info(
            f"Nb classes in classifier {len(self._network.classifier.weights)}"
        )

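        # Per-group learning-rate factors, as above: lr = base_lr * factor and a
        # factor of 0 leaves the group out of the optimizer.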
        if self._groupwise_factors and isinstance(self._groupwise_factors,
                                                  dict):
            if self._groupwise_factors_bis and self._task > 0:
                logger.info("Using second set of groupwise lr.")
                groupwise_factor = self._groupwise_factors_bis
            else:
                groupwise_factor = self._groupwise_factors

            params = []
            group_parameters = self._network.get_group_parameters()
            for group_name, group_params in group_parameters.items():
                if group_params is None or group_name == "last_block":
                    continue
                factor = groupwise_factor.get(group_name, 1.0)
                if factor == 0.:
                    continue
                params.append({"params": group_params, "lr": self._lr * factor})
                logger.info(f"Group: {group_name}, lr: {self._lr * factor}.")
        else:
            params = self._network.parameters()

        self._optimizer = factory.get_optimizer(params, self._opt_name,
                                                self._lr, self.weight_decay)

        self._scheduler = factory.get_lr_scheduler(self._scheduling,
                                                   self._optimizer,
                                                   nb_epochs=self._n_epochs,
                                                   lr_decay=self._lr_decay,
                                                   task=self._task)

    def _before_task(self, train_loader, val_loader):
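        # Add weights for the new classes, then set up the optimizer and scheduler
        # used for the unsupervised training phase (convnet parameters only).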
        utils.add_new_weights(self._network, {"type": "basic"},
                              self._n_classes, self._task_size,
                              self.inc_dataset)
        self._n_classes += self._task_size

        self._optimizer = factory.get_optimizer(
            [{
                "params": self._network.convnet.parameters(),
            }], self.unsupervised_training["optimizer"],
            self.unsupervised_training["lr"], self._weight_decay)

        self._scheduler = factory.get_lr_scheduler(
            self.unsupervised_training["scheduling"],
            self._optimizer,
            nb_epochs=self.unsupervised_training["epochs"],
            lr_decay=self.unsupervised_training.get("lr_decay", 0.1),
            task=self._task)