    def _train_task(self, train_loader, val_loader):
        # Unsupervised training phase on the current task's data:
        loops.single_loop(train_loader,
                          val_loader,
                          self._multiple_devices,
                          self._network,
                          self.unsupervised_training["epochs"],
                          self._optimizer,
                          scheduler=self._scheduler,
                          train_function=self._unsupervised_forward,
                          eval_function=self._accuracy,
                          task=self._task,
                          n_tasks=self._n_tasks,
                          disable_progressbar=self._disable_progressbar)

        # Optional supervised finetuning of only the new classifier weights:
        if self.new_supervised_training:
            logger.info("Finetuning new")
            self.finetuning(train_loader, val_loader,
                            self.new_supervised_training,
                            [{
                                "params": self._network.classifier.new_weights
                            }])
        # Optional supervised finetuning of all the classifier weights:
        if self.all_supervised_training:
            logger.info("Finetuning all")
            self.finetuning(
                train_loader, val_loader, self.all_supervised_training,
                [{
                    "params": self._network.classifier.parameters()
                }])

    def finetuning(self, train_loader, val_loader, config, params):
        # Pick the finetuning data according to the configured sampling strategy:
        if config["sampling"] == "undersampling" or \
           (config["sampling"] == "next_undersampling" and self._task > 0):
            # Rebuild the exemplar memory and finetune on a class-balanced
            # memory loader.
            self._data_memory, self._targets_memory, _, _ = self.build_examplars(
                self.inc_dataset, self._herding_indexes)
            loader = self.inc_dataset.get_memory_loader(*self.get_memory())
        elif config["sampling"] == "new":
            # Finetune only on the classes introduced by the current task.
            class_ids = list(
                range(self._n_classes - self._task_size, self._n_classes))
            _, loader = self.inc_dataset.get_custom_loader([class_ids],
                                                           mode="train")
        else:
            # Otherwise, finetune on the full training loader.
            loader = train_loader

        optimizer = factory.get_optimizer(params, config["optimizer"],
                                          config["lr"], self._weight_decay)
        scheduler = factory.get_lr_scheduler(config["scheduling"],
                                             optimizer,
                                             nb_epochs=config["epochs"],
                                             lr_decay=config.get(
                                                 "lr_decay", 0.1),
                                             task=self._task)

        loops.single_loop(loader,
                          val_loader,
                          self._multiple_devices,
                          self._network,
                          config["epochs"],
                          optimizer,
                          scheduler=scheduler,
                          train_function=self._supervised_forward,
                          eval_function=self._accuracy,
                          task=self._task,
                          n_tasks=self._n_tasks,
                          disable_progressbar=self._disable_progressbar)
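
For reference, a minimal sketch of the kind of config dict that finetuning expects, inferred from the keys it reads above; the concrete values are only illustrative assumptions, not the project's defaults:

# Hypothetical finetuning config (values are assumptions for illustration only):
finetuning_config = {
    "sampling": "undersampling",  # or "next_undersampling" / "new"; anything else keeps train_loader
    "optimizer": "sgd",           # passed to factory.get_optimizer
    "lr": 0.01,
    "scheduling": "cosine",       # passed to factory.get_lr_scheduler
    "epochs": 20,
    "lr_decay": 0.1,              # optional; read via config.get("lr_decay", 0.1)
}

# Example call, finetuning only the new classifier weights:
# self.finetuning(train_loader, val_loader, finetuning_config,
#                 [{"params": self._network.classifier.new_weights}])
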
Example #3
    def _train_task(self, train_loader, val_loader):
        # Standard training of the network on the current task's data:
        loops.single_loop(train_loader,
                          val_loader,
                          self._multiple_devices,
                          self._network,
                          self._n_epochs,
                          self._optimizer,
                          scheduler=self._scheduler,
                          train_function=self._forward_loss,
                          eval_function=self._accuracy,
                          task=self._task,
                          n_tasks=self._n_tasks)

    def _train_task(self, train_loader, val_loader):
        # The oracle retrains a model on all the data seen so far at every task:
        _, train_loader = self.inc_dataset.get_custom_loader(
            list(range(self._n_classes)), mode="train")
        _, val_loader = self.inc_dataset.get_custom_loader(
            list(range(self._n_classes)), data_source="val")

        loops.single_loop(train_loader,
                          val_loader,
                          self._multiple_devices,
                          self._network,
                          self._n_epochs,
                          self._optimizer,
                          scheduler=self._scheduler,
                          train_function=self._forward_loss,
                          eval_function=self._accuracy,
                          task=self._task,
                          n_tasks=self._n_tasks)
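
All of the snippets above delegate the epoch loop to loops.single_loop. As a rough guide, here is a self-contained sketch of a training loop compatible with those call sites; its signature and behaviour are inferred only from how it is called in these examples (they are assumptions, not the project's actual loops.single_loop), and the batch and evaluation handling is deliberately simplified:

# Sketch of a single_loop-style training loop; the interface is an assumption
# inferred from the call sites above, not the real loops.single_loop.
import torch
from torch.nn import DataParallel


def single_loop_sketch(train_loader, val_loader, devices, network, n_epochs,
                       optimizer, scheduler=None, train_function=None,
                       eval_function=None, task=0, n_tasks=1,
                       disable_progressbar=False):
    # Wrap the network for multi-GPU training when several devices are given.
    training_network = (DataParallel(network, device_ids=devices)
                        if len(devices) > 1 else network)

    for epoch in range(n_epochs):
        network.train()
        for batch in train_loader:
            optimizer.zero_grad()
            # The caller-supplied function turns a batch into a scalar loss
            # (e.g. an unsupervised or supervised forward pass).
            loss = train_function(training_network, batch)
            loss.backward()
            optimizer.step()

        if scheduler is not None:
            scheduler.step()

        if eval_function is not None and val_loader is not None:
            network.eval()
            with torch.no_grad():
                accuracy = eval_function(val_loader)
            if not disable_progressbar:
                print(f"Task {task + 1}/{n_tasks}, epoch {epoch + 1}/{n_epochs}: "
                      f"val accuracy {accuracy:.3f}")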