Example #1
    def _before_task(self, train_loader, val_loader):
        self._n_classes += self._task_size
        self._network.add_classes(self._task_size)
        print("Now {} examplars per class.".format(self._memory_per_class))

        self._optimizer = factory.get_optimizer(self._network.parameters(),
                                                self._opt_name, self._lr,
                                                self._weight_decay)
        self._optimizer_graph = factory.get_optimizer(
            self._network.parameters(), self._opt_name, 0.001,
            self._weight_decay)

        self._scheduler = torch.optim.lr_scheduler.MultiStepLR(
            self._optimizer, self._scheduling, gamma=self._lr_decay)
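
# Hedged sketch: the snippets in this section call `factory.get_optimizer(params,
# opt_name, lr, weight_decay)`, which is not shown. The helper below is an illustrative
# stand-in mapping names to torch.optim classes; the supported names and the SGD
# momentum default are assumptions, not the repository's actual implementation.
import torch


def get_optimizer_sketch(params, opt_name, lr, weight_decay=0.0):
    if opt_name == "sgd":
        return torch.optim.SGD(params, lr=lr, momentum=0.9, weight_decay=weight_decay)
    if opt_name == "adam":
        return torch.optim.Adam(params, lr=lr, weight_decay=weight_decay)
    raise NotImplementedError("Unknown optimizer {}.".format(opt_name))
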
    def _before_task(self, train_loader, val_loader):
        self._n_classes += self._task_size
        self._network.add_classes(self._task_size)
        logger.info("Now {} examplars per class.".format(
            self._memory_per_class))

        self._optimizer = factory.get_optimizer(self._network.parameters(),
                                                self._opt_name, self._lr,
                                                self._weight_decay)

        base_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            self._optimizer, self._scheduling, gamma=self._lr_decay)

        if self._warmup_config:
            if self._warmup_config.get("only_first_step",
                                       True) and self._task != 0:
                # Warm up only on the first task; later tasks keep the plain base scheduler
                # instead of reusing a stale scheduler bound to the previous optimizer.
                self._scheduler = base_scheduler
            else:
                logger.info("Using WarmUp")
                self._scheduler = schedulers.GradualWarmupScheduler(
                    optimizer=self._optimizer,
                    after_scheduler=base_scheduler,
                    **self._warmup_config)
        else:
            self._scheduler = base_scheduler
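
# Hedged sketch: `schedulers.GradualWarmupScheduler` is assumed to ramp the learning
# rate up for a few epochs and then delegate to `after_scheduler`. The class below is
# only an illustrative stand-in; the real scheduler's constructor arguments (taken from
# `warmup_config`) and exact behaviour may differ.
class LinearWarmupSketch:

    def __init__(self, optimizer, after_scheduler, total_epoch=5, **kwargs):
        self.optimizer = optimizer
        self.after_scheduler = after_scheduler
        self.total_epoch = total_epoch
        self.base_lrs = [group["lr"] for group in optimizer.param_groups]
        self.last_epoch = 0

    def step(self):
        self.last_epoch += 1
        if self.last_epoch <= self.total_epoch:
            # Linearly scale every group's lr from 1/total_epoch up to its base value.
            scale = self.last_epoch / self.total_epoch
            for group, base_lr in zip(self.optimizer.param_groups, self.base_lrs):
                group["lr"] = base_lr * scale
        else:
            # Warmup finished: hand control to the wrapped scheduler (e.g. MultiStepLR).
            self.after_scheduler.step()
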
    def finetuning(self, train_loader, val_loader, config, params):
        if config["sampling"] == "undersampling" or \
           (config["sampling"] == "next_undersampling" and self._task > 0):
            self._data_memory, self._targets_memory, _, _ = self.build_examplars(
                self.inc_dataset, self._herding_indexes)
            loader = self.inc_dataset.get_memory_loader(*self.get_memory())
        elif config["sampling"] == "new":
            class_ids = list(
                range(self._n_classes - self._task_size, self._n_classes))
            _, loader = self.inc_dataset.get_custom_loader([class_ids],
                                                           mode="train")
        else:
            loader = train_loader

        optimizer = factory.get_optimizer(params, config["optimizer"],
                                          config["lr"], self._weight_decay)
        scheduler = factory.get_lr_scheduler(config["scheduling"],
                                             optimizer,
                                             nb_epochs=config["epochs"],
                                             lr_decay=config.get(
                                                 "lr_decay", 0.1),
                                             task=self._task)

        loops.single_loop(loader,
                          val_loader,
                          self._multiple_devices,
                          self._network,
                          config["epochs"],
                          optimizer,
                          scheduler=scheduler,
                          train_function=self._supervised_forward,
                          eval_function=self._accuracy,
                          task=self._task,
                          n_tasks=self._n_tasks,
                          disable_progressbar=self._disable_progressbar)
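
# Hedged sketch: `factory.get_lr_scheduler(scheduling, optimizer, nb_epochs, lr_decay,
# task)` is used throughout this section but not shown. A stand-in consistent with how
# it is called could look like the function below; the accepted `scheduling` values
# (a list of milestone epochs or the string "cosine") are assumptions for illustration.
import torch


def get_lr_scheduler_sketch(scheduling, optimizer, nb_epochs, lr_decay=0.1, task=0):
    if scheduling is None:
        return None
    if isinstance(scheduling, (list, tuple)):
        # Milestone epochs, as in the MultiStepLR calls above.
        return torch.optim.lr_scheduler.MultiStepLR(optimizer, list(scheduling), gamma=lr_decay)
    if scheduling == "cosine":
        return torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, nb_epochs)
    raise NotImplementedError("Unknown scheduling {}.".format(scheduling))
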
Example #4
    def _train_task(self, train_loader, val_loader):
        """Train & fine-tune model.

        The scheduling differs from the paper for one reason. In the paper,
        End-to-End Incremental Learning, the authors pre-generated 12 augmentations
        per image (thus multiplying the dataset size by that factor). However,
        I find this inefficient for large-scale datasets, so I simply do the
        augmentations online. A greater number of epochs is then needed to match
        the reported performance.

        :param train_loader: A DataLoader.
        :param val_loader: A DataLoader, can be None.
        """
        if self._task == 0:
            epochs = 90
            optimizer = factory.get_optimizer(self._network.parameters(),
                                              self._opt_name, 0.1, 0.0001)
            scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                             [50, 60],
                                                             gamma=0.1)
            self._train(train_loader, val_loader, epochs, optimizer, scheduler)
            return

        # Training on all new + examplars
        print("Training")
        self._finetuning = False
        epochs = 60
        optimizer = factory.get_optimizer(self._network.parameters(),
                                          self._opt_name, 0.1, 0.0001)
        scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [40, 50],
                                                         gamma=0.1)
        self._train(train_loader, val_loader, epochs, optimizer, scheduler)

        # Fine-tuning on sub-set new + examplars
        print("Fine-tuning")
        self._old_model = self._network.copy().freeze()

        self._finetuning = True
        self._build_examplars(n_examplars=self._k //
                              (self._n_classes - self._task_size))

        loader = self.inc_dataset.get_memory_loader(*self.get_memory())
        optimizer = factory.get_optimizer(self._network.parameters(),
                                          self._opt_name, 0.01, 0.0001)
        scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [10, 20],
                                                         gamma=0.1)
        self._train(loader, val_loader, 40, optimizer, scheduler)
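
# Hedged sketch: `self._train(loader, val_loader, epochs, optimizer, scheduler)` is not
# shown above. A generic epoch loop consistent with how it is called might look like
# this; `compute_loss` and the `(inputs, targets)` batch format are assumptions made
# purely for illustration.
def train_sketch(network, loader, epochs, optimizer, scheduler, compute_loss):
    network.train()
    for epoch in range(epochs):
        for inputs, targets in loader:
            optimizer.zero_grad()
            loss = compute_loss(network, inputs, targets)
            loss.backward()
            optimizer.step()
        if scheduler is not None:
            # Step once per epoch, matching the MultiStepLR milestones above.
            scheduler.step()
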
    def _before_task(self, train_loader, val_loader):
        self._gen_weights()
        self._n_classes += self._task_size
        logger.info("Now {} examplars per class.".format(
            self._memory_per_class))

        if self._groupwise_factors and isinstance(self._groupwise_factors,
                                                  dict):
            if self._groupwise_factors_bis and self._task > 0:
                logger.info("Using second set of groupwise lr.")
                groupwise_factor = self._groupwise_factors_bis
            else:
                groupwise_factor = self._groupwise_factors

            params = []
            for group_name, group_params in self._network.get_group_parameters(
            ).items():
                if group_params is None or group_name == "last_block":
                    continue
                factor = groupwise_factor.get(group_name, 1.0)
                if factor == 0.:
                    continue
                params.append({
                    "params": group_params,
                    "lr": self._lr * factor
                })
                print(f"Group: {group_name}, lr: {self._lr * factor}.")
        elif self._groupwise_factors == "ucir":
            params = [
                {
                    "params": self._network.convnet.parameters(),
                    "lr": self._lr
                },
                {
                    "params": self._network.classifier.new_weights,
                    "lr": self._lr
                },
            ]
        else:
            params = self._network.parameters()

        self._optimizer = factory.get_optimizer(params, self._opt_name,
                                                self._lr, self.weight_decay)

        self._scheduler = factory.get_lr_scheduler(self._scheduling,
                                                   self._optimizer,
                                                   nb_epochs=self._n_epochs,
                                                   lr_decay=self._lr_decay,
                                                   task=self._task)

        if self._class_weights_config:
            self._class_weights = torch.tensor(
                data.get_class_weights(train_loader.dataset,
                                       **self._class_weights_config)).to(
                                           self._device)
        else:
            self._class_weights = None
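
# Hedged sketch: `data.get_class_weights(dataset, **config)` is assumed to return one
# weight per class (e.g. inverse-frequency weights) that is then moved to the device
# and passed to the loss. The helper below illustrates that idea only; it is not the
# repository's implementation.
import numpy as np


def get_class_weights_sketch(targets):
    """Inverse-frequency weights; a perfectly balanced dataset gets weight 1.0 everywhere."""
    targets = np.asarray(targets)
    counts = np.bincount(targets).astype(np.float64)
    counts = np.maximum(counts, 1.0)  # guard against empty classes
    return counts.sum() / (len(counts) * counts)
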
Example #6
    def _before_task(self, data_loader, val_loader):
        self._n_classes += self._task_size
        self._network.add_classes(self._task_size)

        self._optimizer = factory.get_optimizer(self._network.parameters(),
                                                self._opt_name, self._lr,
                                                self._weight_decay)
        if self._scheduling is None:
            self._scheduler = None
        else:
            self._scheduler = torch.optim.lr_scheduler.MultiStepLR(
                self._optimizer, self._scheduling, gamma=self._lr_decay)
    def _before_task(self, train_loader, val_loader):
        self._gen_weights()

        self._n_classes += self._task_size
        print("Now {} examplars per class.".format(self._memory_per_class))

        self._optimizer = factory.get_optimizer(
            self._network.parameters(), self._opt_name, self._lr, self._weight_decay
        )

        self._scheduler = factory.get_lr_scheduler(
            self._scheduling, self._optimizer, self._n_epochs, lr_decay=self._lr_decay
        )
    def _train_task(self, train_loader, val_loader):
        if self._meta_transfer:
            logger.info("Setting task meta-transfer")
            self.set_meta_transfer()

        for p in self._network.parameters():
            if p.requires_grad:
                p.register_hook(lambda grad: torch.clamp(grad, -5., 5.))

        self._training_step(train_loader, val_loader, 0, self._n_epochs)

        if self._finetuning_config and self._task != 0:
            logger.info("Fine-tuning")

            self._data_memory, self._targets_memory, _, _ = self.build_examplars(
                self.inc_dataset, self._herding_indexes
            )
            loader = self.inc_dataset.get_memory_loader(*self.get_memory())

            if self._finetuning_config["tuning"] == "all":
                parameters = self._network.parameters()
            elif self._finetuning_config["tuning"] == "convnet":
                parameters = self._network.convnet.parameters()
            elif self._finetuning_config["tuning"] == "classifier":
                parameters = self._network.classifier.parameters()
            elif self._finetuning_config["tuning"] == "classifier_scale":
                parameters = [
                    {
                        "params": self._network.classifier.parameters(),
                        "lr": self._finetuning_config["lr"]
                    }, {
                        "params": self._network.post_processor.parameters(),
                        "lr": self._finetuning_config["lr"]
                    }
                ]
            else:
                raise NotImplementedError(
                    "Unknwown finetuning parameters {}.".format(self._finetuning_config["tuning"])
                )

            self._optimizer = factory.get_optimizer(
                parameters, self._opt_name, self._finetuning_config["lr"], self._weight_decay
            )
            self._scheduler = None
            self._training_step(
                loader,
                val_loader,
                self._n_epochs,
                self._n_epochs + self._finetuning_config["epochs"],
                record_bn=False
            )
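
# Hedged sketch: the `register_hook` calls above clamp each parameter's gradient to
# [-5, 5] during the backward pass. A minimal standalone demonstration of the same
# pattern (the toy Linear layer and inflated loss are assumptions for illustration):
import torch


def clamp_gradients_demo():
    layer = torch.nn.Linear(4, 2)
    for p in layer.parameters():
        if p.requires_grad:
            p.register_hook(lambda grad: torch.clamp(grad, -5., 5.))

    loss = layer(torch.randn(8, 4)).sum() * 1000.  # inflate gradients on purpose
    loss.backward()
    # Every stored gradient now lies within the clamp bounds.
    assert all(p.grad.abs().max() <= 5. for p in layer.parameters())
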
    def _before_task(self, train_loader, val_loader):
        if self._task != 0:
            self._network = self._first_model.copy()
            self._nb_inc_classes += self._task_size
            utils.add_new_weights(self._network, "basic", self._n_classes,
                                  self._nb_inc_classes, self.inc_dataset)
        else:
            utils.add_new_weights(self._network, "basic", self._n_classes,
                                  self._task_size, self.inc_dataset)
        self._network.classifier.reset_weights()

        self._n_classes += self._task_size
        logger.info("Now {} examplars per class.".format(
            self._memory_per_class))
        logger.info(
            f"Nb classes in classifier {len(self._network.classifier.weights)}"
        )

        if self._groupwise_factors and isinstance(self._groupwise_factors,
                                                  dict):
            if self._groupwise_factors_bis and self._task > 0:
                logger.info("Using second set of groupwise lr.")
                groupwise_factor = self._groupwise_factors_bis
            else:
                groupwise_factor = self._groupwise_factors

            params = []
            for group_name, group_params in self._network.get_group_parameters(
            ).items():
                if group_params is None or group_name == "last_block":
                    continue
                factor = groupwise_factor.get(group_name, 1.0)
                if factor == 0.:
                    continue
                params.append({
                    "params": group_params,
                    "lr": self._lr * factor
                })
                print(f"Group: {group_name}, lr: {self._lr * factor}.")
        else:
            params = self._network.parameters()

        self._optimizer = factory.get_optimizer(params, self._opt_name,
                                                self._lr, self.weight_decay)

        self._scheduler = factory.get_lr_scheduler(self._scheduling,
                                                   self._optimizer,
                                                   nb_epochs=self._n_epochs,
                                                   lr_decay=self._lr_decay,
                                                   task=self._task)
    def _before_task(self, train_loader, val_loader):
        utils.add_new_weights(self._network, {"type": "basic"},
                              self._n_classes, self._task_size,
                              self.inc_dataset)
        self._n_classes += self._task_size

        self._optimizer = factory.get_optimizer(
            [{
                "params": self._network.convnet.parameters(),
            }], self.unsupervised_training["optimizer"],
            self.unsupervised_training["lr"], self._weight_decay)

        self._scheduler = factory.get_lr_scheduler(
            self.unsupervised_training["scheduling"],
            self._optimizer,
            nb_epochs=self.unsupervised_training["epochs"],
            lr_decay=self.unsupervised_training.get("lr_decay", 0.1),
            task=self._task)
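
# Hedged sketch: `self.unsupervised_training` is read above as a dict with "optimizer",
# "lr", "scheduling", "lr_decay" and "epochs" entries. A configuration of that shape
# might look like the following; every value here is illustrative only.
unsupervised_training_example = {
    "optimizer": "sgd",
    "lr": 0.1,
    "scheduling": [60, 80],  # assumed to be milestone epochs, per the scheduler usage above
    "lr_decay": 0.1,
    "epochs": 100,
}
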
    def _before_task(self, data_loader, val_loader):
        self._n_classes += self._task_size

        self._network = network.BasicNet(
            self._args["convnet"],
            convnet_kwargs=self._args.get("convnet_config", {}),
            classifier_kwargs=self._args.get("classifier_config", {
                "type": "fc",
                "use_bias": True
            }),
            device=self._device)
        self._network.add_classes(self._n_classes)

        self._optimizer = factory.get_optimizer(self._network.parameters(),
                                                self._opt_name, self._lr,
                                                self._weight_decay)
        if self._scheduling is None:
            self._scheduler = None
        else:
            self._scheduler = torch.optim.lr_scheduler.MultiStepLR(
                self._optimizer, self._scheduling, gamma=self._lr_decay)
    def _train_task(self, train_loader, val_loader):
        if self._meta_transfer:
            logger.info("Setting task meta-transfer")
            self.set_meta_transfer()

        for p in self._network.parameters():
            if p.requires_grad:
                p.register_hook(lambda grad: torch.clamp(grad, -5., 5.))

        logger.debug("nb {}.".format(len(train_loader.dataset)))

        if self._meta_transfer.get("clip"):
            logger.info(
                f"Clipping MTL weights ({self._meta_transfer.get('clip')}).")
            clipper = BoundClipper(*self._meta_transfer.get("clip"))
        else:
            clipper = None
        self._training_step(train_loader,
                            val_loader,
                            0,
                            self._n_epochs,
                            record_bn=True,
                            clipper=clipper)

        self._post_processing_type = None

        if self._finetuning_config and self._task != 0:
            logger.info("Fine-tuning")
            if self._finetuning_config["scaling"]:
                logger.info("Custom fine-tuning scaling of {}.".format(
                    self._finetuning_config["scaling"]))
                self._post_processing_type = self._finetuning_config["scaling"]

            if self._finetuning_config["sampling"] == "undersampling":
                self._data_memory, self._targets_memory, _, _ = self.build_examplars(
                    self.inc_dataset, self._herding_indexes)
                loader = self.inc_dataset.get_memory_loader(*self.get_memory())
            elif self._finetuning_config["sampling"] == "oversampling":
                _, loader = self.inc_dataset.get_custom_loader(
                    list(
                        range(self._n_classes - self._task_size,
                              self._n_classes)),
                    memory=self.get_memory(),
                    mode="train",
                    sampler=samplers.MemoryOverSampler)
            else:
                raise NotImplementedError(
                    "Unknown finetuning sampling {}.".format(
                        self._finetuning_config["sampling"]))

            if self._finetuning_config["tuning"] == "all":
                parameters = self._network.parameters()
            elif self._finetuning_config["tuning"] == "convnet":
                parameters = self._network.convnet.parameters()
            elif self._finetuning_config["tuning"] == "classifier":
                parameters = self._network.classifier.parameters()
            elif self._finetuning_config["tuning"] == "classifier_scale":
                parameters = [{
                    "params": self._network.classifier.parameters(),
                    "lr": self._finetuning_config["lr"]
                }, {
                    "params": self._network.post_processor.parameters(),
                    "lr": self._finetuning_config["lr"]
                }]
            else:
                raise NotImplementedError(
                    "Unknwown finetuning parameters {}.".format(
                        self._finetuning_config["tuning"]))

            self._optimizer = factory.get_optimizer(
                parameters, self._opt_name, self._finetuning_config["lr"],
                self.weight_decay)
            self._scheduler = None
            self._training_step(loader,
                                val_loader,
                                self._n_epochs,
                                self._n_epochs +
                                self._finetuning_config["epochs"],
                                record_bn=False)
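
# Hedged sketch: `BoundClipper` is built from a (lower, upper) pair taken from the
# meta-transfer config and handed to `_training_step` as `clipper`. A plausible
# stand-in clamps a module's weights in place when applied (e.g. via `module.apply`);
# which tensors the real class actually clamps is an assumption.
import torch


class BoundClipperSketch:

    def __init__(self, lower_bound, upper_bound):
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound

    def __call__(self, module):
        # Illustration only: clamp any `weight` tensor into the configured bounds.
        weight = getattr(module, "weight", None)
        if isinstance(weight, torch.Tensor):
            with torch.no_grad():
                weight.clamp_(self.lower_bound, self.upper_bound)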