Example #1
0
 def validate(self, model):
     """ Evaluate `model` on the held-out validation set and return its metrics. """
     loader, criterion = self.val_loader, self.criterion
     return utils.validate(
         loader,
         model,
         criterion,
         print_freq=self.args.print_freq,
     )
Example #2
0
    def fine_tune(self,
                  model,
                  min_val_acc=None,
                  return_init_acc=False,
                  max_iters=None,
                  **kwargs):
        """ Fine tune a pruned model.

        Args:
            model: model to fine-tune; moved to CUDA when available.
            min_val_acc: if given, stop early as soon as validation
                accuracy reaches this threshold.
            return_init_acc: also return the accuracy measured before
                any fine-tuning took place.
            max_iters: optional cap on training iterations per epoch
                (forwarded to ``utils.train``).
            **kwargs: extra arguments forwarded to ``utils.train`` and
                ``utils.validate``.

        Returns:
            ``(best_acc, best_model)`` or, when ``return_init_acc`` is
            True, ``(best_acc, init_acc, best_model)``.  ``best_model``
            is ``None`` when no epoch improved on the initial accuracy.
        """
        if torch.cuda.is_available():
            model.cuda()  # in case the model is not on CUDA yet.
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

        # prepare the optimizer
        epochs = self.args.epochs
        base_lr = self.args.lr
        optimizer = optim.SGD(
            model.parameters(),
            lr=base_lr,
            momentum=self.args.momentum,
            weight_decay=self.args.weight_decay,
        )

        # validation in the beginning: establishes the baseline accuracy
        val_loss, val_acc = utils.validate(self.val_loader,
                                           model,
                                           self.criterion,
                                           gpu=device,
                                           print_freq=self.args.print_freq,
                                           **kwargs)
        init_acc = val_acc
        best_acc = val_acc
        best_model = None  # stays None unless some epoch beats init_acc

        for epoch in range(epochs):
            # TODO(13/02/2019): learning rate adjustment
            # self.adjust_learning_rate(epoch, optimizer)

            # Lazy %-style args: the message is only formatted when
            # DEBUG logging is actually enabled.
            logging.debug("Epoch: [%d | %d] LR: %f",
                          epoch + 1, epochs, self.state["lr"])

            # Run train and validation for one epoch
            train_loss, train_acc = utils.train(
                self.train_loader,
                model,
                self.criterion,
                optimizer,
                epoch,
                max_iters=max_iters,
                print_freq=self.args.print_freq,
                gpu=device,
                state=self.state,
                schedule=self.args.schedule,
                epochs=self.args.epochs,
                base_lr=self.args.lr,
                gamma=self.args.gamma,
                lr_type=self.args.lr_type,
                **kwargs)

            val_loss, val_acc = utils.validate(self.val_loader,
                                               model,
                                               self.criterion,
                                               gpu=device,
                                               print_freq=self.args.print_freq,
                                               **kwargs)

            # Append message to Logger
            self.logger.append([
                self.state["lr"], train_loss, 0.0, val_loss, train_acc, val_acc
            ])

            # Update best accuracy; keep a snapshot of the best model so
            # later epochs cannot degrade what we return.
            is_best = val_acc > best_acc
            if is_best:
                best_acc = val_acc
                best_model = copy.deepcopy(model)

            checkpoint_state = {
                "epoch": epoch + 1,
                "state_dict": model.state_dict(),
                "acc": val_acc,
                "best_acc": best_acc,
                "optimizer": optimizer.state_dict(),
            }
            utils.save_checkpoint(checkpoint_state, is_best,
                                  self.args.checkpoint)

            # Early exit once the requested accuracy has been reached.
            if min_val_acc is not None and val_acc >= min_val_acc:
                break

        # Finalising
        self.logger.close()
        logging.info(
            "Best accuracy while fine-tuning: {:.2f}%".format(best_acc))

        if not return_init_acc:
            return best_acc, best_model
        else:
            return best_acc, init_acc, best_model
Example #3
0
    def train(self, model, **kwargs):
        """ Simply train the model with provided arguments.

        Args:
            model: the model to train; moved to CUDA when available.
            **kwargs: accepted for interface compatibility; not used in
                this method's body.

        Returns:
            The best validation accuracy observed across all epochs.
        """
        if torch.cuda.is_available():
            model.cuda()  # in case the model is not on CUDA yet.

        # prepare the optimizer
        best_acc = 0
        epochs = self.args.epochs
        base_lr = self.args.lr
        optimizer = optim.SGD(model.parameters(),
                              lr=base_lr,
                              momentum=self.args.momentum,
                              weight_decay=self.args.weight_decay)

        # TODO: move this code somewhere else
        # NOTE(review): only the optimizer state is restored here; the
        # model weights are presumably loaded elsewhere -- verify
        # against the callers of this method.
        if self.args.resume:
            checkpoint = torch.load(self.args.resume)
            if 'optimizer' in checkpoint:
                optimizer.load_state_dict(checkpoint['optimizer'])

        # Lazy %-style args: formatting is deferred until the record is
        # actually emitted.
        logging.info('==> Started training, total epochs %s, start from %s',
                     epochs, self.args.start_epoch)
        self.print_optimizer(optimizer)

        for epoch in range(self.args.start_epoch, epochs):
            # TODO(13/02/2019): learning rate adjustment
            # self.adjust_learning_rate(epoch, optimizer)
            logging.info('Epoch: [%5d | %5d] LR: %f',
                         epoch + 1, epochs, self.state['lr'])

            # Run train and validation for one epoch
            train_loss, train_acc = utils.train(self.train_loader,
                                                model,
                                                self.criterion,
                                                optimizer,
                                                epoch,
                                                print_freq=self.args.print_freq,
                                                state=self.state,
                                                schedule=self.args.schedule,
                                                epochs=self.args.epochs,
                                                base_lr=self.args.lr,
                                                gamma=self.args.gamma,
                                                lr_type=self.args.lr_type)

            val_loss, val_acc = utils.validate(self.val_loader,
                                               model,
                                               self.criterion,
                                               print_freq=self.args.print_freq)

            # Append message to Logger
            self.logger.append([
                self.state['lr'], train_loss, 0.0, val_loss, train_acc, val_acc
            ])

            # Update best accuracy
            is_best = val_acc > best_acc
            best_acc = max(val_acc, best_acc)

            checkpoint_state = {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'acc': val_acc,
                'best_acc': best_acc,
                'optimizer': optimizer.state_dict(),
            }
            utils.save_checkpoint(checkpoint_state, is_best,
                                  self.args.checkpoint)

        # Finalising
        self.logger.close()
        logging.info('Best accuracy achieved: {:.3f}%'.format(best_acc))

        return best_acc