Example #1
    def write_log2file(self, layer, block_count, layer_name):
        """Write per-block channel-selection statistics to a log file and to the logger."""
        write_log(
            dir_name=os.path.join(self.settings.save_path, "log"),
            file_name="log_block-{:0>2d}_{}.txt".format(block_count, layer_name),
            log_str="{:d}\t{:f}\t{:f}\t{:f}\t{:f}\t{:f}\t{:f}\t{:f}\t{:f}\t\n".format(
                int(layer.d.sum()),
                self.record_selection_loss.avg,
                self.record_selection_mse_loss.avg,
                self.record_selection_softmax_loss.avg,
                self.record_sub_problem_loss.avg,
                self.record_sub_problem_mse_loss.avg,
                self.record_sub_problem_softmax_loss.avg,
                self.record_sub_problem_top1_error.avg,
                self.record_sub_problem_top5_error.avg))
        # Human-readable summary for the logger.
        log_str = "Block-{:0>2d}-{}  #channels: [{:0>4d}|{:0>4d}]  ".format(
            block_count, layer_name, int(layer.d.sum()), layer.d.size(0))
        log_str += "[selection]loss: {:.4f}  mseloss: {:.4f}  softmaxloss: {:.4f}  ".format(
            self.record_selection_loss.avg,
            self.record_selection_mse_loss.avg,
            self.record_selection_softmax_loss.avg)
        log_str += "[subproblem]loss: {:.4f}  mseloss: {:.4f}  softmaxloss: {:.4f}  ".format(
            self.record_sub_problem_loss.avg,
            self.record_sub_problem_mse_loss.avg,
            self.record_sub_problem_softmax_loss.avg)
        log_str += "top1error: {:.4f}  top5error: {:.4f}  ".format(
            self.record_sub_problem_top1_error.avg,
            self.record_sub_problem_top5_error.avg)
        self.logger.info(log_str)
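
The snippet above relies on a write_log helper, record_* meters exposing .avg, and an already-imported os module, none of which are shown here. The following is a minimal, hypothetical sketch of what such pieces could look like so the call sites read self-contained; the actual DCP-1 implementations may differ.

import os


def write_log(dir_name, file_name, log_str):
    """Append one record to dir_name/file_name, creating the directory if needed."""
    os.makedirs(dir_name, exist_ok=True)
    # Keep one record per line no matter how the caller formats log_str.
    if not log_str.endswith("\n"):
        log_str += "\n"
    with open(os.path.join(dir_name, file_name), "a") as handle:
        handle.write(log_str)


class AverageMeter(object):
    """Running average; .avg is what write_log2file reads from the record_* meters."""

    def __init__(self):
        self.sum = 0.0
        self.count = 0

    def update(self, value, n=1):
        self.sum += value * n
        self.count += n

    @property
    def avg(self):
        return self.sum / max(self.count, 1)
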
Example #2
File: main.py Project: qdmy/DCP-1
    def run(self):
        """
        Learn the parameters of the additional classifier and
        fine tune model with the additional losses and the final loss
        """

        # top-1/top-5 errors are percentages, so 100 is the worst case
        best_top1 = 100
        best_top5 = 100

        start_epoch = 0
        # resume from a loaded checkpoint: self.epoch holds the last finished epoch
        if self.epoch != 0:
            start_epoch = self.epoch + 1
            self.epoch = 0

        # validate once before training starts
        self.trainer.val(0)

        for epoch in range(start_epoch, self.settings.n_epochs):
            train_error, train_loss, train5_error = self.trainer.train(epoch)
            val_error, val_loss, val5_error = self.trainer.val(epoch)

            # write log
            log_str = "{:d}\t".format(epoch)
            for i in range(len(train_error)):
                log_str += "{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t".format(
                    train_error[i], train_loss[i], val_error[i], val_loss[i],
                    train5_error[i], val5_error[i])
            write_log(self.settings.save_path, 'log.txt', log_str)

            # save model and checkpoint
            best_flag = False
            if best_top1 >= val_error[-1]:
                best_top1 = val_error[-1]
                best_top5 = val5_error[-1]
                best_flag = True

            if best_flag:
                self.checkpoint.save_aux_model(self.trainer.model,
                                               self.trainer.aux_fc)

            self.logger.info(
                "|===>Best Result is: Top1 Error: {:f}, Top5 Error: {:f}\n".
                format(best_top1, best_top5))
            self.logger.info(
                "|===>Best Result is: Top1 Accuracy: {:f}, Top5 Accuracy: {:f}\n"
                .format(100 - best_top1, 100 - best_top5))

            if self.settings.dataset in ["imagenet"]:
                self.checkpoint.save_aux_checkpoint(self.trainer.model,
                                                    self.trainer.seg_optimizer,
                                                    self.trainer.fc_optimizer,
                                                    self.trainer.aux_fc, epoch,
                                                    epoch + 1)
            else:
                self.checkpoint.save_aux_checkpoint(self.trainer.model,
                                                    self.trainer.seg_optimizer,
                                                    self.trainer.fc_optimizer,
                                                    self.trainer.aux_fc, epoch)
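
Each pass through the loop above appends one tab-separated row to log.txt: the epoch index followed by six values (train_error, train_loss, val_error, val_loss, train5_error, val5_error) per auxiliary classifier. As an assumption-based illustration of that layout, the hypothetical helper below parses such a row back into per-classifier records; it is not part of the project.

def parse_log_row(row):
    """Split one log.txt row into its epoch index and per-classifier metrics."""
    fields = row.strip().split("\t")
    epoch = int(fields[0])
    names = ("train_error", "train_loss", "val_error",
             "val_loss", "train5_error", "val5_error")
    values = [float(v) for v in fields[1:]]
    # Group the flat value list into one dict per auxiliary classifier.
    records = [dict(zip(names, values[i:i + 6]))
               for i in range(0, len(values), 6)]
    return epoch, records


# Illustrative values only.
epoch, records = parse_log_row("0\t35.0\t1.2\t32.1\t1.3\t14.1\t12.0\t")
assert epoch == 0 and records[0]["val_error"] == 32.1
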
Example #3
    def fine_tuning(self):
        """
        Conduct network-wise fine-tuning after channel selection
        """

        # top-1/top-5 errors are percentages, so 100 is the worst case
        best_top1 = 100
        best_top5 = 100

        start_epoch = 0
        if self.epoch != 0:
            start_epoch = self.epoch + 1
            self.epoch = 0

        for epoch in range(start_epoch, self.settings.n_epochs):
            train_error, train_loss, train5_error = self.network_wise_trainer.train(
                epoch)
            val_error, val_loss, val5_error = self.network_wise_trainer.val(
                epoch)

            # write log
            log_str = "{:d}\t".format(epoch)
            for i in range(len(train_error)):
                log_str += "{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t".format(
                    train_error[i], train_loss[i], val_error[i], val_loss[i],
                    train5_error[i], val5_error[i])
            write_log(self.settings.save_path, 'log.txt', log_str)

            # save model and checkpoint
            best_flag = False
            if best_top1 >= val_error[-1]:
                best_top1 = val_error[-1]
                best_top5 = val5_error[-1]
                best_flag = True

            if best_flag:
                self.checkpoint.save_aux_model(
                    self.network_wise_trainer.model,
                    self.network_wise_trainer.aux_fc)

            self.logger.info(
                "|===>Best Result is: Top1 Error: {:f}, Top5 Error: {:f}\n".
                format(best_top1, best_top5))
            self.logger.info(
                "|===>Best Result is: Top1 Accuracy: {:f}, Top5 Accuracy: {:f}\n"
                .format(100 - best_top1, 100 - best_top5))

            if "imagenet" in self.settings.dataset:
                self.checkpoint.save_aux_checkpoint(
                    self.network_wise_trainer.model,
                    self.network_wise_trainer.seg_optimizer,
                    self.network_wise_trainer.fc_optimizer,
                    self.network_wise_trainer.aux_fc, epoch, epoch + 1)
            else:
                self.checkpoint.save_aux_checkpoint(
                    self.network_wise_trainer.model,
                    self.network_wise_trainer.seg_optimizer,
                    self.network_wise_trainer.fc_optimizer,
                    self.network_wise_trainer.aux_fc, epoch)
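
Example #3 repeats the resume rule used in Example #2: a non-zero self.epoch is taken as the last completed epoch of a loaded checkpoint, so training restarts one epoch later. A tiny standalone sketch of that rule, with loaded_epoch as an illustrative stand-in for self.epoch:

def resume_start_epoch(loaded_epoch):
    """Return the epoch to resume from; 0 means training starts from scratch."""
    # The checkpoint records the last completed epoch, so continue one later.
    return loaded_epoch + 1 if loaded_epoch != 0 else 0


assert resume_start_epoch(0) == 0  # fresh run starts at epoch 0
assert resume_start_epoch(7) == 8  # a run resumed after epoch 7 continues with epoch 8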