Example #1
    def print_loss_log(self, start_time, iters_per_epoch, e, i, loss):
        """
        Prints the loss, elapsed time, and estimated remaining time for the epoch
        """
        total_iter = self.num_epochs * iters_per_epoch
        cur_iter = e * iters_per_epoch + i

        # estimate the time left for the whole run and for the current epoch
        # from the average time per iteration observed so far
        elapsed = time.time() - start_time
        total_time = (total_iter - cur_iter) * elapsed / (cur_iter + 1)
        epoch_time = (iters_per_epoch - i) * elapsed / (cur_iter + 1)

        epoch_time = str(datetime.timedelta(seconds=epoch_time))
        total_time = str(datetime.timedelta(seconds=total_time))
        elapsed = str(datetime.timedelta(seconds=elapsed))

        log = "Elapsed {}/{} -- {}, Epoch [{}/{}], Iter [{}/{}], " \
              "loss: {:.4f}".format(elapsed,
                                    epoch_time,
                                    total_time,
                                    e + 1,
                                    self.num_epochs,
                                    i + 1,
                                    iters_per_epoch,
                                    loss)

        write_print(self.output_txt, log)
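
All of the snippets on this page call a write_print helper that is not included in the excerpts. Judging by the call sites it both prints a message and appends it to a log file; a minimal sketch under that assumption (the real helper may differ):

def write_print(path, text):
    # assumed behaviour: echo the message to stdout and append it to `path`
    with open(path, 'a') as f:
        f.write(text + '\n')
    print(text)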
Example #2
    def test(self):
        """
        Evaluates the performance of the model using the test dataset
        """
        top_1_correct, top_5_correct, total = self.eval(self.data_loader)
        log = "top_1_acc: {:.4f}--top_5_acc: {:.4f}".format(
            top_1_correct / total, top_5_correct / total)
        write_print(self.output_txt, log)
Example #3
    def load_pretrained_model(self):
        """
        Loads a pre-trained model from a .pth file
        """
        # weights are read from '{model_save_path}/{pretrained_model}.pth'
        self.model.load_state_dict(
            torch.load(
                os.path.join(self.model_save_path,
                             '{}.pth'.format(self.pretrained_model))))
        write_print(self.output_txt,
                    'loaded trained model {}'.format(self.pretrained_model))
Example #4
    def train_evaluate(self, e):
        """
        Evaluates the performance of the model using the train dataset
        """
        top_1_correct, top_5_correct, total = self.eval(self.data_loader)
        log = "Epoch [{}/{}]--top_1_acc: {:.4f}--top_5_acc: {:.4f}".format(
            e + 1, self.num_epochs, top_1_correct / total,
            top_5_correct / total)
        write_print(self.output_txt, log)
        return top_1_correct / total, top_5_correct / total
Example #5
    def print_network(self, model, name):
        """
        Prints the structure of the network and the total number of parameters
        """
        num_params = 0
        for p in model.parameters():
            num_params += p.numel()
        write_print(self.output_txt, name)
        write_print(self.output_txt, str(model))
        write_print(self.output_txt,
                    'The number of parameters: {}'.format(num_params))
Example #6
    # dataset
    # parser.add_argument('--data_path', type=str, default='../data/c256/')
    # parser.add_argument('--train_data_path', type=str,
    #                     default='caltech_256_60_train_nobg_norm.hdf5')
    # parser.add_argument('--train_x_key', type=str, default='train_x')
    # parser.add_argument('--train_y_key', type=str, default='train_y')
    # parser.add_argument('--test_data_path', type=str,
    #                     default='caltech_256_60_test_nobg_norm.hdf5')
    # parser.add_argument('--test_x_key', type=str, default='test_x')
    # parser.add_argument('--test_y_key', type=str, default='test_y')

    # path
    parser.add_argument('--model_save_path', type=str, default='./weights',
                        help='Path for saving weights')

    # epoch step size
    parser.add_argument('--loss_log_step', type=int, default=1)
    parser.add_argument('--model_save_step', type=int, default=1)
    parser.add_argument('--train_eval_step', type=int, default=1)

    config = parser.parse_args()

    args = vars(config)
    print(args)
    write_print('hello.txt', '------------ Options -------------')
    for k, v in args.items():
        write_print('hello.txt', '{}: {}'.format(str(k), str(v)))
    write_print('hello.txt', '-------------- End ----------------')

    # main(version, config)
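
The snippet above operates on a parser object that is created outside the shown lines. A minimal sketch of the assumed setup so the block can be read on its own (the description string is a placeholder, not taken from the source):

import argparse

# assumed setup: create the parser whose add_argument / parse_args calls
# appear in the excerpt above
parser = argparse.ArgumentParser(description='training options')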
Example #7
    def train(self):
        """
        Training process
        """
        self.losses = []
        self.top_1_acc = []
        self.top_5_acc = []

        iters_per_epoch = len(self.data_loader)

        # resume from a pretrained model if one exists; the checkpoint name
        # is assumed to encode the epoch at which it was saved
        if self.pretrained_model:
            start = int(self.pretrained_model.split('/')[-1])
        else:
            start = 0

        # start training
        start_time = time.time()
        for e in range(start, self.num_epochs):
            for i, (images, labels) in enumerate(tqdm(self.data_loader)):
                images = to_var(images, self.use_gpu)
                labels = to_var(torch.LongTensor(labels), self.use_gpu)

                loss = self.model_step(images, labels)

            # print out the loss log; loss and i come from the last batch of the epoch
            if (e + 1) % self.loss_log_step == 0:
                self.print_loss_log(start_time, iters_per_epoch, e, i, loss)
                self.losses.append((e, loss))

            # save model
            if (e + 1) % self.model_save_step == 0:
                self.save_model(e)

            # evaluate on train dataset
            # if (e + 1) % self.train_eval_step == 0:
            #     top_1_acc, top_5_acc = self.train_evaluate(e)
            #     self.top_1_acc.append((e, top_1_acc))
            #     self.top_5_acc.append((e, top_5_acc))

        # print losses
        write_print(self.output_txt, '\n--Losses--')
        for e, loss in self.losses:
            write_print(self.output_txt, str(e) + ' {:.4f}'.format(loss))

        # print top_1_acc
        write_print(self.output_txt, '\n--Top 1 accuracy--')
        for e, acc in self.top_1_acc:
            write_print(self.output_txt, str(e) + ' {:.4f}'.format(acc))

        # print top_5_acc
        write_print(self.output_txt, '\n--Top 5 accuracy--')
        for e, acc in self.top_5_acc:
            write_print(self.output_txt, str(e) + ' {:.4f}'.format(acc))
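
The training loop also relies on a to_var helper that is not shown. A common pattern for such a helper is to move a tensor to the GPU when one is requested and available; a sketch under that assumption (the actual implementation may differ, e.g. it may also wrap the tensor for autograd):

import torch

def to_var(x, use_gpu):
    # assumed behaviour: move the tensor to the GPU if requested
    if use_gpu and torch.cuda.is_available():
        x = x.cuda()
    return x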