Example 1
    def train(self, epoch):
        self.model.train()
        train_data_time = Timer()   # time spent fetching each batch
        train_batch_time = Timer()  # time spent in each optimization step
        train_data_time.tic()
        for batch_idx, (data, target) in enumerate(self.train_dataloader):
            train_data_time.toc()

            train_batch_time.tic()
            loss, output = self.step_feedfwd(
                data,
                self.model,
                target=target,
                criterion=self.train_criterion,
                optim=self.optimizer,
                train=True
            )

            t_loss, q_loss = self.get_result_loss(output, target)
            train_batch_time.toc()

            if batch_idx % self.config.print_freq == 0:
                n_itr = (epoch - 1) * len(self.train_dataloader) + batch_idx
                print(
                    'Train {:s}: Epoch {:d}\t'
                    'Batch {:d}/{:d}\t'
                    'Data time {:.4f} ({:.4f})\t'
                    'Batch time {:.4f} ({:.4f})\t'
                    'Loss {:f}'.format(
                        self.config.experiment, epoch,
                        batch_idx, len(self.train_dataloader) - 1,
                        train_data_time.last_time(), train_data_time.avg_time(),
                        train_batch_time.last_time(), train_batch_time.avg_time(),
                        loss
                    )
                )
                if self.config.tf:
                    self.writer.add_scalars(self.loss_win, {
                        "training_loss": loss
                    }, n_itr)
                    self.writer.add_scalars(self.result_win, {
                        "training_t_loss": t_loss.item(),
                        "training_q_loss": q_loss.item()
                    }, n_itr)
                    if self.n_criterion_params:
                        # also log any learnable parameters of the loss
                        for name, v in self.train_criterion.named_parameters():
                            v = v.data.cpu().numpy()[0]
                            self.writer.add_scalars(self.criterion_params_win, {
                                name: v
                            }, n_itr)

            train_data_time.tic()
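
Both examples call helpers that are not shown on this page: `Timer`, `AverageMeter`, `self.step_feedfwd`, and `self.get_result_loss`. The sketch below is a hypothetical reconstruction of `Timer` and `step_feedfwd`, inferred only from how they are called above; the project's actual implementations may differ (for instance, `step_feedfwd` is a method in the examples, not a free function).

import time

import torch


class Timer:
    """Accumulates tic()/toc() intervals; reports the last and the mean one."""

    def __init__(self):
        self.start = 0.0
        self.last = 0.0
        self.total = 0.0
        self.calls = 0

    def tic(self):
        self.start = time.time()

    def toc(self):
        self.last = time.time() - self.start
        self.total += self.last
        self.calls += 1

    def last_time(self):
        return self.last

    def avg_time(self):
        return self.total / max(self.calls, 1)


def step_feedfwd(data, model, target=None, criterion=None, optim=None,
                 train=True):
    """One forward pass; when train=True, also one backward/optimizer step."""
    with torch.set_grad_enabled(train):
        output = model(data)
        loss = criterion(output, target) if criterion is not None else None
    if train and loss is not None:
        optim.zero_grad()
        loss.backward()
        optim.step()
    return (loss.item() if loss is not None else None), output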
Example 2
    def validate(self, epoch):
        val_batch_time = Timer()  # time spent in each forward step
        val_loss = AverageMeter()
        t_loss = AverageMeter()
        q_loss = AverageMeter()
        self.model.eval()
        val_data_time = Timer()  # time spent fetching each batch
        val_data_time.tic()
        for batch_idx, (data, target) in enumerate(self.val_dataloader):
            val_data_time.toc()

            val_batch_time.tic()
            loss, output = self.step_feedfwd(
                data,
                self.model,
                target=target,
                criterion=self.val_criterion,
                optim=self.optimizer,  # has no effect when train=False
                train=False)
            # output shape: N x T x 7
            val_batch_time.toc()
            val_loss.update(loss)

            t_loss_batch, q_loss_batch = self.get_result_loss(output, target)
            t_loss.update(t_loss_batch.item())
            q_loss.update(q_loss_batch.item())

            if batch_idx % self.config.print_freq == 0:
                print(
                    'Val {:s}: Epoch {:d}\t'
                    'Batch {:d}/{:d}\t'
                    'Data time {:.4f} ({:.4f})\t'
                    'Batch time {:.4f} ({:.4f})\t'
                    'Loss {:f}'.format(self.config.experiment, epoch,
                                       batch_idx,
                                       len(self.val_dataloader) - 1,
                                       val_data_time.last_time(),
                                       val_data_time.avg_time(),
                                       val_batch_time.last_time(),
                                       val_batch_time.avg_time(), loss))

            val_data_time.tic()

        print('Val {:s}: Epoch {:d}, val_loss {:f}'.format(
            self.config.experiment, epoch, val_loss.average()))
        print('Mean error in translation: {:3.2f} m\n'
              'Mean error in rotation: {:3.2f} degree'.format(
                  t_loss.average(), q_loss.average()))

        if self.config.tf:
            # global training iteration used as the x-axis for logging
            n_itr = (epoch - 1) * len(self.train_dataloader)
            self.writer.add_scalars(self.loss_win,
                                    {"val_loss": val_loss.average()}, n_itr)
            self.writer.add_scalars(self.result_win, {
                "val_t_loss": t_loss.average(),
                "val_q_loss": q_loss.average()
            }, n_itr)

        return t_loss.average()
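
`validate` also depends on `AverageMeter` and on `get_result_loss`, which turns the network output into a mean translation error in meters and a mean rotation error in degrees. Below is a minimal sketch of both, consistent with the calls above but otherwise assumed; in particular, the 7-D pose layout [x, y, z, qw, qx, qy, qz] is a guess. Note also that `add_scalars(main_tag, tag_scalar_dict, global_step)` matches the standard `torch.utils.tensorboard.SummaryWriter` API, so `self.loss_win` and `self.result_win` are just tag strings here.

import math

import torch
import torch.nn.functional as F


class AverageMeter:
    """Running average of the scalar values passed to update()."""

    def __init__(self):
        self.sum = 0.0
        self.count = 0

    def update(self, val, n=1):
        self.sum += val * n
        self.count += n

    def average(self):
        return self.sum / max(self.count, 1)


def get_result_loss(output, target):
    """Mean translation error (m) and rotation error (deg) for a batch of
    7-D poses, assumed to be laid out as [x, y, z, qw, qx, qy, qz]."""
    output = output.reshape(-1, 7)
    target = target.reshape(-1, 7)
    # Euclidean distance between predicted and ground-truth positions
    t_loss = torch.norm(output[:, :3] - target[:, :3], dim=1).mean()
    # angular distance between unit quaternions: 2 * acos(|<q1, q2>|)
    q_pred = F.normalize(output[:, 3:], dim=1)
    q_targ = F.normalize(target[:, 3:], dim=1)
    dot = (q_pred * q_targ).sum(dim=1).abs().clamp(max=1.0)
    q_loss = (2 * torch.acos(dot) * 180.0 / math.pi).mean()
    return t_loss, q_loss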