Example #1
import time

from progress.bar import Bar
from torch.autograd import Variable

import loss_funcs
import utils


def val(train_loader,
        model,
        input_n=20,
        output_n=10,
        is_cuda=False,
        dim_used=[]):
    # t_l = utils.AccumLoss()
    t_e = utils.AccumLoss()
    t_3d = utils.AccumLoss()

    model.eval()
    st = time.time()
    bar = Bar('>>>', fill='>', max=len(train_loader))
    for i, (inputs, targets, all_seq) in enumerate(train_loader):
        bt = time.time()

        if is_cuda:
            inputs = Variable(inputs.cuda()).float()
            # targets = Variable(targets.cuda(non_blocking=True)).float()
            all_seq = Variable(all_seq.cuda()).float()

        outputs = model(inputs)
        n = outputs.shape[0]
        outputs = outputs.view(n, -1)
        # targets = targets.view(n, -1)

        # loss = loss_funcs.sen_loss(outputs, all_seq, dim_used)

        n, _, _ = all_seq.data.shape
        m_err = loss_funcs.mpjpe_error(outputs, all_seq, input_n, dim_used,
                                       input_n + output_n)
        e_err = loss_funcs.euler_error(outputs, all_seq, input_n, dim_used,
                                       input_n + output_n)

        # t_l.update(loss.cpu().data.numpy()[0] * n, n)
        t_e.update(e_err.cpu().data.numpy() * n, n)
        t_3d.update(m_err.cpu().data.numpy() * n, n)

        bar.suffix = '{}/{}|batch time {:.4f}s|total time {:.2f}s'.format(
            i + 1, len(train_loader),
            time.time() - bt,
            time.time() - st)
        bar.next()
    bar.finish()
    return t_e.avg, t_3d.avg
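
All three examples rely on utils.AccumLoss, a project helper that is not shown on this page. A minimal sketch of the running-average accumulator these loops appear to assume (a hypothetical reconstruction; the real class may differ):

class AccumLoss(object):
    """Keeps a running sum and weighted average of a loss value."""

    def __init__(self):
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        # The callers above pass err * n, so avg is the per-sample mean.
        self.val = val
        self.sum += val
        self.count += n
        self.avg = self.sum / self.count

With that helper in place, a call to val might look like the following, where val_loader, model, and dim_used are hypothetical project objects:

e_avg, m_avg = val(val_loader, model, input_n=20, output_n=10,
                   is_cuda=True, dim_used=dim_used)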
Example #2
    def train(self, train_loader, dataset='h3.6m', input_n=20, dct_n=20, lr_now=None, cartesian=False,
              lambda_=0.01, max_norm=True, dim_used=[]):
        t_l = utils.AccumLoss()
        t_l_joint = utils.AccumLoss()
        t_l_vlb = utils.AccumLoss()
        t_l_latent = utils.AccumLoss()
        t_e = utils.AccumLoss()
        t_3d = utils.AccumLoss()

        self.model.train()
        st = time.time()
        bar = Bar('>>>', fill='>', max=len(train_loader))
        for i, (inputs, targets, all_seq) in enumerate(train_loader):

            # skip the last batch if it has only one sample (batch_norm layers fail on size-1 batches)
            batch_size = inputs.shape[0]
            if batch_size == 1:
                continue

            bt = time.time()
            if self.is_cuda:
                inputs = Variable(inputs.cuda()).float()
                targets = Variable(targets.cuda(non_blocking=True)).float()
                all_seq = Variable(all_seq.cuda(non_blocking=True)).float()

            outputs, reconstructions, log_var, z = self.model(inputs.float())
            KL = self.model.KL
            n = outputs.shape[0]
            outputs = outputs.view(n, -1)

            loss, joint_loss, vlb, latent_loss = loss_funcs.sen_loss(outputs, all_seq, dim_used, dct_n, inputs,
                                                                     cartesian, lambda_, KL, reconstructions, log_var)

            # Log the per-iteration losses to CSV (write the header once, then append)
            ret_log = np.array([i, loss.cpu().data.numpy(), joint_loss.cpu().data.numpy(), vlb.cpu().data.numpy(),
                                latent_loss.cpu().data.numpy()])
            df = pd.DataFrame(np.expand_dims(ret_log, axis=0))
            if i == 0:
                head = ['iteration', 'loss', 'joint_loss', 'vlb', 'latent_loss']
                df.to_csv('losses.csv', header=head, index=False)
            else:
                with open('losses.csv', 'a') as f:
                    df.to_csv(f, header=False, index=False)

            # calculate loss and backward
            self.optimizer.zero_grad()
            loss.backward()
            if max_norm:
                nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1)
            self.optimizer.step()
            n, _, _ = all_seq.data.shape

            if dataset == 'h3.6m':
                # 3d error
                m_err = loss_funcs.mpjpe_error(outputs, all_seq, input_n, dim_used, dct_n)
                # angle space error
                e_err = loss_funcs.euler_error(outputs, all_seq, input_n, dim_used, dct_n)
            elif dataset == 'cmu_mocap':
                m_err = loss_funcs.mpjpe_error_cmu(outputs, all_seq, input_n, dim_used=dim_used, dct_n=dct_n)
                e_err = loss_funcs.euler_error(outputs, all_seq, input_n, dim_used=dim_used, dct_n=dct_n)
            elif dataset == 'cmu_mocap_3d':
                m_err = loss
                e_err = loss
            else:
                raise ValueError('unknown dataset: {}'.format(dataset))

            # update the training loss
            t_l.update(loss.cpu().data.numpy() * n, n)
            t_l_joint.update(joint_loss.cpu().data.numpy() * n, n)
            t_l_vlb.update(vlb.cpu().data.numpy() * n, n)
            t_l_latent.update(latent_loss.cpu().data.numpy() * n, n)
            t_e.update(e_err.cpu().data.numpy() * n, n)
            t_3d.update(m_err.cpu().data.numpy() * n, n)

            bar.suffix = '{}/{}|batch time {:.4f}s|total time {:.2f}s'.format(i + 1, len(train_loader), time.time() - bt,
                                                                             time.time() - st)
            bar.next()
        bar.finish()
        print("\nJoint loss: ", t_l_joint.avg)
        print("vlb: ", t_l_vlb.avg)
        print("Latent loss: ", t_l_latent.avg)
        print("loss: ", t_l.avg)
        return lr_now, t_l.avg, t_l_joint.avg, t_l_vlb.avg, t_l_latent.avg, t_e.avg, t_3d.avg
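
A hedged driver sketch for this method: Trainer is a hypothetical name for the class it belongs to, assumed to own self.model, self.optimizer, and self.is_cuda, with a model that exposes a KL attribute and returns (outputs, reconstructions, log_var, z), as the method requires. train_loader, dim_used, and num_epochs come from the project.

trainer = Trainer(model, optimizer, is_cuda=True)
for epoch in range(num_epochs):
    lr_now, l_avg, l_joint, l_vlb, l_latent, e_avg, m_avg = trainer.train(
        train_loader, dataset='h3.6m', input_n=20, dct_n=20,
        lr_now=1e-3, cartesian=False, lambda_=0.01,
        max_norm=True, dim_used=dim_used)
    print('epoch {}: loss {:.4f} | euler {:.4f} | mpjpe {:.4f}'.format(
        epoch, l_avg, e_avg, m_avg))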
Example #3
import time

import torch.nn as nn
from progress.bar import Bar
from torch.autograd import Variable

import loss_funcs
import utils


def train(train_loader,
          model,
          optimizer,
          input_n=20,
          dct_n=20,
          lr_now=None,
          max_norm=True,
          is_cuda=False,
          dim_used=[]):
    t_l = utils.AccumLoss()
    t_e = utils.AccumLoss()
    t_3d = utils.AccumLoss()

    model.train()
    st = time.time()
    bar = Bar('>>>', fill='>', max=len(train_loader))
    for i, (inputs, targets, all_seq) in enumerate(train_loader):

        # skip the last batch if it has only one sample (batch_norm layers fail on size-1 batches)
        batch_size = inputs.shape[0]
        if batch_size == 1:
            continue

        bt = time.time()
        if is_cuda:
            inputs = Variable(inputs.cuda()).float()
            # targets = Variable(targets.cuda(non_blocking=True)).float()
            all_seq = Variable(all_seq.cuda(non_blocking=True)).float()

        outputs = model(inputs)
        n = outputs.shape[0]
        outputs = outputs.view(n, -1)
        # targets = targets.view(n, -1)

        loss = loss_funcs.sen_loss(outputs, all_seq, dim_used, dct_n)

        # calculate loss and backward
        optimizer.zero_grad()
        loss.backward()
        if max_norm:
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
        optimizer.step()
        n, _, _ = all_seq.data.shape

        # 3d error
        m_err = loss_funcs.mpjpe_error(outputs, all_seq, input_n, dim_used,
                                       dct_n)

        # angle space error
        e_err = loss_funcs.euler_error(outputs, all_seq, input_n, dim_used,
                                       dct_n)

        # update the training loss (the losses are 0-dim tensors, so no [0] indexing)
        t_l.update(loss.cpu().data.numpy() * n, n)
        t_e.update(e_err.cpu().data.numpy() * n, n)
        t_3d.update(m_err.cpu().data.numpy() * n, n)

        bar.suffix = '{}/{}|batch time {:.4f}s|total time {:.2f}s'.format(
            i + 1, len(train_loader),
            time.time() - bt,
            time.time() - st)
        bar.next()
    bar.finish()
    return lr_now, t_l.avg, t_e.avg, t_3d.avg
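
One way to drive this function, sketched under the assumption that model, train_loader, dim_used, and num_epochs are defined elsewhere in the project:

import torch
import torch.optim as optim

optimizer = optim.Adam(model.parameters(), lr=5e-4)
for epoch in range(num_epochs):
    lr_now, l_avg, e_avg, m_avg = train(train_loader, model, optimizer,
                                        input_n=20, dct_n=20, lr_now=5e-4,
                                        max_norm=True,
                                        is_cuda=torch.cuda.is_available(),
                                        dim_used=dim_used)
    print('epoch {}: loss {:.4f} | euler {:.4f} | 3d {:.4f}'.format(
        epoch, l_avg, e_avg, m_avg))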