Example 1
def train(train_loader,
          model,
          optimizer,
          input_n=10,
          dct_n=20,
          dim_used=[],
          lr_now=None,
          max_norm=True,
          is_cuda=False):
    sen_l = utils.AccumLoss()
    eul_err = utils.AccumLoss()

    model.train()
    st = time.time()
    bar = Bar('>>>', fill='>', max=len(train_loader))
    for i, (inputs, targets, all_seq) in enumerate(train_loader):

        batch_size = inputs.shape[0]
        # stop if the batch only has one sample (batch_norm layers cannot train on it)
        if batch_size == 1:
            break

        bt = time.time()

        if is_cuda:
            inputs = Variable(inputs.cuda()).float()
            # targets = Variable(targets.cuda(non_blocking=True)).float()
            all_seq = Variable(all_seq.cuda(non_blocking=True)).float()
        else:
            inputs = Variable(inputs).float()
            # targets = Variable(targets).float()
            all_seq = Variable(all_seq).float()
        outputs = model(inputs)
        loss = loss_funcs.sen_loss(outputs, all_seq, dim_used, dct_n)

        # calculate loss and backward
        optimizer.zero_grad()
        loss.backward()
        if max_norm:
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
        optimizer.step()
        n, seq_len, _ = all_seq.data.shape
        # update the training loss
        e_err = loss_funcs.euler_error(outputs, all_seq, input_n, dim_used,
                                       dct_n)
        sen_l.update(loss.cpu().data.numpy() * n * seq_len, n * seq_len)
        eul_err.update(e_err.cpu().data.numpy() * n * seq_len, n * seq_len)

        bar.suffix = '{}/{}|batch time {:.4f}s|total time{:.2f}s'.format(
            i, len(train_loader),
            time.time() - bt,
            time.time() - st)
        bar.next()
    bar.finish()
    return lr_now, sen_l.avg, eul_err.avg
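All of these examples accumulate a weighted running average through utils.AccumLoss, calling update(value * weight, weight) and reading .avg at the end. The actual helper is not part of this listing; a minimal sketch of the interface the calls above assume (the repository's own class may differ) is:

class AccumLoss(object):
    """Weighted running average; callers pass an already-weighted value."""

    def __init__(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val        # val is already multiplied by its weight n
        self.count += n
        self.avg = self.sum / self.count if self.count > 0 else 0.0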
Example 2
def train(train_loader,
          model,
          optimizer,
          lr_now=None,
          max_norm=True,
          is_cuda=False,
          dim_used=[],
          dct_n=15):
    t_l = utils.AccumLoss()
    t_e = utils.AccumLoss()
    t_3d = utils.AccumLoss()

    model.train()
    st = time.time()
    bar = Bar('>>>', fill='>', max=len(train_loader))
    for i, (inputs, targets, all_seq) in enumerate(train_loader):
        bt = time.time()

        if is_cuda:
            inputs = Variable(inputs.cuda()).float()
            all_seq = Variable(all_seq.cuda(non_blocking=True)).float()

        outputs = model(inputs)

        n, seq_len, dim_full_len = all_seq.data.shape
        dim_used = np.array(dim_used)
        dim_used_len = len(dim_used)

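        # Map the predicted DCT coefficients back to the time domain via the
        # inverse-DCT matrix, then reshape to (-1, 3) so that each row holds one
        # joint's (x, y, z) coordinates in a single frame.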
        _, idct_m = data_utils.get_dct_matrix(seq_len)
        idct_m = Variable(torch.from_numpy(idct_m)).float().cuda()
        outputs_t = outputs.view(-1, dct_n).transpose(0, 1)
        outputs_p3d = torch.matmul(idct_m[:, :dct_n],
                                   outputs_t).transpose(0, 1).contiguous()
        outputs_p3d = outputs_p3d.view(-1, dim_used_len, seq_len).transpose(
            1, 2).contiguous().view(-1, 3)

        targ_p3d = all_seq[:, :, dim_used].clone().contiguous().view(-1, 3)

        loss = torch.mean(torch.norm(targ_p3d - outputs_p3d, 2, 1))

        # calculate loss and backward
        optimizer.zero_grad()
        loss.backward()
        if max_norm:
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
        optimizer.step()
        t_l.update(loss.cpu().data.numpy() * n, n)

        bar.suffix = '{}/{}|batch time {:.4f}s|total time{:.2f}s'.format(
            i + 1, len(train_loader),
            time.time() - bt,
            time.time() - st)
        bar.next()
    bar.finish()
    return lr_now, t_l.avg, t_e.avg, t_3d.avg
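Example 2 depends on data_utils.get_dct_matrix for the DCT / inverse-DCT pair used above. That helper is not included in this listing; assuming it builds an orthonormal DCT-II basis of size seq_len (which is what the matmul with idct_m[:, :dct_n] suggests), a sketch could look like this:

import numpy as np

def get_dct_matrix(N):
    """Return an N x N orthonormal DCT-II matrix and its inverse (sketch only;
    the repository's data_utils.get_dct_matrix may be implemented differently)."""
    dct_m = np.zeros((N, N))
    for k in range(N):
        w = np.sqrt(1.0 / N) if k == 0 else np.sqrt(2.0 / N)
        for i in range(N):
            dct_m[k, i] = w * np.cos(np.pi * (i + 0.5) * k / N)
    idct_m = np.linalg.inv(dct_m)  # orthonormal basis, so this equals dct_m.T
    return dct_m, idct_m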
Example 3
def val(train_loader, model, input_n=10, dct_n=20, dim_used=[], is_cuda=False):
    t_err = utils.AccumLoss()

    model.eval()
    st = time.time()
    bar = Bar('>>>', fill='>', max=len(train_loader))
    for i, (inputs, targets, all_seq) in enumerate(train_loader):
        bt = time.time()

        if is_cuda:
            inputs = Variable(inputs.cuda()).float()
            # targets = Variable(targets.cuda(non_blocking=True)).float()
            all_seq = Variable(all_seq.cuda(non_blocking=True)).float()
        else:
            inputs = Variable(inputs).float()
            # targets = Variable(targets).float()
            all_seq = Variable(all_seq).float()
        outputs = model(inputs)
        e_err = loss_funcs.euler_error(outputs, all_seq, input_n, dim_used,
                                       dct_n)

        n, seq_len, _ = all_seq.data.shape
        # update the validation error
        t_err.update(e_err.cpu().data.numpy() * n * seq_len, n * seq_len)

        bar.suffix = '{}/{}|batch time {:.4f}s|total time{:.2f}s'.format(
            i, len(train_loader),
            time.time() - bt,
            time.time() - st)
        bar.next()
    bar.finish()
    return t_err.avg
Example 4
def val(train_loader, model, is_cuda=False, dim_used=[], dct_n=15):
    t_3d = utils.AccumLoss()

    model.eval()
    st = time.time()
    bar = Bar('>>>', fill='>', max=len(train_loader))
    for i, (inputs, targets, all_seq) in enumerate(train_loader):
        bt = time.time()

        if is_cuda:
            inputs = Variable(inputs.cuda()).float()
            all_seq = Variable(all_seq.cuda(non_blocking=True)).float()

        outputs = model(inputs)

        n, _, _ = all_seq.data.shape

        m_err = loss_funcs.mpjpe_error_p3d(outputs, all_seq, dct_n, dim_used)

        # update the validation error
        t_3d.update(m_err.cpu().data.numpy() * n, n)

        bar.suffix = '{}/{}|batch time {:.4f}s|total time{:.2f}s'.format(
            i + 1, len(train_loader),
            time.time() - bt,
            time.time() - st)
        bar.next()
    bar.finish()
    return t_3d.avg
Example 5
def train(train_loader, model, optimizer, lr_now=None, max_norm=True, is_cuda=False, dim_used=[], dct_n=15):
    t_l = utils.AccumLoss()

    model.train()
    st = time.time()
    bar = Bar('>>>', fill='>', max=len(train_loader))
    for i, (inputs, targets, all_seq) in enumerate(train_loader):

        batch_size = inputs.shape[0]
        if batch_size == 1:
            continue

        bt = time.time()
        if is_cuda:
            inputs = Variable(inputs.cuda()).float()
            all_seq = Variable(all_seq.cuda(non_blocking=True)).float()

        outputs = model(inputs)

        # calculate loss and backward
        loss = loss_funcs.mpjpe_error_p3d(outputs, all_seq, dct_n, dim_used)
        optimizer.zero_grad()
        loss.backward()
        if max_norm:
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
        optimizer.step()

        # update the training loss
        t_l.update(loss.cpu().data.numpy() * batch_size, batch_size)

        bar.suffix = '{}/{}|batch time {:.4f}s|total time{:.2f}s'.format(i+1, len(train_loader), time.time() - bt,
                                                                         time.time() - st)
        bar.next()
    bar.finish()
    return lr_now, t_l.avg
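Example 5 delegates its loss to loss_funcs.mpjpe_error_p3d, whose body is not shown here. Example 2 performs essentially the same computation inline (inverse-DCT of the predictions followed by the mean per-joint Euclidean distance), so a hedged re-implementation along those lines, reusing the imports the examples already assume, might be:

def mpjpe_error_p3d(outputs, all_seq, dct_n, dim_used):
    """Mean per-joint 3D position error; a sketch modelled on Example 2's
    inline computation, not necessarily the repository's exact function."""
    n, seq_len, _ = all_seq.data.shape
    dim_used = np.array(dim_used)

    # inverse-DCT matrix for sequences of length seq_len
    _, idct_m = data_utils.get_dct_matrix(seq_len)
    idct_m = Variable(torch.from_numpy(idct_m)).float().to(outputs.device)

    # back to the time domain, then one (x, y, z) joint per row
    outputs_t = outputs.view(-1, dct_n).transpose(0, 1)
    outputs_p3d = torch.matmul(idct_m[:, :dct_n], outputs_t).transpose(0, 1).contiguous()
    outputs_p3d = outputs_p3d.view(-1, len(dim_used), seq_len).transpose(1, 2).contiguous().view(-1, 3)

    targ_p3d = all_seq[:, :, dim_used].clone().contiguous().view(-1, 3)
    return torch.mean(torch.norm(targ_p3d - outputs_p3d, 2, 1))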
Example 6
def val(train_loader,
        model,
        input_n=20,
        output_n=10,
        is_cuda=False,
        dim_used=[]):
    # t_l = utils.AccumLoss()
    t_e = utils.AccumLoss()
    t_3d = utils.AccumLoss()

    model.eval()
    st = time.time()
    bar = Bar('>>>', fill='>', max=len(train_loader))
    for i, (inputs, targets, all_seq) in enumerate(train_loader):
        bt = time.time()

        if is_cuda:
            inputs = Variable(inputs.cuda()).float()
            # targets = Variable(targets.cuda(async=True)).float()
            all_seq = Variable(all_seq.cuda()).float()

        outputs = model(inputs)
        n = outputs.shape[0]
        outputs = outputs.view(n, -1)
        # targets = targets.view(n, -1)

        # loss = loss_funcs.sen_loss(outputs, all_seq, dim_used)

        n, _, _ = all_seq.data.shape
        m_err = loss_funcs.mpjpe_error(outputs, all_seq, input_n, dim_used,
                                       input_n + output_n)
        e_err = loss_funcs.euler_error(outputs, all_seq, input_n, dim_used,
                                       input_n + output_n)

        # t_l.update(loss.cpu().data.numpy()[0] * n, n)
        t_e.update(e_err.cpu().data.numpy() * n, n)
        t_3d.update(m_err.cpu().data.numpy() * n, n)

        bar.suffix = '{}/{}|batch time {:.4f}s|total time{:.2f}s'.format(
            i + 1, len(train_loader),
            time.time() - bt,
            time.time() - st)
        bar.next()
    bar.finish()
    return t_e.avg, t_3d.avg
Example 7
def train(train_loader,
          model,
          optimizer,
          lr_now=None,
          max_norm=True,
          is_cuda=False,
          dct_n=15,
          dim_used=[]):
    t_3d = utils.AccumLoss()

    model.train()
    st = time.time()
    bar = Bar('>>>', fill='>', max=len(train_loader))
    for i, (inputs, targets, all_seq) in enumerate(train_loader):
        batch_size = inputs.shape[0]
        if batch_size == 1:
            break
        bt = time.time()

        if is_cuda:
            inputs = Variable(inputs.cuda()).float()
            # targets = Variable(targets.cuda(non_blocking=True)).float()
            all_seq = Variable(all_seq.cuda(non_blocking=True)).float()
        else:
            inputs = Variable(inputs).float()
            # targets = Variable(targets).float()
            all_seq = Variable(all_seq).float()
        outputs = model(inputs)
        m_err = loss_funcs.mpjpe_error_3dpw(outputs, all_seq, dct_n, dim_used)

        # calculate loss and backward
        optimizer.zero_grad()
        m_err.backward()
        if max_norm:
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
        optimizer.step()

        n, seq_len, _ = all_seq.data.shape
        t_3d.update(m_err.cpu().data.numpy() * n * seq_len, n * seq_len)

        bar.suffix = '{}/{}|batch time {:.4f}s|total time{:.2f}s'.format(
            i, len(train_loader),
            time.time() - bt,
            time.time() - st)
        bar.next()
    bar.finish()
    return lr_now, t_3d.avg
Example 8
    def train(self, train_loader, dataset='h3.6m', input_n=20, dct_n=20, lr_now=None, cartesian=False,
              lambda_=0.01, max_norm=True, dim_used=[]):
        t_l = utils.AccumLoss()
        t_l_joint = utils.AccumLoss()
        t_l_vlb = utils.AccumLoss()
        t_l_latent = utils.AccumLoss()
        t_e = utils.AccumLoss()
        t_3d = utils.AccumLoss()

        self.model.train()
        st = time.time()
        bar = Bar('>>>', fill='>', max=len(train_loader))
        for i, (inputs, targets, all_seq) in enumerate(train_loader):

            # skip the last batch if it only has one sample (required by batch_norm layers)
            batch_size = inputs.shape[0]
            if batch_size == 1:
                continue

            bt = time.time()
            if self.is_cuda:
                inputs = Variable(inputs.cuda()).float()
                targets = Variable(targets.cuda(non_blocking=True)).float()
                all_seq = Variable(all_seq.cuda(non_blocking=True)).float()

            outputs, reconstructions, log_var, z = self.model(inputs.float())
            KL = self.model.KL
            n = outputs.shape[0]
            outputs = outputs.view(n, -1)

            loss, joint_loss, vlb, latent_loss = loss_funcs.sen_loss(outputs, all_seq, dim_used, dct_n, inputs,
                                                                     cartesian, lambda_, KL, reconstructions, log_var)

            # Print losses for epoch
            ret_log = np.array([i, loss.cpu().data.numpy(), joint_loss.cpu().data.numpy(), vlb.cpu().data.numpy(),
                                latent_loss.cpu().data.numpy()])
            df = pd.DataFrame(np.expand_dims(ret_log, axis=0))
            if i == 0:
                head = ['iteration', 'loss', 'joint_loss', 'vlb', 'latent_loss']
                df.to_csv('losses.csv', header=head, index=False)
            else:
                with open('losses.csv', 'a') as f:
                    df.to_csv(f, header=False, index=False)

            # calculate loss and backward
            self.optimizer.zero_grad()
            loss.backward()
            if max_norm:
                nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1)
            self.optimizer.step()
            n, _, _ = all_seq.data.shape

            if dataset == 'h3.6m':
                # 3d error
                m_err = loss_funcs.mpjpe_error(outputs, all_seq, input_n, dim_used, dct_n)
                # angle space error
                e_err = loss_funcs.euler_error(outputs, all_seq, input_n, dim_used, dct_n)
            elif dataset == 'cmu_mocap':
                m_err = loss_funcs.mpjpe_error_cmu(outputs, all_seq, input_n, dim_used=dim_used, dct_n=dct_n)
                e_err = loss_funcs.euler_error(outputs, all_seq, input_n, dim_used=dim_used, dct_n=dct_n)
            elif dataset == 'cmu_mocap_3d':
                m_err = loss
                e_err = loss

            # update the training loss
            t_l.update(loss.cpu().data.numpy() * n, n)
            t_l_joint.update(joint_loss.cpu().data.numpy() * n, n)
            t_l_vlb.update(vlb.cpu().data.numpy() * n, n)
            t_l_latent.update(latent_loss.cpu().data.numpy() * n, n)
            t_e.update(e_err.cpu().data.numpy() * n, n)
            t_3d.update(m_err.cpu().data.numpy() * n, n)

            bar.suffix = '{}/{}|batch time {:.4f}s|total time{:.2f}s'.format(i + 1, len(train_loader), time.time() - bt,
                                                                             time.time() - st)
            bar.next()
        bar.finish()
        print("\nJoint loss: ", t_l_joint.avg)
        print("vlb: ", t_l_vlb.avg)
        print("Latent loss: ", t_l_latent.avg)
        print("loss: ", t_l.avg)
        return lr_now, t_l.avg, t_l_joint.avg, t_l_vlb.avg, t_l_latent.avg, t_e.avg, t_3d.avg
Example 9
def train(train_loader,
          model,
          optimizer,
          input_n=20,
          dct_n=20,
          lr_now=None,
          max_norm=True,
          is_cuda=False,
          dim_used=[]):
    t_l = utils.AccumLoss()
    t_e = utils.AccumLoss()
    t_3d = utils.AccumLoss()

    model.train()
    st = time.time()
    bar = Bar('>>>', fill='>', max=len(train_loader))
    for i, (inputs, targets, all_seq) in enumerate(train_loader):

        # skip the last batch if it only has one sample (required by batch_norm layers)
        batch_size = inputs.shape[0]
        if batch_size == 1:
            continue

        bt = time.time()
        if is_cuda:
            inputs = Variable(inputs.cuda()).float()
            # targets = Variable(targets.cuda(non_blocking=True)).float()
            all_seq = Variable(all_seq.cuda(non_blocking=True)).float()

        outputs = model(inputs)
        n = outputs.shape[0]
        outputs = outputs.view(n, -1)
        # targets = targets.view(n, -1)

        loss = loss_funcs.sen_loss(outputs, all_seq, dim_used, dct_n)

        # calculate loss and backward
        optimizer.zero_grad()
        loss.backward()
        if max_norm:
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
        optimizer.step()
        n, _, _ = all_seq.data.shape

        # 3d error
        m_err = loss_funcs.mpjpe_error(outputs, all_seq, input_n, dim_used,
                                       dct_n)

        # angle space error
        e_err = loss_funcs.euler_error(outputs, all_seq, input_n, dim_used,
                                       dct_n)

        # update the training loss
        t_l.update(loss.cpu().data.numpy() * n, n)
        t_e.update(e_err.cpu().data.numpy() * n, n)
        t_3d.update(m_err.cpu().data.numpy() * n, n)

        bar.suffix = '{}/{}|batch time {:.4f}s|total time{:.2f}s'.format(
            i + 1, len(train_loader),
            time.time() - bt,
            time.time() - st)
        bar.next()
    bar.finish()
    return lr_now, t_l.avg, t_e.avg, t_3d.avg
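None of the examples include the outer loop that drives these functions. A minimal, hypothetical driver (MyModel, train_loader, test_loader and dim_used are placeholders, and the calls follow Example 5's train and Example 4's val signatures) would look roughly like:

import torch
import torch.optim as optim

model = MyModel()                      # placeholder: any nn.Module with the expected output shape
optimizer = optim.Adam(model.parameters(), lr=5e-4)
is_cuda = torch.cuda.is_available()
if is_cuda:
    model = model.cuda()

for epoch in range(50):
    # one pass over the training data (Example 5's signature)
    lr_now, train_loss = train(train_loader, model, optimizer, lr_now=5e-4,
                               max_norm=True, is_cuda=is_cuda,
                               dim_used=dim_used, dct_n=15)
    # evaluation pass (Example 4's signature)
    val_err = val(test_loader, model, is_cuda=is_cuda, dim_used=dim_used, dct_n=15)
    print('epoch {}: train loss {:.4f} | val 3d error {:.4f}'.format(epoch, train_loss, val_err))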