def main(opt):
    """Evaluate a pretrained AttModel per H3.6M action and dump errors to CSV.

    Loads the best checkpoint from ``opt.ckpt``, runs the test split of each
    action separately, and writes one CSV row of per-frame errors per action
    plus a trailing ``average`` row via ``log.save_csv_log``.
    """
    print('>>> create models')
    in_features = 66  # presumably 22 joints x 3 coordinates -- TODO confirm
    d_model = opt.d_model
    kernel_size = opt.kernel_size
    net_pred = AttModel.AttModel(in_features=in_features,
                                 kernel_size=kernel_size,
                                 d_model=d_model,
                                 num_stage=opt.num_stage,
                                 dct_n=opt.dct_n)
    net_pred.cuda()
    model_path_len = '{}/ckpt_best.pth.tar'.format(opt.ckpt)
    print(">>> loading ckpt len from '{}'".format(model_path_len))
    ckpt = torch.load(model_path_len)
    net_pred.load_state_dict(ckpt['state_dict'])
    print(">>> ckpt len loaded (epoch: {} | err: {})".format(
        ckpt['epoch'], ckpt['err']))

    print('>>> loading datasets')

    # CSV header: action name followed by one column per predicted frame.
    head = np.array(['act'])
    for k in range(1, opt.output_n + 1):
        head = np.append(head, [f'#{k}'])

    acts = [
        "walking", "eating", "smoking", "discussion", "directions", "greeting",
        "phoning", "posing", "purchases", "sitting", "sittingdown",
        "takingphoto", "waiting", "walkingdog", "walkingtogether"
    ]
    # One row of per-frame errors per action; the extra last row holds the mean.
    errs = np.zeros([len(acts) + 1, opt.output_n])
    for i, act in enumerate(acts):
        test_dataset = datasets.Datasets(opt, split=2, actions=[act])
        print('>>> Testing dataset length: {:d}'.format(len(test_dataset)))
        test_loader = DataLoader(test_dataset,
                                 batch_size=opt.test_batch_size,
                                 shuffle=False,
                                 num_workers=0,
                                 pin_memory=True)

        ret_test = run_model(net_pred,
                             is_train=3,
                             data_loader=test_loader,
                             opt=opt)
        print('testing error: {:.3f}'.format(ret_test['#1']))
        # Collect the per-frame errors in run_model's reporting order.
        errs[i] = np.array([ret_test[k] for k in ret_test])
    errs[-1] = np.mean(errs[:-1], axis=0)
    acts = np.expand_dims(np.array(acts + ["average"]), axis=1)
    # np.str was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # str is the documented replacement.
    value = np.concatenate([acts, errs.astype(str)], axis=1)
    log.save_csv_log(opt,
                     head,
                     value,
                     is_create=True,
                     file_name='test_pre_action')
# ---- Example 2 ----
# 0
def main(opt):
    """Evaluate the bundled pretrained model on hand-picked test sequences.

    The sequences to score are listed in ``seq_selected.csv`` as
    (action name, sequence index) rows. Errors are accumulated over the
    selected sequences of every action and written to CSV as two rows
    ('in50' / 'in100', one per input-length setting).
    """
    # Use the bundled pretrained checkpoint regardless of what opt carries.
    opt.ckpt = './checkpoint/pretrained/h36m_3d_in50_out10_dctn20/'
    print('>>> create models')
    net_pred = AttModel.AttModel(in_features=66,
                                 kernel_size=10,
                                 d_model=256,
                                 num_stage=12,
                                 dct_n=20)
    net_pred.cuda()
    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in net_pred.parameters()) / 1000000.0))

    model_path_len = './{}/ckpt_best.pth.tar'.format(opt.ckpt)
    print(">>> loading ckpt len from '{}'".format(model_path_len))
    ckpt = torch.load(model_path_len)
    net_pred.load_state_dict(ckpt['state_dict'])
    print(">>> ckpt len loaded (epoch: {} | err: {})".format(
        ckpt['epoch'], ckpt['err']))

    print('>>> loading datasets')

    acts = [
        "walking", "eating", "smoking", "discussion", "directions", "greeting",
        "phoning", "posing", "purchases", "sitting", "sittingdown",
        "takingphoto", "waiting", "walkingdog", "walkingtogether"
    ]
    # Map action name -> list of selected sequence indices.
    good_idx = pd.read_csv('./checkpoint/pretrained/seq_selected.csv').values
    sele = {}
    for row in good_idx:
        sele.setdefault(row[0], []).append(int(row[1]))

    # Running sum of per-frame errors (2 input-length settings) and the
    # number of sequences accumulated, for averaging at the end.
    err = np.zeros([2, opt.output_n])
    n = 0
    for act in acts:
        if act not in sele:
            continue
        test_dataset = datasets.Datasets(opt, split=2, actions=[act])
        print('>>> Testing dataset length: {:d}'.format(len(test_dataset)))
        test_loader = DataLoader(test_dataset,
                                 batch_size=1,
                                 shuffle=False,
                                 num_workers=0,
                                 pin_memory=True)
        # evaluation over the selected sequences only
        ret, nt = run_model(net_pred,
                            is_train=3,
                            data_loader=test_loader,
                            opt=opt,
                            good_idx=sele[act])
        err += ret
        n += nt
    err = err / n
    head = np.array(['input_n'])
    for k in range(1, opt.output_n + 1):
        head = np.append(head, [f'#{k}'])
    value = np.expand_dims(np.array(['in50', 'in100']), axis=1)
    # np.str was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # str is the documented replacement.
    value = np.concatenate([value, err.astype(str)], axis=1)
    log.save_csv_log(opt,
                     head,
                     value,
                     is_create=True,
                     file_name='test_in50_in100')
# ---- Example 3 ----
# 0
def main(opt):
    """Train or evaluate AttModelResNetVel on H3.6M with a Noam LR schedule.

    With ``opt.is_eval`` set, only the test split is run and its per-frame
    errors are dumped to CSV. Otherwise the model trains for ``opt.epoch``
    epochs: train/valid/test metrics are appended to a CSV every epoch and
    the checkpoint with the best validation error is kept.
    """
    start_epoch = 1
    print('>>> create models')
    in_features = opt.in_features  # 66
    d_model = opt.d_model
    kernel_size = opt.kernel_size
    net_pred = AttModel.AttModelResNetVel(in_features=in_features,
                                          kernel_size=kernel_size,
                                          d_model=d_model,
                                          num_stage=opt.num_stage,
                                          dct_n=opt.dct_n,
                                          device=opt.device)
    net_pred.to(device=opt.device)

    # Noam warmup schedule (model_size=128, factor=1.3, warmup=4000) wraps
    # Adam; the wrapper drives the learning rate, so Adam starts at lr=0.
    optimizer = model_utils.NoamOpt(
        128, 1.3, 4000,
        torch.optim.Adam(net_pred.parameters(),
                         lr=0,
                         betas=(0.9, 0.98),
                         eps=1e-9))
    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in net_pred.parameters()) / 1000000.0))

    if opt.is_load or opt.is_eval:
        model_path_len = './{}/ckpt_best.pth.tar'.format(opt.ckpt)
        print(">>> loading ckpt len from '{}'".format(model_path_len))
        ckpt = torch.load(model_path_len)
        start_epoch = ckpt['epoch'] + 1
        net_pred.load_state_dict(ckpt['state_dict'])
        # NOTE(review): the optimizer state is not restored here, so a
        # resumed run restarts the Noam schedule from step 0 -- confirm
        # this is intended.
        print(">>> ckpt len loaded (epoch: {} | err: {})".format(
            ckpt['epoch'], ckpt['err']))

    print('>>> loading datasets')

    if not opt.is_eval:
        dataset = datasets.Datasets(opt, split=0)
        print('>>> Training dataset length: {:d}'.format(len(dataset)))
        data_loader = DataLoader(dataset,
                                 batch_size=opt.batch_size,
                                 shuffle=True,
                                 num_workers=0,
                                 pin_memory=True)
        valid_dataset = datasets.Datasets(opt, split=1)
        print('>>> Validation dataset length: {:d}'.format(
            len(valid_dataset)))
        valid_loader = DataLoader(valid_dataset,
                                  batch_size=opt.test_batch_size,
                                  shuffle=True,
                                  num_workers=0,
                                  pin_memory=True)

    test_dataset = datasets.Datasets(opt, split=2)
    print('>>> Testing dataset length: {:d}'.format(len(test_dataset)))
    test_loader = DataLoader(test_dataset,
                             batch_size=opt.test_batch_size,
                             shuffle=False,
                             num_workers=0,
                             pin_memory=True)

    # evaluation only: dump the test metrics and return
    if opt.is_eval:
        ret_test = run_model(net_pred,
                             is_train=3,
                             data_loader=test_loader,
                             opt=opt)
        ret_log = np.array([])
        head = np.array([])
        for k in ret_test.keys():
            ret_log = np.append(ret_log, [ret_test[k]])
            head = np.append(head, [k])
        log.save_csv_log(opt,
                         head,
                         ret_log,
                         is_create=True,
                         file_name='test_walking')
    # training
    if not opt.is_eval:
        err_best = 1000
        for epo in range(start_epoch, opt.epoch + 1):
            is_best = False
            # The Noam wrapper owns the LR; read it back for logging only.
            lr_now = optimizer.rate()
            print('>>> training epoch: {:d}'.format(epo))
            ret_train = run_model(net_pred,
                                  optimizer,
                                  is_train=0,
                                  data_loader=data_loader,
                                  epo=epo,
                                  opt=opt)
            print('train error: {:.3f}'.format(ret_train['m_p3d_h36']))
            ret_valid = run_model(net_pred,
                                  is_train=1,
                                  data_loader=valid_loader,
                                  opt=opt,
                                  epo=epo)
            print('validation error: {:.3f}'.format(ret_valid['m_p3d_h36']))
            ret_test = run_model(net_pred,
                                 is_train=3,
                                 data_loader=test_loader,
                                 opt=opt,
                                 epo=epo)
            print('testing error: {:.3f}'.format(ret_test['#1']))
            # One CSV row per epoch: epoch, lr, then every train / valid /
            # test metric (valid/test keys prefixed to stay distinct).
            ret_log = np.array([epo, lr_now])
            head = np.array(['epoch', 'lr'])
            for k in ret_train.keys():
                ret_log = np.append(ret_log, [ret_train[k]])
                head = np.append(head, [k])
            for k in ret_valid.keys():
                ret_log = np.append(ret_log, [ret_valid[k]])
                head = np.append(head, ['valid_' + k])
            for k in ret_test.keys():
                ret_log = np.append(ret_log, [ret_test[k]])
                head = np.append(head, ['test_' + k])
            log.save_csv_log(opt, head, ret_log, is_create=(epo == 1))
            if ret_valid['m_p3d_h36'] < err_best:
                err_best = ret_valid['m_p3d_h36']
                is_best = True
            # NOTE(review): optimizer here is the NoamOpt wrapper; verify it
            # exposes state_dict(), otherwise this save will raise.
            log.save_ckpt(
                {
                    'epoch': epo,
                    'lr': lr_now,
                    'err': ret_valid['m_p3d_h36'],
                    'state_dict': net_pred.state_dict(),
                    'optimizer': optimizer.state_dict()
                },
                is_best=is_best,
                opt=opt)
# ---- Example 4 ----
# 0
def main(opt):
    """Visualize long-horizon predictions of a pretrained AttModelRef4.

    Runs the model autoregressively (``itera`` blocks of ``opt.output_n``
    frames) on every action's evaluation split, reconstructs full 3D poses,
    and animates ground truth vs. prediction with matplotlib.
    """
    is_cuda = torch.cuda.is_available()

    # create model
    print(">>> creating model")
    itera = 50  # number of autoregressive prediction iterations

    model = AttModel.AttModelRef4(in_features=opt.in_features,
                                  kernel_size=opt.kernel_size,
                                  d_model=opt.d_model,
                                  num_stage=opt.num_stage,
                                  dct_n=opt.dct_n,
                                  device=opt.device)
    if is_cuda:
        model.cuda()
    # TODO: hard-coded local checkpoint path; make this configurable via opt.
    model_path_len = '/home/costa/Desktop/ckpt_best.pth.tar'
    print(">>> loading ckpt len from '{}'".format(model_path_len))
    ckpt = torch.load(model_path_len,
                      map_location=None if is_cuda else 'cpu')
    model.load_state_dict(ckpt['state_dict'])
    print(">>> ckpt len loaded (epoch: {} | err: {})".format(
        ckpt['epoch'], ckpt['err']))

    # data loading: one DataLoader per action
    print(">>> loading data")
    acts = data_utils.define_actions('all')
    test_data = dict()

    for act in acts:
        test_dataset = datasets.Datasets(opt=opt,
                                         actions=[act],
                                         split=1,
                                         itera=itera)
        test_data[act] = DataLoader(dataset=test_dataset,
                                    batch_size=opt.test_batch_size,
                                    shuffle=False,
                                    pin_memory=True)

    dim_used = test_dataset.dimensions_to_use
    print(">>> data loaded !")

    # Joints dropped from the model's input; when reconstructing full poses
    # their coordinates are copied from the paired joints in joint_equal.
    joint_to_ignore = np.array([16, 20, 23, 24, 28, 31])
    index_to_ignore = np.concatenate(
        (joint_to_ignore * 3, joint_to_ignore * 3 + 1,
         joint_to_ignore * 3 + 2))
    joint_equal = np.array([13, 19, 22, 13, 27, 30])
    index_to_equal = np.concatenate(
        (joint_equal * 3, joint_equal * 3 + 1, joint_equal * 3 + 2))

    model.eval()
    fig = plt.figure()
    # plt.gca(projection='3d') was removed in Matplotlib 3.5; add_subplot is
    # the supported way to create a 3D axes.
    ax = fig.add_subplot(projection='3d')
    with torch.no_grad():  # inference only; skip autograd bookkeeping
        for act in acts:
            for all_seq in test_data[act]:
                # torch.autograd.Variable has been a no-op since PyTorch 0.4;
                # plain tensors are sufficient.
                all_seq = all_seq.float()
                inputs = all_seq[:, :opt.input_n, dim_used]
                if is_cuda:
                    inputs = inputs.cuda()
                    all_seq = all_seq.cuda()

                outputs = model(inputs, opt.output_n, opt.input_n, itera=itera)

                # Rebuild full-dimensional sequences: write predictions into
                # the used dims, then fill ignored joints from their twins.
                pred = all_seq.clone()
                pred[:, opt.input_n:opt.input_n + opt.output_n * itera,
                     dim_used] = outputs[:, opt.kernel_size:, 0]
                pred[:, opt.input_n:opt.input_n + opt.output_n * itera,
                     index_to_ignore] = pred[:, opt.input_n:opt.input_n +
                                             opt.output_n * itera,
                                             index_to_equal]
                targ = all_seq
                pred = pred.cpu().data.numpy()
                targ = targ.cpu().data.numpy()
                # assumes each batch holds at least 8 sequences -- TODO confirm
                for k in range(8):
                    plt.cla()
                    figure_title = "action:{}, seq:{},".format(act, (k + 1))
                    viz.plot_predictions_from_3d(
                        targ[k,
                             opt.input_n:opt.input_n + opt.output_n * itera, :],
                        pred[k,
                             opt.input_n:opt.input_n + opt.output_n * itera, :],
                        fig, ax, figure_title)
                    plt.pause(1)