Example #1
def main(opt):
    is_cuda = torch.cuda.is_available()
    desired_acts = ['eating', 'posing', 'sitting', 'walkingdog']
    # create model
    print(">>> creating model")
    input_n = opt.input_n
    output_n = opt.output_n
    dct_n = opt.dct_n
    # calculate step size for auto-regression based on the number of input frames and DCT coefficients
    stepsize = dct_n - input_n
    sample_rate = opt.sample_rate
    model = nnmodel.GCN(input_feature=(input_n + stepsize),
                        hidden_feature=opt.linear_size,
                        p_dropout=opt.dropout,
                        num_stage=opt.num_stage,
                        node_n=48)
    if is_cuda:
        model.cuda()
    model_path_len = "checkpoint/pretrained/h36m_in{}_out{}_dctn{}.pth.tar".format(
        input_n, stepsize, dct_n)
    print(">>> loading ckpt len from '{}'".format(model_path_len))
    if is_cuda:
        ckpt = torch.load(model_path_len)
    else:
        ckpt = torch.load(model_path_len, map_location='cpu')
    err_best = ckpt['err']
    start_epoch = ckpt['epoch']
    model.load_state_dict(ckpt['state_dict'])
    print(">>> ckpt len loaded (epoch: {} | err: {})".format(
        start_epoch, err_best))

    # data loading
    print(">>> loading data")
    acts = data_utils.define_actions('all')
    test_data = dict()
    for act in acts:
        test_dataset = H36motion(path_to_data=opt.data_dir,
                                 actions=act,
                                 input_n=input_n,
                                 output_n=output_n,
                                 dct_n=dct_n,
                                 split=1,
                                 sample_rate=sample_rate)
        test_data[act] = DataLoader(dataset=test_dataset,
                                    batch_size=opt.test_batch,
                                    shuffle=False,
                                    num_workers=opt.job,
                                    pin_memory=True)
    dim_used = test_dataset.dim_used
    print(">>> data loaded !")
    model.eval()
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    # number of auto-regression iterations to perform
    iterations = int(output_n / stepsize)
    print('iterations: {}'.format(iterations))
    for act in acts:
        for i, (_, targets, all_seq) in enumerate(test_data[act]):
            all_seq = Variable(all_seq).float()
            dim_used_len = len(dim_used)
            if is_cuda:
                all_seq = all_seq.cuda()
            dct_m_in, _ = data_utils.get_dct_matrix(dct_n)
            dct_m_in = Variable(torch.from_numpy(dct_m_in)).float()
            _, idct_m = data_utils.get_dct_matrix(dct_n)
            idct_m = Variable(torch.from_numpy(idct_m)).float()
            if is_cuda:
                dct_m_in = dct_m_in.cuda()
                idct_m = idct_m.cuda()
            targ_expmap = all_seq.cpu().data.numpy()
            y_hat = None
            #Auto regression
            for idx in range(iterations):
                # index of the first ground-truth frame appended in this step
                start = input_n + idx * stepsize
                # index one past the last ground-truth frame appended in this step
                stop = start + stepsize
                if y_hat is None:
                    # first iteration: take the first dct_n frames of the (input_n + output_n)-frame sequence
                    input_seq = all_seq[:, :dct_n, dim_used]
                else:
                    #stack output from prev iteration and next frames to form the next input seq
                    input_seq = torch.cat(
                        (y_hat, all_seq[:, start:stop, dim_used]), 1)
                #calculate DCT of the input seq
                input_dct_seq = torch.matmul(dct_m_in,
                                             input_seq).transpose(1, 2)
                if is_cuda:
                    input_dct_seq = input_dct_seq.cuda()
                y = model(input_dct_seq)
                y_t = y.view(-1, dct_n).transpose(0, 1)
                y_exp = torch.matmul(idct_m,
                                     y_t).transpose(0, 1).contiguous().view(
                                         -1, dim_used_len,
                                         dct_n).transpose(1, 2)
                y_hat = y_exp[:, stepsize:, :]
                #accumulate the output frames in a single tensor
                if idx == 0:
                    outputs = y_exp
                else:
                    outputs = torch.cat((outputs, y_exp[:, input_n:, :]), 1)
            pred_expmap = all_seq.clone()
            dim_used = np.array(dim_used)
            pred_expmap[:, :, dim_used] = outputs
            pred_expmap = pred_expmap.cpu().data.numpy()
            #calculate loss and save to a file for later use
            #save_loss_file(act, pred_expmap, targ_expmap, input_n, output_n)
            if act in desired_acts:
                for k in range(8):
                    plt.cla()
                    figure_title = "action:{}, seq:{},".format(act, (k + 1))
                    viz.plot_predictions(targ_expmap[k, :, :],
                                         pred_expmap[k, :, :], fig, ax,
                                         figure_title)
                    plt.pause(1)
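The sliding-window arithmetic in the auto-regression loop above is easy to misread, so here is a minimal NumPy-only sketch of the same indexing with a dummy predictor standing in for the GCN; the concrete values of input_n, output_n and dct_n are illustrative only.

import numpy as np

# illustrative values; stepsize = dct_n - input_n, exactly as in the example above
input_n, output_n, dct_n = 10, 20, 20
stepsize = dct_n - input_n
iterations = output_n // stepsize

seq = np.arange(input_n + output_n, dtype=float)      # stand-in for one joint trajectory
y_hat = None
outputs = []
for idx in range(iterations):
    start = input_n + idx * stepsize                  # first ground-truth frame appended this step
    stop = start + stepsize
    if y_hat is None:
        window = seq[:dct_n]                          # first iteration: the first dct_n frames
    else:
        window = np.concatenate((y_hat, seq[start:stop]))
    pred = np.full(dct_n, window[-1])                 # dummy "model": repeat the last frame
    y_hat = pred[stepsize:]                           # keep the last input_n frames as context
    outputs.append(pred if idx == 0 else pred[input_n:])

print(np.concatenate(outputs).shape)                  # (30,) == (input_n + output_n,)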
Example #2
def main(opt):
    is_cuda = torch.cuda.is_available()

    # create model
    print(">>> creating model")
    input_n = opt.input_n
    output_n = opt.output_n
    sample_rate = opt.sample_rate
    dct_n = opt.dct_n

    model = nnmodel.GCN(input_feature=dct_n,
                        hidden_feature=opt.linear_size,
                        p_dropout=opt.dropout,
                        num_stage=opt.num_stage,
                        node_n=66)
    if is_cuda:
        model.cuda()
    model_path_len = './checkpoint/pretrained/h36m3D_in10_out10_dctn15.pth.tar'
    print(">>> loading ckpt len from '{}'".format(model_path_len))
    if is_cuda:
        ckpt = torch.load(model_path_len)
    else:
        ckpt = torch.load(model_path_len, map_location='cpu')
    err_best = ckpt['err']
    start_epoch = ckpt['epoch']
    model.load_state_dict(ckpt['state_dict'])
    print(">>> ckpt len loaded (epoch: {} | err: {})".format(
        start_epoch, err_best))

    # data loading
    print(">>> loading data")
    acts = data_utils.define_actions('all')
    test_data = dict()
    for act in acts:
        test_dataset = H36motion3D(path_to_data=opt.data_dir,
                                   actions=act,
                                   input_n=input_n,
                                   output_n=output_n,
                                   split=1,
                                   sample_rate=sample_rate,
                                   dct_used=dct_n)
        test_data[act] = DataLoader(dataset=test_dataset,
                                    batch_size=opt.test_batch,
                                    shuffle=False,
                                    num_workers=opt.job,
                                    pin_memory=True)
    dim_used = test_dataset.dim_used
    print(">>> data loaded !")

    model.eval()
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    for act in acts:
        for i, (inputs, targets, all_seq) in enumerate(test_data[act]):
            inputs = Variable(inputs).float()
            all_seq = Variable(all_seq).float()
            if is_cuda:
                inputs = inputs.cuda()
                all_seq = all_seq.cuda()

            outputs = model(inputs)

            n, seq_len, dim_full_len = all_seq.data.shape
            dim_used_len = len(dim_used)

            _, idct_m = data_utils.get_dct_matrix(seq_len)
            idct_m = Variable(torch.from_numpy(idct_m)).float()
            if is_cuda:
                idct_m = idct_m.cuda()
            outputs_t = outputs.view(-1, dct_n).transpose(0, 1)
            outputs_3d = torch.matmul(idct_m[:, 0:dct_n], outputs_t).transpose(
                0, 1).contiguous().view(-1, dim_used_len,
                                        seq_len).transpose(1, 2)
            pred_3d = all_seq.clone()
            dim_used = np.array(dim_used)

            # joints at same loc
            joint_to_ignore = np.array([16, 20, 23, 24, 28, 31])
            index_to_ignore = np.concatenate(
                (joint_to_ignore * 3, joint_to_ignore * 3 + 1,
                 joint_to_ignore * 3 + 2))
            joint_equal = np.array([13, 19, 22, 13, 27, 30])
            index_to_equal = np.concatenate(
                (joint_equal * 3, joint_equal * 3 + 1, joint_equal * 3 + 2))

            pred_3d[:, :, dim_used] = outputs_3d
            pred_3d[:, :, index_to_ignore] = pred_3d[:, :, index_to_equal]
            pred_p3d = pred_3d.contiguous().view(
                n, seq_len, -1, 3)[:, input_n:, :, :].cpu().data.numpy()
            targ_p3d = all_seq.contiguous().view(
                n, seq_len, -1, 3)[:, input_n:, :, :].cpu().data.numpy()

            for k in range(8):
                plt.cla()
                figure_title = "action:{}, seq:{},".format(act, (k + 1))
                viz.plot_predictions_direct(targ_p3d[k], pred_p3d[k], fig, ax,
                                            figure_title)
                plt.pause(1)
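data_utils.get_dct_matrix is used throughout these examples but never shown. A self-contained sketch with the same return signature (an orthonormal DCT-II matrix plus its inverse) could look roughly like this; treat it as an illustration of the idea, not the repository's exact implementation.

import numpy as np

def get_dct_matrix(N):
    # N x N orthonormal DCT-II matrix and its inverse (the IDCT matrix)
    dct_m = np.zeros((N, N))
    for k in range(N):
        w = np.sqrt(1.0 / N) if k == 0 else np.sqrt(2.0 / N)
        for i in range(N):
            dct_m[k, i] = w * np.cos(np.pi * k * (i + 0.5) / N)
    idct_m = np.linalg.inv(dct_m)
    return dct_m, idct_m

dct_m, idct_m = get_dct_matrix(15)
x = np.random.randn(15)
assert np.allclose(idct_m @ (dct_m @ x), x)   # forward transform then inverse recovers the signal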
Example #3
def main(opt):
    is_cuda = torch.cuda.is_available()

    # create model
    print(">>> creating model")
    input_n = opt.input_n
    output_n = opt.output_n
    sample_rate = opt.sample_rate

    model = nnmodel.GCN(input_feature=(input_n + output_n), hidden_feature=opt.linear_size, p_dropout=opt.dropout,
                        num_stage=opt.num_stage, node_n=48)
    if is_cuda:
        model.cuda()
    model_path_len = './checkpoint/pretrained/h36m_in10_out25.pth.tar'
    print(">>> loading ckpt len from '{}'".format(model_path_len))
    if is_cuda:
        ckpt = torch.load(model_path_len)
    else:
        ckpt = torch.load(model_path_len, map_location='cpu')
    err_best = ckpt['err']
    start_epoch = ckpt['epoch']
    model.load_state_dict(ckpt['state_dict'])
    print(">>> ckpt len loaded (epoch: {} | err: {})".format(start_epoch, err_best))

    # data loading
    print(">>> loading data")
    acts = data_utils.define_actions('all')
    test_data = dict()
    for act in acts:
        test_dataset = H36motion(path_to_data=opt.data_dir, actions=act, input_n=input_n, output_n=output_n, split=1,
                                 sample_rate=sample_rate)
        test_data[act] = DataLoader(
            dataset=test_dataset,
            batch_size=opt.test_batch,
            shuffle=False,
            num_workers=opt.job,
            pin_memory=True)
    dim_used = test_dataset.dim_used
    print(">>> data loaded !")

    model.eval()
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    for act in acts:
        for i, (inputs, targets, all_seq) in enumerate(test_data[act]):
            inputs = Variable(inputs).float()
            all_seq = Variable(all_seq).float()
            if is_cuda:
                inputs = inputs.cuda()
                all_seq = all_seq.cuda()

            outputs = model(inputs)

            n, seq_len, dim_full_len = all_seq.data.shape
            dim_used_len = len(dim_used)

            _, idct_m = data_utils.get_dct_matrix(seq_len)
            idct_m = Variable(torch.from_numpy(idct_m)).float()
            if is_cuda:
                idct_m = idct_m.cuda()
            outputs_t = outputs.view(-1, seq_len).transpose(0, 1)
            outputs_exp = torch.matmul(idct_m, outputs_t).transpose(0, 1).contiguous().view(-1, dim_used_len,
                                                                                            seq_len).transpose(1, 2)
            pred_expmap = all_seq.clone()
            dim_used = np.array(dim_used)
            pred_expmap[:, :, dim_used] = outputs_exp
            targ_expmap = all_seq
            pred_expmap = pred_expmap.cpu().data.numpy()
            targ_expmap = targ_expmap.cpu().data.numpy()
            for k in range(8):
                plt.cla()
                figure_title = "action:{}, seq:{},".format(act, (k + 1))
                viz.plot_predictions(targ_expmap[k, :, :], pred_expmap[k, :, :], fig, ax, figure_title)
                plt.pause(1)
Example #4
def main(opt):
    start_epoch = 0
    err_best = 10000
    lr_now = opt.lr
    is_cuda = torch.cuda.is_available()

    script_name = os.path.basename(__file__).split('.')[0]
    script_name = script_name + '_in{:d}_out{:d}_dctn_{:d}'.format(
        opt.input_n, opt.output_n, opt.dct_n)

    # create model
    print(">>> creating model")
    input_n = opt.input_n
    output_n = opt.output_n
    dct_n = opt.dct_n

    model = nnmodel.GCN(input_feature=dct_n,
                        hidden_feature=opt.linear_size,
                        p_dropout=opt.dropout,
                        num_stage=opt.num_stage,
                        node_n=69)

    if is_cuda:
        model.cuda()

    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
    if opt.is_load:
        model_path_len = 'checkpoint/test/ckpt_main_last.pth.tar'
        print(">>> loading ckpt len from '{}'".format(model_path_len))
        if is_cuda:
            ckpt = torch.load(model_path_len)
        else:
            ckpt = torch.load(model_path_len, map_location='cpu')
        start_epoch = ckpt['epoch']
        err_best = ckpt['err']
        lr_now = ckpt['lr']
        model.load_state_dict(ckpt['state_dict'])
        optimizer.load_state_dict(ckpt['optimizer'])
        print(">>> ckpt len loaded (epoch: {} | err: {})".format(
            start_epoch, err_best))

    # data loading
    print(">>> loading data")
    train_dataset = Pose3dPW(path_to_data=opt.data_dir_3dpw,
                             input_n=input_n,
                             output_n=output_n,
                             dct_n=dct_n,
                             split=0)
    dim_used = train_dataset.dim_used
    test_dataset = Pose3dPW(path_to_data=opt.data_dir_3dpw,
                            input_n=input_n,
                            output_n=output_n,
                            dct_n=dct_n,
                            split=1)
    val_dataset = Pose3dPW(path_to_data=opt.data_dir_3dpw,
                           input_n=input_n,
                           output_n=output_n,
                           dct_n=dct_n,
                           split=2)

    # load datasets for training
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=opt.train_batch,
                              shuffle=True,
                              num_workers=opt.job,
                              pin_memory=True)
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=opt.test_batch,
                             shuffle=False,
                             num_workers=opt.job,
                             pin_memory=True)
    val_loader = DataLoader(dataset=val_dataset,
                            batch_size=opt.test_batch,
                            shuffle=False,
                            num_workers=opt.job,
                            pin_memory=True)
    print(">>> data loaded !")
    print(">>> train data {}".format(train_dataset.__len__()))
    print(">>> test data {}".format(test_dataset.__len__()))
    print(">>> validation data {}".format(val_dataset.__len__()))

    for epoch in range(start_epoch, opt.epochs):

        if (epoch + 1) % opt.lr_decay == 0:
            lr_now = utils.lr_decay(optimizer, lr_now, opt.lr_gamma)
        print('==========================')
        print('>>> epoch: {} | lr: {:.5f}'.format(epoch + 1, lr_now))
        ret_log = np.array([epoch + 1])
        head = np.array(['epoch'])
        # per epoch
        lr_now, t_l, t_err = train(train_loader,
                                   model,
                                   optimizer,
                                   input_n=input_n,
                                   dct_n=dct_n,
                                   dim_used=dim_used,
                                   lr_now=lr_now,
                                   max_norm=opt.max_norm,
                                   is_cuda=is_cuda)
        ret_log = np.append(ret_log, [lr_now, t_l, t_err])
        head = np.append(head, ['lr', 't_l', 't_err'])

        v_err = val(val_loader,
                    model,
                    input_n=input_n,
                    dct_n=dct_n,
                    dim_used=dim_used,
                    is_cuda=is_cuda)

        ret_log = np.append(ret_log, v_err)
        head = np.append(head, ['v_err'])

        test_3d = test(test_loader,
                       model,
                       input_n=input_n,
                       output_n=output_n,
                       dct_n=dct_n,
                       dim_used=dim_used,
                       is_cuda=is_cuda)
        # ret_log = np.append(ret_log, test_l)
        ret_log = np.append(ret_log, test_3d)
        if output_n == 15:
            head = np.append(head,
                             ['1003d', '2003d', '3003d', '4003d', '5003d'])
        elif output_n == 30:
            head = np.append(head, [
                '1003d', '2003d', '3003d', '4003d', '5003d', '6003d', '7003d',
                '8003d', '9003d', '10003d'
            ])

        # update log file
        df = pd.DataFrame(np.expand_dims(ret_log, axis=0))
        if epoch == start_epoch:
            df.to_csv(opt.ckpt + '/' + script_name + '.csv',
                      header=head,
                      index=False)
        else:
            with open(opt.ckpt + '/' + script_name + '.csv', 'a') as f:
                df.to_csv(f, header=False, index=False)
        # save ckpt
        is_best = v_err < err_best
        err_best = min(v_err, err_best)
        file_name = [
            'ckpt_' + script_name + '_best.pth.tar',
            'ckpt_' + script_name + '_last.pth.tar'
        ]
        utils.save_ckpt(
            {
                'epoch': epoch + 1,
                'lr': lr_now,
                'err': test_3d[0],
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            },
            ckpt_path=opt.ckpt,
            is_best=is_best,
            file_name=file_name)
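utils.lr_decay is only called above, never defined. A plausible minimal helper with the same call signature (scale the current rate by gamma and push it into every optimizer parameter group) is sketched below; this is an assumption about the helper, not its actual code.

import torch

def lr_decay(optimizer, lr_now, gamma):
    # assumed behaviour: multiply the learning rate by gamma and apply it to all param groups
    lr_new = lr_now * gamma
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr_new
    return lr_new

# usage mirroring the training loop above (toy model, hypothetical gamma)
model = torch.nn.Linear(4, 4)
optimizer = torch.optim.Adam(model.parameters(), lr=5e-4)
lr_now = lr_decay(optimizer, 5e-4, 0.96)   # called every opt.lr_decay epochs in the example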
Example #5
def main(opt):
    start_epoch = 0
    err_best = 10000
    lr_now = opt.lr
    is_cuda = torch.cuda.is_available()
    opt.is_load = True
    # save option in log
    script_name = os.path.basename(__file__).split('.')[0]
    script_name = script_name + '_3D_in{:d}_out{:d}_dct_n_{:d}'.format(
        opt.input_n, opt.output_n, opt.dct_n)

    # create model
    print(">>> creating model")
    input_n = opt.input_n
    output_n = opt.output_n
    dct_n = opt.dct_n
    sample_rate = opt.sample_rate

    model = nnmodel.GCN(input_feature=dct_n,
                        hidden_feature=opt.linear_size,
                        p_dropout=opt.dropout,
                        num_stage=opt.num_stage,
                        node_n=66)

    if is_cuda:
        model.cuda()

    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
    if opt.is_load:
        model_path_len = 'checkpoint/pretrained/h36m3D_in10_out25_dctn30.pth.tar'
        print(">>> loading ckpt len from '{}'".format(model_path_len))
        if is_cuda:
            ckpt = torch.load(model_path_len)
        else:
            ckpt = torch.load(model_path_len, map_location='cpu')
        start_epoch = ckpt['epoch']
        err_best = ckpt['err']
        lr_now = ckpt['lr']
        model.load_state_dict(ckpt['state_dict'])
        optimizer.load_state_dict(ckpt['optimizer'])
        print(">>> ckpt len loaded (epoch: {} | err: {})".format(
            start_epoch, err_best))

    # data loading
    print(">>> loading data")
    # train_dataset = H36motion3D(path_to_data=opt.data_dir, actions='all', input_n=input_n, output_n=output_n,
    #                             split=0, dct_used=dct_n, sample_rate=sample_rate)

    acts = data_utils.define_actions('all')
    test_data = dict()
    for act in acts:
        test_dataset = H36motion3D(path_to_data=opt.data_dir,
                                   actions=act,
                                   input_n=input_n,
                                   output_n=output_n,
                                   split=1,
                                   sample_rate=sample_rate,
                                   dct_used=dct_n)
        test_data[act] = DataLoader(dataset=test_dataset,
                                    batch_size=opt.test_batch,
                                    shuffle=False,
                                    num_workers=opt.job,
                                    pin_memory=True)
    # val_dataset = H36motion3D(path_to_data=opt.data_dir, actions='all', input_n=input_n, output_n=output_n,
    #                           split=2, dct_used=dct_n, sample_rate=sample_rate)

    # load datasets for training
    # train_loader = DataLoader(
    #     dataset=train_dataset,
    #     batch_size=opt.train_batch,
    #     shuffle=True,
    #     num_workers=opt.job,
    #     pin_memory=True)
    # val_loader = DataLoader(
    #     dataset=val_dataset,
    #     batch_size=opt.test_batch,
    #     shuffle=False,
    #     num_workers=opt.job,
    #     pin_memory=True)
    print(">>> data loaded !")
    # print(">>> train data {}".format(train_dataset.__len__()))
    # print(">>> test data {}".format(test_dataset.__len__()))
    # print(">>> validation data {}".format(val_dataset.__len__()))

    # for epoch in range(start_epoch, opt.epochs):
    #
    #     if (epoch + 1) % opt.lr_decay == 0:
    #         lr_now = utils.lr_decay(optimizer, lr_now, opt.lr_gamma)
    #
    #     print('==========================')
    #     print('>>> epoch: {} | lr: {:.5f}'.format(epoch + 1, lr_now))
    ret_log = np.array([start_epoch])
    head = np.array(['epoch'])
    # per epoch
    # lr_now, t_l = train(train_loader, model, optimizer, lr_now=lr_now, max_norm=opt.max_norm, is_cuda=is_cuda,
    #                     dim_used=train_dataset.dim_used, dct_n=dct_n)
    # ret_log = np.append(ret_log, [lr_now, t_l])
    # head = np.append(head, ['lr', 't_l'])
    #
    # v_3d = val(val_loader, model, is_cuda=is_cuda, dim_used=train_dataset.dim_used, dct_n=dct_n)
    #
    # ret_log = np.append(ret_log, [v_3d])
    # head = np.append(head, ['v_3d'])

    test_3d_temp = np.array([])
    test_3d_head = np.array([])
    for act in acts:
        test_l, test_3d = test(test_data[act],
                               model,
                               input_n=input_n,
                               output_n=output_n,
                               is_cuda=is_cuda,
                               dim_used=test_dataset.dim_used,
                               dct_n=dct_n)
        # ret_log = np.append(ret_log, test_l)
        ret_log = np.append(ret_log, test_3d)
        head = np.append(
            head, [act + '3d80', act + '3d160', act + '3d320', act + '3d400'])
        if output_n > 10:
            head = np.append(head, [act + '3d560', act + '3d1000'])
    ret_log = np.append(ret_log, test_3d_temp)
    head = np.append(head, test_3d_head)

    # update log file and save checkpoint
    df = pd.DataFrame(np.expand_dims(ret_log, axis=0))
    # if epoch == start_epoch:
    df.to_csv(opt.ckpt + '/' + script_name + '.csv', header=head, index=False)
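The ret_log / head pair above is just two parallel arrays that end up as a single CSV row. A stripped-down version of that logging pattern (toy error values, hypothetical file name) looks like this:

import numpy as np
import pandas as pd

head = np.array(['epoch'])
ret_log = np.array([0])
for act in ['walking', 'eating']:                      # toy subset of actions
    test_3d = np.random.rand(4)                        # stand-in for the test() output
    ret_log = np.append(ret_log, test_3d)
    head = np.append(head, [act + '3d80', act + '3d160',
                            act + '3d320', act + '3d400'])

df = pd.DataFrame(np.expand_dims(ret_log, axis=0))     # one-row frame
df.to_csv('example_log.csv', header=head, index=False) # same call as in the example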
Example #6
def main(opt):
    start_epoch = 0
    err_best = 10000
    lr_now = opt.lr
    is_cuda = torch.cuda.is_available()
    opt.is_load = True
    # define log csv file
    script_name = os.path.basename(__file__).split('.')[0]
    script_name = script_name + "_in{:d}_out{:d}_dctn{:d}".format(
        opt.input_n, opt.output_n, opt.dct_n)
    desired_acts = ['eating', 'walkingdog']

    # create model
    print(">>> creating model")
    input_n = opt.input_n
    output_n = opt.output_n
    dct_n = opt.dct_n
    # calculate step size for auto-regression based on the number of input frames and DCT coefficients
    stepsize = dct_n - input_n
    sample_rate = opt.sample_rate

    # 48 nodes for angle prediction
    model = nnmodel.GCN(input_feature=dct_n,
                        hidden_feature=opt.linear_size,
                        p_dropout=opt.dropout,
                        num_stage=opt.num_stage,
                        node_n=48)

    if is_cuda:
        model.cuda()

    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    # continue from checkpoint
    if opt.is_load:
        model_path_len = "checkpoint/logs/ckpt_main_in{}_out{}_dctn{}_best.pth.tar".format(
            input_n, stepsize, dct_n)
        print(">>> loading ckpt len from '{}'".format(model_path_len))
        if is_cuda:
            ckpt = torch.load(model_path_len)
        else:
            ckpt = torch.load(model_path_len, map_location='cpu')
        start_epoch = ckpt['epoch']
        err_best = ckpt['err']
        lr_now = ckpt['lr']
        model.load_state_dict(ckpt['state_dict'])
        optimizer.load_state_dict(ckpt['optimizer'])
        print(">>> ckpt len loaded (epoch: {} | err: {})".format(
            start_epoch, err_best))

    # data loading
    print(">>> loading data")
    # train_dataset = H36motion(path_to_data=opt.data_dir, actions='all', input_n=input_n, output_n=output_n,
    #                           split=0, sample_rate=sample_rate, dct_n=dct_n)
    # data_std = train_dataset.data_std
    # data_mean = train_dataset.data_mean
    dim_used = [
        6, 7, 8, 9, 12, 13, 14, 15, 21, 22, 23, 24, 27, 28, 29, 30, 36, 37, 38,
        39, 40, 41, 42, 43, 44, 45, 46, 47, 51, 52, 53, 54, 55, 56, 57, 60, 61,
        62, 75, 76, 77, 78, 79, 80, 81, 84, 85, 86
    ]
    # val_dataset = H36motion(path_to_data=opt.data_dir, actions='all', input_n=input_n, output_n=output_n,
    #                         split=2, sample_rate=sample_rate, data_mean=data_mean, data_std=data_std, dct_n=dct_n)

    # # load datasets for training
    # train_loader = DataLoader(
    #     dataset=train_dataset,
    #     batch_size=opt.train_batch,
    #     shuffle=True,
    #     num_workers=opt.job,
    #     pin_memory=True)
    # val_loader = DataLoader(
    #     dataset=val_dataset,
    #     batch_size=opt.test_batch,
    #     shuffle=False,
    #     num_workers=opt.job,
    #     pin_memory=True)

    acts = data_utils.define_actions('all')
    print(acts)
    test_data = dict()
    for act in acts:
        test_dataset = H36motion(path_to_data=opt.data_dir,
                                 actions=act,
                                 input_n=input_n,
                                 output_n=output_n,
                                 split=1,
                                 sample_rate=sample_rate,
                                 dct_n=dct_n)
        test_data[act] = DataLoader(dataset=test_dataset,
                                    batch_size=opt.test_batch,
                                    shuffle=False,
                                    num_workers=opt.job,
                                    pin_memory=True)
    print(">>> data loaded !")
    # print(">>> train data {}".format(train_dataset.__len__()))
    # print(">>> validation data {}".format(val_dataset.__len__()))

    # for epoch in range(start_epoch, opt.epochs):
    #
    #     if (epoch + 1) % opt.lr_decay == 0:
    #         lr_now = utils.lr_decay(optimizer, lr_now, opt.lr_gamma)
    #     print('==========================')
    #     print('>>> epoch: {} | lr: {:.5f}'.format(epoch + 1, lr_now))
    ret_log = np.array([start_epoch])
    head = np.array(['epoch'])
    # per epoch
    # lr_now, t_l, t_e, t_3d = train(train_loader, model, optimizer, input_n=input_n,
    #                                lr_now=lr_now, max_norm=opt.max_norm, is_cuda=is_cuda,
    #                                dim_used=train_dataset.dim_used, dct_n=dct_n)
    # ret_log = np.append(ret_log, [lr_now, t_l, t_e, t_3d])
    # head = np.append(head, ['lr', 't_l', 't_e', 't_3d'])
    #
    # v_e, v_3d = val(val_loader, model, input_n=input_n, is_cuda=is_cuda, dim_used=train_dataset.dim_used,
    #                 dct_n=dct_n)
    #
    # ret_log = np.append(ret_log, [v_e, v_3d])
    # head = np.append(head, ['v_e', 'v_3d'])

    test_3d_temp = np.array([])
    test_3d_head = np.array([])
    for act in acts:
        test_e, test_3d = test(test_data[act],
                               model,
                               stepsize,
                               input_n=input_n,
                               output_n=output_n,
                               is_cuda=is_cuda,
                               dim_used=dim_used,
                               dct_n=dct_n)
        ret_log = np.append(ret_log, test_e)
        test_3d_temp = np.append(test_3d_temp, test_3d)
        test_3d_head = np.append(
            test_3d_head,
            [act + '3d80', act + '3d160', act + '3d320', act + '3d400'])
        head = np.append(head,
                         [act + '80', act + '160', act + '320', act + '400'])
        if output_n > 10:
            if output_n == 25:
                head = np.append(head, [act + '560', act + '1000'])
                test_3d_head = np.append(test_3d_head,
                                         [act + '3d560', act + '3d1000'])
            if output_n == 50:
                head = np.append(head,
                                 [act + '560', act + '1000', act + '2000'])
                test_3d_head = np.append(
                    test_3d_head,
                    [act + '3d560', act + '3d1000', act + '3d2000'])
            if output_n == 100:
                head = np.append(
                    head,
                    [act + '560', act + '1000', act + '2000', act + '4000'])
                test_3d_head = np.append(test_3d_head, [
                    act + '3d560', act + '3d1000', act + '3d2000',
                    act + '3d4000'
                ])

    ret_log = np.append(ret_log, test_3d_temp)
    head = np.append(head, test_3d_head)

    # update log file and save checkpoint
    df = pd.DataFrame(np.expand_dims(ret_log, axis=0))
    # if epoch == start_epoch:
    df.to_csv(opt.ckpt + '/' + script_name + '.csv', header=head, index=False)
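The numeric suffixes in the column names above (80, 160, ..., 1000, 2000, 4000) are prediction horizons in milliseconds. Assuming the usual H3.6M setup of 50 fps downsampled by sample_rate = 2 to 25 fps (40 ms per frame), they map to frame offsets as follows:

# 40 ms per frame at 25 fps
frame_ms = 1000.0 / 25.0
for frame in [2, 4, 8, 10, 14, 25, 50, 100]:
    print('frame offset {:3d} -> {:4.0f} ms'.format(frame, frame * frame_ms))
# prints 80, 160, 320, 400, 560, 1000, 2000 and 4000 ms, matching the column names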
Example #7
def main(opt):
    start_epoch = 0
    err_best = 10000
    lr_now = opt.lr
    is_cuda = torch.cuda.is_available()

    # save option in log
    script_name = os.path.basename(__file__).split('.')[0]
    script_name = script_name + '_in{:d}_out{:d}_dctn_{:d}'.format(
        opt.input_n, opt.output_n, opt.dct_n)

    # create model
    print(">>> creating model")
    input_n = opt.input_n
    output_n = opt.output_n
    dct_n = opt.dct_n

    model = nnmodel.GCN(input_feature=dct_n,
                        hidden_feature=opt.linear_size,
                        p_dropout=opt.dropout,
                        num_stage=opt.num_stage,
                        node_n=64)

    if is_cuda:
        model.cuda()

    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)
    if opt.is_load:
        model_path_len = 'checkpoint/test/ckpt_main_gcn_muti_att_best.pth.tar'
        print(">>> loading ckpt len from '{}'".format(model_path_len))
        if is_cuda:
            ckpt = torch.load(model_path_len)
        else:
            ckpt = torch.load(model_path_len, map_location='cpu')
        start_epoch = ckpt['epoch']
        err_best = ckpt['err']
        lr_now = ckpt['lr']
        model.load_state_dict(ckpt['state_dict'])
        optimizer.load_state_dict(ckpt['optimizer'])
        print(">>> ckpt len loaded (epoch: {} | err: {})".format(
            start_epoch, err_best))

    # data loading
    print(">>> loading data")
    train_dataset = CMU_Motion(path_to_data=opt.data_dir_cmu,
                               actions=opt.actions,
                               input_n=input_n,
                               output_n=output_n,
                               split=0,
                               dct_n=dct_n)
    data_std = train_dataset.data_std
    data_mean = train_dataset.data_mean
    dim_used = train_dataset.dim_used

    acts = data_utils.define_actions_cmu(opt.actions)
    test_data = dict()
    for act in acts:
        test_dataset = CMU_Motion(path_to_data=opt.data_dir_cmu,
                                  actions=act,
                                  input_n=input_n,
                                  output_n=output_n,
                                  split=1,
                                  data_mean=data_mean,
                                  data_std=data_std,
                                  dim_used=dim_used,
                                  dct_n=dct_n)
        test_data[act] = DataLoader(dataset=test_dataset,
                                    batch_size=opt.test_batch,
                                    shuffle=False,
                                    num_workers=opt.job,
                                    pin_memory=True)
    # we did not use a validation set for CMU
    # val_dataset = CMU_Motion(path_to_data=opt.data_dir, actions='all', input_n=input_n, output_n=output_n,
    #                         split=2,
    #                         sample_rate=sample_rate, is_norm_dct=opt.is_norm_dct, is_norm_motion=opt.is_norm,
    #                         data_mean=data_mean, data_std=data_std, data_mean_dct=data_mean_dct,
    #                         data_std_dct=data_std_dct)
    # load datasets for training
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=opt.train_batch,
                              shuffle=True,
                              num_workers=opt.job,
                              pin_memory=True)
    # val_loader = DataLoader(
    #     dataset=val_dataset,
    #     batch_size=opt.test_batch,
    #     shuffle=False,
    #     num_workers=opt.job,
    #     pin_memory=True)
    print(">>> data loaded !")
    print(">>> train data {}".format(train_dataset.__len__()))
    print(">>> test data {}".format(test_dataset.__len__()))
    # print(">>> validation data {}".format(val_dataset.__len__()))

    for epoch in range(start_epoch, opt.epochs):

        if (epoch + 1) % opt.lr_decay == 0:
            lr_now = utils.lr_decay(optimizer, lr_now, opt.lr_gamma)
        print('==========================')
        print('>>> epoch: {} | lr: {:.5f}'.format(epoch + 1, lr_now))
        ret_log = np.array([epoch + 1])
        head = np.array(['epoch'])
        # per epoch
        lr_now, t_l, t_e, t_3d = train(train_loader,
                                       model,
                                       optimizer,
                                       input_n=input_n,
                                       lr_now=lr_now,
                                       max_norm=opt.max_norm,
                                       is_cuda=is_cuda,
                                       dim_used=dim_used,
                                       dct_n=dct_n)
        ret_log = np.append(ret_log, [lr_now, t_l, t_e, t_3d])
        head = np.append(head, ['lr', 't_l', 't_e', 't_3d'])

        # v_l, v_e, v_3d = val(val_loader, model, optimizer, adj, input_n=input_n, output_n=output_n,
        #                      lr_now=lr_now, max_norm=opt.max_norm, is_cuda=is_cuda, epoch=epoch + 1,
        #                      dim_used=train_dataset.dim_used, is_norm_dct=opt.is_norm_dct,
        #                      is_norm=opt.is_norm, data_mean=data_mean, data_std=data_std,
        #                      data_mean_dct=data_mean_dct, data_std_dct=data_std_dct)
        #
        # ret_log = np.append(ret_log, [v_l, v_e, v_3d])
        # head = np.append(head, ['v_l', 'v_e', 'v_3d'])

        test_3d_temp = np.array([])
        test_3d_head = np.array([])
        for act in acts:
            test_e, test_3d = test(test_data[act],
                                   model,
                                   input_n=input_n,
                                   output_n=output_n,
                                   is_cuda=is_cuda,
                                   dim_used=dim_used,
                                   dct_n=dct_n)

            ret_log = np.append(ret_log, test_e)
            test_3d_temp = np.append(test_3d_temp, test_3d)
            test_3d_head = np.append(
                test_3d_head,
                [act + '3d80', act + '3d160', act + '3d320', act + '3d400'])

            head = np.append(
                head, [act + '80', act + '160', act + '320', act + '400'])
            if output_n > 10:
                head = np.append(head, [act + '560', act + '1000'])
                test_3d_head = np.append(test_3d_head,
                                         [act + '3d560', act + '3d1000'])
        ret_log = np.append(ret_log, test_3d_temp)
        head = np.append(head, test_3d_head)

        # update log file
        df = pd.DataFrame(np.expand_dims(ret_log, axis=0))
        if epoch == start_epoch:
            df.to_csv(opt.ckpt + '/' + script_name + '.csv',
                      header=head,
                      index=False)
        else:
            with open(opt.ckpt + '/' + script_name + '.csv', 'a') as f:
                df.to_csv(f, header=False, index=False)
        # save ckpt
        if not np.isnan(t_e):
            is_best = t_e < err_best
            err_best = min(t_e, err_best)
        else:
            is_best = False
        file_name = [
            'ckpt_' + script_name + '_best.pth.tar',
            'ckpt_' + script_name + '_last.pth.tar'
        ]
        utils.save_ckpt(
            {
                'epoch': epoch + 1,
                'lr': lr_now,
                'err': test_e[0],
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            },
            ckpt_path=opt.ckpt,
            is_best=is_best,
            file_name=file_name)
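utils.save_ckpt is another helper that never appears in these listings. Given how it is called (a state dict, a checkpoint directory, an is_best flag and a two-element file_name list), a reasonable guess at a minimal implementation is the sketch below; it is an assumption, not the project's actual function.

import os
import shutil
import torch

def save_ckpt(state, ckpt_path, is_best=True,
              file_name=('ckpt_best.pth.tar', 'ckpt_last.pth.tar')):
    # assumed behaviour: always write the latest checkpoint, copy it as "best" when flagged
    last_path = os.path.join(ckpt_path, file_name[1])
    torch.save(state, last_path)
    if is_best:
        shutil.copyfile(last_path, os.path.join(ckpt_path, file_name[0]))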
Example #8
def main(opt):
    start_epoch = 0
    err_best = 10000
    lr_now = opt.lr
    is_cuda = torch.cuda.is_available()

    # define log csv file
    script_name = os.path.basename(__file__).split('.')[0]
    script_name = script_name + "_in{:d}_out{:d}_dctn{:d}".format(
        opt.input_n, opt.output_n, opt.dct_n)

    # create model
    print(">>> creating model")
    input_n = opt.input_n
    output_n = opt.output_n
    dct_n = opt.dct_n
    sample_rate = opt.sample_rate

    # 48 nodes for angle prediction
    model = nnmodel.GCN(input_feature=dct_n,
                        hidden_feature=opt.linear_size,
                        p_dropout=opt.dropout,
                        num_stage=opt.num_stage,
                        node_n=48)

    if is_cuda:
        model.cuda()

    print(">>> total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)

    # continue from checkpoint
    if opt.is_load:
        model_path_len = 'checkpoint/test/ckpt_main_gcn_muti_att_best.pth.tar'
        print(">>> loading ckpt len from '{}'".format(model_path_len))
        if is_cuda:
            ckpt = torch.load(model_path_len)
        else:
            ckpt = torch.load(model_path_len, map_location='cpu')
        start_epoch = ckpt['epoch']
        err_best = ckpt['err']
        lr_now = ckpt['lr']
        model.load_state_dict(ckpt['state_dict'])
        optimizer.load_state_dict(ckpt['optimizer'])
        print(">>> ckpt len loaded (epoch: {} | err: {})".format(
            start_epoch, err_best))

    # data loading
    print(">>> loading data")
    train_dataset = H36motion(path_to_data=opt.data_dir,
                              actions='all',
                              input_n=input_n,
                              output_n=output_n,
                              split=0,
                              sample_rate=sample_rate,
                              dct_n=dct_n)
    data_std = train_dataset.data_std
    data_mean = train_dataset.data_mean

    val_dataset = H36motion(path_to_data=opt.data_dir,
                            actions='all',
                            input_n=input_n,
                            output_n=output_n,
                            split=2,
                            sample_rate=sample_rate,
                            data_mean=data_mean,
                            data_std=data_std,
                            dct_n=dct_n)

    # load datasets for training
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=opt.train_batch,
                              shuffle=True,
                              num_workers=opt.job,
                              pin_memory=True)
    val_loader = DataLoader(dataset=val_dataset,
                            batch_size=opt.test_batch,
                            shuffle=False,
                            num_workers=opt.job,
                            pin_memory=True)

    acts = data_utils.define_actions('all')
    test_data = dict()
    for act in acts:
        test_dataset = H36motion(path_to_data=opt.data_dir,
                                 actions=act,
                                 input_n=input_n,
                                 output_n=output_n,
                                 split=1,
                                 sample_rate=sample_rate,
                                 data_mean=data_mean,
                                 data_std=data_std,
                                 dct_n=dct_n)
        test_data[act] = DataLoader(dataset=test_dataset,
                                    batch_size=opt.test_batch,
                                    shuffle=False,
                                    num_workers=opt.job,
                                    pin_memory=True)
    print(">>> data loaded !")
    print(">>> train data {}".format(train_dataset.__len__()))
    print(">>> validation data {}".format(val_dataset.__len__()))

    for epoch in range(start_epoch, opt.epochs):

        if (epoch + 1) % opt.lr_decay == 0:
            lr_now = utils.lr_decay(optimizer, lr_now, opt.lr_gamma)
        print('==========================')
        print('>>> epoch: {} | lr: {:.5f}'.format(epoch + 1, lr_now))
        ret_log = np.array([epoch + 1])
        head = np.array(['epoch'])
        # per epoch
        a = train_dataset.dim_used