Example #1
                # Undo the test-time horizontal flip by swapping left/right
                # joints (statement completed from the upstream VideoPose3D
                # run.py; the source snippet began mid-expression)
                predicted_3d_pos[1, :, joints_left + joints_right] = predicted_3d_pos[
                    1, :, joints_right + joints_left]
                predicted_3d_pos = torch.mean(predicted_3d_pos,
                                              dim=0,
                                              keepdim=True)

            if return_predictions:
                return predicted_3d_pos.squeeze(0).cpu().numpy()
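
A quick aside on the test-time augmentation being undone above: the model is fed both the original clip and a horizontally mirrored copy, so batch index 0 holds the original prediction and index 1 the flipped one. A minimal sketch on a dummy tensor (the joint indices here are made up for illustration, not VideoPose3D's real skeleton):

import torch

joints_left, joints_right = [1, 2], [3, 4]  # hypothetical indices for this demo
pred = torch.randn(2, 5, 6, 3)  # (flip copies, frames, joints, xyz)
pred[1, :, :, 0] *= -1  # undo the x-axis mirror on the flipped copy
pred[1, :, joints_left + joints_right] = pred[1, :, joints_right + joints_left]
pred = pred.mean(dim=0, keepdim=True)  # average the two views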


if args.render:
    print('Rendering...')

    input_keypoints = keypoints[args.viz_subject][args.viz_action][
        args.viz_camera].copy()
    ground_truth = None
    if args.viz_subject in dataset.subjects() and args.viz_action in dataset[
            args.viz_subject]:
        if 'positions_3d' in dataset[args.viz_subject][args.viz_action]:
            ground_truth = dataset[args.viz_subject][
                args.viz_action]['positions_3d'][args.viz_camera].copy()
    if ground_truth is None:
        print(
            'INFO: this action is unlabeled. Ground truth will not be rendered.'
        )

    gen = UnchunkedGenerator(None,
                             None, [input_keypoints],
                             pad=pad,
                             causal_shift=causal_shift,
                             augment=args.test_time_augmentation,
                             kps_left=kps_left,
                             # remaining kwargs restored from the upstream
                             # VideoPose3D run.py; the source snippet was
                             # cut off mid-call
                             kps_right=kps_right,
                             joints_left=joints_left,
                             joints_right=joints_right)
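
In the upstream VideoPose3D run.py, this generator is then passed to the evaluate() function shown at the top of this example to obtain the poses that get rendered; a one-line sketch of that call:

    prediction = evaluate(gen, return_predictions=True)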
Example #2
# Directory-creation guard restored from the upstream VideoPose3D run.py;
# the source snippet began mid-statement.
try:
    os.makedirs(args.checkpoint)
except OSError as e:
    if e.errno != errno.EEXIST:
        raise RuntimeError(
            'Unable to create checkpoint directory:', args.checkpoint)


print('ARGS EVAL:::::', args.evaluate)
print('Loading dataset...')
dataset_path = 'data/data_3d_' + args.dataset + '.npz'

dataset = CustomDataset('data/data_2d_' + args.dataset +
                        '_' + args.keypoints + '.npz')
# print(dataset.subjects())
print(dataset['009_FL_R.MTS'])
dataset_poses = np.load('data/data_2d_' + args.dataset + '_' +
                        args.keypoints + '.npz', allow_pickle=True)
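
The archive stores object arrays, hence allow_pickle=True. A quick way to peek at what was loaded (the key names 'positions_2d' and 'metadata' are the ones used by the VideoPose3D 2D-data format; adjust if your archive differs):

print(dataset_poses.files)  # typically ['positions_2d', 'metadata']
print(dataset_poses['positions_2d'].item().keys())  # subjects, e.g. '009_FL_R.MTS'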

print('Preparing data...')
for subject in dataset.subjects():
    for action in dataset[subject].keys():
        anim = dataset[subject][action]
        print(anim)

        if 'positions' in anim:
            positions_3d = []
            for cam in anim['cameras']:
                pos_3d = world_to_camera(
                    anim['positions'], R=cam['orientation'], t=cam['translation'])
                # Remove global offset, but keep trajectory in first position
                pos_3d[:, 1:] -= pos_3d[:, :1]
                positions_3d.append(pos_3d)
            anim['positions_3d'] = positions_3d
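
world_to_camera() maps the world-space joints into each camera's coordinate frame using the camera's quaternion orientation and translation. A minimal self-contained sketch of that transform, assuming the (w, x, y, z) quaternion convention used by VideoPose3D's common/camera.py:

import numpy as np

def quat_rotate(q, v):
    # Rotate vectors v (..., 3) by unit quaternions q (..., 4)
    qvec = q[..., 1:]
    uv = np.cross(qvec, v)
    uuv = np.cross(qvec, uv)
    return v + 2 * (q[..., :1] * uv + uuv)

def world_to_camera_sketch(X, R, t):
    # Translate so the camera sits at the origin, then rotate by the
    # inverse orientation (the conjugate inverts a unit quaternion)
    R_inv = np.concatenate([R[..., :1], -R[..., 1:]], axis=-1)
    q = np.broadcast_to(R_inv, X.shape[:-1] + (4,))
    return quat_rotate(q, X - t)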

print('Loading 2D detections...')
Example #3
#don't use ellipsis to truncate arrays when printing
#np.set_printoptions(threshold=sys.maxsize)

#dataset init
from common.custom_dataset import CustomDataset

#check path of npz
print('PATH: outs/data_2d_' + args.dataset + '_' + args.keypoints + '.npz')

#create new CustomDataset object
dataset = CustomDataset('data/data_2d_' + args.dataset + '_' + args.keypoints +
                        '.npz')  #NOTE CHANGE

print('Preparing data...')

print(dataset.subjects())  #looks like dict_keys(['../vids/output.mp4'])

for subject in dataset.subjects():  #should have just one subject, which will be '../vids/output.mp4'
    print(dataset[subject])
    '''looks like {'custom': {'cameras': {'id': '../vids/output.mp4', 'res_w': 1080, 'res_h': 1920, 
    'azimuth': 70, 'orientation': array([ 0.14070565, -0.15007018, -0.7552408 ,  0.62232804], dtype=float32), 
    'translation': array([1.841107 , 4.9552846, 1.5634454], dtype=float32)}}}'''

    print(dataset[subject].keys())  #something like dict_keys(['custom'])

    #should just be one key 'custom'
    for action in dataset[subject].keys():
        anim = dataset[subject][action]
        print(anim)
        '''anim looks like {'cameras': {'id': '../vids/output.mp4', 'res_w': 1080, 'res_h': 1920, 'azimuth': 70, ...}}'''
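
The cameras entry above carries res_w/res_h, which downstream code uses to map pixel-space keypoints into the normalized [-1, 1] range the model expects. A minimal sketch mirroring normalize_screen_coordinates() from VideoPose3D's common/camera.py:

import numpy as np

def normalize_screen_coordinates(X, w, h):
    # Scale so x spans [-1, 1]; y is scaled by the same factor to
    # preserve the aspect ratio
    assert X.shape[-1] == 2
    return X / w * 2 - [1, h / w]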
Example #4
viz_fps = 30

resume = ''
evaluate = 'pretrained_h36m_detectron_coco.bin'
checkpoint = 'checkpoint'
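
These settings point at the pretrained weights to restore. A hedged sketch of how such a checkpoint is typically loaded in VideoPose3D (model_pos is assumed to be the TemporalModel instance built elsewhere in the script):

import os
import torch

chk_filename = os.path.join(checkpoint, resume if resume else evaluate)
chk = torch.load(chk_filename, map_location=lambda storage, loc: storage)
model_pos.load_state_dict(chk['model_pos'])  # 'model_pos' is the key used by the released checkpoints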

dataset = CustomDataset(myvideos_path)

# print(dataset)
# print(dataset.cameras())
# print(dataset.fps())
# print(dataset.skeleton())
# print(dataset.subjects())

print('Preparing data...')
for subject in dataset.subjects():
    for action in dataset[subject].keys():
        anim = dataset[subject][action]

        if 'positions' in anim:
            positions_3d = []
            for cam in anim['cameras']:
                pos_3d = world_to_camera(anim['positions'],
                                         R=cam['orientation'],
                                         t=cam['translation'])
                pos_3d[:, 1:] -= pos_3d[:, :1]  # Remove global offset, but keep trajectory in first position
                positions_3d.append(pos_3d)
            anim['positions_3d'] = positions_3d