Example #1
def read_3d_data(dataset):
    for subject in dataset.subjects():
        for action in dataset[subject].keys():
            anim = dataset[subject][action]

            positions_3d = []
            for cam in anim['cameras']:
                pos_3d = world_to_camera(anim['positions'],
                                         R=cam['orientation'],
                                         t=cam['translation'])
                # pos_3d[:, :] -= pos_3d[:, :1]  # Left commented out on purpose: keep global positions here; the root offset is subtracted later, at model-training time.
                positions_3d.append(pos_3d)
            anim['positions_3d'] = positions_3d

    return dataset
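
# Usage sketch for read_3d_data(): a minimal, hypothetical example.
# Assumptions: Human36mDataset is importable as in VideoPose3D; the .npz
# path and the 'Walking 1' action key are placeholders for illustration.
#
#     from common.h36m_dataset import Human36mDataset
#     dataset = Human36mDataset('data/data_3d_h36m.npz')
#     dataset = read_3d_data(dataset)
#     print(dataset['S1']['Walking 1']['positions_3d'][0].shape)
#     # -> (n_frames, n_joints, 3): one camera-space array per camera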


def prepare_data(dataset):
    print('Preparing data...')
    print(dataset.subjects())
    for subject in dataset.subjects():
        for action in dataset[subject].keys():
            anim = dataset[subject][action]

            if 'positions' in anim:
                positions_3d = []
                for cam in anim['cameras']:
                    pos_3d = world_to_camera(anim['positions'],
                                             R=cam['orientation'],
                                             t=cam['translation'])
                    pos_3d[:, 1:] -= pos_3d[:, :1]  # Remove global offset, but keep trajectory in first position
                    positions_3d.append(pos_3d)
                anim['positions_3d'] = positions_3d
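
The in-place update above, pos_3d[:, 1:] -= pos_3d[:, :1], broadcasts the root joint (index 0) over all other joints, making them root-relative while the root keeps the global trajectory. A self-contained NumPy check of that broadcasting (toy values, nothing from the dataset):

import numpy as np

# One frame, three joints: root at (2, 0, 0), two joints offset from it.
pos_3d = np.array([[[2.0, 0.0, 0.0],
                    [3.0, 1.0, 0.0],
                    [2.0, 0.0, 1.0]]])
pos_3d[:, 1:] -= pos_3d[:, :1]  # same operation as in prepare_data()
print(pos_3d[0, 0])  # [2. 0. 0.] -- the root keeps its global position
print(pos_3d[0, 1])  # [1. 1. 0.] -- now expressed relative to the root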
Example #3
        exit(0)

    # Create 2D pose file
    print('')
    print('Computing ground-truth 2D poses...')
    dataset = Human36mDataset(output_filename + '.npz')
    output_2d_poses = {}
    for subject in dataset.subjects():
        output_2d_poses[subject] = {}
        for action in dataset[subject].keys():
            anim = dataset[subject][action]

            positions_2d = []
            for cam in anim['cameras']:
                pos_3d = world_to_camera(anim['positions'],
                                         R=cam['orientation'],
                                         t=cam['translation'])
                pos_2d = wrap(project_to_2d, True, pos_3d, cam['intrinsic'])  # positional True enables unsqueeze (adds a batch dimension)
                pos_2d_pixel_space = image_coordinates(pos_2d,
                                                       w=cam['res_w'],
                                                       h=cam['res_h'])
                positions_2d.append(pos_2d_pixel_space.astype('float32'))
            output_2d_poses[subject][action] = positions_2d

    print('Saving...')
    metadata = {
        'num_joints': dataset.skeleton().num_joints(),
        'keypoints_symmetry': [dataset.skeleton().joints_left(),
                               dataset.skeleton().joints_right()]
    }
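
Both this example and the next call a wrap() helper whose definition is not shown. It lets a torch-based function such as project_to_2d accept NumPy input; the positional True here and the unsqueeze=True keyword in Example #4 appear to play the same role. A minimal sketch under that assumption (not necessarily the repository's exact implementation):

import numpy as np
import torch

def wrap(func, *args, unsqueeze=False):
    # Convert NumPy arguments to tensors, optionally add a batch
    # dimension, call func, then convert the result back to NumPy.
    args = list(args)
    for i, arg in enumerate(args):
        if isinstance(arg, np.ndarray):
            args[i] = torch.from_numpy(arg)
            if unsqueeze:
                args[i] = args[i].unsqueeze(0)
    result = func(*args)
    if unsqueeze:
        result = result.squeeze(0)
    return result.numpy()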
Example #4
        exit(0)

    # Create 2D pose file
    print("")
    print("Computing ground-truth 2D poses...")
    dataset = Human36mDataset(output_filename + ".npz")
    output_2d_poses = {}
    for subject in dataset.subjects():
        output_2d_poses[subject] = {}
        for action in dataset[subject].keys():
            anim = dataset[subject][action]

            positions_2d = []
            for cam in anim["cameras"]:
                pos_3d = world_to_camera(anim["positions"],
                                         R=cam["orientation"],
                                         t=cam["translation"])
                pos_2d = wrap(project_to_2d,
                              pos_3d,
                              cam["intrinsic"],
                              unsqueeze=True)
                pos_2d_pixel_space = image_coordinates(pos_2d,
                                                       w=cam["res_w"],
                                                       h=cam["res_h"])
                positions_2d.append(pos_2d_pixel_space.astype("float32"))
            output_2d_poses[subject][action] = positions_2d

    print("Saving...")
    metadata = {
        "num_joints": dataset.skeleton().num_joints(),
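
project_to_2d returns coordinates in a normalized camera frame, and image_coordinates() maps them back to pixel space using the camera resolution. Its body is not shown above; a sketch assuming it inverts the usual [-1, 1] screen normalization (x mapped back to [0, w], aspect ratio preserved for y):

import numpy as np

def image_coordinates(X, w, h):
    # Inverse of screen normalization: [-1, 1] -> [0, w] in x,
    # with y scaled by the same factor to preserve aspect ratio.
    assert X.shape[-1] == 2
    return (X + [1, h / w]) * w / 2

# The image center in normalized coordinates maps back to (w/2, h/2):
print(image_coordinates(np.array([[0.0, 0.0]]), w=1000, h=1000))  # [[500. 500.]]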