def vis_sequence(inference_id, example_id=None, use_plt=True):
    """
    Visualize a previously inferred sequence.

    Args:
        inference_id: id of the relevant inference set.
        example_id: example id to visualize. If None, a random example is
            selected.
        use_plt: if True, uses matplotlib.pyplot for visualization; otherwise
            uses glumpy. The glumpy visualization is an animation, while the
            plt version shows a number of static frames.
    """
    inference_params = load_inference_params(inference_id)
    dataset = get_normalized_dataset(inference_params['dataset'])
    skeleton = get_skeleton(dataset.attrs['skeleton_id'])

    # `results_path` (the HDF5 results file) is defined at module level.
    with h5py.File(results_path, 'r') as f:
        group = f[inference_id]
        if example_id is None:
            example_id = random.sample(list(group.keys()), 1)[0]
        example = dataset[example_id]
        fps = example.attrs['fps']
        ground_truth = np.array(example['p3w']) * \
            example.attrs['space_scale'] / 1000

        pixel_scale = \
            inference_params['dataset']['normalize_kwargs']['pixel_scale']
        inferred = np.array(group[example_id]['p3w']) * pixel_scale / 1000

    if use_plt:
        vis_data_plt(skeleton, ground_truth, inferred)
    else:
        vis_data_glumpy(skeleton, fps, ground_truth, inferred)
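
A minimal usage sketch (the inference id and example id below are hypothetical
placeholders; they must match entries already written under `results_path`):

# matplotlib frames for a random example:
vis_sequence('my_inference_run')
# glumpy animation for a specific example:
vis_sequence('my_inference_run', example_id='S9_Walking', use_plt=False)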
Example #2
def vis(gan_id):
    """Visualize output from the given gan."""
    builder = GanBuilder(gan_id)
    skeleton = get_skeleton(
        builder.params['dataset']['normalize_kwargs']['skeleton_id'])

    print('Building graph...')
    graph = tf.Graph()
    with graph.as_default():
        gen_input = builder.get_random_generator_input()
        with tf.variable_scope('Generator'):
            sample = builder.get_generator_sample(gen_input)
        generator_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)

    print('Starting session...')
    with tf.Session(graph=graph) as sess:
        print('Restoring variables...')
        saver = tf.train.Saver(var_list=generator_vars)
        saver.restore(sess, builder.latest_checkpoint)
        print('Generating...')
        sample_data = sess.run(sample)

    print('Visualizing...')
    for s in sample_data:
        vis3d(skeleton, s)
        plt.show()
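
The snippet above collects every global variable in the graph. If other
variables (e.g. a discriminator) shared the same graph, the collection could
be narrowed to the generator scope instead; a toy TF 1.x sketch with made-up
variable names:

import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    with tf.variable_scope('Generator'):
        w = tf.get_variable('w', shape=[3, 3])
    with tf.variable_scope('Discriminator'):
        v = tf.get_variable('v', shape=[3])
    # Filtering by scope keeps only the generator's variables.
    generator_vars = tf.get_collection(
        tf.GraphKeys.GLOBAL_VARIABLES, scope='Generator')
    assert [x.name for x in generator_vars] == ['Generator/w:0']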
Example #3
def normalized_poses(p3, skeleton_id, rotate_front=False, recenter_xy=False):
    """Get a normalized version of p3. Does not change p3."""
    skeleton = get_skeleton(skeleton_id)
    if rotate_front:
        p3 = skeleton.rotate_front(p3)
    if recenter_xy:
        r = skeleton.root_index
        p3 = p3.copy()
        p3[..., :2] -= p3[..., r:r + 1, :2]
    return p3
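
A rough usage sketch, assuming eva registration provides the 's14' skeleton
(as in the later examples) and that it has 14 joints:

import numpy as np
from human_pose_util.register import register_skeletons

register_skeletons(eva=True)
poses = np.random.randn(8, 14, 3)  # dummy batch: 8 frames, 14 joints (assumed)
normalized = normalized_poses(
    poses, 's14', rotate_front=True, recenter_xy=True)
# With recenter_xy=True the root joint of every frame is recentered to x = y = 0.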
Example #4
def normalize_dataset(dataset,
                      consistent_pose=False,
                      consistent_projection=False,
                      scale_to_height=False,
                      space_scale=1,
                      pixel_scale=1,
                      fps=None,
                      skeleton_id=None,
                      heights=None):
    """
    Modify data in a dataset.

    If `scale_to_height` is True, `space_scale` is applied after scaling to the
    subject's height, i.e. if `space_scale = 5` and the subject height is 1.5,
    this is equivalent to using `space_scale = 5 * 1.5` with
    `scale_to_height = False`. Heights will be calculated if not provided.

    `dataset` should map ids to pose sequences, each of which is a dict with
    potentially all of the following keys:
        p3w
        r (needed if `consistent_pose`)
        t (needed if `consistent_pose`)
        f (needed if `consistent_projection`)
        c (needed if `consistent_projection`)
        p3c (overwritten if `consistent_pose`)
        p2 (overwritten if `consistent_projection`)
        subject_id (needed if `scale_to_height`)
        fps (needed if `fps is not None`)
        space_scale (optional)
        pixel_scale (optional)
        skeleton_id (needed if the `skeleton_id` argument is not None)

    Args:
        ...
        heights: optional dict of subject_id -> heights. Computed if not
            supplied

    Modifies the dataset in place and returns it.
    """
    if skeleton_id is not None:
        apply_skeleton_conversion(dataset, skeleton_id)

    skeleton_id = dataset.attrs['skeleton_id']
    if scale_to_height and heights is None:
        skeleton = get_skeleton(skeleton_id)
        heights = _get_heights(dataset.values(), skeleton)

    for sequence in dataset.values():
        if consistent_pose:
            apply_consistent_pose(sequence)
        if consistent_projection:
            apply_consistent_projection(sequence)
        if scale_to_height:
            apply_space_scale(
                sequence, heights[sequence.attrs['subject_id']] * space_scale)
        elif space_scale > 1:
            apply_space_scale(sequence, space_scale)

        if pixel_scale != 1:
            apply_pixel_scale(sequence, pixel_scale)

        if fps is not None:
            apply_fps_change(sequence, fps)

    return dataset
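
A hypothetical call, assuming `dataset` is an h5py-like group laid out as the
docstring describes:

normalize_dataset(
    dataset,
    consistent_pose=True,
    consistent_projection=True,
    scale_to_height=True,  # space_scale then multiplies the per-subject height
    space_scale=1,
    pixel_scale=2,
    fps=30,
    skeleton_id='s14')  # convert every sequence to the s14 skeleton first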
Example #5
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
from human_pose_util.register import register_datasets, get_dataset, get_skeleton, get_converter

register_datasets(eva=True)
eva = get_dataset('eva')
key = list(eva.keys())[0]
p3w = np.array(eva[key]['p3w'])
p3w /= 1000
target = 's14'
converter = get_converter('s20', target)
skeleton = get_skeleton(target)
p3w = converter.convert(p3w)
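
# Quick sanity check on the converted array (a hedged assumption: converted
# poses have shape (..., n_joints, 3) for the target skeleton).
assert p3w.shape[-2:] == (skeleton.n_joints, 3)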

# matplotlib vis
from human_pose_util.skeleton.vis import vis3d
import matplotlib.pyplot as plt
vis3d(skeleton, p3w[0])
plt.show()

# animation vis with glumpy
from human_pose_util.animation import animated_scene as anim
anim.add_limb_collection_animator(skeleton, p3w, 60)
anim.run(60)
Example #6
Module issues if done in `if __name__ == '__main__'` block of register.py.
"""
import matplotlib.pyplot as plt
from human_pose_util.register import register_skeletons, register_datasets
from human_pose_util.register import get_dataset
from human_pose_util.register import get_skeleton
from dataset.normalize import normalized_view_data, normalized_p3w
from skeleton import vis3d

register_skeletons(h3m=True, eva=True, mpi_inf=True)
register_datasets(h3m=True, eva=True)
# register_converters(h3m_eva=True)
print('Registration successful!')

# dataset = dataset_register['h3m']

for dataset_id, target_skeleton_id in [['h3m', 's24'], ['eva', 's14']]:
    dataset = get_dataset(dataset_id)
    for mode in ['eval', 'train']:
        print('Getting normalized_view_data...')
        normalized_view_data(dataset, modes=mode)

        print('Getting normalized_p3w...')
        normalized_dataset, p3w = normalized_p3w(
            dataset, modes=mode, target_skeleton_id=target_skeleton_id)

skeleton = get_skeleton(normalized_dataset.attrs['skeleton_id'])
print(p3w.shape)
vis3d(skeleton, p3w[0])
plt.show()
    def _get_n_joints(self):
        skeleton_id = self.params['dataset']['normalize_kwargs']['skeleton_id']
        return get_skeleton(skeleton_id).n_joints
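
For reference, a self-contained stand-in showing the nested `params` layout
this helper expects (the enclosing class is not shown in the snippet, so the
stub and the 's14' skeleton id below are illustrative assumptions):

from human_pose_util.register import get_skeleton, register_skeletons

register_skeletons(eva=True)


class _ParamsStub(object):
    params = {'dataset': {'normalize_kwargs': {'skeleton_id': 's14'}}}

    def _get_n_joints(self):
        skeleton_id = self.params['dataset']['normalize_kwargs']['skeleton_id']
        return get_skeleton(skeleton_id).n_joints


print(_ParamsStub()._get_n_joints())  # e.g. 14 for the s14 skeleton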