def get_normalized_dataset(dataset_params):
    """Return a filtered, copied and normalized version of the dataset named in `dataset_params`."""
    dataset = get_dataset(dataset_params['type'])
    dataset = filter_dataset(dataset,
                             modes=['eval'],
                             **dataset_params['filter_kwargs'])
    dataset = copy_group(dataset)
    dataset = normalize_dataset(dataset, **dataset_params['normalize_kwargs'])
    return dataset
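A hedged usage sketch for the function above: the key layout ('type', 'filter_kwargs', 'normalize_kwargs') is taken from the snippet itself, but the dataset id and the empty option dicts are placeholders, and get_dataset / filter_dataset / copy_group / normalize_dataset are assumed to be imported already (their modules are not shown in this excerpt).

dataset_params = {
    'type': 'h3m',           # hypothetical dataset id; 'h3m' and 'eva' appear in the examples below
    'filter_kwargs': {},     # placeholder: no extra filtering options
    'normalize_kwargs': {},  # placeholder: default normalization options
}
normalized = get_normalized_dataset(dataset_params)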
Example #2
def report(self, results, overwrite=False):
    # Rebuild the normalized evaluation dataset described by the saved params.
    with open(results.attrs['params_path'], 'r') as f:
        params = json.load(f)
    dataset = get_dataset(params['dataset']['type'])
    dataset = filter_dataset(dataset,
                             modes=['eval'],
                             **params['dataset']['filter_kwargs'])
    dataset = copy_group(dataset)
    dataset = normalize_dataset(dataset,
                                **params['dataset']['normalize_kwargs'])
    # Summarize each result, caching the summary in the results group when possible.
    summaries = {}
    for k, result in results.items():
        sequence = dataset[k]
        cat = self.category(sequence)
        if self.key in result and not overwrite:
            summary = result[self.key]
        else:
            print('Generating %s: %s' % (self.key, k))
            summary = self.summarize(sequence, result)
            if hasattr(result, 'create_dataset'):
                if self.key in result:
                    assert overwrite
                    # assert(hasattr(result, '__del__'))
                    del result[self.key]
                result.create_dataset(self.key, data=summary)
        if cat not in summaries:
            summaries[cat] = []
        summaries[cat].append(summary)
    # Report per-category and overall means.
    print('Results for %s' % self.key)
    summaries = {
        k: np.concatenate(v, axis=0) for k, v in summaries.items()
    }
    for k, summary in summaries.items():
        print('%s: %.2f' % (k, np.mean(summary)))
    total = np.concatenate(list(summaries.values()), axis=0)
    print('Total: %.2f' % np.mean(total))
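The report method above only relies on `results` behaving like an h5py group: it reads results.attrs['params_path'], iterates results.items(), and caches summaries via create_dataset / del. A minimal sketch of a compatible results file, assuming h5py; the file name, sequence key, and summary key below are hypothetical.

import h5py
import numpy as np

# Build a results file shaped the way report() expects (assumption: h5py-backed).
with h5py.File('results_example.hdf5', 'w') as results:       # hypothetical file name
    results.attrs['params_path'] = 'params.json'              # hypothetical params path
    seq = results.create_group('S1_Walking_1_0')               # hypothetical sequence key
    seq.create_dataset('proc_error', data=np.zeros((10,)))     # hypothetical summary key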
Example #3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
from human_pose_util.register import register_datasets, get_dataset, get_skeleton, get_converter

register_datasets(eva=True)
eva = get_dataset('eva')
key = list(eva.keys())[0]
p3w = np.array(eva[key]['p3w'])
p3w /= 1000  # millimetres -> metres
target = 's14'
converter = get_converter('s20', target)
skeleton = get_skeleton(target)
p3w = converter.convert(p3w)

# matplotlib vis
from human_pose_util.skeleton.vis import vis3d
import matplotlib.pyplot as plt
vis3d(skeleton, p3w[0])
plt.show()

# animation vis with glumpy
from human_pose_util.animation import animated_scene as anim
anim.add_limb_collection_animator(skeleton, p3w, 60)
anim.run(60)
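For reference, the converter above maps 's20' (20-joint) poses to the 's14' (14-joint) layout. The numpy-only sketch below illustrates that kind of joint re-indexing under the assumption, purely for illustration, that the conversion is a subset selection; the index list is hypothetical and not the library's actual mapping.

import numpy as np

S20_TO_S14_INDICES = [0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16]  # hypothetical mapping

def convert_s20_to_s14(p3w_s20):
    # Re-index poses of shape (..., 20, 3) to (..., 14, 3).
    return p3w_s20[..., S20_TO_S14_INDICES, :]

fake_poses = np.zeros((100, 20, 3))          # 100 frames of fake 20-joint world poses
print(convert_s20_to_s14(fake_poses).shape)  # (100, 14, 3)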
Example #4
Module issues if done in `if __name__ == '__main__'` block of register.py.
"""
import matplotlib.pyplot as plt
from human_pose_util.register import register_skeletons, register_datasets
from human_pose_util.register import get_dataset
from human_pose_util.register import get_skeleton
from human_pose_util.dataset.normalize import normalized_view_data, normalized_p3w
from human_pose_util.skeleton.vis import vis3d

register_skeletons(h3m=True, eva=True, mpi_inf=True)
register_datasets(h3m=True, eva=True)
# register_converters(h3m_eva=True)
print('Registration successful!')

# dataset = dataset_register['h3m']

for dataset_id, target_skeleton_id in [['h3m', 's24'], ['eva', 's14']]:
    dataset = get_dataset(dataset_id)
    for mode in ['eval', 'train']:
        print('Getting normalized_view_data...')
        normalized_view_data(dataset, modes=mode)

        print('Getting normalized_p3w...')
        normalized_dataset, p3w = normalized_p3w(
            dataset, modes=mode, target_skeleton_id=target_skeleton_id)

# Visualize a frame from the last (dataset, mode) pair processed above.
skeleton = get_skeleton(normalized_dataset.attrs['skeleton_id'])
print(p3w.shape)
vis3d(skeleton, p3w[0])
plt.show()
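vis3d is the library's own skeleton visualizer. If only a quick look at the joints is needed, a rough stand-in can be sketched with plain matplotlib; this assumes a single pose frame of shape (num_joints, 3) and draws joints only, since the limb connectivity lives in the skeleton object.

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 - enables the '3d' projection on older matplotlib

def scatter_pose(frame):
    # frame: (num_joints, 3) array of world-space joint positions.
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(frame[:, 0], frame[:, 1], frame[:, 2])
    plt.show()

scatter_pose(np.random.uniform(size=(14, 3)))  # fake 14-joint frame, stands in for p3w[0]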