Code example #1
import numpy as np
from tqdm import tqdm

from visualize import Visualize


def convert_world_to_cam(data_3d, center, focus):
    """Project world-space joints of shape (frames, joints, 3) into 2D."""
    data_2d = np.zeros((data_3d.shape[0], data_3d.shape[1], 2))

    # One camera, placed on a circle of radius 2000 around the root joint
    # of the first frame. The placement arguments never change across
    # frames, so the visualizer only needs to be set up once, outside
    # the loop.
    viz = Visualize(1)
    viz.place_camera_circular(0, 2000, data_3d[0, 0, :])

    for i in tqdm(range(data_3d.shape[0])):
        data_2d[i, :, :] = viz.get_projection(data_3d[i, :, :], 0, focus,
                                              center)

    return data_2d
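
A minimal smoke test, assuming the Visualize class from SaadManzur/H36M_Utils is importable and poses follow the (frames, 32 joints, 3) layout above; the center and focus values below are placeholders, not calibrated Human3.6M intrinsics, and their array shapes are assumptions:

dummy_poses = np.random.randn(10, 32, 3) * 500  # 10 hypothetical frames
projected = convert_world_to_cam(dummy_poses,
                                 center=np.array([512.0, 512.0]),
                                 focus=np.array([1145.0, 1145.0]))
print(projected.shape)  # expected: (10, 32, 2)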
Code example #2
File: main.py Project: SaadManzur/H36M_Utils
import constants as jnt
from utils import parse_metadata, read_h5, read_npz
from conversion import convert_json_to_npz, convert_h5_directory_to_augmented, convert_h5_to_projected
from visualize import Visualize
from tqdm import tqdm
import numpy as np

if __name__ == "__main__":

    viz = Visualize(50)

    data = read_h5("annot.h5")

    # Scatter 50 virtual cameras at random distances between 3000 and 3500
    # units around the root joint of the first frame.
    viz.place_random_cameras(50, [3000, 3500],
                             data['pose/3d-univ'][()][0, 0, :])

    data_2d = np.zeros((0, 32, 2))
    data_3d = np.zeros((0, 32, 3))

    # Project the first frame's 32 joints through random camera 32, using
    # the intrinsics of the first real Human3.6M camera.
    point_2d = viz.get_projection(data['pose/3d-univ'][()][0, :, :], 32,
                                  jnt.CAMERAS[0]['focal_length'],
                                  jnt.CAMERAS[0]['center'])

    # viz.plot_3d(data['pose/3d-univ'][()][0, :, :], True)
    # viz.plot_2d(point_2d)
    # convert_h5_directory_to_augmented("../H36M_H5_Annotations/**/**/*.h5", 15)

    convert_h5_to_projected('annot.h5', 'h36m_s1_reprojected')
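
If viz.plot_2d is unavailable, the reprojected joints can also be sanity-checked directly with matplotlib; a sketch, assuming point_2d comes back with shape (32, 2) in pixel coordinates:

import matplotlib.pyplot as plt

plt.scatter(point_2d[:, 0], point_2d[:, 1], s=10)
plt.gca().invert_yaxis()  # image coordinates grow downward
plt.title("Frame 0 through random camera 32")
plt.show()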
Code example #3
import glob

import numpy as np
from tqdm import tqdm

import constants as jnt
from utils import read_h5
from visualize import Visualize


def convert_h5_directory_to_augmented(directory_path, cam_count):

    files = glob.glob(directory_path)

    data = {}

    viz = Visualize(cam_count)

    prev_subject = ""

    for file in files:

        # With a pattern like "../H36M_H5_Annotations/**/**/*.h5" the path
        # splits into ['..', 'H36M_H5_Annotations', subject, action file].
        meta = file.split("/")
        subject = meta[2]
        """
        if subject != prev_subject:
            if data is not None:
                np.savez_compressed("output/h36m_augment_" + subject + "_cam_count_" + str(cam_count), data=data, cameras=jnt.CAMERAS)
                print("Dumped data for", subject)
            data = {}
            prev_subject = subject
        """

        action_name = meta[3].split("-")[0]

        # Only the SittingDown action is augmented here; skip other actions
        # before allocating any buffers for them.
        if action_name != "SittingDown":
            continue

        if subject not in data:
            data[subject] = {}

        if action_name not in data[subject]:
            data[subject][action_name] = {
                '2d': np.empty((0, 32, 2), dtype=np.float32),
                '3d': np.empty((0, 32, 3), dtype=np.float32),
                'cam_id': [],
                'R': [],
                't': []
            }

        h5_data = read_h5(file)

        print("Processing", subject, "for", action_name)

        for i in tqdm(range(h5_data['pose/3d'][()].shape[0])):
            # Re-scatter the random cameras around this frame's root joint,
            # at distances between 3000 and 3500 units.
            viz.place_random_cameras(cam_count, [3000, 3500],
                                     h5_data['pose/3d'][i, 0, :])
            # Pair every random camera k with each of the four real
            # Human3.6M intrinsic sets j.
            for j in range(4):
                for k in range(cam_count):
                    point_2d = viz.get_projection(
                        h5_data['pose/3d-univ'][()][i, :, :], k,
                        jnt.CAMERAS[j]['focal_length'],
                        jnt.CAMERAS[j]['center']).reshape(1, 32, 2)
                    point_3d = viz.get_camspace_coord(
                        h5_data['pose/3d-univ'][()][i, :, :],
                        k).reshape(1, 32, 3)
                    R, t = viz.get_rotation_and_translation(k)

                    data[subject][action_name]['2d'] = np.vstack(
                        (data[subject][action_name]['2d'], point_2d))
                    data[subject][action_name]['3d'] = np.vstack(
                        (data[subject][action_name]['3d'], point_3d))
                    # Append per-sample camera metadata instead of
                    # overwriting it on every iteration.
                    data[subject][action_name]['cam_id'].append(
                        jnt.CAMERAS[j]['id'])
                    data[subject][action_name]['R'].append(R)
                    data[subject][action_name]['t'].append(t)

    np.savez_compressed("output/h36m_augment_sitting_down_cam_count" +
                        str(cam_count),
                        data=data,
                        cameras=jnt.CAMERAS)
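
Since np.savez_compressed pickles the nested dict, reading the archive back requires allow_pickle and a .item() call to recover it; a sketch, assuming the function was run with cam_count=15:

archive = np.load("output/h36m_augment_sitting_down_cam_count15.npz",
                  allow_pickle=True)
data = archive["data"].item()  # 0-d object array -> nested dict
for subject, actions in data.items():
    sample = actions["SittingDown"]
    print(subject, sample["2d"].shape, sample["3d"].shape)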