# Visualization path: load predicted 3D poses, move them back into world
# coordinates, and render the animation.
prediction = np.load(args.viz_input)

# Example camera rotation (quaternion) taken from h36m.
rot = np.array(
    [0.14070565, -0.15007018, -0.7552408, 0.62232804], dtype=np.float32)
prediction = camera_to_world(prediction, R=rot, t=0)  # Invert camera transformation

# We don't have the trajectory, but at least we can rebase the height.
# `heights` is a view into `prediction`, so the subtraction edits it in place.
heights = prediction[:, :, 2]
heights -= heights.min()

anim_output = dict(Inference=prediction)

# Full 32-joint h36m skeleton; the unused joints are pruned right after.
skeleton = Skeleton(
    parents=[
        -1, 0, 1, 2, 3, 4, 0, 6, 7, 8, 9, 0, 11, 12, 13, 14, 12, 16,
        17, 18, 19, 20, 19, 22, 12, 24, 25, 26, 27, 28, 27, 30
    ],
    joints_left=[6, 7, 8, 9, 10, 16, 17, 18, 19, 20, 21, 22, 23],
    joints_right=[1, 2, 3, 4, 5, 24, 25, 26, 27, 28, 29, 30, 31])
skeleton.remove_joints(
    [4, 5, 9, 10, 11, 16, 20, 21, 22, 23, 24, 28, 29, 30, 31])

# Re-parent joints 11 and 14 to joint 8 after the removal renumbering
# (pokes the private _parents array directly, as upstream does).
for reparented in (11, 14):
    skeleton._parents[reparented] = 8

render_animation(anim_output, skeleton, 24, 3000, 70.0, args.viz_output,
                 viewport=(1000, 1002))
# NOTE(review): this chunk opens mid-expression — the lines below are the
# tail of an offsets=[...] list inside a Skeleton(...) constructor whose
# opening (and the `skeleton_cmu = Skeleton(` assignment, presumably) sits
# above this chunk. Offsets appear to be per-joint 3D bone vectors for a
# CMU-style skeleton — TODO confirm against the constructor's signature.
[3.48356, -0., -0.], [0., 0., 0.], [0.71526, -0., -0.], [0., 0., 0.],
[0., 0., 0.], [-3.1366, 1.37405, -0.40465], [-5.2419, -0., -0.],
[-3.44417, -0., -0.], [0., 0., 0.], [-0.62253, -0., -0.], [0., 0., 0.],
],
    # Parent index per joint; -1 marks the root.
    parents=[
        -1, 0, 1, 2, 3, 4, 0, 6, 7, 8, 9, 0, 11, 12, 13, 14, 15, 13,
        17, 18, 19, 20, 21, 20, 13, 24, 25, 26, 27, 28, 27
    ],
    joints_left=[6, 7, 8, 9, 10, 21, 22, 23, 24, 25],
    joints_right=[1, 2, 3, 4, 5, 16, 17, 18, 19, 20])

# Locations of the locomotion dataset and the pretrained long-term weights.
dataset_path = '/storage/quaternet/dataset_locomotion.npz'
long_term_weights_path = '/artifacts/weights_long_term.bin'

# Load the mocap data at its native 120 fps.
dataset = MocapDataset(dataset_path, skeleton_cmu, fps=120)
# Remove useless joints, from both the skeleton and the dataset
skeleton_cmu.remove_joints([13, 21, 23, 28, 30], dataset)
# Augment with mirrored copies, convert rotations to 'yzx' Euler angles,
# then downsample by 4x (120 fps -> 30 fps, presumably — confirm in
# MocapDataset.downsample).
dataset.mirror()
dataset.compute_euler_angles('yzx')
dataset.downsample(4)