Code Example #1
from Helpers import Animator  # assumed import; Example #4 imports AnimationStacker from the same Helpers package


def save_anim(reference_positions, bone_dependencies, rotations, name, prep):

    reference_anim = Animator.MocapAnimator2(reference_positions, [''] * 40,
                                             bone_dependencies,
                                             prep.target_delta_t,
                                             heading_dirs=rotations,
                                             name=name)
    reference_anim.animation()
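
A minimal usage sketch for the helper above. The array shapes, the stand-in _Prep object, and the 20 fps frame time are illustrative assumptions, not part of the source:

import numpy as np

class _Prep:  # hypothetical stand-in for the project's preprocessing object
    target_delta_t = 1.0 / 20.0  # assumed frame time

frames = 100
positions = np.zeros((frames, 11, 3))  # (frames, joints, xyz); joint count taken from Example #3
headings = np.zeros((frames, 3))       # per-frame heading directions (shape assumed)
deps = np.array([[0, 7], [1, 8], [2, 4], [3, 4], [4, -1], [5, 9], [6, 10],
                 [7, 2], [8, 3], [9, 4], [10, 4]])

save_anim(positions, deps, headings, "reference.gif", _Prep())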
Code Example #2
    def save_anim(self, global_positions, reference_positions,
                  bone_dependencies, rotations, name):
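        # redirect the output into a videos_out/ folder next to the source clip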
        file_name = name.split('/')[-1]
        name = name.replace(file_name, "videos_out/" + file_name)

        anim = Animator.MocapAnimator2(global_positions, [''] * 40,
                                       bone_dependencies,
                                       self.train_prep.target_delta_t,
                                       heading_dirs=rotations,
                                       name="trained.gif")
        anim.animation()
        reference_anim = Animator.MocapAnimator2(
            reference_positions, [''] * 40,
            bone_dependencies,
            self.train_prep.target_delta_t,
            heading_dirs=rotations,
            name="reference.gif")
        reference_anim.animation()
        AnimationStacker.concatenate_animations("trained.gif", "reference.gif",
                                                name + ".mp4")
Code Example #3
# "l_hand_idx, r_hand_idx, l_elbow_idx, r_elbow_idx, hip_idx, l_foot_idx, r_foot_idx
#[l_hand_idx, r_hand_idx, l_shoulder_idx, r_shoulder_idx, hip_idx, l_foot_idx, r_foot_idx, l_elbow_idx, r_elbow_idx, l_knee_idx, r_knee_idx]
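# each row is [joint_idx, parent_joint_idx]; -1 marks the root joint (hip)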
bone_dependencies = [[0, 7], [1, 8], [2, 4], [3, 4], [4, -1], [5, 9], [6, 10],
                     [7, 2], [8, 3], [9, 4], [10, 4]]
bone_dependencies = np.array(bone_dependencies)

global_positions = np.hstack(
    (training_prep.scale_back_input(eval_input.detach().cpu().numpy())[:, :6],
     training_prep.scale_back_output(target_output.detach().cpu().numpy())))
# global_positions = np.hstack((eval_input, eval_output))
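# reshape flat per-frame coordinates (frames, joints * 3) into (frames, joints, 3)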
global_positions = global_positions.reshape(global_positions.shape[0], -1, 3)
global_positions = eval_prep.add_heads(global_positions)
# training_prep.scale_back(global_positions)

if __name__ == '__main__':
    anim = Animator.MocapAnimator(global_positions, [''] * 40,
                                  bone_dependencies, 1.0 / TARGET_FPS)
    anim.animation()

# Test the model
# In test phase, we don't need to compute gradients (for memory efficiency)
# with torch.no_grad():
#     n_correct = 0
#     n_samples = 0
#     for input, output in test_loader:
#         input = input.reshape(-1, 28 * 28).to(device)
#         output = output.to(device)
#         outputs = model(input)
#         # round the scaled outputs to the nearest integer label
#         predicted = torch.round(10 * outputs.data).T
#         n_samples += output.size(0)
#         n_correct += (predicted == output).sum().item()
Code Example #4
eval_output = eval_output[idx1:idx2, :]

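# recover world-space joint positions for the predicted and the reference motions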
bone_dependencies, global_positions, rotations = eval_prep.get_global_pos_from_prediction(
    eval_input,
    to_numpy(final_outputs),
    training_prep,
    start_idx=idx1,
    end_idx=idx1 + 499)
bone_dependencies, reference_positions, rotations = eval_prep.get_global_pos_from_prediction(
    eval_input, eval_output, training_prep, start_idx=idx1, end_idx=idx2)

MOTIONTYPE = "Boxing"
# sp.print_stats(global_positions, reference_positions, ["l_hand", "r_hand", "l_shoulder", "r_shoulder", "hip", "l_foot", "r_foot", "l_elbow", "r_elbow", "l_knee", "r_knee"], MOTIONTYPE)

if __name__ == '__main__':
    anim = Animator.MocapAnimator(global_positions, [''] * 40,
                                  bone_dependencies,
                                  1.0 / TARGET_FPS,
                                  heading_dirs=rotations,
                                  name="trained.avi")
    anim.animation()
    reference_anim = Animator.MocapAnimator(reference_positions, [''] * 40,
                                            bone_dependencies,
                                            1.0 / TARGET_FPS,
                                            heading_dirs=rotations,
                                            name="reference.avi")
    reference_anim.animation()
    from Helpers import AnimationStacker
    AnimationStacker.concatenate_animations("trained.avi", "reference.avi",
                                            MOTIONTYPE + ".mp4")
Code Example #5
# [l_hand_idx, r_hand_idx, l_shoulder_idx, r_shoulder_idx, hip_idx, l_foot_idx, r_foot_idx, l_elbow_idx, r_elbow_idx, l_knee_idx, r_knee_idx]
bone_dependencies = [[0, 7], [1, 8], [2, 4], [3, 4], [4, -1], [5, 9], [6, 10],
                     [7, 2], [8, 3], [9, 4], [10, 4]]
bone_dependencies = np.array(bone_dependencies)

global_positions = np.hstack(
    (training_prep.scale_back_input(eval_input.detach().cpu().numpy())[:, :6],
     training_prep.scale_back_output(target_output.detach().cpu().numpy())))
# global_positions = np.hstack((eval_input, eval_output))
global_positions = global_positions.reshape(global_positions.shape[0], -1, 3)
global_positions = eval_prep.add_heads(global_positions)
# training_prep.scale_back(global_positions)

if __name__ == '__main__':
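    # this variant passes write_to_file=True, presumably writing the rendered animation to disk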
    anim = Animator.MocapAnimator(global_positions, [''] * 40,
                                  bone_dependencies,
                                  1.0 / TARGET_FPS,
                                  write_to_file=True)
    anim.animation()

# Test the model
# In test phase, we don't need to compute gradients (for memory efficiency)
# with torch.no_grad():
#     n_correct = 0
#     n_samples = 0
#     for input, output in test_loader:
#         input = input.reshape(-1, 28 * 28).to(device)
#         output = output.to(device)
#         outputs = model(input)
#         # round the scaled outputs to the nearest integer label
#         predicted = torch.round(10 * outputs.data).T
#         n_samples += output.size(0)
#         n_correct += (predicted == output).sum().item()
Code Example #6
bone_dependencies, global_positions, _ = eval_prep.get_global_pos_from_prediction(
    eval_input, to_numpy(final_outputs), training_prep)
_, reference_positions, _ = eval_prep.get_global_pos_from_prediction(
    eval_input, eval_output, training_prep)

MOTIONTYPE = "Boxing"
sp.print_stats(global_positions, reference_positions, [
    "l_hand", "r_hand", "l_shoulder", "r_shoulder", "hip", "l_foot", "r_foot",
    "l_elbow", "r_elbow", "l_knee", "r_knee"
], MOTIONTYPE)

if __name__ == '__main__':
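    # heading_dirs is sliced to the trailing frames so its length matches each position sequence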
    anim = Animator.MocapAnimator(
        global_positions, [''] * 40,
        bone_dependencies,
        1.0 / TARGET_FPS,
        heading_dirs=eval_prep.heading_dirs[-global_positions.shape[0]:],
        name="trained.avi")
    anim.animation()
    reference_anim = Animator.MocapAnimator(
        reference_positions, [''] * 40,
        bone_dependencies,
        1.0 / TARGET_FPS,
        heading_dirs=eval_prep.heading_dirs[-reference_positions.shape[0]:],
        name="reference.avi")
    reference_anim.animation()
    from Helpers import AnimationStacker
    AnimationStacker.concatenate_animations("trained.avi", "reference.avi",
                                            MOTIONTYPE + ".mp4")
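
Examples #2, #4, and #6 all repeat the same render-then-concatenate pattern. A minimal consolidated sketch, assuming Animator and AnimationStacker live in the Helpers package and MocapAnimator accepts the keyword arguments shown in Example #4:

from Helpers import Animator, AnimationStacker

def render_comparison(trained_positions, reference_positions, bone_dependencies,
                      delta_t, rotations, out_name):
    # render the trained and reference motions to intermediate clips
    for positions, clip in ((trained_positions, "trained.avi"),
                            (reference_positions, "reference.avi")):
        anim = Animator.MocapAnimator(positions, [''] * 40,
                                      bone_dependencies,
                                      delta_t,
                                      heading_dirs=rotations,
                                      name=clip)
        anim.animation()
    # stitch the two clips side by side into a single comparison video
    AnimationStacker.concatenate_animations("trained.avi", "reference.avi",
                                            out_name + ".mp4")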