Example #1
    def save_prediction(self, name, perceptual_loss_net):
        super(vae_wrapper, self).save_prediction(name, perceptual_loss_net)
        eval_prep = self.eval_prep
        # Normalise the evaluation data with the scalers fitted on the training set.
        eval_input = self.train_prep.scale_input(eval_prep.inputs)
        eval_output = self.train_prep.scale_output(eval_prep.outputs)

        # Convert the predicted local joint positions to a NumPy array.
        local_positions = to_numpy(self.final_outputs)

        # Reconstruct global joint positions from the prediction and, for
        # comparison, from the ground-truth evaluation outputs.
        bone_dependencies, global_positions, rotations = eval_prep.get_global_pos_from_prediction(
            eval_input, local_positions, self.train_prep)
        _, reference_positions, rotations = eval_prep.get_global_pos_from_prediction(
            eval_input, eval_output, self.train_prep)
        self.export_anim_unity(
            self.train_prep.scale_back_output(local_positions), name)
        self.stats_printer(self.train_prep.scale_back_output(local_positions),
                           eval_prep.outputs, name, perceptual_loss_net)
        self.save_anim(global_positions, reference_positions,
                       bone_dependencies, rotations, name)
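
All four examples call a to_numpy helper that is not shown on this page; a minimal sketch, assuming the model outputs are PyTorch tensors (the helper's name comes from the examples, its body is an assumption):

import torch

def to_numpy(tensor):
    # Detach from the autograd graph, move to CPU, and convert to NumPy.
    return tensor.detach().cpu().numpy()
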
Example #2
    def save_prediction(self, name, perceptual_loss_net):
        super(rnn_wrapper_2, self).save_prediction(name, perceptual_loss_net)

        training_prep = self.train_prep
        eval_prep = self.eval_prep
        final_outputs = self.final_outputs

        eval_input = training_prep.scale_input(eval_prep.inputs)
        eval_output = training_prep.scale_output(eval_prep.outputs)

        # Drop the leading remainder frames so the evaluation range starts on a
        # sequence boundary and spans exactly the predicted frames.
        idx1 = eval_input.shape[0] % eval_prep.total_seq_length
        idx2 = idx1 + final_outputs.shape[0]
        eval_input = eval_input[idx1:idx2, :]
        eval_output = eval_output[idx1:idx2, :]
        # Remove the singleton sequence dimension from the predictions.
        self.final_outputs = self.final_outputs.squeeze(1)

        local_positions = to_numpy(self.final_outputs)

        bone_dependencies, global_positions, rotations = eval_prep.get_global_pos_from_prediction(
            eval_input,
            local_positions,
            self.train_prep,
            start_idx=idx1,
            end_idx=idx2)
        _, reference_positions, rotations = eval_prep.get_global_pos_from_prediction(
            eval_input,
            eval_output,
            self.train_prep,
            start_idx=idx1,
            end_idx=idx2)
        self.export_anim_unity(
            self.train_prep.scale_back_output(local_positions), name)
        self.stats_printer(self.train_prep.scale_back_output(local_positions),
                           eval_prep.outputs, name, perceptual_loss_net)
        self.save_anim(global_positions, reference_positions,
                       bone_dependencies, rotations, name)
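
The idx1/idx2 arithmetic above trims the evaluation arrays so they line up with the frames the recurrent model actually predicted; a small worked illustration with hypothetical numbers (none of these values appear in the source):

total_frames = 1030       # frames in the evaluation set (assumed)
total_seq_length = 100    # sequence length used during training (assumed)
predicted_frames = 1000   # rows in final_outputs (assumed)

idx1 = total_frames % total_seq_length   # 30: the leading remainder is dropped
idx2 = idx1 + predicted_frames           # 1030: end of the predicted range
assert idx2 - idx1 == predicted_frames
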
Example #3
x_len = eval_x.shape[1]
# Predict the evaluation sequence from the first 500 conditioning frames.
final_outputs = glow_model.predict(eval_cond[:, :, :500], STACKCOUNT, x_len)
eval_input = training_prep.scale_input(eval_prep.inputs)
eval_output = training_prep.scale_output(eval_prep.outputs)

idx1 = eval_input.shape[0] % eval_prep.total_seq_length
idx2 = idx1 + 499
eval_input = eval_input[idx1:idx2, :]
eval_output = eval_output[idx1:idx2, :]

bone_dependencies, global_positions, rotations = eval_prep.get_global_pos_from_prediction(
    eval_input,
    to_numpy(final_outputs),
    training_prep,
    start_idx=idx1,
    end_idx=idx2)
_, reference_positions, rotations = eval_prep.get_global_pos_from_prediction(
    eval_input, eval_output, training_prep, start_idx=idx1, end_idx=idx2)

MOTIONTYPE = "Boxing"
# sp.print_stats(global_positions, reference_positions, ["l_hand", "r_hand", "l_shoulder", "r_shoulder", "hip", "l_foot", "r_foot", "l_elbow", "r_elbow", "l_knee", "r_knee"], MOTIONTYPE)

if __name__ == '__main__':
    anim = Animator.MocapAnimator(global_positions, [''] * 40,
                                  bone_dependencies,
                                  1.0 / TARGET_FPS,
                                  heading_dirs=rotations,
                                  name="trained.avi")
Example #4
rnnvae_model.train_model(input=train_feet_lower_input,
                         conditional_input=train_feet_lower_conditional_input,
                         output=train_feet_output,
                         eval_input=eval_feet_lower_input,
                         eval_conditional_input=eval_feet_conditional_input,
                         eval_output=eval_feet_output,
                         learning_rate=learning_rate,
                         epochs=num_epochs,
                         batch_size=batch_size)

# Run the feed-forward model, then the RNN-VAE on top of its outputs.
ff_outputs = ff_model.predict(eval_input)
final_outputs = rnnvae_model.predict(eval_feet_input, ff_outputs, STACKCOUNT)

bone_dependencies, global_positions, _ = eval_prep.get_global_pos_from_prediction(
    eval_input, to_numpy(final_outputs), training_prep)
_, reference_positions, _ = eval_prep.get_global_pos_from_prediction(
    eval_input, eval_output, training_prep)

MOTIONTYPE = "Boxing"
sp.print_stats(global_positions, reference_positions, [
    "l_hand", "r_hand", "l_shoulder", "r_shoulder", "hip", "l_foot", "r_foot",
    "l_elbow", "r_elbow", "l_knee", "r_knee"
], MOTIONTYPE)

if __name__ == '__main__':
    anim = Animator.MocapAnimator(
        global_positions, [''] * 40,
        bone_dependencies,
        1.0 / TARGET_FPS,
        heading_dirs=eval_prep.heading_dirs[-global_positions.shape[0]:],
        name="trained.avi")