    # Ground truth is unavailable, so borrow the camera orientation from any
    # subject that has it; the extrinsics are nearly identical across subjects
    # and are only needed here for visualization.
    for subject in dataset.cameras():
        if 'orientation' in dataset.cameras()[subject][args.viz_camera]:
            rot = dataset.cameras()[subject][args.viz_camera]['orientation']
            break
    prediction = camera_to_world(prediction, R=rot, t=0)
    # We don't have the trajectory, but at least we can rebase the height
    prediction[:, :, 2] -= np.min(prediction[:, :, 2])

    anim_output = {'Reconstruction': prediction}
    if ground_truth is not None and not args.viz_no_ground_truth:
        anim_output['Ground truth'] = ground_truth

    # Map the normalized 2D keypoints back to pixel coordinates
    input_keypoints = image_coordinates(input_keypoints[..., :2],
                                        w=cam['res_w'], h=cam['res_h'])

    # Render the video
    from common.visualization import render_animation
    render_animation(input_keypoints, keypoints_metadata, anim_output,
                     dataset.skeleton(), dataset.fps(), args.viz_bitrate,
                     cam['azimuth'], args.viz_output,
                     limit=args.viz_limit, downsample=args.viz_downsample,
                     size=args.viz_size, input_video_path=args.viz_video,
                     viewport=(cam['res_w'], cam['res_h']),
                     input_video_skip=args.viz_skip)
# If not rendering, evaluate on the test subjects instead
else:
    print('Evaluating...')
    all_actions = {}
    all_actions_by_subject = {}
    for subject in subjects_test:
        if subject not in all_actions_by_subject:
            all_actions_by_subject[subject] = {}

        for action in dataset[subject].keys():
            # Group e.g. 'Walking 1' and 'Walking 2' under one action name
            action_name = action.split(' ')[0]
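For intuition about the two coordinate transforms used in the rendering branch: camera_to_world rotates camera-frame joint positions into the world frame using the camera's orientation quaternion, and image_coordinates undoes the screen normalization applied to the 2D detections. The sketch below is a minimal stand-in, not the repo's code; the (w, x, y, z) quaternion layout, the normalization inverse, and the helper names are assumptions modeled on the conventions in common/camera.py.

import numpy as np

def quat_rotate(q, v):
    # Rotate 3D vectors v by unit quaternions q = (w, x, y, z) using
    # v' = v + 2*w*(u x v) + 2*(u x (u x v)), where u is the vector part.
    u = q[..., 1:]
    uv = np.cross(u, v)
    uuv = np.cross(u, uv)
    return v + 2 * (q[..., :1] * uv + uuv)

def camera_to_world_sketch(X, R, t=0):
    # Broadcast the single camera quaternion R over all frames and joints.
    q = np.broadcast_to(R, X.shape[:-1] + (4,))
    return quat_rotate(q, X) + t

def image_coordinates_sketch(X, w, h):
    # Assumed inverse of the screen normalization: x is mapped back to
    # [0, w] pixels; y is scaled by the same factor to keep the aspect ratio.
    return (X + np.array([1, h / w])) * w / 2

# Toy usage: 10 frames x 17 joints, identity rotation, then the same
# height rebase as above so the lowest joint sits at z = 0.
pred = np.random.randn(10, 17, 3)
pred = camera_to_world_sketch(pred, np.array([1.0, 0, 0, 0]), t=0)
pred[:, :, 2] -= np.min(pred[:, :, 2])

Note the t=0 in the script's camera_to_world call: without ground truth there is no global trajectory, so the prediction is rotated but never translated, which is why the height rebase afterwards simply shifts the skeleton down rather than recovering a true root trajectory.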