def __call__(self, results):
    """Warp the image and 2D joint coordinates to the model input size.

    Reads ``center``/``scale``/``rotation`` from *results* and applies the
    corresponding affine transform to both ``results['img']`` and the x/y
    part of ``results['joints_3d']``, writing the warped values back.

    Args:
        results (dict): Pipeline results containing ``img``, ``joints_3d``,
            ``joints_3d_visible``, ``center``, ``scale``, ``rotation`` and
            ``ann_info`` (with ``image_size`` and ``num_joints``).

    Returns:
        dict: The same *results* dict, updated in place.
    """
    target_size = results['ann_info']['image_size']
    image = results['img']
    joints = results['joints_3d']
    visibility = results['joints_3d_visible']
    center = results['center']
    scale = results['scale']
    rotation = results['rotation']

    out_wh = (int(target_size[0]), int(target_size[1]))
    if self.use_udp:
        # Unbiased data processing builds the warp on a (size - 1) grid.
        matrix = get_warp_matrix(rotation, center * 2.0,
                                 target_size - 1.0, scale * 200.0)
        image = cv2.warpAffine(
            image, matrix, out_wh, flags=cv2.INTER_LINEAR)
        joints[:, 0:2] = warp_affine_joints(joints[:, 0:2].copy(), matrix)
    else:
        matrix = get_affine_transform(center, scale, rotation, target_size)
        image = cv2.warpAffine(
            image, matrix, out_wh, flags=cv2.INTER_LINEAR)
        # Only warp keypoints that are marked visible.
        for idx in range(results['ann_info']['num_joints']):
            if visibility[idx, 0] > 0.0:
                joints[idx, 0:2] = affine_transform(joints[idx, 0:2], matrix)

    results['img'] = image
    results['joints_3d'] = joints
    results['joints_3d_visible'] = visibility
    return results
def get_group_preds(grouped_joints, center, scale, heatmap_size,
                    use_udp=False):
    """Transform the grouped joints back to the image.

    Args:
        grouped_joints (list): Grouped person joints.
        center (np.ndarray[2, ]): Center of the bounding box (x, y).
        scale (np.ndarray[2, ]): Scale of the bounding box wrt
            [width, height].
        heatmap_size (np.ndarray[2, ]): Size of the destination heatmaps.
        use_udp (bool): Unbiased data processing.
            Paper ref: Huang et al. The Devil is in the Details: Delving into
            Unbiased Data Processing for Human Pose Estimation (CVPR 2020).

    Returns:
        list: List of the pose result for each person.
    """
    if not use_udp:
        # Standard path: map each person's joints through the inverse
        # heatmap-to-image transform.
        return [
            transform_preds(person, center, scale, heatmap_size)
            for person in grouped_joints[0]
        ]

    if grouped_joints[0].shape[0] == 0:
        return []

    # UDP path: build the warp on a (size - 1) grid and transform all
    # persons' x/y coordinates in one call.
    hm_size = np.array(heatmap_size, dtype=np.float32) - 1.0
    matrix = get_warp_matrix(
        theta=0, size_input=hm_size, size_dst=scale, size_target=hm_size)
    grouped_joints[0][..., :2] = warp_affine_joints(
        grouped_joints[0][..., :2], matrix)
    return list(grouped_joints[0])