def load_skeleton(self, B_path, is_clean_pose=True):
    """Load an OpenPose JSON file and render it as a stack of pose maps.

    Args:
        B_path: path to an OpenPose keypoint JSON file (``{"people": [...]}``).
        is_clean_pose: if True the keypoints are already in the 17-joint
            Human3.6M layout; if False they are converted from the
            18-joint OpenPose layout to COCO-17 first.

    Returns:
        torch.Tensor of per-joint maps sized ``load_size``; when
        ``opt.no_bone_map`` is False, 3 extra channels with the drawn
        skeleton are concatenated along dim 0.
    """
    # close the file deterministically instead of leaking the handle
    with open(B_path) as f:
        B_coor = json.load(f)["people"]
    pose_body = None  # stays None when no person was detected
    if len(B_coor) == 0:
        # no person detected: all-zero maps with the configured channel count
        pose = torch.zeros(self.opt.structure_nc, self.load_size[0], self.load_size[1])
    else:
        B_coor = B_coor[0]  # only the first detected person is used
        pose_dict = openpose_utils.obtain_2d_cords(
            B_coor, resize_param=self.load_size, org_size=self.org_size,
            affine=self.affine_param)
        pose_body = pose_dict['body']
        if not is_clean_pose:
            pose_body = openpose_utils.openpose18_to_coco17(pose_body)
        pose_numpy = openpose_utils.obtain_map(pose_body, self.load_size)
        # HWC -> CHW for torch
        pose = torch.Tensor(np.transpose(pose_numpy, (2, 0, 1)))
    Bi = pose
    if not self.opt.no_bone_map:
        color = np.zeros(shape=self.load_size + (3, ), dtype=np.uint8)
        if pose_body is not None:
            # only draw when a person was detected; previously this raised
            # NameError on empty "people" because pose_body was undefined
            LIMB_SEQ = (openpose_utils.LIMB_SEQ_HUMAN36M_17 if is_clean_pose
                        else openpose_utils.LIMB_SEQ_COCO_17)
            # np.int was removed in NumPy 1.24; the builtin int is equivalent
            color = openpose_utils.draw_joint(color, pose_body.astype(int), LIMB_SEQ)
        color = torch.Tensor(np.transpose(color, (2, 0, 1)))
        Bi = torch.cat((Bi, color), dim=0)
    return Bi
def load_keypoint(self, B_path, is_clean_pose):
    """Load one OpenPose JSON file as a normalized keypoint column vector.

    Args:
        B_path: path to an OpenPose keypoint JSON file (``{"people": [...]}``).
        is_clean_pose: if True the body keypoints are used as-is (17-joint
            layout); if False they are converted from OpenPose-18 to COCO-17.

    Returns:
        torch.Tensor of shape (34, 1): 17 (x, y) pairs scaled to [-1, 1],
        or all zeros when no person was detected.
    """
    # close the file deterministically instead of leaking the handle
    with open(B_path) as f:
        B_coor = json.load(f)["people"]
    if len(B_coor) == 0:
        # no person detected: zero vector sentinel
        pose = torch.zeros(17*2, 1)
    else:
        B_coor = B_coor[0]  # only the first detected person is used
        pose_dict = openpose_utils.obtain_2d_cords(
            B_coor, resize_param=self.load_size, org_size=self.org_size,
            affine=self.affine_param)
        if not is_clean_pose:
            pose = openpose_utils.openpose18_to_coco17(pose_dict['body'])
        else:
            pose = pose_dict['body']
        pose = torch.Tensor(pose).float()
        pose = pose.view(17*2, 1)
        # scale pixel coordinates to [-1, 1]; both axes are divided by
        # load_size[1] — assumes a square load_size, TODO confirm
        pose = 2*pose/self.load_size[1] - 1
    return pose
def load_keypoints(self, keypoint_list, B_paths):
    """Load a sequence of OpenPose JSON files as normalized keypoint columns.

    Unlike :meth:`load_keypoint`, this always converts OpenPose-18 to
    COCO-17 and applies no affine parameter.

    Args:
        keypoint_list: indices into ``B_paths`` selecting which files to load.
        B_paths: list of OpenPose keypoint JSON file paths.

    Returns:
        Tuple of (B, B_path_recoder) where B is a (34, len(keypoint_list))
        tensor — one normalized column per frame, all zeros for frames with
        no detected person — and B_path_recoder lists the loaded paths.
    """
    pose_list = []
    B_path_recoder = []
    for index in keypoint_list:
        B_path = B_paths[index]
        B_path_recoder.append(B_path)
        # with-statement closes each file; the old open() inside the loop
        # leaked one handle per frame
        with open(B_path) as f:
            B_coor = json.load(f)["people"]
        if len(B_coor) == 0:
            # no person detected: zero vector sentinel
            pose = torch.zeros(17*2, 1)
        else:
            B_coor = B_coor[0]  # only the first detected person is used
            pose_dict = openpose_utils.obtain_2d_cords(
                B_coor, resize_param=self.load_size, org_size=self.org_size)
            pose = openpose_utils.openpose18_to_coco17(pose_dict['body'])
            pose = torch.Tensor(pose).float()
            pose = pose.view(17*2, 1)
            # normalize pose to [-1, 1]; both axes are divided by
            # load_size[1] — assumes a square load_size, TODO confirm
            pose = 2*pose/self.load_size[1] - 1
        pose_list.append(pose)
    B = torch.cat(pose_list, 1)
    return B, B_path_recoder