def refine_poses(self, keypoint_thresh=10, score_thresh=0.5, neck_thresh=0.59, margin=0.0):
    # Field dimensions in meters
    W, H = 104.73, 67.74

    for i, basename in enumerate(tqdm(self.frame_basenames)):
        poses = self.poses[basename]

        # Remove poses with too few valid keypoints, low total score, or a weak neck detection
        keep = []
        for ii in range(len(poses)):
            keypoints = poses[ii]
            valid = (keypoints[:, 2] > 0.).nonzero()[0]
            score = np.sum(keypoints[valid, 2])
            if len(valid) > keypoint_thresh and score > score_thresh and keypoints[1, 2] > neck_thresh:
                keep.append(ii)
        poses = [poses[ii] for ii in keep]

        # Build a small box around each neck keypoint, scored by the summed keypoint confidence
        root_part = 1
        root_box = []
        for ii in range(len(poses)):
            root_tmp = poses[ii][root_part, :]
            valid_keypoints = (poses[ii][:, 2] > 0).nonzero()
            root_box.append([root_tmp[0] - 10, root_tmp[1] - 10,
                             root_tmp[0] + 10, root_tmp[1] + 10,
                             np.sum(poses[ii][valid_keypoints, 2])])
        root_box = np.array(root_box)

        # Perform neck NMS to suppress duplicate detections of the same player
        if len(root_box.shape) == 1:
            root_box = root_box[None, :]
            keep2 = [0]
        else:
            keep2 = nms(root_box.astype(np.float32), 0.1)
        poses = [poses[ii] for ii in keep2 if ii < len(poses)]

        # Remove poses whose lifted neck position falls outside the field
        keep3 = []
        cam_mat = self.calib[basename]
        cam = cam_utils.Camera(basename, cam_mat['A'], cam_mat['R'], cam_mat['T'],
                               self.shape[0], self.shape[1])
        for ii in range(len(poses)):
            kp3 = misc_utils.lift_keypoints_in_3d(cam, poses[ii])
            if (-W / 2. - margin) <= kp3[1, 0] <= (W / 2. + margin) and \
                    (-H / 2. - margin) <= kp3[1, 2] <= (H / 2. + margin):
                keep3.append(ii)
        poses = [poses[ii] for ii in keep3]

        self.poses[basename] = poses
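# The neck NMS step above assumes a helper `nms(boxes, thresh)` that takes an array of
# [x1, y1, x2, y2, score] rows and returns the indices of the boxes to keep. A minimal
# greedy-IoU sketch of such a helper is shown below; the project's own implementation
# may differ (e.g. a compiled version), so treat this as an illustration only.
def nms(boxes, thresh):
    x1, y1, x2, y2, scores = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3], boxes[:, 4]
    areas = (x2 - x1) * (y2 - y1)
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # Intersection of the best remaining box with all other candidates
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        inter = np.maximum(0., xx2 - xx1) * np.maximum(0., yy2 - yy1)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        # Drop boxes that overlap the kept box too much
        order = order[1:][iou <= thresh]
    return keep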
fig = plt.figure()
ax = fig.add_subplot(111)

for i in tqdm(range(len(new_tracklets))):
    neck_pos = []
    for j in range(len(new_tracklets[i])):
        frame_index = new_tracklets[i][j].frame_index
        basename = db.frame_basenames[frame_index]
        cam_data = db.calib[basename]
        cam = cam_utils.Camera(basename, cam_data['A'], cam_data['R'], cam_data['T'],
                               db.shape[0], db.shape[1])
        kp_3d = misc_utils.lift_keypoints_in_3d(cam, new_tracklets[i][j].keypoints)
        neck_pos.append(kp_3d[1, :])
    neck_pos = np.array(neck_pos)

    # Smooth trajectory
    smoothed_positions = smooth_trajectory(new_tracklets[i], neck_pos)
    for j in range(len(new_tracklets[i])):
        data_out[new_tracklets[i][j].frame].append({'mesh': new_tracklets[i][j].mesh_name,
                                                    'x': smoothed_positions[0, j],
                                                    'y': smoothed_positions[1, j],
                                                    'z': smoothed_positions[2, j]})
    ax.plot(smoothed_positions[0, :], smoothed_positions[2, :], 'o')

plt.show()

with open(join(db.path_to_dataset, 'players', 'metadata', 'position.json'), 'w') as outfile:
    json.dump(data_out, outfile)
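# The loop above assumes `smooth_trajectory(tracklet, neck_pos)` returns a (3, N) array
# indexed as [coordinate, frame], given the (N, 3) array of lifted neck positions. A simple
# sketch using a centered moving average is shown below; the actual project may use a
# different filter (e.g. a spline or Kalman smoother), and the `window` parameter is an
# assumption for illustration.
def smooth_trajectory(tracklet, neck_pos, window=5):
    n = neck_pos.shape[0]
    smoothed = np.zeros((3, n))
    half = window // 2
    for j in range(n):
        # Average over a clipped window around frame j
        lo, hi = max(0, j - half), min(n, j + half + 1)
        smoothed[:, j] = neck_pos[lo:hi, :].mean(axis=0)
    return smoothed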
basename = db.frame_basenames[sel_frame]
poses = db.poses[basename]
mask = db.get_instances_from_detectron(sel_frame, is_bool=True)

cam_mat = db.calib[basename]
cam = cam_utils.Camera(basename, cam_mat['A'], cam_mat['R'], cam_mat['T'], db.shape[0], db.shape[1])

skeleton_buffer = seg_utils.get_instance_skeleton_buffer(db.shape[0], db.shape[1], poses)

h, w = img.shape[:2]

for i in range(len(poses)):
    valid = poses[i][:, 2] > 0
    kp3 = misc_utils.lift_keypoints_in_3d(cam, poses[i][valid, :], pad=0)
    center3d = np.mean(kp3, axis=0)
    # Most of the keypoints are in the upper body, so the center of mass lies closer to the neck
    center3d[1] -= 0.25
    _, center_depth = cam.project(np.array([center3d]))

    bbox = misc_utils.get_box_from_3d_shpere(cam, center3d)
    x1, y1, x2, y2 = bbox[:4]
    x1 -= margin
    y1 -= margin
    x2 += margin
    y2 += margin
    # Clamp the padded box to the image bounds
    x1, x2, y1, y2 = max(x1, 0), min(w, x2), max(y1, 0), min(h, y2)
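# `misc_utils.get_box_from_3d_shpere` is assumed to project a fixed-size sphere centered on
# the 3D player position and return its 2D bounding box in image coordinates. The hypothetical
# sketch below illustrates that idea by projecting a few points on the sphere and taking their
# 2D extent; the name, radius, and exact behavior here are assumptions, not the project's code.
def get_box_from_3d_sphere(cam, center3d, radius=1.0):
    # Axis-aligned sample points on the sphere surface
    offsets = np.array([[radius, 0, 0], [-radius, 0, 0],
                        [0, radius, 0], [0, -radius, 0],
                        [0, 0, radius], [0, 0, -radius]])
    pts2d, _ = cam.project(center3d[None, :] + offsets)
    x1, y1 = pts2d[:, 0].min(), pts2d[:, 1].min()
    x2, y2 = pts2d[:, 0].max(), pts2d[:, 1].max()
    return np.array([x1, y1, x2, y2])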