def _detectBoxer(self, name_img_tuple):
    # name_img_tuple is (frame_name, frame_img); run the boxer detector on the image.
    result, object_detected_list = self._boxer_detector.detect(name_img_tuple[1])
    imgobj = ImageCV(name_img_tuple[1])
    for object_detected in object_detected_list:
        # Map each detected bbox from normalized to absolute image coordinates.
        object_detected.bbox = imgobj.normRectToAbsRect(object_detected.bbox, 1)
    if self.show:
        bbox_rect_label = [
            (detected.bbox,
             f"{detected.class_name}:{i}: {str(100 * detected.score)[:5]}%")
            for i, detected in enumerate(object_detected_list)
        ]
        imgobj.setTitle(f'{name_img_tuple[0]}:boxer_detect_result:').drawBboxes(
            bbox_rect_label, copy=True).show()
    return result, object_detected_list

def readFrame(self, index: int, need_type: Union[ImageCV, np.ndarray] = ImageCV):
    cap = cv2.VideoCapture(self.fname)
    cap.set(cv2.CAP_PROP_POS_FRAMES, index)
    _, f = cap.read()
    cap.release()  # release the capture handle once the frame is read
    if need_type == ImageCV:
        return ImageCV(f, title=index)
    else:
        return f

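# Usage sketch (illustrative only; `video` stands for whatever object owns
# readFrame() and self.fname — the owning class is not shown in this excerpt):
def _demo_read_frame(video, index=0):
    as_imagecv = video.readFrame(index)                        # ImageCV titled with the index
    as_ndarray = video.readFrame(index, need_type=np.ndarray)  # raw BGR ndarray
    return as_imagecv, as_ndarray
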
def _estimatePose(self, name_img_tuple):
    poses, datum = self._processImg(name_img_tuple[1])
    if self.show:
        center_points = Poses(datum.poseKeypoints,
                              self._pose_estimator.pose_type).centers(need_type=tuple)
        ImageCV(datum.cvOutputData).drawPoints(center_points, copy=False)
        self._showIfEnabled(datum.cvOutputData,
                            title=f'{name_img_tuple[0]}:pose_estimate_result')
    return datum

def compare(pre, cur):
    # pre/cur are (frame_index, frame) pairs; `distances`, `sections`,
    # `threshold`, `log` and `show` are captured from the enclosing scope.
    pre_obj = ImageCV(pre[1], pre[0])
    cur_obj = ImageCV(cur[1], cur[0])
    distance = pre_obj.distanceHist(cur_obj,
                                    method=sci_dist.euclidean,
                                    gray=False,
                                    show=False)[-1]
    distances.append([pre[0], cur[0], distance])
    if distance >= threshold:
        # Histogram jump: close the current section at the previous frame and
        # open a new one at the current frame.
        sections[-1][-1] = pre[0]
        sections.append([cur[0], None])
    if log:
        print(f'[{pre[0]}]<->[{cur[0]}] distance: {distance}, threshold: {threshold}')
    if show:
        pre_obj.show()
        cur_obj.show()
    del pre_obj, cur_obj
    return cur

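# How compare() is driven is an assumption here: because it returns `cur`, it
# folds naturally over consecutive (index, frame) pairs, e.g. with
# functools.reduce, while `distances`, `sections`, `threshold`, `log` and
# `show` live in the enclosing scene-split routine:
#
#   import functools
#   sections = [[frames[0][0], None]]   # frames: list of (index, ndarray) pairs
#   distances = []
#   functools.reduce(compare, frames)
#   sections[-1][-1] = frames[-1][0]    # close the last open section
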
class BoxingAIImage(BoxingAI):
    def __init__(self, path):
        assert os.path.isfile(path)
        super().__init__()
        self.media = ImageCV(path)
        self.queue_name_img_tuple = Queue(maxsize=2)

    def _startProducingImgs(self):
        # Produce a single (name, img) item followed by the 'DONE' sentinel.
        img = self.media.org()
        base_name = os.path.basename(self.media.fname)
        # self.require_show(img,title=f'{base_name}:org')
        img = self._execPrepHooks(img, base_name)
        # ImageCV(img,'testing').show()
        self.queue_name_img_tuple.put((base_name, img))
        self.queue_name_img_tuple.put('DONE')

    def _connectAndSmoothPoses(self, all_poses, smooth):
        # A single image has no temporal sequence, so there is nothing to smooth.
        return all_poses

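# Usage sketch (hypothetical path and call order, shown only to illustrate the
# single-image flow):
#
#   ai = BoxingAIImage('frame.jpg')
#   ai._startProducingImgs()                   # enqueues (basename, img), then 'DONE'
#   name, img = ai.queue_name_img_tuple.get()  # the single produced item
#   assert ai.queue_name_img_tuple.get() == 'DONE'
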
def showState(self):
    logger.debug(self.poseKeypoints)
    ImageCV(self.cvInputData, 'cvInputData').show()
    ImageCV(self.cvOutputData, 'cvOutputData').show()

def showCvOut(self):
    ImageCV(self.cvOutputData, self.title).show()

def show(self, img, title=None):
    ImageCV(img, title).show()

def _showIfEnabled(self, img, title=None):
    if self.show:
        ImageCV(img, title).show()

def _genMainProcess(self, max_people_num, heuristic, smooth):
    # img_shape = self.media.getInfo()['shape']
    # act_recg1 = ActionRecognizer(self.pose_estimator_env[0]['model_pose'],img_shape)
    # act_recg2 = ActionRecognizer(self.pose_estimator_env[0]['model_pose'],img_shape)
    while True:
        got = self.queue_name_img_tuple.get()
        if got == 'DONE':
            break
        if heuristic:
            # Heuristic path: detect boxers first, estimate poses inside each
            # dilated boxer ROI, then rebuild a full-frame datum from the ROIs.
            _, object_detected_list = self._detectBoxer(got)
            img_obj = ImageCV(got[1], title=got[0])
            dilate_ratio = 1.1
            roi_datum_tuple_list = []
            boxer_id_score_to_every_pose = []
            for i, b in enumerate(object_detected_list):
                roi_rect = img_obj.rectDilate(b.bbox, dilate_ratio)
                boxer_roi_img = img_obj.roiCopy(roi_rect).org()
                datum = self._estimatePose(
                    (f'{got[0]}:boxer {i} (dilate_ratio {dilate_ratio})',
                     boxer_roi_img))
                roi_datum_tuple_list.append((roi_rect, datum))
                # Pair every pose found in this ROI with the boxer id and score.
                boxer_id_score_to_every_pose += [(i, b)] * max(
                    1, ndarrayLen(datum.poseKeypoints))
            datum = DatumPickleable.rebuildFromRoiDatum(
                got[1], roi_datum_tuple_list, self._pose_estimator.pose_type)
            # datum,fill_num = self._fill_missing_pose_keypoints(datum, max_people_num)
            # boxer_id_score_to_every_pose += [None] * fill_num
            self._showIfEnabled(datum.cvOutputData,
                                title=f'{got[0]}:rebuild(may be covered)')
            poses = Poses(datum.poseKeypoints, self._pose_estimator.pose_type)
            poses.cleanup(['face'])
            posescore_list = self._rescorePoseByBoxerScore(
                poses, boxer_id_score_to_every_pose)
            posescore_list = self._filterOutDuplicatePose(posescore_list)
        else:
            # Fallback path: run pose estimation on the whole frame.
            # if heuristic and not boxer_entities:
            #     logger.warn('only one boxer detected, use full img pose estimation!')
            datum = self._estimatePose(got)
            # datum,fill_num = self._fill_missing_pose_keypoints(datum, max_people_num)
            poses = Poses(datum.poseKeypoints, self._pose_estimator.pose_type)
            poses.cleanup(['face'])
            posescore_list = self._rescorePoseByBoxerScore(poses, None)
            object_detected_list = None

        # posescore_list = self._fill_missing_pose(posescore_list, max_people_num)
        logger.debug('rescored pose score:')
        logger.debug([
            f'frame index:[{got[0]}] from_boxer:{t[1]} '
            f'completion_score(cleaned):{t[2]} completion_multi_boxerscore:{t[3]} '
            f'points_scores_sum:{t[4]} scores_sum_after_re_pu:{t[5]} '
            f'norm_dis_to_boxer_center:{t[6]} knee_and_below_nonzero_exists:{t[7]} '
            f'[p0x:{t[0].key_points[0][0]} p1x:{t[0].key_points[1][0]} '
            f'p8x:{t[0].key_points[8][0]} p17x:{t[0].key_points[17][0]}]'
            for t in posescore_list
        ])
        # posescore_list = list(filter(lambda t: t.points_scores_sum_after_re_pu, posescore_list[:max_people_num]))
        # posescore_list = posescore_list[:3]
        posescore_list = self._connectAndSmoothPoses(posescore_list, smooth=smooth)
        # actions_hist1 = act_recg1.put_pose(posescore_list[0].pose)[-5:]
        # actions_hist2 = act_recg2.put_pose(posescore_list[1].pose)[-5:]
        # act_recg1.show()
        # yield got[0],list(posescore_list),datum,boxer_entities if heuristic else None,[actions_hist1,actions_hist2]
        yield (got[0], list(posescore_list), datum,
               object_detected_list if heuristic else None, [[], []])
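
# Consumption sketch (assumption: a caller iterates this generator once the
# producer has started filling queue_name_img_tuple; unpacked names mirror the
# yield above):
#
#   for frame_id, posescore_list, datum, detections, action_hists in \
#           self._genMainProcess(max_people_num=2, heuristic=True, smooth=True):
#       # detections is the boxer list when heuristic=True, otherwise None;
#       # action_hists is a placeholder ([[], []]) while the ActionRecognizer
#       # code stays commented out above.
#       logger.debug(f'{frame_id}: {len(posescore_list)} pose(s)')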