if pipeline.regime == 'detection':  # assumed header, implied by the `elif` below
    print(' ---- Detection stage ---- ')

    print('Face detection')
    face_bboxes, face_bboxes_confidences = face_det_tracker.predict(image)
    res_face_detection = face_det_tracker.get_result()
    #face_detector.visualize(image, face_bboxes, face_bboxes_confidences, color=(0, 0, 255))

    print('Initialize tracking...')
    multi_faces_tracker.initialize(image, face_bboxes)

    print('Facial landmarks estimation')
    facial_landmarks, facial_landmarks_confidences = facial_landmarks_estimator.predict(
        image, face_bboxes, face_bboxes_confidences)
    res_facial_landmarks_estimation = facial_landmarks_estimator.get_result()
    #facial_landmarks_estimator.visualize(image, facial_landmarks, facial_landmarks_confidences, color=(0, 0, 255))

    pipeline.match(res_face_detection.bounding_boxes,
                   res_facial_landmarks_estimation.facial_landmarks)
    pipeline.visualize(image)
    cv2.putText(image, 'Detection stage', (5, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255))

elif pipeline.regime == 'tracking':
    print(' ---- Tracking stage ---- ')

    print('Face tracking')
    face_bboxes, face_bboxes_confidences = multi_faces_tracker.update(image)
    #face_detector.visualize(image, face_bboxes, face_bboxes_confidences, color=(0, 255, 0))

    print('Facial landmarks estimation')
    facial_landmarks, facial_landmarks_confidences = facial_landmarks_estimator.predict(
        image, face_bboxes, face_bboxes_confidences)
    #facial_landmarks_estimator.visualize(image, facial_landmarks, facial_landmarks_confidences, color=(0, 255, 0))
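The branch above alternates between a heavyweight detection stage and a cheaper per-frame tracking stage, with pipeline.regime selecting which one runs. The regime bookkeeping itself is not shown in this excerpt; the sketch below is one plausible way to implement it, where the class name RegimeSwitcher, the detection_rate parameter, and the fixed every-N-frames schedule are all assumptions for illustration, not the repository's actual logic.

class RegimeSwitcher:
    """Minimal stand-in for the pipeline's regime bookkeeping (hypothetical:
    re-run full detection every `detection_rate` frames, track in between)."""

    def __init__(self, detection_rate=30):
        self.detection_rate = detection_rate  # assumed parameter name
        self.frame_idx = 0
        self.regime = 'detection'  # first frame always runs the detector

    def update_regime(self):
        # Advance the frame counter and fall back to detection on schedule.
        self.frame_idx += 1
        if self.frame_idx % self.detection_rate == 0:
            self.regime = 'detection'
        else:
            self.regime = 'tracking'

switcher = RegimeSwitcher(detection_rate=30)
switcher.update_regime()
print(switcher.regime)  # 'tracking' until frame 30, then 'detection' again

A schedule driven by tracker confidence (switching back to detection as soon as multi_faces_tracker loses its targets) would recover from drift faster than a fixed interval, but the fixed interval keeps the sketch self-contained.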
In the fuller variant of the script, the detection branch also runs facial emotion classification and, optionally, action recognition before matching everything up:

    # NOTE: the assignment targets and arguments of this call are inferred
    # from the parallel predict() stages; the excerpt cuts off its first half.
    facial_emotions, facial_emotions_confidences = facial_emotion_classifier.predict(
        image, face_bboxes, face_bboxes_confidences)
    res_facial_emotion_classification = facial_emotion_classifier.get_result()
    #facial_emotion_classifier.visualize(image, facial_emotions, facial_emotions_confidences)

    if action_recognition:
        print('Action classification')
        action_classifier.predict(image)
        #action_classifier.predict(res_humans)
        #res_action_recognition = action_classifier.get_result()
        #actions, actions_confidences = res_action_recognition.convert_to_list()
        #action_classifier.visualize(image)

    pipeline.match(res_human_detection.bounding_boxes,
                   res_poses_estimation.poses,
                   res_face_detection.bounding_boxes,
                   res_facial_landmarks_estimation.facial_landmarks,
                   res_facial_emotion_classification.facial_emotions)
    pipeline.visualize(image)
    cv2.putText(image, 'Detection stage', (5, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0))

    pipeline.update_regime()

elif pipeline.regime == 'tracking':
    print(' ---- Tracking stage ---- ')
    #if len(multi_human_tracker.trackers) == 0:
    #    pass
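pipeline.match is the step that ties the independent per-stage outputs (human boxes, poses, face boxes, landmarks, emotions) back into per-person records, but its body is not part of this excerpt. Below is a minimal sketch of one way to do the face-to-human part of that association, assigning each face box to the human box that contains most of it; the function names, the (x1, y1, x2, y2) box convention, and the 0.5 threshold are all assumptions for illustration.

def containment(face_box, human_box):
    """Fraction of the face box's area that lies inside the human box.
    Boxes are assumed to be (x1, y1, x2, y2) tuples."""
    x1 = max(face_box[0], human_box[0])
    y1 = max(face_box[1], human_box[1])
    x2 = min(face_box[2], human_box[2])
    y2 = min(face_box[3], human_box[3])
    inter = max(0, x2 - x1) * max(0, y2 - y1)
    face_area = (face_box[2] - face_box[0]) * (face_box[3] - face_box[1])
    return inter / face_area if face_area > 0 else 0.0

def match_faces_to_humans(human_boxes, face_boxes, min_overlap=0.5):
    """Greedy association: each face index maps to the index of the human
    box that contains the largest share of it, or to None if no human box
    overlaps it enough."""
    assignments = {}
    for fi, fbox in enumerate(face_boxes):
        scores = [containment(fbox, hbox) for hbox in human_boxes]
        if scores and max(scores) >= min_overlap:
            assignments[fi] = scores.index(max(scores))
        else:
            assignments[fi] = None  # face with no plausible owner
    return assignments

Containment is used here instead of plain IoU because a face box is tiny relative to a full-body box, so the IoU of a correct face-to-body pairing is always small, while the contained fraction of the face is close to 1.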