def detect_controller(self, req):
    """Service handler: detect and classify faces in ``req.input_image``.

    Args:
        req: service request carrying ``input_image`` (sensor_msgs/Image).

    Returns:
        DetectResponse with one Box per detected face; box coordinates are
        normalized to [0, 1] using ``self.image_shape``.
    """
    cv_image = self.bridge.imgmsg_to_cv2(req.input_image, "bgr8")
    image = cv2.resize(cv_image, (0, 0), fx=config.scale, fy=config.scale)
    positions = face_api.detect_faces(
        image,
        min_score=config.detection_score,
        max_idx=config.detection_idx)
    encodings = face_api.face_descriptor(image, positions)
    # NOTE(review): image_shape is set by the streaming callback; assumes it
    # matches this request's image dimensions -- TODO confirm.
    image_h, image_w = self.image_shape
    boxes = []
    # BUG FIX: iterate the positions detected from *this* request's image.
    # The original zipped stale `self.face_positions` against `encodings`
    # computed from the local `positions`, mismatching faces and encodings.
    for face_position, encoding in zip(positions, encodings):
        face = face_api.Face(
            face_position[0], tracker_timeout=config.tracker_timeout)
        predicted_id = classifier.predict(encoding)
        if predicted_id != 0:
            face.details = face_map[predicted_id]
            # Only (re)compute gender while it is still unknown for this id.
            if face.details["gender"] == "unknown":
                face.details["gender"] = face_api.predict_gender(encoding)
        else:
            # Unknown person: predict gender and register the new identity.
            face.details["gender"] = face_api.predict_gender(encoding)
            face_map[face.details["id"]] = face.details
        # Keep feeding the classifier with samples up to the configured cap,
        # persisting each encoding to disk alongside the face id.
        if face_map[face.details["id"]]["size"] < config.classification_size:
            face_map[face.details["id"]]["size"] += 1
            classifier.add_pair(encoding, face.details["id"])
            face_path = os.path.join(_face_dir, face.details["id"])
            if not os.path.exists(face_path):
                os.mkdir(face_path)
            with open(
                    os.path.join(face_path,
                                 "{}.dump".format(int(time.time()))),
                    'wb') as fp:
                pickle.dump(encoding, fp)
        box = Box()
        # Normalize pixel coordinates to the [0, 1] range.
        box.x = face.rect.left() / float(image_w)
        box.y = face.rect.top() / float(image_h)
        box.w = face.rect.width() / float(image_w)
        box.h = face.rect.height() / float(image_h)
        box.gender = face.details["gender"]
        box.label = face.details["id"]
        box.name = face.details["name"]
        box.age = face.details["age"]
        boxes.append(box)
        rospy.loginfo("{} faces loaded.".format(box.name))
    response = DetectResponse(boxes)
    return response
def analysis_show(path, img, cover):
    """Run (or reuse) face analysis for *path* and draw the result on *img*.

    When *cover* is true, a fresh detection is performed and the matched
    person's name is returned. Otherwise the cached ``face_information``
    from the previous detection is used to overlay age, the two strongest
    emotions, and the beauty score onto *img*.
    """
    global face_information  # cached detection result shared across calls
    global name              # last matched person name
    if cover:
        face_information = detect_faces(path)
        try:
            if face_information['faces']:
                name = search_show(face_information)
                return name
        except KeyError:
            print('no face')
    else:
        try:
            if face_information['faces']:
                age = face_information['faces'][0]['attributes']['age'][
                    'value']
                emotion = face_information['faces'][0]['attributes']['emotion']
                # Operate on a copy so popping the top emotion does not
                # mutate the cached detection result.
                emotion = emotion.copy()
                emotion1 = max(emotion, key=emotion.get)
                emotion.pop(emotion1)
                emotion2 = max(emotion, key=emotion.get)
                beauty = face_information['faces'][0]['attributes']['beauty']
                # BUG FIX: use the context manager so the lock is released
                # even if show_info() raises (the bare acquire/release pair
                # leaked the lock into the KeyError handler and would
                # deadlock every later caller).
                with lock:
                    show_info(name, img, age, emotion1, emotion2, beauty)
            else:
                with lock:
                    # image / text / top-left corner / font / size / color / thickness
                    cv2.putText(img, "no face", (100, 50), font, 1.2,
                                (255, 255, 255), 1)
        except KeyError:
            # Consistency fix: guard this draw with the lock like every
            # other drawing path in this function (safe now that the lock
            # cannot be leaked by the paths above).
            with lock:
                cv2.putText(img, "no face", (100, 50), font, 1.2,
                            (255, 255, 255), 1)
def process(self, data):
    """Image-stream callback: detect, classify and track faces in *data*.

    Runs the (expensive) detector only every ``config.skip_frames`` frames;
    in between, known faces are followed with their correlation trackers.
    Optionally shows an annotated preview window.
    """
    try:
        cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        image = cv2.resize(cv_image, (0, 0), fx=config.scale, fy=config.scale)
        self.image_shape = image.shape[:2]
        original = image.copy()
        if self.frame_limit % config.skip_frames == 0:
            self.frame_limit = 0
            # Detecting face positions.
            self.face_positions = face_api.detect_faces(
                image,
                min_score=config.detection_score,
                max_idx=config.detection_idx)
        self.frame_limit += 1
        if len(self.face_positions) > len(self.detected_faces):
            # New faces appeared: rebuild the tracked list from scratch.
            self.detected_faces = []
            # Compute the 128D vector that describes the face in img
            # identified by shape.
            encodings = face_api.face_descriptor(image, self.face_positions)
            for face_position, encoding in zip(self.face_positions,
                                               encodings):
                # Create object from face_position.
                face = face_api.Face(
                    face_position[0], tracker_timeout=config.tracker_timeout)
                predicted_id = classifier.predict(encoding)
                if predicted_id != 0:
                    face.details = face_map[predicted_id]
                    # Only (re)compute gender while still unknown.
                    if face.details["gender"] == "unknown":
                        face.details["gender"] = face_api.predict_gender(
                            encoding)
                else:
                    # Unknown person: predict gender, register the identity.
                    face.details["gender"] = face_api.predict_gender(encoding)
                    face_map[face.details["id"]] = face.details
                # Keep collecting training samples up to the configured cap,
                # persisting each encoding to disk.
                if face_map[face.details["id"]][
                        "size"] < config.classification_size:
                    face_map[face.details["id"]]["size"] += 1
                    classifier.add_pair(encoding, face.details["id"])
                    face_path = os.path.join(_face_dir, face.details["id"])
                    if not os.path.exists(face_path):
                        os.mkdir(face_path)
                    with open(
                            os.path.join(face_path,
                                         "{}.dump".format(int(time.time()))),
                            'wb') as fp:
                        pickle.dump(encoding, fp)
                # Start correlation tracker for face.
                face.tracker.start_track(image, face_position[0])
                # Face detection score: bigger means a more confident
                # detection.
                rospy.loginfo(
                    "Face {}->{:5} , [{}] [score : {:.2f}]".format(
                        face.details["id"], face.details["name"],
                        face.details["gender"], face_position[1]))
                self.detected_faces.append(face)
        else:
            # BUG FIX: the original popped entries from self.detected_faces
            # while iterating it with enumerate(), which skips the element
            # following every removal. Build the survivors list instead.
            remaining = []
            for face in self.detected_faces:
                # Update tracker; faces whose tracking quality is too low
                # are dropped from the list (but still drawn this frame,
                # matching the original behavior).
                if face.update_tracker(image):
                    remaining.append(face)
                face.details = face_map[str(face.details["id"])]
                face.draw_face(original)
            self.detected_faces = remaining
        if config.show_window:
            cv2.imshow("image", original)
            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                cv2.destroyAllWindows()
                rospy.signal_shutdown("q key pressed")
    except CvBridgeError as e:
        rospy.logerr(e)
def synchronisedImageCallback(self, imageRGB, imageDepth):
    """Synchronized RGB+depth callback: detect/track faces and publish them.

    Detected 2D boxes are converted to 3D through the
    ``/get_3d_bounding_boxes`` service using *imageDepth*, then published
    on ``self.faces_pub``. Clears ``self.flagClassificationInProgress``
    when done.
    """
    try:
        self.time = rospy.get_rostime()
        cv_image = self.bridge.imgmsg_to_cv2(imageRGB, "bgr8")
        image = cv2.resize(cv_image, (0, 0), fx=config.scale, fy=config.scale)
        self.image_shape = image.shape[:2]
        image_h, image_w = self.image_shape
        original = image.copy()
        if self.frame_limit % config.skip_frames == 0:
            self.frame_limit = 0
            # Detecting face positions.
            self.face_positions = face_api.detect_faces(
                image,
                min_score=config.detection_score,
                max_idx=config.detection_idx)
        self.frame_limit += 1
        if len(self.face_positions) > len(self.detected_faces):
            # New faces appeared: rebuild the tracked list from scratch.
            self.detected_faces = []
            # Compute the 128D vector that describes the face in img
            # identified by shape.
            encodings = face_api.face_descriptor(image, self.face_positions)
            cpt = 0
            for face_position, encoding in zip(self.face_positions,
                                               encodings):
                # Create object from face_position.
                face = face_api.Face(
                    face_position[0], tracker_timeout=config.tracker_timeout)
                predicted_id = classifier.predict(encoding)
                if predicted_id != 0:
                    # BUG FIX: catch only KeyError (was a bare `except` with
                    # a Python-2 print), and clear the in-progress flag
                    # before the early return so it is not left stuck True.
                    try:
                        face.details = face_map[predicted_id]
                    except KeyError:
                        rospy.logwarn(
                            "predicted id {} missing from face_map".format(
                                predicted_id))
                        self.flagClassificationInProgress = False
                        return
                    # Only (re)compute gender while still unknown.
                    if face.details["gender"] == "unknown":
                        face.details["gender"] = face_api.predict_gender(
                            encoding)
                else:
                    # Unknown person: predict gender, register the identity.
                    face.details["gender"] = face_api.predict_gender(encoding)
                    face_map[face.details["id"]] = face.details
                # Keep collecting training samples up to the configured cap,
                # persisting each encoding to disk.
                if face_map[face.details["id"]][
                        "size"] < config.classification_size:
                    face_map[face.details["id"]]["size"] += 1
                    classifier.add_pair(encoding, face.details["id"])
                    face_path = os.path.join(_face_dir, face.details["id"])
                    if not os.path.exists(face_path):
                        os.mkdir(face_path)
                    with open(
                            os.path.join(face_path,
                                         "{}.dump".format(int(time.time()))),
                            'wb') as fp:
                        pickle.dump(encoding, fp)
                # Start correlation tracker for face.
                face.tracker.start_track(image, face_position[0])
                # Face detection score: bigger means a more confident
                # detection. Box corners are rescaled back to the original
                # (pre-resize) image coordinates via config.scale.
                rospy.loginfo(
                    "Face {}->{:5} , [{}] [score : {:.2f}], [xmin: {} xmax: {} ymin: {} ymax: {}]"
                    .format(
                        face.details["id"], face.details["name"],
                        face.details["gender"], face_position[1],
                        face.rect.left() / config.scale,
                        face.rect.width() / config.scale +
                        face.rect.left() / config.scale,
                        face.rect.top() / config.scale,
                        face.rect.height() / config.scale +
                        face.rect.top() / config.scale))
                face.details["score"] = face_position[1]
                rospy.loginfo("SCORE {}".format(face.details["score"]))
                cpt = cpt + 1
                self.detected_faces.append(face)
        else:
            cpt = 0
            # BUG FIX: the original popped entries from self.detected_faces
            # while iterating it with enumerate(), which skips the element
            # following every removal. Build the survivors list instead.
            remaining = []
            for face in self.detected_faces:
                # Update tracker; faces whose tracking quality is too low
                # are dropped (but still drawn this frame, matching the
                # original behavior).
                if face.update_tracker(image):
                    remaining.append(face)
                face.details = face_map[str(face.details["id"])]
                face.draw_face(original)
                cpt = cpt + 1
            self.detected_faces = remaining
        if cpt > 0:
            listBB2D = BoundingBoxes2D()
            for index, face in enumerate(self.detected_faces):
                msgFace = FaceMsg()
                msgBB = BoundingBox2D()
                # Rescale tracker coordinates back to the original image.
                msgBB.xmin = face.rect.left() / config.scale
                msgBB.xmax = face.rect.width(
                ) / config.scale + face.rect.left() / config.scale
                msgBB.ymin = face.rect.top() / config.scale
                msgBB.ymax = face.rect.height(
                ) / config.scale + face.rect.top() / config.scale
                msgBB.Class = "face"
                msgFace.gender = face.details["gender"]
                msgFace.id = face.details["id"]
                msgFace.name = face.details["name"]
                # Negative age means "unknown"; publish 0 in that case.
                if face.details["age"] >= 0:
                    msgFace.age = face.details["age"]
                else:
                    msgFace.age = 0
                # NOTE(review): 2.7 looks like an empirical normalization
                # constant for the detector score -- TODO confirm its origin.
                msgFace.genderProbability = abs(face.details["score"] / 2.7)
                msgBB.probability = msgFace.genderProbability
                listBB2D.boundingBoxes.append(msgBB)
                listBB2D.header.stamp = imageDepth.header.stamp
                self.msg.faces.append(msgFace)
            try:
                # NOTE(review): config attribute is spelled "output_fame"
                # (likely a typo for output_frame) -- kept to match config.
                rospy.wait_for_service("/get_3d_bounding_boxes", 0.01)
                serv = rospy.ServiceProxy("/get_3d_bounding_boxes",
                                          GetBoundingBoxes3D)
                resp = serv(listBB2D, imageDepth,
                            imageDepth.header.frame_id, config.output_fame)
                for index, BB3D in enumerate(
                        resp.boundingBoxes3D.boundingBoxes):
                    self.msg.faces[index].boundingBox = BB3D
                self.msg.header.stamp = imageDepth.header.stamp
                self.msg.header.frame_id = config.output_fame
                self.faces_pub.publish(self.msg)
                # Reset accumulators for the next frame.
                self.msg.faces = []
                self.BB3Dfaces = []
            except Exception as e:
                rospy.logerr("ERROR with frame to box: " + str(e))
        if config.show_window:
            cv2.imshow("image", original)
            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                cv2.destroyAllWindows()
                rospy.signal_shutdown("q key pressed")
    except CvBridgeError as e:
        rospy.logerr(e)
    self.flagClassificationInProgress = False