# Example #1
    def process(self, data):
        """Image callback: fetch detected faces from the face service and draw them.

        Converts the incoming ROS image message to an OpenCV BGR frame,
        queries the face service for normalized face boxes, draws each face
        onto the frame, and shows it in a window ('q' shuts down, 's' saves
        the frame to output.jpg).
        """
        try:
            image = self.bridge.imgmsg_to_cv2(data, "bgr8")
            image_h, image_w = image.shape[:2]

            rospy.wait_for_service(_service)
            try:
                face_service = rospy.ServiceProxy(_service, Face)
                resp = face_service()
                for f in resp.faces:
                    # Service boxes appear normalized to [0, 1]; scale back
                    # to pixel coordinates for dlib.
                    rect = dlib.rectangle(
                        int(f.x * image_w),
                        int(f.y * image_h),
                        int((f.x + f.w) * image_w),
                        int((f.y + f.h) * image_h),
                    )
                    face = face_api.Face(rect)
                    face.details["id"] = f.label
                    face.details["name"] = f.name
                    face.details["gender"] = f.gender

                    face.draw_face(image)

            except rospy.ServiceException as e:
                # BUG FIX: `except X, e` / `print` are Python-2-only syntax;
                # the `as` form works on 2.6+ and 3.x, and rospy logging
                # matches the sibling callbacks.
                rospy.logerr("Service call failed: %s" % e)

            cv2.imshow("image", image)
            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                cv2.destroyAllWindows()
                rospy.signal_shutdown("q key pressed")
            elif key == ord('s'):
                cv2.imwrite("output.jpg", image)
        except CvBridgeError as e:
            # BUG FIX: the outer try had no handler in the captured snippet
            # (a syntax error); handle bridge conversion failures the same
            # way the other callbacks in this file do.
            rospy.logerr(e)
# Example #2
    def detect_controller(self, req):
        """Service handler: detect and recognize faces in req.input_image.

        Scales the image by config.scale, detects faces, computes their 128D
        descriptors, classifies/enrolls each face, and returns a
        DetectResponse whose boxes are normalized to [0, 1] relative to the
        processed frame.
        """
        cv_image = self.bridge.imgmsg_to_cv2(req.input_image, "bgr8")
        image = cv2.resize(cv_image, (0, 0), fx=config.scale, fy=config.scale)
        positions = face_api.detect_faces(image,
                                          min_score=config.detection_score,
                                          max_idx=config.detection_idx)
        encodings = face_api.face_descriptor(image, positions)

        # BUG FIX: normalize against the frame these detections came from.
        # The original read self.image_shape, which is only updated by the
        # streaming callback and could be stale or unset.
        image_h, image_w = image.shape[:2]
        boxes = []

        # BUG FIX: the original zipped self.face_positions (leftover state
        # from the streaming callback) against encodings computed from the
        # local detections; pair each encoding with its own detection.
        for face_position, encoding in zip(positions, encodings):
            face = face_api.Face(face_position[0],
                                 tracker_timeout=config.tracker_timeout)

            predicted_id = classifier.predict(encoding)
            if predicted_id != 0:
                # Known face: reuse its stored details.
                face.details = face_map[predicted_id]

                # try to find gender.
                if face.details["gender"] == "unknown":
                    face.details["gender"] = face_api.predict_gender(encoding)

            else:
                # New face: predict gender and register its details.
                face.details["gender"] = face_api.predict_gender(encoding)
                face_map[face.details["id"]] = face.details

            # Keep enrolling descriptor samples until the classifier has
            # config.classification_size of them for this identity.
            if face_map[
                    face.details["id"]]["size"] < config.classification_size:
                face_map[face.details["id"]]["size"] += 1

                classifier.add_pair(encoding, face.details["id"])

                face_path = os.path.join(_face_dir, face.details["id"])
                if not os.path.exists(face_path):
                    os.mkdir(face_path)
                with open(
                        os.path.join(face_path,
                                     "{}.dump".format(int(time.time()))),
                        'wb') as fp:
                    pickle.dump(encoding, fp)

            # Box coordinates are normalized to [0, 1].
            box = Box()
            box.x = face.rect.left() / float(image_w)
            box.y = face.rect.top() / float(image_h)
            box.w = face.rect.width() / float(image_w)
            box.h = face.rect.height() / float(image_h)
            box.gender = face.details["gender"]
            box.label = face.details["id"]
            box.name = face.details["name"]
            box.age = face.details["age"]
            boxes.append(box)
            rospy.loginfo("{} faces loaded.".format(box.name))
        return DetectResponse(boxes)
    def process(self, data):
        """Streaming image callback: detect, recognize and track faces.

        Every config.skip_frames frames a full detection pass runs; between
        those passes the existing correlation trackers are updated instead.
        Newly detected faces are recognized against `classifier`, enrolled
        into `face_map` and dumped to disk while under
        config.classification_size samples, then tracked. Optionally shows
        the annotated frame ('q' shuts the node down).
        """
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
            image = cv2.resize(cv_image, (0, 0),
                               fx=config.scale,
                               fy=config.scale)

            self.image_shape = image.shape[:2]

            # Unmodified copy used for drawing overlays.
            original = image.copy()

            if self.frame_limit % config.skip_frames == 0:
                self.frame_limit = 0
                # Detecting face positions
                self.face_positions = face_api.detect_faces(
                    image,
                    min_score=config.detection_score,
                    max_idx=config.detection_idx)
            self.frame_limit += 1

            if len(self.face_positions) > len(self.detected_faces):
                # More detections than tracked faces: rebuild the tracked set.
                self.detected_faces = []

                # Compute the 128D vector that describes the face in img
                # identified by shape.
                encodings = face_api.face_descriptor(image,
                                                     self.face_positions)

                for face_position, encoding in zip(self.face_positions,
                                                   encodings):
                    # Create object from face_position.
                    face = face_api.Face(
                        face_position[0],
                        tracker_timeout=config.tracker_timeout)

                    predicted_id = classifier.predict(encoding)
                    if predicted_id != 0:
                        # Known face: reuse its stored details.
                        face.details = face_map[predicted_id]

                        # try to find gender.
                        if face.details["gender"] == "unknown":
                            face.details["gender"] = face_api.predict_gender(
                                encoding)

                    else:
                        # New face: predict gender and register its details.
                        face.details["gender"] = face_api.predict_gender(
                            encoding)
                        face_map[face.details["id"]] = face.details

                    # Keep enrolling descriptor samples until the classifier
                    # has config.classification_size of them.
                    if face_map[face.details["id"]][
                            "size"] < config.classification_size:
                        face_map[face.details["id"]]["size"] += 1

                        classifier.add_pair(encoding, face.details["id"])

                        face_path = os.path.join(_face_dir, face.details["id"])
                        if not os.path.exists(face_path):
                            os.mkdir(face_path)
                        with open(
                                os.path.join(
                                    face_path,
                                    "{}.dump".format(int(time.time()))),
                                'wb') as fp:
                            pickle.dump(encoding, fp)

                    # Start correlation tracker for face.
                    face.tracker.start_track(image, face_position[0])

                    # Face detection score, The score is bigger for more confident
                    # detections.
                    rospy.loginfo(
                        "Face {}->{:5} , [{}] [score : {:.2f}]".format(
                            face.details["id"], face.details["name"],
                            face.details["gender"], face_position[1]))

                    self.detected_faces.append(face)
            else:
                # BUG FIX: the original popped from self.detected_faces while
                # iterating it with enumerate, which skips the element after
                # each removal and still drew the removed face. Rebuild the
                # list of surviving faces instead.
                kept = []
                for face in self.detected_faces:
                    # Update tracker; faces with low tracking quality are
                    # dropped from the list.
                    if not face.update_tracker(image):
                        continue

                    face.details = face_map[str(face.details["id"])]

                    face.draw_face(original)
                    kept.append(face)
                self.detected_faces = kept

            if config.show_window:
                cv2.imshow("image", original)
                key = cv2.waitKey(1) & 0xFF
                if key == ord('q'):
                    cv2.destroyAllWindows()
                    rospy.signal_shutdown("q key pressed")

        except CvBridgeError as e:
            rospy.logerr(e)
# Example #4
    def synchronisedImageCallback(self, imageRGB, imageDepth):
        """Synchronized RGB/depth callback: detect, track and publish faces in 3D.

        Runs the same detect/recognize/track pipeline as the streaming
        callback on the RGB frame, then converts the tracked 2D boxes to 3D
        via the /get_3d_bounding_boxes service (using the depth image) and
        publishes the resulting face messages. Always clears
        self.flagClassificationInProgress on exit.
        """
        try:
            self.time = rospy.get_rostime()
            cv_image = self.bridge.imgmsg_to_cv2(imageRGB, "bgr8")

            image = cv2.resize(cv_image, (0, 0),
                               fx=config.scale,
                               fy=config.scale)

            self.image_shape = image.shape[:2]

            # Unmodified copy used for drawing overlays.
            original = image.copy()

            if self.frame_limit % config.skip_frames == 0:
                self.frame_limit = 0
                # Detecting face positions
                self.face_positions = face_api.detect_faces(
                    image,
                    min_score=config.detection_score,
                    max_idx=config.detection_idx)

            self.frame_limit += 1

            if len(self.face_positions) > len(self.detected_faces):
                # More detections than tracked faces: rebuild the tracked set.
                self.detected_faces = []

                # Compute the 128D vector that describes the face in img
                # identified by shape.
                encodings = face_api.face_descriptor(image,
                                                     self.face_positions)

                cpt = 0

                for face_position, encoding in zip(self.face_positions,
                                                   encodings):
                    # Create object from face_position.
                    face = face_api.Face(
                        face_position[0],
                        tracker_timeout=config.tracker_timeout)

                    predicted_id = classifier.predict(encoding)
                    if predicted_id != 0:
                        try:
                            face.details = face_map[predicted_id]
                        except KeyError:
                            # BUG FIX: was a bare `except` with a Python-2
                            # print; catch only the lookup failure, log it,
                            # and abort this frame.
                            rospy.logerr(
                                "face_map has no entry for id %s" %
                                predicted_id)
                            return

                        # try to find gender.
                        if face.details["gender"] == "unknown":
                            face.details["gender"] = face_api.predict_gender(
                                encoding)

                    else:
                        # New face: predict gender and register its details.
                        face.details["gender"] = face_api.predict_gender(
                            encoding)
                        face_map[face.details["id"]] = face.details

                    # Keep enrolling descriptor samples until the classifier
                    # has config.classification_size of them.
                    if face_map[face.details["id"]][
                            "size"] < config.classification_size:
                        face_map[face.details["id"]]["size"] += 1

                        classifier.add_pair(encoding, face.details["id"])

                        face_path = os.path.join(_face_dir, face.details["id"])
                        if not os.path.exists(face_path):
                            os.mkdir(face_path)
                        with open(
                                os.path.join(
                                    face_path,
                                    "{}.dump".format(int(time.time()))),
                                'wb') as fp:
                            pickle.dump(encoding, fp)

                    # Start correlation tracker for face.
                    face.tracker.start_track(image, face_position[0])

                    # Face detection score, The score is bigger for more confident
                    # detections. Edges are logged de-scaled back to the
                    # original image resolution.
                    rospy.loginfo(
                        "Face {}->{:5} , [{}] [score : {:.2f}], [xmin: {} xmax: {} ymin: {} ymax: {}]"
                        .format(
                            face.details["id"], face.details["name"],
                            face.details["gender"], face_position[1],
                            face.rect.left() / config.scale,
                            face.rect.width() / config.scale +
                            face.rect.left() / config.scale,
                            face.rect.top() / config.scale,
                            face.rect.height() / config.scale +
                            face.rect.top() / config.scale))

                    face.details["score"] = face_position[1]

                    rospy.loginfo("SCORE {}".format(face.details["score"]))
                    cpt = cpt + 1
                    self.detected_faces.append(face)
            else:
                # BUG FIX: the original popped from self.detected_faces while
                # iterating it with enumerate, which skips the element after
                # each removal and still drew (and counted) the removed face.
                # Rebuild the list of surviving faces instead.
                cpt = 0
                kept = []
                for face in self.detected_faces:
                    # Update tracker; faces with low tracking quality are
                    # dropped from the list.
                    if not face.update_tracker(image):
                        continue

                    face.details = face_map[str(face.details["id"])]

                    face.draw_face(original)
                    kept.append(face)
                    cpt = cpt + 1
                self.detected_faces = kept

            if cpt > 0:
                listBB2D = BoundingBoxes2D()
                for index, face in enumerate(self.detected_faces):
                    msgFace = FaceMsg()
                    msgBB = BoundingBox2D()

                    # De-scale the tracked box back to the original image
                    # coordinate system.
                    msgBB.xmin = face.rect.left() / config.scale
                    msgBB.xmax = face.rect.width(
                    ) / config.scale + face.rect.left() / config.scale
                    msgBB.ymin = face.rect.top() / config.scale
                    msgBB.ymax = face.rect.height(
                    ) / config.scale + face.rect.top() / config.scale
                    msgBB.Class = "face"

                    msgFace.gender = face.details["gender"]
                    msgFace.id = face.details["id"]
                    msgFace.name = face.details["name"]

                    # Clamp unknown/negative ages to 0 for the message field.
                    if face.details["age"] >= 0:
                        msgFace.age = face.details["age"]
                    else:
                        msgFace.age = 0

                    # NOTE(review): 2.7 appears to be an empirical score
                    # normalizer — confirm against the detector's score range.
                    msgFace.genderProbability = abs(face.details["score"] /
                                                    2.7)

                    msgBB.probability = msgFace.genderProbability

                    listBB2D.boundingBoxes.append(msgBB)
                    listBB2D.header.stamp = imageDepth.header.stamp

                    self.msg.faces.append(msgFace)

                try:
                    # Lift the 2D boxes to 3D using the depth image, then
                    # publish; best-effort, so any failure is only logged.
                    rospy.wait_for_service("/get_3d_bounding_boxes", 0.01)
                    serv = rospy.ServiceProxy("/get_3d_bounding_boxes",
                                              GetBoundingBoxes3D)
                    resp = serv(listBB2D, imageDepth,
                                imageDepth.header.frame_id, config.output_fame)

                    for index, BB3D in enumerate(
                            resp.boundingBoxes3D.boundingBoxes):
                        self.msg.faces[index].boundingBox = BB3D

                    self.msg.header.stamp = imageDepth.header.stamp
                    self.msg.header.frame_id = config.output_fame
                    self.faces_pub.publish(self.msg)

                    self.msg.faces = []
                    self.BB3Dfaces = []

                except Exception as e:
                    rospy.logerr("ERROR with frame to box: " + str(e))

            if config.show_window:
                cv2.imshow("image", original)
                key = cv2.waitKey(1) & 0xFF
                if key == ord('q'):
                    cv2.destroyAllWindows()
                    rospy.signal_shutdown("q key pressed")

        except CvBridgeError as e:
            rospy.logerr(e)
        finally:
            # BUG FIX: the original cleared this flag only when control fell
            # off the end of the method; the early `return` on a failed
            # face_map lookup left it stuck True. `finally` guarantees the
            # reset on every exit path.
            self.flagClassificationInProgress = False