    def yoloRecognitionCallback(self, bbs):
        rospy.loginfo('Image ID: ' + str(bbs.image_header.seq))

        bbs_l = bbs.bounding_boxes

        objects = []
        people = []

        for bb in bbs_l:
            # Use the raw detector label as the reference model and remap
            # it to a canonical label when a dictionary entry exists;
            # initializing here avoids an unbound (or stale) reference_model
            # for classes that are not in the dictionary.
            reference_model = bb.Class
            if bb.Class in dictionary:
                bb.Class = dictionary[bb.Class]

            if 'people' in self.possible_classes and bb.Class in self.possible_classes[
                    'people'] and bb.probability >= self.threshold:
                person = Description()
                person.label_class = 'people' + '/' + bb.Class
                person.reference_model = reference_model
                person.probability = bb.probability
                person.bounding_box.minX = bb.xmin
                person.bounding_box.minY = bb.ymin
                person.bounding_box.width = bb.xmax - bb.xmin
                person.bounding_box.height = bb.ymax - bb.ymin
                people.append(person)

            elif bb.Class in [
                    val for sublist in self.possible_classes.values()
                    for val in sublist
            ] and bb.probability >= self.threshold:
                object_d = Description()
                index = 0
                # Locate which category's class list contains this label so
                # the matching key can be recovered below.
                for i, value in enumerate(self.possible_classes.values()):
                    if bb.Class in value:
                        index = i
                object_d.reference_model = reference_model
                object_d.label_class = list(
                    self.possible_classes.keys())[index] + '/' + bb.Class
                object_d.probability = bb.probability
                object_d.bounding_box.minX = bb.xmin
                object_d.bounding_box.minY = bb.ymin
                object_d.bounding_box.width = bb.xmax - bb.xmin
                object_d.bounding_box.height = bb.ymax - bb.ymin
                objects.append(object_d)

        objects_msg = Recognitions()
        people_msg = Recognitions()

        if len(objects) > 0:
            objects_msg.header = bbs.header
            objects_msg.image_header = bbs.image_header
            objects_msg.descriptions = objects
            self.recognized_objects_pub.publish(objects_msg)

        if len(people) > 0:
            people_msg.header = bbs.header
            people_msg.image_header = bbs.image_header
            people_msg.descriptions = people
            self.recognized_people_pub.publish(people_msg)
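
A sketch of how this callback might be registered, assuming the detections arrive as darknet_ros BoundingBoxes; the topic names, queue sizes, and the constructor method are assumptions, not taken from the example above.

# Hypothetical wiring -- topic names and message package are assumptions.
import rospy
from darknet_ros_msgs.msg import BoundingBoxes
from butia_vision_msgs.msg import Recognitions

def initRosComm(self):
    # Publishers consumed by yoloRecognitionCallback above.
    self.recognized_objects_pub = rospy.Publisher(
        'object_recognition', Recognitions, queue_size=1)
    self.recognized_people_pub = rospy.Publisher(
        'people_detection', Recognitions, queue_size=1)
    # Each BoundingBoxes message carries header, image_header and a
    # bounding_boxes array, matching the fields read by the callback.
    rospy.Subscriber('bounding_boxes', BoundingBoxes,
                     self.yoloRecognitionCallback, queue_size=1)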
Example #2
    def instanceSegmentationCallback(self, instance_results):
        rospy.loginfo('Image ID: ' + str(instance_results.header.seq))
        objects = []
        people = []
        for bb, class_id, class_name, score, mask in zip(
                instance_results.boxes, instance_results.class_ids,
                instance_results.class_names, instance_results.scores,
                instance_results.masks):
            if 'people' in self.possible_classes and class_name in self.possible_classes[
                    'people'] and score >= self.threshold:
                person = Description()
                person.label_class = "person/{}".format(class_name)
                person.probability = score
                person.bounding_box.minX = bb.x_offset
                person.bounding_box.minY = bb.y_offset
                person.bounding_box.width = bb.width
                person.bounding_box.height = bb.height
                person.mask = mask
                people.append(person)

            elif class_name in [
                    val for sublist in self.possible_classes.values()
                    for val in sublist
            ] and score >= self.threshold:
                object_d = Description()
                index = 0
                # Locate which category's class list contains this label.
                for i, value in enumerate(self.possible_classes.values()):
                    if class_name in value:
                        index = i
                object_d.label_class = list(
                    self.possible_classes.keys())[index] + '/' + class_name
                object_d.probability = score
                object_d.bounding_box.minX = bb.x_offset
                object_d.bounding_box.minY = bb.y_offset
                object_d.bounding_box.width = bb.width
                object_d.bounding_box.height = bb.height
                objects.append(object_d)

        objects_msg = Recognitions()
        people_msg = Recognitions()

        if len(objects) > 0:
            objects_msg.header = instance_results.header
            objects_msg.image_header = instance_results.header
            objects_msg.descriptions = objects
            self.recognized_objects_pub.publish(objects_msg)

        if len(people) > 0:
            people_msg.header = instance_results.header
            people_msg.image_header = instance_results.header
            people_msg.descriptions = people
            self.recognized_people_pub.publish(people_msg)
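
Both callbacks above filter on the same structures: `self.possible_classes` maps a category name to a list of accepted class labels, and Example #1 additionally remaps raw detector labels through a module-level `dictionary`. A hypothetical shape for these values (the real ones would typically come from the parameter server):

# Hypothetical configuration; the labels and the 0.5 threshold are
# assumptions chosen only to illustrate the expected shapes.
dictionary = {
    'sports ball': 'ball',  # raw detector label -> canonical label
}

possible_classes = {        # stored on the node as self.possible_classes
    'people': ['person'],
    'objects': ['ball', 'bottle', 'cup'],
}

threshold = 0.5             # stored on the node as self.threshold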
Example #3
    def recognitionProcess(self, ros_msg):
        rospy.loginfo('Image ID: {}'.format(ros_msg.header.seq))
        bgr_image = BRIDGE.imgmsg_to_cv2(ros_msg, desired_encoding="bgr8")

        self.image_height = bgr_image.shape[0]
        self.image_width = bgr_image.shape[1]

        face_rects = self.detectFaces(bgr_image)
        if len(face_rects) == 0:
            rospy.loginfo("Recognition FPS: {:.2f} Hz.".format(
                (1 / (rospy.get_rostime().to_sec() - self.last_recognition))))
            self.last_recognition = rospy.get_rostime().to_sec()
            return None

        faces_description = []
        for face_rect in face_rects:
            face_description = Description()

            aligned_face = self.alignFace(bgr_image, face_rect)

            features_array = self.extractFeatures(aligned_face)

            classification = self.classify(features_array)

            label_class, confidence = classification

            bounding_box = self.dlibRectangle2RosBoundingBox(face_rect)
            features = self.numpyArray2RosVector(features_array)

            face_description.label_class = label_class
            face_description.probability = confidence
            face_description.bounding_box = bounding_box
            # Attach the converted feature vector; without this assignment
            # the vector computed above would go unused (assumes Description
            # has a 'features' field).
            face_description.features = features
            faces_description.append(face_description)

        recognized_faces = Recognitions()
        recognized_faces.image_header = ros_msg.header

        recognized_faces.header.stamp = rospy.get_rostime()
        recognized_faces.header.frame_id = "face_recognition"

        recognized_faces.descriptions = faces_description

        rospy.loginfo("Recognition FPS: {:.2f} Hz.".format(
            (1 / (rospy.get_rostime().to_sec() - self.last_recognition))))
        self.last_recognition = rospy.get_rostime().to_sec()

        return recognized_faces
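
Since recognitionProcess returns a Recognitions message, or None when no face is found, a thin image callback can drive it. A minimal sketch; the class context, topic names, and publisher are assumptions:

# Hypothetical wiring -- names are assumptions, not from the example above.
class FaceRecognitionNode(object):
    def __init__(self):
        self.image_sub = rospy.Subscriber(
            'camera/image_raw', Image, self.imageCallback, queue_size=1)
        self.face_pub = rospy.Publisher(
            'face_recognition', Recognitions, queue_size=1)

    def imageCallback(self, ros_msg):
        recognized_faces = self.recognitionProcess(ros_msg)
        if recognized_faces is not None:  # None means no face was detected
            self.face_pub.publish(recognized_faces)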
Example #4
def peopleDetectionCallBack(recognition):
    frame_id = recognition.image_header.seq
    req = image_request_client(frame_id)
    frame = req.rgbd_image.rgb
    descriptions = recognition.descriptions
    cv_frame = BRIDGE.imgmsg_to_cv2(frame, desired_encoding='rgb8')
    frame_rgb = cv2.cvtColor(cv_frame, cv2.COLOR_BGR2RGB)
    people_tracking.setFrame(recognition.image_header, recognition.header,
                             frame_id, frame_rgb.copy())

    img_size = frame.height * frame.width

    # Drop weak detections and boxes that are too small relative to the
    # image. Build a new list rather than calling remove() on the list
    # being iterated, which would skip elements.
    descriptions = [
        d for d in descriptions
        if d.probability >= probability_threshold and
        d.bounding_box.width * d.bounding_box.height >=
        img_size * boundingBox_threshold
    ]

    tracker, detections = people_tracking.track(descriptions)

    if people_tracking.trackingPerson is not None:
        if people_tracking.trackingPerson.is_confirmed(
        ) and people_tracking.trackingPerson.time_since_update <= 1:
            response = Recognitions()
            response.image_header = recognition.image_header
            response.header = recognition.header
            response.descriptions = [Description()]
            response.descriptions[0].label_class = 'tracked_person'
            BBbox = BoundingBox()
            # to_tlwh() gives (top-left x, top-left y, width, height).
            tlwh = people_tracking.trackingPerson.to_tlwh()
            BBbox.minX = tlwh[0]
            BBbox.minY = tlwh[1]
            BBbox.width = tlwh[2]
            BBbox.height = tlwh[3]
            response.descriptions[0].bounding_box = BBbox
            tracker_publisher.publish(response)
        else:
            people_tracking.findPerson()
    debug(frame_rgb.copy(), tracker, detections,
          people_tracking.trackingPerson)
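
The callback reads the module-level probability_threshold and boundingBox_threshold; a sketch of how they might be loaded, with the parameter names and defaults assumed:

# Hypothetical parameter loading -- names and defaults are assumptions.
probability_threshold = rospy.get_param('~probability_threshold', 0.5)
# Minimum bounding-box area as a fraction of the full image area.
boundingBox_threshold = rospy.get_param('~bounding_box_threshold', 0.1)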
Example #5
import cv2
import rospy
import numpy as np
from imutils.object_detection import non_max_suppression
from cv_bridge import CvBridge

from butia_vision_msgs.msg import Recognitions, Description
from sensor_msgs.msg import Image

BRIDGE = CvBridge()

# OpenCV's default HOG + linear-SVM people detector, referenced below as
# `hog`; the original fragment used it without defining it.
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

image_subscriber = None

recognition_publisher = None

view_publisher = None

pub_msg = Recognitions()


def image2Recognitions(image_msg):
    image = BRIDGE.imgmsg_to_cv2(image_msg, desired_encoding="bgr8")

    (rects, weights) = hog.detectMultiScale(image,
                                            winStride=(8, 8),
                                            padding=(32, 32),
                                            scale=1.05)

    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)

    pub_msg.header.stamp = rospy.Time.now()
    pub_msg.image_header = image_msg.header
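    # The listing is truncated here; the lines below are a hypothetical
    # continuation that packs each surviving box into a Description and
    # publishes the result (assumes recognition_publisher was initialized
    # elsewhere in the node).
    descriptions = []
    for (x_min, y_min, x_max, y_max) in pick:
        d = Description()
        d.label_class = 'person'
        d.bounding_box.minX = x_min
        d.bounding_box.minY = y_min
        d.bounding_box.width = x_max - x_min
        d.bounding_box.height = y_max - y_min
        descriptions.append(d)
    pub_msg.descriptions = descriptions
    recognition_publisher.publish(pub_msg)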