Example #1
class FaceRecognition(ParallelPDU):
    """ PDU that receives cropped images (head only)
    and saves them to file """

    QUEUE = 'face-recognition'
    ORDERED_DELIVERY = True
    POOL_SIZE = 8
    UNFINISHED_TASKS_THRESHOLD = 2 * POOL_SIZE
    BEST_PERSON_THRESHOLD = 0.75
    UPGRADE_FACE_SAMPLES_THRESHOLD = 0.85

    def __init__(self, **kwargs):
        kwargs['heavy_preprocess'] = betaface_recognition
        super(FaceRecognition, self).__init__(**kwargs)
        self.session_tracker = SessionTracker()

    def light_postprocess(self, matches, image_dict):
        self.logger.info("Received matches from BetaFace: %r" % matches)

        # No persons means we're out of here.
        if len(matches) == 0:
            self.logger.info("Bailing out because of empty matches from "
                             "BetaFace")
            return

        # No strong bias towards a given person also gets us out of here.
        max_probability = max(matches.values())
        if max_probability < self.BEST_PERSON_THRESHOLD:
            self.logger.info("Out of the matches returned by BetaFace, the "
                             "maximum confidence is only %.2f "
                             "(should have been at least %.2f)"\
                             % (max_probability, self.BEST_PERSON_THRESHOLD))
            return

        # Get the person with the maximal probability from BetaFace
        person_name = matches.keys()[matches.values().index(max_probability)]
        self.logger.info("MOST_PROBABLE_PERSON = %s" % person_name)

        # Send event to room. This will cause interactivity within the lab.
        message_to_room = {'event_type': 'person_appeared',
                           'person_name': person_name}
        self.send_to('room', message_to_room)
        if 'session_id' in image_dict:
            self.session_tracker.track_event(image_dict['session_id'],
                                             0, # set the person name globally on the session
                                             {'person_name': person_name})

        # Send cropped image to UpgradeFaceSamples only if detection confidence
        # is really really high. Otherwise it's not worth it and we will
        # pollute the data structures in BetaFace API.
        if max_probability >= self.UPGRADE_FACE_SAMPLES_THRESHOLD:
            upgrade_message = {'person_name': person_name}
            upgrade_message.update(image_dict)
            self.send_to('upgrade-face-samples', upgrade_message)
        else:
            self.logger.info("It isn't worth it to send a face sample because "
                             "maximal confidence was only %.2f (should have "
                             "been at least %.2f" %\
                             (max_probability,
                              self.UPGRADE_FACE_SAMPLES_THRESHOLD))
Example #2
class Dashboard(PDU):
    QUEUE = 'dashboard'

    def __init__(self, **kwargs):
        super(Dashboard, self).__init__(**kwargs)
        self.dashboard_cache = DashboardCache()
        self.session_tracker = SessionTracker()

    def process_message(self, message):
        push_to_redis = self.get_pushing_function(message)
        push_to_redis(sensor_id=message['sensor_id'],
                      sensor_type=message['sensor_type'],
                      measurement_type=message['type'],
                      measurement=json.dumps(message))

        # Send the RGB image to SessionsStore only if the message has a
        # session_id. This means that, at kinect-level, there is an active
        # tracking session.
        if message['type'] == 'image_rgb' and message.get('session_id', None):
            sid = message['session_id']
            time = message['created_at']
            mappings = {'image_rgb': message['image_rgb']}
            self.session_tracker.track_event(sid, time, mappings)

    def get_pushing_function(self, message):
        if(message['sensor_type'] == "arduino"):
            return self.dashboard_cache.lpush
        else:
            return self.dashboard_cache.put
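For context, get_pushing_function routes Arduino measurements through dashboard_cache.lpush (presumably appending to a per-sensor list in Redis) and everything else through dashboard_cache.put (presumably overwriting the latest value). A message shaped the way process_message reads it might look like the dictionary below; every value is invented, only the key names come from the code above:

sample_message = {
    'sensor_id': 'kinect-1',
    'sensor_type': 'kinect',   # anything other than 'arduino' selects put()
    'type': 'image_rgb',       # with a session_id, the image is also tracked per session
    'created_at': 1398400000,
    'session_id': 'session-42',
    'image_rgb': {'encoder_name': 'raw', 'image': '...'},
}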
Example #3
class HeadCrop(ParallelPDU):
    """ PDU that receives images and skeletons from Router
        and crops images (head only) """

    QUEUE = 'head-crop'
    POOL_SIZE = 20
    UNFINISHED_TASKS_THRESHOLD = 2 * POOL_SIZE

    def __init__(self, **kwargs):
        kwargs['heavy_preprocess'] = crop_head
        super(HeadCrop, self).__init__(**kwargs)
        self.last_image = None
        self.last_image_at = None
        self.last_skeleton = None
        self.last_skeleton_at = None
        self.session_tracker = SessionTracker()

    def process_message(self, message):
        # Step 1 - always update last_image/last_skeleton
        if (message['type'] == 'image_rgb' and
                message['sensor_type'] == 'kinect'):
            self.last_image = message['image_rgb']
            if 'encoder_name' not in self.last_image:
                self.last_image['encoder_name'] = 'raw'
            self.last_image_at = message['created_at']

        elif (message['type'] == 'skeleton' and
                message['sensor_type'] == 'kinect'):
            self.last_skeleton = message['skeleton_2D']
            self.last_skeleton_at = message['created_at']

        message['hack'] = {}
        message['hack']['last_image'] = copy.copy(self.last_image)
        message['hack']['last_image_at'] = self.last_image_at
        message['hack']['last_skeleton'] = copy.copy(self.last_skeleton)
        message['hack']['last_skeleton_at'] = self.last_skeleton_at

        super(HeadCrop, self).process_message(message)

    def light_postprocess(self, cropped_head, image_dict):
        # Route cropped images to face-recognition
        if cropped_head is not None:
            self.log("Sending an image to face recognition")
            self._send_to_recognition(cropped_head)
            self.session_tracker.track_event(image_dict['session_id'],
                                             image_dict['created_at'],
                                             {"head": cropped_head})

    def _send_to_recognition(self, image):
        """ Send a given image to face recognition. """
        self.send_to('face-recognition', {'head_image': image})
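The message['hack'] trick above snapshots the most recent image and skeleton so that the parallel crop_head preprocessing sees a consistent pair. It relies on copy.copy being a shallow copy: later top-level changes to self.last_image do not leak into a message that already captured it, although in-place mutation of a nested value both dictionaries still share would be visible in both. A quick standard-library illustration:

import copy

last_image = {'encoder_name': 'raw', 'pixels': bytearray(b'frame-1')}
snapshot = copy.copy(last_image)        # shallow copy: new dict, shared values

last_image['encoder_name'] = 'jpeg'     # top-level change, snapshot is unaffected
print(snapshot['encoder_name'])         # raw

last_image['pixels'][6:7] = b'2'        # mutating a shared value, visible in both
print(bytes(snapshot['pixels']))        # b'frame-2'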
Example #4
def __init__(self, **kwargs):
    kwargs['heavy_preprocess'] = crop_head
    super(HeadCrop, self).__init__(**kwargs)
    self.last_image = None
    self.last_image_at = None
    self.last_skeleton = None
    self.last_skeleton_at = None
    self.session_tracker = SessionTracker()
Example #5
def __init__(self, **kwargs):
    super(RoomPosition, self).__init__(**kwargs)
    self.session_tracker = SessionTracker()
    self.dashboard_cache = DashboardCache()
Example #6
class RoomPosition(PDU):
    ''' PDU that reads skeleton messages from kinect and translates the position
        of the torso (from kinect coordinates) to room coordinates using the
        sensor position value that is sent by kinect.

        INPUT:
        Message from kinect of skeleton type with sensor_position field
        The sensor_position needs the following fields:
            - X,Y,Z: the position of the sensor wrt the room
            - alpha: the rotation around y axis
            - beta: the rotation around x axis
            - gamma: the rotation around z axis
        The message needs a skeleton_3D key with the positions of the torso

        OUTPUT:
        Sends a message with a subject_position key set to a dictionary with
        X,Y,Z in room coordinates. The message is sent to the subject-position
        queue
    '''

    QUEUE = 'room-position'
    #torso is always first in list
    JOINTS = ['torso', 'head', 'neck', 'left_shoulder', 'right_shoulder',
              'left_elbow', 'right_elbow', 'left_hand', 'right_hand',
              'left_hip', 'right_hip', 'left_knee', 'right_knee',
              'left_foot', 'right_foot']

    def __init__(self, **kwargs):
        super(RoomPosition, self).__init__(**kwargs)
        self.session_tracker = SessionTracker()
        self.dashboard_cache = DashboardCache()

    def process_message(self, message):
        if message['type'] != 'skeleton':
            return

        sensor_position = message['sensor_position']
        """ We are doing rotation using the euler angles
            see http://en.wikipedia.org/wiki/Rotation_matrix
        """
        alpha = sensor_position['alpha']
        beta = sensor_position['beta']
        gamma  = sensor_position['gamma']

        rx = rot_x(beta)
        ry = rot_y(alpha)
        rz = rot_z(gamma)
        trans = tanslation(sensor_position['X'], sensor_position['Y'], sensor_position['Z'])

        # Some temporary-matrix juggling follows, since the cv library uses
        # output parameters for multiplications.

        temp_mat = cv.CreateMat(4, 4, cv.CV_64F)
        rot_mat = cv.CreateMat(4, 4, cv.CV_64F)

        cv.MatMul(ry, rx, temp_mat)
        cv.MatMul(temp_mat, rz, rot_mat)
        cv.MatMul(trans, rot_mat, temp_mat)

        N = len(self.JOINTS)
        pos = cv.CreateMat(4, N, cv.CV_64F)
        temp_pos = cv.CreateMat(4, N, cv.CV_64F)
        skeleton_3D = message['skeleton_3D']

        for i in range(N):
            joint = self.JOINTS[i]
            if joint in skeleton_3D:
                joint_pos = skeleton_3D[joint]
                temp_pos[0,i] = joint_pos['X']
                temp_pos[1,i] = joint_pos['Y']
                temp_pos[2,i] = joint_pos['Z']
                temp_pos[3,i] = 1

        cv.MatMul(temp_mat, temp_pos, pos)
        skeleton_in_room = {}
        for i in range(N):
            joint = self.JOINTS[i]
            if joint in skeleton_3D:
                skeleton_in_room[joint] = {
                    'X': pos[0, i],
                    'Y': pos[1, i],
                    'Z': pos[2, i],
                }

        position_message = {
            'X': pos[0,0],
            'Y': pos[1,0],
            'Z': pos[2,0],
        }

        self.log('Found position %s' % position_message)

        if message.get('session_id', None):
            sid = message['session_id']
            time = message['created_at']
            self.session_tracker.track_event(sid, time, {'skeleton_in_room': skeleton_in_room})
            # Keep it simple and put X, Y, Z as top-level keys for this
            # measurement of the current tracking session.
            self.session_tracker.track_event(sid, time, position_message)

        dashboard_message = {
            'created_at': message['created_at'],
            'sensor_id': message['sensor_id'],
            'sensor_position': sensor_position,
        }
        dashboard_message.update(position_message)

        # Send subject position to Redis
        self.dashboard_cache.lpush(sensor_id=message['sensor_id'],
            sensor_type=message['sensor_type'],
            measurement_type='subject_position',
            measurement=json.dumps(dashboard_message))

        return None
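The cv.CreateMat/cv.MatMul API writes its result into the output argument passed last, which is why the temporary matrices above are shuffled around. Roughly the same transform, written with numpy purely for illustration (the rot_x/rot_y/rot_z and translation helpers below are stand-ins sketched from scratch, not the project's own), composes T · Ry(alpha) · Rx(beta) · Rz(gamma) and applies it to homogeneous [X, Y, Z, 1] joint columns:

import numpy as np

def rot_x(beta):    # rotation around the x axis
    c, s = np.cos(beta), np.sin(beta)
    return np.array([[1, 0, 0, 0], [0, c, -s, 0], [0, s, c, 0], [0, 0, 0, 1]])

def rot_y(alpha):   # rotation around the y axis
    c, s = np.cos(alpha), np.sin(alpha)
    return np.array([[c, 0, s, 0], [0, 1, 0, 0], [-s, 0, c, 0], [0, 0, 0, 1]])

def rot_z(gamma):   # rotation around the z axis
    c, s = np.cos(gamma), np.sin(gamma)
    return np.array([[c, -s, 0, 0], [s, c, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])

def translation(x, y, z):
    t = np.eye(4)
    t[:3, 3] = [x, y, z]
    return t

# Same composition order as the cv.MatMul calls above: T * (Ry * Rx * Rz),
# applied to homogeneous [X, Y, Z, 1] columns, one column per joint.
def to_room_coordinates(sensor_position, joints_xyz):
    m = translation(sensor_position['X'], sensor_position['Y'], sensor_position['Z'])
    m = m @ (rot_y(sensor_position['alpha'])
             @ rot_x(sensor_position['beta'])
             @ rot_z(sensor_position['gamma']))
    cols = np.array([[p['X'], p['Y'], p['Z'], 1.0] for p in joints_xyz]).T
    return (m @ cols)[:3].T   # one (X, Y, Z) row per joint, in room coordinates

# Example input shaped like the docstring describes (values invented): with all
# angles at zero the transform reduces to the sensor's translation.
sensor_position = {'X': 1.0, 'Y': 2.5, 'Z': 0.0,
                   'alpha': 0.0, 'beta': 0.0, 'gamma': 0.0}
print(to_room_coordinates(sensor_position, [{'X': 0.1, 'Y': 0.2, 'Z': 3.0}]))
# [[1.1 2.7 3. ]]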
Example #7
def __init__(self, **kwargs):
    kwargs['heavy_preprocess'] = betaface_recognition
    super(FaceRecognition, self).__init__(**kwargs)
    self.session_tracker = SessionTracker()