示例#1
0
    def identify(self, face):
        """Match *face* against the known persons by embedding distance.

        Fills in ``face.result`` (id, min_distance, result, file_path) and
        returns the matched person id, or ``settings.ILLEGAL_ID`` when the
        best distance exceeds ``settings.DISTANCE_THRESHOLD``.  Returns
        ``None`` when the face has no embedding.
        """
        if face.embedding is None:
            return None

        st = log.get_current_time()
        # float('inf') is a safe sentinel: unlike the old magic constant
        # 100, it can never be undercut by an unusually large distance.
        total_min_distance = float('inf')
        min_id = settings.ILLEGAL_ID

        # See if the face is a match for the known face(s): take the
        # smallest distance over each person's sample embeddings.
        for person in self.persons:
            distances = facenet.distance(face.embedding,
                                         person.get_sample_embeddings())
            min_dist = min(distances)
            if min_dist < total_min_distance:
                total_min_distance = min_dist
                min_id = person.id

        # result.id keeps the closest match even when rejected below, so
        # the saved photo is still filed under the nearest person.
        face.result.id = min_id
        face.result.min_distance = total_min_distance

        if total_min_distance > settings.DISTANCE_THRESHOLD:
            # Best match is still too far away: treat as unknown/illegal.
            min_id = settings.ILLEGAL_ID
            face.result.result = settings.ILLEGAL
        else:
            face.result.result = settings.LEGAL

        et = log.get_current_time()
        log.logging.debug('identify time: ' + str(et - st) + 'ms')
        # str() guards against non-string person ids (concatenating a
        # non-str into the log message would raise TypeError).
        log.logging.info('identify result: ' + str(min_id))

        face.result.file_path = log.save_photo(face.image_raw,
                                               face.result.id,
                                               face.result.timestamp,
                                               total_min_distance)
        return min_id
    def identify(self, image):
        """Full recognition pipeline for one frame.

        Detects all faces in *image*, computes an embedding for each, and
        matches it against the known persons.  Returns the list of Face
        objects with ``embedding`` and ``name`` populated.
        """
        start = log.get_current_time()

        # Face detection.
        detected = self.detect.find_faces(image)

        # Encode and match every detected face.
        for one_face in detected:
            # Face encoding.
            one_face.embedding = self.encoder.generate_embedding(one_face)
            # Face comparison against the gallery.
            one_face.name = self.identifier.identify(one_face)

        end = log.get_current_time()
        log.logging.debug('recognition time: ' + str(end - start) + 'ms\n\n')
        return detected
示例#3
0
    def find_faces(self, image_raw):
        """Detect faces in *image_raw* and return a list of Face objects.

        Detection runs on a downscaled copy of the image; the resulting
        bounding boxes (with a crop margin added) are used both to crop
        the small image and, scaled back up, to crop the full-resolution
        original.
        """
        st = log.get_current_time()
        faces = []
        # Shrink the frame before MTCNN detection to reduce cost.
        image_small = cv2.resize(
            image_raw, (0, 0),
            fx=settings.RESIZE_FACTOR,
            fy=settings.RESIZE_FACTOR)
        bounding_boxes, _ = align.detect_face.detect_face(
            image_small, self.minsize, self.pnet, self.rnet, self.onet,
            self.threshold, self.factor)
        # NOTE(review): int() truncates — if RESIZE_FACTOR is not an exact
        # reciprocal of an integer (e.g. 0.3), the upscaled crop drifts
        # from the true location; round() would be closer.  Confirm the
        # configured RESIZE_FACTOR value.
        _factor = int(1 / settings.RESIZE_FACTOR)
        for bb in bounding_boxes:
            face = Face()
            face.image_raw = image_raw
            face.bounding_box = np.zeros(4, dtype=np.int32)

            # Expand the detected box by half the crop margin on each
            # side, clamped to the small image's bounds (img_size is
            # (height, width)).
            img_size = np.asarray(image_small.shape)[0:2]
            face.bounding_box[0] = np.maximum(
                bb[0] - self.face_crop_margin / 2, 0)
            face.bounding_box[1] = np.maximum(
                bb[1] - self.face_crop_margin / 2, 0)
            face.bounding_box[2] = np.minimum(
                bb[2] + self.face_crop_margin / 2, img_size[1])
            face.bounding_box[3] = np.minimum(
                bb[3] + self.face_crop_margin / 2, img_size[0])
            # Crop from the small image and (scaled up) from the original.
            cropped = image_small[face.bounding_box[1]:face.bounding_box[3],
                                  face.bounding_box[0]:face.bounding_box[2], :]
            raw_cropped = image_raw[face.bounding_box[1]*_factor:face.bounding_box[3]*_factor,
                                     face.bounding_box[0]*_factor:face.bounding_box[2]*_factor, :]
            # NOTE(review): scipy.misc.imresize is deprecated/removed in
            # modern SciPy; this file presumably pins an old version.
            face.image = misc.imresize(
                cropped, (self.face_crop_size, self.face_crop_size),
                interp='bilinear')
            face.face_image_raw = misc.imresize(
                raw_cropped, (self.face_crop_size, self.face_crop_size),
                interp='bilinear')
            face.result.timestamp = log.get_current_time()
            faces.append(face)

        et = log.get_current_time()
        log.logging.debug('face num: ' + str(len(faces)) + ' time usage:' +
                          str(et - st) + 'ms')
        return faces
    def generate_embedding(self, face):
        """Run one forward pass through the FaceNet graph and return the
        embedding vector for *face*."""
        start = log.get_current_time()

        # Resolve the graph's input and output tensors once.
        graph = tf.get_default_graph()
        images_in = graph.get_tensor_by_name("input:0")
        embeddings_out = graph.get_tensor_by_name("embeddings:0")
        phase_train_in = graph.get_tensor_by_name("phase_train:0")

        # Normalize the full-resolution crop before inference.
        whitened = facenet.prewhiten(face.face_image_raw)

        # Forward pass (inference mode: phase_train=False); the batch has
        # a single face, so take element 0.
        outputs = self.sess.run(
            embeddings_out,
            feed_dict={images_in: [whitened], phase_train_in: False})
        embedding = outputs[0]

        end = log.get_current_time()
        log.logging.debug('encoding time: ' + str(end - start) + 'ms')
        return embedding
示例#5
0
    def to_json(self):
        """Serialize this recognition result to a JSON-ready dict.

        All per-result fields are stringified; type and camera name come
        from the global settings module.  Key insertion order matches the
        original serialization.
        """
        payload = dict()
        payload['type'] = settings.TYPE_RECOGNITION_RESULT
        payload['id'] = str(self.id)
        payload['result'] = str(self.result)
        payload['time'] = str(self.timestamp)
        payload['distance'] = str(self.min_distance)
        payload['path'] = str(self.file_path)
        payload['camera'] = settings.CAMERA_NAME
        return payload


if __name__ == '__main__':
    # Manual smoke test: build a fake recognition result and exercise the
    # result queue, push, and email notification paths.
    r = Result()
    r.id = 'test1'
    r.min_distance = 0.1
    r.timestamp = log.get_current_time()
    r.result = 1
    r.file_path = '/tmp/test.jpg'
    put_result(r)
    push_result()
    send_email(r)
    # Same id with a fresh timestamp — presumably tests time-based
    # duplicate suppression; confirm against send_email's throttling.
    r.timestamp = log.get_current_time()
    send_email(r)
    # Different id should not be suppressed.
    r.id='test2'
    send_email(r)
    # Wait past the (assumed) throttle window, then resend — TODO confirm
    # the window length in send_email.
    time.sleep(3)
    r.timestamp = log.get_current_time()
    send_email(r)