Ejemplo n.º 1
0
    def post(self):
        """Detect every face on an uploaded image and try to identify each.

        Expects a multipart/form-data ``image`` file. Responds with the
        number of detected faces and, per face, its bounding area, the
        recognized person id (only when the classifier distance is within
        ``FACE_RECOGNITION_THRESHOLD``) and the raw distance.
        """
        request_parser = reqparse.RequestParser()
        request_parser.add_argument('image', type=FileStorage,
                                    location='files')
        parsed = request_parser.parse_args()

        uploaded = parsed['image']
        if not uploaded:
            return {'error': 'Image is not specified'}, 400

        faces = face_extractor.extract_faces(
            imread(uploaded, mode='RGB'), image_size=160, margin=0.1)
        if not len(faces):
            return {'found_faces': 0, 'persons': []}

        embeddings = model.predict(
            np.array([prewhiten(face[0]) for face in faces]))
        distances, identifiers = classifier.predict_on_batch(embeddings)

        persons = []
        for face_area, dist, uuid in zip_longest(
                (face[1] for face in faces), distances, identifiers):
            # A match only counts when an identifier exists and its
            # distance falls within the configured threshold.
            matched = (uuid and dist is not None
                       and dist <= app.config['FACE_RECOGNITION_THRESHOLD'])
            persons.append({
                'area': serialize_area(face_area),
                'id': uuid.decode('utf-8') if matched else None,
                'distance': float(dist) if dist is not None else None,
            })
        return {'found_faces': len(faces), 'persons': persons}
Ejemplo n.º 2
0
def recognition_socket(ws):
    """WebSocket handler streaming face detection/recognition results.

    Binary frames are decoded as images and answered with a JSON payload;
    text frames are JSON control messages that switch the current mode.
    Supported modes: 'face-detection' (default) and 'face-recognition'.
    Any other mode (including a missing 'mode' key) makes subsequent
    binary frames answer with an 'unexpected mode' error.
    """
    mode = 'face-detection'
    while not ws.closed:
        data = ws.receive()
        if not data:
            continue
        if isinstance(data, (bytes, bytearray)):
            image = BytesIO(data)
            faces = face_extractor.extract_faces(imread(image, mode='RGB'),
                                                 image_size=160,
                                                 margin=0.1)
            if not len(faces):
                ws.send(json.dumps({'found_faces': 0, 'persons': []}))
                continue
            if mode == 'face-detection':
                ws.send(
                    json.dumps({
                        'found_faces':
                        len(faces),
                        'persons': [{
                            'area': serialize_area(face_area),
                        } for face_area in (face[1] for face in faces)],
                    }))
            elif mode == 'face-recognition':
                input_tensor = np.array([prewhiten(face[0]) for face in faces])
                faces_embeddings = model.predict(input_tensor)
                distances, identifiers = classifier.predict_on_batch(
                    faces_embeddings)

                threshold = app.config['FACE_RECOGNITION_THRESHOLD']
                ws.send(
                    json.dumps({
                        'found_faces':
                        len(faces),
                        'persons': [{
                            'area':
                            serialize_area(face_area),
                            # Guard on `uuid` too: zip_longest pads with
                            # None, and a None identifier would otherwise
                            # raise AttributeError on .decode().
                            'id':
                            uuid.decode('utf-8') if uuid and dist is not None
                            and dist <= threshold else None,
                            'distance':
                            float(dist) if dist is not None else None
                        } for face_area, dist, uuid in zip_longest((
                            face[1]
                            for face in faces), distances, identifiers)],
                    }))
            else:
                ws.send(json.dumps({'error': 'unexpected mode'}))
        else:
            try:
                data = json.loads(data)
            except JSONDecodeError:
                ws.send(json.dumps({'error': 'invalid JSON'}))
            else:
                # NOTE(review): a message without 'mode' sets mode to None,
                # which is then reported as 'unexpected mode' on the next
                # binary frame — presumably intentional; confirm.
                mode = data.get('mode')
Ejemplo n.º 3
0
    def post(self, person_id):
        """Register one face image for an already-known person.

        Expects a multipart/form-data ``image`` file containing exactly
        one face. The face embedding is fed to the classifier via
        ``partial_fit``, the original and cropped face images are saved
        under IMAGE_DIR/<person_id>/, and the classifier is persisted.

        Returns a status dict; error responses carry 'status': 'error'
        and an explanatory 'error' message.
        """
        person_uuid = person_id.encode()
        if not classifier.uuid_in_classes(person_uuid):
            return {
                'status': 'error',
                'error': 'Person ID not found',
                'person_id': None
            }

        parser = reqparse.RequestParser()
        parser.add_argument('image', type=FileStorage, location='files')
        args = parser.parse_args()

        image = args.get('image')
        if not image:
            return {'status': 'error', 'error': 'Image is not specified'}, 400
        image = imread(image, mode='RGB')
        faces = face_extractor.extract_faces(image, image_size=160, margin=0.1)
        if not len(faces):
            return {'status': 'error', 'error': 'Faces not found on image'}
        elif len(faces) > 1:
            return {
                'status': 'error',
                'error': f'Found more than 1 face ({len(faces)})'
            }

        face_image = faces[0][0]
        face_input_tensor = np.array([prewhiten(face_image)])
        face_embeddings = model.predict(face_input_tensor)

        class_id = classifier.partial_fit(face_embeddings,
                                          np.array([person_uuid]))[0]

        person_images_dir = os.path.join(app.config.get('IMAGE_DIR'),
                                         person_id)
        # exist_ok avoids the exists()/makedirs() race when two requests
        # register images for the same person concurrently.
        os.makedirs(person_images_dir, exist_ok=True)

        person_image = os.path.join(person_images_dir,
                                    f'{class_id}_original.jpg')
        person_face_image = os.path.join(person_images_dir, f'{class_id}.jpg')
        imsave(person_image, image)
        imsave(person_face_image, face_image)

        classifier.save(app.config['CLASSIFIER_PATH'])
        return {'status': 'success', 'person_id': person_id, 'error': ''}
Ejemplo n.º 4
0
 def post(self):
     """Detect faces on an uploaded image and return their embeddings.

     Expects a multipart/form-data ``image`` file. Responds with the
     number of detected faces and, per face, its bounding area and the
     embedding vector produced by the model.
     """
     global face_extractor
     parse = reqparse.RequestParser()
     parse.add_argument('image', type=FileStorage, location='files')
     args = parse.parse_args()
     _image = args['image']
     if not _image:
         return {'error': 'Image is not specified'}, 400
     image = imread(_image, mode='RGB')
     faces = face_extractor.extract_faces(image, image_size=160, margin=0.1)
     if len(faces):
         input_tensor = np.array([prewhiten(face[0]) for face in faces])
         predictions = model.predict(input_tensor)
         return {
             'found_faces':
             len(faces),
             'faces': [{
                 'area': serialize_area(face_area),
                 # model.predict returns numpy rows, which are not JSON
                 # serializable — convert to a plain list; guard against
                 # the None padding zip_longest may produce.
                 'embeddings': prediction.tolist()
                 if prediction is not None else None
             } for face_area, prediction in zip_longest((
                 face[1] for face in faces), predictions)],
         }
     else:
         return {'found_faces': 0, 'faces': []}
Ejemplo n.º 5
0
    def post(self):
        """Compare the faces on two uploaded images.

        Expects multipart/form-data files ``image1`` and ``image2``, each
        containing exactly one face. Responds with the Euclidean distance
        between the two face embeddings and whether it falls within
        ``FACE_RECOGNITION_THRESHOLD``, plus per-image detection info.
        Error responses carry 'error' and a null 'distance'.
        """
        global face_extractor, model
        parse = reqparse.RequestParser()
        parse.add_argument('image1', type=FileStorage, location='files')
        parse.add_argument('image2', type=FileStorage, location='files')
        args = parse.parse_args()
        image1, image2 = args['image1'], args['image2']
        if not image1:
            return {'error': 'Image1 is not specified'}, 400
        if not image2:
            return {'error': 'Image2 is not specified'}, 400
        image1_faces = face_extractor.extract_faces(imread(image1, mode='RGB'),
                                                    image_size=160,
                                                    margin=0.1)
        image2_faces = face_extractor.extract_faces(imread(image2, mode='RGB'),
                                                    image_size=160,
                                                    margin=0.1)

        result = {
            'info': [
                {
                    'found_faces':
                    len(image1_faces),
                    'faces': [{
                        'area': serialize_area(face_area)
                    } for face_area in (face[1] for face in image1_faces)]
                },
                {
                    'found_faces':
                    len(image2_faces),
                    'faces': [{
                        'area': serialize_area(face_area)
                    } for face_area in (face[1] for face in image2_faces)]
                },
            ]
        }

        # Each image must contain exactly one face; the zero-face case
        # previously fell through to image*_faces[0][0] and raised
        # IndexError.
        if not len(image1_faces):
            return {
                'error': 'No faces found on first image',
                'distance': None,
                **result
            }
        if not len(image2_faces):
            return {
                'error': 'No faces found on second image',
                'distance': None,
                **result
            }
        if len(image1_faces) > 1:
            return {
                'error': f'Found {len(image1_faces)} faces on first image',
                'distance': None,
                **result
            }
        if len(image2_faces) > 1:
            return {
                'error': f'Found {len(image2_faces)} faces on second image',
                'distance': None,
                **result
            }
        face1_input_tensor = np.array([prewhiten(image1_faces[0][0])])
        face1_embeddings = model.predict(face1_input_tensor)[0]

        face2_input_tensor = np.array([prewhiten(image2_faces[0][0])])
        face2_embeddings = model.predict(face2_input_tensor)[0]

        _distance = distance.euclidean(face1_embeddings, face2_embeddings)

        return {
            'distance': _distance,
            'is_same_person':
            _distance <= app.config['FACE_RECOGNITION_THRESHOLD'],
            **result
        }