def get_content_image(self):
    # Load the content image, validate it, cap it at the configured maximum
    # size, and run the network preprocessing step.
    path = os.path.join(self.content_img_dir, self.content_img)
    img = cv2.imread(path, cv2.IMREAD_COLOR)
    check_image(img, path)
    img = img.astype(np.float32)
    img = check_resize(img, self.max_size)
    img = preprocess(img)
    return img
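check_image, check_resize and preprocess are helpers from the surrounding project and are not part of the snippet. A minimal sketch of how check_image and preprocess could look in this neural-style loader; the error message and the ImageNet mean values are assumptions, not taken from the source:

import numpy as np

def check_image(img, path):
    # cv2.imread signals failure by returning None instead of raising,
    # so turn a missing or unreadable file into an explicit error.
    if img is None:
        raise OSError('image could not be read: %s' % path)

def preprocess(img):
    # Assumed VGG-style preprocessing: add a batch dimension and subtract
    # the usual ImageNet channel means (values assumed, not from the snippet).
    img = img[np.newaxis, :, :, :]
    img -= np.array([123.68, 116.779, 103.939]).reshape((1, 1, 1, 3))
    return img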
Example #2
def get_style_images(content_img, style_images):
    # Resize every style image to the content image's height and width.
    _, ch, cw, cd = content_img.shape
    style_imgs = []
    for style_fn in style_images:
        path = os.path.join('./', style_fn)
        img = cv2.imread(path, cv2.IMREAD_COLOR)
        utils.check_image(img, path)
        img = img.astype(np.float32)
        img = cv2.resize(img, dsize=(cw, ch), interpolation=cv2.INTER_AREA)
        img = preprocess(img)
        style_imgs.append(img)
    return style_imgs
Example #3
def get_content_image(content_img):
    path = os.path.join('./', content_img)
    img = cv2.imread(path, cv2.IMREAD_COLOR)
    utils.check_image(img, path)
    img = img.astype(np.float32)
    h, w, d = img.shape
    mx = 512
    # Cap the longer side at mx pixels while preserving the aspect ratio.
    if h > w and h > mx:
        w = (float(mx) / float(h)) * w
        img = cv2.resize(img, dsize=(int(w), mx), interpolation=cv2.INTER_AREA)
    if w > mx:
        h = (float(mx) / float(w)) * h
        img = cv2.resize(img, dsize=(mx, int(h)), interpolation=cv2.INTER_AREA)
    img = preprocess(img)
    return img
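The two if blocks above cap the longer side of the content image at 512 pixels while keeping the aspect ratio. The same logic as a standalone helper; the function name and the max_size parameter are mine, for illustration only:

import cv2

def resize_to_max(img, max_size=512):
    # Shrink the image so that neither dimension exceeds max_size, keeping
    # the aspect ratio; images that are already small enough pass through.
    h, w = img.shape[:2]
    if max(h, w) <= max_size:
        return img
    if h >= w:
        new_h, new_w = max_size, int(w * max_size / h)
    else:
        new_h, new_w = int(h * max_size / w), max_size
    return cv2.resize(img, dsize=(new_w, new_h), interpolation=cv2.INTER_AREA)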
Example #4
def detect_face():
    """
    REST API returning list of detected features: bounding boxes for each face and eyes, nose, mouth positions.
    :return:
    """
    logger.info("Detecting faces")
    if len(request.data) == 0:
        logger.error("No data received")
        abort(415)
    if not check_image(BytesIO(request.data)):
        logger.error("Extension not allowed")
        abort(415)

    parser = reqparse.RequestParser()
    parser.add_argument("confidence",
                        type=float,
                        default=current_app.config["CONFIDENCE"])
    args = parser.parse_args()

    if args.get("confidence") < 0.0 or args.get("confidence") > 1.0:
        abort(412)

    image = convert_to_cv_image(request.data)
    model = get_detector()
    res = model.detect_faces(image)
    logger.info(f"Found {len(res)} faces in the image.")
    filtered_faces = [
        i for i in res if i.get("confidence") >= args.get("confidence")
    ]
    logger.info(
        f'Filtering with confidence {args.get("confidence")}. {len(filtered_faces)} faces left.'
    )
    return json.dumps(filtered_faces)
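A hedged sketch of a client for this endpoint; the host, port and /detect route are assumptions, while the raw image body and the optional confidence argument follow what the handler reads:

import requests

# Hypothetical client for the endpoint above; the URL and route are assumed.
# The handler reads the raw request body as the image and an optional
# "confidence" value between 0.0 and 1.0.
with open('people.jpg', 'rb') as f:
    resp = requests.post('http://localhost:5000/detect',
                         data=f.read(),
                         params={'confidence': 0.8},
                         headers={'Content-Type': 'image/jpeg'})
print(resp.json())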
Example #5
def web_faces():
    if request.method == "POST":
        if request.files:
            image = request.files["image"]
            if not check_image(image.stream):
                abort(415)
            filename = secure_filename(image.filename)
            logger.info("Successfully uploaded image")
            cvimage = convert_to_cv_image(image)
            model = get_detector()
            faces = model.detect_faces(cvimage)
            faceimg = paint_faces(cvimage, faces)
            cv2.imwrite(
                os.path.join(current_app.config["UPLOAD_FOLDER"], filename),
                faceimg)
            return render_template("index.html", filename=filename)
    if request.method == "GET":
        return render_template("index.html")
Example #6
def identify():

    # Retrieve the request as JSON ------------------------------

    req = request.get_json()

    if "img" in list(req.keys()):

        img = req["img"]

        if not check_image(img):
            return jsonify({
                'success':
                False,
                'errorMsg':
                'une image au format string base64 est attendue'
            }), 205

        # Strip the "data:image/jpeg;base64" prefix and keep only the image data
        img = img.split(',')[1]

        # Decode the image: base64 string to an OpenCV image
        im_bytes = base64.b64decode(img)
        im_arr = np.frombuffer(im_bytes, dtype=np.uint8)
        img = cv2.imdecode(im_arr, flags=cv2.IMREAD_COLOR)

    else:
        return jsonify({
            'success': False,
            'errorMsg': 'une image est attendue par l\'api'
        }), 205

    # Step 1: Extract the faces from the image ------------------------------

    detected_faces = face_recognition.extract_faces(image=img,
                                                    detector=detector)

    # Step 2: Check that at least one person is present in the image ------------------------------

    if len(detected_faces) < 1:
        return jsonify({
            'success': False,
            'errorMsg': 'aucune personne présente sur l\'image'
        }), 500

    # Step 3: Align the faces ------------------------------

    aligned_faces = face_recognition.align_faces(faces=detected_faces,
                                                 image=img,
                                                 predictor=predictor)

    # Step 4: Encode the faces for facial recognition ------------------------------

    vectorized_faces = face_recognition.get_embeddings(
        faces_pixels=aligned_faces, model=model)

    # Step 5: Identification ------------------------------

    result = []
    for unknown in vectorized_faces:

        best_cosine = 1.0
        best_label = 'inconnue'

        for i, known in enumerate(known_persons):

            face_dist = face_recognition.face_distance(unknown, known)

            if (best_cosine == 1.0 or best_cosine > face_dist):
                best_cosine = face_dist
                best_label = labels[i]

        if (best_cosine > 0.4):
            best_label = 'inconnue'

        result.append(best_label)

    # Step 6: Draw the names on the image ------------------------------

    for i, face in enumerate(detected_faces):
        # Get the face bounding box corners so a rectangle can be drawn around it
        startX, startY = face.left(), face.top()
        endX, endY = face.right(), face.bottom()

        # Keep the label inside the image when the face touches the top edge
        y = startY - 10 if startY - 10 > 10 else startY + 10

        # Draw the rectangle and the person's name on the image
        cv2.rectangle(img, (startX, startY), (endX, endY), (0, 0, 255), 2)
        cv2.putText(img, result[i], (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (0, 0, 255), 2)

    # Encode the image as JPEG
    retval, buffer = cv2.imencode('.jpg', img)

    # Encode the JPEG bytes as a base64 text string
    jpg_as_text = base64.b64encode(buffer).decode(ENCODING)

    return jsonify({"img": jpg_as_text}), 200
Example #7
def add_person():

    # Retrieve the request as JSON ------------------------------

    req = request.get_json()

    if "img" in list(req.keys()):

        img = req["img"]

        if not check_image(img):
            return jsonify({
                'success':
                False,
                'errorMsg':
                'une image au format string base64 est attendue'
            }), 205

        # Strip the "data:image/jpeg;base64" prefix and keep only the image data
        img = img.split(',')[1]

        # Decode the image: base64 string to an OpenCV image
        im_bytes = base64.b64decode(img)
        im_arr = np.frombuffer(im_bytes, dtype=np.uint8)
        img = cv2.imdecode(im_arr, flags=cv2.IMREAD_COLOR)

    else:
        return jsonify({
            'success': False,
            'errorMsg': 'une image est attendue par l\'api'
        }), 205

    if "name" in list(req.keys()):

        name = req["name"]

        # Check whether the name is empty
        if not name.strip():
            return jsonify({
                'success': False,
                'errorMsg': 'le nom de la personne est attendue'
            }), 205

        # Lowercase the name
        name = name.lower()

    else:
        return jsonify({
            'success':
            False,
            'errorMsg':
            'le nom de la personne est attendue par l\'api'
        }), 205

    # Step 1: Extract the faces from the image ------------------------------

    detected_faces = face_recognition.extract_faces(image=img,
                                                    detector=detector)

    # Step 2: Check that no more than one person is present in the image ------------------------------

    if len(detected_faces) > 1:
        return jsonify({
            'success':
            False,
            'errorMsg':
            'plusieurs personnes présentes sur l\'image'
        }), 500

    # Step 3: Align the face ------------------------------

    aligned_faces = face_recognition.align_faces(faces=detected_faces,
                                                 image=img,
                                                 predictor=predictor)

    # Step 4: Add the face to the database ------------------------------

    if not add_to_base(name, aligned_faces[0], database_path=database_path):
        return jsonify({
            'success':
            False,
            'errorMsg':
            'accès en écriture à la base de donnée impossible'
        }), 500

    # Step 5: Encode the face ------------------------------

    vectorized_faces = face_recognition.get_embeddings(
        faces_pixels=aligned_faces, model=model)

    # Step 6: Add the encoded face to the list of known faces ------------------------------

    known_persons.append(vectorized_faces[0])
    labels.append(name)

    return jsonify({"success": True}), 200
Example #8

def save_image(fname, image):
    if fname is None:
        return False
    basename = os.path.basename(fname)
    # Write to a temporary file first so the real file is only replaced
    # once the image has been validated.
    tmpname = fname + '.tmp'
    try:
        f = open(tmpname, 'wb')
    except IOError as e:
        warning('Can not save face to %s: %s\n' % (tmpname, e.strerror))
        return False
    f.write(image)
    f.close()

    if not check_image(tmpname):
        xremove(tmpname)
        return False

    if not xrename(tmpname, fname):
        xremove(tmpname)
        warning('Can not rename: %s -> %s\n' %(tmpname, fname))
        return False

    return True

def save_community_icon(com, icon):
    # personal communities can have arbitrarily large icons because the picture
    # is not sent over network
    if com.get('peer') and len(icon) > TP_MAX_FACE_SIZE:
        warning('Community %s has too large icon picture: %d\n' %(com.get('name'), len(icon)))
Example #9
def test_examples(imgpath, expected, app):
    with open(imgpath, "rb") as f:
        s = BytesIO(f.read())
        with app.app_context():
            res = check_image(s)
        assert res == expected
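test_examples receives its arguments from the test harness; with pytest it could be driven roughly like this. The fixture file paths, expected values and the app fixture are assumptions for illustration:

import pytest
from io import BytesIO

# check_image is the project function under test; "app" is assumed to be a
# Flask application fixture provided by conftest.py.
@pytest.mark.parametrize('imgpath, expected', [
    ('tests/data/face.jpg', True),
    ('tests/data/not_an_image.txt', False),
])
def test_check_image(imgpath, expected, app):
    with open(imgpath, 'rb') as f:
        s = BytesIO(f.read())
    with app.app_context():
        assert check_image(s) == expected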