Example #1
def representWrapper(req, trx_id=0):

    resp_obj = jsonify({'success': False})

    #-------------------------------------
    #find out model

    model_name = "VGG-Face"
    distance_metric = "cosine"
    detector_backend = 'opencv'

    if "model_name" in list(req.keys()):
        model_name = req["model_name"]

    if "detector_backend" in list(req.keys()):
        detector_backend = req["detector_backend"]

    #-------------------------------------
    #retrieve images from request

    img = ""
    if "img" in list(req.keys()):
        img = req["img"]  #list
        #print("img: ", img)

    validate_img = len(img) > 11 and img[0:11] == "data:image/"

    if not validate_img:
        print("invalid image passed!")
        return jsonify({
            'success': False,
            'error': 'you must pass img as base64 encoded string'
        }), 205

    #-------------------------------------
    #call represent function from the interface

    try:

        embedding = DeepFace.represent(img,
                                       model_name=model_name,
                                       detector_backend=detector_backend)

    except Exception as err:
        print("Exception: ", str(err))
        # return immediately so the undefined embedding is never referenced below
        return jsonify({'success': False, 'error': str(err)}), 205

    #-------------------------------------

    #print("embedding is ", len(embedding), " dimensional vector")
    resp_obj = {}
    resp_obj["embedding"] = embedding

    #-------------------------------------

    return resp_obj
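
Exposing the wrapper above over HTTP could look like the sketch below; the Flask app object and the /represent route are assumptions for illustration, not part of the original example.

from flask import Flask, request

app = Flask(__name__)

@app.route("/represent", methods=["POST"])
def represent_route():
    # hypothetical route: forward the parsed JSON body to representWrapper above
    req = request.get_json()
    return representWrapper(req)
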
Example #2
def deepFaceVector():
    model_names = ["VGG-Face", "Facenet", "OpenFace", "DeepFace", "DeepID", "Dlib", "ArcFace"]
    for dirpath, dirnames, filenames in os.walk(params.path):
        filenames = [f for f in filenames if not f[0] == '.' and f[-3:] == 'png']
        for file in sorted(filenames):
            pathWithOutBaseName, id = os.path.split(dirpath)
            timeFile = file[0:12]
            img_path = dirpath + "/" + timeFile + "_" + id + "_body.png"
            for modelName in model_names:
                try:
                    print(img_path + " ===> " + modelName)
                    result = DeepFace.represent(img_path, model_name=modelName, enforce_detection=False)
                except Exception:
                    # skip this model when no embedding can be computed
                    print("Cannot apply algorithm " + img_path + " ===> " + modelName)
                    continue
                np.save(dirpath + "/" + timeFile + "_" + id + "_" + modelName + "_deepFaceVector", result)
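
np.save stores each embedding as a .npy file next to the frame it came from. A quick sanity check could reload one of them; the file name below is hypothetical.

import numpy as np

# hypothetical path: reload a saved embedding and inspect it
vec = np.load("frames/person1/000000000000_person1_ArcFace_deepFaceVector.npy", allow_pickle=True)
print(vec.shape)
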
Example #3
def test_cases():

    print("DeepFace.detectFace test")

    for detector in detectors:
        img = DeepFace.detectFace("dataset/img11.jpg",
                                  detector_backend=detector)
        evaluate(img.shape[0] > 0 and img.shape[1] > 0)
        print(detector, " test is done")

    print("-----------------------------------------")

    img_path = "dataset/img1.jpg"
    embedding = DeepFace.represent(img_path)
    print("Function returned ", len(embedding), "dimensional vector")
    evaluate(len(embedding) > 0)

    print("-----------------------------------------")

    print("Face detectors test")

    for detector in detectors:
        print(detector + " detector")
        res = DeepFace.verify(dataset[0][0],
                              dataset[0][1],
                              detector_backend=detector)
        print(res)
        assert res["verified"] == dataset[0][2]

    print("-----------------------------------------")

    print("Find function test")

    df = DeepFace.find(img_path="dataset/img1.jpg", db_path="dataset")
    print(df.head())
    evaluate(df.shape[0] > 0)

    print("-----------------------------------------")

    print("Facial analysis test. Passing nothing as an action")

    img = "dataset/img4.jpg"
    demography = DeepFace.analyze(img)
    print(demography)

    evaluate(demography["age"] > 20 and demography["age"] < 40)
    evaluate(demography["dominant_gender"] == "Woman")

    print("-----------------------------------------")

    print("Facial analysis test. Passing all to the action")
    demography = DeepFace.analyze(img, ['age', 'gender', 'race', 'emotion'])

    print("Demography:")
    print(demography)

    #check response is a valid json
    print("Age: ", demography["age"])
    print("Gender: ", demography["dominant_gender"])
    print("Race: ", demography["dominant_race"])
    print("Emotion: ", demography["dominant_emotion"])

    evaluate(demography.get("age") is not None)
    evaluate(demography.get("dominant_gender") is not None)
    evaluate(demography.get("dominant_race") is not None)
    evaluate(demography.get("dominant_emotion") is not None)

    print("-----------------------------------------")

    print(
        "Facial analysis test 2. Remove some actions and check they are not computed"
    )
    demography = DeepFace.analyze(img, ['age', 'gender'])

    print("Age: ", demography.get("age"))
    print("Gender: ", demography.get("dominant_gender"))
    print("Race: ", demography.get("dominant_race"))
    print("Emotion: ", demography.get("dominant_emotion"))

    evaluate(demography.get("age") is not None)
    evaluate(demography.get("dominant_gender") is not None)
    evaluate(demography.get("dominant_race") is None)
    evaluate(demography.get("dominant_emotion") is None)

    print("-----------------------------------------")

    print("Facial recognition tests")

    for model in models:
        for metric in metrics:
            for instance in dataset:
                img1 = instance[0]
                img2 = instance[1]
                result = instance[2]

                resp_obj = DeepFace.verify(img1,
                                           img2,
                                           model_name=model,
                                           distance_metric=metric)

                prediction = resp_obj["verified"]
                distance = round(resp_obj["distance"], 2)
                threshold = resp_obj["threshold"]

                passed = prediction == result

                evaluate(passed)

                if passed:
                    test_result_label = "passed"
                else:
                    test_result_label = "failed"

                if prediction:
                    classified_label = "verified"
                else:
                    classified_label = "unverified"

                print(
                    img1.split("/")[-1], "-",
                    img2.split("/")[-1], classified_label,
                    "as same person based on", model, "and", metric,
                    ". Distance:", distance, ", Threshold:", threshold, "(",
                    test_result_label, ")")

            print("--------------------------")

    # -----------------------------------------

    print("Passing numpy array to analyze function")

    img = cv2.imread("dataset/img1.jpg")
    resp_obj = DeepFace.analyze(img)
    print(resp_obj)

    evaluate(resp_obj["age"] > 20 and resp_obj["age"] < 40)
    evaluate(resp_obj["gender"] == "Woman")

    print("--------------------------")

    print("Passing numpy array to verify function")

    img1 = cv2.imread("dataset/img1.jpg")
    img2 = cv2.imread("dataset/img2.jpg")

    res = DeepFace.verify(img1, img2)
    print(res)

    evaluate(res["verified"] == True)

    print("--------------------------")

    print("Passing numpy array to find function")

    img1 = cv2.imread("dataset/img1.jpg")

    df = DeepFace.find(img1, db_path="dataset")

    print(df.head())

    evaluate(df.shape[0] > 0)

    print("--------------------------")

    print("non-binary gender tests")

    #interface validation - no need to call evaluate here

    for img1_path, img2_path, verified in dataset:
        for detector in detectors:
            result = DeepFace.analyze(img1_path,
                                      actions=('gender', ),
                                      detector_backend=detector,
                                      enforce_detection=False)

            print(result)

            assert 'gender' in result.keys()
            assert 'dominant_gender' in result.keys() and result["dominant_gender"] in ["Man", "Woman"]

            if result["dominant_gender"] == "Man":
                assert result["gender"]["Man"] > result["gender"]["Woman"]
            else:
                assert result["gender"]["Man"] < result["gender"]["Woman"]
Example #4
#-----------------------------------------

print("DeepFace.detectFace test")
detectors = ['opencv', 'ssd', 'dlib', 'mtcnn', 'retinaface']
for detector in detectors:
    img = DeepFace.detectFace("dataset/img11.jpg", detector_backend=detector)
    print(detector, " test is done")
#import matplotlib.pyplot as plt
#plt.imshow(img)
#plt.show()

#-----------------------------------------
print("-----------------------------------------")

img_path = "dataset/img1.jpg"
embedding = DeepFace.represent(img_path)
print("Function returned ", len(embedding), "dimensional vector")

model_name = "VGG-Face"
model = DeepFace.build_model(model_name)
print(model_name, " is built")
embedding = DeepFace.represent(img_path, model=model)
print("Represent function returned ", len(embedding), "dimensional vector")

#-----------------------------------------

dataset = [['dataset/img1.jpg', 'dataset/img2.jpg', True],
           ['dataset/img1.jpg', 'dataset/img6.jpg', True]]

print("-----------------------------------------")
Example #5
#! /usr/bin/python3

from deepface import DeepFace
import time

tik = time.time()
embedding = DeepFace.represent("img/igor1.jpg", model_name='ArcFace')
print("embedding", embedding, time.time() - tik)

result = DeepFace.verify("img/katya1.jpg",
                         "img/katya2.jpg",
                         model_name='ArcFace',
                         detector_backend='retinaface')
print("Is verified: ", result["verified"], time.time() - tik)