Example no. 1
def home_face(request):
    # print(static)
    if request.method == "POST":
        # print(request.POST.get('submit', ''))
        print(request.user)

        data = request.POST.get('account').partition(",")[2]
        imgdata = base64.b64decode(data)
        filename = "{}temp.jpg".format(settings.MEDIA_ROOT)
        # filename = 'promise.jpg'  # I assume you have a way of picking unique filenames
        with open(filename, 'wb') as f:
            f.write(imgdata)

        # search the face database with the image that was just written
        df = DeepFace.find(img_path=filename,
                           db_path=settings.MEDIA_ROOT + 'sets')

        if df.shape[0] > 0:
            matched = df.iloc[0].identity
            print(matched)
        else:
            print("no match")

        # print(request.POST.get('account')+"test")
    context = {}
    return render(request, "face/face.html", context)
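For context on the partition(",")[2] call above: the browser side typically posts a data URL whose base64 payload follows the first comma. A minimal sketch of that parsing, using made-up placeholder bytes rather than a real frame:

import base64

# Build a data URL the way a canvas.toDataURL("image/jpeg") call would (payload is fake bytes)
raw = b"\xff\xd8\xff\xe0 not a real JPEG, just illustrative bytes"
account = "data:image/jpeg;base64," + base64.b64encode(raw).decode("ascii")

header, _, payload = account.partition(",")   # header == "data:image/jpeg;base64"
assert base64.b64decode(payload) == raw       # payload decodes back to the original bytes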
Example no. 2
def face_authenticate(imageBase):
    # take image as base64 arg
    data = imageBase

    facematch = False
    imgdata = base64.b64decode(data)
    filename = settings.MEDIA_ROOT + "temp.jpg"
    # filename = "temp.jpg".format(settings.MEDIA_ROOT)

    with open(filename, 'wb') as f:
        f.write(imgdata)

    # compare against the face database and authenticate
    try:
        df = DeepFace.find(img_path=settings.MEDIA_ROOT + 'temp.jpg',
                           db_path=settings.MEDIA_ROOT + 'sets')

        if df.shape[0] > 0:
            matched = df.iloc[0].identity

            facematch = True
        else:
            facematch = False
    except Exception:
        facematch = False
    return facematch
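A hypothetical call site for face_authenticate, assuming the probe image is read from disk and base64-encoded the same way the client would supply it (the file name is a placeholder):

import base64

with open("probe.jpg", "rb") as f:
    encoded = base64.b64encode(f.read())   # bytes are fine: b64decode accepts bytes or str

if face_authenticate(encoded):
    print("face matched against the 'sets' database")
else:
    print("no match")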
Example no. 3
    def get(self, request):
        data = []
        message = ''
        img_file = request.query_params.get('filename')
        if img_file:
            img_name = root_dir + '\\' + img_file
            # load the image and convert from OpenCV's BGR ordering to RGB
            img = cv2.cvtColor(cv2.imread(img_name), cv2.COLOR_BGR2RGB)
            result = DeepFace.find(img, db_path=img_dir)
            if result.shape[0] > 0:
                for i, j in zip(result['identity'], result['distance']):
                    data.append({
                        "path": i,
                        "matched %": str(round(j * 100, 2)) + "%"
                    })
                message = "Employee ID: xxx"
            else:
                message = "Unknown Person"

        return Response({'data': data, 'message': message})
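The DataFrame returned by DeepFace.find is version-dependent: older releases name the distance column after the model and metric (e.g. 'VGG-Face_cosine'), newer ones use 'distance', and recent versions return a list of DataFrames. A hedged helper that normalizes the result before building a response; the column names below are assumptions, not guaranteed by the API:

def first_match(result):
    """Return (identity, distance) for the closest match, or None if there is none."""
    df = result[0] if isinstance(result, list) else result   # newer releases return a list
    if df is None or df.empty:
        return None
    dist_col = next(
        (c for c in df.columns
         if c == "distance" or c.endswith(("_cosine", "_euclidean", "_euclidean_l2"))),
        None)
    row = df.iloc[0]
    return row["identity"], (float(row[dist_col]) if dist_col is not None else None)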
Example no. 4
def detect_deepface(img):
    detections = detect_face2(img=img,
                              detector_backend=detector_backend,
                              grayscale=grayscale,
                              enforce_detection=enforce_detection)

    if len(detections) == 0:
        # no faces found; keep the return shape consistent with the detection path below
        return img, "Unknown"

    bboxs = []
    face_count = 0
    for detection in detections:
        x, y, w, h = detection["box"]
        detected_face = img[int(y):int(y + h), int(x):int(x + w)]
        face_detected = "Unknown"

        try:
            detected_face = detected_face[:, :, ::-1]
            new_im = Image.fromarray(detected_face)
            tmp_image = str(int(time.time())) + ".jpg"
            new_im.save(tmp_image)
            df = DeepFace.find(img_path=tmp_image,
                               db_path=db_folder,
                               enforce_detection=False)
            os.remove(tmp_image)
            face_detected = re.split(r' |/|\\', df['identity'].iloc[0])[1]
        except Exception as err:
            print("ERROR:", err)

        bboxs.append([x, y, w, h, detection['confidence'], face_detected])

        print(f"face detected: {face_detected}")

        face_count += 1

    output_img = drawBBoxs(img, bboxs, col_dict,
                           col_dict.get(face_detected, "yellow"))

    return output_img, face_detected
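The temporary filename above is derived from int(time.time()), which can collide when several faces are processed within the same second. A sketch of an alternative using the standard library's tempfile module; the helper name is made up:

import os
import tempfile
from PIL import Image

def save_temp_face(face_rgb):
    """Write an RGB face crop to a unique temporary .jpg and return its path."""
    fd, path = tempfile.mkstemp(suffix=".jpg")
    os.close(fd)                          # Image.save reopens the path itself
    Image.fromarray(face_rgb).save(path)
    return path                           # caller passes it to DeepFace.find, then removes it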
Example no. 5
    def face_authenticate(self, imageBase):

        data = imageBase

        facematch = False
        imgdata = base64.b64decode(data)
        filename = "{}temp.jpg".format(settings.MEDIA_ROOT)
        # filename = 'promise.jpg'  # I assume you have a way of picking unique filenames
        with open(filename, 'wb') as f:
            f.write(imgdata)

        # search the face database with the image that was just written
        df = DeepFace.find(img_path=filename,
                           db_path=settings.MEDIA_ROOT + 'sets')

        if df.shape[0] > 0:
            matched = df.iloc[0].identity
            print(matched)
            facematch = True
        else:
            print("no match")

        return facematch
Example no. 6
print(res)

print("dlib detector")
res = DeepFace.verify(dataset, detector_backend = 'dlib')
print(res)

print("mtcnn detector")
res = DeepFace.verify(dataset, detector_backend = 'mtcnn')
print(res)

print("-----------------------------------------")

print("Single find function test")

df = DeepFace.find(img_path = "dataset/img1.jpg", db_path = "dataset"
	#, model_name = 'Dlib'
)
print(df.head())

print("-----------------------------------------")

print("Pre-built model for single find function test")

model_name = "VGG-Face"
model = DeepFace.build_model(model_name)
print(model_name," is built")

df = DeepFace.find(img_path = "dataset/img1.jpg", db_path = "dataset"
					, model_name = model_name, model = model
)
print(df.head())
Example no. 7
def detect_deepface_cropped(img, person_boxes, old_ballots, original_boxes):
    """
    takes in a list of person bounding boxes and image
    crops the image to the bounding boxes and detects from database
    returns drawn img, person bounding boxes and id-ed name: [(x1, y1), (x2, y2), "ZuoLin"]
    """
    bboxs = []
    person_face_detected = []
    output_img = img.copy()
    for i in range(len(person_boxes)):
        p_bbox = person_boxes[i]
        o_bbox = original_boxes[i]
        (xmin, ymin), (xmax, ymax) = p_bbox
        cropped_img = img[ymin:ymax, xmin:xmax]
        detections = detect_face2(img=cropped_img,
                                  detector_backend=detector_backend,
                                  grayscale=grayscale,
                                  enforce_detection=enforce_detection)
        (oxmin, oymin), (oxmax, oymax) = o_bbox  # unpack the original (uncropped) box
        o_cropped_img = img[oymin:oymax, oxmin:oxmax]

        face_detected = None
        if len(detections) == 1:  # Only retain crops that have exactly 1 detected face

            x, y, w, h = detections[0]["box"]
            y_buffer = int(h * 0.5)
            x_buffer = int(w * 0.5)
            detected_face = cropped_img[
                max(0, int(y - y_buffer)):min(cropped_img.shape[0], int(y + h + y_buffer)),
                max(0, int(x - x_buffer)):min(cropped_img.shape[1], int(x + w + x_buffer))]

            try:
                ## calls reid_processor to confirm identity
                # best_body_guess, body_confidence = reid.detect_body_cropped(cropped_img)

                detected_face = detected_face[:, :, ::-1]
                new_im = Image.fromarray(detected_face)
                tmp_image = str(int(time.time())) + ".jpg"
                new_im.save(tmp_image)
                df = DeepFace.find(img_path=tmp_image,
                                   db_path=db_folder,
                                   enforce_detection=False)
                os.remove(tmp_image)
                df.sort_values('VGG-Face_cosine', inplace=True, ascending=True)
                face_detected = re.split(r' |/|\\', df['identity'].iloc[0])[1]
                dist = float(df['VGG-Face_cosine'].iloc[0])

                ## need code when face id is not confident, use reid to confirm/dispute
                if dist > 0.035:
                    face_detected = None
                    pass
                else:
                    print('p detected by face:', face_detected)

            except Exception as err:
                print("ERROR:", err)

        if face_detected is None:
            best_identity, best_confidence = reid.detect_body_cropped(
                o_cropped_img[:, :, ::-1])
            face_detected = best_identity
            print('p detected by body:', face_detected)
            bboxs.append(
                [None, None, None, None, best_confidence, face_detected])
        else:
            bboxs.append([
                xmin + x, ymin + y, w, h, detections[0]['confidence'],
                face_detected
            ])
        person_face_detected.append([(xmin, ymin), (xmax, ymax),
                                     face_detected])

    old_ballots = assign_new_to_old(person_face_detected, old_ballots, bboxs,
                                    original_boxes)
    for face_detected in old_ballots:
        print(f"person detected: {face_detected.name}",
              f"confidence: {face_detected.best_count}")
        if face_detected.info[0] is not None:
            output_img = drawBBox(output_img, face_detected.info, col_dict,
                                  "yellow")
        cc = face_detected.o_coords
        output_img = drawBBox2(output_img, cc[0][0], cc[1][0], cc[0][1],
                               cc[1][1], face_detected.name, col_dict,
                               "yellow")

    person_detected = [[x.coords[0], x.coords[1], x.name] for x in old_ballots
                       if x.name]
    return output_img, person_detected, old_ballots
Example no. 8
def test_cases():

    print("DeepFace.detectFace test")

    for detector in detectors:
        img = DeepFace.detectFace("dataset/img11.jpg",
                                  detector_backend=detector)
        evaluate(img.shape[0] > 0 and img.shape[1] > 0)
        print(detector, " test is done")

    print("-----------------------------------------")

    img_path = "dataset/img1.jpg"
    embedding = DeepFace.represent(img_path)
    print("Function returned ", len(embedding), "dimensional vector")
    evaluate(len(embedding) > 0)

    print("-----------------------------------------")

    print("Face detectors test")

    for detector in detectors:
        print(detector + " detector")
        res = DeepFace.verify(dataset[0][0],
                              dataset[0][1],
                              detector_backend=detector)
        print(res)
        assert res["verified"] == dataset[0][2]

    print("-----------------------------------------")

    print("Find function test")

    df = DeepFace.find(img_path="dataset/img1.jpg", db_path="dataset")
    print(df.head())
    evaluate(df.shape[0] > 0)

    print("-----------------------------------------")

    print("Facial analysis test. Passing nothing as an action")

    img = "dataset/img4.jpg"
    demography = DeepFace.analyze(img)
    print(demography)

    evaluate(demography["age"] > 20 and demography["age"] < 40)
    evaluate(demography["dominant_gender"] == "Woman")

    print("-----------------------------------------")

    print("Facial analysis test. Passing all to the action")
    demography = DeepFace.analyze(img, ['age', 'gender', 'race', 'emotion'])

    print("Demography:")
    print(demography)

    #check response is a valid json
    print("Age: ", demography["age"])
    print("Gender: ", demography["dominant_gender"])
    print("Race: ", demography["dominant_race"])
    print("Emotion: ", demography["dominant_emotion"])

    evaluate(demography.get("age") is not None)
    evaluate(demography.get("dominant_gender") is not None)
    evaluate(demography.get("dominant_race") is not None)
    evaluate(demography.get("dominant_emotion") is not None)

    print("-----------------------------------------")

    print(
        "Facial analysis test 2. Remove some actions and check they are not computed"
    )
    demography = DeepFace.analyze(img, ['age', 'gender'])

    print("Age: ", demography.get("age"))
    print("Gender: ", demography.get("dominant_gender"))
    print("Race: ", demography.get("dominant_race"))
    print("Emotion: ", demography.get("dominant_emotion"))

    evaluate(demography.get("age") is not None)
    evaluate(demography.get("dominant_gender") is not None)
    evaluate(demography.get("dominant_race") is None)
    evaluate(demography.get("dominant_emotion") is None)

    print("-----------------------------------------")

    print("Facial recognition tests")

    for model in models:
        for metric in metrics:
            for instance in dataset:
                img1 = instance[0]
                img2 = instance[1]
                result = instance[2]

                resp_obj = DeepFace.verify(img1,
                                           img2,
                                           model_name=model,
                                           distance_metric=metric)

                prediction = resp_obj["verified"]
                distance = round(resp_obj["distance"], 2)
                threshold = resp_obj["threshold"]

                passed = prediction == result

                evaluate(passed)

                if passed:
                    test_result_label = "passed"
                else:
                    test_result_label = "failed"

                if prediction == True:
                    classified_label = "verified"
                else:
                    classified_label = "unverified"

                print(
                    img1.split("/")[-1], "-",
                    img2.split("/")[-1], classified_label,
                    "as same person based on", model, "and", metric,
                    ". Distance:", distance, ", Threshold:", threshold, "(",
                    test_result_label, ")")

            print("--------------------------")

    # -----------------------------------------

    print("Passing numpy array to analyze function")

    img = cv2.imread("dataset/img1.jpg")
    resp_obj = DeepFace.analyze(img)
    print(resp_obj)

    evaluate(resp_obj["age"] > 20 and resp_obj["age"] < 40)
    evaluate(resp_obj["gender"] == "Woman")

    print("--------------------------")

    print("Passing numpy array to verify function")

    img1 = cv2.imread("dataset/img1.jpg")
    img2 = cv2.imread("dataset/img2.jpg")

    res = DeepFace.verify(img1, img2)
    print(res)

    evaluate(res["verified"] == True)

    print("--------------------------")

    print("Passing numpy array to find function")

    img1 = cv2.imread("dataset/img1.jpg")

    df = DeepFace.find(img1, db_path="dataset")

    print(df.head())

    evaluate(df.shape[0] > 0)

    print("--------------------------")

    print("non-binary gender tests")

    #interface validation - no need to call evaluate here

    for img1_path, img2_path, verified in dataset:
        for detector in detectors:
            result = DeepFace.analyze(img1_path,
                                      actions=('gender', ),
                                      detector_backend=detector,
                                      enforce_detection=False)

            print(result)

            assert 'gender' in result.keys()
            assert ('dominant_gender' in result.keys()
                    and result["dominant_gender"] in ["Man", "Woman"])

            if result["dominant_gender"] == "Man":
                assert result["gender"]["Man"] > result["gender"]["Woman"]
            else:
                assert result["gender"]["Man"] < result["gender"]["Woman"]
Example no. 9
import logging
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # FATAL
logging.getLogger("tensorflow").setLevel(logging.FATAL)
from deepface import DeepFace
from deepface.commons import realtime
import youtube_dl
import cv2
from cv2 import VideoCapture
from glob import glob                  # used by glob("nested/*/*") below
from collections import defaultdict    # used to group files by person id

###########################################YOUTUBE

f_list = glob("nested/*/*")
ids = defaultdict(list)
for f in f_list:
    ids[f.split('/')[1]].append(f)

dfs = DeepFace.find(img_path=f_list, db_path="nested")

models = ["VGG-Face", "Facenet", "OpenFace", "DeepFace", "DeepID"]
realtime.analysis(cap=cap,
                  out=out,
                  db_path="friends",
                  model_name=models[1],
                  distance_metric='cosine',
                  enable_face_analysis=False,
                  embd_saved=True)
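cap and out are not defined in this excerpt; a sketch of how they might be created from the imports above, assuming out is a cv2.VideoWriter for the annotated output, with the URL, codec and frame rate as placeholders rather than values from the original script:

ydl_opts = {"format": "best"}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    info = ydl.extract_info("https://www.youtube.com/watch?v=...", download=False)
    stream_url = info["url"]             # direct media URL for the chosen format

cap = cv2.VideoCapture(stream_url)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
out = cv2.VideoWriter("out.mp4", cv2.VideoWriter_fourcc(*"mp4v"), 25.0, (width, height))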
Example no. 10
def faceRecognise(pic, database):
    df = DeepFace.find(
        img_path=pic, db_path=database
    )  # Storing the results in a pandas DataFrame to be analysed
    print(df.head())
Example no. 11
    def search_for_face_db(self, img, path):
        result = DeepFace.find(img, db_path=path)
        return not result.empty
Example no. 12
'''
The package structure should be 
users
    -user1
        -photo1.jpg
        -photo2.jpg
    -user2
        .
        .
        .

'''
import os
from deepface import DeepFace
import pandas as pd

if __name__ == "__main__":
    os.chdir('..')
    CWD = os.getcwd()
    DB_DIR = os.path.join(CWD, 'photos_database')
    USERS_DIR = os.path.join(CWD, 'users')
    for user in os.listdir(USERS_DIR):
        df = DeepFace.find(img_path=os.path.join(USERS_DIR, user),
                           db_path=DB_DIR)
        print("USER: "******"identity"], row["VGG-Face_cosine"])
Example no. 13
print(galleries.size)
print(probes.size)


def get_file_name(df, id):
    name = df[df['id'] == id]['file name']
    return name.tolist()


print(get_file_name(galleries, 3))
print(get_file_name(probes, 3))
print(get_file_name(probes, 3))

# should be general
here = os.path.dirname(os.path.realpath(__file__))
path = here + "/second/images"

for i in range(1, 121):
    #     gallery = get_file_name(galleries, i)[0]
    for img in get_file_name(probes, i):
        # gpath = osp.join(path, gallery)
        ppath = osp.join(path, img)
        # print(gpath,ppath,"pgpath")
        res = DeepFace.find(ppath,
                            db_path=here + '/second/gallery',
                            enforce_detection=False,
                            detector_backend='retinaface')
        # print(img, res)
        logging.info(img)
        logging.info(res)
Example no. 14
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # set before TensorFlow is imported via deepface

from deepface import DeepFace
import json
#-----------------------------------------

print("Bulk tests")

print("-----------------------------------------")

print("Large scale face recognition")

df = DeepFace.find(img_path="dataset/img1.jpg",
                   db_path="dataset"
                   #, model_name = 'Dlib'
                   )
print(df.head())

print("-----------------------------------------")

print("Ensemble for find function")
df = DeepFace.find(img_path="dataset/img1.jpg",
                   db_path="dataset",
                   model_name="Ensemble")
print(df.head())

print("-----------------------------------------")

print("Bulk face recognition tests")
Example no. 15
def take_attendance_by_photo(request):
    """
    Working example for encoding and format:

    import base64
    import requests
    import json
    with open("photo.jpg", "rb") as f:
        s_raw = f.read()
    s_b64 = base64.b64encode(s_raw)
    r = requests.post(
        url='http://localhost:8000/teacher-api/take-attendance/',
        data=json.dumps({'session_id': 1, 'raw_picture': str(s_b64, 'ascii')})
    )
    """
    serializer = TakeAttendanceSerializer(json.loads(request.body))
    photo = base64.b64decode(serializer.data['raw_picture'])
    session_id = serializer.data['session_id']
    """
    Store the submitted photo in a temporary folder
    """
    file_name = file_utils.store_temp_file(photo, '.jpg')
    if imghdr.what(file_name) is None:
        return HttpResponse(json.dumps(
            {'error_msg': 'raw_picture is not valid image data.'}),
                            status=400)
    """
    Initialize the folder for this class session if it does not exist
    """
    course_schedule = get_object_or_404(CourseSchedule, id=session_id)
    is_created_now = file_utils.create_directory(session_id)
    if is_created_now:
        # copy out student images and pre-process it
        print("COPY STUDENT PHOTO")
        students = [
            coursestudent.students
            for coursestudent in CourseStudent.objects.filter(
                course_class=course_schedule.course_class)
        ]
        print(students)
        for student in students:
            src = os.path.join(MEDIA_ROOT, student.image.path)
            dst = os.path.join(STORAGE_DIR, str(session_id),
                               student.student_id + '.jpg')
            print(src, dst)
            shutil.copyfile(src, dst)
    """
    Use deepface to detect a face; return early if no face is detected
    """
    from deepface import DeepFace
    try:
        DeepFace.detectFace(file_name, detector_backend='dlib')
    except ValueError:
        return HttpResponse(json.dumps({'face_is_detected': False}),
                            status=200)
    """
    Use deepface to check the photo against the folder; return if no face matches
    """
    try:
        df = DeepFace.find(file_name,
                           db_path=os.path.join(file_utils.STORAGE_DIR,
                                                str(session_id)),
                           distance_metric='euclidean_l2',
                           detector_backend='dlib')
    except ValueError:
        return HttpResponse(json.dumps({
            'error_msg': 'Server failed to detect a face in the existing database images'
        }),
                            status=500)
    if len(df) == 0:
        return HttpResponse(json.dumps({
            'face_is_detected': True,
            'matched_student_id': None,
        }),
                            status=200)
    result = [(row['distance'], row['identity']) for idx, row in df.iterrows()]
    result.sort()
    best_match_file_name = result[0][1]
    student_id, __ = os.path.basename(best_match_file_name).split('.')
    """
    Take attendance of user in the database, return matched student id
    """
    student_obj = StudentProfile.objects.get(student_id=student_id)
    if Attendance.objects.filter(attendee=student_obj.user,
                                 course_schedule=course_schedule).count() == 0:
        Attendance.objects.create(
            attendee=student_obj.user,
            course_schedule=course_schedule,
            late=timezone.now() >
            (course_schedule.open_time +
             timedelta(minutes=LATE_ATTENDANCE_CUTOFF_MINUTES)))
        attendance_taken_before = False
    else:
        attendance_taken_before = True
    return HttpResponse(json.dumps({
        'face_is_detected': True,
        'matched_student_id': student_id,
        'attendance_taken_before': attendance_taken_before,
    }),
                        status=200)
Example no. 16
def predict_result():
    df = DeepFace.find(img_path='image.jpg',
                       db_path='./data/train/',
                       model=model,
                       enforce_detection=False)
    return df['identity'][0].split('/')[3]
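The hard-coded split('/')[3] above only works for one specific db_path depth and path separator. A hedged variant that derives the person label from the matched file's parent folder instead; the function name is illustrative, and the folder layout is assumed to follow the './data/train/<person>/image.jpg' pattern used in the example:

import os

def predict_label(df):
    if df.empty:
        return None
    identity = df["identity"].iloc[0]                     # e.g. ./data/train/<person>/image.jpg
    return os.path.basename(os.path.dirname(identity))    # -> "<person>"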