def labeltrans(x1, x2):
    x1 = int(x1)
    ctr = 0
    nctr = 0
    face_recognizer = cv2.face.LBPHFaceRecognizer_create()
    face_recognizer.read('trainingData.yml')
    name = {
        0: "Aakarsh Anubhav",
        1: "Soham Samanta",
        2: "Kondala Snehasis Rao",
        3: "Apurva Shahabadi",
        4: "Shivam Singh",
        5: "Kunwar Pratyush",
        6: "Harsh Anand",
        7: "Ch. Harika",
        8: "Maasid Zafar"
    }

    cap = cv2.VideoCapture(0)
    while ctr <= 21:

        ret, test_img = cap.read()
        faces_detected, gray_img = fr.faceDetection(test_img)
        for (x, y, w, h) in faces_detected:
            cv2.rectangle(test_img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        for face in faces_detected:
            (x, y, w, h) = face
            roi_gray = gray_img[y:y + h, x:x + w]
            label, confidence = face_recognizer.predict(roi_gray)
            #print("label:",label)
            fr.draw_rect(test_img, face)
            predicted_name = name[label]
            if confidence < 72:
                print(x1, " ", ctr, " ", nctr, " ", label, " ", confidence,
                      " ", predicted_name, " ", x2)
                fr.put_text(test_img, predicted_name, x, y)
                if x1 == label and x2 == predicted_name:
                    ctr = ctr + 1
                if ctr == 10:
                    messagebox.showinfo('AKKI', 'Operation Successful')
                    cap.release()
                    cv2.destroyAllWindows()
                    return ctr
                if x1 != label or x2 != predicted_name:
                    nctr = nctr + 1
                if nctr == 10:
                    messagebox.showinfo('Akki', 'Operation unsuccessful')
                    cap.release()
                    cv2.destroyAllWindows()
                    return 0
        cv2.imshow('Cyber Ninja System', test_img)

        k = cv2.waitKey(30) & 0xff
        if k == 27 or ctr == 20:
            break

    cap.release()
    cv2.destroyAllWindows()

    return
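All of the examples on this page call into a small helper module imported as fr (faceRecognition), whose source is not shown here. The sketch below is an assumed, minimal reconstruction based only on how the examples call it (faceDetection, labels_for_training_data, train_classifier, draw_rect, put_text); a few examples also use extra helpers such as put_text2, eyes_detection and draw_elipse that are not sketched. Treat the folder layout, cascade choice and drawing parameters as illustrative, not as the original implementation.

# faceRecognition.py -- assumed helper module, reconstructed from usage in the examples
import os

import cv2
import numpy as np


def faceDetection(test_img):
    # Convert to grayscale and detect faces with a Haar cascade.
    # Returns (faces, gray_img), matching how the examples unpack it.
    gray_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)
    face_haar = cv2.CascadeClassifier(
        cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    faces = face_haar.detectMultiScale(gray_img, scaleFactor=1.3, minNeighbors=5)
    return faces, gray_img


def labels_for_training_data(directory):
    # Expects one sub-folder per person, where the folder name is the integer label.
    faces, face_ids = [], []
    for label_dir in os.listdir(directory):
        label_path = os.path.join(directory, label_dir)
        if not os.path.isdir(label_path):
            continue
        for filename in os.listdir(label_path):
            img = cv2.imread(os.path.join(label_path, filename))
            if img is None:
                continue
            detected, gray = faceDetection(img)
            for (x, y, w, h) in detected:
                faces.append(gray[y:y + h, x:x + w])
                face_ids.append(int(label_dir))
    return faces, face_ids


def train_classifier(faces, face_ids):
    # Train an LBPH recognizer on the cropped grayscale faces.
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.train(faces, np.array(face_ids))
    return recognizer


def draw_rect(test_img, face):
    (x, y, w, h) = face
    cv2.rectangle(test_img, (x, y), (x + w, y + h), (255, 0, 0), thickness=5)


def put_text(test_img, text, x, y):
    cv2.putText(test_img, text, (x, y), cv2.FONT_HERSHEY_DUPLEX, 2,
                (255, 255, 255), 4)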
Example #2
def checkIn(name):
    global db
    print("About to take a Picture")
    path = "/home/pi/Downloads/" + name + ".jpg"
    with picamera.PiCamera() as camera:
        camera.resolution = (1280, 720)
        camera.capture(path)
    print("Picture taken.")
    test_img = cv2.imread("/home/pi/Desktop/Face Recognition/TestImages/" +
                          name + ".jpg")
    faces_detected, gray_img = fr.faceDetection(test_img)
    print("faces detected: ", faces_detected)
    #use for testing purpose:-
    face_recognizer = cv2.createLBPHFaceRecognizer()
    face_recognizer.load('/home/pi/Desktop/Project3/trainingData.yml')
    predicted_name = ""
    for face in faces_detected:
        (x, y, w, h) = face
        roi_gray = gray_img[y:y + h, x:x + w]
        label, confidence = face_recognizer.predict(roi_gray)
        print("confidence: ", confidence)
        print("label: ", label)
        fr.draw_rect(test_img, face)
        predicted_name = namesAndPath[label]
        fr.put_text(test_img, predicted_name, x, y)

    print(predicted_name + " You are checked-in successfully")
    db[predicted_name] = datetime.datetime.utcnow()
    return
Example #3
def face():
    speak('Processing Facial Recognition')
    test_img = cv2.imread('saved/{}.jpg'.format(always))
    faces_detected, gray_img = fr.faceDetection(test_img)
    speak("I found your data from our system")

    faces, faceID = fr.labels_for_training_data('test')
    face_recognizer = fr.train_classifier(faces, faceID)
    face_recognizer.write('trainingData.yml')

    users_list()

    for face in faces_detected:
        (x, y, w, h) = face
        roi_gray = gray_img[y:y + h, x:x + w]
        label, confidence = face_recognizer.predict(
            roi_gray)  #predicting the label of given image
        print("Confidence:", confidence)
        print("label:", label)
        fr.draw_rect(test_img, face)
        predicted_name = name[label]
        if (
                confidence > 37
        ):  #If confidence more than 37 then don't print predicted face text on screen
            welcome()
            break
        fr.put_text(test_img, predicted_name, x, y)

    resized_img = cv2.resize(test_img, (1000, 1000))
    cv2.imshow("Face", resized_img)
    cv2.waitKey(0)  #Waits indefinitely until a key is pressed
    cv2.destroyAllWindows()
def attendance():
    face_recognizer = cv2.face.LBPHFaceRecognizer_create()
    face_recognizer.read('trainingData.yml')  #Load saved training data
    students = Student.query.all()
    name = Counter()
    i = 0
    for ele in students:
        ele.status = 0
        name[i] = ele
        i += 1
    db.session.commit()
    cap = cv2.VideoCapture(0)
    start = time.time()
    while i and name:
        ret, test_img = cap.read(
        )  # captures frame and returns boolean value and captured image
        faces_detected, gray_img = fr.faceDetection(test_img)

        for (x, y, w, h) in faces_detected:
            cv2.rectangle(test_img, (x, y), (x + w, y + h), (255, 0, 0),
                          thickness=7)

        resized_img = cv2.resize(test_img, (1000, 700))
        cv2.imshow('face detection Tutorial ', resized_img)
        cv2.waitKey(10)

        for face in faces_detected:
            (x, y, w, h) = face
            roi_gray = gray_img[y:y + h, x:x + w]
            label, confidence = face_recognizer.predict(
                roi_gray)  #predicting the label of given image
            print("confidence:" + str(confidence))
            print("label:" + str(label))
            fr.draw_rect(test_img, face)
            a = Student.query.filter_by(roll=label).first()
            if a is not None and confidence < 39 and a.status == 0:
                a.status = 1
                db.session.commit()
                #winsound.Beep(2500, 1200)
                duration = 1  # seconds
                freq = 440  # Hz
                os.system('play -nq -t alsa synth {} sine {}'.format(
                    duration, freq))
                i -= 1
                fr.put_text(test_img, str(a), x, y)
        end = time.time()
        if cv2.waitKey(30) & 0xFF == ord('q'):
            break
        if int(end - start) >= 180:
            break
    cap.release()
    cv2.destroyAllWindows()

    return redirect(url_for('result'))
def train():

    # read time table and class names
    read_tt.read_tt_and_names()
    # making excel template
    excel_template.make_template()

    # This module takes images stored on disk and performs face recognition
    test_img_name = str(input("image name : "))
    test_img = cv2.imread('TestImages/' + test_img_name +
                          '.jpg')  # test_img path

    # detect all the faces in image
    faces_detected, gray_img = fr.faceDetection(test_img)
    print("faces_detected:", faces_detected)
    # counter for keeping face count
    face_count = len(faces_detected)
    print("face_count:", face_count)

    faces, faceID = fr.labels_for_training_data('trainingImages')
    face_recognizer = fr.train_classifier(faces, faceID)
    # store the trained data so it can be reused later without repeating the training process
    face_recognizer.write('trainingData.yml')
    # reuse this .yml file in future runs to avoid the training time

    # creating dictionary containing names for each label
    name = np.load("names.npy", allow_pickle=True, fix_imports=True)
    name = name.item()

    # id of students present in class
    present = []

    for face in faces_detected:
        (x, y, w, h) = face
        roi_gray = gray_img[y:y + h, x:x + w]
        label, confidence = face_recognizer.predict(
            roi_gray)  # predicting the label of given image
        present.append(label)
        fr.draw_rect(test_img, face)  # drawing rectangle on face
        predicted_name = name[label]
        fr.put_text(test_img, predicted_name, x,
                    y)  # printing name of the person

    # # print ids of present students
    # present.sort()
    print(present)
    # cv2.imshow("test image", resized_img)
    cv2.waitKey(0)  # Waits indefinitely until a key is pressed
    cv2.destroyAllWindows()


# train()
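A brief note on the names.npy file loaded in train() above: numpy stores a Python dict as a 0-d object array, which is why the code calls .item() after np.load to get the dict back. A minimal sketch of how such a file could be produced (the mapping shown is illustrative, not from the original project):

import numpy as np

name = {0: "Alice", 1: "Bob"}  # label -> student name (illustrative values)
np.save("names.npy", name)     # numpy wraps the dict in a 0-d object array

loaded = np.load("names.npy", allow_pickle=True).item()  # unwrap back to a plain dict
print(loaded[0])               # -> "Alice"
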
def test():
    #This module captures images via webcam and performs face recognition
    face_recognizer = cv2.face.LBPHFaceRecognizer_create()
    face_recognizer.read('trainingData.yml')  #Load saved training data

    students = Student.query.all()
    name = {}
    i = 1
    for ele in students:
        ele.status = False
        name[i] = ele
        i = i + 1

    cap = cv2.VideoCapture(0)

    while True:
        ret, test_img = cap.read(
        )  # captures frame and returns boolean value and captured image
        faces_detected, gray_img = fr.faceDetection(test_img)

        for (x, y, w, h) in faces_detected:
            cv2.rectangle(test_img, (x, y), (x + w, y + h), (255, 0, 0),
                          thickness=7)

        resized_img = cv2.resize(test_img, (1000, 700))
        #cv2.imshow('face detection Tutorial ',resized_img)
        cv2.waitKey(10)

        for face in faces_detected:
            (x, y, w, h) = face
            roi_gray = gray_img[y:y + h, x:x + w]
            label, confidence = face_recognizer.predict(
                roi_gray)  #predicting the label of given image
            print("confidence:", confidence)
            print("label:", label)
            fr.draw_rect(test_img, face)
            predicted_name = str(name[label])
            if confidence < 37:  # treat it as a match only when the confidence value is below 37
                detected = Student.query.filter_by(Roll=predicted_name).first()
                if detected is not None:
                    detected.status = True
                    db.session.commit()
                fr.put_text(test_img, predicted_name, x, y)

        resized_img = cv2.resize(test_img, (1000, 700))
        cv2.imshow('face recognition tutorial ', resized_img)
        if cv2.waitKey(10) == ord('q'):  #wait until 'q' key is pressed
            break

    cap.release()
    cv2.destroyAllWindows()
Example #7
def find_face(imageNames):
        haar_file = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
        test_img = cv2.imread("dest_folder/"+imageNames)
        test_img = cv2.resize(test_img, (2000,2000))
        test_img = detect_faces(haar_file,test_img)
        #cv2.imshow("bikas",test_img)
        #name={0:"Bikas",1:"Bishal"}
        name={}
        db = MySQLdb.connect('localhost', 'root','','facetag_db')
        cur = db.cursor()
        cur.execute("SELECT * FROM login_tbl")
        for row in cur.fetchall():
                #imageName=row[2]
                uidd=int(row[0])
                uidd=uidd-1
                name.update({uidd:row[3]})
        faces_detected,gray_img=fr.faceDetection(test_img)
        print("faces_detected:",faces_detected)
        face_recognizer=cv2.face.LBPHFaceRecognizer_create()
        face_recognizer.read('trainingData.yml')
        for face in faces_detected:
                (x,y,w,h)=face
                roi_gray=gray_img[y:y+h,x:x+w]
                label,confidence=face_recognizer.predict(roi_gray)#predicting the label of given image
                print("confidence:",confidence)
                print("label:",label)
                fr.draw_rect(test_img,face)
                predicted_name=name[label]
                uidds=label+1
                if(userId !=uidds):
                        if(confidence>47):  # skip tagging when the confidence value is above 47 (a weak match)
                                continue
                        db = MySQLdb.connect('localhost', 'root','','facetag_db')
                        cur = db.cursor()
                        sql = "INSERT INTO tagimg_tbl(id,img_id,uid,date,status)VALUES (null,'"+str(image_ids)+"','"+str(label+1)+"', '2020-04-10','0')"
                        try:
                                cur.execute(sql)
                                db.commit()
                        except MySQLdb.Error:
                                db.rollback()
                fr.put_text(test_img,predicted_name,x,y)
        resized_img=cv2.resize(test_img,(700,700))
        cv2.imshow("face detection",resized_img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
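The INSERT in the example above builds its SQL by string concatenation. MySQLdb also supports parameterized queries, which avoid quoting mistakes and SQL injection. A minimal reworking of that statement might look like the sketch below; the table and column names come from the example, image_ids and label are stand-ins for the variables used there, and the hard-coded date is kept as in the original:

import MySQLdb

image_ids = 1  # stand-in for the image id used in the example
label = 0      # stand-in for the predicted label

db = MySQLdb.connect('localhost', 'root', '', 'facetag_db')
cur = db.cursor()
sql = ("INSERT INTO tagimg_tbl (id, img_id, uid, date, status) "
       "VALUES (NULL, %s, %s, %s, %s)")
try:
    cur.execute(sql, (str(image_ids), str(label + 1), '2020-04-10', '0'))
    db.commit()
except MySQLdb.Error:
    db.rollback()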
Example #8
def b():
    import os
    import cv2
    import numpy as np
    import faceRecognition as fr
    from tkinter import messagebox  # needed for the pop-up shown below

    face_recognizer = cv2.face.LBPHFaceRecognizer_create()
    face_recognizer.read("D:\ML PROJECT\TrainingData.yml")
    name = {0: "Alex", 1: "Ashwini", 2: "Amanda"}

    cap = cv2.VideoCapture(0)
    while True:
        ret, test_img = cap.read()
        face_detected, gray_img = fr.faceDetection(test_img)

        for (x, y, w, h) in face_detected:
            cv2.rectangle(test_img, (x, y), (x + w, y + h), (0, 255, 0),
                          thickness=5)

        resized_img = cv2.resize(test_img, (1000, 700))
        cv2.imshow("Face Detection", resized_img)
        cv2.waitKey(10)
        for face in face_detected:
            (x, y, w, h) = face
            roi_gray = gray_img[y:y + h, x:x + w]
            label, confidence = face_recognizer.predict(roi_gray)
            print("Confidence:", confidence)
            print("Label:", label)
            fr.draw_rect(test_img, face)
            predicted_name = name[label]

            if label == 1:
                messagebox.showinfo("Authentication", "face detected")

            fr.put_text(test_img, predicted_name, x, y)

        resized_img = cv2.resize(test_img, (1000, 700))
        cv2.imshow("Face detection", resized_img)
        if cv2.waitKey(10) == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
Example #9
    def get_frame(self):

        if self.vid.isOpened():
            ret, frame = self.vid.read()
            predicted_name = 'Take attendance'
            label = 0

            if ret:
                # Return a boolean success flag and the current frame converted to RGB
                faces_detected, gray_img = fr.faceDetection(frame)

                for (x, y, w, h) in faces_detected:
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0),
                                  thickness=7)

                resized_img = cv2.resize(frame, (1000, 700))

                #print(faces_detected)

                for face in faces_detected:
                    (x, y, w, h) = face
                    roi_gray = gray_img[y:y + h, x:x + w]

                    label, confidence = face_recognizer.predict(
                        roi_gray)  #predicting the label of given image
                    if confidence < 39:
                        print("confidence:", confidence)
                        print("label:", label)
                        fr.draw_rect(frame, face)

                        predicted_name = name[label]
                        print(predicted_name)
                        fr.put_text(frame, predicted_name, x, y)
                        break

                return (ret, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB),
                        predicted_name, label)
            else:
                return (ret, None, 'Take attendance', 0)
        else:
            return (False, None, 'Take attendance', 0)  # self.vid was never opened, so there is no ret value here
Example #10
def a():
    import cv2
    import os
    import numpy as np
    import faceRecognition as fr
    test_img = cv2.imread("D:\ML PROJECT\Testimg\Ma.jpg")
    faces_detected, gray_img = fr.faceDetection(test_img)
    print("Face Detected:", faces_detected)
    '''for (x,y,w,h) in faces_detected:
        cv2.rectangle(test_img,(x,y),(x+w,y+h),(0,255,0),thickness=5)

    resized_img = cv2.resize(test_img,(1000,700))
    cv2.imshow("Face detection",resized_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()'''

    # faces,faceID = fr.labels_for_training_data("D:\ML PROJECT\TrainingData")
    # face_recognizer = fr.train_classifier(faces,faceID)
    # face_recognizer.save('trainingData.yml')
    face_recognizer = cv2.face.LBPHFaceRecognizer_create()
    face_recognizer.read("D:\ML PROJECT\TrainingData.yml")
    name = {0: "Alex", 1: "Ashwini", 2: "Amanda"}

    for face in faces_detected:
        (x, y, w, h) = face
        roi_gray = gray_img[y:y + h, x:x + w]
        label, confidence = face_recognizer.predict(roi_gray)
        print("Confidence:", confidence)
        print("Label:", label)
        fr.draw_rect(test_img, face)
        predicted_name = name[label]
        if confidence > 37:
            continue
        fr.put_text(test_img, predicted_name, x, y)

    resized_img = cv2.resize(test_img, (800, 600))
    cv2.imshow("Face detection", resized_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #11
def dete():
    def audio():
        my_text = "user found"
        language = 'en'
        myobj = gTTS(text=my_text, lang=language, slow=False)
        myobj.save("welcome.mp3")
        os.system("welcome.mp3")

    test_img = cv2.imread('TestImages/divyanshu.jpg')

    faces_detected, gray_img = fr.faceDetection(test_img)
    print("faces_detected:", faces_detected)

    faces, faceID = fr.labels_for_training_data('trainingImages')
    face_recognizer = fr.train_classifier(faces, faceID)
    face_recognizer.write('trainingData.yml')

    name = {0: "chirag", 1: "divyanshu"}

    for face in faces_detected:
        (x, y, w, h) = face
        roi_gray = gray_img[y:y + h, x:x + w]
        label, confidence = face_recognizer.predict(roi_gray)
        print("confidence:", confidence)
        print("label:", label)
        fr.draw_rect(test_img, face)
        predicted_name = name[label]
        if (confidence > 37):
            continue

        fr.put_text(test_img, predicted_name, x, y)
        audio()

    resized_img = cv2.resize(test_img, (1000, 1000))
    cv2.imshow("face dtecetion ", resized_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #12
def poison(load_img):
    test_img = imagePoisoning()
    faces_detected, gray_img = fr.faceDetection(load_img)
    face_recognizer = cv2.face.LBPHFaceRecognizer_create()
    face_recognizer.read('trainingData.yml')
    name = {
        0: "Priyanka",
        1: "Kangana",
    }  #creating dictionary containing names for each label
    for face in faces_detected:
        (x, y, w, h) = face
        roi_gray = gray_img[y:y + h, x:x + w]
        label, confidence = face_recognizer.predict(
            roi_gray)  #predicting the label of given image
        print("confidence:", confidence)
        print("label:", label)
        fr.draw_rect(load_img, face)
        predicted_name = name[label]
        if (
                confidence < 100
        ):  #If confidence more than 100 then don't print predicted face text on screen
            fr.put_text(load_img, predicted_name, x, y)
        else:
            poison(test_img)

while True:
    # loop head assumed: cap, face_recognizer, name, name1 and the duplicate counter c are set up earlier in the original script
    ret, test_img = cap.read()
    faces_detected, gray_img = fr.faceDetection(test_img)

    resized_img = cv2.resize(test_img, (800, 700))
    cv2.imshow('face detection Tutorial ', resized_img)
    cv2.waitKey(10)

    for face in faces_detected:
        (x, y, w, h) = face
        roi_gray = gray_img[y:y + h, x:x + w]
        label, confidence = face_recognizer.predict(
            roi_gray)  #predicting the label of given image
        print("confidence:", confidence)
        print("label:", label)
        fr.draw_rect(test_img, face)
        predicted_name = name[label]
        predicted_name1 = name1[label]
        if confidence < 55:  # if confidence is less than 55, print the predicted name on screen
            c = c + 1
            fr.put_text(test_img, predicted_name, x, y)
            fr.put_text(test_img, predicted_name1 + "  " + str(c), x, y - 24)
            if c > 2:
                fr.put_text(test_img, "Duplicate", x, y - 50)
    resized_img = cv2.resize(test_img, (1000, 700))
    cv2.imshow('face recognition', resized_img)
    if cv2.waitKey(10) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()

while True:
    # loop head assumed: cap, face_recognizer and name are set up earlier in the original script
    ret, test_img = cap.read()  # captures frame and returns boolean value and captured image
    faces_detected, gray_img = fr.faceDetection(test_img)

    for (x, y, w, h) in faces_detected:
        cv2.rectangle(test_img, (x, y), (x + w, y + h), (255, 0, 0),
                      thickness=7)

    resized_img = cv2.resize(test_img, (1000, 700))
    cv2.imshow('face detection Tutorial ', resized_img)
    cv2.waitKey(10)

    for face in faces_detected:
        (x, y, w, h) = face
        roi_gray = gray_img[y:y + h, x:x + w]
        label, confidence = face_recognizer.predict(
            roi_gray)  #predicting the label of given image
        print("confidence:", confidence)
        print("label:", label)
        fr.draw_rect(test_img, face)
        predicted_name = name[label]
        if confidence < 50:  # if confidence is less than 50, print the predicted name on screen
            fr.put_text(test_img, predicted_name, x + 20, y - 20)

    resized_img = cv2.resize(test_img, (1000, 700))
    cv2.imshow('face recognition tutorial ', resized_img)
    if cv2.waitKey(10) == ord('q'):  #wait until 'q' key is pressed
        break

cap.release()
cv2.destroyAllWindows()
Example #15
    face_recognizer = cv2.face.LBPHFaceRecognizer_create()
    face_recognizer.read('data/model/classifier.yml')

    cap = cv2.VideoCapture(0)

    while True:

        ret, test_img = cap.read()
        faces_detected, gray_img = fr.faceDetection(test_img)

        resized_img = cv2.resize(test_img, (1000, 700))
        cv2.imshow("Face Detection", resized_img)
        cv2.waitKey(10)

        for face in faces_detected:

            (x, y, w, h) = face
            roi_gray = gray_img[y:y + h, x:x + w]
            label, conf = face_recognizer.predict(roi_gray)
            print(label, conf)

            fr.draw_rect(test_img, face)
            fr.put_text(test_img, people[label], x, y)

        resized_img = cv2.resize(test_img, (1000, 700))
        cv2.imshow("Face Detection", resized_img)
        if cv2.waitKey(10) == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
Example #16
    0: "Actor",
    1: "CSE",
    2: "Studnt",
    3: "Engineering",
    4: "Teacher",
    6: "sir",
    7: "Garu"
}
for face in faces_detected:
    c = c + 1
    (x, y, w, h) = face
    roi_gray = gray_img[y:y + h, x:x + w]
    label, confidence = face_recognizer.predict(
        roi_gray)  #predicting the label of given image
    print("confidence:", confidence)
    print("label:", label)
    fr.draw_rect(test_img, face)
    predicted_name = name[label]
    predicted_name1 = name1[label]
    if confidence > 38:  # if confidence is more than 38, skip printing the predicted name on screen
        continue
    fr.put_text(test_img, predicted_name, x, y)
    fr.put_text(test_img, predicted_name1 + " " + str(c), x, y - 23)

resized_img = cv2.resize(test_img, (1000, 1000))
cv2.imshow("face detection Attendance", resized_img)
cv2.waitKey(0)  #Waits indefinitely until a key is pressed
cv2.destroyAllWindows()
Example #17
import cv2
import pandas as pd
import time
import faceRecognition as fr

face_recognizer = cv2.face.LBPHFaceRecognizer_create()
face_recognizer.read('trainDATA.yml')
cascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)
df = pd.read_csv(r"StudentDetails\StudentDetails.csv")
video_path = str(input('Enter Location Of Students Photo (Without ["]): '))
cap = cv2.VideoCapture(video_path)
col_names = ['Id', 'Name']
while True:
    ret, test_img = cap.read()
    #test_img = cv2.resize(test_img, (1080, 720))
    faces_detected, gray_img = fr.faceDetection(test_img)

    for face in faces_detected:
        (x, y, w, h) = face
        roi_gray = gray_img[y:y + h, x:x + w]
        Id, confidence = face_recognizer.predict(roi_gray)
        fr.draw_rect(test_img, face)
        if confidence > 67:
            aa = df.loc[df['Id'] == Id]['Name'].values
            fr.put_text(test_img, str(aa), x, y)
    cv2.imshow('face detection Tutorial', test_img)
    if cv2.waitKey(0) == ord('q'):  # press 'q' to quit; any other key advances to the next frame
        break
cap.release()
cv2.destroyAllWindows()
Example #18
while True:
    ret, test_img = cap.read(
    )  # captures frame and returns boolean value and captured image
    faces_detected, gray_img = fr.faceDetection(test_img)

    for (x, y, w, h) in faces_detected:
        cv2.rectangle(test_img, (x, y), (x + w, y + h), (255, 0, 0),
                      thickness=7)

    for face in faces_detected:
        (x, y, w, h) = face
        roi_gray = gray_img[y:y + h, x:x + w]
        label, confidence = face_recognizer.predict(
            roi_gray)  # predicting the label of given image
        print("confidence:", confidence)
        print("label:", label)
        fr.draw_rect(test_img, face)
        predicted_name = name[label]
        if confidence < 30:  # If confidence less than 30 then don't print predicted face text on screen
            fr.put_text(test_img, predicted_name, x, y)
        else:
            fr.put_text(test_img, "don't know", x, y)
    resized_img = cv2.resize(test_img, (720, 480))
    cv2.imshow('Face Recognition Project ', resized_img)
    if cv2.waitKey(1) == ord('q'):  # wait until 'q' key is pressed
        break

cap.release()
cv2.destroyAllWindows()
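Several of these examples gate the on-screen label on an LBPH "confidence" threshold (30, 37, 39, 50, 55 and so on). For LBPH that value is a distance, so a smaller number means a closer match. A tiny helper like the one below (an assumed convenience, not part of any of the original scripts) makes that reading explicit:

def is_known_face(confidence, threshold=50.0):
    """Return True when the LBPH distance is below the chosen threshold."""
    return confidence < threshold

# usage inside a recognition loop:
#   label, confidence = face_recognizer.predict(roi_gray)
#   if is_known_face(confidence):
#       fr.put_text(test_img, name[label], x, y)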

while True:
    # loop head assumed: cap, face_recognizer and name are set up earlier in the original script
    ret, test_img = cap.read()  # captures frame and returns boolean value and captured image
    faces_detected, gray_img = fr.faceDetection(test_img)

    for (x, y, w, h) in faces_detected:
        cv2.rectangle(test_img, (x, y), (x + w, y + h), (255, 0, 0),
                      thickness=7)

    #resized_img = cv2.resize(test_img, (1000, 700))
    #cv2.imshow('face detection Tutorial ',resized_img)
    #cv2.waitKey(10)

    for face in faces_detected:
        (x, y, w, h) = face
        roi_gray = gray_img[y:y + h, x:x + w]
        label, confidence = face_recognizer.predict(
            roi_gray)  #predicting the label of given image
        print("confidence:", confidence)
        print("label:", label)
        fr.draw_rect(test_img, face)
        predicted_name = name[label]
        if confidence > 55:  # print the predicted name only when the confidence value is above 55
            fr.put_text(test_img, predicted_name, x, y)

    resized_img = cv2.resize(test_img, (600, 570))
    cv2.imshow('FR', resized_img)
    if cv2.waitKey(10) == ord('q'):  #wait until 'q' key is pressed
        break

cap.release()
cv2.destroyAllWindows()
Example #20
face_recognizer.load('/Users/harshit/Desktop/New folder/FaceRegspyder/trainingData.yml')
name = {0:"Pratik",1:"Unknown",2:"Harshit"}

cap=cv2.VideoCapture(0)

while True:
    ret,test_img=cap.read()# captures frame and returns boolean value and captured image
    faces_detected,gray_img=fr.faceDetection(test_img)

    for (x,y,w,h) in faces_detected:
      cv2.rectangle(test_img,(x,y),(x+w,y+h),(255,0,0),thickness=4)
    
    resized_img = cv2.resize(test_img, (1000, 700))
    cv2.waitKey(10)

    fr.put_text(test_img,"Press Q To Exit ",70,70)    
    for face in faces_detected:
        (x,y,w,h)=face
        roi_gray=gray_img[y:y+h, x:x+w]
        label,confidence=face_recognizer.predict(roi_gray)  # predicting the label of the given image
        print("label:",label)
        fr.draw_rect(test_img,face)
        predicted_name=name[label]

        if(label==0):  # labels 0 and 2 are known faces; anything else is left unlabelled
             fr.put_text(test_img,predicted_name,x,y)
        #continue
        elif(label==2):
             fr.put_text(test_img,predicted_name,x,y)
    for face in faces_detected:
        (x, y, w, h) = face
        roi_gray = gray_img[y:y + h, x:x + w]  # roi - region of interest
        label, confidence = face_recognizer.predict(roi_gray)
        print("Confidence :", confidence)
        print(f"Label-{label}, {def_const.labels[label]}")
        predicted_name = def_const.labels[label]

        # Eyes detection
        eyes_detected = fr.eyes_detection(roi_gray)
        for eyes in eyes_detected:
            fr.draw_elipse(test_img, eyes, face)
            # fr.draw_circle(test_img, eyes, face)

        disp_text = f"{predicted_name},{str(int(confidence))}"
        fr.draw_rect(test_img, face)
        fr.put_text(test_img, disp_text, x, y)

        # Open your eyes
        if len(eyes_detected) == 0:
            cv2.putText(test_img, "Open your eyes", (x, y + h + 30),
                        cv2.FONT_HERSHEY_PLAIN, 1.5, (255, 0, 0), 2,
                        cv2.LINE_AA)

    resized_img = cv2.resize(test_img, (1000, 700))

    cv2.imshow("Face Detection with confidence, Press q ro exit", resized_img)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
Example #22
bath = [1, 1, 1]
while True:
    ret, test_img = cap.read()
    faces_detected, gray_img = fr.faceDetection(test_img)
    print("face Detected: ", faces_detected)
    for (x, y, w, h) in faces_detected:
        cv2.rectangle(test_img, (x, y), (x + w, y + h), (0, 255, 0),
                      thickness=5)
    for face in faces_detected:
        (x, y, w, h) = face
        roi_gray = gray_img[y:y + h, x:x + w]
        label, confidence = face_recognizer.predict(roi_gray)
        print("Confidence :", confidence)
        print("label :", label)
        fr.draw_rect(test_img, face)
        predicted_name = name[label]
        if (bath[label]):
            fr.put_text(test_img, predicted_name + '\n 20 litres granted', x,
                        y)
        else:
            fr.put_text(test_img,
                        predicted_name + '\n maximum capacity reached', x, y)
        time.sleep(5)
        bath[label] = 0

    resized_img = cv2.resize(test_img, (1000, 700))

    cv2.imshow("face detection ", resized_img)
    if cv2.waitKey(10) == ord('q'):
        break
Example #23
    51: "Avisek shaw",
    60: "Agnibesh Mukherjee",
    64: "Abhishek Charan",
    37: "Madhurima Maji",
    32: "Arnab kumar Pati"
}

for face in faces_detected:
    (x, y, w, h) = face
    roi_gray = gray_img[y:y + h, x:x + w]
    label, confidence = face_recognizer.predict(roi_gray)
    print(f"confidence : {confidence}")
    print(f"label : {label}")
    if confidence > 150:
        fr.draw_rect(test_img, face)
        fr.put_text(test_img, "Not Registered", x, y)
    else:

        label1.append(int(label))
        #confidence1.append(confidence)
        fr.draw_rect(test_img, face)
        predicted_name = name[label]
        fr.put_text(test_img, predicted_name, x, y)

resized_imag = cv2.resize(test_img, (1000, 700))
cv2.imshow("face detection", resized_imag)
cv2.waitKey(0)
cv2.destroyAllWindows()

print(f"label : {label1}")
Example #24
while True:  # loop head assumed: cap, face_recognizer and name are set up earlier in the original script
    ret,test_img = cap.read()
    faces_detected,gray_img = fr.faceDetection(test_img)
    #print("Face Detected: ",faces_detected)
    for(x,y,w,h) in faces_detected:
        cv2.rectangle(test_img,(x,y),(x+w,y+h),(0,255,0),thickness=3)
        
    for face in faces_detected:
        (x,y,w,h) = face
        roi_gray = gray_img[y:y+h,x:x+w]
        label,confidence = face_recognizer.predict(roi_gray)
        print("label: ",label)
        print("confidence: ",confidence)
        fr.draw_rect(test_img, face)
        predict_name = name[label]
        # a lower confidence value means a better match
        if(confidence>85):
            fr.put_text(test_img, 'Unknown', x, y)
            continue
        fr.put_text(test_img,predict_name, x, y)
        
    resized_img = cv2.resize(test_img,(700,700))
    
    cv2.imshow('image',resized_img)
    
    if cv2.waitKey(10) == ord('q'):
        cv2.destroyAllWindows()
        cap.release()
        break


    
Example #25
def main():

    ##        faces,faceID = fr.labels_for_training_data("training")
    ##        face_recognizer = fr.train_classifier(faces,faceID)
    ##        face_recognizer.save("trainingData.yml")

    face_recognizer = cv2.face.LBPHFaceRecognizer_create()
    face_recognizer.read("trainingData.yml")

    name = {
        0: "Fariha",
        1: "Inara",
        2: "Arowa",
        3: "Ankon",
        4: "Farhan",
        5: "Minhaz",
        6: "Afifa",
        7: "Karishma",
        8: "Nafisa"
    }
    gender = {
        0: "Female",
        1: "Male",
        2: "Female",
        3: "Male",
        4: "Male",
        5: "Male",
        6: "Female",
        7: "Female",
        8: "Female"
    }
    relation = {
        0: "Sister",
        1: "Cousin",
        2: "Aunt",
        3: "Uncle",
        4: "Nephew",
        5: "Child",
        6: "Enemy",
        7: "Mother",
        8: "Daughter"
    }
    prof = {
        0: "Student",
        1: "MUA",
        2: "Student",
        3: "Student",
        4: "Student",
        5: "Student",
        6: "Villain",
        7: "Student",
        8: "Student"
    }

    cap = cv2.VideoCapture(0)

    while True:
        ret, test_img = cap.read()
        faces_detected, gray_img = fr.faceDetection(test_img)

        for face in faces_detected:
            (x, y, w, h) = face
            roi_gray = gray_img[y:y + h, x:x + w]
            label, confidence = face_recognizer.predict(roi_gray)
            print("confidence:", confidence)
            print("label:", label)
            fr.draw_rect(test_img, face)
            predicted_name = name[label]
            predicted_relation = relation[label]
            predicted_prof = prof[label]

            if (confidence < 50):
                if (cv2.waitKey(1) == ord('s')):
                    # each person's .wav file is named after the entry in `name`,
                    # so a single call covers every label (0-8)
                    winsound.PlaySound(predicted_name + ".wav",
                                       winsound.SND_ASYNC)

                fr.put_text(test_img, predicted_name, x, y)
                fr.put_text2(test_img, predicted_relation, x, y)
                fr.put_text2(test_img, predicted_prof, x, y + h - 50)

                if (gender[label] == "Female"):
                    s_img = cv2.imread("pin2.png")
                elif (gender[label] == "Male"):
                    s_img = cv2.imread("pin3.png")
                r_img = cv2.imread("relation.png")
                p_img = cv2.imread("prof.png")
                x_offset = x
                y_offset = y - 50
                test_img[y_offset:y_offset + s_img.shape[0],
                         x_offset:x_offset + s_img.shape[1]] = s_img
                x1 = x + 10
                y1 = y + 10
                test_img[y1:y1 + r_img.shape[0],
                         x1:x1 + r_img.shape[1]] = r_img
                x1 = x + 10
                y1 = y + h - 40
                test_img[y1:y1 + p_img.shape[0],
                         x1:x1 + p_img.shape[1]] = p_img

        resized_img = cv2.resize(test_img, (1000, 700))
        cv2.imshow("face detection", resized_img)
        if cv2.waitKey(1) == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
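The overlay code in main() above pastes the small icon images into the frame with raw numpy slicing, which raises a broadcasting error whenever an icon would run past the edge of the frame. A small assumed helper (not part of the original script) that clips the paste region first avoids that:

def paste_icon(frame, icon, x_offset, y_offset):
    """Copy icon onto frame at (x_offset, y_offset), clipped to the frame bounds."""
    h, w = icon.shape[:2]
    y0, y1 = max(0, y_offset), min(frame.shape[0], y_offset + h)
    x0, x1 = max(0, x_offset), min(frame.shape[1], x_offset + w)
    if y1 <= y0 or x1 <= x0:
        return  # the icon lies completely outside the frame
    frame[y0:y1, x0:x1] = icon[y0 - y_offset:y1 - y_offset,
                               x0 - x_offset:x1 - x_offset]

# e.g. paste_icon(test_img, s_img, x, y - 50) instead of the direct slice assignment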
# For subsequent runs, load the saved training data instead of retraining
face_recognizer = cv2.face.createLBPHFaceRecognizer()
face_recognizer.load(
    '/Users/harshit/Desktop/New folder/FaceRegspyder/trainingData.yml')

name = {0: "Pratik", 1: "Prea", 2: "Harshit"}
for face in faces_detected:
    (x, y, w, h) = face
    roi_gray = gray_img[y:y + h, x:x + w]
    label, confidence = face_recognizer.predict(roi_gray)
    #predicting the label of given image
    #print("confidence:",confidence)
    print("label:", label)
    fr.draw_rect(test_img, face)
    predicted_name = name[label]
    #if(confidence>37):#If confidence less than 37 then don't print predicted face text on screen
    #fr.put_text(test_img,"Unknown",x,y)#continue
    #fr.put_text(test_img,predicted_name,x,y)
    if label == 0 or label == 2:  # known labels (Pratik and Harshit); everyone else is shown as "Unknown"
        fr.put_text(test_img, predicted_name, x, y)  #continue
    else:
        fr.put_text(test_img, "Unknown", x, y)

resized_img = cv2.resize(test_img, (1000, 1000))
cv2.imshow("Face Recognition Tutorial", resized_img)
cv2.waitKey(0)  #Waits indefinitely until a key is pressed
cv2.destroyAllWindows()