Code Example #1
File: run.py  Project: elamrily/face-detection
# Assumes: cv2 (OpenCV), a face-recognition helper module imported as fr,
# and camera_port / camera_width / camera_height defined at module level.
def restart_visualisation():

    # initialize the bounding box coordinates of the object we are going to track
    initBB = None
    # initialize the FPS throughput estimator
    fps = None
    # id of img
    img_id = 0

    video_capture = cv2.VideoCapture(camera_port) # + cv2.CAP_DSHOW)
    # video_capture.set(cv2.CAP_PROP_FRAME_WIDTH,camera_width)
    # video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT,camera_height)

    faces,faceID,name_dict=fr.labels_for_training_data('../face_reco_app/dataset')
    face_recognizer=fr.train_classifier(faces,faceID)
    param_track = False

    state = 0
    while state == 0:
        # Capture frame-by-frame
        ret, test_img = video_capture.read()

        if test_img is None:
            video_capture = cv2.VideoCapture(camera_port) # + cv2.CAP_DSHOW)
            # video_capture.set(cv2.CAP_PROP_FRAME_WIDTH,camera_width)
            # video_capture.set(cv2.CAP_PROP_FRAME_HEIGHT,camera_height)
            
            ret, test_img = video_capture.read()

        if test_img is not None:
            faces_detected,gray_img = fr.detect_face(test_img)
            key = cv2.waitKey(1) & 0xFF

            # face detection part (runs while tracking is not enabled)
            if not param_track:
                for face in faces_detected:
                    (x,y,w,h)=face
                    roi_gray=gray_img[y:y+h,x:x+w]
                    label,confidence=face_recognizer.predict(roi_gray)
                    print("\nconfidence:",confidence)
                    print("label     :",label)
                    if confidence < 100:
                        fr.draw_rect(test_img,face)
                        predicted_name=name_dict[label]
                        fr.put_text(test_img,predicted_name,x,y)
                    else:
                        fr.draw_rect(test_img,face)
                        predicted_name="John Doe"
                        fr.put_text(test_img,predicted_name,x,y)
            
            ret, jpeg = cv2.imencode('.jpg', test_img)  

            yield (b'--frame\r\n'b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')
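The generator above yields frames in the multipart/x-mixed-replace format commonly used for MJPEG streaming. A minimal sketch of how it could be wired into a web route, assuming Flask and that restart_visualisation() is importable; the app and route name below are illustrative, not part of the original project:

from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_feed')
def video_feed():
    # Each yielded chunk is one JPEG frame in the multipart stream;
    # the browser keeps replacing the previous frame with the next one.
    return Response(restart_visualisation(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)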
Code Example #2
import cv2
# Assumes a face-recognition helper module (providing faceDetection, draw_rect, put_text) imported as fr.

test_img=cv2.imread(r'C:\Users\msath\Desktop\LBPH\Test.jpeg')      #Give path to the image which you want to test


faces_detected,gray_img=fr.faceDetection(test_img)
print("face Detected: ",faces_detected)


face_recognizer=cv2.face.LBPHFaceRecognizer_create()
face_recognizer.read(r'C:\Users\msath\Desktop\LBPH\trainingData.yml')  #Give path of where trainingData.yml is saved

name={0:"Saan"}             #Change names accordingly.  If you want to recognize only one person then write:- name={0:"name"} thats all. Dont write for id number 1. 

for face in faces_detected:
    (x,y,w,h)=face
    roi_gray=gray_img[y:y+h,x:x+w]  # crop the width with w, not h
    label,confidence=face_recognizer.predict(roi_gray)
    print ("Confidence :",confidence)
    print("label :",label)
    fr.draw_rect(test_img,face)
    predicted_name=name[label]
    if confidence > 60:   # LBPH confidence is a distance; lower means a closer match
        fr.put_text(test_img,'Unknown',x,y)
        continue
    fr.put_text(test_img,predicted_name,x,y)

resized_img=cv2.resize(test_img,(1000,700))

cv2.imshow("face detection ", resized_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
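Example #2 only reads a pre-trained model. A rough sketch of the training step that typically produces such a trainingData.yml; the dataset layout (one sub-folder per numeric label) and paths here are assumptions, not taken from the original project:

import os
import cv2
import numpy as np

faces, labels = [], []
for label_dir in os.listdir('dataset'):                      # e.g. dataset/0, dataset/1, ...
    for file_name in os.listdir(os.path.join('dataset', label_dir)):
        img = cv2.imread(os.path.join('dataset', label_dir, file_name),
                         cv2.IMREAD_GRAYSCALE)
        if img is not None:
            faces.append(img)                                # grayscale face crop
            labels.append(int(label_dir))                    # folder name doubles as label id

face_recognizer = cv2.face.LBPHFaceRecognizer_create()
face_recognizer.train(faces, np.array(labels))
face_recognizer.write('trainingData.yml')                    # the file read back above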
Code Example #3
    # Detecting the face.
    faces_detected, gray_img = fr.faceDetection(test_img)
    print("face Detected: ", faces_detected)

    for (x, y, w, h) in faces_detected:
        # Draws bounding boxes.
        cv2.rectangle(test_img, (x, y), (x + w, y + h), (0, 255, 0),
                      thickness=2)

    for face in faces_detected:
        # face is a tuple of the x, y coordinates and the width and height of the detected face.
        (x, y, w, h) = face
        # Refer: https://stackoverflow.com/questions/57068928/opencv-rect-conventions-what-is-x-y-width-height  to know about conventions.
        roi_gray = gray_img[y:y + h, x:x + w]  # slice the columns with w, not h

        # face_recogniser.predict() returns a tuple having label and confidence score
        label, confidence = face_recognizer.predict(roi_gray)
        print("Confidence :", confidence)
        print("label :", label)

        fr.draw_rect(test_img, face)  # Draws bounding boxes
        # Extracting value from the dictionary "name"
        predicted_name = name[label]
        fr.put_text(test_img, predicted_name, x, y)

    resized_img = cv2.resize(test_img, (700, 700))

    cv2.imshow("face detection ", resized_img)
    if cv2.waitKey(10) == ord('q'):
        break
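All of these snippets lean on a small helper module (imported as fr or FR) that is not shown. A hypothetical sketch of what such helpers usually look like, built on a Haar cascade; the exact signatures and the cascade file are assumptions, not the project's actual code:

import cv2

def faceDetection(test_img):
    # Convert to grayscale and detect faces with a Haar cascade; returns the
    # face rectangles plus the grayscale image used later for prediction.
    gray_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)
    face_haar = cv2.CascadeClassifier(
        cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    faces = face_haar.detectMultiScale(gray_img, scaleFactor=1.3, minNeighbors=5)
    return faces, gray_img

def draw_rect(test_img, face):
    # Draw a green bounding box around one detected face.
    (x, y, w, h) = face
    cv2.rectangle(test_img, (x, y), (x + w, y + h), (0, 255, 0), thickness=2)

def put_text(test_img, text, x, y):
    # Write the predicted name at the top-left corner of the bounding box.
    cv2.putText(test_img, text, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)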
Code Example #4
print("Faces Detected",faces_detected)

#Initializing the training
faces,face_ID=FR.labels_for_training_data(r'G:\Data Science Project\LBPH Face Recongition\capture\0')
face_recognizer=FR.train_classifier(faces,face_ID)
face_recognizer.save(r'G:\Data Science Project\LBPH Face Recongition\trainingData.yml')

name={0:'Yashraj\nData Scientist'}

for face in faces_detected:
    (x,y,w,h)=face
    roi_gray=gray_img[y:y+h,x:x+w]  # rows span h, columns span w
    label,confidence=face_recognizer.predict(roi_gray)
    print(label)
    print(confidence)
    FR.draw_rect(test_img,face)
    predict_name=name[label]
    FR.put_text1(test_img,predict_name,x,y)
    
resized_img=cv2.resize(test_img,(1000,700))

cv2.imshow("face detection ", resized_img)
cv2.waitKey(0)
cv2.destroyAllWindows()