def videoLoop(self):
        try:
            # keep looping over frames until we are instructed to stop
            i = 0
            while not self.stopEvent.is_set():
                # grab the next frame from the video stream
                flag, self.frame = vs.read()

                # skip face detection on the very first frame
                if i == 0:
                    i += 2
                    faceCoordinates = None
                else:

                    try:
                        # detect a face and unpack its bounding box
                        faceCoordinates = fd.face_detect(self.frame)
                        startX, startY, endX, endY = faceCoordinates
                        if startX is not None:
                            # draw the box on the frame and crop the face
                            # region for classification
                            image = fd.draw_rect(self.frame, startX, startY,
                                                 endX, endY)
                            face_img = fd.face_crop(self.frame,
                                                    faceCoordinates,
                                                    face_shape=(128, 128))
                            # classify the cropped face and show the
                            # probability for each of the seven classes
                            im = img_to_array(face_img)
                            im = np.expand_dims(im, axis=0)
                            result = self.model.predict(im)
                            for idx in range(0, 7):
                                self.resultLabel[idx].config(
                                    text=str(round(result[0][idx], 4)))
                    except Exception:
                        # no usable face in this frame; keep streaming
                        pass

                self.frame = imutils.resize(self.frame, width=300)
                self.frame = cv2.flip(self.frame, 1)
                # OpenCV represents images in BGR order; however PIL
                # represents images in RGB order, so we need to swap
                # the channels, then convert to PIL and ImageTk format
                image = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
                image = Image.fromarray(image)
                image = ImageTk.PhotoImage(image)

                # if the panel is None, we need to initialize it
                if self.panelA is None:
                    self.panelA = tki.Label(image=image)
                    self.panelA.image = image
                    self.panelA.pack(side="left", padx=10, pady=10)
                    self.panelA.place(x=20, y=20)
                # otherwise, simply update the panel
                else:
                    self.panelA.configure(image=image)
                    self.panelA.image = image

        except RuntimeError:
            print("[INFO] caught a RuntimeError")
def refreshFrame(frame, startX, startY, endX, endY, index):
    # draw the detected face's bounding box (if any) and show the frame in
    # the window named by the module-level windowName
    if startX is not None:
        fd.draw_rect(frame, startX, startY, endX, endY, index)
    cv2.imshow(windowName, frame)
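
# A hedged usage sketch for refreshFrame: read frames from the default camera,
# reuse the (assumed) fd.face_detect helper from the first example, and update
# the display window until 'q' is pressed. windowName, the camera index, and
# the trailing index argument passed to refreshFrame are assumptions.
windowName = "Face Detection"
cap = cv2.VideoCapture(0)
while True:
    grabbed, frame = cap.read()
    if not grabbed:
        break
    # face_detect is assumed to return (startX, startY, endX, endY), with
    # None entries when no face is found
    startX, startY, endX, endY = fd.face_detect(frame)
    refreshFrame(frame, startX, startY, endX, endY, 0)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()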
Example #3
# load the face crops and their integer labels from the training folder,
# train the recognizer on them, and save the trained model to disk
faces, faceID = fr.labels_for_training_data(
    'E:/python_programs/face detection/training')
face_recognizer = fr.train_classifier(faces, faceID)
face_recognizer.save('trainingData.yml')
name = {0: "person1", 1: "person2", 2: "person3", 3: "person4"}
# alternatively, load a previously trained model instead of retraining:
# face_recognizer=cv2.face.LBPHFaceRecognizer_create()
# face_recognizer.read('E:/python_programs/face detection/trainingData.yml')
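
# The loop below relies on test_img, gray_img and faces_detected, which are
# not defined in this snippet. A minimal sketch of the missing setup, assuming
# an OpenCV Haar-cascade detector and a hypothetical test image path:
test_img = cv2.imread('E:/python_programs/face detection/test.jpg')
gray_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
faces_detected = face_cascade.detectMultiScale(gray_img, scaleFactor=1.3,
                                               minNeighbors=5)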

for face in faces_detected:
    (x, y, w, h) = face
    # crop the grayscale face region (rows span y..y+h, columns span x..x+w)
    roi_gray = gray_img[y:y + h, x:x + w]
    label, confidence = face_recognizer.predict(roi_gray)
    print("confidence:", confidence)
    print("label:", label)
    fr.draw_rect(test_img, face)

    # LBPH confidence is a distance (lower is better); skip weak matches
    if confidence > 100:
        continue
    predicted_name = name[label]
    fr.put_text(test_img, predicted_name, x, y)
    # resize the annotated image so it fits on screen
    resized_img = cv2.resize(test_img, (1000, 700))  # resize to 1000x700
    cv2.imshow("face_detected:", resized_img)
    # use cv2.waitKey instead of time.sleep so the HighGUI window actually
    # gets a chance to render before moving on to the next face
    cv2.waitKey(1000)
    fr.to_audio(name, label)
    # record the most recent prediction to a text file
    with open('name.txt', 'w') as f:
        f.write("the person is {}".format(name[label]))
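
# Assumed cleanup once every detected face has been processed: keep the last
# window open until a key is pressed, then close all OpenCV windows.
cv2.waitKey(0)
cv2.destroyAllWindows()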