# Main video-capture / face-recognition loop.
# NOTE(review): this fragment arrived whitespace-mangled (collapsed onto one
# line) and truncated mid-call; indentation and the final closing paren are
# reconstructed. Confirm against the original file.
capture = cv2.VideoCapture(0)
while True:
    # Capture video feed
    ret, frame = capture.read()
    # BUG FIX: cv2.VideoCapture delivers frames in BGR channel order, so the
    # correct grayscale conversion is COLOR_BGR2GRAY. The original used
    # COLOR_RGB2GRAY, which applies the red/blue luminance weights to the
    # wrong channels.
    image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face.detect_face(image)
    # Idiom fix: compare against None with 'is not', never '!='.
    if faces is not None:
        # Idiom fix: unpack each detection directly instead of
        # 'for i in range(0, len(faces))' plus indexing.
        for (x, y, w, h) in faces:
            # Save the x and y coordinates of the face so that cropped
            # sub-searches can be normalized again later.
            # (Translated from the original German comment.)
            x_face = x
            y_face = y
            if config.RECOGNITION_ALGORITHM == 1:
                crop = face.crop(image, x, y, w, h)
            else:
                crop = face.resize(face.crop(image, x, y, w, h))
            # cv2 face recognizers return a (label, confidence) pair;
            # a lower confidence value means a stronger match.
            label, confidence = model.predict(crop)
            cv2.rectangle(frame, (x, y), (x + w, y + h), 255)
            cv2.putText(frame, str(h), (x + w, y + h + 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
            if label != -1:
                # If person is close to the camera use smaller POSITIVE_THRESHOLD
                if h > 190 and confidence < 50:
                    cv2.putText(
                        frame, config.personen[label], (x - 3, y - 8),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.5, (255, 255, 255), 1
                    )
                    # NOTE(review): source was cut off inside this call; only
                    # the closing parenthesis was added to complete it.
                    cv2.putText(
                        frame, str(confidence), (x - 2, y + h + 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1
                    )
# Face-detection / labeling fragment.
# NOTE(review): this fragment arrived whitespace-mangled and truncated after
# the final 'elif' header; indentation and that branch body are reconstructed
# and flagged below. Confirm against the original file.
faces = face.detect_faces(image)
label = 0
if faces is not None:
    # Idiom fix: iterate detections directly instead of
    # 'for i in range(0, len(faces))' plus faces[i] indexing.
    for candidate in faces:
        # Guard against malformed detector output before unpacking.
        if candidate is None:
            print("Bad face object None")
            continue
        if len(candidate) != 4:
            print("Bad face object {0}".format(candidate))
            continue
        x, y, w, h = candidate
        # x and y coordinates of the face
        x_face = x
        y_face = y
        if config.RECOGNITION_ALGORITHM == 1:
            crop = face.crop(image, x, y, w, h)
        else:
            crop = face.resize(face.crop(image, x, y, w, h))
        # confidence: the lower the value, the stronger the match
        confidence = model.predict(crop)
        # Counts processed faces; also used below to tell "no face seen"
        # (label == 0) apart from "at least one face seen".
        label = label + 1
match = "None"
label_str = "None"
if label != -1 and label != 0:
    label_str = config.user_label(label)
    # BUG FIX: the original used a Python 2 print *statement*
    # ('print label_str, confidence') in a file that otherwise calls the
    # print() function; normalized to the function form so the code also
    # runs under Python 3.
    print(label_str, confidence)
    # the closer confidence is to zero the stronger the match
    if confidence < 0.6 * config.POSITIVE_THRESHOLD:
        label_str = 'Strong:' + label_str + str(confidence)
    elif confidence < config.POSITIVE_THRESHOLD:
        # NOTE(review): the source was truncated here; this body is
        # reconstructed to mirror the 'Strong:' branch above — verify
        # against the original file.
        label_str = 'Weak:' + label_str + str(confidence)