def enroll_face(image, label,
                embeddings_path="/home/ubuntu/Downloads/snehafc/face-recognition-master/face_embeddings.npy",
                labels_path="/home/ubuntu/Downloads/snehafc/face-recognition-master/labels.cpickle",
                down_scale=1.0):
    """Enroll a single face from *image* under *label*.

    Detects faces in the image, extracts the embedding of the (single)
    detected face and appends it to the on-disk embedding/label store.

    Args:
        image: Input image (as expected by ``detect_faces``).
        label: Identity label to associate with the face.
        embeddings_path: Path of the ``.npy`` file holding the embedding matrix.
        labels_path: Path of the pickled label list.
        down_scale: Scale factor forwarded to ``detect_faces``.

    Returns:
        True when a face was enrolled, False when no face was found.

    Raises:
        ValueError: If more than one face is detected — enrolling would
            otherwise silently pick an arbitrary face and could store the
            embedding under the wrong identity (matches the behavior of the
            sibling ``enroll_face`` definition in this file).
    """
    faces = detect_faces(image, down_scale)
    if len(faces) < 1:
        # No face found — nothing to enroll.
        return False
    if len(faces) > 1:
        # Previously this only printed "ok" and proceeded with faces[0],
        # which could enroll the wrong person's embedding.
        raise ValueError("Multiple faces not allowed for enrolling")
    face = faces[0]
    face_embeddings = extract_face_embeddings(image, face, shape_predictor,
                                              face_recognizer)
    add_embeddings(face_embeddings, label, embeddings_path=embeddings_path,
                   labels_path=labels_path)
    print("training done")
    return True
def enroll_face(image, label, embeddings_path="face_embeddings.npy",
                labels_path="labels.pickle", down_scale=1.0):
    """Enroll exactly one face from *image* under *label*.

    Args:
        image: Input image (as expected by ``detect_faces``).
        label: Identity label to store alongside the embedding.
        embeddings_path: ``.npy`` file receiving the embedding.
        labels_path: Pickle file receiving the label.
        down_scale: Scale factor forwarded to ``detect_faces``.

    Returns:
        True on success, False when no face is detected.

    Raises:
        ValueError: When more than one face is present in the image.
    """
    detections = detect_faces(image, down_scale)
    if not detections:
        return False
    if len(detections) > 1:
        raise ValueError("Multiple faces not allowed for enrolling")
    embedding = extract_face_embeddings(image, detections[0],
                                        shape_predictor, face_recognizer)
    add_embeddings(embedding, label, embeddings_path=embeddings_path,
                   labels_path=labels_path)
    return True
def loop(cam_obj, servo):
    """Run the interactive camera loop.

    Grabs frames, overlays the currently-selected filter on every detected
    face, displays the frame, and steers the servo toward a single detected
    face (or back to center, 7.5, otherwise).

    Key bindings:
        n — next filter, b — previous filter, q — quit.

    Args:
        cam_obj: Camera handle consumed by ``get_image``.
        servo: Servo handle passed to ``ServoThread``.
    """
    filter_idx = 0
    while True:
        # Grab a frame from the camera and decorate every detected face.
        frame = get_image(cam_obj)
        faces = detect_faces(frame)
        for face in faces:
            add_filters(frame, filter_idx, face)
        # Show the decorated frame.
        cv2.imshow("Smile!", frame)
        if len(faces) == 1:
            # Track the single face: aim the servo at its horizontal position.
            (frame_h, frame_w) = (frame.shape[0], frame.shape[1])
            (face_x, face_y, face_w, face_h) = faces[0]
            ServoThread(get_position(frame_w, face_x, face_w), servo).start()
        else:
            # Zero or multiple faces: return the servo to center (7.5).
            ServoThread(7.5, servo).start()
        key = cv2.waitKey(1)
        if key == ord('n'):
            filter_idx = (filter_idx + 1) % f_length
        elif key == ord('b'):
            filter_idx = (filter_idx - 1) % f_length
        elif key == ord('q'):
            # Quit on 'q'.
            break
# NOTE(review): this chunk begins mid-expression — the opening of the
# `ap.add_argument(...)` call (and the argparse setup for `ap`) lies above
# the visible region, so the fragment below cannot be restructured safely.
default="labels.pickle")
# Parsed CLI arguments as a plain dict: expects "embeddings", "labels", "image".
args = vars(ap.parse_args())
# Load the enrolled embedding matrix and the parallel label list.
embeddings = np.load(args["embeddings"])
# NOTE(review): pickle data should be opened in binary mode ("rb") on
# Python 3; as written this assumes Python 2's cPickle — confirm target
# interpreter.
labels = cPickle.load(open(args["labels"]))
# dlib models: 5-point landmark predictor plus the ResNet face embedder.
shape_predictor = dlib.shape_predictor(
    "models/"
    "shape_predictor_5_face_landmarks.dat")
face_recognizer = dlib.face_recognition_model_v1(
    "models/"
    "dlib_face_recognition_resnet_model_v1.dat")
# Read the query image; keep an untouched BGR copy for drawing results.
image = cv2.imread(args["image"])
image_original = image.copy()
# dlib expects RGB while OpenCV loads BGR.
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
faces = detect_faces(image)
for face in faces:
    # Embed each detected face and look up the closest enrolled identity.
    embedding = extract_face_embeddings(image, face, shape_predictor,
                                        face_recognizer)
    label = recognize_face(embedding, embeddings, labels)
    (x1, y1, x2, y2) = face.left(), face.top(), face.right(), face.bottom()
    # NOTE(review): cv2.CV_AA was removed in OpenCV 3+ (use cv2.LINE_AA) —
    # confirm the installed OpenCV version, otherwise this raises
    # AttributeError.
    cv2.rectangle(image_original, (x1, y1), (x2, y2), (255, 120, 120), 2,
                  cv2.CV_AA)
    # Draw the recognized label just above the face box.
    cv2.putText(image_original, label[0], (x1, y1 - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 0), 2)
cv2.imshow("Image", image_original)
cv2.waitKey(0)