def test_image(image_to_check, known_names, known_face_encodings, tolerance=0.6, show_distance=False):
    """Match every face found in *image_to_check* against the known encodings.

    For each detected face, prints (via print_result) every known name whose
    encoding is within *tolerance* of the face, or "unknown_person" if none
    match.  Faces are compared with face_recognition.face_distance; the
    default tolerance of 0.6 is the library's recommended match threshold.

    :param image_to_check: path to the image file to scan
    :param known_names: list of labels, parallel to known_face_encodings
    :param known_face_encodings: list of 128-d face encodings
    :param tolerance: maximum face distance that still counts as a match
    :param show_distance: forwarded to print_result to include the distance
    """
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if unknown_image.shape[1] > 1600:
        scale_factor = 1600.0 / unknown_image.shape[1]
        # scipy.misc.imresize was deprecated in SciPy 1.0 and removed in 1.3;
        # ndimage.zoom is the supported replacement (order=1 ~ bilinear, the
        # old imresize default).  Zoom factor 1 on the channel axis keeps RGB.
        from scipy import ndimage
        unknown_image = ndimage.zoom(unknown_image, (scale_factor, scale_factor, 1), order=1)

    unknown_encodings = face_recognition.face_encodings(unknown_image)

    for unknown_encoding in unknown_encodings:
        distances = face_recognition.face_distance(known_face_encodings, unknown_encoding)
        result = list(distances <= tolerance)

        if True in result:
            # Plain loop (not a throwaway list comprehension) since we only
            # want the print_result side effect.
            for is_match, name, distance in zip(result, known_names, distances):
                if is_match:
                    print_result(image_to_check, name, distance, show_distance)
        else:
            print_result(image_to_check, "unknown_person", None, show_distance)
def detect_faces_in_image(file_stream):
    """Report whether an uploaded image contains a face, and whether that
    face is Obama's.

    Only the FIRST face found in the upload is compared.  Returns a JSON
    response (via jsonify — presumably Flask's, imported at module level;
    not visible in this excerpt) of the form:
        {"face_found_in_image": bool, "is_picture_of_obama": bool}

    :param file_stream: file-like object holding the uploaded image
    """
    # Pre-calculated face encoding of Obama generated with face_recognition.face_encodings(img).
    # A face encoding is a 128-d float vector; hard-coding it avoids re-encoding
    # the reference image on every request.
    known_face_encoding = [
        -0.09634063, 0.12095481, -0.00436332, -0.07643753, 0.0080383, 0.01902981, -0.07184699, -0.09383309,
        0.18518871, -0.09588896, 0.23951106, 0.0986533, -0.22114635, -0.1363683, 0.04405268, 0.11574756,
        -0.19899382, -0.09597053, -0.11969153, -0.12277931, 0.03416885, -0.00267565, 0.09203379, 0.04713435,
        -0.12731361, -0.35371891, -0.0503444, -0.17841317, -0.00310897, -0.09844551, -0.06910533, -0.00503746,
        -0.18466514, -0.09851682, 0.02903969, -0.02174894, 0.02261871, 0.0032102, 0.20312519, 0.02999607,
        -0.11646006, 0.09432904, 0.02774341, 0.22102901, 0.26725179, 0.06896867, -0.00490024, -0.09441824,
        0.11115381, -0.22592428, 0.06230862, 0.16559327, 0.06232892, 0.03458837, 0.09459756, -0.18777156,
        0.00654241, 0.08582542, -0.13578284, 0.0150229, 0.00670836, -0.08195844, -0.04346499, 0.03347827,
        0.20310158, 0.09987706, -0.12370517, -0.06683611, 0.12704916, -0.02160804, 0.00984683, 0.00766284,
        -0.18980607, -0.19641446, -0.22800779, 0.09010898, 0.39178532, 0.18818057, -0.20875394, 0.03097027,
        -0.21300618, 0.02532415, 0.07938635, 0.01000703, -0.07719778, -0.12651891, -0.04318593, 0.06219772,
        0.09163868, 0.05039065, -0.04922386, 0.21839413, -0.02394437, 0.06173781, 0.0292527, 0.06160797,
        -0.15553983, -0.02440624, -0.17509389, -0.0630486, 0.01428208, -0.03637431, 0.03971229, 0.13983178,
        -0.23006812, 0.04999552, 0.0108454, -0.03970895, 0.02501768, 0.08157793, -0.03224047, -0.04502571,
        0.0556995, -0.24374914, 0.25514284, 0.24795187, 0.04060191, 0.17597422, 0.07966681, 0.01920104,
        -0.01194376, -0.02300822, -0.17204897, -0.0596558, 0.05307484, 0.07417042, 0.07126575, 0.00209804
    ]

    # Load the uploaded image file
    img = face_recognition.load_image_file(file_stream)
    # Get face encodings for any faces in the uploaded image
    unknown_face_encodings = face_recognition.face_encodings(img)

    face_found = False
    is_obama = False

    if len(unknown_face_encodings) > 0:
        face_found = True
        # See if the first face in the uploaded image matches the known face of Obama.
        # compare_faces returns one bool per known encoding; we pass exactly one.
        match_results = face_recognition.compare_faces(
            [known_face_encoding], unknown_face_encodings[0])
        if match_results[0]:
            is_obama = True

    # Return the result as json
    result = {
        "face_found_in_image": face_found,
        "is_picture_of_obama": is_obama
    }
    return jsonify(result)
def scan_known_people(known_people_folder):
    """Build parallel lists of (name, face encoding) from a folder of images.

    Each image's base filename (without extension) becomes the person's name.
    Images with several faces keep only the first (with a warning); images
    with no detectable face are skipped (with a warning).

    :param known_people_folder: directory containing labelled face images
    :return: tuple (known_names, known_face_encodings)
    """
    known_names = []
    known_face_encodings = []

    for image_path in image_files_in_folder(known_people_folder):
        person_name = os.path.splitext(os.path.basename(image_path))[0]
        encodings = face_recognition.face_encodings(
            face_recognition.load_image_file(image_path))

        if len(encodings) > 1:
            click.echo(
                "WARNING: More than one face found in {}. Only considering the first face."
                .format(image_path))

        # Guard clause: nothing usable in this image.
        if not encodings:
            click.echo(
                "WARNING: No faces found in {}. Ignoring file.".format(image_path))
            continue

        known_names.append(person_name)
        known_face_encodings.append(encodings[0])

    return known_names, known_face_encodings
from PIL import Image
from cv.face_recognition import face_recognition

# Read the photo into a numpy array.
image = face_recognition.load_image_file("biden.jpg")

# Locate faces with the pre-trained convolutional neural network model.
# "cnn" is more accurate than the default HOG detector but much slower on
# CPU; with an nvidia GPU and a CUDA-enabled dlib build it is fast.
# number_of_times_to_upsample=0 skips upsampling (good for large images).
# See also: find_faces_in_picture.py
face_locations = face_recognition.face_locations(
    image, number_of_times_to_upsample=0, model="cnn")

print("I found {} face(s) in this photograph.".format(len(face_locations)))

for face_location in face_locations:
    # Each location is a (top, right, bottom, left) pixel box.
    top, right, bottom, left = face_location
    print(
        "A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}"
        .format(top, left, bottom, right))

    # Crop the face out of the numpy array and display it.
    face_image = image[top:bottom, left:right]
    Image.fromarray(face_image).show()
from cv.face_recognition import face_recognition

# Rather than a boolean match/no-match, face_distance gives a similarity
# score.  The model is trained so that a distance of 0.6 or less is a match;
# a stricter cutoff (say 0.55) trades fewer false positives for more false
# negatives.  The scale is not linear and is not a "percent match" — smaller
# simply means more similar.

# Reference images for the two known people.
known_obama_image = face_recognition.load_image_file("obama.jpg")
known_biden_image = face_recognition.load_image_file("biden.jpg")

# Encode each reference face (first face found per image).
obama_face_encoding = face_recognition.face_encodings(known_obama_image)[0]
biden_face_encoding = face_recognition.face_encodings(known_biden_image)[0]

known_encodings = [obama_face_encoding, biden_face_encoding]

# Encode the probe image the same way.
image_to_test = face_recognition.load_image_file("obama2.jpg")
image_to_test_encoding = face_recognition.face_encodings(image_to_test)[0]

# One distance per known encoding, in the same order as known_encodings.
face_distances = face_recognition.face_distance(known_encodings, image_to_test_encoding)
# https://gist.github.com/ageitgey/1ac8dbe8572f3f533df6269dab35df65 from cv.face_recognition import face_recognition import picamera import numpy as np # Get a reference to the Raspberry Pi camera. # If this fails, make sure you have a camera connected to the RPi and that you # enabled your camera in raspi-config and rebooted first. camera = picamera.PiCamera() camera.resolution = (320, 240) output = np.empty((240, 320, 3), dtype=np.uint8) # Load a sample picture and learn how to recognize it. print("Loading known face image(s)") obama_image = face_recognition.load_image_file("obama_small.jpg") obama_face_encoding = face_recognition.face_encodings(obama_image)[0] # Initialize some variables face_locations = [] face_encodings = [] while True: print("Capturing image.") # Grab a single frame of video from the RPi camera as a numpy array camera.capture(output, format="rgb") # Find all the faces and face encodings in the current frame of video face_locations = face_recognition.face_locations(output) print("Found {} faces in image.".format(len(face_locations))) face_encodings = face_recognition.face_encodings(output, face_locations)
from cv.face_recognition import face_recognition

# Load the jpg files into numpy arrays
biden_image = face_recognition.load_image_file("biden.jpg")
obama_image = face_recognition.load_image_file("obama.jpg")
unknown_image = face_recognition.load_image_file("obama2.jpg")

# Get the face encodings for each face in each image file.
# Since there could be more than one face in each image, it returns a list of
# encodings.  But since I know each image only has one face, I only care about
# the first encoding in each image, so I grab index 0.
biden_face_encoding = face_recognition.face_encodings(biden_image)[0]
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]
unknown_face_encoding = face_recognition.face_encodings(unknown_image)[0]

known_faces = [
    biden_face_encoding,
    obama_face_encoding
]

# results is an array of True/False telling if the unknown face matched anyone
# in the known_faces array (same order as known_faces).
results = face_recognition.compare_faces(known_faces, unknown_face_encoding)

print("Is the unknown face a picture of Biden? {}".format(results[0]))
print("Is the unknown face a picture of Obama? {}".format(results[1]))
# Fixed: `not True in results` → the idiomatic (and PEP 8) `True not in results`.
print("Is the unknown face a new person that we've never seen before? {}".format(True not in results))
# This is a demo of running face recognition on a video file and saving the results to a new video file. # # PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam. # OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this # specific demo. If you have trouble installing it, try any of the other demos that don't require it instead. # Open the input movie file input_movie = cv2.VideoCapture("hamilton_clip.mp4") length = int(input_movie.get(cv2.CAP_PROP_FRAME_COUNT)) # Create an output movie file (make sure resolution/frame rate matches input video!) fourcc = cv2.VideoWriter_fourcc(*'XVID') output_movie = cv2.VideoWriter('output.avi', fourcc, 29.97, (640, 360)) # Load some sample pictures and learn how to recognize them. lmm_image = face_recognition.load_image_file("lin-manuel-miranda.png") lmm_face_encoding = face_recognition.face_encodings(lmm_image)[0] al_image = face_recognition.load_image_file("alex-lacamoire.png") al_face_encoding = face_recognition.face_encodings(al_image)[0] known_faces = [lmm_face_encoding, al_face_encoding] # Initialize some variables face_locations = [] face_encodings = [] face_names = [] frame_number = 0 while True: # Grab a single frame of video