def test_image(image_to_check, model):
    """Detect faces in *image_to_check* and print one result per face.

    Uses ``number_of_times_to_upsample=0`` (no upsampling), so very small
    faces may be missed in exchange for speed.  *model* selects the
    detector (e.g. "hog" or "cnn") and is passed through unchanged.
    """
    image = face_recognition.load_image_file(image_to_check)
    locations = face_recognition.face_locations(
        image, number_of_times_to_upsample=0, model=model)
    for location in locations:
        print_result(image_to_check, location)
def test_image(image_to_check, known_names, known_face_encodings, tolerance=0.6, show_distance=False):
    """Compare every face in *image_to_check* against the known encodings.

    For each detected face, prints one result line per known person whose
    encoding is within *tolerance*, or ``unknown_person`` when nobody
    matches.  Prints ``no_persons_found`` when the image contains no
    detectable face at all.
    """
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster.
    if max(unknown_image.shape) > 1600:
        pil_img = PIL.Image.fromarray(unknown_image)
        pil_img.thumbnail((1600, 1600), PIL.Image.LANCZOS)
        unknown_image = np.array(pil_img)

    unknown_encodings = face_recognition.face_encodings(unknown_image)

    for unknown_encoding in unknown_encodings:
        distances = face_recognition.face_distance(known_face_encodings, unknown_encoding)
        result = list(distances <= tolerance)

        # BUG FIX: the reporting logic was commented out, so every computed
        # match was silently discarded.  Restored as a plain loop (a list
        # comprehension used only for side effects is an anti-pattern).
        if True in result:
            for is_match, name, distance in zip(result, known_names, distances):
                if is_match:
                    print_result(image_to_check, name, distance, show_distance)
        else:
            print_result(image_to_check, "unknown_person", None, show_distance)

    if not unknown_encodings:
        # Report the fact that no faces were found in the image.
        print_result(image_to_check, "no_persons_found", None, show_distance)
def sendVector2(id, status):
    """Encode the first face found in ``test.jpg`` and send it to the server.

    The payload is three CRLF-separated fields -- ``id:<id>``,
    ``function:<status>`` and ``data:<face vector>`` -- sent over a TCP
    connection to a hard-coded address.  The server's reply is printed and
    forwarded to the UI via ``eel.alert_feedback``.

    Raises IndexError when the image contains no detectable face.
    """
    photo = load_image_file('test.jpg')
    face_vectors = face_encodings(photo)
    face = 'data:' + str(face_vectors[0])
    function = 'function:' + str(status)
    userid = 'id:' + str(id)

    addr = ('122.51.26.166', 22222)
    msg = (userid + '\r\n' + function + '\r\n' + face).encode()
    print(msg)

    # BUG FIX: the socket was leaked whenever connect/send/recv raised,
    # because close() was only reached on the happy path.  The context
    # manager guarantees closure on every path.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.connect(addr)
        sock.send(msg)
        feedback = sock.recv(1024).decode()

    print(feedback)
    eel.alert_feedback(feedback)
def scan_known_people(known_people_folder):
    """Build the reference database from a folder of labelled face images.

    Each image's base filename (without extension) is taken as the person's
    name.  Images with no detectable face are skipped with a warning; when
    several faces are present only the first encoding is kept (also warned).

    Returns a pair ``(known_names, known_face_encodings)``.
    """
    names = []
    face_codes = []

    for image_path in image_files_in_folder(known_people_folder):
        person_name = os.path.splitext(os.path.basename(image_path))[0]
        image = face_recognition.load_image_file(image_path)
        found = face_recognition.face_encodings(image)

        if not found:
            print("WARNING: No faces found in {}. Ignoring file.".format(image_path))
            continue
        if len(found) > 1:
            print(
                "WARNING: More than one face found in {}. Only considering the first face."
                .format(image_path))

        names.append(person_name)
        face_codes.append(found[0])

    return names, face_codes
def test_image(image_to_check, known_names, known_face_encodings, tolerance=0.6, show_distance=False):
    """Identify the people in *image_to_check* and return one label per face.

    For each detected face, every known name within *tolerance* is recorded
    (via ``print_result``, whose return value is the label); the most
    frequent label wins.  A face matching nobody is labelled "unknown".
    Prints ``no_persons_found`` when the image has no detectable face.

    Returns the list of winning labels, one per detected face.
    """
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster.
    if max(unknown_image.shape) > 1600:
        pil_img = PIL.Image.fromarray(unknown_image)
        pil_img.thumbnail((1600, 1600), PIL.Image.LANCZOS)
        unknown_image = np.array(pil_img)

    unknown_encodings = face_recognition.face_encodings(unknown_image)
    faces = []

    for unknown_encoding in unknown_encodings:
        distances = face_recognition.face_distance(known_face_encodings, unknown_encoding)
        result = list(distances <= tolerance)

        if True in result:
            # Collect the label print_result returns for every match.
            res = [
                print_result(image_to_check, name, distance, show_distance)
                for is_match, name, distance in zip(result, known_names, distances)
                if is_match
            ]
        else:
            res = [print_result(image_to_check, "unknown", None, show_distance)]

        # Majority vote over the collected labels.
        # BUG FIX: the original created a defaultdict(int) and immediately
        # overwrote it with Counter(res) -- the dead assignment is removed.
        counts = Counter(res)
        best = max(counts.items(), key=lambda kv: kv[1])
        faces.append(best[0])

    if not unknown_encodings:
        print_result(image_to_check, "no_persons_found", None, show_distance)
    return faces
def get_face_encoding_in_image(file_stream):
    """Extract the first face encoding from an uploaded image.

    Returns a JSON-serializable dict with ``face_found_in_image`` (bool)
    and ``unknown_face_encodings`` -- the stringified list of floats for
    the first detected face, or None when no face was found.
    """
    # Load the uploaded image file and compute encodings for every face.
    image = api.load_image_file(file_stream)
    encodings = api.face_encodings(image)

    found = len(encodings) > 0
    encoding_str = None
    if found:
        encoding_str = str(list(encodings[0]))
        print(encoding_str)

    return {
        "face_found_in_image": found,
        "unknown_face_encodings": encoding_str,
    }
def test_image(image_to_check, known_names, known_face_encodings):
    """Print CSV lines ``image,name`` for every known person found in the image.

    Prints ``image,unknown_person`` for a face that matches no known
    encoding.
    """
    # BUG FIX: scipy.misc.imresize was removed in SciPy 1.3; use a
    # Pillow-based downscale instead (same approach as the sibling
    # test_image variants).  Local imports keep this fix self-contained.
    import numpy as np
    import PIL.Image

    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster.
    if unknown_image.shape[1] > 1600:
        pil_img = PIL.Image.fromarray(unknown_image)
        pil_img.thumbnail((1600, 1600), PIL.Image.LANCZOS)
        unknown_image = np.array(pil_img)

    unknown_encodings = face_recognition.face_encodings(unknown_image)

    for unknown_encoding in unknown_encodings:
        result = face_recognition.compare_faces(known_face_encodings, unknown_encoding)
        if True in result:
            for is_match, name in zip(result, known_names):
                if is_match:
                    print("{},{}".format(image_to_check, name))
        else:
            print("{},unknown_person".format(image_to_check))
def sendVector(base64_data, id, status):
    """Decode a base64 data-URL screenshot, encode its face, send to server.

    *base64_data* is a ``data:...;base64,<payload>`` string.  The decoded
    image is written to ``prtsc.png``, the first face encoding is
    extracted, and ``id``/``status``/encoding are sent CRLF-separated over
    TCP; the server's reply is printed and forwarded via
    ``eel.alert_feedback``.

    Raises IndexError when the screenshot contains no detectable face.
    """
    # Strip the "data:...;base64," prefix and decode the payload.
    payload = base64_data.split(',')[1]
    data = base64.b64decode(payload.encode())

    # BUG FIX: use a context manager so the handle is closed even if
    # write() raises; also stop shadowing the builtin name "file".
    with open('prtsc.png', 'wb') as fh:
        fh.write(data)

    photo = load_image_file('prtsc.png')
    face_vectors = face_encodings(photo)
    face = 'data:' + str(face_vectors[0])
    function = 'function:' + str(status)
    userid = 'id:' + str(id)

    addr = ('122.51.26.166', 22222)
    msg = (userid + '\r\n' + function + '\r\n' + face).encode()
    print(msg)

    # BUG FIX: close the socket on every path, not only the happy path.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.connect(addr)
        sock.send(msg)
        feedback = sock.recv(1024).decode()

    print(feedback)
    eel.alert_feedback(feedback)
# This is a demo of running face recognition on a video file and saving the results to a new video file.
#
# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.

# Open the input movie file
input_movie = cv2.VideoCapture("got.mp4")
length = int(input_movie.get(cv2.CAP_PROP_FRAME_COUNT))

# Create an output movie file (make sure resolution/frame rate matches input video!)
# BUG FIX: the original computed a fourcc and never used it, passing the
# duplicated hard-coded literal 0x7634706d (== 'mp4v') to VideoWriter
# instead.  Compute the same 'mp4v' code once and pass the variable.
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
output_movie = cv2.VideoWriter('output.mp4', fourcc, 20.0, (640, 360))

# Load some sample pictures and learn how to recognize them.
dst_image = face_recognition.load_image_file("DST.jpg")
dst_face_encoding = face_recognition.face_encodings(dst_image)[0]

js_image = face_recognition.load_image_file("JS.jpg")
js_face_encoding = face_recognition.face_encodings(js_image)[0]

tl_image = face_recognition.load_image_file("TL.jpg")
tl_face_encoding = face_recognition.face_encodings(tl_image)[0]

sd_image = face_recognition.load_image_file("SD.jpg")
sd_face_encoding = face_recognition.face_encodings(sd_image)[0]

known_faces = [
    dst_face_encoding,
    js_face_encoding,
    tl_face_encoding,
    sd_face_encoding,
]
os.path.join(folder, f) for f in os.listdir(folder) if re.match(r'.*\.(jpg|jpeg|png)', f, flags=re.I) ] X = [] y = [] verbose = False for class_dir in os.listdir(train_dir): if not os.path.isdir(os.path.join(train_dir, class_dir)): continue print('starting {}.format', class_dir) for img_path in image_files_in_folder(os.path.join(train_dir, class_dir)): image = face_recognition.load_image_file(img_path) face_bounding_boxes = face_recognition.face_locations(image) if len(face_bounding_boxes) != 1: # If there are no people (or too many people) in a training image, skip the image. if verbose: print("Image {} not suitable for training: {}".format( img_path, "Didn't find a face" if len(face_bounding_boxes) < 1 else "Found more than one face")) else: # Add face encoding for current image to the training set X.append( face_recognition.face_encodings( image, known_face_locations=face_bounding_boxes)[0]) y.append(class_dir)