def test_compare_faces(self):
    """Two other photos of the same person match the reference; the different person does not."""
    img_a1 = api.load_image_file(
        os.path.join(os.path.dirname(__file__), "test_images", "obama.jpg"))
    img_a2 = api.load_image_file(
        os.path.join(os.path.dirname(__file__), "test_images", "obama2.jpg"))
    img_a3 = api.load_image_file(
        os.path.join(os.path.dirname(__file__), "test_images", "obama3.jpg"))
    img_b1 = api.load_image_file(
        os.path.join(os.path.dirname(__file__), "test_images", "biden.jpg"))

    face_encoding_a1 = api.face_encodings(img_a1)[0]
    face_encoding_a2 = api.face_encodings(img_a2)[0]
    face_encoding_a3 = api.face_encodings(img_a3)[0]
    face_encoding_b1 = api.face_encodings(img_b1)[0]

    faces_to_compare = [
        face_encoding_a2,
        face_encoding_a3,
        face_encoding_b1]

    match_results = api.compare_faces(faces_to_compare, face_encoding_a1)

    # `== True` / `== False` comparisons are unidiomatic (flake8 E712);
    # assert truthiness directly.
    assert match_results[0]
    assert match_results[1]
    assert not match_results[2]
def scan(img1, img2):
    """Load two image files and prepare them for recognition.

    For each image, returns the OpenCV-style BGR array, its face encodings,
    and a (1, 96, 96, 3) float batch normalized to [0, 1] for a secondary
    96x96-input model.

    Returns:
        image1, encoding1, gimage1, image2, encoding2, gimage2
    """
    def _load_bgr(path):
        # PIL decodes to RGB; reverse the channel axis for OpenCV-style BGR.
        rgb = array(Image.open(path).convert('RGB'))
        return rgb[:, :, ::-1].copy()

    def _model_input(bgr):
        # Resize to the model's 96x96 input, scale to [0, 1], add batch dim.
        resized = cv2.resize(bgr, (96, 96)).astype("float") / 255.0
        return np.expand_dims(img_to_array(resized), axis=0)

    # The two images go through identical pipelines; the helpers above remove
    # the copy-pasted duplication of the original implementation.
    image1 = _load_bgr(img1)
    encoding1 = face_recognition.face_encodings(image1)
    image2 = _load_bgr(img2)
    encoding2 = face_recognition.face_encodings(image2)
    return image1, encoding1, _model_input(image1), image2, encoding2, _model_input(image2)
def test_compare_faces(self):
    """One-to-many comparison: obama reference vs [obama2, obama3, biden]."""
    test_dir = os.path.join(os.path.dirname(__file__), 'test_images')

    def encode(filename):
        # First face encoding of the named test image.
        return api.face_encodings(api.load_image_file(os.path.join(test_dir, filename)))[0]

    reference = encode('obama.jpg')
    candidates = [encode('obama2.jpg'), encode('obama3.jpg'), encode('biden.jpg')]

    match_results = api.compare_faces(candidates, reference)

    self.assertEqual(type(match_results), list)
    self.assertTrue(match_results[0])
    self.assertTrue(match_results[1])
    self.assertFalse(match_results[2])
def test_face_distance(self):
    """Same-person distances stay within 0.6; the different person exceeds it."""
    base = os.path.join(os.path.dirname(__file__), 'test_images')
    encodings = {}
    for key in ('obama', 'obama2', 'obama3', 'biden'):
        image = api.load_image_file(os.path.join(base, key + '.jpg'))
        encodings[key] = api.face_encodings(image)[0]

    faces_to_compare = [encodings['obama2'], encodings['obama3'], encodings['biden']]
    distance_results = api.face_distance(faces_to_compare, encodings['obama'])

    # 0.6 is the default face distance match threshold. So we'll spot-check that
    # the numbers returned are above or below that based on if they should match
    # (since the exact numbers could vary).
    self.assertEqual(type(distance_results), np.ndarray)
    self.assertLessEqual(distance_results[0], 0.6)
    self.assertLessEqual(distance_results[1], 0.6)
    self.assertGreater(distance_results[2], 0.6)
def recognition_faces_in_image(knownfile_stream, detectfile_stream):
    """Compare each face in the detect image against the single known face.

    Returns a flask JSON response:
      ret=1  the known image contains more than one face
      ret=2  either image contains no face
      ret=0  with `results`: one list of distances per detected face
    """
    # Load the user-uploaded images.
    knownimg = face_recognition.load_image_file(knownfile_stream)
    detectimg = face_recognition.load_image_file(detectfile_stream)
    # Encode every face found in each upload.
    knownface_encodings = face_recognition.face_encodings(knownimg)
    detectface_encodings = face_recognition.face_encodings(detectimg)
    if len(knownface_encodings) > 1:
        result = {"ret": 1, "msg": "knownface has more than one face"}
        return jsonify(result)
    if not knownface_encodings or not detectface_encodings:
        result = {"ret": 2, "msg": "knownface or detectface has no face"}
        return jsonify(result)
    checked_results = []
    for detectface_encoding in detectface_encodings:
        distances = face_recognition.face_distance(knownface_encodings, detectface_encoding)
        # Removed dead code: `checked_result = list(distances <= 0.6)` was
        # computed but never used — the API returns the raw distances.
        # NOTE(review): if the intent was to return booleans instead, append
        # [bool(d <= 0.6) for d in distances] — confirm with API consumers.
        checked_results.append(distances.tolist())
    # Emit the recognition result as a JSON key/value structure.
    result = {"ret": 0, "results": checked_results}
    return jsonify(result)
def process_image(image_to_check, known_names, known_face_encodings, tolerance=0.6, show_distance=False):
    """Identify the faces in one image file against the known encodings.

    Returns (image_to_check, names) where names holds one entry per detected
    face: a known person's name, or "unknown_person" when nothing matches.
    """
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if max(unknown_image.shape) > 1600:
        pil_img = PIL.Image.fromarray(unknown_image)
        pil_img.thumbnail((1600, 1600), PIL.Image.LANCZOS)
        unknown_image = np.array(pil_img)

    result_list = []
    for unknown_encoding in face_recognition.face_encodings(unknown_image):
        distances = face_recognition.face_distance(known_face_encodings, unknown_encoding)
        matched_names = [name for name, d in zip(known_names, distances) if d <= tolerance]
        # Mirrors the original: when several people match, the last one wins.
        recognized_name = matched_names[-1] if matched_names else "unknown_person"
        result_list.append(recognized_name)
    return image_to_check, result_list
def compare_faces(profile, person, target_encoding, threshold=0.6):
    """Check a profile picture URL against the target face encodings.

    profile is a (profilelink, profilepic_url, distance) tuple. On a match the
    person's instagram fields are filled in and `person` is returned;
    otherwise None.
    """
    profilelink, profilepic, distance = profile
    match = None
    try:
        image = urllib.request.urlopen(profilepic)
        unknown_image = face_recognition.load_image_file(image)
        unknown_encoding = face_recognition.face_encodings(unknown_image)
        if len(unknown_encoding) > 0:
            results = face_recognition.face_distance(target_encoding, unknown_encoding[0])
            for result in results:
                if result < float(threshold):
                    person.instagram = encoding.smart_str(profilelink, encoding='ascii', errors='ignore')
                    person.instagramimage = encoding.smart_str(
                        profilepic, encoding='ascii', errors='ignore')
                    # Lazy %-args: the string is only built if the record is emitted.
                    logging.info("Match found: %s ,Instagram: %s", person.full_name, person.instagram)
                    if args.vv:  # fixed `== True` comparison (flake8 E712)
                        print("\tMatch found: " + person.full_name)
                        print("\tInstagram: " + person.instagram)
                    match = person
    except Exception as e:
        # Broad catch is deliberate: any per-profile failure (network, decode,
        # encoding) is logged and the profile is skipped.
        logging.error("compare_faces: %s", e)
        print("ERROR")
        print(e)
    return match
def scan_known_people(known_people_folder):
    """Build (known_names, known_face_encodings) from images in a folder.

    The file basename names the person; files with no face are skipped and
    only the first face is used when several are found.
    """
    known_names = []
    known_face_encodings = []

    for file in image_files_in_folder(known_people_folder):
        basename = os.path.splitext(os.path.basename(file))[0]
        img = face_recognition.load_image_file(file)
        encodings = face_recognition.face_encodings(img)

        if len(encodings) > 1:
            click.echo(
                "WARNING: More than one face found in {}. Only considering the first face."
                .format(file))

        if len(encodings) == 0:
            click.echo(
                "WARNING: No faces found in {}. Ignoring file.".format(file))
        else:
            known_names.append(basename)
            known_face_encodings.append(encodings[0])
            # Removed leftover debug code that printed every encoding and
            # round-tripped it through msgpack without using the result.

    return known_names, known_face_encodings
def main(file, model):
    """Detect faces in `file` and print them as JSON records.

    Each record holds a UUID, the face's normalized center/size, the detector
    tag, and the 128-d encoding. Relies on module-level `percision` and
    `scaling` constants.
    """
    im = PIL.Image.open(file).convert("RGB")
    image = np.array(im)

    locations = fr.face_locations(image, number_of_times_to_upsample=1, model=model)
    encodings = fr.face_encodings(image, known_face_locations=locations, model="default")

    faces = []
    for (top, right, bottom, left), encoding in zip(locations, encodings):
        width = right - left
        height = bottom - top
        faces.append({
            "id": str(uuid.uuid4()),
            # Center point and size, normalized to image dimensions.
            "x": round((left + (width / 2)) / im.width, percision),
            "y": round((top + (height / 2)) / im.height, percision),
            "w": round((width * scaling) / im.width, percision),
            "h": round((height * scaling) / im.height, percision),
            "detector": "face_recognition@" + face_recognition.__version__,
            "confidence": 1,
            "encoding": list(encoding),
        })

    print(json.dumps(faces))
def test_image(image_to_check, known_names, known_face_encodings, tolerance=0.6):
    """Print "<image>,<name>" for every known person matched in the image,
    or "<image>,unknown_person" for a face that matches nobody."""
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if unknown_image.shape[1] > 1600:
        # FIX: scipy.misc.imresize was deprecated and removed (SciPy >= 1.3);
        # resize with Pillow instead, preserving the aspect ratio.
        import PIL.Image
        scale_factor = 1600.0 / unknown_image.shape[1]
        new_height = max(1, int(round(unknown_image.shape[0] * scale_factor)))
        unknown_image = np.array(PIL.Image.fromarray(unknown_image).resize((1600, new_height)))

    unknown_encodings = face_recognition.face_encodings(unknown_image)

    for unknown_encoding in unknown_encodings:
        result = face_recognition.compare_faces(known_face_encodings, unknown_encoding, tolerance=tolerance)

        if True in result:
            # Plain loop: a list comprehension used only for its side effects
            # is unidiomatic.
            for is_match, name in zip(result, known_names):
                if is_match:
                    print("{},{}".format(image_to_check, name))
        else:
            print("{},unknown_person".format(image_to_check))
def test_image_output_json(image_to_check, known_names, known_face_encodings):
    """Return a list of {'imagePath', 'name'} dicts for every face in the
    image; a face matching nobody is reported as 'unknown_name'."""
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if unknown_image.shape[1] > 1600:
        # FIX: scipy.misc.imresize was removed in SciPy 1.3 — use Pillow.
        import PIL.Image
        scale_factor = 1600.0 / unknown_image.shape[1]
        new_height = max(1, int(round(unknown_image.shape[0] * scale_factor)))
        unknown_image = np.array(PIL.Image.fromarray(unknown_image).resize((1600, new_height)))

    unknown_encodings = face_recognition.face_encodings(unknown_image)

    output = []
    for unknown_encoding in unknown_encodings:
        result = face_recognition.compare_faces(known_face_encodings, unknown_encoding)
        if True in result:
            # Plain loop instead of a side-effect list comprehension.
            for is_match, name in zip(result, known_names):
                if is_match:
                    output.append({'imagePath': image_to_check, 'name': name})
        else:
            output.append({'imagePath': image_to_check, 'name': 'unknown_name'})
    return output
def scan_known_people_for_known_names(known_people_folder):
    """Return the names of known people found in a folder of images.

    Face encodings are cached on disk under ./encodings/<name>.arr.npy and
    reused on later runs.
    """
    known_names = []
    known_face_encodings = []
    encodings = []
    for file in image_files_in_folder(known_people_folder):
        basename = os.path.splitext(os.path.basename(file))[0]
        img = face_recognition.load_image_file(file)
        cache_path = "./encodings/" + basename + ".arr.npy"
        if os.path.exists(cache_path):
            encodings = np.array([np.load(cache_path)])
        else:
            encodings = face_recognition.face_encodings(img, None, 4)
            # BUG FIX: only cache when a face was actually found — the old
            # code indexed encodings[0] before checking the length, crashing
            # with IndexError on images containing no face.
            if len(encodings) > 0:
                np.save("./encodings/" + basename + ".arr", encodings[0])
        if len(encodings) > 1:
            click.echo("WARNING: More than one face found in {}. Only considering the first face.".format(file))
        if len(encodings) == 0:
            click.echo("WARNING: No faces found in {}. Ignoring file.".format(file))
        else:
            known_names.append(basename)
    return known_names
def test_image(image_to_check, known_names, known_face_encodings, tolerance=0.6, show_distance=False):
    """Print a result line for every known person matched in the image.

    Faces matching nobody produce no output in this variant.
    """
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if max(unknown_image.shape) > 1600:
        pil_img = PIL.Image.fromarray(unknown_image)
        pil_img.thumbnail((1600, 1600), PIL.Image.LANCZOS)
        unknown_image = np.array(pil_img)

    for unknown_encoding in face_recognition.face_encodings(unknown_image):
        distances = face_recognition.face_distance(known_face_encodings, unknown_encoding)
        for name, distance in zip(known_names, distances):
            if distance <= tolerance:
                print_result(image_to_check, name, distance, show_distance)
def getAttendanceData(self):
    """Collect the students expected in the upcoming CZ3002 lab session along
    with their stored profile pictures and face encodings.

    Returns:
        (student_list, pic_list, p_encs) — parallel lists of students, their
        face_image fields, and the first face encoding of each picture.

    NOTE(review): the course code, index, and class type are hard-coded —
    presumably a demo/test fixture; confirm before wider use.
    """
    # student = User.objects.get(matric_no = "U1620133D")
    course = Course.objects.get(course_code = "CZ3002")
    course_index = CourseIndex.objects.get(index = "12345")
    course_index_type = CourseIndexType.objects.get(course_index = course_index, class_type="lab")
    today = datetime.datetime.now()
    # Days until the next Thursday (weekday 3); 0 when today is Thursday.
    tdelta = (3 - datetime.datetime.today().weekday()) % 7
    today_date = today.date() + datetime.timedelta(days=tdelta)
    # today_date = today.date()
    time = course_index_type.time
    today_time = today.time()
    # The class session scheduled on that computed date.
    class_session = Class.objects.get(course_index_type = course_index_type, datetime__date = today_date)
    attendance = Attendance.objects.filter(class_session = class_session)
    student_list = []
    for a in attendance:
        student_list.append(a.student)
    pic_list = []
    for s in student_list:
        pic_list.append(s.face_image)
    p_encs = []
    for p in pic_list:
        p_dir = settings.MEDIA_ROOT+"/"+str(p)
        p_image = api.load_image_file(p_dir)
        # First face only — assumes each profile picture has exactly one face;
        # TODO confirm (IndexError if a picture has none).
        p_enc = api.face_encodings(p_image)[0]
        p_encs.append(p_enc)
    return student_list, pic_list, p_encs
def test_image(image_to_check, known_names, known_face_encodings):
    """Compare the single face in an image against the known encodings.

    Returns (distance_to_first_known, match_bool) when exactly one face is
    found, otherwise ("0", "Many Faces or No Faces").
    """
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if unknown_image.shape[1] > 1600:
        # FIX: scipy.misc.imresize was removed in SciPy 1.3 — use Pillow.
        import PIL.Image
        scale_factor = 1600.0 / unknown_image.shape[1]
        new_height = max(1, int(round(unknown_image.shape[0] * scale_factor)))
        unknown_image = np.array(PIL.Image.fromarray(unknown_image).resize((1600, new_height)))

    unknown_encodings = face_recognition.face_encodings(unknown_image)
    print("unknown_encodings " + str(unknown_encodings))
    if len(unknown_encodings) == 1:
        for unknown_encoding in unknown_encodings:
            result = face_recognition.compare_faces(known_face_encodings, unknown_encoding)
            distance = face_recognition.face_distance(known_face_encodings, unknown_encoding)
            print(distance[0])
            # A ternary expression used only for its print side effect is
            # unidiomatic — use a plain if/else statement.
            if True in result:
                print("True")
            else:
                print("False ")
            return distance[0], result[0]
    else:
        return "0", "Many Faces or No Faces"
def import_known_people(name, file, id_card, sex, age, phone):
    """Register one person: encode the face in `file`, store the person row,
    and insert each of the 128 encoding values into its per-dimension index
    table.

    Returns True on success, False when the image has zero or several faces.
    """
    img = face_recognition.load_image_file(file)
    encodings = face_recognition.face_encodings(img)
    enc_len = len(encodings)
    if enc_len > 1:
        click.echo(
            "WARNING: More than one face found in {}. Only considering the first face."
            .format(file))
        return False
    if enc_len == 0:
        click.echo(
            "WARNING: No faces found in {}. Ignoring file.".format(file))
        return False

    cursor = get_db().cursor()
    # SECURITY FIX: the previous version interpolated user-supplied values
    # straight into the SQL string (injection risk). Use driver parameter
    # binding instead.
    sql = ("insert into person_info (name, id_card, sex, age, phone, file, found) "
           "values(%s, %s, %s, %s, %s, %s, %s)")
    cursor.execute(sql, (name, id_card, sex, str(age), phone, file,
                         datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
    person_id = get_db().insert_id()

    # One row per encoding dimension: table index_<i> receives (person_id, value).
    # The first placeholder selects the table and is always an int we generate,
    # so this interpolation is not attacker-controlled.
    sql_t = "insert into index_%s (id, eigenvalue) values((%s), (%s))"
    args = [[i + 1, person_id, float(val)] for i, val in enumerate(encodings[0])]
    cursor.executemany(sql_t, args)
    get_db().commit()
    cursor.close()
    return True
def test_image(src, image_to_check, known_names, known_face_encodings, tolerance=0.6, show_distance=False):
    """Print a result line (tagged with `src`) for every face in the image:
    matched names, "unknown_person", or "no_persons_found"."""
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down giant images so recognition runs a little faster.
    if max(unknown_image.shape) > 1600:
        pil_img = PIL.Image.fromarray(unknown_image)
        pil_img.thumbnail((1600, 1600), PIL.Image.LANCZOS)
        unknown_image = np.array(pil_img)

    unknown_encodings = face_recognition.face_encodings(unknown_image)

    for unknown_encoding in unknown_encodings:
        distances = face_recognition.face_distance(known_face_encodings, unknown_encoding)
        result = list(distances <= tolerance)

        if True in result:
            for is_match, name, distance in zip(result, known_names, distances):
                if is_match:
                    print_result(src, image_to_check, name, distance, show_distance)
        else:
            print_result(src, image_to_check, "unknown_person", None, show_distance)

    if not unknown_encodings:
        # BUG FIX: this call previously dropped the `src` argument, shifting
        # every parameter one position left; pass it like the other calls do.
        print_result(src, image_to_check, "no_persons_found", None, show_distance)
def test_image(image_to_check, known_names, known_face_encodings, tolerance=0.6, show_distance=False):
    """Return the recognized name for each face found in the image
    ("Unknown" for faces that match nobody)."""
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if max(unknown_image.shape) > 1600:
        pil_img = PIL.Image.fromarray(unknown_image)
        pil_img.thumbnail((1600, 1600), PIL.Image.LANCZOS)
        unknown_image = np.array(pil_img)

    unknown_encodings = face_recognition.face_encodings(unknown_image)
    face_names = []
    for unknown_encoding in unknown_encodings:
        # Removed a dead distances/result computation whose output was never
        # read; matching is done entirely by compare_faces below.
        # NOTE(review): the hard-coded 0.5 overrides the `tolerance` parameter,
        # which is otherwise unused — confirm whether callers expect
        # `tolerance` to apply here.
        match = face_recognition.compare_faces(known_face_encodings, unknown_encoding, 0.5)
        name = "Unknown"
        # When several people match, the last match wins (original behavior).
        for k in range(len(match)):
            if match[k]:
                name = known_names[k]
        face_names.append(name)
    if not unknown_encodings:
        # print out fact that no faces were found in image
        print(image_to_check, "no_persons_found", None, show_distance)
    return face_names
def identityRecognition(testimg, known_face_encodings, known_face_IDs, Threshold):
    """Identify the face in a captured frame against the known encodings.

    Args:
        testimg: current captured frame (the face to match).
        known_face_encodings: collection of known face encodings.
        known_face_IDs: labels parallel to known_face_encodings.
        Threshold: match threshold forwarded to compare_faces.

    Returns:
        (retname, top_k_idx) — the label array for the best-scoring face
        (np.array(0) when nothing beats the initial score) and the top-k
        indices from the last compare_faces call.
    """
    face_locations = face_recognition.face_locations(testimg)  # detect every face in the image
    # Multi-face handling: the core idea is to keep only the largest face.
    max_face_id = 0
    face_locations_select = []
    if len(face_locations)>1:
        for face_idx in range(len(face_locations)):
            # NOTE(review): locations are (top, right, bottom, left); comparing
            # raw bottom/left coordinates only approximates "largest face" —
            # confirm this selection is intended.
            if (face_locations[face_idx][2]>face_locations[max_face_id][2]) and (face_locations[face_idx][3]>face_locations[max_face_id][3]):
                max_face_id = face_idx
                face_locations_select.append(face_locations[face_idx])
    else:
        face_locations_select = face_locations
    face_encodings = face_recognition.face_encodings(testimg, face_locations_select)  # encode all selected faces
    retname, retscore = np.array(0), np.array(0)
    top_k_idx = []
    for face_encoding in face_encodings:
        # Project-local compare_faces: returns (matches, score, top-k indices).
        matches, score, top_k_idx = compare_faces(known_face_encodings, face_encoding, Threshold, top_k_num = 3)
        retname, retscore = np.array(0), np.array(0)
        # if True in matches:
        #     first_match_index = matches.index(True)
        #     name = known_face_IDs[first_match_index]
        known_face_IDs_np = np.array(known_face_IDs)
        name = known_face_IDs_np[top_k_idx]
        # Keep the best-scoring face seen so far (retscore is reset above each
        # iteration, so effectively the last face's result wins when positive).
        if score > retscore:
            retname = name
            retscore = score
    return retname, top_k_idx
def test_image(image_to_check, known_names, known_face_encodings, tolerance=0.6, show_distance=False):
    """Print one result line per matched person; "unknown_person" when a face
    matches nobody and "no_persons_found" when the image has no faces."""
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Shrink very large images so encoding runs a little faster.
    if max(unknown_image.shape) > 1600:
        pil_img = PIL.Image.fromarray(unknown_image)
        pil_img.thumbnail((1600, 1600), PIL.Image.LANCZOS)
        unknown_image = np.array(pil_img)

    unknown_encodings = face_recognition.face_encodings(unknown_image)

    for unknown_encoding in unknown_encodings:
        distances = face_recognition.face_distance(known_face_encodings, unknown_encoding)
        hits = [(name, dist) for name, dist in zip(known_names, distances) if dist <= tolerance]
        if hits:
            for name, dist in hits:
                print_result(image_to_check, name, dist, show_distance)
            # put IMAGE into firebase unknown_image?
        else:
            # no matches
            print_result(image_to_check, "unknown_person", None, show_distance)

    if not unknown_encodings:
        # print out fact that no faces were found in image
        print_result(image_to_check, "no_persons_found", None, show_distance)
def test_image(image_to_check, known_names, known_face_encodings):
    """For each face in the image, print the space-separated names of all
    known people it matches (an empty line when nobody matches)."""
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if unknown_image.shape[1] > 1600:
        # FIX: scipy.misc.imresize was removed in SciPy 1.3 — use Pillow.
        import PIL.Image
        scale_factor = 1600.0 / unknown_image.shape[1]
        new_height = max(1, int(round(unknown_image.shape[0] * scale_factor)))
        unknown_image = np.array(PIL.Image.fromarray(unknown_image).resize((1600, new_height)))

    unknown_encodings = face_recognition.face_encodings(unknown_image)

    for unknown_encoding in unknown_encodings:
        result = face_recognition.compare_faces(known_face_encodings, unknown_encoding)
        output = ""
        if True in result:
            for is_match, name in zip(result, known_names):
                if is_match:
                    output += ("{} ".format(name))
        print(output)
def test_face_encodings(self):
    """A single-face image yields exactly one 128-dimension encoding."""
    path = os.path.join(os.path.dirname(__file__), 'test_img', 'obama.jpg')
    encodings = api.face_encodings(api.load_image_file(path))
    self.assertEqual(len(encodings), 1)
    self.assertEqual(len(encodings[0]), 128)
def test_image(image_to_check, known_names, known_face_encodings, tolerance=0.6, show_distance=False):
    """Print one result line per matched person, or "unknown_person" for a
    face that matches nobody."""
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if unknown_image.shape[1] > 1600:
        # FIX: scipy.misc.imresize was removed in SciPy 1.3 — use Pillow.
        import PIL.Image
        scale_factor = 1600.0 / unknown_image.shape[1]
        new_height = max(1, int(round(unknown_image.shape[0] * scale_factor)))
        unknown_image = np.array(PIL.Image.fromarray(unknown_image).resize((1600, new_height)))

    unknown_encodings = face_recognition.face_encodings(unknown_image)

    for unknown_encoding in unknown_encodings:
        distances = face_recognition.face_distance(known_face_encodings, unknown_encoding)
        result = list(distances <= tolerance)

        if True in result:
            # Plain loop instead of a side-effect list comprehension.
            for is_match, name, distance in zip(result, known_names, distances):
                if is_match:
                    print_result(image_to_check, name, distance, show_distance)
        else:
            print_result(image_to_check, "unknown_person", None, show_distance)
def image_recognition(image_to_check, tolerance, bucket, confidence): unknown_image = face_recognition.load_image_file(image_to_check) # Scale down image if it's giant so things run a little faster if max(unknown_image.shape) > 1600: pil_img = PIL.Image.fromarray(unknown_image) pil_img.thumbnail((1600, 1600), PIL.Image.LANCZOS) unknown_image = np.array(pil_img) unknown_encodings = face_recognition.face_encodings(unknown_image) enc_len = len(unknown_encodings) if enc_len > 1: recg_res = "MoreThanOneFaceFound." return recg_res if enc_len == 0: recg_res = "NoFacesFound." return recg_res db = pymysql.connect(config.get_db_host(), config.get_db_user(), config.get_db_pass(), config.get_db_name()) cursor = db.cursor() for i, eigenval in enumerate(unknown_encodings[0]): if i == 0: sql_t = "(select id from index_" \ + str(i + 1) + " force index(ridx) where eigenvalue >= (" \ + str(eigenval - tolerance) + ") and (eigenvalue <= " \ + str(eigenval + tolerance) + ") order by abs(power(eigenvalue,2) - " \ + str(math.pow(eigenval, 2)) + ") asc limit " + str(bucket) + ") " else: sql_t += "union all (select id from index_" \ + str(i + 1) + " force index(ridx) where eigenvalue >= (" \ + str(eigenval - tolerance) + ") and (eigenvalue <= " \ + str(eigenval + tolerance) + ") order by abs(power(eigenvalue,2) - " \ + str(math.pow(eigenval, 2)) + ") asc limit " + str(bucket) + ") " cursor.execute(sql_t) data = cursor.fetchall() data = [i for item in data for i in item] count = Counter(data) count_dict = dict(count) # sort # test = sorted(count_dict.items(), key = lambda k: k[1], reverse = True) if len(count_dict) > 0: max_item = max(count_dict.items(), key=lambda x: x[1]) if max_item[1] >= confidence: sql_t = "select name, id_card, sex, age, phone from person_info where id=%s" % ( max_item[0]) cursor.execute(sql_t) recg_res = cursor.fetchone(), max_item[1] else: recg_res = "Unrecognized,but one or more similar faces found" else: recg_res = "Unrecognized" cursor.close() db.close() return 
recg_res
def test_face_encodings(self):
    """A single-face image produces exactly one 128-d encoding."""
    image_path = os.path.join(os.path.dirname(__file__), "test_images", "obama.jpg")
    img = api.load_image_file(image_path)
    encodings = api.face_encodings(img)
    assert len(encodings) == 1
    assert len(encodings[0]) == 128
def fromimg(cls, img):
    """Alternate constructor: build a 'driver license match' record from an
    image array, storing its first face encoding and a 100x100 thumbnail."""
    import face_recognition.api as face_recognition
    obj = cls()
    obj.id = 'driver license match'
    obj.name = 'driver license match'
    # First detected face only — the licence photo is assumed to contain one.
    obj.encoding = face_recognition.face_encodings(img)[0]
    obj.imgFile = cv2.resize(img, (100, 100))
    obj.shape = obj.imgFile.shape
    return obj
def compute_distances(image_to_check, known_face_encodings):
    """Distances from each face in the image to every known encoding.

    When no face is found, retries on the image rotated +90 and then -90
    degrees (with more jitter) before giving up.
    """
    unknown_encodings = face_recognition.face_encodings(image_to_check, num_jitters=1)
    # skimage.io.imsave(open('try0.png','wb'), image_to_check, plugin='pil', format_str='png')
    for angle in (90, -90):
        if unknown_encodings:
            break
        rotated = skimage.transform.rotate(image_to_check, angle, resize=True)
        unknown_encodings = face_recognition.face_encodings(rotated, num_jitters=3)
    return [
        list(face_recognition.face_distance(known_face_encodings, enc))
        for enc in unknown_encodings
    ]
def test_face_matching(self):
    """IC photo and driving licence photo match at tolerance 0.50."""
    self.tolerance = 0.50
    image_ic = api.load_image_file(os.path.join(test_data_path, 'my ic.jpg'))
    image_driving_license = api.load_image_file(os.path.join(test_data_path, 'my driving license.jpg'))
    face_encoding_ic = api.face_encodings(image_ic)[0]
    face_encoding_driving = api.face_encodings(image_driving_license)[0]
    match_results = api.compare_faces([face_encoding_ic], face_encoding_driving,
                                      tolerance=self.tolerance)
    self.assertEqual(type(match_results), list)
    self.assertTrue(match_results[0])
def test_bad_face_matching_confidence(self):
    """Confidence for a non-matching passport photo stays below 0.80."""
    self.tolerance = 0.50
    self.threshold = 0.80
    encodings = []
    for filename in ('my ic.jpg', 'passport.jpg'):
        image = api.load_image_file(os.path.join(test_data_path, filename))
        encodings.append(api.face_encodings(image)[0])
    face_distances = api.face_distance([encodings[0]], encodings[1])
    confidence = self.face_distance_to_conf(face_distances, self.tolerance)
    self.assertLessEqual(confidence, self.threshold)
def scan_known_people(known_people_folder):
    """Scan a folder of "name_surname[_N].jpg" images into
    (known_names, known_face_encodings), caching encodings in a sqlite DB.

    A cached pattern is reused only while the stored CRC32 still matches the
    file on disk; otherwise (or when absent) the encoding is recomputed and
    persisted.
    """
    # Accepted basename shapes: name_surname or name_surname_123.
    basename_pattern = re.compile(r"[a-zA-Z]+\_[a-zA-Z]+")
    basename_pattern_number = re.compile((r"[a-zA-Z]+\_[a-zA-Z]+\_[0-9]+"))
    name_surname_pattern = re.compile((r"[a-zA-Z]+"))
    db_engine = create_engine("sqlite:///face_recognition/sqlite_db/face_recognition.db")
    Base.metadata.create_all(db_engine)
    session_factory = sessionmaker(bind=db_engine)
    session = scoped_session(session_factory)
    known_names = []
    known_face_encodings = []
    for file in image_files_in_folder(known_people_folder):
        basename = os.path.splitext(os.path.basename(file))[0]
        img = face_recognition.load_image_file(file)
        # Check for files with appropriate name
        if basename_pattern.fullmatch(basename) or basename_pattern_number.fullmatch(basename):
            # Get name and surname of the person from image
            pattern_identity = ' '.join([name.capitalize() for name in name_surname_pattern.findall(basename)])
            # Select pattern from database
            face_pattern = session.query(FacePattern).filter_by(file_name = basename).first()
            if face_pattern:
                # Cache hit — trust it only if the file is unchanged (CRC32).
                if face_pattern.file_hash == zlib.crc32(open(file, "rb").read()):
                    click.echo("Pattern for {} found in database.".format(file))
                    known_names.append(face_pattern.pattern_identity)
                    known_face_encodings.append(np.array(json.loads(face_pattern.encodings)))
                else:
                    # Stale cache: file content changed since the pattern was stored.
                    click.echo("Pattern for {} found in database, but CRC32 is not the same." \
                               "Probably someone have overwritten the file.".format(file))
            else:
                click.echo("Pattern for {} not found in database. Calculating pattern..".format(file))
                encodings = face_recognition.face_encodings(img)
                if len(encodings) > 1:
                    click.echo("WARNING: More than one face found in {}. Only considering the first face.".format(file))
                if len(encodings) == 0:
                    click.echo("WARNING: No faces found in {}. Ignoring file.".format(file))
                else:
                    known_names.append(pattern_identity)
                    known_face_encodings.append(encodings[0])
                    # Persist the new pattern along with the file's CRC32.
                    session.add(
                        FacePattern(
                            file_name = basename,
                            file_hash = zlib.crc32(open(file, "rb").read()),
                            pattern_identity = pattern_identity,
                            encodings = json.dumps(list(encodings[0]))
                        )
                    )
                    session.commit()
    return known_names, known_face_encodings
def test_compare_faces(self):
    """Two same-person encodings match the reference; the different person does not."""
    img_a1 = api.load_image_file(os.path.join(os.path.dirname(__file__), "test_images", "obama.jpg"))
    img_a2 = api.load_image_file(os.path.join(os.path.dirname(__file__), "test_images", "obama2.jpg"))
    img_a3 = api.load_image_file(os.path.join(os.path.dirname(__file__), "test_images", "obama3.jpg"))
    img_b1 = api.load_image_file(os.path.join(os.path.dirname(__file__), "test_images", "biden.jpg"))
    face_encoding_a1 = api.face_encodings(img_a1)[0]
    face_encoding_a2 = api.face_encodings(img_a2)[0]
    face_encoding_a3 = api.face_encodings(img_a3)[0]
    face_encoding_b1 = api.face_encodings(img_b1)[0]
    faces_to_compare = [
        face_encoding_a2,
        face_encoding_a3,
        face_encoding_b1]
    match_results = api.compare_faces(faces_to_compare, face_encoding_a1)
    # `== True` / `== False` comparisons are unidiomatic (flake8 E712);
    # assert truthiness directly.
    assert match_results[0]
    assert match_results[1]
    assert not match_results[2]
def test_compare_faces(self):
    """compare_faces: obama reference vs two more obamas and one biden."""
    here = os.path.dirname(__file__)

    def load_encoding(stem):
        # First face encoding of <stem>.jpg in the test_images folder.
        img = api.load_image_file(os.path.join(here, 'test_images', stem + '.jpg'))
        return api.face_encodings(img)[0]

    reference = load_encoding('obama')
    candidates = [load_encoding('obama2'), load_encoding('obama3'), load_encoding('biden')]

    match_results = api.compare_faces(candidates, reference)

    self.assertEqual(type(match_results), list)
    self.assertTrue(match_results[0])
    self.assertTrue(match_results[1])
    self.assertFalse(match_results[2])
def test_compare_faces_empty_lists(self):
    """compare_faces with no candidates returns an empty python list."""
    img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'biden.jpg'))
    face_encoding = api.face_encodings(img)[0]

    # An empty python list and an empty numpy array must behave identically.
    for empty in ([], np.array([])):
        match_results = api.compare_faces(empty, face_encoding)
        self.assertEqual(type(match_results), list)
        self.assertListEqual(match_results, [])
def test_face_distance_empty_lists(self):
    """face_distance with no candidates returns an empty ndarray."""
    img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'biden.jpg'))
    face_encoding = api.face_encodings(img)[0]

    # An empty python list and an empty numpy array must behave identically.
    for empty in ([], np.array([])):
        distance_results = api.face_distance(empty, face_encoding)
        self.assertEqual(type(distance_results), np.ndarray)
        self.assertEqual(len(distance_results), 0)
def scan_known_people(known_people_folder):
    """Scan a folder of single-face images; each file's basename names the person.

    Returns (known_names, known_face_encodings) as parallel lists; files with
    no face are skipped, and only the first face is used when several exist.
    """
    known_names, known_face_encodings = [], []
    for file in image_files_in_folder(known_people_folder):
        basename = os.path.splitext(os.path.basename(file))[0]
        img = face_recognition.load_image_file(file)
        encodings = face_recognition.face_encodings(img)

        if len(encodings) > 1:
            click.echo("WARNING: More than one face found in {}. Only considering the first face.".format(file))

        if not encodings:
            click.echo("WARNING: No faces found in {}. Ignoring file.".format(file))
        else:
            known_names.append(basename)
            known_face_encodings.append(encodings[0])

    return known_names, known_face_encodings
def test_image(image_to_check, known_names, known_face_encodings):
    """Print "<image>,<name>" for every known person matched in the image,
    or "<image>,unknown_person" for a face that matches nobody."""
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if unknown_image.shape[1] > 1600:
        # FIX: scipy.misc.imresize was removed in SciPy 1.3 — use Pillow.
        import PIL.Image
        scale_factor = 1600.0 / unknown_image.shape[1]
        new_height = max(1, int(round(unknown_image.shape[0] * scale_factor)))
        unknown_image = np.array(PIL.Image.fromarray(unknown_image).resize((1600, new_height)))

    unknown_encodings = face_recognition.face_encodings(unknown_image)

    for unknown_encoding in unknown_encodings:
        result = face_recognition.compare_faces(known_face_encodings, unknown_encoding)

        if True in result:
            # Plain loop instead of a side-effect list comprehension.
            for is_match, name in zip(result, known_names):
                if is_match:
                    print("{},{}".format(image_to_check, name))
        else:
            print("{},unknown_person".format(image_to_check))
def test_image(image_to_check, known_names, known_face_encodings, tolerance=0.6, show_distance=False):
    """Print one result line per matched person, or "unknown_person" for a
    face that matches nobody."""
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if unknown_image.shape[1] > 1600:
        # FIX: scipy.misc.imresize was removed in SciPy 1.3 — use Pillow.
        import PIL.Image
        scale_factor = 1600.0 / unknown_image.shape[1]
        new_height = max(1, int(round(unknown_image.shape[0] * scale_factor)))
        unknown_image = np.array(PIL.Image.fromarray(unknown_image).resize((1600, new_height)))

    unknown_encodings = face_recognition.face_encodings(unknown_image)

    for unknown_encoding in unknown_encodings:
        distances = face_recognition.face_distance(known_face_encodings, unknown_encoding)
        result = list(distances <= tolerance)

        if True in result:
            # Plain loop instead of a side-effect list comprehension.
            for is_match, name, distance in zip(result, known_names, distances):
                if is_match:
                    print_result(image_to_check, name, distance, show_distance)
        else:
            print_result(image_to_check, "unknown_person", None, show_distance)
def test_image(image_to_check, known_names, known_face_encodings, tolerance=0.6, show_distance=False):
    """Print a line per matched person; "unknown_person" for unmatched faces
    and "no_persons_found" when the image contains no face at all."""
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if max(unknown_image.shape) > 1600:
        pil_img = PIL.Image.fromarray(unknown_image)
        pil_img.thumbnail((1600, 1600), PIL.Image.LANCZOS)
        unknown_image = np.array(pil_img)

    unknown_encodings = face_recognition.face_encodings(unknown_image)

    for unknown_encoding in unknown_encodings:
        distances = face_recognition.face_distance(known_face_encodings, unknown_encoding)
        hits = [(name, dist) for name, dist in zip(known_names, distances) if dist <= tolerance]
        if hits:
            for name, dist in hits:
                print_result(image_to_check, name, dist, show_distance)
        else:
            print_result(image_to_check, "unknown_person", None, show_distance)

    if not unknown_encodings:
        # print out fact that no faces were found in image
        print_result(image_to_check, "no_persons_found", None, show_distance)
def test_face_encodings(self):
    """Exactly one face is found and its encoding has 128 dimensions."""
    img_path = os.path.join(os.path.dirname(__file__), "test_images", "obama.jpg")
    encodings = api.face_encodings(api.load_image_file(img_path))
    assert len(encodings) == 1
    assert len(encodings[0]) == 128
def test_face_encodings(self):
    """face_encodings returns one 128-d vector for a one-face image."""
    obama = os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg')
    encodings = api.face_encodings(api.load_image_file(obama))
    self.assertEqual(len(encodings), 1)
    self.assertEqual(len(encodings[0]), 128)