def test_face_distance(self):
    """Distances to other photos of the same person stay within the 0.6
    default threshold; a different person exceeds it."""
    img_dir = os.path.join(os.path.dirname(__file__), 'test_images')

    def first_encoding(filename):
        # Encoding of the first (only) face in a bundled test image.
        return api.face_encodings(api.load_image_file(os.path.join(img_dir, filename)))[0]

    reference = first_encoding('obama.jpg')
    candidates = [
        first_encoding('obama2.jpg'),
        first_encoding('obama3.jpg'),
        first_encoding('biden.jpg')]

    distance_results = api.face_distance(candidates, reference)

    # 0.6 is the default face distance match threshold, so spot-check each
    # distance against it rather than pinning exact values (which can vary).
    self.assertEqual(type(distance_results), np.ndarray)
    self.assertLessEqual(distance_results[0], 0.6)
    self.assertLessEqual(distance_results[1], 0.6)
    self.assertGreater(distance_results[2], 0.6)
def recognition_faces_in_image(knownfile_stream, detectfile_stream):
    """Compare every face in the detect image against the single known face.

    Returns a JSON response:
      ret=0 -> "results" holds, for each face found in the detect image, a
               list of per-known-face match booleans (0.6 threshold)
      ret=1 -> the known image contains more than one face
      ret=2 -> the known image or the detect image contains no face
    """
    # Load the user-uploaded images.
    knownimg = face_recognition.load_image_file(knownfile_stream)
    detectimg = face_recognition.load_image_file(detectfile_stream)

    # Encode the faces found in each uploaded image.
    knownface_encodings = face_recognition.face_encodings(knownimg)
    detectface_encodings = face_recognition.face_encodings(detectimg)

    if len(knownface_encodings) > 1:
        return jsonify({"ret": 1, "msg": "knownface has more than one face"})
    if not knownface_encodings or not detectface_encodings:
        return jsonify({"ret": 2, "msg": "knownface or detectface has no face"})

    checked_results = []
    for detectface_encoding in detectface_encodings:
        distances = face_recognition.face_distance(knownface_encodings, detectface_encoding)
        # BUG FIX: the boolean match list was computed but never used, and the
        # raw distances were appended instead. Return the match flags (0.6 is
        # the library's default threshold), as plain bools so jsonify can
        # serialize them.
        checked_results.append([bool(d <= 0.6) for d in distances])

    # Emit the recognition results as a JSON key/value structure.
    return jsonify({"ret": 0, "results": checked_results})
def test_compare_faces(self):
    """compare_faces flags the other Obama photos as matches and Biden as a non-match."""
    img_dir = os.path.join(os.path.dirname(__file__), 'test_images')

    def first_encoding(filename):
        # Encoding of the first (only) face in a bundled test image.
        return api.face_encodings(api.load_image_file(os.path.join(img_dir, filename)))[0]

    reference = first_encoding('obama.jpg')
    candidates = [
        first_encoding('obama2.jpg'),
        first_encoding('obama3.jpg'),
        first_encoding('biden.jpg')
    ]

    match_results = api.compare_faces(candidates, reference)

    self.assertEqual(type(match_results), list)
    self.assertTrue(match_results[0])
    self.assertTrue(match_results[1])
    self.assertFalse(match_results[2])
def test_compare_faces(self):
    """Two more photos of the same person match the reference encoding; a
    different person does not."""
    img_dir = os.path.join(os.path.dirname(__file__), "test_images")

    def first_encoding(filename):
        # Encoding of the first (only) face in a bundled test image.
        return api.face_encodings(api.load_image_file(os.path.join(img_dir, filename)))[0]

    face_encoding_a1 = first_encoding("obama.jpg")
    faces_to_compare = [
        first_encoding("obama2.jpg"),
        first_encoding("obama3.jpg"),
        first_encoding("biden.jpg")
    ]

    match_results = api.compare_faces(faces_to_compare, face_encoding_a1)

    # Idiom fix: never compare to True/False with `==` (PEP 8, flake8 E712);
    # assert truthiness directly.
    assert match_results[0]
    assert match_results[1]
    assert not match_results[2]
def test_partial_face_locations(self):
    """Faces cropped by the image edge are still detected, with the box
    clamped to the visible frame."""
    img_dir = os.path.join(os.path.dirname(__file__), 'test_images')
    expected_boxes = {
        'obama_partial_face.jpg': (142, 191, 365, 0),
        'obama_partial_face2.jpg': (142, 551, 409, 349),
    }
    for filename, box in expected_boxes.items():
        img = api.load_image_file(os.path.join(img_dir, filename))
        detected_faces = api.face_locations(img)
        self.assertEqual(len(detected_faces), 1)
        self.assertEqual(detected_faces[0], box)
def getAttendanceData(self):
    """Collect the roster for the upcoming CZ3002 lab session (index 12345).

    Returns a tuple (student_list, pic_list, p_encs):
      student_list -- students having an Attendance row for the session
      pic_list     -- their stored face-image fields, same order
      p_encs       -- one face encoding per stored image, same order
    """
    # student = User.objects.get(matric_no = "U1620133D")
    course = Course.objects.get(course_code = "CZ3002")  # NOTE(review): unused — confirm whether still needed
    course_index = CourseIndex.objects.get(index = "12345")
    course_index_type = CourseIndexType.objects.get(course_index = course_index, class_type="lab")
    today = datetime.datetime.now()
    # Days until the next Thursday (weekday 3); 0 when today is Thursday.
    tdelta = (3 - datetime.datetime.today().weekday()) % 7
    today_date = today.date() + datetime.timedelta(days=tdelta)
    # today_date = today.date()
    time = course_index_type.time  # NOTE(review): unused and shadows a common name — confirm
    today_time = today.time()  # NOTE(review): unused — confirm
    # Class session on the computed date for this course index type.
    class_session = Class.objects.get(course_index_type = course_index_type, datetime__date = today_date)
    attendance = Attendance.objects.filter(class_session = class_session)
    student_list = []
    for a in attendance:
        student_list.append(a.student)
    pic_list = []
    for s in student_list:
        pic_list.append(s.face_image)
    p_encs = []
    for p in pic_list:
        # Stored face images live under MEDIA_ROOT; take the first face found.
        # NOTE(review): api.face_encodings(...)[0] raises IndexError when an
        # image contains no detectable face — confirm inputs are validated.
        p_dir = settings.MEDIA_ROOT+"/"+str(p)
        p_image = api.load_image_file(p_dir)
        p_enc = api.face_encodings(p_image)[0]
        p_encs.append(p_enc)
    return student_list, pic_list, p_encs
def test_cnn_raw_face_locations_32bit_image(self):
    """The CNN detector handles a 32-bit PNG; edges are checked loosely
    (±25 px) since CNN output varies between builds."""
    image_path = os.path.join(os.path.dirname(__file__), 'test_images', '32bit.png')
    detected_faces = api._raw_face_locations(api.load_image_file(image_path), model="cnn")
    self.assertEqual(len(detected_faces), 1)
    rect = detected_faces[0].rect
    self.assertAlmostEqual(rect.top(), 259, delta=25)
    self.assertAlmostEqual(rect.bottom(), 552, delta=25)
def test_raw_face_locations_32bit_image(self):
    """The raw HOG detector finds exactly one face in the 32-bit test PNG."""
    image_path = os.path.join(os.path.dirname(__file__), "test_images", "32bit.png")
    detected_faces = api._raw_face_locations(api.load_image_file(image_path))
    assert len(detected_faces) == 1
    face = detected_faces[0]
    assert face.top() == 290
    assert face.bottom() == 558
def test_raw_face_locations(self):
    """The raw HOG detector finds one face at the known position in obama.jpg."""
    image_path = os.path.join(os.path.dirname(__file__), "test_images", "obama.jpg")
    detected_faces = api._raw_face_locations(api.load_image_file(image_path))
    assert len(detected_faces) == 1
    face = detected_faces[0]
    assert face.top() == 142
    assert face.bottom() == 409
def test_image(image_to_check, known_names, known_face_encodings):
    """Print the known names matching each face found in *image_to_check*.

    For every face encoding in the image, compares it against
    known_face_encodings and prints one space-separated line of matching
    names; a face with no match produces no output.
    """
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster.
    # FIX: scipy.misc.imresize was deprecated and removed in SciPy >= 1.3;
    # resize with a Pillow thumbnail instead, as the newer variants of this
    # helper elsewhere in the project already do. Local imports keep this
    # edit self-contained.
    if unknown_image.shape[1] > 1600:
        import numpy as np
        import PIL.Image
        pil_img = PIL.Image.fromarray(unknown_image)
        pil_img.thumbnail((1600, 1600), PIL.Image.LANCZOS)
        unknown_image = np.array(pil_img)

    unknown_encodings = face_recognition.face_encodings(unknown_image)
    for unknown_encoding in unknown_encodings:
        result = face_recognition.compare_faces(known_face_encodings, unknown_encoding)
        output = ""
        if True in result:
            for is_match, name in zip(result, known_names):
                if is_match:
                    output += ("{} ".format(name))
            print(output)
def scan_known_people_for_known_names(known_people_folder):
    """Return the base filenames of images in *known_people_folder* that
    contain at least one usable face.

    Encodings are cached on disk under ./encodings/<name>.arr.npy; a cache
    hit skips the (slow) face_encodings call. Face-free images are skipped
    with a warning; multi-face images are kept but warned about.
    """
    known_names = []
    for file in image_files_in_folder(known_people_folder):
        basename = os.path.splitext(os.path.basename(file))[0]
        img = face_recognition.load_image_file(file)
        cache_path = "./encodings/" + basename + ".arr.npy"
        if os.path.exists(cache_path):
            encodings = np.array([np.load(cache_path)])
        else:
            encodings = face_recognition.face_encodings(img, None, 4)
            # BUG FIX: only write the cache when at least one face was found;
            # the original saved encodings[0] before the emptiness check and
            # raised IndexError on images without a detectable face.
            if len(encodings) > 0:
                np.save("./encodings/" + basename + ".arr", encodings[0])
        if len(encodings) > 1:
            click.echo("WARNING: More than one face found in {}. Only considering the first face.".format(file))
        if len(encodings) == 0:
            click.echo("WARNING: No faces found in {}. Ignoring file.".format(file))
        else:
            known_names.append(basename)
    return known_names
def test_face_encodings(self):
    """A single-face image yields exactly one 128-dimensional encoding."""
    image_path = os.path.join(os.path.dirname(__file__), 'test_img', 'obama.jpg')
    encodings = api.face_encodings(api.load_image_file(image_path))
    self.assertEqual(len(encodings), 1)
    self.assertEqual(len(encodings[0]), 128)
def import_known_people(name, file, id_card, sex, age, phone):
    """Register one person: require exactly one face in *file*, insert the
    profile row into person_info, then store each of the 128 encoding
    components into its per-dimension index_<i> table.

    Returns True on success, False when the image has zero or multiple faces.
    """
    img = face_recognition.load_image_file(file)
    encodings = face_recognition.face_encodings(img)
    enc_len = len(encodings)
    if enc_len > 1:
        # NOTE(review): the message says the first face will be used, but the
        # function rejects the image here — confirm intended behavior.
        click.echo(
            "WARNING: More than one face found in {}. Only considering the first face."
            .format(file))
        return False
    if enc_len == 0:
        click.echo(
            "WARNING: No faces found in {}. Ignoring file.".format(file))
        return False
    # SECURITY NOTE(review): SQL is assembled with %-interpolation; name,
    # id_card, phone, file are injectable if they can come from untrusted
    # input. Prefer parameterized queries (cursor.execute with args).
    sql = "insert into person_info (name, id_card, sex, age, phone, file, found) " \
        "values('%s', '%s', '%s', %s, '%s', '%s', '%s')" \
        %(name, id_card, sex, str(age), phone, file, datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
    cursor = get_db().cursor()
    cursor.execute(sql)
    person_id = get_db().insert_id()
    args = []
    # One insert per encoding dimension: table index_<i+1> receives
    # (id=person_id, eigenvalue=component value).
    sql_t = "insert into index_%s (id, eigenvalue) values((%s), (%s))"
    for i, val in enumerate(encodings[0]):
        args.append([i + 1, person_id, float(encodings[0][i])])
    cursor.executemany(sql_t, args)
    get_db().commit()
    cursor.close()
    return True
def compare_faces(profile, person, target_encoding, threshold=0.6):
    """Check one scraped profile picture against *target_encoding*.

    profile -- (profilelink, profilepic, distance) tuple; only the first two
               elements are used here.
    Returns *person* (with its instagram fields filled in) on a face-distance
    match below *threshold*, otherwise None.

    NOTE(review): this shadows face_recognition.compare_faces at call sites
    and relies on module globals `args`, `encoding` and `logging` being in
    scope — confirm where this is defined.
    """
    profilelink, profilepic, distance = profile
    match = None
    try:
        # Download the candidate picture and encode any face found in it.
        image = urllib.request.urlopen(profilepic)
        unknown_image = face_recognition.load_image_file(image)
        unknown_encoding = face_recognition.face_encodings(unknown_image)
        if len(unknown_encoding) > 0:
            # Compare only the first detected face against the target encodings.
            results = face_recognition.face_distance(target_encoding, unknown_encoding[0])
            for result in results:
                if result < float(threshold):
                    person.instagram = encoding.smart_str(profilelink, encoding='ascii', errors='ignore')
                    person.instagramimage = encoding.smart_str(
                        profilepic, encoding='ascii', errors='ignore')
                    logging.info("Match found: " + person.full_name + " ,Instagram: " + person.instagram)
                    if args.vv == True:
                        print("\tMatch found: " + person.full_name)
                        print("\tInstagram: " + person.instagram)
                    match = person
    except Exception as e:
        # Broad catch: network / decode failures are logged and treated as
        # "no match" rather than aborting the whole scan.
        logging.error("compare_faces: " + str(e))
        print("ERROR")
        print(e)
    return match
def video_detect_and_blur(img, input_path, output_path, model):
    """Blur every detected face in one frame image and record the face boxes.

    img         -- frame filename within *input_path*
    input_path  -- directory holding the extracted frames
    output_path -- base directory; receives BLURRED_DIR images and INFO_DIR csvs
    model       -- face_locations detector model name (e.g. "hog" or "cnn")

    Writes the blurred frame to output_path+BLURRED_DIR and a CSV with
    (location_id, top, left, bottom, right) rows to output_path+INFO_DIR.
    Zero-byte frame files are skipped entirely.
    """
    if (os.stat(input_path + img).st_size != 0):
        name = img[:img.rfind('.')]  # frame name without its extension
        unknown_image = face_recognition.load_image_file(input_path + img)
        face_locations = face_recognition.face_locations(
            unknown_image, number_of_times_to_upsample=0, model=model)
        # Re-read with cv2 so the blur and write use BGR data directly.
        image = cv2.imread(input_path + img)
        for face_location in face_locations:
            top, right, bottom, left = face_location
            sub_face = image[top:bottom, left:right]
            # apply a gaussian blur on this new recangle image
            sub_face = cv2.GaussianBlur(sub_face, (51, 51), 75)
            # merge this blurry rectangle to our final image
            image[top:top + sub_face.shape[0], left:left + sub_face.shape[1]] = sub_face
        # cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)
        # print(output_path + BLURRED_DIR + img)
        cv2.imwrite(output_path + BLURRED_DIR + img, image)
        with open(output_path + INFO_DIR + name + '.csv', 'w', newline='', encoding="utf-8") as csvfile:
            fieldnames = ['location_id', 'top', 'left', 'bottom', 'right']
            writer = csv.writer(csvfile)
            writer.writerow(fieldnames)
            for (idx, loc) in enumerate(face_locations):
                top, right, bottom, left = loc
                writer.writerow([
                    'id_' + str(idx),
                    str(top),
                    str(left),
                    str(bottom),
                    str(right)
                ])
def scan_known_people(known_people_folder):
    """Build the reference gallery for *known_people_folder*.

    Returns (known_names, known_face_encodings): one basename and one
    encoding per image containing at least one face. Face-free images are
    skipped with a warning; multi-face images keep only the first encoding
    (with a warning).
    """
    known_names = []
    known_face_encodings = []
    for file in image_files_in_folder(known_people_folder):
        basename = os.path.splitext(os.path.basename(file))[0]
        img = face_recognition.load_image_file(file)
        encodings = face_recognition.face_encodings(img)
        if len(encodings) > 1:
            click.echo(
                "WARNING: More than one face found in {}. Only considering the first face."
                .format(file))
        if len(encodings) == 0:
            click.echo(
                "WARNING: No faces found in {}. Ignoring file.".format(file))
        else:
            known_names.append(basename)
            known_face_encodings.append(encodings[0])
            # Cleanup: removed leftover debug output — per-encoding prints and
            # a msgpack pack/unpack round-trip whose result was discarded.
    return known_names, known_face_encodings
def test_image(image_to_check, known_names, known_face_encodings):
    """Encode the single face in *image_to_check* and compare it with the gallery.

    Returns (distance_to_first_known, match_flag_for_first_known) when
    exactly one face is found, otherwise ("0", "Many Faces or No Faces").
    """
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if unknown_image.shape[1] > 1600:
        scale_factor = 1600.0 / unknown_image.shape[1]
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            unknown_image = scipy.misc.imresize(unknown_image, scale_factor)

    unknown_encodings = face_recognition.face_encodings(unknown_image)
    print("unknown_encodings " + str(unknown_encodings))

    # Guard clause instead of the original if/loop nesting.
    if len(unknown_encodings) != 1:
        return "0", "Many Faces or No Faces"

    unknown_encoding = unknown_encodings[0]
    result = face_recognition.compare_faces(known_face_encodings, unknown_encoding)
    distance = face_recognition.face_distance(known_face_encodings, unknown_encoding)
    print(distance[0])
    print("True") if True in result else print("False ")
    return distance[0], result[0]
def test_image_output_json(image_to_check, known_names, known_face_encodings):
    """Return one {'imagePath', 'name'} dict per gallery match for every face
    found in *image_to_check*; a face with no match contributes a single
    'unknown_name' entry."""
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if unknown_image.shape[1] > 1600:
        scale_factor = 1600.0 / unknown_image.shape[1]
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            unknown_image = scipy.misc.imresize(unknown_image, scale_factor)

    unknown_encodings = face_recognition.face_encodings(unknown_image)
    output = list()
    for unknown_encoding in unknown_encodings:
        result = face_recognition.compare_faces(known_face_encodings, unknown_encoding)
        # Idiom fix: build the output with plain loops instead of list
        # comprehensions evaluated purely for their side effects.
        if True in result:
            for is_match, name in zip(result, known_names):
                if is_match:
                    output.append({'imagePath': image_to_check, 'name': name})
        else:
            output.append({'imagePath': image_to_check, 'name': 'unknown_name'})
    return output
def test_image(image_to_check, known_names, known_face_encodings, tolerance=0.6, show_distance=False):
    """Print a result line for every known person within *tolerance* of each
    face in *image_to_check*; unmatched faces print as "unknown_person"."""
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if unknown_image.shape[1] > 1600:
        scale_factor = 1600.0 / unknown_image.shape[1]
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            unknown_image = scipy.misc.imresize(unknown_image, scale_factor)

    for unknown_encoding in face_recognition.face_encodings(unknown_image):
        distances = face_recognition.face_distance(known_face_encodings, unknown_encoding)
        matched_any = False
        for name, distance in zip(known_names, distances):
            if distance <= tolerance:
                matched_any = True
                print_result(image_to_check, name, distance, show_distance)
        if not matched_any:
            print_result(image_to_check, "unknown_person", None, show_distance)
def test_image(image_to_check, known_names, known_face_encodings, tolerance=0.6, show_distance=False):
    """Return the matched known name (or "Unknown") for each face in
    *image_to_check*.

    NOTE(review): matching uses a hard-coded 0.5 threshold via compare_faces;
    the *tolerance* and *show_distance* parameters are kept for interface
    compatibility but do not affect the result — confirm 0.5 is intended.
    """
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if max(unknown_image.shape) > 1600:
        pil_img = PIL.Image.fromarray(unknown_image)
        pil_img.thumbnail((1600, 1600), PIL.Image.LANCZOS)
        unknown_image = np.array(pil_img)

    unknown_encodings = face_recognition.face_encodings(unknown_image)
    face_names = []
    for unknown_encoding in unknown_encodings:
        # Dead code removed: a face_distance/tolerance match list was computed
        # here but never used.
        match = face_recognition.compare_faces(known_face_encodings, unknown_encoding, 0.5)
        name = "Unknown"
        # Last matching gallery entry wins, as in the original loop.
        for k in range(len(match)):
            if match[k]:
                name = known_names[k]
        face_names.append(name)
    if not unknown_encodings:
        # print out fact that no faces were found in image
        print(image_to_check, "no_persons_found", None, show_distance)
    return face_names
def test_raw_face_locations(self):
    """The raw HOG detector returns one dlib rectangle at the known position."""
    image_path = os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg')
    detected_faces = api._raw_face_locations(api.load_image_file(image_path))
    self.assertEqual(len(detected_faces), 1)
    face_rect = detected_faces[0]
    self.assertEqual(face_rect.top(), 142)
    self.assertEqual(face_rect.bottom(), 409)
def test_image(image_to_check, known_names, known_face_encodings, tolerance=0.6, show_distance=False):
    """Print matches for each face in *image_to_check*; "unknown_person" for
    unmatched faces and "no_persons_found" when the image has no faces."""
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if max(unknown_image.shape) > 1600:
        pil_img = PIL.Image.fromarray(unknown_image)
        pil_img.thumbnail((1600, 1600), PIL.Image.LANCZOS)
        unknown_image = np.array(pil_img)

    unknown_encodings = face_recognition.face_encodings(unknown_image)

    for unknown_encoding in unknown_encodings:
        distances = face_recognition.face_distance(known_face_encodings, unknown_encoding)
        matched_any = False
        for name, distance in zip(known_names, distances):
            if distance <= tolerance:
                matched_any = True
                print_result(image_to_check, name, distance, show_distance)
        # put IMAGE into firebase unknown_image?
        if not matched_any:
            # no matches
            print_result(image_to_check, "unknown_person", None, show_distance)

    if not unknown_encodings:
        # print out fact that no faces were found in image
        print_result(image_to_check, "no_persons_found", None, show_distance)
def process_image(image_to_check, known_names, known_face_encodings, tolerance=0.6, show_distance=False):
    """Return (image_path, names): one name per detected face — the last
    known person within *tolerance*, or "unknown_person"."""
    unknown_image = face_recognition.load_image_file(image_to_check)
    result_list = list()

    # Scale down image if it's giant so things run a little faster
    if max(unknown_image.shape) > 1600:
        pil_img = PIL.Image.fromarray(unknown_image)
        pil_img.thumbnail((1600, 1600), PIL.Image.LANCZOS)
        unknown_image = np.array(pil_img)

    for unknown_encoding in face_recognition.face_encodings(unknown_image):
        distances = face_recognition.face_distance(known_face_encodings, unknown_encoding)
        matched = [name for name, d in zip(known_names, distances) if d <= tolerance]
        if matched:
            # Mirrors the original loop: the LAST matching gallery entry wins.
            recognized_name = matched[-1]
        else:
            recognized_name = "unknown_person"
        result_list.append(recognized_name)
    return image_to_check, result_list
def test_image(image_to_check, known_names, known_face_encodings, tolerance=0.6, show_distance=False):
    """Print one result line per known person within *tolerance* of each face.

    NOTE(review): unlike the sibling variants, faces with no match print
    nothing here (no "unknown_person" fallback) — confirm that is intended.
    """
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if max(unknown_image.shape) > 1600:
        pil_img = PIL.Image.fromarray(unknown_image)
        pil_img.thumbnail((1600, 1600), PIL.Image.LANCZOS)
        unknown_image = np.array(pil_img)

    for unknown_encoding in face_recognition.face_encodings(unknown_image):
        distances = face_recognition.face_distance(known_face_encodings, unknown_encoding)
        for name, distance in zip(known_names, distances):
            if distance <= tolerance:
                print_result(image_to_check, name, distance, show_distance)
def test_raw_face_locations_32bit_image(self):
    """The raw HOG detector locates the single face in the 32-bit PNG."""
    image_path = os.path.join(os.path.dirname(__file__), 'test_images', '32bit.png')
    detected_faces = api._raw_face_locations(api.load_image_file(image_path))
    self.assertEqual(len(detected_faces), 1)
    face_rect = detected_faces[0]
    self.assertEqual(face_rect.top(), 290)
    self.assertEqual(face_rect.bottom(), 558)
def test_image(image_to_check, model):
    """Detect faces with the given *model* (no upsampling) and print each box."""
    unknown_image = face_recognition.load_image_file(image_to_check)
    detected = face_recognition.face_locations(
        unknown_image, number_of_times_to_upsample=0, model=model)
    for face_location in detected:
        print_result(image_to_check, face_location)
def test_image(src, image_to_check, known_names, known_face_encodings, tolerance=0.6, show_distance=False):
    """Print match results (tagged with *src*) for every face in
    *image_to_check*; "unknown_person" for unmatched faces and
    "no_persons_found" for face-free images."""
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster.
    if max(unknown_image.shape) > 1600:
        pil_img = PIL.Image.fromarray(unknown_image)
        pil_img.thumbnail((1600, 1600), PIL.Image.LANCZOS)
        unknown_image = np.array(pil_img)

    unknown_encodings = face_recognition.face_encodings(unknown_image)
    for unknown_encoding in unknown_encodings:
        distances = face_recognition.face_distance(known_face_encodings, unknown_encoding)
        result = list(distances <= tolerance)
        # Idiom fix: plain loop instead of a side-effect list comprehension.
        if True in result:
            for is_match, name, distance in zip(result, known_names, distances):
                if is_match:
                    print_result(src, image_to_check, name, distance, show_distance)
        else:
            print_result(src, image_to_check, "unknown_person", None, show_distance)
    if not unknown_encodings:
        # BUG FIX: this branch dropped the *src* argument, calling
        # print_result with one fewer argument than every other call site.
        print_result(src, image_to_check, "no_persons_found", None, show_distance)
def test_image(image_to_check, known_names, known_face_encodings, tolerance=0.6):
    """Print "<image>,<name>" for every gallery match of each face, or
    "<image>,unknown_person" when a face matches nobody."""
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if unknown_image.shape[1] > 1600:
        scale_factor = 1600.0 / unknown_image.shape[1]
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            unknown_image = scipy.misc.imresize(unknown_image, scale_factor)

    for unknown_encoding in face_recognition.face_encodings(unknown_image):
        result = face_recognition.compare_faces(known_face_encodings, unknown_encoding, tolerance=tolerance)
        if True in result:
            for is_match, name in zip(result, known_names):
                if is_match:
                    print("{},{}".format(image_to_check, name))
        else:
            print("{},unknown_person".format(image_to_check))
def main(image_to_check, cpus, model):
    """Crop every detected face in every image under *image_to_check* into
    its own uuid-named file in the current directory.

    NOTE(review): *cpus* is accepted but unused here — confirm.
    """
    for img_file in image_files_in_folder(image_to_check):
        unknown_image = face_recognition.load_image_file(img_file)
        face_locations = face_recognition.face_locations(
            unknown_image, number_of_times_to_upsample=0, model=model)
        for faceLocation in face_locations:
            # print_result(img_file, faceLocation)
            try:
                with Image.open(img_file) as im:
                    print("Processing {}...".format(img_file))
                    top, right, bottom, left = faceLocation
                    myuuid = uuid.uuid1()
                    print("Face Location: {}".format(faceLocation))
                    print("Format: {0}\tSize: {1}\tMode: {2}".format(
                        im.format, im.size, im.mode))
                    cropped = im.crop((left, top, right, bottom))
                    print("Format: {0}\tSize: {1}\tMode: {2}".format(
                        cropped.format, cropped.size, cropped.mode))
                    cropped.save("./" + str(myuuid) + "." + str(im.format).lower(),
                                 im.format)
            except IOError as err:
                print("Unable to load image: {} ({})".format(
                    img_file, err.errno))
                sys.exit(1)
def image_recognition(image_to_check, tolerance, bucket, confidence): unknown_image = face_recognition.load_image_file(image_to_check) # Scale down image if it's giant so things run a little faster if max(unknown_image.shape) > 1600: pil_img = PIL.Image.fromarray(unknown_image) pil_img.thumbnail((1600, 1600), PIL.Image.LANCZOS) unknown_image = np.array(pil_img) unknown_encodings = face_recognition.face_encodings(unknown_image) enc_len = len(unknown_encodings) if enc_len > 1: recg_res = "MoreThanOneFaceFound." return recg_res if enc_len == 0: recg_res = "NoFacesFound." return recg_res db = pymysql.connect(config.get_db_host(), config.get_db_user(), config.get_db_pass(), config.get_db_name()) cursor = db.cursor() for i, eigenval in enumerate(unknown_encodings[0]): if i == 0: sql_t = "(select id from index_" \ + str(i + 1) + " force index(ridx) where eigenvalue >= (" \ + str(eigenval - tolerance) + ") and (eigenvalue <= " \ + str(eigenval + tolerance) + ") order by abs(power(eigenvalue,2) - " \ + str(math.pow(eigenval, 2)) + ") asc limit " + str(bucket) + ") " else: sql_t += "union all (select id from index_" \ + str(i + 1) + " force index(ridx) where eigenvalue >= (" \ + str(eigenval - tolerance) + ") and (eigenvalue <= " \ + str(eigenval + tolerance) + ") order by abs(power(eigenvalue,2) - " \ + str(math.pow(eigenval, 2)) + ") asc limit " + str(bucket) + ") " cursor.execute(sql_t) data = cursor.fetchall() data = [i for item in data for i in item] count = Counter(data) count_dict = dict(count) # sort # test = sorted(count_dict.items(), key = lambda k: k[1], reverse = True) if len(count_dict) > 0: max_item = max(count_dict.items(), key=lambda x: x[1]) if max_item[1] >= confidence: sql_t = "select name, id_card, sex, age, phone from person_info where id=%s" % ( max_item[0]) cursor.execute(sql_t) recg_res = cursor.fetchone(), max_item[1] else: recg_res = "Unrecognized,but one or more similar faces found" else: recg_res = "Unrecognized" cursor.close() db.close() return 
recg_res
def test_face_landmarks_small_model(self):
    """The small landmark model reports only the nose tip and the two eyes."""
    image_path = os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg')
    face_landmarks = api.face_landmarks(api.load_image_file(image_path), model="small")
    self.assertEqual(
        set(face_landmarks[0].keys()),
        set(['nose_tip', 'left_eye', 'right_eye']))
    self.assertEqual(face_landmarks[0]['nose_tip'], [(496, 295)])
def test_raw_face_landmarks(self):
    """The raw 68-point landmark detector finds one face with a known point #10."""
    image_path = os.path.join(os.path.dirname(__file__), "test_images", "obama.jpg")
    face_landmarks = api._raw_face_landmarks(api.load_image_file(image_path))
    example_landmark = face_landmarks[0].parts()[10]
    assert len(face_landmarks) == 1
    assert face_landmarks[0].num_parts == 68
    assert (example_landmark.x, example_landmark.y) == (552, 399)
def test_raw_face_landmarks(self):
    """One face, 68 landmark points, with point #10 at a known coordinate."""
    image_path = os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg')
    face_landmarks = api._raw_face_landmarks(api.load_image_file(image_path))
    example_landmark = face_landmarks[0].parts()[10]
    self.assertEqual(len(face_landmarks), 1)
    self.assertEqual(face_landmarks[0].num_parts, 68)
    self.assertEqual((example_landmark.x, example_landmark.y), (552, 399))
def test_cnn_face_locations(self):
    """The CNN detector finds one face; corners are checked loosely (±25 px)
    since CNN output varies between builds."""
    image_path = os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg')
    detected_faces = api.face_locations(api.load_image_file(image_path), model="cnn")
    self.assertEqual(len(detected_faces), 1)
    expected_box = (144, 608, 389, 363)
    for actual, expected in zip(detected_faces[0], expected_box):
        self.assertAlmostEqual(actual, expected, delta=25)
def test_raw_face_locations_batched(self):
    """Batched CNN detection returns the same single-face result for each
    copy of the image."""
    img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
    batched_detected_faces = api._raw_face_locations_batched([img] * 3, number_of_times_to_upsample=0)
    for detected_faces in batched_detected_faces:
        self.assertEqual(len(detected_faces), 1)
        rect = detected_faces[0].rect
        self.assertEqual(rect.top(), 154)
        self.assertEqual(rect.bottom(), 390)
def test_batched_face_locations(self):
    """batch_face_locations yields one identical css-style box per image copy."""
    img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg'))
    batched_detected_faces = api.batch_face_locations([img] * 3, number_of_times_to_upsample=0)
    for detected_faces in batched_detected_faces:
        self.assertEqual(len(detected_faces), 1)
        self.assertEqual(detected_faces[0], (154, 611, 390, 375))
def test_compare_faces(self):
    """Photos of the same person match the reference encoding; a different
    person does not."""
    img_dir = os.path.join(os.path.dirname(__file__), "test_images")

    def first_encoding(filename):
        # Encoding of the first (only) face in a bundled test image.
        return api.face_encodings(api.load_image_file(os.path.join(img_dir, filename)))[0]

    face_encoding_a1 = first_encoding("obama.jpg")
    faces_to_compare = [
        first_encoding("obama2.jpg"),
        first_encoding("obama3.jpg"),
        first_encoding("biden.jpg")
    ]

    match_results = api.compare_faces(faces_to_compare, face_encoding_a1)

    # Idiom fix: never compare to True/False with `==` (PEP 8, flake8 E712);
    # assert truthiness directly.
    assert match_results[0]
    assert match_results[1]
    assert not match_results[2]
def test_compare_faces(self):
    """compare_faces returns a list: True for the same person, False otherwise."""
    img_dir = os.path.join(os.path.dirname(__file__), 'test_images')

    def encode_first(filename):
        # First (only) face encoding from a bundled test image.
        return api.face_encodings(api.load_image_file(os.path.join(img_dir, filename)))[0]

    reference = encode_first('obama.jpg')
    gallery = [encode_first('obama2.jpg'), encode_first('obama3.jpg'), encode_first('biden.jpg')]

    match_results = api.compare_faces(gallery, reference)

    self.assertEqual(type(match_results), list)
    self.assertTrue(match_results[0])
    self.assertTrue(match_results[1])
    self.assertFalse(match_results[2])
def test_face_landmarks(self):
    """The full landmark model returns all nine feature groups and the
    expected 17-point chin outline."""
    image_path = os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg')
    face_landmarks = api.face_landmarks(api.load_image_file(image_path))
    expected_keys = set(['chin', 'left_eyebrow', 'right_eyebrow', 'nose_bridge',
                         'nose_tip', 'left_eye', 'right_eye', 'top_lip', 'bottom_lip'])
    self.assertEqual(set(face_landmarks[0].keys()), expected_keys)
    expected_chin = [(369, 220), (372, 254), (378, 289), (384, 322), (395, 353),
                     (414, 382), (437, 407), (464, 424), (495, 428), (527, 420),
                     (552, 399), (576, 372), (594, 344), (604, 314), (610, 282),
                     (613, 250), (615, 219)]
    self.assertEqual(face_landmarks[0]['chin'], expected_chin)
def test_face_distance_empty_lists(self):
    """face_distance on an empty gallery returns an empty ndarray, for both
    a Python list and a numpy array input."""
    img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'biden.jpg'))
    face_encoding = api.face_encodings(img)[0]
    # Exercise an empty python list first, then an empty numpy array.
    for empty_gallery in ([], np.array([])):
        distance_results = api.face_distance(empty_gallery, face_encoding)
        self.assertEqual(type(distance_results), np.ndarray)
        self.assertEqual(len(distance_results), 0)
def test_compare_faces_empty_lists(self):
    """compare_faces on an empty gallery returns an empty list, for both a
    Python list and a numpy array input."""
    img = api.load_image_file(os.path.join(os.path.dirname(__file__), 'test_images', 'biden.jpg'))
    face_encoding = api.face_encodings(img)[0]
    # Exercise an empty python list first, then an empty numpy array.
    for empty_gallery in ([], np.array([])):
        match_results = api.compare_faces(empty_gallery, face_encoding)
        self.assertEqual(type(match_results), list)
        self.assertListEqual(match_results, [])
def scan_known_people(known_people_folder):
    """Load every image in *known_people_folder* and return (names, encodings)
    for those containing at least one face (first face only)."""
    known_names = []
    known_face_encodings = []
    for file in image_files_in_folder(known_people_folder):
        basename = os.path.splitext(os.path.basename(file))[0]
        encodings = face_recognition.face_encodings(face_recognition.load_image_file(file))
        if len(encodings) > 1:
            click.echo("WARNING: More than one face found in {}. Only considering the first face.".format(file))
        if len(encodings) == 0:
            click.echo("WARNING: No faces found in {}. Ignoring file.".format(file))
        else:
            known_names.append(basename)
            known_face_encodings.append(encodings[0])
    return known_names, known_face_encodings
def test_image(image_to_check, known_names, known_face_encodings):
    """Print "<image>,<name>" per gallery match of each face in
    *image_to_check*, or "<image>,unknown_person" for a face with no match."""
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if unknown_image.shape[1] > 1600:
        scale_factor = 1600.0 / unknown_image.shape[1]
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            unknown_image = scipy.misc.imresize(unknown_image, scale_factor)

    for unknown_encoding in face_recognition.face_encodings(unknown_image):
        result = face_recognition.compare_faces(known_face_encodings, unknown_encoding)
        if True in result:
            for is_match, name in zip(result, known_names):
                if is_match:
                    print("{},{}".format(image_to_check, name))
        else:
            print("{},unknown_person".format(image_to_check))
def test_image(image_to_check, known_names, known_face_encodings, tolerance=0.6, show_distance=False):
    """Print a print_result line per known person within *tolerance* of each
    face; unmatched faces are reported as "unknown_person"."""
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if unknown_image.shape[1] > 1600:
        scale_factor = 1600.0 / unknown_image.shape[1]
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            unknown_image = scipy.misc.imresize(unknown_image, scale_factor)

    for unknown_encoding in face_recognition.face_encodings(unknown_image):
        distances = face_recognition.face_distance(known_face_encodings, unknown_encoding)
        hits = [(name, distance) for name, distance in zip(known_names, distances)
                if distance <= tolerance]
        if hits:
            for name, distance in hits:
                print_result(image_to_check, name, distance, show_distance)
        else:
            print_result(image_to_check, "unknown_person", None, show_distance)
def test_image(image_to_check, known_names, known_face_encodings, tolerance=0.6, show_distance=False):
    """Print matches for every face; "unknown_person" for unmatched faces,
    "no_persons_found" when the image contains no faces at all."""
    unknown_image = face_recognition.load_image_file(image_to_check)

    # Scale down image if it's giant so things run a little faster
    if max(unknown_image.shape) > 1600:
        pil_img = PIL.Image.fromarray(unknown_image)
        pil_img.thumbnail((1600, 1600), PIL.Image.LANCZOS)
        unknown_image = np.array(pil_img)

    unknown_encodings = face_recognition.face_encodings(unknown_image)

    for unknown_encoding in unknown_encodings:
        distances = face_recognition.face_distance(known_face_encodings, unknown_encoding)
        hits = [(name, distance) for name, distance in zip(known_names, distances)
                if distance <= tolerance]
        if hits:
            for name, distance in hits:
                print_result(image_to_check, name, distance, show_distance)
        else:
            print_result(image_to_check, "unknown_person", None, show_distance)

    if not unknown_encodings:
        # print out fact that no faces were found in image
        print_result(image_to_check, "no_persons_found", None, show_distance)
def test_load_image_file_32bit(self):
    """A 32-bit PNG loads as a 3-channel RGB array of the expected size."""
    image_path = os.path.join(os.path.dirname(__file__), "test_images", "32bit.png")
    loaded = api.load_image_file(image_path)
    assert loaded.shape == (1200, 626, 3)
def test_face_locations(self):
    """One face is detected in obama.jpg at the known css-style box."""
    image_path = os.path.join(os.path.dirname(__file__), "test_images", "obama.jpg")
    detected_faces = api.face_locations(api.load_image_file(image_path))
    assert len(detected_faces) == 1
    assert detected_faces[0] == (142, 617, 409, 349)
def test_face_landmarks(self):
    """All nine landmark groups are present with the expected chin points."""
    image_path = os.path.join(os.path.dirname(__file__), "test_images", "obama.jpg")
    face_landmarks = api.face_landmarks(api.load_image_file(image_path))
    expected_keys = set(['chin', 'left_eyebrow', 'right_eyebrow', 'nose_bridge',
                         'nose_tip', 'left_eye', 'right_eye', 'top_lip', 'bottom_lip'])
    assert set(face_landmarks[0].keys()) == expected_keys
    expected_chin = [(369, 220), (372, 254), (378, 289), (384, 322), (395, 353),
                     (414, 382), (437, 407), (464, 424), (495, 428), (527, 420),
                     (552, 399), (576, 372), (594, 344), (604, 314)]
    assert face_landmarks[0]['chin'] == expected_chin
def test_face_encodings(self):
    """A single-face image yields one 128-dimensional encoding."""
    image_path = os.path.join(os.path.dirname(__file__), "test_images", "obama.jpg")
    encodings = api.face_encodings(api.load_image_file(image_path))
    assert len(encodings) == 1
    assert len(encodings[0]) == 128
def test_face_encodings(self):
    """Exactly one encoding of length 128 comes back for a single-face image."""
    image_path = os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg')
    encodings = api.face_encodings(api.load_image_file(image_path))
    self.assertEqual(len(encodings), 1)
    self.assertEqual(len(encodings[0]), 128)
def test_load_image_file(self):
    """A JPEG loads as an RGB array with the expected dimensions."""
    image_path = os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg')
    loaded = api.load_image_file(image_path)
    self.assertEqual(loaded.shape, (1137, 910, 3))
def test_load_image_file_32bit(self):
    """A 32-bit PNG is converted to a 3-channel RGB array on load."""
    image_path = os.path.join(os.path.dirname(__file__), 'test_images', '32bit.png')
    loaded = api.load_image_file(image_path)
    self.assertEqual(loaded.shape, (1200, 626, 3))
def test_face_locations(self):
    """Exactly one face is found in obama.jpg at the known css-style box."""
    image_path = os.path.join(os.path.dirname(__file__), 'test_images', 'obama.jpg')
    detected_faces = api.face_locations(api.load_image_file(image_path))
    self.assertEqual(len(detected_faces), 1)
    self.assertEqual(detected_faces[0], (142, 617, 409, 349))