def test_identifier():
    print('test identifier:')
    detector = Detector()
    aligner = Aligner()
    identifier = Identifier()
    # load image
    image_color_A = Image.open('data/single.jpg').convert('RGB')
    image_gray_A = image_color_A.convert('L')
    image_color_B = Image.open('data/double.jpg').convert('RGB')
    image_gray_B = image_color_B.convert('L')
    # detect face in image
    faces_A = detector.detect(image_gray_A)
    faces_B = detector.detect(image_gray_B)
    draw_A = ImageDraw.Draw(image_color_A)
    draw_B = ImageDraw.Draw(image_color_B)
    if len(faces_A) and len(faces_B):
        landmarks_A = aligner.align(image_gray_A, faces_A[0])
        featA = identifier.extract_feature_with_crop(image_color_A, landmarks_A)
        print(len(featA))
        draw_A.rectangle([(faces_A[0].left, faces_A[0].top),
                          (faces_A[0].right, faces_A[0].bottom)],
                         outline='green')
        sim_list = []
        for face in faces_B:
            landmarks_B = aligner.align(image_gray_B, face)
            featB = identifier.extract_feature_with_crop(image_color_B, landmarks_B)
            sim = identifier.calc_similarity(featA, featB)
            sim_list.append(sim)
        print('sim: {}'.format(sim_list))
        index = np.argmax(sim_list)
        for i, face in enumerate(faces_B):
            color = 'green' if i == index else 'red'
            draw_B.rectangle([(face.left, face.top),
                              (face.right, face.bottom)],
                             outline=color)
    image_color_A.show()
    image_color_B.show()
    identifier.release()
    aligner.release()
    detector.release()
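# Assumed imports for the test functions in this file. The exact module layout
# is an assumption: Detector, Aligner and Identifier are taken to come from the
# pyseeta bindings, matching the identifiers used above and below; adjust the
# import paths to your installation.
import os

import cv2                         # used by the OpenCV-based variants
import numpy as np
from PIL import Image, ImageDraw   # used by the PIL-based variants
from pyseeta import Detector, Aligner, Identifier  # assumed package layout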
def test_identifier():
    print('test identifier:')
    # load model
    detector = Detector()
    aligner = Aligner()
    identifier = Identifier()
    # load image
    image_color_A = cv2.imread('data/single.jpg', cv2.IMREAD_COLOR)
    image_gray_A = cv2.cvtColor(image_color_A, cv2.COLOR_BGR2GRAY)
    image_color_B = cv2.imread('data/double.jpg', cv2.IMREAD_COLOR)
    image_gray_B = cv2.cvtColor(image_color_B, cv2.COLOR_BGR2GRAY)
    # detect face in image
    faces_A = detector.detect(image_gray_A)
    faces_B = detector.detect(image_gray_B)
    if len(faces_A) and len(faces_B):
        landmarks_A = aligner.align(image_gray_A, faces_A[0])
        featA = identifier.extract_feature_with_crop(image_color_A, landmarks_A)
        cv2.rectangle(image_color_A, (faces_A[0].left, faces_A[0].top),
                      (faces_A[0].right, faces_A[0].bottom), (0, 255, 0), thickness=2)
        sim_list = []
        for face in faces_B:
            landmarks_B = aligner.align(image_gray_B, face)
            featB = identifier.extract_feature_with_crop(image_color_B, landmarks_B)
            sim = identifier.calc_similarity(featA, featB)
            sim_list.append(sim)
        print('sim: {}'.format(sim_list))
        index = np.argmax(sim_list)
        for i, face in enumerate(faces_B):
            color = (0, 255, 0) if i == index else (0, 0, 255)
            cv2.rectangle(image_color_B, (face.left, face.top),
                          (face.right, face.bottom), color, thickness=2)
    cv2.imshow('single', image_color_A)
    cv2.imshow('double', image_color_B)
    cv2.waitKey(0)
    identifier.release()
    aligner.release()
    detector.release()
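# A minimal sketch of the 1:N matching step that the variants above share:
# compare one probe feature against a list of gallery features and return the
# index and score of the best match. best_match is a hypothetical helper, not
# part of the original tests; it only reuses identifier.calc_similarity, the
# same call the tests above already make.
def best_match(identifier, probe_feat, gallery_feats):
    # one similarity score per gallery feature
    sims = [identifier.calc_similarity(probe_feat, feat) for feat in gallery_feats]
    best = int(np.argmax(sims))
    return best, sims[best]

# Usage sketch inside the comparison loop:
#   index, score = best_match(identifier, featA, featB_list)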
def test_recognition(img_path, db_path):
    if not os.path.isfile(img_path) or not os.path.isdir(db_path):
        print("indicated path doesn't exist!")
        return
    # load model
    detector = Detector(DET_MODEL_PATH)
    aligner = Aligner(ALI_MODEL_PATH)
    identifier = Identifier(REC_MODEL_PATH)
    # load test image
    image_color_A = cv2.imread(img_path, cv2.IMREAD_COLOR)
    image_gray_A = cv2.cvtColor(image_color_A, cv2.COLOR_BGR2GRAY)
    faces_A = detector.detect(image_gray_A)
    # load database
    for fn in os.listdir(db_path):
        fp = os.path.join(db_path, fn)
        if not os.path.isfile(fp):
            continue
        image_color_B = cv2.imread(fp, cv2.IMREAD_COLOR)
        image_gray_B = cv2.cvtColor(image_color_B, cv2.COLOR_BGR2GRAY)
        # detect face in image
        faces_B = detector.detect(image_gray_B)
        if len(faces_A) and len(faces_B):
            landmarks_A = aligner.align(image_gray_A, faces_A[0])
            featA = identifier.extract_feature_with_crop(image_color_A, landmarks_A)
            # cv2.rectangle(image_color_A, (faces_A[0].left, faces_A[0].top),
            #               (faces_A[0].right, faces_A[0].bottom), (0, 255, 0), thickness=2)
            sim_list = []
            for face in faces_B:
                landmarks_B = aligner.align(image_gray_B, face)
                featB = identifier.extract_feature_with_crop(image_color_B, landmarks_B)
                sim = identifier.calc_similarity(featA, featB)
                sim_list.append(sim)
            print('sim: {}'.format(sim_list))
            # index = np.argmax(sim_list)
            for i, face in enumerate(faces_B):
                color = (0, 255, 0) if sim_list[i] > 0.5 else (0, 0, 255)
                cv2.rectangle(image_color_B, (face.left, face.top),
                              (face.right, face.bottom), color, thickness=2)
            # cv2.imshow('test', resize(image_color_A))
            cv2.imshow('double', resize(image_color_B))
            cv2.waitKey(0)
    release(aligner, detector, identifier)
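# test_recognition above calls resize() and release(), which are not defined in
# this file, and expects DET_MODEL_PATH, ALI_MODEL_PATH and REC_MODEL_PATH to
# point at the SeetaFace .bin model files. The helpers below are hypothetical
# stand-ins so the function runs as written; the max-side-of-800 display limit
# is an illustrative assumption, not a value from the original code.
def resize(image, max_side=800):
    # shrink large images so they fit in an OpenCV window; smaller images pass through
    h, w = image.shape[:2]
    scale = max_side / float(max(h, w))
    if scale >= 1.0:
        return image
    return cv2.resize(image, (int(w * scale), int(h * scale)))

def release(*engines):
    # free every SeetaFace handle (aligner, detector, identifier) passed in
    for engine in engines:
        engine.release()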
def test_identifier(model_path, img1_name, img2_name):
    print('test identifier:')
    if model_path is None:
        print('test_identifier: Please specify the path to the model folder')
        return
    if img1_name is None:
        print('test_identifier: Please specify the path to the image file1')
        return
    if img2_name is None:
        print('test_identifier: Please specify the path to the image file2')
        return
    assert os.path.isdir(model_path) is True, \
        'test_identifier: The model file path should be a folder'
    assert os.path.isfile(img1_name) is True, 'test_identifier: no such image file1'
    assert os.path.isfile(img2_name) is True, 'test_identifier: no such image file2'
    # load model
    model_path_fd = model_path + '/seeta_fd_frontal_v1.0.bin'
    model_path_fa = model_path + '/seeta_fa_v1.1.bin'
    model_path_fr = model_path + '/seeta_fr_v1.0.bin'
    detector = Detector(model_path_fd)
    aligner = Aligner(model_path_fa)
    identifier = Identifier(model_path_fr)
    # load image
    image_color_A = cv2.imread(img1_name, cv2.IMREAD_COLOR)
    image_gray_A = cv2.cvtColor(image_color_A, cv2.COLOR_BGR2GRAY)
    image_color_B = cv2.imread(img2_name, cv2.IMREAD_COLOR)
    image_gray_B = cv2.cvtColor(image_color_B, cv2.COLOR_BGR2GRAY)
    # detect face in image
    faces_A = detector.detect(image_gray_A)
    faces_B = detector.detect(image_gray_B)
    if len(faces_A) and len(faces_B):
        landmarks_A = aligner.align(image_gray_A, faces_A[0])
        featA = identifier.extract_feature_with_crop(image_color_A, landmarks_A)
        cv2.rectangle(image_color_A, (faces_A[0].left, faces_A[0].top),
                      (faces_A[0].right, faces_A[0].bottom), (0, 255, 0), thickness=2)
        sim_list = []
        for face in faces_B:
            landmarks_B = aligner.align(image_gray_B, face)
            featB = identifier.extract_feature_with_crop(image_color_B, landmarks_B)
            sim = identifier.calc_similarity(featA, featB)
            sim_list.append(sim)
        print('sim: {}'.format(sim_list))
        index = np.argmax(sim_list)
        for i, face in enumerate(faces_B):
            color = (0, 255, 0) if i == index else (0, 0, 255)
            cv2.rectangle(image_color_B, (face.left, face.top),
                          (face.right, face.bottom), color, thickness=2)
    cv2.imshow('single', image_color_A)
    cv2.imshow('double', image_color_B)
    cv2.waitKey(0)
    identifier.release()
    aligner.release()
    detector.release()
def test_identifier(model_path, img1_name, img2_name):
    print('test identifier:')
    if model_path is None:
        print('test_identifier: Please specify the path to the model folder')
        return
    if img1_name is None:
        print('test_identifier: Please specify the path to the image file1')
        return
    if img2_name is None:
        print('test_identifier: Please specify the path to the image file2')
        return
    assert os.path.isdir(model_path) is True, \
        'test_identifier: The model file path should be a folder'
    assert os.path.isfile(img1_name) is True, 'test_identifier: no such image file1'
    assert os.path.isfile(img2_name) is True, 'test_identifier: no such image file2'
    model_path_fd = model_path + '/seeta_fd_frontal_v1.0.bin'
    model_path_fa = model_path + '/seeta_fa_v1.1.bin'
    model_path_fr = model_path + '/seeta_fr_v1.0.bin'
    detector = Detector(model_path_fd)
    detector.set_min_face_size(30)
    aligner = Aligner(model_path_fa)
    identifier = Identifier(model_path_fr)
    # load image
    image_color_A = Image.open(img1_name).convert('RGB')
    image_gray_A = image_color_A.convert('L')
    image_color_B = Image.open(img2_name).convert('RGB')
    image_gray_B = image_color_B.convert('L')
    # detect face in image
    faces_A = detector.detect(image_gray_A)
    faces_B = detector.detect(image_gray_B)
    draw_A = ImageDraw.Draw(image_color_A)
    draw_B = ImageDraw.Draw(image_color_B)
    if len(faces_A) and len(faces_B):
        landmarks_A = aligner.align(image_gray_A, faces_A[0])
        featA = identifier.extract_feature_with_crop(image_color_A, landmarks_A)
        draw_A.rectangle([(faces_A[0].left, faces_A[0].top),
                          (faces_A[0].right, faces_A[0].bottom)],
                         outline='green')
        sim_list = []
        for face in faces_B:
            landmarks_B = aligner.align(image_gray_B, face)
            featB = identifier.extract_feature_with_crop(image_color_B, landmarks_B)
            sim = identifier.calc_similarity(featA, featB)
            sim_list.append(sim)
        print('sim: {}'.format(sim_list))
        index = np.argmax(sim_list)
        for i, face in enumerate(faces_B):
            color = 'green' if i == index else 'red'
            draw_B.rectangle([(face.left, face.top),
                              (face.right, face.bottom)],
                             outline=color)
    image_color_A.show()
    image_color_B.show()
    identifier.release()
    aligner.release()
    detector.release()
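# A minimal command-line entry point for the (model_path, img1_name, img2_name)
# variants above. This is a sketch, not part of the original tests: the argument
# names are illustrative assumptions, and with no arguments given the function
# simply prints its "Please specify ..." messages and returns.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='SeetaFace identifier test')
    parser.add_argument('--model_path', help='folder containing the seeta_*.bin model files')
    parser.add_argument('--img1', help='probe image with a single face')
    parser.add_argument('--img2', help='gallery image, possibly with several faces')
    args = parser.parse_args()
    test_identifier(args.model_path, args.img1, args.img2)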