Example #1
def test_cropface(model_path, img_name):
    if model_path is None:
        print('test_cropface: Please specify the path to the model folder')
        return
    if img_name is None:
        print('test_cropface: Please specify the path to the image file')
        return
    assert os.path.isdir(
        model_path
    ), 'test_cropface: The model file path should be a folder'
    assert os.path.isfile(img_name), 'test_cropface: no such file'

    model_path_fd = model_path + '/seeta_fd_frontal_v1.0.bin'
    model_path_fa = model_path + '/seeta_fa_v1.1.bin'
    model_path_fr = model_path + '/seeta_fr_v1.0.bin'
    detector = Detector(model_path_fd)
    detector.set_min_face_size(30)
    aligner = Aligner(model_path_fa)
    identifier = Identifier(model_path_fr)

    image_color = Image.open(img_name).convert('RGB')
    image_gray = image_color.convert('L')
    faces = detector.detect(image_gray)
    for face in faces:
        landmarks = aligner.align(image_gray, face)
        crop_face = identifier.crop_face(image_color, landmarks)
        Image.fromarray(crop_face).show()

    identifier.release()
    aligner.release()
    detector.release()
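A minimal way to invoke this function, assuming the model folder and sample image used in the later examples (adjust both paths to your own setup):

if __name__ == '__main__':
    # Hypothetical paths; point these at your own SeetaFace model folder and image.
    test_cropface('SeetaFaceEngine/model', 'data/chloecalmon.png')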
Example #2
    def __init__(self, threshold=20, single=False, logfile=None, pics_dir='data', isShow=True):
        '''
            threshold: minimum face size passed to the detector
            single:    whether to process a single image
            logfile:   path to the log file
            pics_dir:  path to the image folder if single is False,
                       or path to a single image if single is True
            isShow:    whether to display images during processing
        '''
        self.detector = Detector()
        self.aligner = Aligner()
        self.identifier = Identifier()
        self.images_name = []

        self.detector.set_min_face_size(threshold)
        if not single:
            all_files = os.listdir(pics_dir)

            for f in all_files:
                full_path = os.path.join(pics_dir, f)
                if not os.path.isdir(full_path):
                    self.images_name.append(full_path)
        else:
            self.images_name.append(pics_dir)
        
        self.images_name.sort()

        if logfile is not None:
            logging.basicConfig(level=logging.INFO,
                                format='%(levelname)s-%(lineno)d-%(asctime)s  %(message)s',
                                filename=logfile)
        else:  # print to screen
            logging.basicConfig(level=logging.INFO,
                                format='%(levelname)s-%(lineno)d-%(asctime)s  [FaceFeatureExtract]: %(message)s')
        self.isShow = isShow
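The snippet does not show the class name or how it is constructed; a hypothetical instantiation, assuming the class is called FaceFeatureExtract (taken from the log format string) and using the single-image path from the later examples, might look like:

# Hypothetical usage; the class name FaceFeatureExtract is assumed from the
# log format string and may differ in the original project.
extractor = FaceFeatureExtract(threshold=30, single=True,
                               pics_dir='data/single.jpg', isShow=False)
print(extractor.images_name)  # ['data/single.jpg']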
Example #3
def test_identifier():
    print('test identifier:')
    detector = Detector()
    aligner = Aligner()
    identifier = Identifier()

    # load image
    image_color_A = Image.open('data/single.jpg').convert('RGB')
    image_gray_A = image_color_A.convert('L')
    image_color_B = Image.open('data/double.jpg').convert('RGB')
    image_gray_B = image_color_B.convert('L')

    # detect face in image
    faces_A = detector.detect(image_gray_A)
    faces_B = detector.detect(image_gray_B)

    draw_A = ImageDraw.Draw(image_color_A)
    draw_B = ImageDraw.Draw(image_color_B)

    if len(faces_A) and len(faces_B):
        landmarks_A = aligner.align(image_gray_A, faces_A[0])
        featA = identifier.extract_feature_with_crop(image_color_A,
                                                     landmarks_A)
        print(len(featA))
        draw_A.rectangle([(faces_A[0].left, faces_A[0].top),
                          (faces_A[0].right, faces_A[0].bottom)],
                         outline='green')

        sim_list = []
        for face in faces_B:
            landmarks_B = aligner.align(image_gray_B, face)
            featB = identifier.extract_feature_with_crop(
                image_color_B, landmarks_B)
            sim = identifier.calc_similarity(featA, featB)
            sim_list.append(sim)
        print('sim: {}'.format(sim_list))
        index = np.argmax(sim_list)
        for i, face in enumerate(faces_B):
            color = 'green' if i == index else 'red'
            draw_B.rectangle([(face.left, face.top),
                              (face.right, face.bottom)],
                             outline=color)

    image_color_A.show()
    image_color_B.show()

    identifier.release()
    aligner.release()
    detector.release()
Example #4
def test_identifier():
    print('test identifier:')
    # load model
    detector = Detector()
    aligner = Aligner()
    identifier = Identifier()

    # load image
    image_color_A = cv2.imread('data/single.jpg', cv2.IMREAD_COLOR)
    image_gray_A = cv2.cvtColor(image_color_A, cv2.COLOR_BGR2GRAY)
    image_color_B = cv2.imread('data/double.jpg', cv2.IMREAD_COLOR)
    image_gray_B = cv2.cvtColor(image_color_B, cv2.COLOR_BGR2GRAY)
    # detect face in image
    faces_A = detector.detect(image_gray_A)
    faces_B = detector.detect(image_gray_B)

    if len(faces_A) and len(faces_B):
        landmarks_A = aligner.align(image_gray_A, faces_A[0])
        featA = identifier.extract_feature_with_crop(image_color_A,
                                                     landmarks_A)
        cv2.rectangle(image_color_A, (faces_A[0].left, faces_A[0].top),
                      (faces_A[0].right, faces_A[0].bottom), (0, 255, 0),
                      thickness=2)
        sim_list = []
        for face in faces_B:
            landmarks_B = aligner.align(image_gray_B, face)
            featB = identifier.extract_feature_with_crop(
                image_color_B, landmarks_B)
            sim = identifier.calc_similarity(featA, featB)
            sim_list.append(sim)
        print('sim: {}'.format(sim_list))
        index = np.argmax(sim_list)
        for i, face in enumerate(faces_B):
            color = (0, 255, 0) if i == index else (0, 0, 255)
            cv2.rectangle(image_color_B, (face.left, face.top),
                          (face.right, face.bottom),
                          color,
                          thickness=2)
    cv2.imshow('single', image_color_A)
    cv2.imshow('double', image_color_B)
    cv2.waitKey(0)

    identifier.release()
    aligner.release()
    detector.release()
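Examples #3 and #4 only rank the faces in the second image by similarity; to turn calc_similarity into a same-person decision you also need an acceptance threshold. A minimal sketch, assuming a hypothetical cut-off of 0.5 (the value used in Example #7 below):

def is_same_person(identifier, feat_a, feat_b, threshold=0.5):
    # Hypothetical helper: the 0.5 threshold is an assumption and should be
    # tuned on your own data.
    return identifier.calc_similarity(feat_a, feat_b) > threshold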
Example #5
def test_cropface():
    detector = Detector('SeetaFaceEngine/model/seeta_fd_frontal_v1.0.bin')
    detector.set_min_face_size(30)
    aligner = Aligner('SeetaFaceEngine/model/seeta_fa_v1.1.bin')
    identifier = Identifier('SeetaFaceEngine/model/seeta_fr_v1.0.bin')

    image_color = Image.open('data/chloecalmon.png').convert('RGB')
    image_gray = image_color.convert('L')
    faces = detector.detect(image_gray)
    for face in faces:
        landmarks = aligner.align(image_gray, face)
        crop_face = identifier.crop_face(image_color, landmarks)
        Image.fromarray(crop_face).show()

    identifier.release()
    aligner.release()
    detector.release()
Example #6
def test_cropface():
    detector = Detector()
    detector.set_min_face_size(30)
    aligner = Aligner()
    identifier = Identifier()

    image_color = Image.open('data/chloecalmon.png').convert('RGB')
    image_gray = image_color.convert('L')

    faces = detector.detect(image_gray)
    for face in faces:
        landmarks = aligner.align(image_gray, face)
        crop_face = identifier.crop_face(image_color, landmarks)
        Image.fromarray(crop_face).show()

    identifier.release()
    aligner.release()
    detector.release()
Example #7
def test_recognition(img_path,db_path):
    if not os.path.isfile(img_path) or not os.path.isdir(db_path):
        print("indicated path doesn't exist!")
        return

    # load model
    detector = Detector(DET_MODEL_PATH)
    aligner = Aligner(ALI_MODEL_PATH)
    identifier = Identifier(REC_MODEL_PATH)

    # load test image
    image_color_A = cv2.imread(img_path, cv2.IMREAD_COLOR)
    image_gray_A = cv2.cvtColor(image_color_A, cv2.COLOR_BGR2GRAY)
    faces_A = detector.detect(image_gray_A)

    # load database
    for fn in os.listdir(db_path):
        fp = os.path.join(db_path,fn)
        if not os.path.isfile(fp): continue
        image_color_B = cv2.imread(fp, cv2.IMREAD_COLOR)
        image_gray_B = cv2.cvtColor(image_color_B, cv2.COLOR_BGR2GRAY)
        # detect face in image
        faces_B = detector.detect(image_gray_B)
        if len(faces_A) and len(faces_B):
            landmarks_A = aligner.align(image_gray_A, faces_A[0])
            featA = identifier.extract_feature_with_crop(image_color_A, landmarks_A)
            # cv2.rectangle(image_color_A, (faces_A[0].left, faces_A[0].top), (faces_A[0].right, faces_A[0].bottom), (0,255,0), thickness=2)
            sim_list = []
            for face in faces_B:
                landmarks_B = aligner.align(image_gray_B, face)
                featB = identifier.extract_feature_with_crop(image_color_B, landmarks_B)
                sim = identifier.calc_similarity(featA, featB)
                sim_list.append(sim)
            print('sim: {}'.format(sim_list))
            # index = np.argmax(sim_list)
            for i, face in enumerate(faces_B):
                color = (0,255,0) if sim_list[i] > 0.5 else (0,0,255)
                cv2.rectangle(image_color_B, (face.left, face.top), (face.right, face.bottom), color, thickness=2)
            # cv2.imshow('test', resize(image_color_A))
            cv2.imshow('double', resize(image_color_B))
            cv2.waitKey(0)

    release(aligner,detector,identifier)
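This example relies on two helpers, resize() and release(), that are defined elsewhere in the original script and not shown here. A rough sketch of what they might look like, assuming resize() only shrinks large images for display:

def resize(image, max_width=800):
    # Hypothetical helper: shrink wide images so cv2.imshow windows stay manageable.
    h, w = image.shape[:2]
    if w <= max_width:
        return image
    scale = max_width / float(w)
    return cv2.resize(image, (int(w * scale), int(h * scale)))

def release(*engines):
    # Hypothetical helper: free the native resources of each pyseeta engine.
    for engine in engines:
        engine.release()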
Example #8
    def __init__(self):
        self.args = argparse()
        gpu_id = int(self.args.gpu)
        if gpu_id < 0:
            caffe.set_mode_cpu()
        else:
            caffe.set_device(gpu_id)
            caffe.set_mode_gpu()

        assert os.path.exists(
            self.args.deploy_det), 'file {} is not found'.format(
                self.args.deploy_det)
        assert os.path.isfile(
            self.args.weights_det), 'file {} is not found'.format(
                self.args.weights_det)

        self.net = caffe.Net(self.args.deploy_det, self.args.weights_det,
                             caffe.TEST)
        # self.Onet_p = ONet_Points(self.args.deploy_det_land, self.args.weights_det_land)
        print('\t deploy_det:{} is used'.format(self.args.deploy_det))
        print('\t weights_det:{} is used'.format(self.args.weights_det))
        self.aligner = Aligner(self.args.seeta_land)
Example #9
def test_aligner():
    print('test aligner:')
    # load model
    detector = Detector()
    detector.set_min_face_size(30)
    aligner = Aligner()

    image_color = Image.open('data/chloecalmon.png').convert('RGB')
    image_gray = image_color.convert('L')
    faces = detector.detect(image_gray)
    draw = ImageDraw.Draw(image_color)
    draw.ellipse((0, 0, 40, 80), fill=128)
    for face in faces:
        landmarks = aligner.align(image_gray, face)
        for point in landmarks:
            x1, y1 = point[0] - 2, point[1] - 2
            x2, y2 = point[0] + 2, point[1] + 2
            draw.ellipse((x1, y1, x2, y2), fill='red')
    image_color.show()

    aligner.release()
    detector.release()
Example #10
def test_aligner():
    print('test aligner:')
    # load model
    detector = Detector()
    detector.set_min_face_size(30)
    aligner = Aligner()

    image_color = cv2.imread('data/chloecalmon.png', cv2.IMREAD_COLOR)
    image_gray = cv2.cvtColor(image_color, cv2.COLOR_BGR2GRAY)

    faces = detector.detect(image_gray)

    for face in faces:
        landmarks = aligner.align(image_gray, face)
        for point in landmarks:
            cv2.circle(image_color, point, 1, (0, 255, 0), 2)

    cv2.imshow('test aligner', image_color)
    cv2.waitKey(0)

    aligner.release()
    detector.release()
Example #11
def test_aligner(model_path, img_name):
    print('test aligner:')
    if model_path is None:
        print('test_aligner: Please specify the path to the model folder')
        return
    if img_name is None:
        print('test_aligner: Please specify the path to the image file')
        return
    assert os.path.isdir(
        model_path
    ) is True, 'test_aligner: The model file path should be a folder'
    assert os.path.isfile(img_name) is True, 'test_aligner: no such file'

    # load model
    model_path_fd = model_path + '/seeta_fd_frontal_v1.0.bin'
    detector = Detector(model_path_fd)
    detector.set_min_face_size(30)
    model_path_fa = model_path + '/seeta_fa_v1.1.bin'
    aligner = Aligner(model_path_fa)

    image_color = Image.open(img_name).convert('RGB')
    image_gray = image_color.convert('L')
    print(np.array(image_gray))
    faces = detector.detect(image_gray)
    draw = ImageDraw.Draw(image_color)
    draw.ellipse((0, 0, 40, 80), fill=128)
    for face in faces:
        landmarks = aligner.align(image_gray, face)
        for point in landmarks:
            x1, y1 = point[0] - 2, point[1] - 2
            x2, y2 = point[0] + 2, point[1] + 2
            draw.ellipse((x1, y1, x2, y2), fill='red')
    image_color.show()

    aligner.release()
    detector.release()
Example #12
def test_aligner(model_path, img_name):
    print('test aligner:')

    if model_path is None:
        print('test_aligner: Please specify the path to the model folder')
        return
    if img_name is None:
        print('test_aligner: Please specify the path to the image file')
        return
    assert os.path.isdir(
        model_path
    ) is True, 'test_aligner: The model file path should be a folder'
    assert os.path.isfile(img_name) is True, 'test_aligner: no such file'

    # load model
    model_path_fd = model_path + '/seeta_fd_frontal_v1.0.bin'
    detector = Detector(model_path_fd)
    detector.set_min_face_size(30)
    model_path_fa = model_path + '/seeta_fa_v1.1.bin'
    aligner = Aligner(model_path_fa)

    image_color = cv2.imread(img_name, cv2.IMREAD_COLOR)
    image_gray = cv2.cvtColor(image_color, cv2.COLOR_BGR2GRAY)

    faces = detector.detect(image_gray)

    for face in faces:
        landmarks = aligner.align(image_gray, face)
        for point in landmarks:
            cv2.circle(image_color, point, 1, (0, 255, 0), 2)

    cv2.imshow('test aligner', image_color)
    cv2.waitKey(0)

    aligner.release()
    detector.release()
Example #13
def recog(request):
    try:
        global avdata, name_img

        detector = Detector('SeetaFaceEngine/model/seeta_fd_frontal_v1.0.bin')
        aligner = Aligner('SeetaFaceEngine/model/seeta_fa_v1.1.bin')
        identifier = Identifier('SeetaFaceEngine/model/seeta_fr_v1.0.bin')
        detector.set_min_face_size(30)

        if request.method == 'POST':
            path = tempIMG(img=request.FILES['img'], )
            path.save()
            image_color_A = imread(str(path.img))
            image_gray_A = cv2.cvtColor(image_color_A, cv2.COLOR_BGR2GRAY)
            faces_A = detector.detect(image_gray_A)
            cv2.rectangle(image_color_A, (faces_A[0].left, faces_A[0].top),
                          (faces_A[0].right, faces_A[0].bottom), (0, 255, 0),
                          thickness=2)
            cv2.imwrite('facerecog/static/facerecog/' + 'img.jpg',
                        image_color_A)
            if len(faces_A):
                landmarks_A = aligner.align(image_gray_A, faces_A[0])
                feat_test = identifier.extract_feature_with_crop(
                    image_color_A, landmarks_A)

            average_sim_list = []
            name_list = []
            sim_list = []

            for cla in avdata:
                simlist = []
                name_list.append(cla)
                for fea in avdata[cla]:
                    sim = feat_match(feat_test, fea)
                    simlist.append(sim)
                    # Scores above 0.5/0.55/0.6 are appended again so that
                    # strong matches weigh more heavily in the class average.
                    if sim > 0.5:
                        simlist.append(sim)
                    if sim > 0.55:
                        simlist.append(sim)
                    if sim > 0.6:
                        simlist.append(sim)
                sim_list.append(simlist)
                if len(simlist) == 0:
                    average_sim_list.append(0)
                else:
                    average_sim = sum(simlist) / len(simlist)
                    average_sim_list.append(average_sim)

            # print(average_sim_list)
            max_index = average_sim_list.index(max(average_sim_list))

            sort_list = sorted(average_sim_list)
            result_list = []
            for j in range(5):
                result_list.append(name_list[average_sim_list.index(
                    sort_list[-(j + 1)])])

            print(name_list[max_index])
            print(average_sim_list[max_index])
        identifier.release()
        aligner.release()
        detector.release()

        name = str(request.FILES['img'])
        print(name)
        file_name = []
        file_name.append(name)

        print(result_list)
        name_img = np.load('name_img.npy').item()
        print(name_img[result_list[0]])
        img_link = []
        for name in result_list:
            img_link.append(name_img[name])

        content = {
            'result_list': result_list,
            'file_name': file_name,
            'img_link': img_link
        }

        # return HttpResponse(json.dumps(content,ensure_ascii=False))
        return render(request, 'facerecog/match.html', content)
    except Exception:
        return HttpResponse('Please upload an image that contains a face!')
Example #14
def test_identifier(model_path, img1_name, img2_name):
    print('test identifier:')
    if model_path is None:
        print('test_identifier: Please specify the path to the model folder')
        return
    if img1_name is None:
        print('test_identifier: Please specify the path to the image file1')
        return
    if img2_name is None:
        print('test_identifier: Please specify the path to the image file2')
        return
    assert os.path.isdir(
        model_path
    ) is True, 'test_identifier: The model file path should be a folder'
    assert os.path.isfile(
        img1_name) is True, 'test_identifier: no such image file1'
    assert os.path.isfile(
        img2_name) is True, 'test_identifier: no such image file2'

    model_path_fd = model_path + '/seeta_fd_frontal_v1.0.bin'
    model_path_fa = model_path + '/seeta_fa_v1.1.bin'
    model_path_fr = model_path + '/seeta_fr_v1.0.bin'
    detector = Detector(model_path_fd)
    detector.set_min_face_size(30)
    aligner = Aligner(model_path_fa)
    identifier = Identifier(model_path_fr)

    # load image
    image_color_A = Image.open(img1_name).convert('RGB')
    image_gray_A = image_color_A.convert('L')
    image_color_B = Image.open(img2_name).convert('RGB')
    image_gray_B = image_color_B.convert('L')

    # detect face in image
    faces_A = detector.detect(image_gray_A)
    faces_B = detector.detect(image_gray_B)

    draw_A = ImageDraw.Draw(image_color_A)
    draw_B = ImageDraw.Draw(image_color_B)

    if len(faces_A) and len(faces_B):
        landmarks_A = aligner.align(image_gray_A, faces_A[0])
        featA = identifier.extract_feature_with_crop(image_color_A,
                                                     landmarks_A)
        draw_A.rectangle([(faces_A[0].left, faces_A[0].top),
                          (faces_A[0].right, faces_A[0].bottom)],
                         outline='green')

        sim_list = []
        for face in faces_B:
            landmarks_B = aligner.align(image_gray_B, face)
            featB = identifier.extract_feature_with_crop(
                image_color_B, landmarks_B)
            sim = identifier.calc_similarity(featA, featB)
            sim_list.append(sim)
        print('sim: {}'.format(sim_list))
        index = np.argmax(sim_list)
        for i, face in enumerate(faces_B):
            color = 'green' if i == index else 'red'
            draw_B.rectangle([(face.left, face.top),
                              (face.right, face.bottom)],
                             outline=color)

    image_color_A.show()
    image_color_B.show()

    identifier.release()
    aligner.release()
    detector.release()
Example #15
from os.path import exists  # , isfile, join

app = Flask(__name__)#, static_folder='static', static_url_path='')

ALI_MODEL_PATH = "models/seeta_fa_v1.1.bin"
DET_MODEL_PATH = "models/seeta_fd_frontal_v1.0.bin"
REC_MODEL_PATH = "models/seeta_fr_v1.0.bin"

from pyseeta import Detector
from pyseeta import Aligner
from pyseeta import Identifier

UPLOAD_FOLDER = 'examples/uploads'
WEBFILE_FOLDER = 'webfiles'
app.detector = Detector(DET_MODEL_PATH)
app.aligner = Aligner(ALI_MODEL_PATH)
app.identifier = Identifier(REC_MODEL_PATH)

app.results = []
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['WEBFILE_FOLDER'] = WEBFILE_FOLDER
app.config["CACHE_TYPE"] = "null"
app.config['ALLOWED_EXTENSIONS'] = set(['txt', 'png', 'jpg', 'jpeg', 'gif'])
app.config['MAX_CONTENT_LENGTH'] = 3 * 1024 * 1024    # 3 MB limit
app.config['1.img'] = app.config['UPLOAD_FOLDER'] + "/1.img"
app.config['2.img'] = app.config['UPLOAD_FOLDER'] + "/2.img"
app.image_fn = os.path.join(app.config['UPLOAD_FOLDER'], "image.jpg")
app.result_fn = os.path.join(app.config['UPLOAD_FOLDER'], "result.txt")
app.filename = ""
        
def allowed_file(filename):
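    # The original snippet is cut off here; the body below is a typical
    # implementation assumed from the ALLOWED_EXTENSIONS setting above,
    # not the original code.
    return ('.' in filename and
            filename.rsplit('.', 1)[1].lower() in app.config['ALLOWED_EXTENSIONS'])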
Example #16
    def __init__(self):
        self.detector = Detector()
        self.detector.set_min_face_size(30)
        self.aligner = Aligner()
        self.identifier = Identifier()
Example #17
def test_identifier(model_path, img1_name, img2_name):
    print('test identifier:')
    if model_path is None:
        print('test_identifier: Please specify the path to the model folder')
        return
    if img1_name is None:
        print('test_identifier: Please specify the path to the image file1')
        return
    if img2_name is None:
        print('test_identifier: Please specify the path to the image file2')
        return
    assert os.path.isdir(
        model_path
    ) is True, 'test_identifier: The model file path should be a folder'
    assert os.path.isfile(
        img1_name) is True, 'test_identifier: no such image file1'
    assert os.path.isfile(
        img2_name) is True, 'test_identifier: no such image file2'

    # load model
    model_path_fd = model_path + '/seeta_fd_frontal_v1.0.bin'
    model_path_fa = model_path + '/seeta_fa_v1.1.bin'
    model_path_fr = model_path + '/seeta_fr_v1.0.bin'
    detector = Detector(model_path_fd)
    aligner = Aligner(model_path_fa)
    identifier = Identifier(model_path_fr)

    # load image
    image_color_A = cv2.imread(img1_name, cv2.IMREAD_COLOR)
    image_gray_A = cv2.cvtColor(image_color_A, cv2.COLOR_BGR2GRAY)
    image_color_B = cv2.imread(img2_name, cv2.IMREAD_COLOR)
    image_gray_B = cv2.cvtColor(image_color_B, cv2.COLOR_BGR2GRAY)
    # detect face in image
    faces_A = detector.detect(image_gray_A)
    faces_B = detector.detect(image_gray_B)

    if len(faces_A) and len(faces_B):
        landmarks_A = aligner.align(image_gray_A, faces_A[0])
        featA = identifier.extract_feature_with_crop(image_color_A,
                                                     landmarks_A)
        cv2.rectangle(image_color_A, (faces_A[0].left, faces_A[0].top),
                      (faces_A[0].right, faces_A[0].bottom), (0, 255, 0),
                      thickness=2)
        sim_list = []
        for face in faces_B:
            landmarks_B = aligner.align(image_gray_B, face)
            featB = identifier.extract_feature_with_crop(
                image_color_B, landmarks_B)
            sim = identifier.calc_similarity(featA, featB)
            sim_list.append(sim)
        print('sim: {}'.format(sim_list))
        index = np.argmax(sim_list)
        for i, face in enumerate(faces_B):
            color = (0, 255, 0) if i == index else (0, 0, 255)
            cv2.rectangle(image_color_B, (face.left, face.top),
                          (face.right, face.bottom),
                          color,
                          thickness=2)
    cv2.imshow('single', image_color_A)
    cv2.imshow('double', image_color_B)
    cv2.waitKey(0)

    identifier.release()
    aligner.release()
    detector.release()