Example #1
File: core.py  Project: HarryZhu/elk
 def _set_face_recognition_model(self):
     fr = self.getConfig('dataset','dataset_face_recognition')
     if os.path.isfile(fr):
         self.face_recognition_model = dlib.face_recognition_model_v1(fr)
     else:
         print(fr + " is invalid, please check your config file.")
         logging.error(fr + " is invalid, please check your config file.")
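For orientation, here is a minimal sketch of the dlib pipeline all of these examples share (detector, shape predictor, then the 128-D descriptor). The image path and model paths below are placeholders; the two .dat files can be downloaded from http://dlib.net/files/.

import dlib
import numpy as np

detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
facerec = dlib.face_recognition_model_v1("dlib_face_recognition_resnet_model_v1.dat")

img = dlib.load_rgb_image("face.jpg")  # placeholder image
dets = detector(img, 1)                # upsample once to catch smaller faces
if dets:
    shape = sp(img, dets[0])           # 68 landmarks for the first face
    descriptor = np.array(facerec.compute_face_descriptor(img, shape))
    print(descriptor.shape)            # (128,)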
Example #2
def dlib_generate_500_without_detect(query_paths, dataset_paths, nodetected_dict):
    predictor_dat_path = r"./shape_predictor_68_face_landmarks.dat"
    # face_rec_dat_path = r"./orig_dlib_face_recognition_resnet_model_v1.dat"
    # face_rec_dat_path = r"./1000_dlib_face_recognition_resnet_model_v1.dat"
    # face_rec_dat_path = r"./4000_dlib_face_recognition_resnet_model_v1.dat"
    # face_rec_dat_path = r"./10000_dlib_face_recognition_resnet_model_v1.dat"
    # face_rec_dat_path = r"./20000_dlib_face_recognition_resnet_model_v1.dat"
    face_rec_dat_path = r"./origplus20000_dlib_face_recognition_resnet_model_v1.dat"
    # face_rec_dat_path = r"./10x10origplus20000_dlib_face_recognition_resnet_model_v1.dat"


    shape_predictor = dlib.shape_predictor(predictor_dat_path)
    face_rec = dlib.face_recognition_model_v1(face_rec_dat_path)
    embedding_dict = {}

    for i, img_path in enumerate(query_paths + dataset_paths):
        img = io.imread(img_path)
        key = os.path.basename(img_path)

        if i % 1000 == 0:
            print(i, time.asctime(time.localtime(time.time())))
        det = dlib.rectangle(1, 1, 1 + 180, 1 + 180)  # assume a fixed 180x180 face box; no detector is run here

        shape = shape_predictor(img, det)
        face_embedding = face_rec.compute_face_descriptor(img, shape)
        embedding_dict[key] = np.array(face_embedding)

    print("embedding geranating finished", time.asctime(time.localtime(time.time())))
    ground_truth_file = open("ground_truth.txt", "w")
    result_file = open("test_result.txt", "w")
    qkey_list = []
    dkey_list = []
    embeddingq = []
    embeddingd = []
    for qkey in query_paths:
        qqkey = os.path.basename(qkey)
        for dkey in dataset_paths:
            ddkey = os.path.basename(dkey)
            qkey_list.append(qqkey)
            dkey_list.append(ddkey)
            embeddingq.append(embedding_dict[qqkey])
            embeddingd.append(embedding_dict[ddkey])
            if qqkey[1:] == ddkey[1:]:
                ground_truth_file.write(qqkey + "," + ddkey + "\n")

    diff = np.subtract(embeddingq, embeddingd)
    dist = np.sum(np.square(diff), 1)

    for i, _ in enumerate(qkey_list):
        result_file.write(qkey_list[i] + "," + dkey_list[i] + "," + str(1.0 / dist[i]) + "\n")

    ground_truth_file.close()
    result_file.close()
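One caveat in the scoring step above: 1.0 / dist[i] becomes infinite (with a NumPy divide-by-zero warning) whenever a query and a dataset image yield identical embeddings, e.g. when the same file appears in both lists. A guarded variant, as a sketch; the epsilon value is an arbitrary choice:

import numpy as np

def similarity_scores(embeddingq, embeddingd, eps=1e-12):
    # Squared Euclidean distance per pair, inverted into a similarity score.
    diff = np.subtract(embeddingq, embeddingd)
    dist = np.sum(np.square(diff), axis=1)
    return 1.0 / np.maximum(dist, eps)  # clamp to avoid division by zero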
Example #3
def dlib_generate_embedding(image_paths, actual_issame, embedding_size=128):
    predictor_dat_path = r"./shape_predictor_68_face_landmarks.dat"
    # face_rec_dat_path = r"./orig_dlib_face_recognition_resnet_model_v1.dat"
    # face_rec_dat_path = r"./1000_dlib_face_recognition_resnet_model_v1.dat"
    # face_rec_dat_path = r"./4000_dlib_face_recognition_resnet_model_v1.dat"
    # face_rec_dat_path = r"./10000_dlib_face_recognition_resnet_model_v1.dat"
    # face_rec_dat_path = r"./20000_dlib_face_recognition_resnet_model_v1.dat"
    # face_rec_dat_path = r"./origplus20000_dlib_face_recognition_resnet_model_v1.dat"
    face_rec_dat_path = r"./10x10origplus20000_dlib_face_recognition_resnet_model_v1.dat"

    # predictor_dat_path = r"C:\sources\dlib\examples\build\Release\shape_predictor_68_face_landmarks.dat"
    # face_rec_dat_path = r"C:\sources\dlib\examples\build\Release\orig_dlib_face_recognition_resnet_model_v1.dat"

    detector = dlib.get_frontal_face_detector()
    shape_predictor = dlib.shape_predictor(predictor_dat_path)
    face_rec = dlib.face_recognition_model_v1(face_rec_dat_path)
    embedding_list = []
    non_only_one_list = []

    for i, img_path in enumerate(image_paths):
        img = io.imread(img_path)
        dets = detector(img, 1)
        if i % 1000 == 0:
            print(i, time.asctime(time.localtime(time.time())))
        if len(dets) != 1:
            # print('%s failed on face detection, please check it.' % img_path)
            # If we don't detect exactly one face, skip this image for now.
            non_only_one_list.append(i)
            embedding_list.append(None)
            continue
        shape = shape_predictor(img, dets[0])
        face_embedding = face_rec.compute_face_descriptor(img, shape)
        embedding_list.append(np.array(face_embedding))

    non_only_one_list.reverse()
    print("skip %i pairs" % len(non_only_one_list))
    for del_id in non_only_one_list:
        # !!! Be careful here. !!!
        # 1) This relies on Python 2 integer division; on Python 3 use
        #    del_id // 2, since / returns a float that cannot index a list.
        # 2) The order matters: non_only_one_list was reversed above, so we
        #    delete from the end and earlier indices stay valid.
        real_id = del_id / 2
        del actual_issame[real_id]
        del embedding_list[real_id*2 + 1]
        del embedding_list[real_id*2]

    return embedding_list, actual_issame
Example #4
    def __init__(self, landmarks=None, embedding=None):
        """Face detection

        Parameters
        ----------
        landmarks : str
            Path to dlib's 68 facial landmarks predictor model.
        embedding : str
            Path to dlib's face embedding model.
        """
        super(Face, self).__init__()

        # face detection
        self.face_detector_ = dlib.get_frontal_face_detector()

        # landmark detection
        if landmarks is not None:
            self.shape_predictor_ = dlib.shape_predictor(landmarks)

        # face embedding
        if embedding is not None:
            self.face_recognition_ = dlib.face_recognition_model_v1(embedding)
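A hypothetical usage sketch for this class; the two model paths are assumptions (dlib's downloadable .dat files), and dlib is assumed imported as in the class module:

face = Face(landmarks="shape_predictor_68_face_landmarks.dat",
            embedding="dlib_face_recognition_resnet_model_v1.dat")
img = dlib.load_rgb_image("someone.jpg")  # placeholder image
for rect in face.face_detector_(img, 1):
    shape = face.shape_predictor_(img, rect)
    descriptor = face.face_recognition_.compute_face_descriptor(img, shape)  # 128-D vector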
Example #5
 def __init__(self):
     self.cap = cv2.VideoCapture(0)
     self.detector = dlib.get_frontal_face_detector()
     #self.cascade = cv2.CascadeClassifier("data/haarcascade_frontalface_default.xml")
     self.predictor = dlib.shape_predictor("data/shape_predictor_68_face_landmarks.dat")
     self.face_rec = dlib.face_recognition_model_v1("data/dlib_face_recognition_resnet_model_v1.dat")
Example #6
    def dlib_para_init(self):

        # Face landmark predictor
        #  face_landmark_path    = '/home/dx/Desktop/detect_face_pyqt5/shape_predictor_68_face_landmarks.dat'
        face_landmark_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                          'shape_predictor_68_face_landmarks.dat')

        # Face recognition model
        #  face_recognize_path   = '/home/dx/Desktop/detect_face_pyqt5/dlib_face_recognition_resnet_model_v1.dat'
        face_recognize_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                           'dlib_face_recognition_resnet_model_v1.dat')

        # Candidate faces folder
        #  faces_folder_path     = '/home/dx/Desktop/detect_face_pyqt5/candidate-faces'
        self.faces_folder_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                              'candidate-faces/')
        # 1. Load the frontal face detector
        self.detector = dlib.get_frontal_face_detector()
        # 2. Load the face landmark predictor
        self.landmark = dlib.shape_predictor(face_landmark_path)
        # 3. Load the face recognition model
        self.facerec = dlib.face_recognition_model_v1(face_recognize_path)

        # List of candidate face descriptors
        self.descriptors = []
        # For every face in the folder, perform:

        # 1. face detection
        # 2. landmark detection
        # 3. descriptor extraction
        time_flag = []  # capture times parsed from the photo file names
        file_glob = os.path.join(self.faces_folder_path, "*.jpg")
        file_list = []
        file_list.extend(glob.glob(file_glob))
        print(file_list)
        for i in range(0, len(file_list)):
            tmp = str(file_list[i])
            tmp_1 = tmp[51:65]  # slice out the timestamp substring (path-length dependent)
            time_flag.append(tmp_1)
        #print(time_flag)
        cand_d = dict(zip(file_list, time_flag))
        cand_sorted = sorted(cand_d.items(), key=lambda d: d[1])  # sort by the timestamp value
        # print(cand_sorted)

        for f in range(0, len(cand_sorted)):

            print("Processing file: {}".format(cand_sorted[f][0]))
            self.img = io.imread(cand_sorted[f][0])
            # win.clear_overlay()
            # win.set_image(img)

            # 1. Face detection
            dets = self.detector(self.img, 1)  # face localization
            print("Number of faces detected: {}".format(len(dets)))

            for k, d in enumerate(dets):
                # 2. Landmark detection
                shape = self.landmark(self.img, d)
                # Draw the face region and landmarks
                # win.clear_overlay()
                # win.add_overlay(d)
                # win.add_overlay(shape)

                # 3. Descriptor extraction, a 128-D vector
                face_descriptor = self.facerec.compute_face_descriptor(self.img, shape)

                # Convert to a numpy array
                v = np.array(face_descriptor)
                self.descriptors.append(v)

        # The faces to be recognized get the same processing:
        # descriptor extraction (not repeated here).

        # Candidate name list
        # self.candidate = ['dwh', 'whr', 'zjr', 'dx', 'dx', 'whr', 'dx']
        self.candidate = self.readfile()  # read the candidate names from name.txt
        print(self.candidate)
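The tmp[51:65] slice above only works for one specific absolute-path length. A path-independent alternative, sketched under the assumption that the timestamp occupies the first 14 characters of each file name (an assumption about this project's naming scheme, not confirmed by the source):

import os

def timestamp_key(path):
    # e.g. '20190101123000_dx.jpg' -> '20190101123000' (hypothetical name format)
    return os.path.basename(path)[:14]

file_list.sort(key=timestamp_key)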
Example #7

def face_distance(face_encodings, face_to_compare):
    if len(face_encodings) == 0:
        return np.empty((0))

    return np.linalg.norm(face_encodings - face_to_compare, axis=1)


def get_coords(p1):
    try:
        return int(p1[0][0][0]), int(p1[0][0][1])
    except:
        return int(p1[0][0]), int(p1[0][1])


face_detector = dlib.get_frontal_face_detector()
predictor_model = "shape_predictor_68_face_landmarks.dat"
face_pose_predictor = dlib.shape_predictor(predictor_model)
face_encoder = dlib.face_recognition_model_v1(
    'dlib_face_recognition_resnet_model_v1.dat')

lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10,
                           0.03))
max_head_movement = 20
movement_threshold = 50
gesture_threshold = 175
face_found = False
frame_num = 0
cap = cv2.VideoCapture(0)

while frame_num < 30:
    # Take first frame and find corners in it
    frame_num += 1
Example #8
def reco(content):
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(
        'model/shape_predictor_68_face_landmarks.dat')
    facerec = dlib.face_recognition_model_v1(
        "model/dlib_face_recognition_resnet_model_v1.dat")

    now_time = time.time()

    while cap.isOpened():
        flag, im_rd = cap.read()
        dets = detector(im_rd)
        # "Please clock in one at a time, not in groups; once you see your
        # check-in succeed you may leave."
        tip = '请有序打卡,不要多人一起打卡,看到自己打卡成功即可离开'
        im_rd = cv2ImgAddText(im_rd,
                              tip,
                              0,
                              100,
                              textColor=(0, 0, 255),
                              textSize=23)
        if len(dets) != 0:
            biggest_face = dets[0]
            # keep the face with the largest area
            maxArea = 0
            for det in dets:
                w = det.right() - det.left()
                h = det.bottom() - det.top()  # was top() - bottom(), which is negative
                if w * h > maxArea:
                    biggest_face = det
                    maxArea = w * h
            cv2.rectangle(im_rd,
                          tuple([biggest_face.left(),
                                 biggest_face.top()]),
                          tuple([biggest_face.right(),
                                 biggest_face.bottom()]), (255, 0, 0), 2)
            # Compute the features of the captured face and store them in features_cap
            if (time.time() - now_time) > 2:
                shape = predictor(im_rd, biggest_face)
                global features_cap
                features_cap = facerec.compute_face_descriptor(im_rd, shape)
                for key, value in database.items():
                    compare = return_euclidean_distance(features_cap, value)
                    if compare == "same":  # 找到了相似脸
                        study_time, ret = take_time(key)
                        # ret: 1 = clocked in, 2 = clocked out,
                        # 3/4 = already clocked in/out (duplicate)
                        if ret == 1:
                            content.setText(
                                str(key + '签到打卡成功现在时间为: ' +
                                    str(datetime.datetime.now())))
                        elif ret == 2:
                            content.setText(
                                str(key + '签退打卡成功现在时间为: ' +
                                    str(datetime.datetime.now())))
                        elif ret == 3:
                            content.setText(
                                str(key + '已经签到打卡成功现在时间为: ' +
                                    str(datetime.datetime.now())) + '请勿重复打卡!')
                        elif ret == 4:
                            content.setText(
                                str(key + '已经签退打卡成功现在时间为: ' +
                                    str(datetime.datetime.now())) + '请勿重复打卡!')
                        if (not (dic_time.__contains__(key))):
                            dic_time[key] = [study_time]
                        else:
                            if (study_time != 0):
                                dic_time[key].append(study_time)
                now_time = time.time()

        cv2.imshow('face_recognition', im_rd)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            now_study_time = {}
            a = []
            b = []
            for key, value in dic_time.items():
                now_study_time[key] = sec_to_data(sum(value))
            print(now_study_time)
            for key, values in now_study_time.items():
                a.append(key)
                b.append(values)
            dataframe = pd.DataFrame({'name': a, 'time': b})
            now = datetime.datetime.now()
            str2 = str(now.month) + '-' + str(now.day)
            fd = open((str2 + '.csv'), mode="w")
            dataframe.to_csv((str2 + '.csv'), encoding="utf_8_sig")
            fd.close()
            break
    cap.release()
    cv2.destroyAllWindows()
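Once the height bug above is fixed, the largest-face scan can also be written in a single expression (an equivalent sketch):

biggest_face = max(dets, key=lambda d: (d.right() - d.left()) * (d.bottom() - d.top()))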
Example #9
from PIL import Image, ImageDraw, ImageFont
from subprocess import call
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
from gpiozero import LED
from time import sleep

# Dlib frontal face detector
detector = dlib.get_frontal_face_detector()

# Dlib face landmark predictor
predictor = dlib.shape_predictor('data/data_dlib/shape_predictor_68_face_landmarks.dat')

# Dlib ResNet face recognition model; extracts a 128-D feature vector
face_reco_model = dlib.face_recognition_model_v1("data/data_dlib/dlib_face_recognition_resnet_model_v1.dat")

# Load the Caffe face detector
protoPath = os.path.join("face_detector", "deploy.prototxt")
modelPath = os.path.join("face_detector", "res10_300x300_ssd_iter_140000.caffemodel")
net = cv2.dnn.readNetFromCaffe(protoPath, modelPath)

# Load the liveness (real-face) detection model and its labels
model = load_model("liveness.model")
le = pickle.loads(open("le.pickle", "rb").read())

# load OpenCV's Haar cascade for face detection (which is faster than
# dlib's built-in HOG detector, but less accurate), then create the facial landmark predictor
print("[INFO] loading facial landmark predictor...")
detector_eye = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
Example #10
def main(mode='test', img_path='def'):
    t = time.perf_counter()  # time.clock() was removed in Python 3.8
    classes = [
        'MXG', 'Sanaken', 'Zofinka', 'Toalk', 'Zissxzirsziiss', 'kiasummer'
    ]

    known_face_encodes = [
        np.loadtxt(MAIN_PATH + '/persons/MXG/fv.txt'),
        np.loadtxt(MAIN_PATH + '/persons/Sanaken/fv.txt'),
        np.loadtxt(MAIN_PATH + '/persons/Zofinka/fv.txt'),
        np.loadtxt(MAIN_PATH + '/persons/Toalk/fv.txt'),
        np.loadtxt(MAIN_PATH + '/persons/Zissxzirsziiss/fv.txt'),
        np.loadtxt(MAIN_PATH + '/persons/kiasummer/fv.txt')
    ]

    known_face_encodes = np.reshape(known_face_encodes, (6, 5, 128))

    # get image
    if img_path == 'def':
        image = cv2.imread('team.jpg', 1)
    else:
        image = cv2.imread(img_path, 1)

    # output
    init_align_faces = []
    out_arr = []

    # get bboxes
    fd = FaceDetector()

    conf, faceboxes, mboxes = fd.get_faceboxes(image)

    color = [255, 255, 255]

    # border widths; set them all to 120
    top, bottom, left, right = [120] * 4

    image = cv2.copyMakeBorder(image,
                               top,
                               bottom,
                               left,
                               right,
                               cv2.BORDER_CONSTANT,
                               value=color)
    bbox_mark_image = image.copy()

    # get alignment model
    predictor_model = MAIN_PATH + "/models/shape_predictor_68_face_landmarks.dat"
    face_pose_predictor = dlib.shape_predictor(predictor_model)
    face_aligner = openface.AlignDlib(predictor_model)

    # init prediction model
    predictor_5_point_model = face_recognition_models.pose_predictor_five_point_model_location(
    )
    pose_predictor_5_point = dlib.shape_predictor(predictor_5_point_model)
    face_recognition_model = face_recognition_models.face_recognition_model_location(
    )
    face_encoder = dlib.face_recognition_model_v1(face_recognition_model)

    for i in range(len(faceboxes)):
        # get dlib rectangle from facebox
        face_rect = dlib.rectangle(faceboxes[i][0], faceboxes[i][1],
                                   faceboxes[i][2], faceboxes[i][3])

        # Get the face's pose
        pose_landmarks = face_pose_predictor(image, face_rect)

        # Use openface to calculate and perform the face alignment
        alignedFace = face_aligner.align(
            534,
            image,
            face_rect,
            landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
        alignedFace_out = cv2.resize(alignedFace,
                                     (faceboxes[i][2] - faceboxes[i][0],
                                      faceboxes[i][3] - faceboxes[i][1]))
        initFace_out = image[faceboxes[i][1]:faceboxes[i][3],
                             faceboxes[i][0]:faceboxes[i][2]]
        init_align_faces.append([initFace_out, alignedFace_out])

        # draw marks
        parts = pose_landmarks.parts()
        FaceDetector.draw_marks(bbox_mark_image, parts)

        # get face landmarks for feature extraction
        landmark_set = pose_predictor_5_point(
            alignedFace,
            # rectangle(left, top, right, bottom): width is shape[1], height is shape[0]
            dlib.rectangle(0, 0, alignedFace.shape[1], alignedFace.shape[0]))

        # get feature vector
        feature_vector = np.array(
            face_encoder.compute_face_descriptor(alignedFace, landmark_set, 1))

        # known_face_encode = np.loadtxt('persons/MXG/fv.txt')
        ind = compare_faces(known_face_encodes, feature_vector)
        if (ind != -1):
            face_class = classes[ind]
            colour = (0, 255, 0)
        else:
            face_class = "Unknown"
            colour = (0, 0, 255)
        sh = max(image.shape[0], image.shape[1])
        mult = sh / 500
        if mult < 1:
            mult = 1
        cv2.putText(image, face_class, (faceboxes[i][0], faceboxes[i][1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5 * mult, colour, 1)
        cv2.rectangle(bbox_mark_image, (faceboxes[i][0], faceboxes[i][1]),
                      (faceboxes[i][2], faceboxes[i][3]), (0, 255, 0))
        cv2.rectangle(image, (faceboxes[i][0], faceboxes[i][1]),
                      (faceboxes[i][2], faceboxes[i][3]), (0, 255, 0))
        #cv2.rectangle(image, (mboxes[i][0], mboxes[i][1]), (mboxes[i][2], mboxes[i][3]), (0, 255, 0))
        #out_arr.append({'x1': faceboxes[i][0], 'y1': faceboxes[i][1], 'x2': faceboxes[i][2], 'y2': faceboxes[i][3], 'class': face_class, 'conf': conf[i]})
        out_arr.append({
            'x1': mboxes[i][0],
            'y1': mboxes[i][1],
            'x2': mboxes[i][2],
            'y2': mboxes[i][3],
            'class': face_class,
            'conf': conf[i]
        })

    t = time.perf_counter() - t
    out_imgs = [image, bbox_mark_image, init_align_faces, t]
    if mode == 'test':
        cv2.imshow("Preview", image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    if mode == 'metr':
        #cv2.imshow("Preview", image)
        #cv2.waitKey(0)
        #cv2.destroyAllWindows()
        return out_arr
    if mode == 'process':
        return out_imgs
    if mode == 'def':
        return out_imgs
Example #11
import dlib
import cv2
import numpy as np
predictor = dlib.shape_predictor("./saved_model/shape_predictor_68_face_landmarks.dat")
face_rec_model = dlib.face_recognition_model_v1("./saved_model/dlib_face_recognition_resnet_model_v1.dat")
detector = dlib.get_frontal_face_detector()
frame = cv2.imread(r'./images/person.JPG')
face_rect = detector(frame, 1)[0]
shape = predictor(frame, face_rect)
for pt in shape.parts():
    pt_pos = (pt.x, pt.y)
    cv2.circle(frame, pt_pos, 2, (0, 255, 0), 1)
cv2.imshow("image", frame)
encoding=face_rec_model.compute_face_descriptor(frame, shape)
encoding=np.array(encoding)
print(encoding)
cv2.waitKey()
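A typical next step is comparing this encoding against a stored one. A sketch, where known_encoding.npy is a hypothetical previously saved 128-D vector and 0.6 is the decision threshold suggested in dlib's documentation:

known = np.load("./saved_model/known_encoding.npy")  # hypothetical saved encoding
distance = np.linalg.norm(known - encoding)
print("same person" if distance < 0.6 else "different person")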
Example #12
    print(
        "Please install `face_recognition_models` with this command before using `face_recognition`:"
    )
    print()
    print(
        "pip install git+https://github.com/ageitgey/face_recognition_models")
    quit()

face_detector = dlib.get_frontal_face_detector()

predictor_model = face_recognition_models.pose_predictor_model_location()
pose_predictor = dlib.shape_predictor(predictor_model)

face_recognition_model = face_recognition_models.face_recognition_model_location(
)
face_encoder = dlib.face_recognition_model_v1(face_recognition_model)


def _rect_to_css(rect):
    """
    Convert a dlib 'rect' object to a plain tuple in (top, right, bottom, left) order

    :param rect: a dlib 'rect' object
    :return: a plain tuple representation of the rect in (top, right, bottom, left) order
    """
    return rect.top(), rect.right(), rect.bottom(), rect.left()


def _css_to_rect(css):
    """
    Convert a tuple in (top, right, bottom, left) order to a dlib `rect` object

    :param css: plain tuple representation of the rect in (top, right, bottom, left) order
    :return: a dlib `rect` object
    """
    return dlib.rectangle(css[3], css[0], css[1], css[2])
Example #13
import dlib
import cv2
import numpy as np
from datetime import datetime

#C:\Users\haneu\fin_pro
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(
    'C:/Users/haneu/fin_pro/modle/shape_predictor_68_face_landmarks.dat')
face_recog = dlib.face_recognition_model_v1(
    'C:/Users/haneu/fin_pro/modle/dlib_face_recognition_resnet_model_v1.dat')

# Load the precomputed encodings
# print("20 encodings")
#descs = np.load('C:/Users/haneu/fin_pro/a_project/a_descs.npy', allow_pickle=True)[()]
#descs = np.load('C:/Users/haneu/fin_pro/a_project/a5_descs.npy', allow_pickle=True)[()]
#descs = np.load('C:/Users/haneu/fin_pro/a_project/a10_descs.npy', allow_pickle=True)[()]
descs = np.load('C:/Users/haneu/fin_pro/a_project/a20_descs.npy',
                allow_pickle=True)[()]


# Find faces in the incoming video frames
def encode_faces(image):

    faces = detector(image, 1)

    if len(faces) == 0:
        return np.empty(0)

    for k, d in enumerate(faces):
        land = predictor(image, d)
Example #14
            pic_list.append(img)
    except IOError:
        print('read error')
        return False
    else:
        print('read successfully')
        return pic_name_list, pic_list

# Face detector
detector = dlib.get_frontal_face_detector()

# Landmark predictor
feature_point = dlib.shape_predictor(predictor_path)

# Face recognition model
feature_model = dlib.face_recognition_model_v1(face_rc_model_path)

# List of candidate feature vectors
descriptors = []

if __name__ == '__main__':
    name_list, pic_list = read_data(face_folder_path)
    num = 1
    for i in pic_list:
        # Face detection
        dets = detector(i, 1)

        for k, d in enumerate(dets):
            # Landmark detection
            shape = feature_point(i, d)
Example #15
def get_face_locations(img, times_to_upsample):
    locations_rect = face_detector(img, times_to_upsample)
    locations_tuple = [
        mmodrect_to_tuple(face_location) for face_location in locations_rect
    ]
    return locations_rect, locations_tuple


dirname = os.path.dirname(__file__)
face_detector = dlib.get_frontal_face_detector()
face_detector_cnn = dlib.cnn_face_detection_model_v1(
    dirname + '/model/mmod_human_face_detector.dat')
face_landmark_5_points = dlib.shape_predictor(
    dirname + '/model/shape_predictor_5_face_landmarks.dat')
face_encoder = dlib.face_recognition_model_v1(
    dirname + '/model/dlib_face_recognition_resnet_model_v1.dat')

keep_running = True

guolong_image = cv2.imread(dirname + '/images/guolong.jpg')[:, :, ::-1]
_, guolong_encoding = get_locations_and_encodings(guolong_image)

known_face_encodings = [guolong_encoding[0]]

known_face_names = ['Guolong']


def face_recognition(frame):
    small_frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
    rgb_small_frame = small_frame[:, :, ::-1]
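Detection here runs on a half-size frame, so any rectangles it returns must be scaled back up before drawing on the original frame. A sketch of the step that typically follows, assuming the face_detector defined above:

    for rect in face_detector(rgb_small_frame, 1):
        # multiply by 2 to undo the fx=0.5, fy=0.5 resize
        top, right = rect.top() * 2, rect.right() * 2
        bottom, left = rect.bottom() * 2, rect.left() * 2
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)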
Example #16
            w)  # compute the height ratio and build the new dimensions
        dim = (width, int(h * r))
    resized = cv2.resize(image, dim,
                         interpolation=inter)  # resize the image
    return resized  # return the resized image


dat_path = 'Data/'
mp3_pic_path = 'mp3_pic/'
detector = dlib.get_frontal_face_detector(
)  # create the object that boxes faces with a rectangle
predictor = dlib.shape_predictor(
    dat_path + 'shape_predictor_68_face_landmarks.dat'
)  # load the trained model for facial landmark points
facerec = dlib.face_recognition_model_v1(
    dat_path + 'dlib_face_recognition_resnet_model_v1.dat'
)  # load the trained ResNet recognition weights
video_capture = cv2.VideoCapture(0)  # open the camera
video_capture.set(3, 360)  # set the camera frame size   160x120
video_capture.set(4, 240)  # 360x240
img_path = ['p_D.jpg', 'p_N.jpg', 'p_I.jpg', 'p_V.jpg']  #'p_V.jpg'
fdi = []
audio = ['u_D.mp3', 'u_N.mp3', 'u_I.mp3', 'u_V.mp3', 'u_U.mp3']
names = ['Даня', 'Никита', 'Белла', 'Вадимчик']
ind_u = 0
mixer.init()
for im in img_path:
    img = cv2.imread(mp3_pic_path + im)
    detections = detector(img, 1)  # detect faces as rectangles
    for k, d in enumerate(
            detections):  # loop over every face found in the image
Example #17
f = open('/var/www/html/main_pro/upload/data.txt','r')
data = sj.loads(f.read())
f.close()

fo = open('data.txt', 'r')
brr = sj.loads(fo.read())
fo.close()

a = data['name']
b = data['number']

mdl = fm.pose_predictor_model_location()

frm = fm.face_recognition_model_location()

cnn = dlib.face_recognition_model_v1(frm)

fd = dlib.get_frontal_face_detector()

im = cv.imread('test.jpg')

hi = (200,200)

im = cv.resize(im, hi, interpolation = cv.INTER_AREA)
df = fd(im,1)

if len(df)!=0:
    mk = 'mkdir '+a
    os.system(mk)
    fpose = dlib.shape_predictor(mdl)
    pl = fpose(im,df[0])
Example #18
        "   ./face_recognition.py shape_predictor_68_face_landmarks.dat dlib_face_recognition_resnet_model_v1.dat ../examples/faces\n"
        "You can download a trained facial shape predictor and recognition model from:\n"
        "    http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2\n"
        "    http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2")
    exit()

predictor_path = sys.argv[1]
face_rec_model_path = sys.argv[2]
faces_folder_path = sys.argv[3]

# Load all the models we need: a detector to find the faces, a shape predictor
# to find face landmarks so we can precisely localize the face, and finally the
# face recognition model.
detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(predictor_path)
facerec = dlib.face_recognition_model_v1(face_rec_model_path)

win = dlib.image_window()

# Now process all the images
for f in glob.glob(os.path.join(faces_folder_path, "*.jpg")):
    print("Processing file: {}".format(f))
    img = io.imread(f)

    win.clear_overlay()
    win.set_image(img)

    # Ask the detector to find the bounding boxes of each face. The 1 in the
    # second argument indicates that we should upsample the image 1 time. This
    # will make everything bigger and allow us to detect more faces.
    dets = detector(img, 1)
Example #19
def dlib_face_cluster(pic_path,tmp_path,files_path): 
    global detector,sp,facerec
    predictor_path = files_path + '/shape_predictor_5_face_landmarks.dat'
    face_rec_model_path = files_path + '/dlib_face_recognition_resnet_model_v1.dat'

    faces_folder_path = pic_path
    output_folder_path = tmp_path

    # Load all the models we need: a detector to find the faces, a shape predictor
    # to find face landmarks so we can precisely localize the face, and finally the
    # face recognition model.
    if not 'detector' in globals():
        detector = dlib.get_frontal_face_detector()
    if not 'sp' in globals():
        sp = dlib.shape_predictor(predictor_path)
    if not 'facerec' in globals():    
        facerec = dlib.face_recognition_model_v1(face_rec_model_path)

    raw_image_list = glob.glob(os.path.join(faces_folder_path, "*.jpg"))
    raw_image_list.append(raw_image_list[0])
    counter = 1
    batch_index = 1
    # Now find all the faces and compute 128D face descriptors for each face.
    while True:
        descriptors = []
        images = []
        while True:
        #     print("Processing file: {}".format(f))
            try:
                f = raw_image_list.pop(0)
                counter += 1
            except IndexError:  # list exhausted
                break
            try:
                img = io.imread(f)
            except Exception:
                print('[Debug] : something wrong at io.imread, {0}'.format(f))
                continue

            # Ask the detector to find the bounding boxes of each face. The 1 in the
            # second argument indicates that we should upsample the image 1 time. This
            # will make everything bigger and allow us to detect more faces.
            try:
                dets = detector(img, 1)
            except Exception:
                print('[Debug] : something wrong at detector, {0}'.format(f))
                continue
        #     print("Number of faces detected: {}".format(len(dets)))

            # Now process each face we found.
            for k, d in enumerate(dets):
                # Get the landmarks/parts for the face in box d.
                shape = sp(img, d)

                # Compute the 128D vector that describes the face in img identified by
                # shape.
                try:
                    face_descriptor = facerec.compute_face_descriptor(img, shape)
                    descriptors.append(face_descriptor)
                    images.append((img, shape))
                except Exception:
                    print('[Debug] : something wrong at facerec.compute_face_descriptor, {0}'.format(f))
                    continue
        
        # Now let's cluster the faces.  
        labels = dlib.chinese_whispers_clustering(descriptors, 0.5)
        num_classes = len(set(labels))
        # print("Number of clusters: {}".format(num_classes))
        # Find biggest class
        biggest_class = None
        biggest_class_length = 0
        for i in range(0, num_classes):
            class_length = len([label for label in labels if label == i])
            if class_length > biggest_class_length:
                biggest_class_length = class_length
                biggest_class = i

        # print("Biggest cluster id number: {}".format(biggest_class))
        # print("Number of faces in biggest cluster: {}".format(biggest_class_length))

        # Find the indices for the biggest class
        indices = []
        for i, label in enumerate(labels):
            if label == biggest_class:
                indices.append(i)

        # print("Indices of images in the biggest cluster: {}".format(str(indices)))

        # Ensure output directory exists
        if not os.path.isdir(output_folder_path):
            os.makedirs(output_folder_path)
        
        # Save the extracted faces
        # print("Saving faces in largest cluster to output folder...")
        for i, index in enumerate(indices):
            img, shape = images[index]
            file_path = os.path.join(output_folder_path,'batch{0}_{1}'.format(batch_index,i))
            dlib.save_face_chip(img, shape, file_path)
            break
        batch_index += 1

        if len(raw_image_list) == 0:
            break   
Example #20
def process_video(folder, file_name=None):
    video_src = os.path.join(os.curdir, folder, file_name)
    images_folder = os.path.join(folder, 'frames')
    if not os.path.isdir(images_folder):
        os.mkdir(images_folder)
    if os.path.isfile(video_src):
        print('it is a file: ', video_src)
    else:
        print('no file you n00b', video_src)
    predictor_path = "shape_predictor_68_face_landmarks.dat"
    face_rec_model_path = "dlib_face_recognition_resnet_model_v1.dat"

    detector = dlib.get_frontal_face_detector()
    sp = dlib.shape_predictor(predictor_path)
    facerec = dlib.face_recognition_model_v1(face_rec_model_path)
    cam = cv2.VideoCapture(video_src)
    image_count = 0
    frame_count = 0
    while True:
        frame_count += 1
        if frame_count % 1000 == 1:
            print(
                'frames processed: ', frame_count - 1
            )  # yes, it's off by one, but looks better with whole thousands
        ret, img = cam.read()
        if not ret:
            break
        if not int(cam.get(cv2.CAP_PROP_POS_FRAMES)) % 15 == 0:
            continue

        # Ask the detector to find the bounding boxes of each face. The 1 in the
        # second argument indicates that we should upsample the image 1 time. This
        # will make everything bigger and allow us to detect more faces.
        dets = detector(img, 1)
        # print("Number of faces detected: {}".format(len(dets)))
        if len(dets) > 0:

            # Now process each face we found.
            for k, d in enumerate(dets):
                # print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
                # k, d.left(), d.top(), d.right(), d.bottom()))
                # Get the landmarks/parts for the face in box d.
                shape = sp(img, d)

                # Compute the 128D vector that describes the face in img identified by
                # shape.
                face_id = facerec.compute_face_descriptor(img, shape)
                if not have_face(face_id):
                    print("new face found at frame number {0}".format(
                        frame_count))
                    face_list.append(face_id)  # reuse the descriptor computed above
                    save_file_to = os.path.join(
                        images_folder,
                        "{0}{1}.jpg".format(file_name[:-4], image_count))
                    # save image as file
                    cv2.imwrite(save_file_to, img)
                    image_count += 1

    print("{0} images made out of {1} frames".format(image_count, frame_count))
    cv2.destroyAllWindows()
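have_face and face_list are defined outside this snippet. A plausible sketch of them, assuming the usual 0.6 descriptor-distance threshold (both the helper and the threshold are assumptions, not the author's confirmed code):

face_list = []

def have_face(face_id, threshold=0.6):
    # True if this descriptor is within `threshold` of any face seen so far.
    probe = np.array(face_id)
    return any(np.linalg.norm(probe - np.array(known)) < threshold
               for known in face_list)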
Example #21
import sys
import dlib
import numpy as np
import os
import glob
from skimage import io
import re

current_path = os.getcwd()  # current working directory
predictor_path = current_path + '/model/shape_predictor_68_face_landmarks.dat'  # 1. face landmark predictor
face_rec_model_path = current_path + '/model/dlib_face_recognition_resnet_model_v1.dat'  # 2. face recognition model
face_folder_path = current_path + '/all_face'  # 3. folder with all faces

detector = dlib.get_frontal_face_detector()  # load the frontal face detector
shaper = dlib.shape_predictor(predictor_path)  # load the landmark predictor
facerec = dlib.face_recognition_model_v1(face_rec_model_path)  # load the face recognition model

all_face_d = {}
for img_path in glob.glob(os.path.join(face_folder_path, "*.jpg")):
    print("processing file: {}".format(img_path))
    a = os.path.basename(img_path)
    b = re.compile(r'\d+')
    filename = b.findall(a)[0]
    print(filename)
    img = io.imread(img_path)
    dets = detector(img, 1)

    print("number of faces detected: {}".format(len(dets)))
    for index, face in enumerate(dets):
        shape = shaper(img, face)  # 2. landmark detection
        face_desciptor = facerec.compute_face_descriptor(
Example #22
    def __init__(self,
                 image_topic='/usb_cam/image_raw',
                 predictor_model=pkg_path +
                 '/models/shape_predictor_68_face_landmarks.dat',
                 recognition_model=pkg_path +
                 '/models/dlib_face_recognition_resnet_model_v1.dat',
                 database_names=os.listdir(pkg_path + '/training_data'),
                 n_input=128,
                 n_output=len(os.listdir(pkg_path + '/training_data')),
                 load_model=True,
                 prob_threshhold=0.97):

        # define global variables
        self.n_input = n_input
        self.n_output = n_output
        self.database_names = np.array(database_names)
        self.prob_threshold = prob_threshhold

        # define network for face prediction
        def model(embedding):
            l_in = InputLayer(input_var=embedding, shape=(None, n_input))

            h_1 = DenseLayer(l_in, num_units=64, nonlinearity=rectify)

            h_2 = DenseLayer(h_1, num_units=32, nonlinearity=rectify)

            # NOTE: h_2 is defined but unused; the output layer is wired to h_1.
            probs = DenseLayer(h_1, num_units=n_output, nonlinearity=softmax)

            return probs

        self.X = T.fmatrix()
        self.Y = T.fmatrix()

        self.network_probs = model(self.X)

        # load the trained weights for the network
        try:
            if load_model:
                with np.load(pkg_path + '/models/model.npz') as f:
                    param_values = [
                        f['arr_%d' % i] for i in range(len(f.files))
                    ]
                    set_all_param_values(self.network_probs, param_values)
        except IOError:
            print('run the training script before use!')

        # define the network functions
        self.output = get_output(self.network_probs)

        self.predict_faces = theano.function(inputs=[self.X],
                                             outputs=self.output,
                                             allow_input_downcast=True)

        # define all face recognition functions
        self.face_detector = dlib.get_frontal_face_detector()
        self.face_pose_predictor = dlib.shape_predictor(predictor_model)
        self.face_recognition_model = dlib.face_recognition_model_v1(
            recognition_model)

        # start the ros pipeline
        self.face_features_pub = rospy.Publisher(
            'face_recognition/marked_faces', Image, queue_size=0)
        self.face_labels_pub = rospy.Publisher(
            'face_recognition/labelled_faces', Image, queue_size=0)
        self.face_msgs_pub = rospy.Publisher('face_recognition/detected_faces',
                                             DetectedFaces,
                                             queue_size=0)

        self.bridge = CvBridge()

        self.camera_sub = rospy.Subscriber(image_topic, Image, self.camera_cb)
Example #23
    import argparse
    from face_detector import detect_faces
    from face_embeddings import extract_face_embeddings
    import pickle
    import dlib
    from tqdm import tqdm

    args = parse_args()

    embeddings = np.load(args.embeddings)
    labels = pickle.load(open(args.labels, 'rb'))
    shape_predictor = dlib.shape_predictor(
        "models/"
        "shape_predictor_5_face_landmarks.dat")
    face_recognizer = dlib.face_recognition_model_v1(
        "models/"
        "dlib_face_recognition_resnet_model_v1.dat")

    image_path_list = [l.strip() for l in open(args.input_list).readlines()]
    output_dict = dict()
    for idx, img_path in enumerate(tqdm(image_path_list)):
        try:
            image = cv2.imread(img_path)
            if image is None:
                output_dict[img_path] = []
                continue
            image_original = image.copy()
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

            faces = detect_faces(image)
Example #24
def test():
    # Get dlib's default face detector
    detector = dlib.get_frontal_face_detector()
    # Load the 68-point model via shape_predictor; this is the facial landmark detector
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
    # Face alignment
    fa = FaceAligner(predictor, desiredFaceWidth=256)
    # Load the face recognition model
    facerec = dlib.face_recognition_model_v1(
        "dlib_face_recognition_resnet_model_v1.dat")
    # List of face descriptors to compare against
    descriptors = []
    # List of candidate names
    candidate = []
    # Open the video capture
    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    # Folder of reference face images
    faces_folder_path = "./rec"
    WHO = None
    Erro = 0

    # Photos that have already been processed are saved to a new file for
    # direct reuse; only newly added pictures need processing.

    # Number of files in the folder
    NUM_OF_FILES = 0  # file count
    for fn in os.listdir(faces_folder_path):
        NUM_OF_FILES += 1
    # print(NUM_OF_FILES)

    person = [0 for i in range(NUM_OF_FILES)]
    # print(person)

    # Previous file count
    f = open("numOfFiles.txt", encoding="utf-8")
    num_o = f.read()
    # print(type(num_o))
    num_o = int(num_o)
    # print(type(num_o))
    # print(num_o)

    if num_o != NUM_OF_FILES:
        # Write the new count to the txt file
        num = open('numOfFiles.txt', 'w', encoding="utf-8")
        num.write(str(NUM_OF_FILES))
        num.close()

        # Process the new set:
        # read images and file names (person names) from the folder, storing
        # each image's 128-D feature vector in the descriptors array
        for f in glob.glob(os.path.join(faces_folder_path, "*.jpg")):
            base = os.path.basename(f)
            # Collect each image's person name into the candidate array
            candidate.append(os.path.splitext(base)[0])
            img = io.imread(f)
            gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            # 1. Face detection
            face_rects = detector(img, 0)
            # print(face_rects)
            for index, face in enumerate(face_rects):
                # 2. Face alignment
                faceAligned = fa.align(img, gray, face)
                # 3. Detect again (remove the extra area left by alignment)
                face_rects2 = detector(faceAligned, 1)
                for index2, face2 in enumerate(face_rects2):
                    ax1 = face2.left()
                    ay1 = face2.top()
                    ax2 = face2.right()
                    ay2 = face2.bottom()
                    # 4. 68-point landmark detection
                    shape = predictor(faceAligned, face2)
                    # 5. Get the descriptor, a 128-D feature vector
                    face_descriptor = facerec.compute_face_descriptor(
                        faceAligned, shape)
                    # Convert to numpy array format
                    v = np.array(face_descriptor)
                    descriptors.append(v)
                    # print(face_rects)
        # Save the new descriptors back to the txt file
        np.savetxt('desc_txt.txt', descriptors)
        c = open("cand.txt", "w", encoding="utf-8")
        str_list = [line + '\n' for line in candidate]
        c.writelines(str_list)
        c.close()

    # Loop: read frames from the capture and display them
    while cap.isOpened():
        desc_list = np.loadtxt("./desc_txt.txt")
        k = open("cand.txt", encoding="utf-8")
        can = k.readlines()
        x1 = 0
        x2 = 0
        y1 = 0
        y2 = 0
        # Grab a frame from the camera
        ret, frame = cap.read()
        # Shrink the frame
        frame = imutils.resize(frame, width=800)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        # 1. Face detection
        face_rects = detector(frame, 1)
        # Iterate over all detections (face coordinates)
        for index, rect in enumerate(face_rects):
            x1 = rect.left()
            y1 = rect.top()
            x2 = rect.right()
            y2 = rect.bottom()
            # Mark the detected face with a rectangle
            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 4,
                          cv2.LINE_AA)
            # 2. Face alignment
            faceAligned = fa.align(frame, gray, rect)
            # 3. Detect again (remove the extra area left by alignment)
            face_rects2 = detector(faceAligned, 1)
            # Iterate over all detections (face coordinates)
            for index2, rect2 in enumerate(face_rects2):
                ax1 = rect2.left()
                ay1 = rect2.top()
                ax2 = rect2.right()
                ay2 = rect2.bottom()
                # 4. 68-point landmark detection
                shape = predictor(faceAligned, rect2)
                # 5. Get the descriptor, a 128-D feature vector
                face_descriptor = facerec.compute_face_descriptor(
                    faceAligned, shape)
                # Convert to numpy array format
                d_test = np.array(face_descriptor)
                # Compute the Euclidean distances to each face in the gallery
                # (5 reference photos give 5 distances); reset the distance list
                dist = []
                for index in desc_list:
                    # compute the distance
                    dist_ = np.linalg.norm(index - d_test)
                    # append it to the list
                    dist.append(dist_)
                # print(dist)
                # Identify the person
                if dist != []:
                    # Zip the candidate names with their Euclidean distances into a dict
                    c_d = dict(zip(can, dist))
                    # Sort ascending by distance: a list of (name, distance) pairs
                    cd_sorted = sorted(c_d.items(), key=lambda d: d[1])
                    # print(cd_sorted)
                    # Smaller Euclidean distance means more alike; 0.4 is the acceptance threshold here
                    if cd_sorted[0][1] < 0.4:
                        rec_name = cd_sorted[0][0]
                    else:
                        rec_name = "No Data"
                        Erro = Erro + 1
                        if Erro == 6:
                            WHO = "No Data"
                            cap.release()
                            cv2.destroyAllWindows()
                            break

                imgPil = Image.fromarray(frame)
                font = ImageFont.truetype("C:/Windows/Fonts/msjh.ttc", 20)
                draw = ImageDraw.Draw(imgPil)
                draw.fontmode = '1'  # disable anti-aliasing
                draw.text((x1, y1 - 20),
                          rec_name,
                          font=font,
                          fill=(255, 255, 255))
                frame = np.array(imgPil)

                for index, name in enumerate(can):
                    if rec_name == name:
                        person[index] = person[index] + 1
                        # print(person)
                        if person[index] == 4:
                            WHO = can[index]
                            # Display the recognized name (Chinese supported)
                            if WHO is not None:
                                # imgPil.show()
                                print(WHO)
                                time.sleep(5)
                                cap.release()
                                cv2.destroyAllWindows()
                                return WHO
                            break
                    elif sum(person) > 8:
                        WHO = "No Data"
                        cap.release()
                        cv2.destroyAllWindows()
                        break

            # Label the name (cv2.putText can only render English)
            # cv2.putText(frame, rec_name, (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
        # OpenCV uses BGR
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        """
        # FPS
        # Find OpenCV version
        (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
    
        if int(major_ver) < 3:
            fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
            print("Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}".format(fps))
        else:
            fps = cap.get(cv2.CAP_PROP_FPS)
            print("Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}".format(fps))
        """
        # Show the result
        cv2.imshow("Face Detection", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
Example #25
import zlib
import numpy
import pandas
import PIL.Image
import tkinter as tk
import face_recognition
from PIL import ImageDraw, ImageFont, ImageTk
from PyFaceDet import facedetectcnn
from golbalvar import *
from io import BytesIO
from tkinter import *
from sql import *

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(shape_predictor_68_face_landmarks_path)
facerec = dlib.face_recognition_model_v1(
    dlib_face_recognition_resnet_model_v1_path)
font = ImageFont.truetype(
    os.path.abspath(os.path.join(os.getcwd(), "../lib")) + "\simsun.ttc",
    30,
    encoding='utf-8')
# Known data pulled from the database
knew_id = []
knew_name = []
knew_face_feature = []


def draw_person_name(num, img_rd, faces, face_position_list, face_name):
    """
    Draw the person's name.
    :param faces: detected faces
    :param img_rd: frame image
Example #26
import dlib
import face_recognition
from cv2 import cv2
import os
import numpy as np

shape_predictor_model = "shape_predictor_68_face_landmarks.dat"
face_rec_128 = "dlib_face_recognition_resnet_model_v1.dat"
face_Unique_feature = dlib.face_recognition_model_v1(face_rec_128)
face_detector = dlib.get_frontal_face_detector()
face_pose_predictor = dlib.shape_predictor(shape_predictor_model)

def load_images_from_folder(folder):
    features = []
    for filename in os.listdir(folder):
        img = cv2.imread(os.path.join(folder,filename))
        if img is not None:
            face_2 = face_detector(img, 1)
            pose_landmarks_1 = face_pose_predictor(img, face_2[0])
            features.append(face_Unique_feature.compute_face_descriptor(img, pose_landmarks_1))
    return features
data = np.array(load_images_from_folder("C:\\Users\\Abdul Jalil\\face"))
np.save('data.npy', data)
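load_images_from_folder raises an IndexError whenever an image contains no detectable face (face_2[0] on an empty detection list). A guarded variant, as a sketch:

def load_images_from_folder_safe(folder):
    features = []
    for filename in os.listdir(folder):
        img = cv2.imread(os.path.join(folder, filename))
        if img is None:
            continue
        faces = face_detector(img, 1)
        if not faces:  # skip images where no face is found
            continue
        landmarks = face_pose_predictor(img, faces[0])
        features.append(face_Unique_feature.compute_face_descriptor(img, landmarks))
    return features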
Example #27
def dlib_generate_result(query_paths, dataset_paths, nodetected_dict):
    predictor_dat_path = r"./shape_predictor_68_face_landmarks.dat"
    # face_rec_dat_path = r"./orig_dlib_face_recognition_resnet_model_v1.dat"
    # face_rec_dat_path = r"./1000_dlib_face_recognition_resnet_model_v1.dat"
    # face_rec_dat_path = r"./4000_dlib_face_recognition_resnet_model_v1.dat"
    # face_rec_dat_path = r"./10000_dlib_face_recognition_resnet_model_v1.dat"
    # face_rec_dat_path = r"./20000_dlib_face_recognition_resnet_model_v1.dat"
    face_rec_dat_path = r"./origplus20000_dlib_face_recognition_resnet_model_v1.dat"
    # face_rec_dat_path = r"./10x10origplus20000_dlib_face_recognition_resnet_model_v1.dat"

    detector = dlib.get_frontal_face_detector()
    shape_predictor = dlib.shape_predictor(predictor_dat_path)
    face_rec = dlib.face_recognition_model_v1(face_rec_dat_path)
    embedding_dict = {}
    nodetected_fail = False

    for i, img_path in enumerate(query_paths + dataset_paths):
        img = io.imread(img_path)
        key = os.path.basename(img_path)
        try:
            dets = detector(img, 1)
        except RuntimeError:
            print('%s failed on image format, please check it.' % img_path)
            continue

        if i % 1000 == 0:
            print(i, time.asctime(time.localtime(time.time())))
        det = None
        if len(dets) < 1:

            # If no face is detected, fall back to the position supplied in nodetected_dict.
            try:
                det = nodetected_dict[key]
            except KeyError:
                print('%s failed on face detection, please check it.' % img_path)
                nodetected_fail = True

        if len(dets) >= 1:
            det = dets[0]
            # print('%s multi-faces detected during face detection, please check it.' % img_path)
            # If multiple faces are detected, choose the biggest rectangle.
            for k, d in enumerate(dets):
                if (d.right() - d.left()) * (d.bottom() - d.top()) > (det.right() - det.left()) * (
                            det.bottom() - det.top()):
                    det = d

        shape = shape_predictor(img, det)
        face_embedding = face_rec.compute_face_descriptor(img, shape)
        embedding_dict[key] = np.array(face_embedding)

    if nodetected_fail:
        sys.exit(1)

    print("embedding geranating finished", time.asctime(time.localtime(time.time())))
    ground_truth_file = open("j_ground_truth.txt", "w")
    result_file = open("j_test_result.txt", "w")
    qkey_list = []
    dkey_list = []
    embeddingq = []
    embeddingd = []
    for qkey in query_paths:
        qqkey = os.path.basename(qkey)
        for dkey in dataset_paths:
            ddkey = os.path.basename(dkey)
            qkey_list.append(qqkey)
            dkey_list.append(ddkey)
            embeddingq.append(embedding_dict[qqkey])
            embeddingd.append(embedding_dict[ddkey])
            if qqkey[2:] == ddkey[2:]:
                ground_truth_file.write(qqkey + "," + ddkey + "\n")

    diff = np.subtract(embeddingq, embeddingd)
    dist = np.sum(np.square(diff), 1)

    for i, _ in enumerate(qkey_list):
        result_file.write(qkey_list[i] + "," + dkey_list[i] + "," + str(1.0 / dist[i]) + "\n")

    ground_truth_file.close()
    result_file.close()
Example #28
    parser.add_argument("--save_faces")
    parser.add_argument("--similarity_threshold", type=float, default=0.5)
    parser.add_argument("--video_max_images", type=int, default=10)
    parser.add_argument("--type",
                        choices=['default', 'photobooth', 'google', 'crime'],
                        default='default')
    args = parser.parse_args()

    if args.detector == 'hog':
        detector = dlib.get_frontal_face_detector()
    elif args.detector == 'cnn':
        detector = dlib.cnn_face_detection_model_v1(args.cnn_model_path)
    else:
        assert False, "Unknown detector " + args.detector
    predictor = dlib.shape_predictor(args.predictor_path)
    facerec = dlib.face_recognition_model_v1(args.face_rec_model_path)

    if args.save_resized:
        makedirs(args.save_resized)

    if args.save_faces:
        makedirs(args.save_faces)

    db.connect(args.db)

    print("Processing files...")
    start_time = time.time()
    num_files = 0
    num_images = 0
    num_faces = 0
    for dirpath, dirnames, filenames in os.walk(args.dir):
Example #29
import os
import glob
import cv2
import dlib
import numpy as np
from PIL import Image

detectorFace = dlib.get_frontal_face_detector()
detectorPontos = dlib.shape_predictor(
    "recursos/shape_predictor_68_face_landmarks.dat")
reconhecimentoFacial = dlib.face_recognition_model_v1(
    "recursos/dlib_face_recognition_resnet_model_v1.dat")
indices = np.load("recursos/indices_yale.pickle")
descritoresFaciais = np.load("recursos/descritores_yale.npy")
limiar = 0.5
totalFaces = 0
totalAcertos = 0

for arquivo in glob.glob(os.path.join("yalefaces/teste", "*.gif")):
    imagemFace = Image.open(arquivo).convert('RGB')
    imagem = np.asarray(imagemFace, 'uint8')
    idatual = int(
        os.path.split(arquivo)[1].split(".")[0].replace("subject", ""))
    totalFaces += 1
    facesDetectadas = detectorFace(imagem, 3)
    for face in facesDetectadas:
        e, t, d, b = (int(face.left()), int(face.top()), int(face.right()),
                      int(face.bottom()))
        pontosFaciais = detectorPontos(imagem, face)
        descritorFacial = reconhecimentoFacial.compute_face_descriptor(
            imagem, pontosFaciais)
Example #30
import dlib, cv2
import numpy as np

detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor('models/shape_predictor_68_face_landmarks.dat')
facerec = dlib.face_recognition_model_v1(
    'models/dlib_face_recognition_resnet_model_v1.dat')

descs = np.load('img/descs.npy', allow_pickle=True)[()]  # dict of name -> descriptor


def encode_face(img):
    dets = detector(img, 1)

    if len(dets) == 0:
        return np.empty(0)

    for k, d in enumerate(dets):
        shape = sp(img, d)
        face_descriptor = facerec.compute_face_descriptor(img, shape)

        return np.array(face_descriptor)


video_path = 'img/bs_clip2.mp4'
cap = cv2.VideoCapture(video_path)

if not cap.isOpened():
    exit()

_, img_bgr = cap.read()  # (800, 1920, 3)
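After reading a frame, the matching step that typically follows compares the frame's encoding against each saved descriptor. A sketch, assuming descs maps names to 128-D vectors and using dlib's suggested 0.6 threshold:

desc = encode_face(img_bgr[:, :, ::-1])  # dlib expects RGB, OpenCV reads BGR
if desc.size:
    name, dist = min(((n, np.linalg.norm(d - desc)) for n, d in descs.items()),
                     key=lambda t: t[1])
    label = name if dist < 0.6 else "unknown"
    print(label, dist)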
Example #31
import numpy as np
import dlib
from imgaug import augmenters as iaa
import pandas as pd 
from sklearn.svm import SVC
import random

face_encoder = dlib.face_recognition_model_v1('./model/dlib_face_recognition_resnet_model_v1.dat')
face_pose_predictor = dlib.shape_predictor('./model/shape_predictor_68_face_landmarks.dat')

def encoding_faces(images, label, coord_detect):
    """
    Encoding a list of faces using FaceNet generating a 128D vector
    :param images: list of images of faces to encode
    :param coord_detect: coordinates of the detection
    :return: pandas dataframe of the faces for one label
    """
    l = []
    for img, d in zip(images, coord_detect):
        (x1, y1, x2, y2) = d
        detected_face = dlib.rectangle(left=0, top=0, right=int(x2-x1), bottom=int(y2-y1))
        pose_landmarks = face_pose_predictor(img, detected_face)
        face_encoding = face_encoder.compute_face_descriptor(img, pose_landmarks, 1)
        l.append(np.append(face_encoding, [label]))
    
    return np.array(l)

def create_positive_set(pictures, coords, label=1):
    """
    Create positive train set for one face from a list of three pictures of this face.
    Data Augmentation on these three pictures to generate 10 pictures.
Example #32
import numpy as np
from sklearn import svm
import pickle
import glob
import os
import dlib
import cv2
# Final variables
predictor_url = "/home/shubham/Documents/project/Final_project/data_functioning/shape_predictor_68_face_landmarks.dat"
recog_model_url = "/home/shubham/Documents/project/Final_project/data_functioning/dlib_face_recognition_resnet_model_v1.dat"
to_recog = "/home/shubham/Documents/project/Final_project/data/"
detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(predictor_url)
facerec = dlib.face_recognition_model_v1(recog_model_url)
files=[]
def get_arr(frame):
    dets=detector(frame,1)
    if len(dets)!=0:
        for l,d in enumerate(dets):
            shape=sp(frame,d)
            face_descriptor=facerec.compute_face_descriptor(frame,shape)
            return list(face_descriptor)
def arr_to_str(arr):
    retStr=str(arr[0])
    for i in range(1,len(arr)):
        retStr=retStr+","+str(arr[i])
    return retStr
for f in glob.glob(os.path.join("training_vdos/", "*.mp4")):
    files.append(f)
for fl_url in files:
    label_processing=fl_url.split("/")[1].split(".")[0]
Ejemplo n.º 33
0
import os
import dlib
import numpy as np
from skimage import io
import cv2
from collections import namedtuple

data_dir = os.path.expanduser(
    '/home/rezeck/Documents/itelicam/face-recognition-dlib/data')
faces_folder_path = data_dir + '/kodemaker/'

# Globals
dlib_frontal_face_detector = dlib.get_frontal_face_detector()
shape_predictor = dlib.shape_predictor(
    data_dir + '/dlib/shape_predictor_5_face_landmarks.dat')
face_recognition_model = dlib.face_recognition_model_v1(
    data_dir + '/dlib/dlib_face_recognition_resnet_model_v1.dat')
face_classifier_opencv = cv2.CascadeClassifier(
    data_dir + '/dlib/haarcascade_frontalface_default.xml')


def to_dlib_rect(w, h):
    return dlib.rectangle(left=0, top=0, right=w, bottom=h)


def to_rect(dr):
    #  (x, y, w, h)
    return dr.left(), dr.top(), dr.right() - dr.left(), dr.bottom() - dr.top()
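
# e.g. to_dlib_rect(100, 150) covers a whole 100x150 image, and
# to_rect(dlib.rectangle(10, 20, 110, 170)) returns (10, 20, 100, 150)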


def face_detector_opencv(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
Ejemplo n.º 34
0
import os
import time

import dlib
import numpy as np
from skimage import io


def dlib_generate_embedding(query_paths, dataset_paths, nodetected_dict):
    predictor_dat_path = r"./shape_predictor_68_face_landmarks.dat"
    face_rec_dat_path = r"./orig_dlib_face_recognition_resnet_model_v1.dat"
    # face_rec_dat_path = r"./1000_dlib_face_recognition_resnet_model_v1.dat"
    # face_rec_dat_path = r"./4000_dlib_face_recognition_resnet_model_v1.dat"
    # face_rec_dat_path = r"./10000_dlib_face_recognition_resnet_model_v1.dat"
    # face_rec_dat_path = r"./20000_dlib_face_recognition_resnet_model_v1.dat"
    # predictor_dat_path = r"C:\sources\dlib\build\Release\shape_predictor_68_face_landmarks.dat"
    # face_rec_dat_path = r"C:\sources\dlib\build\Release\orig_dlib_face_recognition_resnet_model_v1.dat"

    detector = dlib.get_frontal_face_detector()
    shape_predictor = dlib.shape_predictor(predictor_dat_path)
    face_rec = dlib.face_recognition_model_v1(face_rec_dat_path)
    embedding_dict = {}
    non_only_one_list = []
    more_than_one_list = []

    for i, img_path in enumerate(query_paths + dataset_paths):
        img = io.imread(img_path)
        key = os.path.basename(img_path)
        dets = detector(img, 1)

        # win.clear_overlay()
        # win.set_image(img)

        if i % 1000 == 0:
            print(i, time.asctime(time.localtime(time.time())))
        det = None
        if len(dets) < 1:
            # If no face was detected, fall back to a manually supplied position.
            try:
                det = nodetected_dict[key]
            except KeyError:
                print('%s failed on face detection, please check it.' % img_path)
                continue

        if len(dets) >= 1:
            det = dets[0]
            # print('%s: multiple faces detected, please check it.' % img_path)
            # If multiple faces were detected, keep the largest rectangle.
            for k, d in enumerate(dets):
                if (d.right() - d.left()) * (d.bottom() - d.top()) > (det.right() - det.left()) * (det.bottom() - det.top()):
                    det = d

        shape = shape_predictor(img, det)
        face_embedding = face_rec.compute_face_descriptor(img, shape)
        embedding_dict[key] = np.array(face_embedding)

    print("embedding geranating finished", time.asctime(time.localtime(time.time())))
    actual_issame = []
    embedding_list = []
    for qkey in query_paths:
        qqkey = os.path.basename(qkey)
        for dkey in dataset_paths:
            ddkey = os.path.basename(dkey)
            embedding_list.append(embedding_dict[qqkey])
            embedding_list.append(embedding_dict[ddkey])
            # Pairs whose names match after the first character share identity.
            actual_issame.append(qqkey[1:] == ddkey[1:])

    return embedding_list, actual_issame
Ejemplo n.º 35
0
import os
import shutil
import dlib

cluster_size_threshold = 2  # adjust this threshold to change the minimum number of faces per cluster

detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor('users/config/shape_predictor_68_face_landmarks.dat')
facerec = dlib.face_recognition_model_v1(
    'users/config/dlib_face_recognition_resnet_model_v1.dat')


# Takes the paths of all images that need clustering
def face_cluster(input_images):
    # Re-initialize so repeated calls don't accumulate duplicate results
    descriptors = []
    images = []

    for image in input_images:
        # Load each image
        img = dlib.load_rgb_image(image['path'])
        # Detect the number and positions of faces
        faces = detector(img, 1)
        # print(f'{len(faces)} faces detected in', image['path'])

        # Iterate over all detected faces
        for face in faces:
            # Detect facial landmarks with the 68-point model
            shape = sp(img, face)
            # Convert the landmark set into a 128-D feature vector
            face_descriptor = facerec.compute_face_descriptor(img, shape)
Ejemplo n.º 36
0
      help="download at http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2")
  a.add_argument("--model", default="dlib_face_recognition_resnet_model_v1.dat",
      help="download at http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2")
  a.add_argument("--image", help="images must be .jpg files")
  a.add_argument("--image_dir", help="images must be .jpg files")
  a.add_argument("--viz_off", action='store_true', help="flag to vizualize detections and shape prediction")

  args = a.parse_args()

  if args.landmarks_dat is None or args.model is None or (args.image is None and args.image_dir is None):
    a.print_help()
    sys.exit(1)

  detector = dlib.get_frontal_face_detector()
  sp = dlib.shape_predictor(args.landmarks_dat)
  facerec = dlib.face_recognition_model_v1(args.model)

  win = None
  if not args.viz_off:
    win = dlib.image_window()

  if args.image is not None:
    img = io.imread(args.image)
    print(encode(detector, sp, facerec, img, win))

  if args.image_dir is not None:
    fns = os.listdir(args.image_dir)
    descriptors = []
    for fn in fns:
      if FILE_EXT not in fn:
        continue
Ejemplo n.º 37
0
import dlib
from skimage import io  # this module handles image input/output
import csv

path_pics = "D://xx/"
path_csv = "D://xx/"

# detector to find the faces
detector = dlib.get_frontal_face_detector()

# shape predictor to find the face landmarks (dlib 68-point model)
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

# face recognition model: the object maps human faces into 128-D vectors
facerec = dlib.face_recognition_model_v1(
    "dlib_face_recognition_resnet_model_v1.dat")


# Return the 128-D features of a single image
def return_128d_features(path_img):
    # read the image; skimage.io.imread already returns RGB, which is what
    # dlib expects, so no color conversion is needed here
    img = io.imread(path_img)
    dets = detector(img, 1)
    # a face was detected
    if len(dets) != 0:
        shape = predictor(img, dets[0])
        face_descriptor = facerec.compute_face_descriptor(img, shape)
    else:
        face_descriptor = 0
    return face_descriptor
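
# Hedged continuation sketch (the original snippet is cut off here; the folder
# walk and CSV layout below are assumptions, not taken from the original):
import os

def save_features_to_csv(pics_dir, csv_path):
    # one CSV row of 128 values per image that produced a face
    with open(csv_path, "w", newline="") as f:
        writer = csv.writer(f)
        for name in os.listdir(pics_dir):
            features = return_128d_features(os.path.join(pics_dir, name))
            if not isinstance(features, int):  # 0 signals that no face was found
                writer.writerow(list(features))

# hypothetical usage with the paths defined above:
#   save_features_to_csv(path_pics, path_csv + "features_all.csv")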
Ejemplo n.º 38
0
    print("pip install git+https://github.com/ageitgey/face_recognition_models")
    quit()

face_detector = dlib.get_frontal_face_detector()

predictor_68_point_model = face_recognition_models.pose_predictor_model_location()
pose_predictor_68_point = dlib.shape_predictor(predictor_68_point_model)

predictor_5_point_model = face_recognition_models.pose_predictor_five_point_model_location()
pose_predictor_5_point = dlib.shape_predictor(predictor_5_point_model)

cnn_face_detection_model = face_recognition_models.cnn_face_detector_model_location()
cnn_face_detector = dlib.cnn_face_detection_model_v1(cnn_face_detection_model)

face_recognition_model = face_recognition_models.face_recognition_model_location()
face_encoder = dlib.face_recognition_model_v1(face_recognition_model)


def _rect_to_css(rect):
    """
    Convert a dlib 'rect' object to a plain tuple in (top, right, bottom, left) order

    :param rect: a dlib 'rect' object
    :return: a plain tuple representation of the rect in (top, right, bottom, left) order
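
    Example (illustrative): a rect with left=10, top=20, right=110, bottom=170
    maps to (20, 110, 170, 10)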
    """
    return rect.top(), rect.right(), rect.bottom(), rect.left()


def _css_to_rect(css):
    """
    Convert a tuple in (top, right, bottom, left) order to a dlib `rect` object
Ejemplo n.º 39
0
def main():

    if len(sys.argv) != 2:
        exit()

    os.chdir(sys.path[0])
    # image to be recognized
    img_name = sys.argv[1]
    face_data_path = "./face"

    # dlib's frontal face detector
    detector = dlib.get_frontal_face_detector()

    # 68-point facial landmark predictor
    shape_predictor = dlib.shape_predictor(
        "../dat/shape_predictor_68_face_landmarks.dat")

    # load the face recognition model
    face_rec_model = dlib.face_recognition_model_v1(
        "../dat/dlib_face_recognition_resnet_model_v1.dat")

    descriptors = []

    candidate = []

    img_files = glob.glob(os.path.join(face_data_path, "*.jpg"))

    for file in img_files:
        base = os.path.basename(file)
        candidate.append(os.path.splitext(base)[0])
        img = io.imread(file)

        dets = detector(img, 1)

        # process every detection result
        for k, d in enumerate(dets):
            # detect the 68 landmarks
            shape = shape_predictor(img, d)

            # describe the face as a 128-D feature vector
            face_descriptor = face_rec_model.compute_face_descriptor(
                img, shape)

            # convert to a numpy array
            v = np.array(face_descriptor)
            descriptors.append(v)

    # process the image to be recognized
    img = io.imread(img_name)
    face_rects = detector(img, 1)
    distance = []
    for k, d in enumerate(face_rects):
        # detect the 68 landmarks
        shape = shape_predictor(img, d)

        # describe the face as a 128-D feature vector
        face_descriptor = face_rec_model.compute_face_descriptor(img, shape)

        d_test = np.array(face_descriptor)
        x1 = d.left()
        y1 = d.top()
        x2 = d.right()
        y2 = d.bottom()
        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 4, cv2.LINE_AA)
        for i in descriptors:
            # Euclidean distance between the descriptors
            dist_ = np.linalg.norm(i - d_test)
            distance.append(dist_)

    candidate_distance_dict = dict(zip(candidate, distance))

    # sort candidates by distance
    candidate_distance_dict_sorted = sorted(candidate_distance_dict.items(),
                                            key=lambda d: d[1])

    print(candidate_distance_dict_sorted)
    # take the closest (most similar) candidate
    result = candidate_distance_dict_sorted[0][0]
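
    # Design note (not from the original): since only the nearest candidate is
    # used, result = candidate[int(np.argmin(distance))] would avoid building
    # and sorting the dict.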

    cv2.putText(img, result, (x1, y1), cv2.FONT_HERSHEY_DUPLEX, 0.7,
                (255, 255, 255), 1, cv2.LINE_AA)

    #img = imutils.resize(img, width=800)
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)  # io.imread gives RGB; OpenCV displays BGR
    cv2.imshow("test", img)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
Ejemplo n.º 40
0
# @Time    : 2018/12/4 0004 17:20
# @Author  : lzc
# @File    : face_recongnition.py

import dlib
import numpy as np
import cv2
import pandas as pd

# first initialize the 128-D face recognition model
recognition = dlib.face_recognition_model_v1('lib/dlib_face_recognition_resnet_model_v1.dat')

# Compute the distance between two feature vectors.
# fea_1: feature data from the stored feature-mean file
# fea_2: face feature data captured from the camera
def get_xiangliang_distance(fea_1,fea_2):
    fea_1 = np.array(fea_1)
    fea_2 = np.array(fea_2)
    distinct = np.sqrt(np.sum(np.square(fea_1-fea_2)))

    # Threshold the Euclidean distance to decide whether the two faces belong
    # to the same person (0.5 here).
    print('distinct======', distinct)
    if distinct > 0.5:
        return 'N'
    else:
        return 'Y'
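
# Illustrative check of the threshold above using synthetic vectors (these are
# not real face features):
#   get_xiangliang_distance([0.0] * 128, [0.01] * 128)  # distance ~0.113 -> 'Y'
#   get_xiangliang_distance([0.0] * 128, [0.10] * 128)  # distance ~1.131 -> 'N'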
# first, read the stored mean face features for each person
path_face_feature_mean_csv = 'data/feature_mean_all.csv'
csv_rd = pd.read_csv(path_face_feature_mean_csv)

# define a collection to hold every person's feature means
Ejemplo n.º 41
0
def dataGeneration():

    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(pathAttributes.face_detection_model)
    face_encoder = dlib.face_recognition_model_v1(
        pathAttributes.face_recognition_model)

    print("[INFO] camera sensor warming up...")
    #vs = VideoStream().start()
    #video_capture = cv2.VideoCapture(0)
    #time.sleep(2.0)
    names = []
    s = os.listdir(pathAttributes.faces)

    for i in s:
        name_w_time = os.path.basename(i)
        name_only = name_w_time.split("_", 1)
        names.append(name_only)
    people = []
    IDs = []
    for i in s:
        name_w_time = os.path.basename(i)
        time_only = name_w_time.split("_", 1)[0]
        IDs.append(time_only)
    #count = 1

    for i in s:
        for ID in IDs:
            if str(i).find(ID) > -1:
                document = os.path.join(pathAttributes.faces, i)
                #name_w_time = os.path.basename(document)
                #name_only = name_w_time.split("_",1)
                file_path_data = os.path.join(document, "*.png")
                pics = glob.glob(file_path_data)
                for index in pics:
                    pic = os.path.join(document, index)
                    print(pic)
                    image = cv2.imread(pic)
                    try:
                        faceDetect = detector(image, 1)
                        shape = predictor(image, faceDetect[0])
                        face_encoding = face_encoder.compute_face_descriptor(
                            image, shape, 1)
                        features = list(face_encoding)
                        features.insert(0, ID)
                        #features.insert(1,name_only)
                        people.append(features)
                        print(features)
                    except Exception:
                        # no face detected or landmark failure; skip this image
                        pass
    #online mode
    newest_data_name = time.strftime("%Y%m%d%H%M%S", time.localtime(
        time.time())) + ".csv"
    newest_data = os.path.join(pathAttributes.data, newest_data_name)
    csvfile = open(newest_data, "w", newline='')
    #csvfile = open(pathAttributes.face_features_data,"w",newline='')  batch mode
    writer = csv.writer(csvfile)
    writer.writerows(people)
    csvfile.close()
    return False
    #online mode
    """