Example #1
    def __init__(self, FLAGS, mydb, is_save_mysql):
        super().__init__()
        with tf.Graph().as_default():
            gpu_options = tf.GPUOptions(
                per_process_gpu_memory_fraction=1.0)
            sess = tf.Session(config=tf.ConfigProto(
                gpu_options=gpu_options, log_device_placement=False))
            with sess.as_default():
                # Load the pretrained FaceNet model
                facenet.load_model(FLAGS.model_path)
                # Create the MTCNN model: initialize the pnet, rnet and onet
                # networks used to align faces captured from the camera
                self.pnet, self.rnet, self.onet = detect_face.create_mtcnn(
                    sess, None)

        # Initialize the UI
        self.setupUi(self)
        # Open the camera
        self.camera = cv2.VideoCapture(0)
        # Whether the camera has been opened
        self.is_camera_opened = False

        # Timer: capture one frame every 30 ms
        self._timer = QtCore.QTimer(self)
        self._timer.timeout.connect(self._queryFrame)
        self._timer.setInterval(30)

        self.FLAGS = FLAGS
        self.mydb = mydb
        # Whether to save results to the database
        self.is_save_mysql = is_save_mysql
Example #2
    def __init__(self, minsize=20, factor=0.709, threshold=[0.8, 0.9, 0.9]):
        with tf.device('/gpu:0'):
            with tf.Graph().as_default():
                gpu_options = tf.GPUOptions(allow_growth=True)
                sess = tf.Session(config=tf.ConfigProto(
                    gpu_options=gpu_options, log_device_placement=False))
                with sess.as_default():
                    pnet, rnet, onet = detect_face.create_mtcnn(sess, 'align/')

        self.pnet = pnet
        self.rnet = rnet
        self.onet = onet
        self.minsize = minsize
        self.factor = factor
        self.threshold = threshold
Example #3
def load_and_align_data(image_paths, image_size=160, margin=44):
    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # thresholds for the three detection stages
    factor = 0.709  # scale factor for the image pyramid
    print('Creating networks and loading parameters')
    with tf.Graph().as_default():
        sess = tf.Session()
        with sess.as_default():
            pnet, rnet, onet = detect_face.create_mtcnn(sess, None)

    img_list = []
    nrof_samples = len(image_paths)
    success_paths = []
    for img_path in image_paths:
        img = misc.imread(img_path, mode='RGB')
        img_size = np.asarray(img.shape)[0:2]
        bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet,
                                                    onet, threshold, factor)
        if len(bounding_boxes) < 1:
            print("no face found in {}".format(img_path))
            continue
        det = np.squeeze(bounding_boxes[0, 0:4])
        bb = np.zeros(4, dtype=np.int32)
        bb[0] = np.maximum(det[0] - margin / 2, 0)
        bb[1] = np.maximum(det[1] - margin / 2, 0)
        bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
        bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
        cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
        aligned = misc.imresize(cropped, (image_size, image_size),
                                interp='bilinear')
        prewhitened = prewhiten(aligned)
        img_list.append(prewhitened)
        success_paths.append(img_path)
    images = np.stack(img_list)
    return success_paths, images
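A minimal usage sketch for the function above; the file names are hypothetical, and images in which MTCNN finds no face are simply dropped from the result:

    paths, images = load_and_align_data(['alice.jpg', 'bob.jpg'])
    print(images.shape)  # (n_detected_faces, 160, 160, 3), prewhitened floats
    print(paths)         # only the inputs in which a face was found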
Example #4
    def __init__(self):
        self.graph = tf.Graph()
        with self.graph.as_default():
            gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
            sess = tf.Session(config=tf.ConfigProto(
                gpu_options=gpu_options, log_device_placement=False))
            with sess.as_default():
                self.pnet, self.rnet, self.onet = FaceDet.create_mtcnn(sess, None)
Example #5
    def __init__(self):
        # some constants kept as default from facenet
        self.minsize = 20
        self.threshold = [0.6, 0.7, 0.7]
        self.factor = 0.709
        self.margin = 44
        self.input_image_size = 160
        self.max_dist = 0.75

        self.sess = tf.Session()

        # read pnet, rnet, onet models from align directory and files are det1.npy, det2.npy, det3.npy
        self.pnet, self.rnet, self.onet = detect_face.create_mtcnn(
            self.sess, 'align')

        # read 20170512-110547 model file downloaded from https://drive.google.com/file/d/0B5MzpY9kBtDVZ2RpVDYwWmxoSUk
        facenet.load_model('20170512-110547/20170512-110547.pb')

        # Get input and output tensors
        self.images_placeholder = tf.get_default_graph().get_tensor_by_name(
            "input:0")
        self.embeddings = tf.get_default_graph().get_tensor_by_name(
            "embeddings:0")
        self.phase_train_placeholder = tf.get_default_graph(
        ).get_tensor_by_name("phase_train:0")
        self.embedding_size = self.embeddings.get_shape()[1]

        self.face_dic = {}
Example #6
def detect_face(img):
    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # thresholds for the three detection stages
    factor = 0.709  # scale factor for the image pyramid
    margin = 32
    image_size = 160

    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = df.create_mtcnn(sess, None)

    img_size = np.asarray(img.shape)[0:2]
    bounding_boxes, _ = df.detect_face(img, minsize, pnet, rnet, onet,
                                       threshold, factor)
    det = np.squeeze(bounding_boxes[0, 0:4])  # assumes at least one face was detected
    bb = np.zeros(4, dtype=np.int32)
    bb[0] = np.maximum(det[0] - margin / 2, 0)
    bb[1] = np.maximum(det[1] - margin / 2, 0)
    bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
    bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
    cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
    aligned = misc.imresize(cropped, (image_size, image_size),
                            interp='bilinear')
    face_token = str(uuid.uuid4())
    # align_path is a module-level output directory in the source project
    output_filename = os.path.join(align_path, face_token + '.png')
    misc.imsave(output_filename, aligned)
    prewhitened = facenet.prewhiten(aligned)
    aligned_image = prewhitened
    aligned_pictures = [aligned_image]

    return aligned_pictures
Example #7
def initialize_mtcnn(gpu_memory_fraction):
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
    return pnet, rnet, onet
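A sketch of how the returned networks are typically reused; the memory fraction is an arbitrary choice:

    pnet, rnet, onet = initialize_mtcnn(gpu_memory_fraction=0.5)
    # the three networks can now be passed to detect_face.detect_face for
    # every image or video frame without rebuilding the TensorFlow graph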
Example #8
def process_celebrity(img):
    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # thresholds for the three detection stages
    factor = 0.709  # scale factor for the image pyramid
    margin = 44  # crop margin in pixels (assumed; the other examples here use 44 or 32)

    print('Creating networks and loading parameters')
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = detect_face.create_mtcnn(sess, None)

    img_size = np.asarray(img.shape)[0:2]
    img_list = []
    bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet,
                                                threshold, factor)
    count = len(bounding_boxes)
    for i in range(count):
        det = np.squeeze(bounding_boxes[i, 0:4])
        bb = np.zeros(4, dtype=np.int32)
        bb[0] = np.maximum(det[0] - margin / 2, 0)
        bb[1] = np.maximum(det[1] - margin / 2, 0)
        bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
        bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
        cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
        aligned = misc.imresize(cropped, (160, 160), interp='bilinear')
        prewhitened = facenet.prewhiten(aligned)
        # prewhitened = np.array(prewhitened).reshape(160, 160, 3)
        img_list.append(prewhitened)
    prewhitened = np.stack(img_list)

    with tf.Session() as sess:
        # celeb_model and classifier are module-level paths in the source project
        facenet.load_model(celeb_model)
        images_placeholder = tf.get_default_graph().get_tensor_by_name(
            "input:0")
        embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
        phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(
            "phase_train:0")

        feed_dict = {
            images_placeholder: prewhitened,
            phase_train_placeholder: False
        }
        emb = sess.run(embeddings, feed_dict=feed_dict)
        classifier_filename_exp = os.path.expanduser(classifier)
        with open(classifier_filename_exp, 'rb') as infile:
            (model, class_names) = pickle.load(infile, encoding='latin1')
        print('Loaded classifier model from file "%s"\n' %
              classifier_filename_exp)
        predictions = model.predict_proba(emb)
        best_class_indices = np.argmax(predictions, axis=1)
        best_class_probabilities = predictions[
            np.arange(len(best_class_indices)), best_class_indices]
        result_string = ""
        for i in range(count):
            result_string += class_names[best_class_indices[i]] + ', '
        print(result_string)
        return result_string
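A hypothetical invocation, assuming an RGB image loaded with scipy.misc and that celeb_model and classifier point at valid files:

    img = misc.imread('group_photo.jpg', mode='RGB')
    names = process_celebrity(img)  # e.g. 'Celebrity A, Celebrity B'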
Example #9
    def _setup_mtcnn(self):
        # gpu_memory_fraction is a module-level constant in the source project
        with tf.Graph().as_default():
            gpu_options = tf.GPUOptions(
                per_process_gpu_memory_fraction=gpu_memory_fraction)
            sess = tf.Session(config=tf.ConfigProto(
                gpu_options=gpu_options, log_device_placement=False))
            with sess.as_default():
                return detect_face.create_mtcnn(sess, None)
Example #10
    def __init__(self):
        self.minsize = 20  # minimum size of face
        self.threshold = [0.6, 0.7, 0.7]  # thresholds for the three detection stages
        self.factor = 0.709  # scale factor for the image pyramid
        with tf.Graph().as_default():
            sess = tf.Session()
            with sess.as_default():
                self.pnet, self.rnet, self.onet = detect_face.create_mtcnn(
                    sess, None)
Example #11
    def __init__(self):
        with tf.Graph().as_default():
            gpu_options = tf.GPUOptions(
                per_process_gpu_memory_fraction=GPU_MEMORY_FRACTION)
            sess = tf.Session(config=tf.ConfigProto(
                gpu_options=gpu_options, log_device_placement=False))
            with sess.as_default():
                self.pnet, self.rnet, self.onet = detect_face.create_mtcnn(
                    sess, None)
Example #12
def load_detect_face_model(device='auto'):
    with tf.Graph().as_default():
        if 'cpu' in device:
            # hide all GPUs so everything is placed on the CPU
            config = tf.ConfigProto(device_count={'GPU': 0})
            sess = tf.Session(config=config)
        else:
            sess = tf.Session()
        with sess.as_default():
            pnet, rnet, onet = df.create_mtcnn(sess, None)
    return (pnet, rnet, onet)
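A short sketch of the device switch; passing 'cpu' hides all GPUs so the networks run on the CPU:

    pnet, rnet, onet = load_detect_face_model(device='cpu')  # CPU only
    pnet, rnet, onet = load_detect_face_model()              # default placement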
Example #13
    def __init__(self):
        with tf.Graph().as_default():
            gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.85)
            self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
            with self.sess.as_default():
                self.pnet, self.rnet, self.onet = detect_face.create_mtcnn(self.sess, None)

            self.minsize = 20  # minimum size of face
            self.threshold = [0.6, 0.7, 0.7]  # thresholds for the three detection stages
            self.factor = 0.709  # scale factor for the image pyramid
Example #14
    def __init__(self):
        self.minsize = 30  # minimum size of face
        self.threshold = [0.6, 0.7, 0.7]  # thresholds for the three detection stages
        self.factor = 0.709  # scale factor for the image pyramid
        print('Creating networks and loading parameters')
        with tf.Graph().as_default():
            # gpu_memory_fraction = 1.0
            # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
            # sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
            sess = tf.Session()
            with sess.as_default():
                self.pnet, self.rnet, self.onet = detect_face.create_mtcnn(sess, None)
Example #15
def main(args):

    # MTCNN
    with tf.Graph().as_default():
        sess = tf.Session()
        with sess.as_default():
            pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
    threshold = [0.6, 0.7, 0.7]
    factor = 0.709

    # Output dirs creation
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    images = []
    for path in sorted(os.listdir(args.input_dir)):
        if not os.path.exists(os.path.join(args.output_dir, path)):
            os.mkdir(os.path.join(args.output_dir, path))
        for name in sorted(os.listdir(os.path.join(args.input_dir, path))):
            images.append(os.path.join(path, name))

    # Alignment procedure
    for path in tqdm(images):
        img = io.imread(os.path.join(args.input_dir, path))
        if img.ndim == 2:
            img = to_rgb(img)
        img = img[:, :, 0:3]
        _minsize = min(min(img.shape[0] // 5, img.shape[1] // 5), 80)
        bounding_boxes, points = detect_face.detect_face(
            img, _minsize, pnet, rnet, onet, threshold, factor)
        if bounding_boxes.size > 0:
            bindex = -1
            nrof_faces = bounding_boxes.shape[0]
            if nrof_faces > 0:
                det = bounding_boxes[:, 0:4]
                img_size = np.asarray(img.shape)[0:2]
                bindex = 0
                if nrof_faces > 1:
                    bounding_box_size = (det[:, 2] - det[:, 0]) * (det[:, 3] -
                                                                   det[:, 1])
                    img_center = img_size / 2
                    offsets = np.vstack([
                        (det[:, 0] + det[:, 2]) / 2 - img_center[1],
                        (det[:, 1] + det[:, 3]) / 2 - img_center[0]
                    ])
                    offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
                    bindex = np.argmax(bounding_box_size -
                                       offset_dist_squared * 2.0)
            points = points[:, bindex]
            # MTCNN returns landmarks as (x1..x5, y1..y5); reshape into five (x, y) pairs
            landmark = points.reshape((2, 5)).T
            # preprocess is the source project's landmark-based alignment helper
            warped = preprocess(img, landmark)
            io.imsave(os.path.join(args.output_dir, path), warped)
        else:
            print(path + ' was skipped')
Example #16
def sortface(path):
    minsize = 3  # minimum size of face
    threshold = [0.65, 0.7, 0.7]  # thresholds for the three detection stages
    factor = 0.709  # scale factor for the pyramid of face sizes searched in the image

    # Fraction of GPU memory handed to TensorFlow: actual GPU memory * 0.7
    gpu_memory_fraction = 0.7

    # Build the TensorFlow networks and load their parameters
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = detect_face.create_mtcnn(sess, None)

    def eyes(im):
        img = misc.imread(im)
        bounding_boxes, face_points = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
        if bounding_boxes.shape[0] == 0:
            return -1
        eye_dis = abs(face_points[0] - face_points[1])
        eye_mid = (face_points[0] + face_points[1])/2
        score1 = abs(face_points[2]-eye_mid)/eye_dis
        return round(score1[0], 2)

    def lap(im):
        img = cv2.imread(im)
        score2 = cv2.Laplacian(img, cv2.CV_64F).var()
        return score2//50

    face_score1 = []
    face_score2 = []
    face_dirs = os.listdir(path)
    face_dirs = [os.path.join(path, i) for i in face_dirs]

    for face in face_dirs:
        print(face)
        s1 = eyes(face)
        face_score1.append(s1)
        if s1 >= 0:
            face_score2.append(lap(face))
        else:
            face_score2.append(0)

    data = {'a': face_dirs, 'b': face_score1, 'c': face_score2}
    data = pd.DataFrame(data)
    data = data.sort_values(['b', 'c'], ascending=[True, False])
#    data = data.sort_values('c', ascending=False)
    ordered = data['a'].values.tolist()  # avoid shadowing the built-in dir()
    return ordered
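A hypothetical call: sortface scores every image in the directory by eye/nose symmetry and Laplacian sharpness, then returns the file paths sorted on those two scores:

    ranked = sortface('faces/')
    print(ranked[:3])  # the first few paths in the sorted order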
Example #17
def initialize_mtcnn(gpu_memory_fraction,
                     rect_minsize=100,
                     mtcnn_thresholds=[0.6, 0.7, 0.7],
                     scale_factor=0.709):
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = create_mtcnn(sess, None)
    minsize = rect_minsize
    threshold = mtcnn_thresholds
    factor = scale_factor
    return pnet, rnet, onet, minsize, threshold, factor
Example #18
    def __init__(self, joint_align=True):
        self.joint_align = joint_align

        self.minsize = 20
        # the source also tried thresholds [0.6, 0.9, 0.9], [0.6, 0.7, 0.9]
        # and [0.6, 0.7, 0.7] before this final assignment
        self.threshold = [0.6, 0.7, 0.8]
        # likewise factors 0.6 and 0.8 were tried before settling on 0.709
        self.factor = 0.709
        self.graph = tf.Graph()
        self.sess = tf.Session(graph=self.graph)
        with self.graph.as_default():
            self.pnet, self.rnet, self.onet = detect_face.create_mtcnn(self.sess, None)
Example #19
def detection_face(img):
    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # thresholds for the three detection stages
    factor = 0.709  # scale factor for the image pyramid
    print('Creating networks and loading parameters')
    with tf.Graph().as_default():
        # gpu_memory_fraction = 1.0
        # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
        # sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        sess = tf.Session()
        with sess.as_default():
            pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
    bounding_boxes, points = detect_face.detect_face(img, minsize, pnet, rnet,
                                                     onet, threshold, factor)
    return bounding_boxes, points
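For reference, detect_face.detect_face returns bounding_boxes as an (n, 5) array of [x1, y1, x2, y2, score] rows and points as a 10 x n array holding each face's five landmark x coordinates followed by the five y coordinates. A minimal unpacking sketch, assuming img is an RGB numpy array:

    bboxes, points = detection_face(img)
    for box, pts in zip(bboxes, points.T):
        x1, y1, x2, y2, score = box
        landmarks = [(pts[j], pts[j + 5]) for j in range(5)]  # eyes, nose, mouth corners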
Example #20
    def __init__(self):
        # Minimum size of a face worth detecting: the larger minsize is, the
        # fewer pyramid levels are generated and the less work resize and pnet do.
        self.minsize = 30
        self.threshold = [0.6, 0.7, 0.7]  # thresholds for the three detection stages
        self.factor = 0.709  # scale factor between levels of the image pyramid
        print('Creating networks and loading parameters')
        with tf.Graph().as_default():
            # gpu_memory_fraction = 1.0
            # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
            # sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
            sess = tf.Session()
            with sess.as_default():
                # create_mtcnn must be called first to build the network
                # structure before the MTCNN model can be used
                self.pnet, self.rnet, self.onet = detect_face.create_mtcnn(
                    sess, None)
Example #21
def get_face_img(img_paths):
    # input a list of paths of images
    # return
    #     (1) close-ups of faces
    #     (2) source
    #     (3) locations
    face_closeups = list()
    face_source = list()
    face_locations = list()

    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.25)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = detect_face.create_mtcnn(sess, None)

        for path in img_paths:
            img = img_read(path)
            # minsize, threshold, factor, args_margin and args_image_size are
            # module-level settings in the source project
            bounding_boxes, _ = detect_face.detect_face(
                img, minsize, pnet, rnet, onet, threshold, factor)
            nrof_faces = bounding_boxes.shape[0]

            if nrof_faces > 0:
                det = bounding_boxes[:, 0:4]
                img_size = np.asarray(img.shape)[0:2]

                for det_no in range(nrof_faces):
                    each_det = np.squeeze(det[det_no])
                    bb = np.zeros(4, dtype=np.int32)
                    bb[0] = np.maximum(each_det[0] - args_margin / 2,
                                       0)  # left Bound
                    bb[1] = np.maximum(each_det[1] - args_margin / 2,
                                       0)  # upper Bound
                    bb[2] = np.minimum(each_det[2] + args_margin / 2,
                                       img_size[1])  # right Bound
                    bb[3] = np.minimum(each_det[3] + args_margin / 2,
                                       img_size[0])  # lower Bound
                    cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
                    scaled = misc.imresize(cropped,
                                           (args_image_size, args_image_size),
                                           interp='bilinear')

                    face_closeups.append(scaled)
                    face_source.append(path)
                    face_locations.append(bb)

    return face_closeups, face_source, face_locations
Example #22
    def convert_to_embedding(self, single=False, img_path=None):
        extracted = []
        npy = ''
        with tf.Graph().as_default():
            with tf.Session() as sess:
                self.sess = sess
                # Load the model
                facenet.load_model(self.model_dir)
                pnet, rnet, onet = detect_face.create_mtcnn(sess, npy)
                minsize = 20  # minimum size of face
                threshold = [0.6, 0.7, 0.7]  # thresholds for the three detection stages
                factor = 0.709  # scale factor
                margin = 32
                frame_interval = 3
                batch_size = 1000
                image_size = 160
                input_image_size = 160
                # Get input and output tensors
                images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
                self.images_placeholder = tf.image.resize_images(
                    images_placeholder, (self.image_size, self.image_size))
                self.embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
                self.phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
                self.embedding_size = self.embeddings.get_shape()[1]
                if not single:
                    for filename in os.listdir(self.data_path):
                        img = cv2.imread(self.data_path + "/" + filename, 1)
                        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                        bounding_boxes, points = detect_face.detect_face(
                            img, minsize, pnet, rnet, onet, threshold, factor)
                        faces = self.get_faces(img, bounding_boxes, points, filename)
                        extracted.append(faces)
                    with open('extracted_embeddings.pickle', 'wb') as f:
                        pickle.dump(extracted, f)
                    return None
                else:
                    img = cv2.imread(img_path, 1)
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                    bounding_boxes, points = self.alignMTCNN.get_bounding_boxes(image=img)
                    faces = self.get_faces(img, bounding_boxes, points, img_path)
                    return faces
Example #23
def main():
    frame_interval = 3  # Number of frames after which to run face detection
    model_path = '/home/zxk/AI/tensorflow-yolov3/checkpoint/yolov3.ckpt'

    # detector=YoLoService(model_path)
    sess = tf.Session()
    pnet, rnet, onet = create_mtcnn(sess, None)

    frame_count = 0

    video_capture = cv2.VideoCapture(
        '/home/zxk/AI/y2mate.com - _c0raDbZpV9s_360p.mp4')
    # face_recognition = face.Recognition()

    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()
        if not ret:
            break

        frame_count += 1
        if frame_count % frame_interval == 0:
            # frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
            frame_orig = frame
            H, W, _ = frame_orig.shape

            frame = cv2.resize(frame, (640, 480),
                               interpolation=cv2.INTER_LINEAR)
            # objects=detector.predict(frame)
            total_boxes, _ = detect_face(frame[:, :, ::-1], 30, pnet, rnet,
                                         onet, [0.6, 0.6, 0.7], 0.7)
            objects = {
                'boxes': total_boxes,
                'labels': [1] * len(total_boxes),
                'scores': [1] * len(total_boxes)
            }
            # cropPeople(frame_orig, objects, frame_count, 'out')

            add_overlays(frame, objects, 1)
            cv2.imshow('Video', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        print(frame_count, ret)
    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
Example #24
    def __init__(self):
        '''load mtcnn and facenet model

        load all needed parameters from sys_config.py
        preload the models and save correspond session
        '''
        self.facenet_weight_dir = sys_config.facenet_weight_dir
        self.face_minsize = sys_config.face_minsize
        self.image_size = sys_config.image_size
        self.margin = sys_config.margin
        self.three_threshold = sys_config.three_threshold
        self.factor = sys_config.factor
        self.distance_threshold = sys_config.distance_threshold

        print('Creating networks and loading parameters')
        with tf.Graph().as_default():
            # do not reserve all GPU memory up front; allocate on demand
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            sess = tf.Session(config=config)
            with sess.as_default():
                # load mtcnn model(do face detection and align)
                self.pnet, self.rnet, self.onet = detect_face.create_mtcnn(
                    sess, None)

                # load the facenet model(do face recognition)
                facenet.load_model(self.facenet_weight_dir)

                # Get input and output tensors
                self.images_placeholder = tf.get_default_graph(
                ).get_tensor_by_name("input:0")
                self.embeddings = tf.get_default_graph().get_tensor_by_name(
                    "embeddings:0")
                self.phase_train_placeholder = tf.get_default_graph(
                ).get_tensor_by_name("phase_train:0")

                # save session
                self.facenet_session = sess

                # load target people feature embeddings
                template_embeddings_file = sys_config.template_face_embeddings_file
                if os.path.exists(template_embeddings_file):
                    self.template_face_embeddings = np.load(
                        template_embeddings_file)
        print("init finish")
Example #25
def load_nets(**kwargs):
    global pnets
    global rnet
    global onet

    use_tf = PARAMS['use_tf']
    if use_tf:
        model_path = PARAMS['tf_path']
        import tensorflow as tf
        sess = tf.Session()
        pnets, rnet, onet = detect_face.create_mtcnn(sess, model_path)
    else:
        plugin = kwargs.get('plugin')
        model_dir = PARAMS.get('align_model_dir')

        LOG.info('Load PNET')

        pnets_proxy = []
        for r in ko.parse_resolutions(PARAMS['resolutions']):
            p = ko.PNetHandler(plugin, r[0], r[1])
            pnets_proxy.append(p.proxy())

        LOG.info('Load RNET')
        net = ie.IENetwork.from_ir(*net_filenames(model_dir, 'rnet'))
        rnet_proxy = OpenVINONet(plugin, net)

        LOG.info('Load ONET')

        net = ie.IENetwork.from_ir(*net_filenames(model_dir, 'onet'))
        onet_proxy = OpenVINONet(plugin, net)
        onet_input_name = list(net.inputs.keys())[0]
        onet_batch_size = net.inputs[onet_input_name][0]
        LOG.info('ONET_BATCH_SIZE = {}'.format(onet_batch_size))

        pnets, rnet, onet = detect_face.create_openvino_mtcnn(
            pnets_proxy, rnet_proxy, onet_proxy, onet_batch_size)

    LOG.info('Load classifier')
    with open(PARAMS['classifier'], 'rb') as f:
        global model
        global class_names
        opts = {'file': f}
        if six.PY3:
            opts['encoding'] = 'latin1'
        (model, class_names) = pickle.load(**opts)
Example #26
    def __init__(self):
        # minsize: smallest face the detector will look for
        self.minsize = 20

        # threshold: confidence thresholds for the face bounding boxes
        self.threshold = [0.6, 0.7, 0.7]

        # factor: scale factor used when building the image pyramid
        self.factor = 0.709

        print('MTCNN face detection and localization running >>>\n')

        # Call the create_mtcnn API to build the networks with TensorFlow
        with tf.Graph().as_default():
            sess = tf.Session()
            with sess.as_default():
                # the tuple unpacks into the three networks; the second
                # argument (model directory) defaults to None
                self.pnet, self.rnet, self.onet = detect_face.create_mtcnn(
                    sess, None)
Example #27
def load_and_align_data(image_path, image_size, margin, gpu_memory_fraction):

    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # thresholds for the three detection stages
    factor = 0.709  # scale factor for the image pyramid

    print('Creating networks and loading parameters')
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = detect_face.create_mtcnn(sess, None)

    img_list = []
    img = misc.imread(os.path.expanduser(image_path), mode='RGB')
    print("loading image:", os.path.expanduser(image_path))
    img_size = np.asarray(img.shape)[0:2]
    bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet,
                                                threshold, factor)

    if len(bounding_boxes) < 1:
        # image_paths.remove(image)
        print("can't detect a face in", image_path)
    else:
        for bounding_box in bounding_boxes:
            print("bounding_box")
            print(bounding_box)
            det = np.squeeze(bounding_box)
            bb = np.zeros(4, dtype=np.int32)
            bb[0] = np.maximum(det[0] - margin / 2, 0)
            bb[1] = np.maximum(det[1] - margin / 2, 0)
            bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
            bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
            cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
            aligned = misc.imresize(cropped, (image_size, image_size),
                                    interp='bilinear')
            prewhitened = facenet.prewhiten(aligned)
            img_list.append(prewhitened)
    #print("img_list")
    #print(img_list)
    return img_list, bounding_boxes
Example #28
    def initialise(self, filename):
        # some constants kept as default from facenet
        global pnet, rnet, onet, threshold, factor, minsize, margin, sess, images_placeholder, phase_train_placeholder, embedding_size, embeddings
        sess = tf.Session()

        # read pnet, rnet, onet models from align directory and files are det1.npy, det2.npy, det3.npy
        pnet, rnet, onet = detect_face.create_mtcnn(sess, 'align')

        # read 20170512-110547 model file downloaded from https://drive.google.com/file/d/0B5MzpY9kBtDVZ2RpVDYwWmxoSUk
        facenet.load_model("20170512-110547/20170512-110547.pb")

        # Get input and output tensors
        images_placeholder = tf.get_default_graph().get_tensor_by_name(
            "input:0")
        embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
        phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(
            "phase_train:0")
        embedding_size = embeddings.get_shape()[1]
        # invoke printResults on the Recognition class with this instance
        obj = Recognition
        obj.printResults(self, filename)
Example #29
def detection_face(img):
    minsize = 20  # minimum size of face
    threshold = [0.6, 0.7, 0.7]  # thresholds for the three detection stages
    factor = 0.709  # scale factor for the image pyramid
    print('Creating networks and loading parameters')
    with tf.Graph().as_default():
        # gpu_memory_fraction = 1.0
        # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
        # sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        sess = tf.Session()
        with sess.as_default():
            pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
            bboxes, landmarks = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
    landmarks = np.transpose(landmarks)
    bboxes = bboxes.astype(int)
    bboxes = [b[:4] for b in bboxes]  # drop the confidence score
    landmarks_list = []
    for landmark in landmarks:
        # regroup the flat (x1..x5, y1..y5) layout into five [x, y] pairs
        face_landmarks = [[landmark[j], landmark[j + 5]] for j in range(5)]
        landmarks_list.append(face_landmarks)
    return bboxes, landmarks_list
Example #30
def main(args):
    sess = tf.Session()
    pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
    threshold = [0.6, 0.7, 0.7]
    factor = 0.709

    img = io.imread(args.image)
    _minsize = min(min(img.shape[0] // 5, img.shape[1] // 5), 80)
    bounding_boxes, points = detect_face.detect_face(img, _minsize, pnet, rnet, onet, threshold, factor)
    assert bounding_boxes.size > 0, 'no face detected'
    points = points[:, 0]  # landmarks of the first detected face
    landmark = points.reshape((2, 5)).T
    warped = preprocess(img, landmark)

    io.imsave(args.image[:-4] + '_aligned.png', warped)

    if args.mask:
        if args.logo is None:
            logo_mask = np.ones((1, 400, 900, 3), dtype=np.float32)
        else:
            logo_mask = np.where(io.imread(args.logo) / 255. < 0.5, 1., 0.)
            logo_mask = np.expand_dims(logo_mask, axis=0)

        logo = tf.placeholder(tf.float32, shape=[1, 400, 900, 3])
        param = tf.placeholder(tf.float32, shape=[1, 1])
        ph = tf.placeholder(tf.float32, shape=[1, 1])
        result = projector(param, ph, logo)

        face_input = tf.placeholder(tf.float32, shape=[1, 600, 600, 3])
        theta = tf.placeholder(tf.float32, shape=[1, 6])
        prepared = stn(result, theta)

        united = prepared[:, 300:, 150:750] + face_input * (1 - prepared[:, 300:, 150:750])

        img_with_mask = sess.run(
            united,
            feed_dict={
                ph: [[args.ph]],
                logo: logo_mask,
                param: [[args.param]],
                face_input: np.expand_dims(warped / 255., 0),
                theta: 1. / args.scale * np.array([[1., 0., -args.x / 450., 0., 1., -args.y / 450.]]),
            })[0]

        io.imsave(args.image[:-4] + '_mask.png', img_with_mask)