Example #1
    def __init__(self, specific_model):
        super(FaceDetectorSSD, self).__init__()
        self.specific_model = specific_model
        graph_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            DeepFaceConfs.get()['detector'][self.specific_model][
            'frozen_graph']  # frozen model weights
        )
        self.detector = self._load_graph(graph_path)  # load the TensorFlow graph

        self.tensor_image = self.detector.get_tensor_by_name(
            'prefix/image_tensor:0')  # input image tensor
        self.tensor_boxes = self.detector.get_tensor_by_name(
            'prefix/detection_boxes:0')  # bounding-box tensor
        self.tensor_score = self.detector.get_tensor_by_name(
            'prefix/detection_scores:0')  # detection-score tensor
        self.tensor_class = self.detector.get_tensor_by_name(
            'prefix/detection_classes:0')  # detected-class tensor

        predictor_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            DeepFaceConfs.get()['detector']['dlib']['landmark_detector'])
        self.predictor = dlib.shape_predictor(
            predictor_path)  # initialize the landmark predictor

        config = tf.ConfigProto(
            gpu_options=tf.GPUOptions(allow_growth=True))  # let GPU memory grow as needed
        self.session = tf.Session(graph=self.detector, config=config)
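The constructor relies on a _load_graph helper that is not shown here. A minimal sketch, assuming the standard TF1 frozen-graph loading pattern (the 'prefix/' tensor names above suggest the graph is imported with name='prefix'):

    def _load_graph(self, graph_path):
        # assumed implementation: read the serialized GraphDef and import it
        # under the 'prefix' name scope used by the tensor lookups above
        with tf.gfile.GFile(graph_path, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
        with tf.Graph().as_default() as graph:
            tf.import_graph_def(graph_def, name='prefix')
        return graph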
Example #2
    def __init__(self, specific_model):
        super(FaceDetectorSSD, self).__init__()
        self.specific_model = specific_model
        graph_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            DeepFaceConfs.get()['detector'][self.specific_model]
            ['frozen_graph'])
        self.detector = self._load_graph(graph_path)

        self.tensor_image = self.detector.get_tensor_by_name(
            'prefix/image_tensor:0')
        self.tensor_boxes = self.detector.get_tensor_by_name(
            'prefix/detection_boxes:0')
        self.tensor_score = self.detector.get_tensor_by_name(
            'prefix/detection_scores:0')
        self.tensor_class = self.detector.get_tensor_by_name(
            'prefix/detection_classes:0')

        predictor_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            DeepFaceConfs.get()['detector']['dlib']['landmark_detector'])
        self.predictor = dlib.shape_predictor(predictor_path)

        config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
        self.session = tf.Session(graph=self.detector, config=config)
Example #3
    def __init__(self):
        super(FaceDetectorDlib, self).__init__()
        self.detector = dlib.get_frontal_face_detector()
        predictor_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            DeepFaceConfs.get()['detector']['dlib']['landmark_detector'])
        self.predictor = dlib.shape_predictor(predictor_path)
        self.upsample_scale = DeepFaceConfs.get()['detector']['dlib']['scale']
Example #4
def objective(args):
    if 'crop_y_ratio' in args:
        print('---------- crop_y_ratio set', args['crop_y_ratio'])
        DeepFaceConfs.get()['roi']['crop_y_ratio'] = args['crop_y_ratio']
    if 'size_ratio' in args:
        print('---------- size_ratio set', args['size_ratio'])
        DeepFaceConfs.get()['roi']['size_ratio'] = args['size_ratio']

    t = DeepFace()
    try:
        score = t.test_lfw(visualize=False)
    except Exception as e:
        print('--------- error...')
        print(e)
        return 100  # large penalty so the optimizer steers away from failing configs
    print('---------- score=', score)

    return -score  # the objective is minimized, so return the negated score
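objective has the shape expected by a minimizing hyperparameter optimizer (an error returns a large loss, success returns the negated score). A minimal sketch of driving it with hyperopt; the search-space bounds are illustrative assumptions, not values from the project:

from hyperopt import fmin, hp, tpe

space = {
    'crop_y_ratio': hp.uniform('crop_y_ratio', 0.2, 0.6),  # assumed bounds
    'size_ratio': hp.uniform('size_ratio', 1.0, 2.0),      # assumed bounds
}
best = fmin(fn=objective, space=space, algo=tpe.suggest, max_evals=50)
print(best)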
Example #5
    def detect(self, npimg, resize=(480, 640)):
        """

        :param npimg:
        :param resize: False or tuple
        :return:
        """
        height, width = npimg.shape[:2]
        if not resize:
            infer_img = npimg
        else:
            infer_img = cv2.resize(npimg, resize,
                                   interpolation=cv2.INTER_AREA)  # TODO : Resize or not?

        dets, scores, classes = self.session.run(
            [self.tensor_boxes, self.tensor_score, self.tensor_class],
            feed_dict={self.tensor_image: [infer_img]})
        dets, scores = dets[0], scores[0]

        faces = []
        for det, score in zip(dets, scores):
            if score < DeepFaceConfs.get()['detector'][
                    self.specific_model]['score_th']:
                continue

            y = int(max(det[0], 0) * height)
            x = int(max(det[1], 0) * width)
            h = int((det[2] - det[0]) * height)
            w = int((det[3] - det[1]) * width)

            if w <= 1 or h <= 1:
                continue

            bbox = BoundingBox(x, y, w, h, score)

            # find landmark
            rect = dlib.rectangle(left=x, top=y, right=x + w, bottom=y + h)
            shape = self.predictor(npimg, rect)
            coords = np.zeros((68, 2), dtype=int)  # np.int was removed in NumPy 1.24

            # loop over the 68 facial landmarks and convert them
            # to a 2-tuple of (x, y)-coordinates
            for i in range(0, 68):
                coords[i] = (shape.part(i).x, shape.part(i).y)
            bbox.face_landmark = coords

            faces.append(bbox)

        faces = sorted(faces, key=lambda x: x.score, reverse=True)

        return faces
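A hedged usage sketch; the 'ssd_mobilenet_v2' config key is a guess based on the detector names used elsewhere in the project:

import cv2

detector = FaceDetectorSSD('ssd_mobilenet_v2')  # hypothetical config key
img = cv2.imread('people.jpg', cv2.IMREAD_COLOR)
for face in detector.detect(img, resize=False):
    # BoundingBox exposes x, y, w, h and score, as used in the loop above
    cv2.rectangle(img, (face.x, face.y),
                  (face.x + face.w, face.y + face.h), (0, 255, 0), 2)
cv2.imwrite('people_out.jpg', img)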
Example #6
    def detect(self, npimg, resize=(480, 640)):
        """

        :param npimg:
        :param resize: False or tuple
        :return:
        """
        height, width = npimg.shape[:2]
        if not resize:
            infer_img = npimg
        else:
            infer_img = cv2.resize(npimg, resize,
                                   interpolation=cv2.INTER_AREA)  # TODO : Resize or not?

        dets, scores, classes = self.session.run(
            [self.tensor_boxes, self.tensor_score, self.tensor_class],
            feed_dict={self.tensor_image: [infer_img]})
        dets, scores = dets[0], scores[0]

        faces = []
        for det, score in zip(dets, scores):
            if score < DeepFaceConfs.get()['detector'][
                    self.specific_model]['score_th']:
                continue

            y = int(max(det[0], 0) * height)
            x = int(max(det[1], 0) * width)
            h = int((det[2] - det[0]) * height)
            w = int((det[3] - det[1]) * width)

            if w <= 1 or h <= 1:
                continue

            bbox = BoundingBox(x, y, w, h, score)

            faces.append(
                (bbox.x, bbox.y, bbox.x + bbox.w, bbox.y + bbox.h, bbox.score))

        return faces
Example #7
    def detect(self, npimg, resize=(480, 640)):

        height, width = npimg.shape[:2]
        infer_img = cv2.resize(npimg, resize, interpolation=cv2.INTER_AREA)

        dets, scores, classes = self.session.run(
            [self.tensor_boxes, self.tensor_score, self.tensor_class],
            feed_dict={self.tensor_image: [infer_img]})
        dets, scores = dets[0], scores[0]

        faces = []
        for det, score in zip(dets, scores):
            if score < DeepFaceConfs.get()['detector'][self.specific_model][
                    'score_th']:  # th = 0.7 (deepface/config/basic.yml)
                continue

            # face box coordinates
            y = int(max(det[0], 0) * height)
            x = int(max(det[1], 0) * width)
            h = int((det[2] - det[0]) * height)
            w = int((det[3] - det[1]) * width)

            if w <= 1 or h <= 1:
                continue

            bbox = BoundingBox(x, y, w, h, score)

            # compute landmarks
            rect = dlib.rectangle(left=x, top=y, right=x + w, bottom=y + h)
            shape = self.predictor(npimg, rect)
            coords = np.zeros((68, 2), dtype=int)  # np.int was removed in NumPy 1.24

            # loop over the 68 facial landmarks and convert them to (x, y) pairs
            for i in range(0, 68):
                coords[i] = (shape.part(i).x, shape.part(i).y)
            bbox.face_landmark = coords

            faces.append(bbox)

        faces = sorted(faces, key=lambda x: x.score,
                       reverse=True)  # sort by score

        return faces
Example #8
    def detect(self, npimg):
        # the third argument (-1) lowers dlib's detection threshold so weak
        # candidates are returned too; they are filtered by score_th below
        dets, scores, idx = self.detector.run(npimg, self.upsample_scale, -1)
        faces = []
        for det, score in zip(dets, scores):
            if score < DeepFaceConfs.get()['detector']['dlib']['score_th']:
                continue

            x = max(det.left(), 0)
            y = max(det.top(), 0)
            w = min(det.right() - det.left(), npimg.shape[1] - x)
            h = min(det.bottom() - det.top(), npimg.shape[0] - y)

            if w <= 1 or h <= 1:
                continue

            bbox = BoundingBox(x, y, w, h, score)

            # find landmark
            bbox.face_landmark = self.detect_landmark(npimg, det)

            faces.append(bbox)

        faces = sorted(faces, key=lambda x: x.score, reverse=True)
        return faces
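detect_landmark is not shown in this example; a minimal sketch consistent with the 68-point loop in Example #5 (assumes numpy is imported as np):

    def detect_landmark(self, npimg, rect):
        # run the dlib shape predictor and collect the 68 (x, y) points
        shape = self.predictor(npimg, rect)
        coords = np.zeros((68, 2), dtype=int)
        for i in range(68):
            coords[i] = (shape.part(i).x, shape.part(i).y)
        return coords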
Example #9
    def detect(self, npimg, rois=None, faces=None):
        probs, feats = self.extract_features(npimg=npimg,
                                             rois=rois,
                                             faces=faces)

        if self.db is None:
            topk = DeepFaceConfs.get()['recognizer']['topk']
            names = [[(self.class_names[idx], prob[idx])
                      for idx in prob.argsort()[-topk:][::-1]]
                     for prob in probs]
        else:
            # TODO
            names = []
            for feat in feats:
                scores = []
                for db_name, db_feature in self.db.items():
                    similarity = feat_distance_cosine(feat, db_feature)
                    scores.append((db_name, similarity))
                scores.sort(key=lambda x: x[1], reverse=True)
                names.append(scores)

        return {
            'output': probs,
            'feature': feats,
            'name': names
        }
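feat_distance_cosine is defined elsewhere in the project. Judging by how its result is sorted and thresholded (higher means more similar), a plausible minimal sketch is plain cosine similarity:

import numpy as np

def feat_distance_cosine(feat1, feat2):
    # cosine similarity in [-1, 1]; the epsilon guards against zero vectors
    feat1 = np.asarray(feat1).ravel()
    feat2 = np.asarray(feat2).ravel()
    denom = np.linalg.norm(feat1) * np.linalg.norm(feat2) + 1e-12
    return float(np.dot(feat1, feat2) / denom)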
Example #10
    def test_lfw(self, set='test', model='ssdm_resnet', visualize=True):
        if set == 'train':  # 'is' compares identity; string equality needs '=='
            pairfile = 'pairsDevTrain.txt'
        else:
            pairfile = 'pairsDevTest.txt'
        lfw_path = DeepFaceConfs.get()['dataset']['lfw']
        path = os.path.join(lfw_path, pairfile)
        with open(path, 'r') as f:
            lines = f.readlines()[1:]

        pairs = []
        for line in lines:
            elms = line.split()
            if len(elms) == 3:
                pairs.append((elms[0], int(elms[1]), elms[0], int(elms[2])))
            elif len(elms) == 4:
                pairs.append((elms[0], int(elms[1]), elms[2], int(elms[3])))
            else:
                logger.warning('line should have 3 or 4 elements, line=%s' % line)

        detec = FaceDetectorDlib.NAME
        if model == 'baseline':
            recog = FaceRecognizerVGG.NAME
            just_name = 'vgg'
        elif model == 'baseline_resnet':
            recog = FaceRecognizerResnet.NAME
            just_name = 'resnet'
        elif model == 'ssdm_resnet':
            recog = FaceRecognizerResnet.NAME
            just_name = 'resnet'
            detec = 'detector_ssd_mobilenet_v2'
        else:
            raise Exception('invalid model name=%s' % model)

        logger.info('pair length=%d' % len(pairs))
        test_result = []  # score, label(1=same)
        for name1, idx1, name2, idx2 in tqdm(pairs):
            img1_path = os.path.join(lfw_path, name1, '%s_%04d.jpg' % (name1, idx1))
            img2_path = os.path.join(lfw_path, name2, '%s_%04d.jpg' % (name2, idx2))
            img1 = cv2.imread(img1_path, cv2.IMREAD_COLOR)
            img2 = cv2.imread(img2_path, cv2.IMREAD_COLOR)

            if img1 is None:
                logger.warning('image not read, path=%s' % img1_path)
            if img2 is None:
                logger.warning('image not read, path=%s' % img2_path)

            result1 = self.run(image=img1, detector=detec, recognizer=recog, visualize=False)
            result2 = self.run(image=img2, detector=detec, recognizer=recog, visualize=False)

            if len(result1) == 0:
                logger.warning('face not detected, name=%s(%d)! %s(%d)' % (name1, idx1, name2, idx2))
                test_result.append((0.0, name1 == name2))
                continue
            if len(result2) == 0:
                logger.warning('face not detected, name=%s(%d) %s(%d)!' % (name1, idx1, name2, idx2))
                test_result.append((0.0, name1 == name2))
                continue

            feat1 = result1[0].face_feature
            feat2 = result2[0].face_feature
            similarity = feat_distance_cosine(feat1, feat2)
            test_result.append((similarity, name1 == name2))

        # calculate accuracy TODO
        accuracy = sum([label == (score > DeepFaceConfs.get()['recognizer'][just_name]['score_th']) for score, label in test_result]) / float(len(test_result))
        logger.info('accuracy=%.8f' % accuracy)

        # ROC Curve, AUC
        tps = []
        fps = []
        accuracy0 = []
        accuracy1 = []
        acc_th = []

        for th in range(0, 100, 5):
            th = th / 100.0
            tp = 0
            tn = 0
            fp = 0
            fn = 0
            for score, label in test_result:
                if score >= th and label == 1:
                    tp += 1
                elif score >= th and label == 0:
                    fp += 1
                elif score < th and label == 0:
                    tn += 1
                elif score < th and label == 1:
                    fn += 1
            tpr = tp / (tp + fn + 1e-12)
            fpr = fp / (fp + tn + 1e-12)
            tps.append(tpr)
            fps.append(fpr)
            accuracy0.append(tn / (tn + fp + 1e-12))
            accuracy1.append(tp / (tp + fn + 1e-12))
            acc_th.append(th)

        fpr, tpr, thresh = roc_curve([x[1] for x in test_result], [x[0] for x in test_result])
        fnr = 1 - tpr
        eer = fnr[np.nanargmin(np.absolute((fnr - fpr)))]
        logger.info('1-eer=%.4f' % (1.0 - eer))

        # previous results, if any (avoids crashing on the first run)
        results = {}
        if os.path.exists('./etc/test_lfw.pkl'):
            with open('./etc/test_lfw.pkl', 'rb') as f:
                results = pickle.load(f)

        if visualize in [True, 'True', 'true', 1, '1']:
            fig = plt.figure()
            a = fig.add_subplot(1, 2, 1)
            plt.title('Experiment on LFW')
            plt.plot(fpr, tpr, label='%s(%.4f)' % (model, 1 - eer))  # TODO : label

            for model_name in results:
                if model_name == model:
                    continue
                fpr_prev = results[model_name]['fpr']
                tpr_prev = results[model_name]['tpr']
                eer_prev = results[model_name]['eer']
                plt.plot(fpr_prev, tpr_prev, label='%s(%.4f)' % (model_name, 1 - eer_prev))

            plt.xlim([0.0, 1.0])
            plt.ylim([0.0, 1.05])
            plt.xlabel('False Positive Rate')
            plt.ylabel('True Positive Rate')
            a.legend()
            a.set_title('Receiver operating characteristic')

            a = fig.add_subplot(1, 2, 2)
            plt.plot(accuracy0, acc_th, label='Accuracy_diff')
            plt.plot(accuracy1, acc_th, label='Accuracy_same')
            plt.xlim([0.0, 1.0])
            plt.ylim([0.0, 1.05])
            a.legend()
            a.set_title('%s : TP, TN' % model)

            fig.savefig('./etc/roc.png', dpi=300)
            plt.show()
            plt.draw()

        with open('./etc/test_lfw.pkl', 'wb') as f:
            results[model] = {
                'fpr': fpr,
                'tpr': tpr,
                'acc_th': acc_th,
                'accuracy0': accuracy0,
                'accuracy1': accuracy1,
                'eer': eer
            }
            pickle.dump(results, f, pickle.HIGHEST_PROTOCOL)

        return 1.0 - eer
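A minimal usage sketch, mirroring the call in Example #4 (assumes the LFW pairs files and images are available at the 'dataset' path configured in DeepFaceConfs):

t = DeepFace()
score = t.test_lfw(set='test', model='ssdm_resnet', visualize=False)
print('1-EER: %.4f' % score)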
Example #11
def get_roi(img, face, roi_mode):
    """
    :return: Cropped & Aligned Face Image
    """
    rpy, node_point = landmark_to_pose(face.face_landmark, img.shape)
    roll = rpy[0]
    if abs(roll) > math.pi / 2.0:
        roll = 0.0  # TODO ?

    height, width = img.shape[:2]
    new_w, new_h = (abs(math.sin(roll) * height) + abs(math.cos(roll) * width),
                    abs(math.sin(roll) * width) + abs(math.cos(roll) * height))
    new_w = roundint(new_w)
    new_h = roundint(new_h)
    mat = cv2.getRotationMatrix2D((width / 2, height / 2), -1 * roll * 180.0 / math.pi, 1.0)  # center is (x, y)
    (tx, ty) = (roundint((new_w - width) / 2), roundint((new_h - height) / 2))
    mat[0, 2] += tx
    mat[1, 2] += ty
    dst = cv2.warpAffine(img, mat, dsize=(new_w + tx * 2, new_h + ty * 2))                      

    aligned_points = []
    if face.face_landmark is not None:
        for x, y in face.face_landmark:
            new_x, new_y = rotate_dot((x, y), mat)
            aligned_points.append((new_x, new_y))

        min_x = min(aligned_points, key=lambda x: x[0])[0]
        max_x = max(aligned_points, key=lambda x: x[0])[0]
        min_y = min(aligned_points, key=lambda x: x[1])[1]
        max_y = max(aligned_points, key=lambda x: x[1])[1]

        aligned_w = max_x - min_x
        aligned_h = max_y - min_y
        crop_y_ratio = float(DeepFaceConfs.get()['roi'][roi_mode]['crop_y_ratio'])
        center_point = ((min_x + max_x) / 2, min_y * crop_y_ratio + max_y * (1.0 - crop_y_ratio))
        image_size = int(
            max(aligned_w, aligned_h) * DeepFaceConfs.get()['roi'][roi_mode]['size_ratio'])  # TODO : Parameter tuning?
    else:
        crop_y_ratio = None  # only for the debug print in the except block below
        min_x, min_y = rotate_dot((face.x, face.y), mat)
        max_x, max_y = rotate_dot((face.x + face.w, face.y + face.h), mat)

        aligned_w = max_x - min_x
        aligned_h = max_y - min_y
        center_point = ((min_x + max_x) / 2, (min_y + max_y) / 2)
        image_size = int(max(aligned_w, aligned_h) * DeepFaceConfs.get()['roi'][roi_mode]['size_ratio'])

    crop_x1 = roundint(center_point[0] - image_size / 2)
    crop_y1 = roundint(center_point[1] - image_size / 2)
    crop_x2 = roundint(center_point[0] + image_size / 2)
    crop_y2 = roundint(center_point[1] + image_size / 2)

    cropped = dst[max(0, crop_y1):min(new_h, crop_y2), max(0, crop_x1):min(new_w, crop_x2)]
    pasted = np.zeros((image_size, image_size, 3), np.uint8)

    start_x = 0 if crop_x1 > 0 else -crop_x1
    start_y = 0 if crop_y1 > 0 else -crop_y1
    crop_w = min(cropped.shape[1], pasted.shape[1] - start_x)
    crop_h = min(cropped.shape[0], pasted.shape[0] - start_y)
    try:
        pasted[start_y:start_y + crop_h, start_x:start_x + crop_w] = cropped[:crop_h, :crop_w]  # TODO
    except Exception:
        print(crop_y_ratio, roll, pasted.shape, cropped.shape, 'min', min_x, max_x, min_y, max_y,
              'imgsize', image_size, start_x, start_y, crop_w, crop_h)
        print(center_point)
        # observed failure: crop_y_ratio set 0.3667256819925064
        # 0.0 (128, 128, 3) (249, 128, 3) min 76 166 101 193 imgsize 128 0 129 128 -1
        # (121.0, -65.31315823528763)

    return pasted
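rotate_dot and roundint are project helpers not shown here. A minimal sketch of rotate_dot, assuming it applies the 2x3 affine matrix returned by cv2.getRotationMatrix2D to a single point:

def rotate_dot(point, mat):
    # apply the affine transform [R|t]: new_p = R @ p + t
    x, y = point
    new_x = mat[0, 0] * x + mat[0, 1] * y + mat[0, 2]
    new_y = mat[1, 0] * x + mat[1, 1] * y + mat[1, 2]
    return int(round(new_x)), int(round(new_y))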
Example #12
    def __init__(self, custom_db=None):
        self.batch_size = 4
        dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'vggface')
        filename = 'weight.mat'
        filepath = os.path.join(dir_path, filename)

        if not os.path.exists(filepath):
            raise FileNotFoundError('Weight file not found, path=%s' % filepath)

        data = loadmat(filepath)

        # read meta info
        meta = data['meta']
        classes = meta['classes']
        normalization = meta['normalization']

        self.average_image = np.squeeze(normalization[0][0]['averageImage'][0][0][0][0]).reshape(1, 1, 1, 3)
        self.input_hw = tuple(np.squeeze(normalization[0][0]['imageSize'][0][0])[:2])
        self.input_node = tf.placeholder(tf.float32, shape=(None, self.input_hw[0], self.input_hw[1], 3), name='image')
        self.class_names = [str(x[0][0]) for x in classes[0][0]['description'][0][0]]

        input_norm = tf.subtract(self.input_node, self.average_image, name='normalized_image')

        # read layer info
        layers = data['layers']
        current = input_norm
        network = {}
        for layer in layers[0]:
            name = layer[0]['name'][0][0]
            layer_type = layer[0]['type'][0][0]
            if layer_type == 'conv':
                if name[:2] == 'fc':
                    padding = 'VALID'
                else:
                    padding = 'SAME'
                stride = layer[0]['stride'][0][0]
                kernel, bias = layer[0]['weights'][0][0]
                # kernel = np.transpose(kernel, (1, 0, 2, 3))
                bias = np.squeeze(bias).reshape(-1)
                conv = tf.nn.conv2d(current, tf.constant(kernel), strides=(1, stride[0], stride[0], 1), padding=padding)
                current = tf.nn.bias_add(conv, bias)
            elif layer_type == 'relu':
                current = tf.nn.relu(current)
            elif layer_type == 'pool':
                stride = layer[0]['stride'][0][0]
                pool = layer[0]['pool'][0][0]
                current = tf.nn.max_pool(current, ksize=(1, pool[0], pool[1], 1), strides=(1, stride[0], stride[0], 1),
                                         padding='SAME')
            elif layer_type == 'softmax':
                current = tf.nn.softmax(tf.reshape(current, [-1, len(self.class_names)]))

            network[name] = current
        self.network = network

        self.graph = tf.get_default_graph()
        config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
        self.persistent_sess = tf.Session(graph=self.graph, config=config)
        self.db = None

        if custom_db:
            db_path = custom_db
        else:
            db_path = DeepFaceConfs.get()['recognizer']['vgg'].get('db', '')
            db_path = os.path.join(dir_path, db_path)
        with open(db_path, 'rb') as f:
            self.db = pickle.load(f)

        # warm-up
        self.persistent_sess.run([self.network['prob'], self.network['fc7']], feed_dict={
            self.input_node: np.zeros((self.batch_size, 224, 224, 3), dtype=np.uint8)
        })
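A hedged sketch of extracting fc7 features with the constructed network, mirroring the warm-up call above (the class name FaceRecognizerVGG and the input image are assumptions):

import cv2
import numpy as np

recognizer = FaceRecognizerVGG()  # assumes weight.mat and the db file are in place
crop = cv2.resize(cv2.imread('face.jpg'), (224, 224))
prob, feat = recognizer.persistent_sess.run(
    [recognizer.network['prob'], recognizer.network['fc7']],
    feed_dict={recognizer.input_node: np.expand_dims(crop, 0)})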
Example #13
    def get_threshold(self):
        return DeepFaceConfs.get()['recognizer']['vgg']['score_th']