Example #1
import os, sys

sys.path.append('.')

import cv2
import numpy as np

from tools.utils import *
from MTCNN import MTCNNDetector

if __name__ == "__main__":
    base_model_path = './pretrained_weights/mtcnn'
    mtcnn_detector = MTCNNDetector(
        p_model_path=os.path.join(base_model_path, 'best_pnet.pth'),
        r_model_path=os.path.join(base_model_path, 'best_rnet.pth'),
        o_model_path=os.path.join(base_model_path, 'best_onet.pth'),
        min_face_size=40,
        threshold=[0.99, 0.9, 0.8])

    image_path = './data/user/images'
    images = [f for f in os.listdir(image_path) if f.endswith('.jpg')]

    for image_name in images:
        image = cv2.imread(os.path.join(image_path, image_name), cv2.IMREAD_COLOR)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        bboxes = mtcnn_detector.detect_face(image)
        for i in range(bboxes.shape[0]):
            x0, y0, x1, y1 = bboxes[i, :4]
            cv2.rectangle(image, (int(x0), int(y0)), (int(x1), int(y1)),
                          (0, 255, 255), 1)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
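        # The example ends here without persisting the result; an assumed
        # continuation that writes the annotated image to a hypothetical
        # output directory next to the inputs.
        output_dir = './data/user/output'
        os.makedirs(output_dir, exist_ok=True)
        cv2.imwrite(os.path.join(output_dir, image_name), image)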
Example #2
import os

import cv2
import numpy as np
import torch

from MTCNN import MTCNNDetector

# anno_file: annotation file with the labels of the WIDER FACE training data
# (im_dir, the root directory of the training images, must also be defined)
with open(anno_file, 'r') as f:
    annotations = f.readlines()
num = len(annotations)
print("%d pics in total" % num)

image_size = 24
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
p_idx = 0  # positive
n_idx = 0  # negative
d_idx = 0  # don't care (part samples)
idx = 0

# create MTCNN Detector
mtcnn_detector = MTCNNDetector(
    p_model_path='./pretrained_weights/mtcnn/best_pnet.pth')

for annotation in annotations:
    annotation = annotation.strip().split(' ')
    im_path = os.path.join(im_dir, annotation[0])
    print(im_path)
    bbox = list(map(float, annotation[1:]))
    boxes = np.array(bbox, dtype=np.int32).reshape(-1, 4)

    # anno form is x1, y1, w, h, convert to x1, y1, x2, y2
    # boxes[:,2] += boxes[:,0] - 1
    # boxes[:,3] += boxes[:,1] - 1

    image = cv2.imread(im_path)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    bboxes = mtcnn_detector.detect_face(image)
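    # The excerpt stops after the PNet pass; a sketch (not the repository's
    # exact code) of the usual next step: compare each detection with the
    # ground-truth boxes by IoU (assuming 'boxes' is already in x1, y1, x2, y2
    # form) and label 24x24 crops as negative (IoU < 0.3), part/"don't care"
    # (0.4 <= IoU < 0.65) or positive (IoU >= 0.65).
    for box in bboxes:
        x1, y1, x2, y2 = box[:4].astype(np.int32)
        w, h = x2 - x1 + 1, y2 - y1 + 1
        if w < 20 or x1 < 0 or y1 < 0 or x2 >= image.shape[1] or y2 >= image.shape[0]:
            continue
        # IoU of this detection against every ground-truth box
        ix1 = np.maximum(x1, boxes[:, 0])
        iy1 = np.maximum(y1, boxes[:, 1])
        ix2 = np.minimum(x2, boxes[:, 2])
        iy2 = np.minimum(y2, boxes[:, 3])
        inter = np.maximum(0, ix2 - ix1 + 1) * np.maximum(0, iy2 - iy1 + 1)
        area_det = w * h
        area_gt = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
        iou = inter / (area_det + area_gt - inter).astype(np.float64)
        # the resized crop is what would be written to disk for RNet training
        crop = cv2.resize(image[y1:y2 + 1, x1:x2 + 1], (image_size, image_size))
        if iou.max() < 0.3:
            n_idx += 1  # negative sample
        elif iou.max() >= 0.65:
            p_idx += 1  # positive sample
        elif iou.max() >= 0.4:
            d_idx += 1  # part / "don't care" sample
    idx += 1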
Example #3

import os
import time
import logging

import cv2

from MTCNN import MTCNNDetector

logger = logging.getLogger(__name__)  # assumed module-level logger


def draw_images(img, bboxs):  # draw the face bounding boxes (and landmarks) on the image
    num_face = bboxs.shape[0]
    for i in range(num_face):
        cv2.rectangle(img, (int(bboxs[i, 0]), int(bboxs[i, 1])),
                      (int(bboxs[i, 2]), int(bboxs[i, 3])), (0, 255, 0), 3)
    return img


if __name__ == '__main__':
    base_model_path = './pretrained_weights/mtcnn'
    mtcnn_detector = MTCNNDetector(
        p_model_path=os.path.join(base_model_path, 'best_pnet.pth'),
        r_model_path=os.path.join(base_model_path, 'best_rnet.pth'),
        o_model_path=os.path.join(base_model_path, 'best_onet.pth'),
        min_face_size=24,
        threshold=[0.7, 0.8, 0.9],
        use_cuda=False)
    logger.info("Init the MtcnnDetector.")

    cap = cv2.VideoCapture('./test/test_video.mov')
    if not cap.isOpened():
        print("Failed to open capture from file")
    start = time.time()
    num = 0
    while cap.isOpened():
        ret, img = cap.read()
        if not ret:  # end of the video stream
            break
        logger.info("Start to process No.{} image.".format(num))
        RGB_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        #RGB_image = cv2.medianBlur(RGB_image, 5)
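        # The excerpt is truncated here; an assumed continuation of the loop:
        # run the detector on the RGB frame, draw the boxes with draw_images,
        # and display the annotated frame until 'q' is pressed.
        bboxs = mtcnn_detector.detect_face(RGB_image)
        if bboxs is not None and len(bboxs) > 0:
            img = draw_images(img, bboxs)
        num += 1
        cv2.imshow('faces', img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Report the average processing rate over the run.
    elapsed = time.time() - start
    logger.info("Processed {} frames in {:.1f}s ({:.2f} FPS).".format(
        num, elapsed, num / elapsed if elapsed > 0 else 0.0))
    cap.release()
    cv2.destroyAllWindows()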