Example #1
class Face:
    def __init__(self):
        import os
        # Temporarily switch into the DFace package directory so that the
        # relative model_store paths below resolve correctly.
        before_folder = os.path.abspath('.')
        os.chdir(
            os.path.join('/'.join(Face.__module__.split('.')[:-1]), 'DFace'))
        from dface.core.detect import create_mtcnn_net, MtcnnDetector
        pnet, rnet, onet = create_mtcnn_net(
            p_model_path="./model_store/pnet_epoch.pt",
            r_model_path="./model_store/rnet_epoch.pt",
            o_model_path="./model_store/onet_epoch.pt",
            use_cuda=False)
        self.detector = MtcnnDetector(
            pnet=pnet, rnet=rnet, onet=onet, min_face_size=64)
        # Restore the original working directory.
        os.chdir(before_folder)

    def get_face_from_file(self, org_file, margin=5.):
        import imageio
        img = imageio.imread(org_file, pilmode="RGB")
        try:
            # Take the first detected box (x1, y1, x2, y2) and drop its score;
            # the slice reverses the channel order (RGB -> BGR) before detection.
            bbox = map(int,
                       self.detector.detect_face(img[:, :, ::-1])[0][0][:-1])
            bbox = [max(0, i) for i in bbox]

            # Expand the box by 1/margin of its width/height on each side.
            def marginP(x, y):
                return int(x + y / margin)

            def marginM(x, y):
                return int(x - y / margin)

            bbox = [
                marginM(bbox[0], bbox[2] - bbox[0]),
                marginM(bbox[1], bbox[3] - bbox[1]),
                marginP(bbox[2], bbox[2] - bbox[0]),
                marginP(bbox[3], bbox[3] - bbox[1])
            ]
            bbox = [max(0, i) for i in bbox]
            img_face = img[bbox[1]:bbox[3], bbox[0]:bbox[2]]
            return img_face, True
        except IndexError:
            img_face = img
            return img_face, False

    def get_all_faces_from_file(self, org_file, margin=5.):
        import imageio
        img = imageio.imread(org_file, pilmode="RGB")
        # Collect every detected box, dropping the trailing confidence score.
        bboxes = [
            _bbox[:-1]
            for _bbox in self.detector.detect_face(img[:, :, ::-1])[0]
        ]
        good_bboxes = []
        for n in range(len(bboxes)):
            try:
                bbox = map(int, bboxes[n])
                bbox = [max(0, i) for i in bbox]

                def marginP(x, y):
                    return int(x + y / margin)

                def marginM(x, y):
                    return int(x - y / margin)

                bbox = [
                    marginM(bbox[0], bbox[2] - bbox[0]),
                    marginM(bbox[1], bbox[3] - bbox[1]),
                    marginP(bbox[2], bbox[2] - bbox[0]),
                    marginP(bbox[3], bbox[3] - bbox[1])
                ]
                bbox = [max(0, i) for i in bbox]
                # img_face = img[bbox[1]:bbox[3], bbox[0]:bbox[2]]
                good_bboxes.append(bbox)
            except IndexError:
                continue
        return good_bboxes

    def get_face_and_save(self, org_file, face_file):
        import imageio
        import os
        if not os.path.isfile(face_file):
            face, success = self.get_face_from_file(org_file)
            imageio.imwrite(face_file, face)
            return success
        else:
            return False
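
A minimal usage sketch for the Face wrapper above, assuming the dface package and its model_store weights are in place; the image paths are placeholders:

# Hypothetical usage of the Face wrapper defined above; paths are placeholders.
face = Face()
crop, found = face.get_face_from_file("portrait.jpg")        # first face, margin applied
if found:
    print("cropped face shape:", crop.shape)
boxes = face.get_all_faces_from_file("group_photo.jpg")      # every detected box
print(len(boxes), "face boxes")
face.get_face_and_save("portrait.jpg", "portrait_face.jpg")  # writes the crop if missing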
Example #2
import cv2
from dface.core.detect import create_mtcnn_net, MtcnnDetector
import dface.core.vision as vision
import numpy as np
import torch
if __name__ == '__main__':
    pnet, rnet, onet = create_mtcnn_net(
        p_model_path="./model_store/pnet_epoch.pt",
        r_model_path="./model_store/rnet_epoch.pt",
        o_model_path="./model_store/onet_epoch.pt",
        use_cuda=True)
    mtcnn_detector = MtcnnDetector(pnet=pnet,
                                   rnet=rnet,
                                   onet=onet,
                                   min_face_size=24)

    img = cv2.imread("./test.jpg")
    # cv2 loads BGR; convert to RGB for visualization.
    img_bg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    with torch.no_grad():
        bboxs, landmarks = mtcnn_detector.detect_face(img)

    vision.vis_face(img_bg, bboxs, landmarks)
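
As a follow-up, a short sketch that annotates the original frame with plain OpenCV calls; it assumes each row of bboxs is [x1, y1, x2, y2, score], which matches how the other examples index the detector output, and the output path is a placeholder:

# Sketch only: draw confident detections on the original BGR frame and save it.
for x1, y1, x2, y2, score in bboxs:
    if score > 0.9:
        cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
cv2.imwrite("./test_annotated.jpg", img)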
Example #3
    net.load_state_dict(torch.load(cls_model_path))
    transform = ToTensor()

    #get face data (used for training)
    #save_dir='./data/null/'
    #if not os.path.isdir(save_dir):
    #    os.mkdir(save_dir)

    # open the webcam
    capture = cv2.VideoCapture(0)

    i = 0
    while True:
        timer1 = time.time()
        ret, frame = capture.read()
        faces, _ = mtcnn_detector.detect_face(frame)

        for (top_x, top_y, bottom_x, bottom_y, s) in faces:
            i = i + 1
            top_x = int(top_x)
            top_y = int(top_y)
            bottom_x = int(bottom_x)
            bottom_y = int(bottom_y)
            # draw the bounding rectangle
            cv2.rectangle(frame, (top_x, top_y),
                          (bottom_x, bottom_y), (0, 255, 0), 2)
            frame_save = frame[top_y:bottom_y, top_x:bottom_x, :]
            try:
                #cv2.imwrite(save_dir+str(i)+'.jpg',frame_save)
                cls_input = transform(cv2.resize(frame_save,
                                                 (28, 28))).unsqueeze(0)
Example #4
def face_detection():
    global face_out
    global ready
    #face detection model
    p_model_path = "./weights/pnet_epoch.pt"
    r_model_path = "./weights/rnet_epoch.pt"
    o_model_path = "./weights/onet_epoch.pt"
    assert os.path.exists(p_model_path), "pnet model path does not exist!"
    assert os.path.exists(r_model_path), "rnet model path does not exist!"
    assert os.path.exists(o_model_path), "onet model path does not exist!"
    pnet, rnet, onet = create_mtcnn_net(p_model_path,
                                        r_model_path,
                                        o_model_path,
                                        use_cuda=True)
    mtcnn_detector = MtcnnDetector(pnet=pnet,
                                   rnet=rnet,
                                   onet=onet,
                                   min_face_size=24)

    #face classification model
    cls_model_path = './weights/cnet_final.pth'
    assert os.path.exists(cls_model_path), "cls model path does not exist!"
    net = CNet()
    net.load_state_dict(torch.load(cls_model_path))
    transform = ToTensor()

    #get face data (used for training)
    #save_dir='./data/null/'
    #if not os.path.isdir(save_dir):
    #    os.mkdir(save_dir)

    # open the webcam
    capture = cv2.VideoCapture(0)

    i = 0
    while True:
        ready = True
        print(ready)
        timer1 = time.time()
        ret, frame = capture.read()
        faces, _ = mtcnn_detector.detect_face(frame)

        for (top_x, top_y, bottom_x, bottom_y, s) in faces:
            i = i + 1
            top_x = int(top_x)
            top_y = int(top_y)
            bottom_x = int(bottom_x)
            bottom_y = int(bottom_y)
            # draw the bounding rectangle
            cv2.rectangle(frame, (top_x, top_y),
                          (bottom_x, bottom_y), (0, 255, 0), 2)
            frame_save = frame[top_y:bottom_y, top_x:bottom_x, :]
            try:
                #cv2.imwrite(save_dir+str(i)+'.jpg',frame_save)
                cls_input = transform(cv2.resize(frame_save,
                                                 (28, 28))).unsqueeze(0)
            except Exception:
                # skip crops that are empty or cannot be resized
                continue
            out = net(cls_input)
            cls = torch.argmax(out, dim=1)
            print((top_x, top_y, bottom_x, bottom_y), '\t', cls.item())
            face_out = cls.item()
        timer2 = time.time()
        #print(timer2-timer1)
        # show the annotated frame
        cv2.imshow("faces in video", frame)
        # wait briefly for a key press; quit on 'q'
        if cv2.waitKey(5) & 0xFF == ord('q'):
            break
    # release the capture device
    capture.release()
    # close all OpenCV windows
    cv2.destroyAllWindows()
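
Since face_detection() communicates only through the globals face_out and ready, it is presumably meant to run in a background thread while other code reads the latest classification. A minimal sketch of that usage, assuming it runs in the same module as face_detection; the polling loop and sleep interval are illustrative assumptions:

# Hypothetical launcher for face_detection(); not part of the original code.
import threading
import time

face_out = None
ready = False

worker = threading.Thread(target=face_detection, daemon=True)
worker.start()

while face_out is None:          # wait for the first classification result
    time.sleep(0.1)
print("latest face class:", face_out)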
Example #5
    frame_num = 1
    import matplotlib.pyplot as plt

    while frame_num <= args.n_frames:
        print(frame_num)

        ret, frame = video.read()
        if not ret:
            break
        # frame1 = test_img
        # frame = cv2.transpose(cv2.resize(frame, (480,640), interpolation = cv2.INTER_CUBIC))

        cv2_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # dets = cnn_face_detector(cv2_frame, 1)
        bboxs, landmarks = mtcnn_detector.detect_face(frame)
        #print(frame.shape)

        for idx, det in enumerate(bboxs):
            # Get x_min, y_min, x_max, y_max, conf
            x_min = det[0]  #.rect.left()
            y_min = det[1]  #.rect.top()
            x_max = det[2]  #.rect.right()
            y_max = det[3]  #.rect.bottom()
            conf = det[4]  #.confidence
            #vision.vis_face(cv2_frame,bboxs,landmarks)

            if conf > 0.9:
                bbox_width = abs(x_max - x_min)
                bbox_height = abs(y_max - y_min)
                x_min -= 2 * bbox_width / 4