def loadNet(self):
    global Pnet, Rnet, Onet
    Pnet = create_Kao_Pnet(r'12net.h5')
    Rnet = create_Kao_Rnet(r'24net.h5')
    Onet = create_Kao_Onet(r'48net.h5')  # will not work. caffe and TF incompatible
    # Warm-up: run one dummy prediction per network so the Keras predict
    # functions are built here rather than on the first real frame.
    img = cv2.imread('data/0001.png')
    scale_img = cv2.resize(img, (100, 100))
    input = scale_img.reshape(1, *scale_img.shape)
    Pnet.predict(input)
    img = cv2.imread('data/0001.png')
    scale_img = cv2.resize(img, (24, 24))
    input = scale_img.reshape(1, *scale_img.shape)
    Rnet.predict(input)
    img = cv2.imread('data/0001.png')
    scale_img = cv2.resize(img, (48, 48))
    input = scale_img.reshape(1, *scale_img.shape)
    Onet.predict(input)
    return Pnet, Rnet, Onet
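The dummy predictions above simply warm the three networks up. For reference, a minimal sketch of what each predict() call returns; the head ordering (classifier first, then box regression, with Onet adding five landmarks) follows the usual Kao-style MTCNN layout and is an assumption here:

import numpy as np
from MTCNN import create_Kao_Onet, create_Kao_Rnet, create_Kao_Pnet

Pnet = create_Kao_Pnet(r'12net.h5')
Rnet = create_Kao_Rnet(r'24net.h5')
Onet = create_Kao_Onet(r'48net.h5')

# Zero-valued dummy batches, purely to illustrate the expected input sizes and output heads.
cls_map, box_reg = Pnet.predict(np.zeros((1, 100, 100, 3)))        # fully convolutional: score map + box offsets
cls_score, box_reg = Rnet.predict(np.zeros((1, 24, 24, 3)))        # fixed 24x24 crop: face score + box refinement
cls_score, box_reg, landmarks = Onet.predict(np.zeros((1, 48, 48, 3)))  # 48x48 crop: adds 5 facial landmarks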
Example #2
import sys
import tools_matrix as tools
import cv2
import numpy as np
import matplotlib.pyplot as plt
import time
from MTCNN import create_Kao_Onet, create_Kao_Rnet, create_Kao_Pnet

Pnet = create_Kao_Pnet(r'12net.h5')
Rnet = create_Kao_Rnet(r'24net.h5')
Onet = create_Kao_Onet(r'48net.h5')  # will not work. caffe and TF incompatible


def detectFace(img, threshold):

    caffe_img = (img.copy() - 127.5) / 127.5
    origin_h, origin_w, ch = caffe_img.shape
    scales = tools.calculateScales(img)
    out = []
    t0 = time.time()
    # del scales[:4]

    for scale in scales:
        hs = int(origin_h * scale)
        ws = int(origin_w * scale)
        scale_img = cv2.resize(caffe_img, (ws, hs))
        input = scale_img.reshape(1, *scale_img.shape)
        output = Pnet.predict(
            input
        )  # a .transpose(0, 2, 1, 3) should arguably be added here, but the downstream processing then breaks
        out.append(output)
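The snippet ends after collecting the raw Pnet outputs for every pyramid scale. For orientation, here is a minimal sketch of how one score map can be mapped back to candidate windows in the original image; it assumes the standard MTCNN Pnet geometry (12x12 windows, stride 2), while the repository's own helpers (detect_face_12net and NMS in the tools module used by the later examples) additionally apply the box-regression output:

import numpy as np

def cls_map_to_windows(cls_prob, scale, score_thr):
    """Hypothetical helper: turn a Pnet face-probability map into candidate boxes."""
    boxes = []
    ys, xs = np.where(cls_prob > score_thr)
    for y, x in zip(ys, xs):
        # each map cell corresponds to a 12x12 window, stride 2, in the scaled image
        x1, y1 = int(x * 2 / scale), int(y * 2 / scale)
        x2, y2 = int((x * 2 + 11) / scale), int((y * 2 + 11) / scale)
        boxes.append([x1, y1, x2, y2, float(cls_prob[y, x])])
    return boxes

# e.g. for the first pyramid level: out[0][0] is the classifier head, channel 1 the face score
# candidates = cls_map_to_windows(out[0][0][0][:, :, 1], scales[0], threshold[0])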
Example #3
import tools
import os
import cv2
import numpy as np
import time
from MTCNN import create_Kao_Onet, create_Kao_Rnet, create_Kao_Pnet

# The thresholds can be tuned for custom usage; the three entries gate the Pnet,
# Rnet and Onet stages respectively. For the original high-accuracy setting, use
# threshold = [0.6, 0.6, 0.7].
threshold = [0.4, 0.4, 0.5]
DIR_PATH = 'path_to_imagedir/image_dir/'

Pnet = create_Kao_Pnet('./weight/12net.h5')
Rnet = create_Kao_Rnet('./weight/24net.h5')
Onet = create_Kao_Onet('./weight/48net.h5')  # will not work. caffe and TF incompatible


def detectFace(img, threshold):

    caffe_img = (img.copy() - 127.5) / 127.5
    origin_h, origin_w, ch = caffe_img.shape
    scales = tools.calculateScales(img)
    out = []
    t0 = time.time()
    # del scales[:4]

    for scale in scales:
        hs = int(origin_h * scale)
        ws = int(origin_w * scale)
        scale_img = cv2.resize(caffe_img, (ws, hs))
Example #4
from tools import calculateScales, detect_face_12net, NMS, filter_face_24net, filter_face_48net
import cv2
import numpy as np
from MTCNN import create_Kao_Onet, create_Kao_Rnet, create_Kao_Pnet

print('Creating Face Detector....')
print('Creating Face Detector Pnet....')
Pnet = create_Kao_Pnet('./weight/MTCNN/12net.h5')
print('Creating Face Detector Rnet....')
Rnet = create_Kao_Rnet('./weight/MTCNN/24net.h5')
print('Creating Face Detector Onet....')
Onet = create_Kao_Onet('./weight/MTCNN/48net.h5')
print('Face Detector Created')


def detectFace(img, threshold):
    caffe_img = (img.copy() - 127.5) / 127.5
    origin_h, origin_w, ch = caffe_img.shape
    scales = calculateScales(img)
    out = []
    for scale in scales:
        hs = int(origin_h * scale)
        ws = int(origin_w * scale)
        scale_img = cv2.resize(caffe_img, (ws, hs))
        input = scale_img.reshape(1, *scale_img.shape)
        output = Pnet.predict(input)
        out.append(output)
    image_num = len(scales)
    rectangles = []
    for i in range(image_num):
        cls_prob = out[i][0][0][:, :, 1]
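The listing is truncated just after pulling out the face-probability map for each pyramid scale. Once candidate rectangles have been built from those maps, overlapping detections are merged with non-maximum suppression (the NMS helper imported above); below is a minimal, self-contained sketch of that step, not the repository's own implementation:

import numpy as np

def nms_sketch(rects, iou_thr=0.7):
    """Keep the highest-scoring boxes; each row of rects is [x1, y1, x2, y2, score]."""
    rects = np.asarray(rects, dtype=float)
    if len(rects) == 0:
        return rects
    order = rects[:, 4].argsort()[::-1]          # indices sorted by descending score
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # intersection of the best remaining box with all the others
        x1 = np.maximum(rects[i, 0], rects[order[1:], 0])
        y1 = np.maximum(rects[i, 1], rects[order[1:], 1])
        x2 = np.minimum(rects[i, 2], rects[order[1:], 2])
        y2 = np.minimum(rects[i, 3], rects[order[1:], 3])
        inter = np.maximum(0, x2 - x1) * np.maximum(0, y2 - y1)
        area_i = (rects[i, 2] - rects[i, 0]) * (rects[i, 3] - rects[i, 1])
        areas = (rects[order[1:], 2] - rects[order[1:], 0]) * (rects[order[1:], 3] - rects[order[1:], 1])
        iou = inter / (area_i + areas - inter)
        order = order[1:][iou <= iou_thr]        # drop boxes that overlap too much
    return rects[keep]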
Example #5
                # elif crop_img.ndim == 2:
                #     rgb = cvtColor(crop_img, COLOR_GRAY2BGR)
                #temp_image = QImage(crop_img.flatten(), width, height, QImage.Format_RGB888)
                #temp_pixmap = QPixmap.fromImage(temp_image)
                #self.imgeLabel.setPixmap(temp_pixmap)

                cv2.imwrite('data/' + str(self.threadID) + 'test.jpg',
                            crop_img)
        return draw


if __name__ == "__main__":
    video_path = 'east.mp4'
    Pnet = create_Kao_Pnet(r'12net.h5')
    Rnet = create_Kao_Rnet(r'24net.h5')
    Onet = create_Kao_Onet(r'48net.h5')
    cap = cv2.VideoCapture(video_path)
    preTime = 0
    lock = threading.Lock()
    index = 0
    # Warm-up: run one dummy prediction per network so the Keras predict
    # functions are built before the capture loop starts.
    img = cv2.imread('0001.png')
    scale_img = cv2.resize(img, (100, 100))
    input = scale_img.reshape(1, *scale_img.shape)
    Pnet.predict(input)
    img = cv2.imread('0001.png')
    scale_img = cv2.resize(img, (24, 24))
    input = scale_img.reshape(1, *scale_img.shape)
    Rnet.predict(input)
    img = cv2.imread('0001.png')
    scale_img = cv2.resize(img, (48, 48))
    input = scale_img.reshape(1, *scale_img.shape)
    Onet.predict(input)
Example #6
import cv2
import time
import numpy as np
from MTCNN import create_Kao_Onet

detection_model_path = 'models/haarcascade_frontalface_default.xml'
face_detector = cv2.CascadeClassifier(detection_model_path)

# starting video streaming
cv2.namedWindow('window_frame')
video_capture = cv2.VideoCapture(0)

Onet = create_Kao_Onet('models/48net.h5')

while True:
    bgr_image = video_capture.read()[1]
    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    faces = face_detector.detectMultiScale(gray_image, 1.3, 5)
    if len(faces) > 0:
        for face_coordinates in faces:
            batch_in = []
            x, y, w, h = face_coordinates
            face_crop = bgr_image[y:(y + h), x:(x + w)]
            face_crop = cv2.resize(face_crop, (48, 48))  # Onet expects 48x48 crops
            face_crop = (face_crop - 127.5) / 127.5      # scale pixels to [-1, 1]
            batch_in.append(face_crop)                   # already 48x48, no second resize needed
            cv2.rectangle(bgr_image, (x, y), (x + w, y + h), (255, 255, 255), 2)

            batch_in = np.array(batch_in)
            start = time.time()
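The capture of this example stops right after the timer starts. For completeness, a small self-contained sketch of how the Onet outputs could be consumed, assuming the usual keras-mtcnn head layout (a 2-way face score, a 4-value box regression, and 10 landmark values with the five x coordinates first); the helper name and the 0.7 score threshold are illustrative, not from the original:

import cv2

def onet_landmarks(onet, bgr_crop, x, y, w, h, score_thr=0.7):
    """Hypothetical helper: run Onet on one face crop and return landmark points."""
    crop = cv2.resize(bgr_crop, (48, 48))
    crop = (crop - 127.5) / 127.5                       # same normalisation as above
    cls_prob, roi, pts = onet.predict(crop.reshape(1, 48, 48, 3))
    if cls_prob[0][1] < score_thr:                      # not confident this is a face
        return []
    # landmarks are normalised to the crop: indices 0-4 are x, 5-9 are y
    return [(int(x + pts[0][k] * w), int(y + pts[0][k + 5] * h)) for k in range(5)]

# e.g. inside the loop above:
# for (lx, ly) in onet_landmarks(Onet, bgr_image[y:y + h, x:x + w], x, y, w, h):
#     cv2.circle(bgr_image, (lx, ly), 2, (0, 255, 0), -1)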