Example #1
blob = cv2.dnn.blobFromImage(img)  # the 'size' param resizes the output to the given shape

# Load the net
net = cv2.dnn.readNet(args.model)
net.setPreferableBackend(args.backend)
net.setPreferableTarget(args.target)

# Run the net
output_names = ['loc', 'conf', 'iou']
net.setInput(blob)
loc, conf, iou = net.forward(output_names)

# Decode bboxes and landmarks
pb = PriorBox(input_shape=(w, h), output_shape=(w, h))
dets = pb.decode(np.squeeze(loc, axis=0), np.squeeze(conf, axis=0),
                 np.squeeze(iou, axis=0), args.conf_thresh)

# NMS
if dets.shape[0] > 0:
    dets = nms(dets, args.nms_thresh)
    faces = dets[:args.keep_top_k, :]
    print('Detection results: {} faces found'.format(faces.shape[0]))
    print(faces)
else:
    print('No faces found.')
    exit()

# Draw bounding boxes and landmarks on the original image
img_res = draw(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), faces[:, :4],
               np.reshape(faces[:, 4:14], (-1, 5, 2)), faces[:, -1])
if args.vis:
    # the source snippet is truncated here; showing the result window is one plausible continuation
    cv2.imshow('Detection Results', img_res)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
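For reference, cv2.dnn.blobFromImage returns an NCHW float32 blob, and the size parameter (omitted in Example #1, used in Example #2) controls the spatial resize. A minimal standalone check with a synthetic frame (sizes chosen arbitrarily for illustration):

import cv2
import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)       # synthetic BGR image, H=480, W=640
blob = cv2.dnn.blobFromImage(frame, size=(320, 240))  # size is given as (width, height)
print(blob.shape)  # (1, 3, 240, 320): NCHW, resized to the requested size
print(blob.dtype)  # float32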
Example #2
img_resize = cv2.resize(img,  # source image assumed to be loaded as `img`
                        dst=None,
                        dsize=input_shape,
                        interpolation=cv2.INTER_LINEAR)
hr, wr, _ = img_resize.shape
print('Network input size: h={}, w={}'.format(hr, wr))

blob = cv2.dnn.blobFromImage(img_resize, size=input_shape)

# run the net
output_names = ['loc', 'conf']
net.setInput(blob)
loc, conf = net.forward(output_names)

# Decode bboxes and landmarks
pb = PriorBox(input_shape=input_shape, output_shape=(w, h))
dets = pb.decode(np.squeeze(loc, axis=0), np.squeeze(conf, axis=0))

# Ignore low scores
idx = np.where(dets[:, -1] > args.conf_thresh)[0]
dets = dets[idx]

# NMS
if dets.shape[0] > 0:
    dets = nms(dets, args.nms_thresh)
    faces = dets[:args.keep_top_k, :]
    print('Detection results: {} faces found'.format(faces.shape[0]))
    print(faces)
else:
    print('No faces found.')
    exit()
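Both examples rely on a project-local nms helper that is not part of the snippets. Assuming dets rows are laid out as [x1, y1, x2, y2, ..., score] (consistent with how faces[:, :4] and faces[:, -1] are indexed above), a minimal greedy IoU-based non-maximum suppression sketch could look like this:

import numpy as np

def nms_sketch(dets, iou_thresh):
    # dets: (N, K) array; columns 0-3 are x1, y1, x2, y2, the last column is the score
    x1, y1, x2, y2 = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3]
    scores = dets[:, -1]
    areas = (x2 - x1) * (y2 - y1)
    order = scores.argsort()[::-1]  # indices sorted by descending score
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # intersection of the current best box with all remaining boxes
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        order = order[1:][iou <= iou_thresh]  # keep only boxes with low overlap
    return dets[keep]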
Example #3
import cv2
import numpy as np
# PriorBox and nms come from the project's own helpers (module not shown in this snippet)


class faceDetectorModel:
    def __init__(self,
                 method='haarCascades',
                 gpu=0,
                 confidence=0.7,
                 threshold=0.3):
        self.gpu = gpu
        self.method = method
        self.init = 0
        self.detector = None
        self.pb = None
        self.detectorInit()
        self.confidence = confidence
        self.threshold = threshold

    def detectorInit(self):
        if self.method == 'haarCascades':
            if self.gpu == 0:
                self.detector = cv2.CascadeClassifier(
                    'faceDetect/haarcascade_frontalface_default.xml')
            elif self.gpu == 1:
                self.detector = cv2.cuda.CascadeClassifier_create(
                    'faceDetect'
                    '/haarcascade_frontalface_default_cuda.xml')

        elif self.method == 'lbpCascades':
            if self.gpu == 0:
                self.detector = cv2.CascadeClassifier(
                    'faceDetect/lbpcascade_frontalface_improved.xml')
            elif self.gpu == 1:
                self.detector = cv2.cuda.CascadeClassifier_create(
                    'faceDetect/lbpcascade_frontalface_improved.xml')

        elif self.method == 'yuNet':
            self.detector = cv2.dnn.readNet(
                'faceDetect/YuFaceDetectNet_640.onnx')
            self.detector.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
            self.detector.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)

    def CascadesDetector(self, frame):
        if self.gpu == 1:
            faces = []
            gpuFrame = cv2.cuda_GpuMat()
            gpuFrame.upload(frame)
            gpuMat = cv2.cuda.cvtColor(gpuFrame, cv2.COLOR_BGR2GRAY)
            objbuff = self.detector.detectMultiScale(gpuMat)
            facess = objbuff.download()
            if facess is None:
                facess = ()
            facess = np.array(facess)
            for multipleFace in facess:
                for face in multipleFace:
                    faces.append(face)
            return faces
        elif self.gpu == 0:
            grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = self.detector.detectMultiScale(grayFrame,
                                                   scaleFactor=1.2,
                                                   minNeighbors=5,
                                                   minSize=(20, 20))
            return faces

    def yuNetDetection(self, frame):
        if self.init == 0:
            frameHeight, frameWidth = frame.shape[:2]  # frame.shape is (height, width, channels)
            self.pb = PriorBox(input_shape=(640, 480),
                               output_shape=(frameWidth, frameHeight))
            self.init = 1

        blob = cv2.dnn.blobFromImage(frame, size=(640, 480))
        outputNames = ['loc', 'conf', 'iou']
        self.detector.setInput(blob)
        loc, conf, iou = self.detector.forward(outputNames)
        dets = self.pb.decode(np.squeeze(loc, axis=0),
                              np.squeeze(conf, axis=0),
                              np.squeeze(iou, axis=0))
        idx = np.where(dets[:, -1] > self.confidence)[0]
        dets = dets[idx]

        if dets.shape[0]:
            facess = nms(dets, self.threshold)
        else:
            facess = ()
            return facess
        faces = np.array(facess[:, :4])
        faces = faces.astype(np.int32)  # np.int was removed in recent NumPy versions
        faceStartXY = faces[:, :2]
        faceEndXY = faces[:, 2:4]
        faceWH = faceEndXY - faceStartXY
        faces = np.hstack((faceStartXY, faceWH))
        # scores = facess[:, -1]
        return faces

    def predict(self, frame, painted=1):
        frameNew = frame.copy()
        faces = ()
        if self.method == 'haarCascades' or self.method == 'lbpCascades':
            faces = self.CascadesDetector(frameNew)
        elif self.method == 'yuNet':
            faces = self.yuNetDetection(frameNew)

        if painted:
            for (x, y, w, h) in faces:
                cv2.rectangle(frameNew, (x, y), (x + w, y + h), (0, 0, 255))

        return frameNew, faces
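
A small usage sketch for faceDetectorModel (camera index and output filename are assumptions; the cascade XML files under faceDetect/ must exist relative to the working directory):

import cv2

detector = faceDetectorModel(method='haarCascades', gpu=0,
                             confidence=0.7, threshold=0.3)

cap = cv2.VideoCapture(0)          # default camera, index assumed
ret, frame = cap.read()
if ret:
    painted, faces = detector.predict(frame, painted=1)
    print('{} face(s) found'.format(len(faces)))
    cv2.imwrite('detections.jpg', painted)  # output filename assumed
cap.release()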