Example #1
import cv2 as cv
from xailient import dnn


def main():
    detectum = dnn.Detector()
    THRESHOLD = 0.4  # Value between 0 and 1 for confidence score

    im = cv.imread('../data/beatles.jpg')

    _, bboxes = detectum.process_frame(im, THRESHOLD)

    # --- uncomment to use post-processing (helper sketches follow this example) --- #
    # 1. Remove any bboxes whose coordinates fall inside larger bboxes:
    # bboxes = remove_inside_bboxes(bboxes)
    # 2. Combine any two overlapping bboxes into one larger bbox:
    # bboxes = combine_bboxes(bboxes)

    # Loop through list (if empty this will be skipped) and overlay green bboxes
    # Format of bboxes is: xmin, ymin (top left), xmax, ymax (bottom right)
    for (x1, y1, x2, y2) in bboxes:
        cv.rectangle(im, (x1, y1), (x2, y2), (0, 255, 0), 3)

    cv.imwrite('../data/beatles_output.jpg', im)


if __name__ == '__main__':
    main()
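
The two post-processing helpers referenced in the comments above are not shown on this page. Below is a minimal sketch of what they could look like; the names match the calls above, but the bodies are hypothetical reconstructions assuming each bbox is an (xmin, ymin, xmax, ymax) sequence, not the library's own implementation.

def remove_inside_bboxes(bboxes):
    """Hypothetical helper: drop any bbox lying entirely inside another bbox."""
    keep = []
    for i, (x1, y1, x2, y2) in enumerate(bboxes):
        inside = any(
            j != i and ox1 <= x1 and oy1 <= y1 and x2 <= ox2 and y2 <= oy2
            for j, (ox1, oy1, ox2, oy2) in enumerate(bboxes)
        )
        if not inside:
            keep.append([x1, y1, x2, y2])
    return keep


def combine_bboxes(bboxes):
    """Hypothetical helper: merge overlapping bboxes into their union until
    no two boxes overlap."""
    boxes = [list(b) for b in bboxes]
    merged = True
    while merged:
        merged = False
        for i in range(len(boxes)):
            for j in range(i + 1, len(boxes)):
                ax1, ay1, ax2, ay2 = boxes[i]
                bx1, by1, bx2, by2 = boxes[j]
                # Overlap test on both axes (inclusive of touching edges).
                if ax1 <= bx2 and bx1 <= ax2 and ay1 <= by2 and by1 <= ay2:
                    boxes[i] = [min(ax1, bx1), min(ay1, by1),
                                max(ax2, bx2), max(ay2, by2)]
                    del boxes[j]
                    merged = True
                    break
            if merged:
                break
    return boxes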
Example #2

# This script has been tested using a Raspberry Pi Camera Module v1.3

from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
import math
from xailient import dnn
import numpy as np

# By default the low-resolution DNN face detector is loaded.
# To use the high-resolution face detector instead, adjust the line below.
detectum = dnn.Detector()

THRESHOLD = 0.45  # Value between 0 and 1 for confidence score

# initialize the camera and grab a reference to the raw camera capture
RES_W = 640  # pixels; other options: 1280, 256, 320, 480
RES_H = 480  # pixels; other options: 720, 144, 240, 360

camera = PiCamera()
camera.resolution = (RES_W, RES_H)
camera.framerate = 30  # FPS
rawCapture = PiRGBArray(camera, size=(RES_W, RES_H))

# allow the camera to warmup
time.sleep(0.1)
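
# Note: the function below does not use the PiCamera/rawCapture objects set up
# above; it opens its own capture via cv2.VideoCapture. Its `sender` argument
# behaves like one end of a multiprocessing.Pipe (send/poll/recv); see the
# usage sketch after this example.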
def faceTracking(sender):
    res1 = (320, 240)
    res2 = (480, 320)
    res3 = (640, 480)
    res4 = (1280, 720)
    res = res1

    detectum = dnn.Detector()
    THRESHOLD = 0.55  # Value between 0 and 1 for confidence score
    cap = cv2.VideoCapture(-1)  # -1: autodetect the first available camera
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, res[0])
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, res[1])
    frameCounter = 0
    currentID = 0
    faceTrackers = {}

    WIDTH = res[0] / 2   # half the frame width, pixels
    HEIGHT = res[1] / 2  # half the frame height, pixels
    EYE_DEPTH = 2        # eye radius used for the pupil arc (presumably cm)
    hFOV = 125 / 2  # half the horizontal field of view, degrees (62/2 for a narrower lens)
    vFOV = 85 / 2   # half the vertical field of view, degrees (49/2 for a narrower lens)
    ppcm = WIDTH * 2 / 15.5 * 1.5  # approximate pixels per centimetre
    term = False
    while not term:
        _, frame = cap.read()
        frame = cv2.rotate(frame, cv2.ROTATE_180)
        frameCounter += 1
        if frameCounter % 1 == 0:  # every frame; raise the modulus to skip frames
            _, bboxes = detectum.process_frame(frame, THRESHOLD)
            for (x1, y1, x2, y2) in bboxes:
                w = x2 - x1
                h = y2 - y1
                print(x1, y1, x2, y2)
                center = (int(x1 + w * 0.5), int(y1 + h * 0.5))
                fidMatch = False
                # Match this detection to an existing tracker whose box,
                # padded by half the detection size, contains the centre.
                for fid in faceTrackers.keys():
                    (tx1, ty1, tx2, ty2, n, u) = faceTrackers.get(fid)
                    if (tx1 - w * 0.5 <= center[0] <= tx2 + w * 0.5 and
                            ty1 - h * 0.5 <= center[1] <= ty2 + h * 0.5):
                        if n < 50:  # hit counter, capped at 50
                            n += 1
                        faceTrackers.update({fid: (x1, y1, x2, y2, n, True)})
                        fidMatch = True
                        break
                if not fidMatch:
                    # No tracker matched: start a new one with count 1.
                    faceTrackers.update({currentID: (x1, y1, x2, y2, 1, True)})
                    currentID += 1

        trackID = -1
        fidsToDelete = []
        for fid in faceTrackers.keys():
            (tx1, ty1, tx2, ty2, n, u) = faceTrackers.get(fid)
            if not u:  # not matched this frame: decay the hit counter
                n -= 1
            if n < 1:
                fidsToDelete.append(fid)
            else:
                # Clear the 'updated' flag for the next frame.
                faceTrackers.update({fid: (tx1, ty1, tx2, ty2, n, False)})
                if n >= 25:  # only well-established trackers are followed
                    trackID = fid

        for fid in fidsToDelete:
            faceTrackers.pop(fid, None)
        print(faceTrackers)  # debug output of the current tracker state
        if trackID != -1:
            # Convert the tracked face's position into gaze angles.
            (x1, y1, x2, y2, n, u) = faceTrackers.get(trackID)
            center = (int(x1 + (x2 - x1) * 0.5), int(y1 + (y2 - y1) * 0.5))
            hAngle = (1 - center[0] / WIDTH) * hFOV   # signed horizontal offset, degrees
            vAngle = (1 - center[1] / HEIGHT) * vFOV  # signed vertical offset, degrees
            c = -0.26 * (x2 - x1) + 103  # empirical distance estimate from bbox width

            # Horizontal: law of cosines on the camera-face-eye triangle,
            # then convert the eye's viewing angle into a pupil arc offset.
            b = 4
            angleA = (90 - hAngle) * math.pi / 180
            a = math.sqrt(b * b + c * c - 2 * b * c * math.cos(angleA))
            angleC = math.acos((a * a + b * b - c * c) / (2 * a * b))
            # Arc length = angle (rad) * radius (EYE_DEPTH), scaled to pixels.
            pupilL = int((angleC - math.pi / 2) * EYE_DEPTH * ppcm)

            # Same triangle solved again for the second eye, offset twice as far.
            b_hat = 2 * b
            c_hat = math.sqrt(a * a + b_hat * b_hat -
                              2 * a * b_hat * math.cos(angleC))
            angleA_hat = math.acos(
                (b_hat * b_hat + c_hat * c_hat - a * a) / (2 * b_hat * c_hat))
            pupilR = int((math.pi / 2 - angleA_hat) * EYE_DEPTH * ppcm)

            # Vertical: same construction with a different offset.
            b = 6
            angleA = (90 - vAngle) * math.pi / 180
            a = math.sqrt(b * b + c * c - 2 * b * c * math.cos(angleA))
            angleC = math.acos((a * a + b * b - c * c) / (2 * a * b))
            pupilV = int((angleC - math.pi / 2) * EYE_DEPTH * ppcm)

            sender.send((pupilL, pupilR, pupilV))  # left, right and vertical pupil offsets
        if sender.poll():
            term = sender.recv()  # a truthy message from the parent ends the loop
    cap.release()
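
faceTracking never returns on its own: it streams (pupilL, pupilR, pupilV) tuples through `sender` and stops once it receives a truthy message. A minimal sketch of a driver process, assuming `sender` is one end of a multiprocessing.Pipe (an inference from the send/poll/recv calls above, not confirmed by this page):

from multiprocessing import Pipe, Process

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()  # duplex pipe: tuples out, stop signal in
    tracker = Process(target=faceTracking, args=(child_conn,))
    tracker.start()
    try:
        for _ in range(100):  # consume 100 pupil updates, then stop
            pupilL, pupilR, pupilV = parent_conn.recv()
            print('pupils:', pupilL, pupilR, pupilV)
    finally:
        parent_conn.send(True)  # truthy value makes faceTracking exit its loop
        tracker.join()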