import os

import cv2


def dealAsian():
    detector = FaceDetector()
    data_path = "./datasets/asianFacesCategory"
    data_out_path = "./datasets/asianFaces"
    identities = os.listdir(data_path)
    new_filenames = []
    widths = []
    heights = []

    for person in identities:
        dirpath = data_path + "/" + person
        if person != ".DS_Store" and person != "._.DS_Store":
            files = os.listdir(dirpath)
            for file in files:
                print("=============================")
                image = cv2.imread(dirpath + "/" + file)
                if image is not None:
                    print("Load Dir={} and Image={}".format(person, file))
                    res = detector.detection(image)
                    if res is not None:
                        crop_image, w, h = res
                        cv2.imwrite(data_out_path + "/" + person + "_" + file,
                                    crop_image)
                        new_filenames.append(person + "_" + file)
                        widths.append(w)
                        heights.append(h)
                    else:
                        print("No Faces")

                else:
                    print("Read Image Wrong....")
    save_height_width(new_filenames, widths, heights)
Example #2
def main():
    # Instantiate Classes
    detector = FaceDetector(FACE_CLASSIFIER_PATH, EYE_CLASSIFIER_PATH)
    model = FaceModel()
    display = Display()
    capture = Capture()

    oldTime = time.time()
    i = 0
    frames_num = 0
    delta_sum = 0
    while True:
        # Escape key exits; on Linux the window does not refresh without this waitKey call
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            return

        # Calculate time difference (dt), update oldTime variable
        newTime = time.time()
        dt = newTime - oldTime
        oldTime = newTime

        frames_num += 1
        delta_sum += dt
        if frames_num % 100 == 0:
            print "delta:", delta_sum / float(
                frames_num), "frames pr sec:", 1 / float(
                    delta_sum / float(frames_num))
            frames_num = 0
            delta_sum = 0

        # Grab Frames
        frames = capture.read()

        # Detect face 20% of the time, eyes 100% of the time
        if i % 5 == 0:
            rects = detector.detect(frames)
        else:
            rects = detector.detect(frames, model.getPreviousFaceRects())
        i += 1

        # Add detected rectangles to model
        model.add(rects)

        display.renderScene(frames['display'], model, rects)
        display.renderEyes(frames['color'], model)
Example #3
def detect_faces(filename):
    try:
        image = cv2.imread(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # find faces in the image
        fd = FaceDetector()
        faceRects = fd.detect(gray, scaleFactor = 1.1, minNeighbors = 5,
            minSize = (30, 30))
        faces_number = len(faceRects)
        # loop over the faces and draw a rectangle around each
        for (x, y, w, h) in faceRects:
            image = cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.imwrite((os.path.join(app.config['UPLOAD_FOLDER'], filename)), image)
        return faces_number
    except Exception:
        # if the image cannot be read or processed, report zero faces
        return 0
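
detect_faces() assumes the file is already saved under UPLOAD_FOLDER and annotates it in place, so a minimal upload route could drive it like this (the route name, form field, and template are illustrative, not from the original app):

from flask import request, render_template

@app.route('/upload', methods=['POST'])
def upload():
    f = request.files['file']          # hypothetical form field name
    f.save(os.path.join(app.config['UPLOAD_FOLDER'], f.filename))
    n = detect_faces(f.filename)       # draws the boxes into the saved file
    return render_template('result.html', filename=f.filename, faces=n)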
Example #4
from urllib.request import urlopen

from cv2 import IMREAD_COLOR, imdecode, imshow, rectangle, waitKey
from numpy import asarray, uint8

# assuming the same local facedetector module as the other examples
from facedetector import FaceDetector


def main():
    img_url = ('http://2.bp.blogspot.com/-Ooj8qMem5vo/VRKMGtvmWJI'
               '/AAAAAAAABjg/DH001_agnPY/s1600/face.jpg')
    img_np = asarray(bytearray(urlopen(img_url).read()), dtype=uint8)
    image = imdecode(img_np, IMREAD_COLOR)

    detr = FaceDetector()
    fpos = detr.get_faces_pos(image, minNeighbors=15)

    RGB_GREEN = (0, 255, 0)
    roi = image.copy()
    for (x, y, w, h) in fpos:
        roi = rectangle(roi, (x, y), (x + w, y + h),
                        color=RGB_GREEN,
                        thickness=2)

    imshow('Face Detection', roi)
    waitKey(0)
Example #6
def load_models():
    print('Loading models...')
    mask_detector = MaskDetector()
    mask_detector.load_state_dict(torch.load('models/face_mask.ckpt')['state_dict'], strict=False)
    mask_detector = mask_detector.to(device)
    mask_detector.eval()

    face_detector = FaceDetector(
        prototype='models/deploy.prototxt.txt',
        model='models/res10_300x300_ssd_iter_140000.caffemodel',
    )
    return face_detector, mask_detector
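
load_models() only constructs the two models; a hedged sketch of wiring them together follows. The face_detector.detect() call, the 100x100 input size, and the two-logit output layout of MaskDetector are assumptions, not part of the original code:

import torch

face_detector, mask_detector = load_models()
frame = cv2.imread('test.jpg')
for (x, y, w, h) in face_detector.detect(frame):      # assumed wrapper API
    face = cv2.resize(frame[y:y + h, x:x + w], (100, 100))   # assumed input size
    tensor = (torch.from_numpy(face).permute(2, 0, 1).float() / 255.0).unsqueeze(0)
    with torch.no_grad():
        logits = mask_detector(tensor.to(device))
    print('mask' if logits.argmax(dim=1).item() == 0 else 'no mask')  # assumed label order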
def VJFindFace(frame):   
    #Include the global variables inside the scope of the function 
    global RATIO, orig
    #list to store the corner coordinates of the faces found.Initially empty
    allRoiPts = []    
    #generate a copy of the original frame
    orig = frame.copy()    
    #resize the original image. Set the aspect RATIO
    dim = (frame.shape[1] // RATIO, frame.shape[0] // RATIO)
    # perform the actual resizing of the image and show it
    resized = cv2.resize(frame, dim, interpolation = cv2.INTER_AREA)                
    #convert the frame to gray scale
    gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)        
    # find faces in the gray scale frame of the video using Haar feature based trained classifier
    fd = FaceDetector('{0}/haarcascade_frontalface_default.xml'.format(OPENCV_HOME))
    faceRects = fd.detect(gray, scaleFactor = 1.1, minNeighbors = 4, minSize = (10, 10))
    print "\n### Number of Faces: {0}\n".format(np.shape(faceRects)[0])
    # loop over the faces and draw a rectangle around each
    for (x, y, w, h) in faceRects:
        #This step is crucial: the box found by Viola-Jones includes part of
        #the background around the face. When the mean of the box is computed
        #during tracking, that background inflates the tracked window well
        #beyond the actual face region, so we shrink the box to the area where
        #the color of interest will remain available for tracking in the
        #upcoming frames.
        x = RATIO*(x+10)
        y = RATIO*(y+10)
        w = RATIO*(w-15)
        h = RATIO*(h-15)
        #Uncomment the cv2.rectangle call below to view the boxes found by
        #Viola-Jones. Note that they appear shifted and smaller because of the
        #shrinking operation performed above.
        #cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)

        #Assign top left and bottom right pixel values each time Viola-Johnes is run
        #Append all the points detected for the face in the list
        allRoiPts.append((x, y, x+w, y+h))        
    #show the detected faces
    cv2.imshow("Faces", frame)
    cv2.waitKey(1)  
    return allRoiPts
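
The shrunken boxes returned by VJFindFace() are meant to seed a color-based tracker, as the comments above explain. This is a sketch of a standard OpenCV CAMShift loop that could consume them; the histogram setup is generic OpenCV usage, not this project's tracking code:

def track(frame, roiPts):
    # roiPts holds (x1, y1, x2, y2) corners as returned by VJFindFace()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    term = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
    for (x1, y1, x2, y2) in roiPts:
        # a hue histogram of the face region drives the back projection
        roi = hsv[y1:y2, x1:x2]
        hist = cv2.calcHist([roi], [0], None, [16], [0, 180])
        cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
        backProj = cv2.calcBackProject([hsv], [0], hist, [0, 180], 1)
        (_, window) = cv2.CamShift(backProj, (x1, y1, x2 - x1, y2 - y1), term)
        (x, y, w, h) = window
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)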
def predict(images, attribute):
    """Predict each image with respect to the given attribute.

    images: a list of numpy arrays
    attribute: name of the attribute
    return: a list of prediction values
    """
    model_file = [
        "./model/pca_" + attribute + ".pkl",
        "./model/pred_" + attribute + ".pkl"
    ]
    # load the PCA and prediction models once, outside the per-image loop
    with open(model_file[0], 'rb') as f:
        pcaObj = pickle.load(f)
    with open(model_file[1], 'rb') as f:
        predictObj = pickle.load(f)

    facedetect = FaceDetector()
    pred_list = []
    for image in images:
        cropped_image = facedetect.detection(image)
        image_data = pcaObj.transform(cropped_image)
        pred_list.append(predictObj.predict(image_data))
    return pred_list
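
A hypothetical call; 'smile' is a placeholder attribute name, and the matching ./model/pca_<attribute>.pkl and ./model/pred_<attribute>.pkl files must already exist:

images = [cv2.imread('a.jpg'), cv2.imread('b.jpg')]
print(predict(images, 'smile'))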
Example #9
def main():
    # Loading the cascade xml file
    parser = argparse.ArgumentParser()

    parser.add_argument("-f",
                        "--face",
                        required=True,
                        help="path to where the face cascade resides")

    parser.add_argument("-v",
                        "--video",
                        action='store_true',
                        help="detection from WebCam")

    parser.add_argument("-i",
                        "--image",
                        help="path to where the image resides")

    args = vars(parser.parse_args())
    img = cv2.imread(args["image"], 1)
    if (args["video"]):
        # cap points to the WebCam
        cap = cv2.VideoCapture(0)
        # build the FaceDetector once instead of once per frame
        faceDetector = FaceDetector(args['face'])
        while True:
            # Getting images from WebCam
            ret, img = cap.read()
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            # using FaceDetector class to detect the face
            faces = faceDetector.detect(gray)
            # draw a box around faces
            for (x, y, w, h) in faces:
                img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0),
                                    2)
            # draw image
            cv2.imshow('img', img)
            if cv2.waitKey(1) == ord('q'):
                cv2.destroyAllWindows()
                break
        cap.release()
    elif img is not None:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # using FaceDetector class to detect the face
        faceDetector = FaceDetector(args['face'])
        faces = faceDetector.detect(gray)
        # draw a box around faces
        for (x, y, w, h) in faces:
            img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # draw image
        cv2.imshow('img', img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    else:
        print("Please enter the correct arguments")
    def get_face_detector(self):
        if not self.config.has_section('FaceDetector'):
            raise ValueError('No FaceDetector configuration!')
        else:
            cascade_classifier_path = self.config.get(
                'FaceDetector',
                'cascadeClassifier',
                fallback='./resources/haarcascade_frontalface_default.xml')
            scaleFactor = self.config.getfloat('FaceDetector',
                                               'scaleFactor',
                                               fallback=1.3)
            minNeighbors = self.config.getint('FaceDetector',
                                              'minNeighbors',
                                              fallback=4)
            minSize_x = self.config.getint('FaceDetector',
                                           'minSize_x',
                                           fallback=40)
            minSize_y = self.config.getint('FaceDetector',
                                           'minSize_y',
                                           fallback=40)
            return FaceDetector(cascade_classifier_path, scaleFactor,
                                minNeighbors, (minSize_x, minSize_y))
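
The [FaceDetector] section this method reads is ordinary configparser INI syntax; a config matching the fallbacks above would look like this (the path is illustrative):

[FaceDetector]
cascadeClassifier = ./resources/haarcascade_frontalface_default.xml
scaleFactor = 1.3
minNeighbors = 4
minSize_x = 40
minSize_y = 40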
Example #11
# @FileName: cam.py
# @TODO:

from facedetector import FaceDetector
import imutils
import argparse
import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-f", "--face", required=True,
	help="path to the face cascade resides")
ap.add_argument("-v", "--video",
	help="path to the (optional) video file")
args = vars(ap.parse_args())

fd = FaceDetector(args["face"])

# API: https://docs.opencv.org/3.0-beta/modules/videoio/doc/reading_and_writing_video.html?highlight=videocapture
if not args.get("video", False):
	# Param1: filename – name of the opened video file (eg. video.avi) or
	# image sequence (eg. img_%02d.jpg, which will read samples like img_00.jpg,
	#  img_01.jpg, img_02.jpg, ...)
	# Param2: device – id of the opened video capturing device (i.e. a camera index).
	# If there is a single camera connected, just pass 0.
	camera = cv2.VideoCapture(0)
	if not camera.isOpened():
		raise ValueError("camera not found!")
else:
	camera = cv2.VideoCapture(args["video"])

while True:
	# (the original snippet is cut off here; a minimal loop body in the
	# style of the other examples is assumed below)
	(grabbed, frame) = camera.read()
	if args.get("video") and not grabbed:
		break
	frame = imutils.resize(frame, width=300)
	gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
	faceRects = fd.detect(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
	for (x, y, w, h) in faceRects:
		cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
	cv2.imshow("Faces", frame)
	if cv2.waitKey(1) & 0xFF == ord("q"):
		break

camera.release()
cv2.destroyAllWindows()
Example #12
from facedetector import FaceDetector
import argparse

import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-f",
                "--face",
                required=True,
                help="path to where the face cascade resides")
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())

image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # imread returns BGR, not RGB

# Load the face cascade into the face detector
face_detector = FaceDetector(args["face"])

# Detect all the faces on the image
face_rectangles = face_detector.detect(gray)

print "I found %d face(s)" % len(face_rectangles)

# Draw rectangles around the faces found on the image
for (x, y, w, h) in face_rectangles:
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

cv2.imshow("Faces", image)
cv2.waitKey(0)
Example #13
import argparse

import cv2
import numpy as np
from facedetector import FaceDetector
# assumption: face_mask.model is a Keras model, as in the VideoCamera example below
from tensorflow.keras.models import load_model

ap = argparse.ArgumentParser()
ap.add_argument("-i",
                "--image",
                required=True,
                help="path to where the image file reside")
args = vars(ap.parse_args())

image = cv2.imread(args["image"])
(h, w) = image.shape[:2]
if h > 1080:
    image = cv2.resize(image, (600, 800))
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

fd = FaceDetector("./haarcascade_frontalface_default.xml")
faceRects = fd.detect(gray, scaleFactor=1.2, minNeighbors=3, minSize=(40, 40))
print(f'I found {len(faceRects)} face(s)')

model = load_model('./face_mask.model')

face_images = []
for (x, y, w, h) in faceRects:
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

    face_image = image[y:y + h, x:x + w]

    face_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2RGB)
    face_image = cv2.resize(face_image, (224, 224))

    face_image = face_image / 255.0
    face_images.append(face_image)
def process(left, right):
    direction = 0
    angle = 0
    if left + right < 10 or abs(left - right) > 5:
        if left < right:
            angle = 15 - abs(left - right)
            direction = 1
        elif right < left:
            angle = 15 - abs(right - left)
            direction = 2

    return direction, angle
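
A quick sanity check of process(), since the branch logic is easy to misread: balanced readings leave both outputs at zero, and a large left/right gap steers with an angle that shrinks as the gap grows:

print(process(6, 6))   # -> (0, 0): sum >= 10 and |6 - 6| <= 5, so no turn
print(process(2, 9))   # -> (1, 8): left < right, angle = 15 - |2 - 9| = 8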


cap = cv2.VideoCapture(1)
facedetector = FaceDetector()

while True:
    ret, img = cap.read()
    if not ret:
        print("USB camera error; entering normal mode without FaceID.")
        break
    faces = facedetector.detect(img)
    valid = facedetector.verifyfaces(faces)
    if valid:
        print("FaceID matched successfully, unlocking the car.")
        break
    else:
        print("No matching face.")
cap.release()
del facedetector
Example #15
__author__ = 'XJH'
from facedetector import FaceDetector
import imutils
import cv2

fd = FaceDetector(
    "H:/Machine Learning/computer vision/[10307938]Books/Books/Case Studies, 3nd Edition/code/face_detection/cascades/haarcascade_frontalface_default.xml"
)

camera = cv2.VideoCapture("adrian_face.mov")

while True:
    (grabbed, frame) = camera.read()
    if not grabbed:
        break
    frame = imutils.resize(frame, width=300)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faceRects = fd.detect(gray,
                          scaleFactor=1.1,
                          minNeighbors=5,
                          minSize=(30, 30))
    frameClone = frame.copy()
    for (fX, fY, fW, fH) in faceRects:
        cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH), (0, 0, 255), 2)
    cv2.imshow("Face", frameClone)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

camera.release()
import cv2

from facedetector import FaceDetector
import imutils

# setup the argument parse for command line inputs
parser = argparse.ArgumentParser()
parser.add_argument("-f",
                    "--face",
                    required=True,
                    help="path to face cascade classifier ")
parser.add_argument("-v", "--video", help="path to video (optional)")

arguments = vars(parser.parse_args())

detector = FaceDetector(arguments["face"])

# detect if a video file is passed in, otherwise use the camera
if not arguments.get("video", False):
    camera = cv2.VideoCapture(0)
else:
    camera = cv2.VideoCapture(arguments["video"])

# continue processing until the passed in video file is done, or the user
# stops the application by pressing the 'q' key
while True:
    (grabbed, frame) = camera.read()
    # read() returns a boolean value of success and the frame

    # if nothing is returned don't keep going
    if arguments.get("video") and not grabbed:


if __name__ == "__main__":
    detector = FaceDetector()
    #image = cv2.imread("./test3.jpeg")
    dealAsian()
    print("Finish")
def do():
    # TODO try-catch
    pp = PDFPrinter(Config.output)

    xLeftLimit = Config.spacing.xLeftLimit
    yTopLimit = Config.spacing.yTopLimit
    xRightLimit = Config.spacing.xRightLimit
    yBottomLimit = Config.spacing.yBottomLimit
    xIncrement = Config.spacing.xIncrement
    yIncrement = Config.spacing.yIncrement

    if Config.direction == PrintDirection.NORMAL:
        xInit = xLeftLimit
        yInit = yTopLimit
    else:
        xInit = xRightLimit
        yInit = yBottomLimit

    x, y = xInit, yInit

    # For debug message only.
    i = 0
    with open(Config.peoplecsv) as f:
        rows = sum(1 for line in f) - 1

    with open(Config.peoplecsv, newline='') as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            i += 1

            pi = PersonInfo()
            pi.parse(row)

            logger.info(f"Exporting ({i}/{rows}) {pi.name}")

            if Config.mode != PrintMode.TEXT_ONLY:
                try:
                    foundImg = get_image(pi.name)

                    # TODO refactor this if statement
                    if foundImg is None:
                        logger.error(
                            f"!!! Could not find an image for '{pi.name}'. Skipping photo print..."
                        )
                    else:
                        imgpath = os.path.join(Config.imgpath, foundImg)

                        if Config.facedetect or Config.crop or Config.equalizehist:
                            try:
                                vis = FaceDetector.run(
                                    imgpath,
                                    "haarcascade_frontalface_default.xml")
                                tmpfile = os.path.join(
                                    tempfile._get_default_tempdir(),
                                    next(tempfile._get_candidate_names()) +
                                    ".jpg")
                                cv2.imwrite(tmpfile, vis)
                                imgpath = tmpfile
                            except Exception as e:
                                logger.error(
                                    f"!!! FaceDetector threw an exception!\n{str(e)}"
                                )

                        pp.set_coordintates(x, y)
                        pp.print_photo(imgpath, pi.name)
                except Exception as e:
                    logger.error(f"!!! Could not print the image!\n{str(e)}")

            if Config.mode != PrintMode.PHOTO_ONLY:
                xText, yText = x, y

                # If we print both photo and text, move init position of text next to the photo
                if Config.mode != PrintMode.TEXT_ONLY:
                    xText += CardSpacing.textDelta
                    yText += CardSpacing.rowDelta

                pp.set_coordintates(xText, yText)
                pp.print_person_info(pi)

            # Compute new coordinates
            x += xIncrement

            # check for need to increment/decrement row
            if x < xLeftLimit or x > xRightLimit:
                x = xInit
                y += yIncrement

            # check if a new page should be added
            if y < yTopLimit or y > yBottomLimit:
                logger.debug(f"Height limit reached. Adding a new page.")
                y = yInit
                pp.add_page()

    pp.output()
    cv2.destroyAllWindows()
Example #20
import cv2
from facedetector import FaceDetector

image = cv2.imread("goat1.jpg", 1)
cv2.imshow("hi", image)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
p = FaceDetector(faceCascadePath="haarcascade_frontalface_default.xml")
face = p.detect(image=gray, scaleFactor=1.2, minNeighbors=5, minSize=(30, 30))
print("I found {} face(s)".format(len(face)))
for (x, y, w, h) in face:
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

cv2.imshow("Faces", image)
k = cv2.waitKey(0)
if k == 27:
    cv2.destroyAllWindows()
Example #21
"""
folder = 'images'
for image_path in os.listdir(folder):
    image = cv2.imread(folder + "\\" + image_path)
    # cv2.imshow("Image", image)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    fd = FaceDetector("cascades\\haarcascade_frontalface_default.xml")
    faceRects = fd.detect(gray, scaleFactor = 1.2)
    print("I found {} face(s)".format(len(faceRects)))
    for (x, y, w, h) in faceRects:
        cv2.rectangle(image, (x,y), (x+w, y+h), (0, 255, 0), 2)
    cv2.imshow("Faces", image)
    cv2.waitKey(0)
"""

camera = cv2.VideoCapture(0)
fd = FaceDetector("cascades\\haarcascade_frontalface_default.xml")
while True:
    (grabbed, frame) = camera.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faceRects = fd.detect(gray)
    print("I found {} face(s)".format(len(faceRects)))
    for (x, y, w, h) in faceRects:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow("Faces", frame)

    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

camera.release()
cv2.destroyAllWindows()
import argparse
import cv2

from facedetector import FaceDetector
from imutils import resize

argparser = argparse.ArgumentParser()
argparser.add_argument('-f', '--face', required=True, help='Path to where the face cascade resides.')
argparser.add_argument('-v', '--video', required=False, help='Path to the (optional) video file.')
arguments = vars(argparser.parse_args())

face_detector = FaceDetector(arguments['face'])

if not arguments.get('video', False):
    camera = cv2.VideoCapture(0)
else:
    camera = cv2.VideoCapture(arguments['video'])

while True:
    (grabbed, frame) = camera.read()

    if arguments.get('video') and not grabbed:
        break

    frame = resize(frame, width=300)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    face_rectangles = face_detector.detect(gray, scale_factor=1.1, min_neighbors=5, min_size=(30, 30))
    frame_clone = frame.copy()

    green = (0, 255, 0)
    # (the original snippet is cut off here; drawing and display are assumed)
    for (x, y, w, h) in face_rectangles:
        cv2.rectangle(frame_clone, (x, y), (x + w, y + h), green, 2)
    cv2.imshow('Faces', frame_clone)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

camera.release()
cv2.destroyAllWindows()
Example #23
ap.add_argument("-f","--faces",help="Path to face classfier (optional)")
ap.add_argument("-i","--images",required=True,help="Path to all image(s)",nargs="+")
args = vars(ap.parse_args())
faces = []
lowerLim = 300
upperLim = 1500

# Create the face detector once, falling back to the bundled cascade
if args["faces"] is None:
    fd = FaceDetector("cascades/haarcascade_frontalface_alt.xml")
else:
    fd = FaceDetector(args["faces"])

for im in args["images"]:
    if os.path.splitext(im)[1][1:] in ('jpg', 'jpeg', 'gif', 'png', 'bmp'):
        # Read image and convert it to grayscale
        image = cv2.imread(im)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Optimize image size
        image = imutils.optimize(image, lowerLim, upperLim)
        # If size is 300 use custom parameter values, else use defaults
        # (this still has to be made dynamic)
        if image.shape[0] == lowerLim or image.shape[1] == lowerLim:
            faceRects = fd.detect(image, scaleFactor=1.04, minNeighbors=3, minSize=(2, 2))
        else:
            faceRects = fd.detect(image)
        for (i, rect) in enumerate(faceRects):
            (x, y, w, h) = rect
            #cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 1)
            face = imutils.resize(image[y:y + h, x:x + w], width=100)
            faces.append(face)
            t = time.time()
Example #24
from facedetector import FaceDetector 
import imutils
import argparse
import cv2

ap = argparse.ArgumentParser()
ap.add_argument('-f', '--face', required=True,
	help='path to where the face cascade resides')
ap.add_argument('-v','--video',required=False, help='path to video file')
args = vars(ap.parse_args())

fd = FaceDetector(args['face'])

if not args.get('video', False):
	camera = cv2.VideoCapture(0)
else:
	camera = cv2.VideoCapture(args['video'])

while True:
	(grabbed, frame) = camera.read()

	if args.get('video') and not grabbed:
		break

	frame = imutils.resize(frame, width = 1300)
	gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

	faceRects = fd.detect(gray, scaleFactor=1.2, minNeighbors=5, minSize=(30, 30))
	frameClone = frame.copy()

	for (fX, fY, fW, fH) in faceRects:
		# (the original snippet is cut off here; drawing and display are assumed)
		cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH), (0, 255, 0), 2)
	cv2.imshow('Face', frameClone)
	if cv2.waitKey(1) & 0xFF == ord('q'):
		break

camera.release()
cv2.destroyAllWindows()
Example #25
#
# Author : Donny
#

import cv2
import shutil
import os
import numpy as np
import scipy as sp
import scipy.misc as spmisc
from facedetector import FaceDetector
from mergerect import mergeRects

if __name__ == '__main__':
    video_capture = cv2.VideoCapture(0)
    faceDetector = FaceDetector()

    while True:
        ret, frame = video_capture.read()
        frame = cv2.resize(frame, (0, 0), fx=0.4, fy=0.4)

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        faces, totalTiles = faceDetector.detect(gray,
                                                min_size=0.0,
                                                max_size=0.3,
                                                step=0.9,
                                                detectPad=(2, 2),
                                                verbose=True,
                                                getTotalTiles=True)
        faces = mergeRects(faces, overlap_rate=0.82, min_overlap_cnt=4)
# (this snippet begins mid-way through a run_server() definition; the function
# header and server construction are truncated in the original)
                                   selectInterval=(1000.0 / 15) / 1000)
    server.serve_forever()


# Run the thread
t = threading.Thread(target=run_server)
t.start()

ap = argparse.ArgumentParser()
ap.add_argument("-f", "--face", required=True,
                help="path to where the face cascade resides")
ap.add_argument("-v", "--video",
                help="path to the (optional) video file")
args = vars(ap.parse_args())

fd = FaceDetector(args["face"])

if not args.get("video", False):
    camera = cv2.VideoCapture(0)

else:
    camera = cv2.VideoCapture(args["video"])

while True:
    (grabbed, frame) = camera.read()

    if args.get("video") and not grabbed:
        break

    img_height, img_width, depth = frame.shape
    scale = w / img_width  # 'w' (the target width) is defined in the truncated part of this snippet
Example #27

import socket
import threading
import socketserver
import time
from facedetector import FaceDetector
import io
import cv2
import numpy as np
import json
import base64
from imageio import imread, imwrite
import imghdr

fd = FaceDetector()


class ThreadedUDPRequestHandler(socketserver.BaseRequestHandler):
    """
    This class works similar to the TCP handler class, except that
    self.request consists of a pair of data and client socket, and since
    there is no connection the client address must be given explicitly
    when sending data back via sendto().
    """
    def handle(self):
        # print(self.request)
        data = self.request[0].strip()
        print("len(data)", len(data), threading.current_thread())
        socket = self.request[1]
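
The handler above is truncated, but its docstring describes the contract: each request is a datagram plus the client socket. A hedged client-side sketch for exercising it; the address and the expectation that the server replies via sendto() are assumptions, and a single datagram caps the payload at roughly 64 KB:

import socket as socketlib    # aliased to avoid clashing with the handler's local name

with open('face.jpg', 'rb') as f:
    payload = f.read()
sock = socketlib.socket(socketlib.AF_INET, socketlib.SOCK_DGRAM)
sock.sendto(payload, ('127.0.0.1', 9999))     # hypothetical server address
reply, _ = sock.recvfrom(65535)
print('server replied with', len(reply), 'bytes')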
Example #28
from picamera.array import PiRGBArray
from picamera import PiCamera
from facedetector import FaceDetector
from aim import aim
import time

camera = PiCamera()
facedetector = FaceDetector()

while True:
    start_time = time.time()
    rawCapture = PiRGBArray(camera)
    camera.capture(rawCapture, format="bgr")
    end_time = time.time()
    print(f'Captured image in {(end_time - start_time)}')
    box_color = (0, 255, 0)
    box_thickness = 1

    frame = rawCapture.array
    faces = facedetector.get_faces(frame)
    print(f'Found {len(faces)} faces')

    if len(faces) == 1:
        face = faces[0]
        p1 = face[0]
        p2 = face[1]

        image_w = frame.shape[1]
        image_h = frame.shape[0]

        w = (p1[0] + p2[0]) / 2 / image_w
Example #29
from facedetector import FaceDetector
import argparse
import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-f", "--face", required=True, help="path to the face cascade")
ap.add_argument("-i", "--image", required=True, help="path to the image")
args = vars(ap.parse_args())

image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

fd = FaceDetector(args["face"])
faceRects = fd.detect(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
print("I found {} faces(s)".format(len(faceRects)))

for (x, y, w, h) in faceRects:
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

cv2.imshow("Faces", image)
cv2.waitKey(0)
class VideoCamera(object):
    def __init__(self):
        self.video = cv2.VideoCapture(0)

        self.model = load_model('../face_mask.model')
        self.face_detector = FaceDetector(
            './haarcascade_frontalface_default.xml')

    def __del__(self):
        self.video.release()

    def _resize(self, image, width=None, height=None, inter=cv2.INTER_AREA):
        dim = None
        (h, w) = image.shape[:2]

        if width is None and height is None:
            return image

        if width is None:
            # calculate the ratio of the height and construct the
            # dimensions
            r = height / float(h)
            dim = (int(w * r), height)
        else:
            # calculate the ratio of the width and construct the
            # dimensions
            r = width / float(w)
            dim = (width, int(h * r))

        resized = cv2.resize(image, dim, interpolation=inter)

        return resized

    def get_frame(self):
        (ret, frame) = self.video.read()

        frame = self._resize(frame, width=450)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        frame = np.dstack([frame, frame, frame])
        face_rects = self.face_detector.detect(frame,
                                               scaleFactor=1.2,
                                               minNeighbors=3,
                                               minSize=(40, 40))

        for (x, y, w, h) in face_rects:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

            face_image = frame[y:y + h, x:x + w]

            face_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2RGB)
            face_image = cv2.resize(face_image, (224, 224))

            face_image = face_image / 255.0

            face_image = img_to_array(face_image)
            face_image = np.expand_dims(face_image, axis=0)

            (mask, without_mask) = self.model.predict(face_image)[0]

            if mask > without_mask:
                print("Mask! :", mask)
                color = (0, 255, 0)
                cv2.putText(frame, "Mask: {:.1%}".format(mask), (x, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
                cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
            else:
                color = (0, 0, 255)
                cv2.putText(frame, "No mask: {:.1%}".format(without_mask),
                            (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45,
                            color, 2)
                cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
                print("without mask! :", without_mask)

        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()
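
get_frame() returns one JPEG-encoded frame, which is the shape Flask's multipart streaming expects. A common way to consume it; the route and app object here are illustrative, not part of the class above:

from flask import Flask, Response

app = Flask(__name__)
camera = VideoCamera()

def gen(cam):
    # emit an endless MJPEG stream, one part per frame
    while True:
        frame = cam.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(gen(camera),
                    mimetype='multipart/x-mixed-replace; boundary=frame')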
Example #31
import argparse
import imutils
from facedetector import FaceDetector
import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the (optional) video file")
args = vars(ap.parse_args())
face_cascade_path = 'haarcascade_frontalface_default.xml'
fd = FaceDetector(face_cascade_path)

if not args.get("video", False):
    camera = cv2.VideoCapture(0)
else:
    camera = cv2.VideoCapture(args["video"])

while True:
    (grabbed, frame) = camera.read()
    if args.get("video") and not grabbed:
        break
    frame = imutils.resize(frame, width=300)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faceRects = fd.detect(gray,
                          scaleFactor=1.1,
                          minNeighbors=5,
                          minSize=(30, 30))
    frameClone = frame.copy()
    for (fX, fY, fW, fH) in faceRects:
        cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH), (0, 255, 0), 2)
    cv2.imshow("Face", frameClone)
    if cv2.waitKey(1) & 0xFF == ord("q"):
parser.add_argument("-f",
                    "--face",
                    required=True,
                    help="path to where the face cascade resides")
parser.add_argument("-i",
                    "--image",
                    required=True,
                    help="path to where the image file resides")
args = vars(parser.parse_args())

# get the image to use and convert it to Grayscale for processing
image = cv2.imread(args["image"])
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# setup our Classifier processing
detector = FaceDetector(args["face"])
face_rectangles = detector.detect(gray_image,
                                  scaleFactor=1.1,
                                  minNeighbors=5,
                                  minSize=(30, 30))

# Print out the number of faces in the image that were found
print("I found {} face(s)".format(len(face_rectangles)))

# Loop through the faces and compute the rectangles to be drawn
for (x, y, w, h) in face_rectangles:
    cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)

# Show the image and wait to close the window until a key is pressed
cv2.imshow("Faces", image)
cv2.waitKey(0)
Example #33
from facedetector import FaceDetector
import cv2
import argparse
from image_processing import imutils

ap = argparse.ArgumentParser()
#ap.add_argument("-f", "--face", required=True, help="path to where the face cascade resides")
ap.add_argument("-v", "--video", help="path to the (optional) video file")
args = vars(ap.parse_args())

fd = FaceDetector("./cascades/haarcascade_frontalface_default.xml")

if not args.get("video", False):
    camera = cv2.VideoCapture(0)
else:
    camera = cv2.VideoCapture(args["video"])

while True:
    (grabbed, frame) = camera.read()

    # the video file reached the end
    if args.get("video") and not grabbed:
        break

    frame = imutils.resize(frame, width=300)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # detect on the grayscale frame computed above
    faceRects = fd.detect(gray,
                          scaleFactor=1.1,
                          minNeighbors=5,
                          minSize=(30, 30))
    # (the original snippet is cut off here; drawing and display are assumed)
    frameClone = frame.copy()
    for (fX, fY, fW, fH) in faceRects:
        cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH), (0, 255, 0), 2)
    cv2.imshow("Face", frameClone)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

camera.release()
cv2.destroyAllWindows()
Example #34
# 		predictImage = gray[y:y+h, x:x+w]
# 		nbrPredicted = recognizer.predict(predictImage)
# 		print("Result is {}".format(nbrPredicted))
#
# 	cv2.imshow("Face", frame)
#
# 	waitResult = cv2.waitKey(1)
# 	if waitResult & 0xFF == ord("q"):
# 		break
#
# camera.release()
# cv2.destroyAllWindows()

frame = cv2.imread('src.jpg')
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
fd = FaceDetector('haarcascade_frontalface_default.xml')
faceRects = fd.detect(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))

for (x, y, w, h) in faceRects:
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    predictImage = gray[y:y + h, x:x + w]
    nbrPredicted = recognizer.predict(predictImage)
    print("Result is {}".format(nbrPredicted))

cv2.imshow("Face", frame)

cv2.waitKey(0)
# break

# camera.release()
Example #35
from __future__ import print_function
from flask import Flask, render_template, request, Response
from PIC_2_PI_SPI import *
import time
import sqlite3 as sql
import threading
from facedetector import FaceDetector
import cv2
import imutils
app = Flask(__name__)

camera = cv2.VideoCapture(0)  # use 0 for web camera
fd = FaceDetector("haarcascade_frontalface_default.xml")


def gen_frames():  # generate frame by frame from camera
    while True:
        # Capture frame-by-frame
        success, frame = camera.read()  # read the camera frame
        if not success:
            break
        else:
            # resize the frame and convert it to grayscale
            frame = imutils.resize(frame, width=300)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # detect faces in the image and then clone the frame
            # so that we can draw on it
            faceRects = fd.detect(gray,
                                  scaleFactor=1.1,
                                  minNeighbors=5,
                                  minSize=(30, 30))
            # (the original snippet is cut off here; a typical continuation
            # draws the detections, JPEG-encodes the frame, and yields it as
            # one part of an MJPEG stream)
            for (fX, fY, fW, fH) in faceRects:
                cv2.rectangle(frame, (fX, fY), (fX + fW, fY + fH), (0, 255, 0), 2)
            ret, buffer = cv2.imencode('.jpg', frame)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + buffer.tobytes() + b'\r\n')