Example #1
import argparse

import cv2

from facedetector import FaceDetector  # the helper sketched after this example


def main():
    # Parse the command-line arguments
    parser = argparse.ArgumentParser()

    parser.add_argument("-f",
                        "--face",
                        required=True,
                        help="path to where the face cascade resides")

    parser.add_argument("-v",
                        "--video",
                        action='store_true',
                        help="detection from WebCam")

    parser.add_argument("-i",
                        "--image",
                        help="path to where the image resides")

    args = vars(parser.parse_args())
    # only read the image when --image was supplied (imread(None) fails)
    img = cv2.imread(args["image"], 1) if args["image"] else None
    if (args["video"]):
        # cap points to the webcam
        cap = cv2.VideoCapture(0)
        # create the detector once, outside the capture loop
        faceDetector = FaceDetector(args['face'])
        while True:
            # grab a frame from the webcam
            ret, img = cap.read()
            if not ret:
                break
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            # use the FaceDetector class to detect faces
            faces = faceDetector.detect(gray)
            # draw a box around faces
            for (x, y, w, h) in faces:
                img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0),
                                    2)
            # draw image
            cv2.imshow('img', img)
            if cv2.waitKey(1) == ord('q'):
                cv2.destroyAllWindows()
                break
        cap.release()
    elif img is not None:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # use the FaceDetector class to detect faces
        faceDetector = FaceDetector(args['face'])
        faces = faceDetector.detect(gray)
        # draw a box around faces
        for (x, y, w, h) in faces:
            img = cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # draw image
        cv2.imshow('img', img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    else:
        print("Please enter the correct arguments")
Example #2
def main():
    # Instantiate Classes
    detector = FaceDetector(FACE_CLASSIFIER_PATH, EYE_CLASSIFIER_PATH)
    model = FaceModel()
    display = Display()
    capture = Capture()

    oldTime = time.time()
    i = 0
    frames_num = 0
    delta_sum = 0
    while True:
        # Esc exits; on Linux the window does not refresh without this waitKey
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            return

        # Calculate time difference (dt), update oldTime variable
        newTime = time.time()
        dt = newTime - oldTime
        oldTime = newTime

        frames_num += 1
        delta_sum += dt
        if frames_num % 100 == 0:
            print("delta:", delta_sum / frames_num,
                  "frames per sec:", frames_num / delta_sum)
            frames_num = 0
            delta_sum = 0

        # Grab Frames
        frames = capture.read()

        # Detect face 20% of the time, eyes 100% of the time
        if i % 5 == 0:
            rects = detector.detect(frames)
        else:
            rects = detector.detect(frames, model.getPreviousFaceRects())
        i += 1

        # Add detected rectangles to model
        model.add(rects)

        display.renderScene(frames['display'], model, rects)
        display.renderEyes(frames['color'], model)
Example #4
def detect_faces(filename):
    try:
        image = cv2.imread(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # find faces in the image
        fd = FaceDetector()
        faceRects = fd.detect(gray, scaleFactor=1.1, minNeighbors=5,
                              minSize=(30, 30))
        faces_number = len(faceRects)
        # loop over the faces and draw a rectangle around each
        for (x, y, w, h) in faceRects:
            image = cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.imwrite((os.path.join(app.config['UPLOAD_FOLDER'], filename)), image)
        return faces_number
    except Exception:
        # if anything goes wrong (missing file, bad image), report zero faces
        return 0
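
detect_faces reads and rewrites files under app.config['UPLOAD_FOLDER'], which suggests it backs a Flask upload handler. A hypothetical route that calls it might look like this; the route name and the "file" form field are assumptions, not part of the example:

# Hypothetical Flask upload route around detect_faces.
import os

from flask import request
from werkzeug.utils import secure_filename


@app.route('/upload', methods=['POST'])
def upload():
    # save the uploaded image, then run the detector on it in place
    f = request.files['file']
    filename = secure_filename(f.filename)
    f.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
    return "I found {} face(s)".format(detect_faces(filename))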
Example #5
def VJFindFace(frame):   
    # include the global variables inside the scope of the function
    global RATIO, orig
    # list to store the corner coordinates of the faces found; initially empty
    allRoiPts = []
    # generate a copy of the original frame
    orig = frame.copy()
    # resize the original frame, keeping the aspect ratio set by RATIO
    dim = (frame.shape[1] // RATIO, frame.shape[0] // RATIO)
    # perform the actual resizing of the image
    resized = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
    # convert the frame to grayscale
    gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
    # find faces in the gray scale frame of the video using Haar feature based trained classifier
    fd = FaceDetector('{0}/haarcascade_frontalface_default.xml'.format(OPENCV_HOME))
    faceRects = fd.detect(gray, scaleFactor=1.1, minNeighbors=4, minSize=(10, 10))
    print("\n### Number of Faces: {0}\n".format(len(faceRects)))
    # loop over the faces and draw a rectangle around each
    for (x, y, w, h) in faceRects:
        # This step is crucial. We shrink the bounding box of the detected
        # face because the box returned by Viola-Jones includes part of the
        # background. If we took the mean of that box while tracking, the
        # resulting box would be much larger than the actual face region.
        # Shrinking the window removes the background's effect on the mean,
        # since the color of interest stays inside this smaller area in the
        # upcoming frames.
        x = RATIO*(x+10)
        y = RATIO*(y+10)
        w = RATIO*(w-15)
        h = RATIO*(h-15)            
        # Uncomment the rectangle call below to view the boxes found by
        # Viola-Jones. Note that these boxes will appear shifted and smaller
        # due to the operation performed above.
        # cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)

        # Assign top-left and bottom-right pixel values each time Viola-Jones
        # is run, appending the points detected for each face to the list
        allRoiPts.append((x, y, x+w, y+h))        
    #show the detected faces
    cv2.imshow("Faces", frame)
    cv2.waitKey(1)  
    return allRoiPts
Example #6
if not args.get("video", False):
	camera = cv2.VideoCapture(0)
	if not camera.isOpened():
		raise ValueError("camera not found!")
else:
	camera = cv2.VideoCapture(args["video"])

while True:
	# return false if no frame has been grabbed.
	(grabbed, frame) = camera.read()

	# if we are reading from a video file and no frame was grabbed, we
	# have reached the end of the video
	if args.get("video") and not grabbed:
		break

	frame = imutils.resize(frame, width=300)
	gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

	faceRects = fd.detect(gray, scaleFactor=1.1)
	frameClone = frame.copy()

	for (fX, fY, fW, fH) in faceRects:
		cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH),
			(0, 255, 0), 2)
	cv2.imshow("Face", frameClone)

	# press "q" to quit; ord() returns the character's ASCII code
	if cv2.waitKey(30) == ord("q"):
		break

camera.release()
cv2.destroyAllWindows()
Example #7
        elif right < left:
            angle = 15 - abs(right - left)
            direction = 2

    return direction, angle


cap = cv2.VideoCapture(1)
facedetector = FaceDetector()

while True:
    ret, img = cap.read()
    if not ret:
        print("USB camera error; entering normal mode without FaceID.")
        break
    faces = facedetector.detect(img)
    valid = facedetector.verifyfaces(faces)
    if valid:
        print("FaceID matched successfully; unlocking the car.")
        break
    else:
        print("No matching face.")
cap.release()
del facedetector

car = Car()
detector = Detector()
cap = cv2.VideoCapture(0)

while True:
    ret, img = cap.read()
Example #8
from mergerect import mergeRects

if __name__ == '__main__':
    video_capture = cv2.VideoCapture(0)
    faceDetector = FaceDetector()

    while True:
        ret, frame = video_capture.read()
        frame = cv2.resize(frame, (0, 0), fx=0.4, fy=0.4)

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        faces, totalTiles = faceDetector.detect(gray,
                                                min_size=0.0,
                                                max_size=0.3,
                                                step=0.9,
                                                detectPad=(2, 2),
                                                verbose=True,
                                                getTotalTiles=True)
        faces = mergeRects(faces, overlap_rate=0.82, min_overlap_cnt=4)

        print("----faces detected----")
        for x, y, w, h in faces:
            print(x, y, w, h)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        frame = cv2.resize(frame, (0, 0), fx=1.0 / 0.4, fy=1.0 / 0.4)
        cv2.imshow('Video', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
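
Example #8 pulls mergeRects in from a local mergerect module that is not shown on this page. A rough, hypothetical sketch of what such a helper might do (cluster heavily overlapping detections, keep clusters with enough supporting hits, and average each cluster into one box) could look like this; the implementation details are assumptions:

# Hypothetical sketch of the local mergerect helper used above.
def mergeRects(rects, overlap_rate=0.8, min_overlap_cnt=1):
    def overlap(a, b):
        # intersection area divided by the smaller rectangle's area
        ax, ay, aw, ah = a
        bx, by, bw, bh = b
        ix = max(0, min(ax + aw, bx + bw) - max(ax, bx))
        iy = max(0, min(ay + ah, by + bh) - max(ay, by))
        smaller = min(aw * ah, bw * bh)
        return (ix * iy) / float(smaller) if smaller else 0.0

    merged = []
    used = [False] * len(rects)
    for i, r in enumerate(rects):
        if used[i]:
            continue
        cluster = [r]
        used[i] = True
        for j in range(i + 1, len(rects)):
            if not used[j] and overlap(r, rects[j]) >= overlap_rate:
                cluster.append(rects[j])
                used[j] = True
        if len(cluster) >= min_overlap_cnt:
            # average the cluster into a single box
            xs, ys, ws, hs = zip(*cluster)
            merged.append((sum(xs) // len(cluster), sum(ys) // len(cluster),
                           sum(ws) // len(cluster), sum(hs) // len(cluster)))
    return merged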
Example #9

while True:
    (grabbed, frame) = camera.read()
    # read() returns a boolean value of success and the frame

    # if nothing is returned don't keep going
    if arguments.get("video") and not grabbed:
        break

    # resize the frame to 300 pixels in width, for performance
    frame = imutils.resize(frame, width=300)
    # convert the frame to gray scale for processing
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # run the frame through the facial detector in the same fashion as
    # still pictures from picture_detection.py
    face_rectangles = detector.detect(gray,
                                      scaleFactor=1.1,
                                      minNeighbors=5,
                                      minSize=config.WINDOW_SIZE)

    # Make a copy of the frame... just in case
    frame_clone = frame.copy()

    # Loop over the bounding boxes and draw rectangles
    for (fX, fY, fW, fH) in face_rectangles:
        cv2.rectangle(frame_clone, (fX, fY), (fX + fW, fY + fH),
                      config.GREEN_BOX, config.LINE_THICKNESS)

    # Show off our work to the world
    cv2.imshow("Face", frame_clone)

    # Provide a way to stop the display
    if cv2.waitKey(1) & 0xFF == ord("q"):
Example #10
import cv2
import numpy as np
# assuming the Keras utilities; the original imports are not shown
from keras.models import load_model
from keras.preprocessing.image import img_to_array

from facedetector import FaceDetector


class VideoCamera(object):
    def __init__(self):
        self.video = cv2.VideoCapture(0)

        self.model = load_model('../face_mask.model')
        self.face_detector = FaceDetector(
            './haarcascade_frontalface_default.xml')

    def __del__(self):
        self.video.release()

    def _resize(self, image, width=None, height=None, inter=cv2.INTER_AREA):
        dim = None
        (h, w) = image.shape[:2]

        if width is None and height is None:
            return image

        if width is None:
            # calculate the ratio of the height and construct the
            # dimensions
            r = height / float(h)
            dim = (int(w * r), height)
        else:
            # calculate the ratio of the width and construct the
            # dimensions
            r = width / float(w)
            dim = (width, int(h * r))

        resized = cv2.resize(image, dim, interpolation=inter)

        return resized

    def get_frame(self):
        (ret, frame) = self.video.read()

        frame = self._resize(frame, width=450)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        frame = np.dstack([frame, frame, frame])
        face_rects = self.face_detector.detect(frame,
                                               scaleFactor=1.2,
                                               minNeighbors=3,
                                               minSize=(40, 40))

        for (x, y, w, h) in face_rects:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

            face_image = frame[y:y + h, x:x + w]

            face_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2RGB)
            face_image = cv2.resize(face_image, (224, 224))

            face_image = face_image / 255.0

            face_image = img_to_array(face_image)
            face_image = np.expand_dims(face_image, axis=0)

            (mask, without_mask) = self.model.predict(face_image)[0]

            if mask > without_mask:
                print("Mask! :", mask)
                color = (0, 255, 0)
                # format the model's probability as a percentage
                cv2.putText(frame, "Mask : {:.1%}".format(mask), (x, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
                cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
            else:
                color = (0, 0, 255)
                cv2.putText(frame, "No mask : {:.1%}".format(without_mask),
                            (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, color,
                            2)
                cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
                print("without mask! :", without_mask)

        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()
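
get_frame() returns an encoded JPEG byte string, which suggests the class feeds an MJPEG stream. A hypothetical Flask streaming route (the app setup and route name are assumptions, not part of the example) might look like this:

# Hypothetical usage sketch: streaming VideoCamera frames over MJPEG.
from flask import Flask, Response

app = Flask(__name__)


def generate(camera):
    # yield each JPEG frame as one part of a multipart response
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')


@app.route('/video_feed')
def video_feed():
    return Response(generate(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')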
Example #11
for im in args["images"]:
    if any(x == os.path.splitext(im)[1][1:] for x in ('jpg', 'jpeg', 'gif', 'png', 'bmp')):
        # Read image
        image = cv2.imread(im)
        # Create FaceDetector object
        if args["faces"] is None:
            fd = FaceDetector("cascades/haarcascade_frontalface_alt.xml")
        else:
            fd = FaceDetector(args["faces"])
        # Optimize image size
        image = imutils.optimize(image, lowerLim, upperLim)
        # Grayscale image for detection (after resizing, so the rectangles
        # match the resized image)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # If the size hit the lower limit, use custom parameter values,
        # else use the defaults (this still has to be made dynamic)
        if image.shape[0] == lowerLim or image.shape[1] == lowerLim:
            faceRects = fd.detect(gray, scaleFactor=1.04, minNeighbors=3, minSize=(2, 2))
        else:
            faceRects = fd.detect(gray)
        for (i, rect) in enumerate(faceRects):
            (x, y, w, h) = rect
            # cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 1)
            face = imutils.resize(image[y:y + h, x:x + w], width=100)
            faces.append(face)
            t = time.time()
            folder = "faces/" + os.path.dirname(im)[7:]
            try:
                os.mkdir(folder)
            except OSError:
                pass
            cv2.imwrite(folder + "/" + str(t) + ".jpg", face)
            time.sleep(0.025)
Example #12
ap = argparse.ArgumentParser()
ap.add_argument("-i",
                "--image",
                required=True,
                help="path to where the image file resides")
args = vars(ap.parse_args())

image = cv2.imread(args["image"])
(h, w) = image.shape[:2]
if h > 1080:
    image = cv2.resize(image, (600, 800))
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

fd = FaceDetector("./haarcascade_frontalface_default.xml")
faceRects = fd.detect(gray, scaleFactor=1.2, minNeighbors=3, minSize=(40, 40))
print(f'I found {len(faceRects)} face(s)')

model = load_model('./face_mask.model')

face_images = []
for (x, y, w, h) in faceRects:
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

    face_image = image[y:y + h, x:x + w]

    face_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2RGB)
    face_image = cv2.resize(face_image, (224, 224))

    face_image = face_image / 255.0
                    "--face",
                    required=True,
                    help="path to where the face cascade resides")
parser.add_argument("-i",
                    "--image",
                    required=True,
                    help="path to where the image file resides")
args = vars(parser.parse_args())

# get the image to use and convert it to Grayscale for processing
image = cv2.imread(args["image"])
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# setup our Classifier processing
detector = FaceDetector(args["face"])
face_rectangles = detector.detect(gray_image,
                                  scaleFactor=1.1,
                                  minNeighbors=5,
                                  minSize=(30, 30))

# Print out the number of faces in the image that were found
print("I found {} face(s)".format(len(face_rectangles)))

# Loop through the faces and compute the rectangles to be drawn
for (x, y, w, h) in face_rectangles:
    cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)

# Show the image and wait to close the window until a key is pressed
cv2.imshow("Faces", image)
cv2.waitKey(0)
Example #14
import argparse

import cv2

from facedetector import FaceDetector

ap = argparse.ArgumentParser()
ap.add_argument("-f",
                "--face",
                required=True,
                help="path to where the face cascade resides")
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())

image = cv2.imread(args["image"])
# cv2.imread returns BGR images, so convert with COLOR_BGR2GRAY
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Load the face cascade into the face detector
face_detector = FaceDetector(args["face"])

# Detect all the faces on the image
face_rectangles = face_detector.detect(gray)

print("I found %d face(s)" % len(face_rectangles))

# Draw rectangles around the faces found on the image
for (x, y, w, h) in face_rectangles:
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

cv2.imshow("Faces", image)
cv2.waitKey(0)
Example #15
if not args.get('video', False):
	camera = cv2.VideoCapture(0)
else:
	camera = cv2.VideoCapture(args['video'])

while True:
	(grabbed, frame) = camera.read()

	if args.get('video') and not grabbed:
		break

	frame = imutils.resize(frame, width=1300)
	gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

	faceRects = fd.detect(gray, scaleFactor=1.2, minNeighbors=5, minSize=(30, 30))
	frameClone = frame.copy()

	for (fX, fY, fW, fH) in faceRects:
		cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH),
			(0, 255, 0), 2)
	cv2.imshow('Face', frameClone)
	if cv2.waitKey(1) & 0xFF == ord('q'):
		break

camera.release()
cv2.destroyAllWindows()

Example #16

while True:
    (grabbed, frame) = camera.read()

    if args.get("video") and not grabbed:
        break

    img_height, img_width, depth = frame.shape
    scale = w / img_width
    h = img_height * scale
    frame = cv2.resize(frame, (0, 0), fx=scale, fy=scale)

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Face detection function is here
    faceRects = fd.detect(gray)
    frameClone = frame.copy()


    if faceRects is not None:
        for (fX, fY, fW, fH) in faceRects:
            cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH),
                          (0, 255, 0), 2)
            for client in clients:
                data = {'X': int(fX), 'Y': int(fY)}
                json_data = json.dumps(data, cls=IntegerEncoder)
                client.sendMessage(json_data)

    cv2.imshow("Face", cv2.flip(frameClone, 1))

    if cv2.waitKey(1) & 0xFF == ord('q'):
Example #17
face_detector = FaceDetector(arguments['face'])

if not arguments.get('video', False):
    camera = cv2.VideoCapture(0)
else:
    camera = cv2.VideoCapture(arguments['video'])

while True:
    (grabbed, frame) = camera.read()

    if arguments.get('video') and not grabbed:
        break

    frame = resize(frame, width=300)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    face_rectangles = face_detector.detect(gray, scale_factor=1.1, min_neighbors=5, min_size=(30, 30))
    frame_clone = frame.copy()

    green = (0, 255, 0)
    for (f_x, f_y, f_w, f_h) in face_rectangles:
        cv2.rectangle(frame_clone, (f_x, f_y), (f_x + f_w, f_y + f_h), green, 2)

    cv2.imshow('Face', frame_clone)

    if cv2.waitKey(1) & 0xFF == ord('q'):  # Did the user press the q key?
        break

camera.release()
cv2.destroyAllWindows()
Example #18
from facedetector import FaceDetector
import argparse
import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-f", "--face", required=True, help="path to the face cascade")
ap.add_argument("-i", "--image", required=True, help="path to the image")
args = vars(ap.parse_args())

image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

fd = FaceDetector(args["face"])
faceRects = fd.detect(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
print("I found {} face(s)".format(len(faceRects)))

for (x, y, w, h) in faceRects:
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

cv2.imshow("Faces", image)
cv2.waitKey(0)
Example #19
import cv2
from facedetector import FaceDetector

image = cv2.imread("goat1.jpg", 1)
cv2.imshow("hi", image)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
p = FaceDetector(faceCascadePath="haarcascade_frontalface_default.xml")
face = p.detect(image=gray, scaleFactor=1.2, minNeighbors=5, minSize=(30, 30))
print("I found {} face(s)".format(len(face)))
for (x, y, w, h) in face:
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

cv2.imshow("Faces", image)
k = cv2.waitKey(0)
if k == 27:
    cv2.destroyAllWindows()