def __init__(self, size=(640, 480),
             cascade_path="cascades/haarcascade_frontalface_default.xml",
             framerate=32, warmup=0.1):
    """Initialize the PiCamera capture stream and the face detector.

    Args:
        size: (width, height) camera resolution. Defaults to (640, 480).
        cascade_path: path to the Haar cascade XML file handed to
            ``FaceDetector``.
        framerate: capture framerate in frames per second.
        warmup: seconds to sleep so the camera sensor can warm up.
    """
    self.size = size
    # initialize the camera and grab a reference to the raw camera capture
    self.camera = PiCamera()
    self.camera.resolution = self.size
    self.camera.framerate = framerate
    self.rawCapture = PiRGBArray(self.camera, size=self.size)
    # construct the face detector and allow the camera to warm up
    self.fd = FaceDetector(cascade_path)
    time.sleep(warmup)
class Camera(object):
    """PiCamera stream with Haar-cascade face detection.

    While at least one face is visible in the frame, the ``laser`` GPIO
    pin (module-level) is driven low; otherwise it is driven high.
    """

    def __init__(self, size=(640, 480),
                 cascade_path="cascades/haarcascade_frontalface_default.xml",
                 framerate=32, warmup=0.1):
        """Initialize the PiCamera capture stream and the face detector.

        Args:
            size: (width, height) camera resolution. Defaults to (640, 480).
            cascade_path: path to the Haar cascade XML file handed to
                ``FaceDetector``.
            framerate: capture framerate in frames per second.
            warmup: seconds to sleep so the camera sensor can warm up.
        """
        self.size = size
        # initialize the camera and grab a reference to the raw camera capture
        self.camera = PiCamera()
        self.camera.resolution = self.size
        self.camera.framerate = framerate
        self.rawCapture = PiRGBArray(self.camera, size=self.size)
        # construct the face detector and allow the camera to warm up
        self.fd = FaceDetector(cascade_path)
        time.sleep(warmup)

    def show_camera(self):
        """Stream frames, draw detected faces, and gate the laser.

        Loops until the 'q' key is pressed in the display window.
        """
        # capture frames from the camera
        for f in self.camera.capture_continuous(self.rawCapture, format="bgr",
                                                use_video_port=True):
            # grab the raw NumPy array representing the image
            self.frame = f.array

            # resize the frame and convert it to grayscale
            self.frame = self.rot180(imutils.resize(self.frame, width=300))
            self.gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)

            # detect faces in the image and then clone the frame
            # so that we can draw on it
            self.faceRects = self.fd.detect(self.gray, scaleFactor=1.1,
                                            minNeighbors=5, minSize=(30, 30))
            self.frameClone = self.frame.copy()

            # loop over the face bounding boxes and draw them
            for (fX, fY, fW, fH) in self.faceRects:
                cv2.rectangle(self.frameClone, (fX, fY),
                              (fX + fW, fY + fH), (0, 255, 0), 2)

            # turn the laser off while a face is detected, on otherwise
            if len(self.faceRects) > 0:
                GPIO.output(laser, 0)
            else:
                GPIO.output(laser, 1)

            # show our detected faces, then clear the frame in
            # preparation for the next frame
            cv2.imshow("Robopot", self.frameClone)
            self.rawCapture.truncate(0)

            # if the 'q' key is pressed, stop the loop
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break

    def rot180(self, frame):
        """Return *frame* rotated by 180 degrees."""
        # numpy.rot90 with k=2 is equivalent to two successive 90° rotations
        return numpy.rot90(frame, 2)
import os
from subprocess import call

# Give the process a recognizable name so it is easy to spot in `ps`.
# NOTE(review): setproctitle, argparse, cv2 and FaceDetector are used but not
# imported in this chunk — presumably imported earlier in the file; confirm.
setproctitle.setproctitle("FacialDetectProcess")

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--face", required = True,
    help = "path to where the face cascade resides")
args = vars(ap.parse_args())

# Shared-memory (tmpfs) file paths used to exchange frames/results with
# other processes.
faceimg = "/dev/shm/face.jpg"
facetext = "/dev/shm/face_text"
facelocation = "/dev/shm/face.txt"
faceframe = "/dev/shm/face_frame"

# construct the face detector
fd = FaceDetector(args["face"])

# keep looping
while True:
    # the existence of the marker file signals that a frame is ready
    if (os.path.exists(faceframe)):
        # reset the face bounding box
        fX = 0
        fY = 0
        fW = 0
        fH = 0
        # grab the current frame
        # NOTE(review): Python 2 print statement — this script is Python 2.
        print "reading image"
        gray = cv2.imread('/dev/shm/face.jpg',0)
        # get resolution for plotting location to screen thirds in facedetect.py
        height, width = gray.shape
        # detect faces in the image and then clone the frame
        # so that we can draw on it
print("Screen Width: %s" % screenWidth)
print("Screen Height: %s" % screenHeight)

# if a video path was not supplied, grab the reference
# to the gray
if not args.get("video", False):
    camera = cv2.VideoCapture(0)
# otherwise, load the video
else:
    camera = cv2.VideoCapture(args["video"])

# construct the face detector and allow the camera to warm
# up
fd = FaceDetector(args["face"])
time.sleep(0.1)
lastTime = time.time()

# send to 0
# NOTE(review): writes '0\n' over a serial link (`ser` is opened elsewhere) —
# presumably re-centers an attached servo; confirm against the device protocol.
ser.write('0\n')

# keep looping
while True:
    # grab the current frame
    (grabbed, frame) = camera.read()

    # if we are viewing a video and we did not grab a
    # frame, then we have reached the end of the video
    if args.get("video") and not grabbed:
import numpy as np
import cv2
from pyimagesearch.facedetector import FaceDetector

# face detector built from the default frontal-face Haar cascade
fd = FaceDetector('cascades/haarcascade_frontalface_default.xml')

cap = cv2.VideoCapture(0)
scaling_factor = 0.5  # shrink each frame to half size for faster detection

# Load our overlay image: hat.png (-1 keeps the alpha channel)
hat_default = cv2.imread('images/crown.png',-1)

# Create the mask for the hat (the alpha channel of the BGRA image)
orig_mask = hat_default[:,:,3]

# Create the inverted mask for the hat
orig_mask_inv = cv2.bitwise_not(orig_mask)

# Convert hat image to BGR
# and save the original image size (used later when re-sizing the image)
hat = hat_default[:,:,0:3]
orighatHeight, origHatWidth = hat.shape[:2]

while True:
    ret, frame = cap.read()
    frame = cv2.resize(frame, None, fx=scaling_factor, fy=scaling_factor,
        interpolation=cv2.INTER_AREA)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # positional args: scaleFactor=1.3, minNeighbors=5
    faces = fd.detect(gray, 1.3, 5)
import argparse
import cv2
import numpy as np

# variables
cx = 0
cy = 0
larghezza_foto = 85  # side length in pixels of the square photo crop
blank_image_raw = np.zeros((larghezza_foto,larghezza_foto), np.uint8)
blank_image = cv2.cvtColor(blank_image_raw, cv2.COLOR_GRAY2BGR)
proportional_w = 10
proportional_h = 8
offset_constant = 2.5

# construct the face detector
# NOTE(review): FaceDetector is used but not imported in this chunk —
# presumably imported earlier in the file; confirm.
fd = FaceDetector("cascades/haarcascade_frontalface_default.xml")

# open video capture
camera = cv2.VideoCapture(0)

# load the demo licence-card image (Italian: "patente" = driving licence)
image_patente = cv2.imread("patente-fronte.png")

# TODO: consider using placement tags for the text and photo in the demo image

# merge image with text at hard-coded card coordinates
font = cv2.FONT_HERSHEY_SIMPLEX  # good
cv2.putText(image_patente,'PIPPO',(130,40), font, 0.4,(0,0,0),1,16)
cv2.putText(image_patente,'PAPERINO',(130,55), font, 0.4,(0,0,0),1,16)
cv2.putText(image_patente,'08/07/97 Forli (FC)',(130,72), font, 0.3,(0,0,0),1,16)
cv2.putText(image_patente,'03/02/1995 MIT-UCO',(130,89), font, 0.3,(0,0,0),1,16)
import numpy as np
import cv2
from pyimagesearch.facedetector import FaceDetector

# face detector built from the default frontal-face Haar cascade
fd = FaceDetector('cascades/haarcascade_frontalface_default.xml')

cap = cv2.VideoCapture(0)
scaling_factor = 0.5  # shrink each frame to half size for faster detection

# Load our overlay image: hat.png (-1 keeps the alpha channel)
hat_default = cv2.imread('images/crown.png', -1)

# Create the mask for the hat (the alpha channel of the BGRA image)
orig_mask = hat_default[:, :, 3]

# Create the inverted mask for the hat
orig_mask_inv = cv2.bitwise_not(orig_mask)

# Convert hat image to BGR
# and save the original image size (used later when re-sizing the image)
hat = hat_default[:, :, 0:3]
orighatHeight, origHatWidth = hat.shape[:2]

while True:
    ret, frame = cap.read()
    frame = cv2.resize(frame, None, fx=scaling_factor, fy=scaling_factor,
        interpolation=cv2.INTER_AREA)
"r2D2": "b",
"closeEncounters": "c",
"ariel": "d",
"laugh2": "e",
"squeak": "f",
"waka": "g",
"catcall": "h",
"ohhh": "i",
"uhoh": "j",
"laugh": "k"
}
# ^ tail of the sound-name -> Arduino command-letter table opened earlier
#   in the file (presumably bound to `commands`; confirm).

#### connect
Arduino = connectArduino("/dev/cu.usbmodem1411")

# construct the face detector
fd = FaceDetector("./cascades/haarcascade_frontalface_default.xml")
camera = cv2.VideoCapture(0)

while True:
    # count the faces visible in the current camera frame
    numberOfFace = detectFace(camera, fd)
    print(numberOfFace)
    # one face -> laugh; two faces -> "close encounters" jingle
    if (numberOfFace == 1):
        messageArduino(commands["laugh"])
    elif (numberOfFace == 2):
        messageArduino(commands["closeEncounters"])
        # messageArduino(commands["laugh"])
    time.sleep(1)
    # if the 'q' key is pressed, stop the loop
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

camera.release()
# import the necessary packages from pyimagesearch.facedetector import FaceDetector from pyimagesearch import imutils import argparse import cv2 # construct the argument parse and parse the arguments # ap = argparse.ArgumentParser() # ap.add_argument("-f", "--face", required = True, # help = "path to where the face cascade resides") # ap.add_argument("-v", "--video", # help = "path to the (optional) video file") # args = vars(ap.parse_args()) # construct the face detector fd = FaceDetector('cascades/haarcascade_frontalface_default.xml') # if a video path was not supplied, grab the reference # to the gray # if not args.get("video", False): camera = cv2.VideoCapture(0) # otherwise, load the video # else: # camera = cv2.VideoCapture(args["video"]) # Load our overlay image: mustache.png imgMustache = cv2.imread('images/mustache.png', -1) # Create the mask for the mustache orig_mask = imgMustache[:, :, 3]