def FindFace():
    # keep capturing frames until at least one face is found
    faceCount = 0
    while faceCount == 0:
        # grab a still frame from the Pi camera and write it to disk
        with picamera.PiCamera() as camera:
            camera.resolution = (640, 480)
            camera.capture("images/LifeFaceDetection.jpg")

        # load the captured frame and run the Haar-cascade face detector
        image = cv2.imread("images/LifeFaceDetection.jpg")
        fd = FaceDetector()
        faceRects = fd.detect(image)
        faceCount = len(faceRects)

        if faceCount > 0:
            # annotate the frame, log it, and greet the person on the display
            DrawRectangleAndSave(faceRects, image)
            lib.logImages.logFace(image)
            post.postText(700, "Hi!")
            time.sleep(3)
            post.postText(400, "Nice outfit")
            time.sleep(2)
            post.postText(200, "If you have a tennis ball I can take your picture")
            FindTennisBall()
        else:
            print "no faces found"
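The listings in this section rely on a FaceDetector helper class that is not reproduced here. As a point of reference, here is a minimal sketch of what such a wrapper around OpenCV's Haar cascade could look like, assuming the constructor takes a cascade path (the default below mirrors the path used by the other listings and is an assumption) and detect() simply forwards its parameters to detectMultiScale:

import cv2

class FaceDetector:
    # hypothetical sketch of the helper used above: a thin wrapper
    # around cv2.CascadeClassifier (the default path is an assumption)
    def __init__(self, cascadePath="cascades/haarcascade_frontalface_default.xml"):
        self.faceCascade = cv2.CascadeClassifier(cascadePath)

    def detect(self, image, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30)):
        # detectMultiScale returns a list of (x, y, w, h) bounding boxes
        rects = self.faceCascade.detectMultiScale(image,
            scaleFactor=scaleFactor, minNeighbors=minNeighbors,
            minSize=minSize)
        return rects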
ap.add_argument("-c","--cascade",help="path to where the face cascade resides",default="cascades/haarcascade_frontalface_default.xml") ap.add_argument("-e","--eye",help="path to where the eye cascade resides",default="cascades/haarcascade_eye.xml") ap.add_argument("-f","--faces",help="path to the image dataset",default="faces_data/") ap.add_argument("-tf","--testface",help="path to the test image dataset",default="faces_realtime/cloony2.jpg") args = vars(ap.parse_args()) ############################################### #Load faces classifier model from disk pkl_file = open('models/faces_classifier.pkl', 'rb') classifier = pickle.load(pkl_file) pkl_file.close() print "Load model from disk successfully" ############################################### ############################################### fd = FaceDetector(args["cascade"]) desc = RGBHistogram([8,8,8]) ############################################### ############################################### imagePaths = sorted(glob.glob(args["faces"]+"/*.jpg")) target = [] for (imagePath) in imagePaths: image = cv2.imread(imagePath) features = desc.describe(image,None) target.append(imagePath.split("_")[-2]) targetNames = np.unique(target) le = LabelEncoder() target = le.fit_transform(target) ###############################################
ap.add_argument("-f", "--face", default="cascades/haarcascade_frontalface_default.xml", help="path to where the face cascade resides") ap.add_argument("-i", "--image", default="input_face/lena.jpg", help="path to where the image file resides") args = vars(ap.parse_args()) # load the image and convert it to grayscale image = cv2.imread(args["image"]) gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # find faces in the image fd = FaceDetector(args["face"]) faceRects = fd.detect(gray, scaleFactor=1.1, minNeighbors=5, minSize=(100, 100)) print "I found %d face(s)" % (len(faceRects)) # loop over the faces and draw a rectangle around each for (x, y, w, h) in faceRects: cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2) sub_face = image[y:y + h, x:x + w] sub_face = imutils.resize(sub_face, width=200, height=200) sub_face_file_name = "input_face/subface.jpg" cv2.imwrite(sub_face_file_name, sub_face) detected_face_file = "input_face/detected.jpg"
import argparse

import cv2
import imutils
from PIL import Image, ImageFilter
# FaceDetector is the project's Haar-cascade wrapper class

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--face", default="cascades/haarcascade_frontalface_default.xml",
                help="path to where the face cascade resides")
ap.add_argument("-i", "--image", default="input_face/lena.jpg",
                help="path to where the image file resides")
args = vars(ap.parse_args())

# load the image and convert it to grayscale
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# find faces in the image
fd = FaceDetector(args["face"])
faceRects = fd.detect(gray, scaleFactor=1.1, minNeighbors=5, minSize=(100, 100))
print "I found %d face(s)" % (len(faceRects))

# loop over the faces, draw a rectangle around each,
# and save the cropped face region to disk
for (x, y, w, h) in faceRects:
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
    sub_face = image[y:y + h, x:x + w]
    sub_face = imutils.resize(sub_face, width=200, height=200)
    sub_face_file_name = "input_face/subface.jpg"
    cv2.imwrite(sub_face_file_name, sub_face)

# save the annotated image to disk
detected_face_file = "input_face/detected.jpg"
print "Write detected file %s" % detected_face_file
cv2.imwrite(detected_face_file, image)
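This listing imports PIL's Image and ImageFilter, but the excerpt ends before they are used, which suggests the script goes on to filter the cropped face. A minimal sketch of what that step might look like, assuming a Gaussian blur is applied to the saved sub-face (the radius value and output file name are assumptions), is:

from PIL import Image, ImageFilter

# hypothetical continuation: blur the cropped face saved above
sub_face_img = Image.open("input_face/subface.jpg")
blurred_face = sub_face_img.filter(ImageFilter.GaussianBlur(radius=8))
blurred_face.save("input_face/subface_blurred.jpg")  # output name is an assumption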