def __init__(self, face_detector=None, tile_size=(128, 128), subtile_size=(32, 32),
             left_center=pv.Point(39.325481787836871, 50.756936769089975),
             right_center=pv.Point(91.461135538006289, 50.845357457309881),
             validate=None, n_iter=1, annotate=False, **kwargs):
    '''
    Create an eye locator.  This default implementation uses a cascade
    classifier for face detection and then SVR for eye location.

    @param face_detector: face detector used to find faces before eye
        location.  If None (the default) a fresh CascadeDetector is
        created for this instance.  (Previously the default was a single
        CascadeDetector() built once at class-definition time and shared
        by every instance that used the default.)
    @param tile_size: size of the normalized face tile.
    @param subtile_size: size of the sub-tiles sampled from the face tile.
    @param left_center: expected left-eye location in the face tile.
    @param right_center: expected right-eye location in the face tile.
    @param validate: optional validation callable/data passed through.
    @param n_iter: number of location-refinement iterations.
    @param annotate: if True, annotate detections on images.
    @param kwargs: forwarded to createLocators().
    '''
    # TODO: Learn the mean eye locations during training.
    if face_detector is None:
        # Build a per-instance detector so instances never share state.
        face_detector = CascadeDetector()
    self.face_detector = face_detector
    # NOTE(review): left_center/right_center defaults are pv.Point objects
    # created once at definition time; assumed to be treated as read-only.
    self.left_center = left_center
    self.right_center = right_center
    self.tile_size = tile_size
    self.subtile_size = subtile_size
    self.validate = validate
    self.n_iter = n_iter
    self.annotate = annotate
    self.perturbations = True
    # Number of training images where the face detection did not work.
    self.detection_failures = 0
    # point locators that learn to find the eyes.
    self.createLocators(**kwargs)
def test_ASEFEyeLocalization(self):
    '''FilterEyeLocator: Scrapshots Both10 rate == 0.5200...............'''
    # NOTE(review): docstring rate corrected to match the both10_rate
    # assertion below (was 0.4800, which contradicted the test).
    # Pick up an image log from the global scope if the harness created one.
    ilog = None
    if 'ilog' in globals().keys():
        ilog = globals()['ilog']
    # Load a face database
    ssdb = ScrapShotsDatabase()
    # Create a face detector
    face_detector = CascadeDetector()
    # Create an eye locator
    eye_locator = FilterEyeLocator()
    # Create an eye detection test
    edt = EyeDetectionTest(name='asef_scraps')
    #print "Testing..."
    # Only the first 25 faces are evaluated to keep the test fast.
    for face_id in ssdb.keys()[:25]:
        face = ssdb[face_id]
        im = face.image
        # Mark the ground-truth eyes with circles sized to 10% of the
        # inter-eye distance.
        dist = face.left_eye.l2(face.right_eye)
        dist = np.ceil(0.1 * dist)
        im.annotateCircle(face.left_eye, radius=dist, color='white')
        im.annotateCircle(face.right_eye, radius=dist, color='white')
        # Detect the faces
        faces = face_detector.detect(im)
        # Detect the eyes
        pred_eyes = eye_locator(im, faces)
        for rect, leye, reye in pred_eyes:
            im.annotateRect(rect)
            im.annotateCircle(leye, radius=1, color='red')
            im.annotateCircle(reye, radius=1, color='red')
        truth_eyes = [[face.left_eye, face.right_eye]]
        # NOTE(review): if pred_eyes is empty the loop above never binds
        # leye/reye; they would then carry over from a previous face (or
        # raise NameError on the first one) — presumably every scrapshot
        # yields at least one detection.  TODO confirm.
        pred_eyes = [[leye, reye] for rect, leye, reye in pred_eyes]
        # Add to eye detection test
        edt.addSample(truth_eyes, pred_eyes, im=im, annotate=True)
        if ilog != None:
            ilog.log(im, label='test_ASEFEyeLocalization')
    edt.createSummary()
    # Very poor accuracy on the scrapshots database
    self.assertAlmostEqual(edt.face_rate, 1.0000, places=3)
    self.assertAlmostEqual(edt.both25_rate, 0.8800, places=3)
    self.assertAlmostEqual(edt.both10_rate, 0.5200, places=3)
    self.assertAlmostEqual(edt.both05_rate, 0.2800, places=3)
def execute(self, frame, detector=None):
    '''
    Detect faces in a video frame and annotate them.

    @param frame: image to process; each detected face rectangle is
        annotated onto it via frame.annotateRect().
    @param detector: optional face detector.  If None, a default
        CascadeDetector(min_size=(128, 128)) is created on first use and
        returned in the output so callers can reuse it.
    @returns: list of (key, frame_id, value) tuples: the detected face
        rectangles under 'FACES' and the detector under '_FACE_DETECTOR'.
    '''
    if detector is None:
        # Lazily create a detector the first time; print() keeps this
        # consistent with the rest of the file (works on Py2 and Py3).
        print("Initializing Face Detector.")
        detector = CascadeDetector(min_size=(128, 128))
    faces = detector(frame)
    for rect in faces:
        frame.annotateRect(rect)
    # Publish the detector too so downstream calls avoid re-initialization.
    return [('FACES', self.getFrameId(), faces),
            ("_FACE_DETECTOR", self.getFrameId(), detector)]
def __init__(self, parent, id, name, demos=DEMO_DEFAULTS, size=(800, 550)): wx.Frame.__init__(self, parent, id, name, size=size) # ---------------- Basic Data ------------------- self.webcam = Webcam() self.harris = DetectorHarris() self.dog = DetectorDOG(n=100, selector='best') self.face = CascadeDetector() self.demos = demos # ------------- Other Components ---------------- self.CreateStatusBar() # ------------------- Menu ---------------------- # Creating the menubar. # ----------------- Image List ------------------ # --------------- Image Display ----------------- self.static_bitmap = wx.StaticBitmap(self, wx.NewId(), bitmap=wx.EmptyBitmap(640, 480)) self.radios = wx.RadioBox(self, wx.NewId(), 'Demos', choices=['None'] + self.demos.keys(), style=wx.RA_SPECIFY_ROWS) self.mirror = wx.CheckBox(self, wx.NewId(), 'Mirror') self.mirror.SetValue(True) # --------------- Window Layout ----------------- grid = wx.FlexGridSizer(2, 2) grid.Add(self.static_bitmap) grid.Add(self.radios) grid.Add(self.mirror) self.SetAutoLayout(True) self.SetSizer(grid) self.Layout() # ----------------------------------------------- self.timer = FrameTimer(self) self.timer.Start(200) # -------------- Event Handleing ---------------- wx.EVT_SIZE(self.static_bitmap, self.onBitmapResize) wx.EVT_LEFT_DOWN(self.static_bitmap, self.onClick) wx.EVT_TIMER(self, -1, self.onTmp)
def test_detect_scraps_opencv(self):
    '''Run the OpenCV cascade over the scraps set and check the positive rate.'''
    detector = CascadeDetector(OPENCV_CASCADE)
    detection_test = FaceDetectionTest(name='scraps')
    self.eyes = EyesFile(os.path.join(SCRAPS_FACE_DATA, "coords.txt"))
    for stem in self.eyes.files():
        path = os.path.join(SCRAPS_FACE_DATA, stem + ".pgm")
        img = pv.Image(path)
        detections = detector(img)
        ground_truth = self.eyes.getFaces(img.filename)
        detection_test.addSample(ground_truth, detections, im=img)
    # Loose tolerance: only two decimal places are required.
    self.assertAlmostEqual(
        detection_test.pos_rate, 0.98265895953757221, places=2)
    # TODO: Version 2 performance is better
def test_training(self):
    '''
    Train the FaceFinder on the scraps database and check the face rate.
    '''
    scraps_dir = join(pv.__path__[0], 'data', 'csuScrapShots')
    # Ground-truth eye coordinates for the scraps images.
    eyes_filename = join(pv.__path__[0], 'data', 'csuScrapShots', 'coords.txt')
    eyes_file = EyesFile(eyes_filename)
    # Face detector built from the celebdb2 cascade shipped with pyvision.
    cascade_file = join(pv.__path__[0], 'config', 'facedetector_celebdb2.xml')
    face_detector = CascadeDetector(cascade_file)
    image_dir = scraps_dir
    # Train the eye detector from the database (seeded for repeatability).
    ed = SVMEyeDetectorFromDatabase(eyes_file, image_dir, image_ext=".pgm",
                                    face_detector=face_detector, random_seed=0)
    edt = EyeDetectionTest(name='scraps')
    for img in self.images:
        detections = ed.detect(img)
        # Each detection is (rect?, ?, left_eye, right_eye); keep the eyes.
        pred_eyes = [(pleye, preye) for _, _, pleye, preye in detections]
        truth_eyes = self.eyes.getEyes(img.filename)
        edt.addSample(truth_eyes, pred_eyes, im=img, annotate=False)
    self.assertAlmostEqual(edt.face_rate, 0.924855491329, places=3)
def __init__(self, face_detector=None, tile_size=(128, 128), validate=None,
             n_iter=1, annotate=False, **kwargs):
    '''
    Create an eye locator.  This default implementation uses a cascade
    classifier for face detection and then SVR for eye location.

    @param face_detector: face detector used before eye location.  If
        None (the default) a fresh CascadeDetector is created for this
        instance.  (Previously the default was one CascadeDetector()
        built at class-definition time and shared by all instances.)
    @param tile_size: size of the normalized face tile.
    @param validate: optional validation callable/data passed through.
    @param n_iter: number of location-refinement iterations.
    @param annotate: if True, annotate detections on images.
    @param kwargs: accepted for interface compatibility; unused here.
    '''
    if face_detector is None:
        # Build a per-instance detector so instances never share state.
        face_detector = CascadeDetector()
    self.face_detector = face_detector
    self.left_eye = None
    self.right_eye = None
    self.tile_size = tile_size
    self.validate = validate
    self.n_iter = n_iter
    self.annotate = annotate
    self.perturbations = True
    # this object handles pca normalization
    self.normalize = VectorClassifier.VectorClassifier(
        VectorClassifier.TYPE_REGRESSION,
        reg_norm=VectorClassifier.REG_NORM_NONE)
    # point locators that learn to find the eyes.
    self.left_locator = SVMLocator(
        svm_type=SVM.TYPE_NU_SVR, normalization=VectorClassifier.NORM_NONE)
    self.right_locator = SVMLocator(
        svm_type=SVM.TYPE_NU_SVR, normalization=VectorClassifier.NORM_NONE)
    # Number of training images where the face detection did not work.
    self.detection_failures = 0
    self.training_labels = []
import pyvision as pv
import cv2
from pyvision.face.CascadeDetector import CascadeDetector
from pyvision.face.FilterEyeLocator import FilterEyeLocator


def mouseCallback(event, x, y, flags, param):
    # Print left-button press/release events; other events are ignored.
    if event in [cv2.EVENT_LBUTTONDOWN, cv2.EVENT_LBUTTONUP]:
        print("Mouse Event:", event, x, y)


if __name__ == '__main__':
    # Setup the webcam
    webcam = pv.Webcam(size=(640, 360))
    # Setup the face and eye detectors
    cd = CascadeDetector(min_size=(100, 100))
    el = FilterEyeLocator()
    # Setup the mouse callback to handle mouse events (optional)
    cv2.namedWindow("PyVision Live Demo")
    cv2.setMouseCallback("PyVision Live Demo", mouseCallback)
    while True:
        # Grab a frame from the webcam
        frame = webcam.query()
        # Run Face and Eye Detection
        rects = cd(frame)
        eyes = el(frame, rects)
        # Annotate the result
        # NOTE(review): this chunk appears truncated here — the loop body
        # presumably continues with annotation/display code not visible in
        # this view.
from pyvision.face.FilterEyeLocator import FilterEyeLocator

# NOTE(review): this chunk relies on names imported elsewhere in the file
# (pv, os, CascadeDetector) — confirm they are in scope at module level.
if __name__ == "__main__":
    ilog = pv.ImageLog()
    # Load the face image file
    fname = os.path.join(pv.__path__[0], 'data', 'misc', 'FaceSample.jpg')
    # Create the annotation image in black and white so that color
    # annotations show up better.
    im = pv.Image(fname, bw_annotate=True)
    ilog(pv.Image(fname), "Original")
    # Create a OpenCV cascade face detector object
    cd = CascadeDetector()
    # Create an eye detector object
    el = FilterEyeLocator()
    # Call the face detector like a function to get a list of face rectangles
    rects = cd(im)
    # print the list of rectangles
    print("Face Detection Output:", rects)
    # Also call the eye detector like a function with the original image and
    # the list of face detections to locate the eyes.
    eyes = el(im, rects)
    # print the list of eyes. Format [ [ face_rect, left_eye, right_eye], ...]
def __init__(self):
    '''Create the demo with a CascadeDetector using default parameters.'''
    self.face = CascadeDetector()
def __init__(self):
    '''Create the demo with a CascadeDetector at image_scale=0.4.'''
    # image_scale=0.4 downsamples before detection — presumably a
    # speed/accuracy trade-off for live video.  TODO confirm.
    self.face = CascadeDetector(image_scale=0.4)
def __init__(self, parent, id, name, size=(640, 672)):
    '''
    Create all the windows and controls used for the FaceL window:
    face/eye/recognition processing objects, webcam, menus, video display,
    enrollment controls, sizers, a 200ms frame timer, and event bindings.
    '''
    wx.Frame.__init__(self, parent, id, name, size=size)
    self.CenterOnScreen(wx.HORIZONTAL)
    self.timing_window = None  # Initialize timing window
    # ------------- Face Processing -----------------
    self.face_detector = CascadeDetector(cascade_name=CASCADE_NAME,
                                         image_scale=0.5)
    self.fel = FilterEyeLocator(FEL_NAME)
    self.face_rec = SVMFaceRec()
    # SVM tuning defaults; SVM_AUTOMATIC presumably retunes C/Gamma.
    self.svm_mode = SVM_AUTOMATIC
    self.svm_C = 4.000e+00
    self.svm_Gamma = 9.766e-04
    self.current_faces = []
    # Enrollment state: who is being enrolled and how many samples so far.
    self.enrolling = None
    self.enroll_count = 0
    self.enroll_max = 32
    self.enroll_list = []
    self.previous_time = time.time()
    self.arialblack24 = PIL.ImageFont.truetype(ARIAL_BLACK_NAME, 24)
    # ---------------- Basic Data -------------------
    try:
        self.webcam = Webcam()
    except SystemExit:
        raise
    except:
        # Camera failed to open: show the traceback dialog and exit.
        trace = traceback.format_exc()
        message = TraceBackDialog(None, "Camera Error", CAMERA_ERROR, trace)
        message.ShowModal()
        sys.stderr.write(
            "FaceL Error: an error occurred while trying to connect to the camera. Details follow.\n\n"
        )
        sys.stderr.write(trace)
        sys.exit(CAMERA_ERROR_CODE)
    # ------------- Other Components ----------------
    self.CreateStatusBar()
    # ------------------- Menu ----------------------
    # Creating the menubar.
    # Menu IDs
    license_id = wx.NewId()
    mirror_id = wx.NewId()
    face_id = wx.NewId()
    svm_tune_id = wx.NewId()
    performance_id = wx.NewId()
    # Menu Items
    self.file_menu = wx.Menu()
    self.file_menu.Append(wx.ID_ABOUT, "&About...")
    self.file_menu.Append(license_id, "FaceL License...")
    self.file_menu.AppendSeparator()
    self.file_menu.Append(wx.ID_EXIT, "E&xit")
    self.options_menu = wx.Menu()
    self.face_menuitem = self.options_menu.AppendCheckItem(
        face_id, "Face Processing")
    # NOTE(review): "Eye Detection" is registered under face_id as well —
    # looks like it should have its own wx.NewId(); confirm against the
    # menu handlers before changing.
    self.eye_menuitem = self.options_menu.AppendCheckItem(
        face_id, "Eye Detection")
    self.mirror_menuitem = self.options_menu.AppendCheckItem(
        mirror_id, "Mirror Video")
    self.options_menu.AppendSeparator()
    self.options_menu.Append(svm_tune_id, "SVM Tuning...")
    self.options_menu.Append(performance_id, "Performance...")
    # Create Menu Bar
    self.menu_bar = wx.MenuBar()
    self.menu_bar.Append(self.file_menu, "&File")
    self.menu_bar.Append(self.options_menu, "&Options")
    self.SetMenuBar(self.menu_bar)
    # Menu Events
    wx.EVT_MENU(self, wx.ID_ABOUT, self.onAbout)
    wx.EVT_MENU(self, license_id, self.onLicense)
    wx.EVT_MENU(self, mirror_id, self.onNull)
    wx.EVT_MENU(self, face_id, self.onNull)
    wx.EVT_MENU(self, svm_tune_id, self.onSVMTune)
    wx.EVT_MENU(self, performance_id, self.onTiming)
    # Set up menu checks
    self.face_menuitem.Check(True)
    self.eye_menuitem.Check(True)
    self.mirror_menuitem.Check(True)
    # ----------------- Image List ------------------
    # --------------- Image Display -----------------
    self.static_bitmap = wx.StaticBitmap(self, wx.NewId(),
                                         bitmap=wx.EmptyBitmap(640, 480))
    self.controls_box = wx.StaticBox(self, wx.NewId(), "Controls")
    self.facel_logo = wx.StaticBitmap(self, wx.NewId(),
                                      bitmap=wx.Bitmap(FACEL_LOGO))
    self.csu_logo = wx.StaticBitmap(self, wx.NewId(),
                                    bitmap=wx.Bitmap(CSU_LOGO))
    # self.performance_box = wx.StaticBox(self, wx.NewId(), "Performance")
    # NOTE(review): 'chioce' is a typo but renaming the attribute would
    # break any external references — left as-is.
    self.enroll_chioce_label = wx.StaticText(self, wx.NewId(),
                                             "Enrollment Count:",
                                             style=wx.ALIGN_LEFT)
    self.enroll_choice = wx.Choice(self, wx.NewId(), wx.Point(0, 0),
                                   wx.Size(-1, -1),
                                   ['16', '32', '48', '64', '128', '256'])
    self.enroll_choice.Select(3)
    self.train_button = wx.Button(self, wx.NewId(), 'Train Labeler')
    self.reset_button = wx.Button(self, wx.NewId(), 'Clear Labels')
    # --------------- Instrumentation ---------------
    self.enroll_label = wx.StaticText(
        self, wx.NewId(), "Click a face in the video to enroll.",
        style=wx.ALIGN_LEFT)
    self.ids_label = wx.StaticText(self, wx.NewId(), "Labels:",
                                   size=wx.Size(-1, 16),
                                   style=wx.ALIGN_LEFT)
    self.ids_text = wx.StaticText(self, wx.NewId(), size=wx.Size(30, 16),
                                  style=wx.ALIGN_RIGHT)
    self.faces_label = wx.StaticText(self, wx.NewId(), "Faces:",
                                     size=wx.Size(-1, 16),
                                     style=wx.ALIGN_LEFT)
    self.faces_text = wx.StaticText(self, wx.NewId(), size=wx.Size(30, 16),
                                    style=wx.ALIGN_RIGHT)
    # --------------- Window Layout -----------------
    enroll_sizer = wx.BoxSizer(wx.HORIZONTAL)
    enroll_sizer.Add(self.ids_label, flag=wx.ALIGN_CENTER | wx.ALL, border=4)
    enroll_sizer.Add(self.ids_text, flag=wx.ALIGN_CENTER | wx.ALL, border=4)
    enroll_sizer.AddSpacer(20)
    enroll_sizer.Add(self.faces_label, flag=wx.ALIGN_CENTER | wx.ALL,
                     border=4)
    enroll_sizer.Add(self.faces_text, flag=wx.ALIGN_CENTER | wx.ALL,
                     border=4)
    training_sizer = wx.BoxSizer(wx.HORIZONTAL)
    training_sizer.Add(self.train_button, flag=wx.ALIGN_CENTER | wx.ALL,
                       border=4)
    training_sizer.Add(self.reset_button, flag=wx.ALIGN_CENTER | wx.ALL,
                       border=4)
    enroll_choice_sizer = wx.BoxSizer(wx.HORIZONTAL)
    enroll_choice_sizer.Add(self.enroll_chioce_label,
                            flag=wx.ALIGN_CENTER | wx.ALL, border=0)
    enroll_choice_sizer.Add(self.enroll_choice,
                            flag=wx.ALIGN_CENTER | wx.ALL, border=0)
    controls_sizer = wx.StaticBoxSizer(
        self.controls_box, wx.VERTICAL)  #wx.BoxSizer(wx.VERTICAL)
    controls_sizer.Add(self.enroll_label, flag=wx.ALIGN_LEFT | wx.ALL,
                       border=0)
    controls_sizer.Add(enroll_sizer, flag=wx.ALIGN_LEFT | wx.ALL, border=0)
    controls_sizer.Add(enroll_choice_sizer, flag=wx.ALIGN_LEFT | wx.ALL,
                       border=4)
    controls_sizer.Add(training_sizer, flag=wx.ALIGN_LEFT | wx.ALL,
                       border=0)
    bottom_sizer = wx.BoxSizer(wx.HORIZONTAL)
    bottom_sizer.Add(self.facel_logo, flag=wx.ALIGN_CENTER | wx.ALL,
                     border=0)
    bottom_sizer.Add(controls_sizer, flag=wx.ALIGN_TOP | wx.ALL, border=4)
    bottom_sizer.Add(self.csu_logo, flag=wx.ALIGN_CENTER | wx.ALL, border=0)
    main_sizer = wx.BoxSizer(wx.VERTICAL)
    main_sizer.Add(self.static_bitmap, flag=wx.ALIGN_CENTER | wx.ALL,
                   border=0)
    main_sizer.Add(bottom_sizer, flag=wx.ALIGN_CENTER | wx.ALL, border=4)
    self.SetAutoLayout(True)
    self.SetSizer(main_sizer)
    self.Layout()
    # -----------------------------------------------
    # Fire a timer event every 200ms to drive frame capture.
    self.timer = FrameTimer(self)
    self.timer.Start(200)
    # -------------- Event Handleing ----------------
    wx.EVT_SIZE(self.static_bitmap, self.onBitmapResize)
    wx.EVT_LEFT_DOWN(self.static_bitmap, self.onClick)
    self.Bind(wx.EVT_BUTTON, self.onTrain, id=self.train_button.GetId())
    self.Bind(wx.EVT_BUTTON, self.onReset, id=self.reset_button.GetId())
    # --------------- Setup State -------------------
    self.setupState()
# Open the file to use as output. f = open(args[1],'wb') csv_file = csv.writer(f) headers = ['image_name','detect_number','detect_x','detect_y','detect_width','detect_height','eye1_x','eye1_y','eye2_x','eye2_y'] csv_file.writerow(headers) # Create an image log if this is being saved to a file. ilog = None if options.log_dir != None: print("Creating Image Log...") ilog = pv.ImageLog(options.log_dir) # For each image run face and eye detection face_detect = CascadeDetector(image_scale=1.3*options.scale) locate_eyes = FilterEyeLocator()#locator_filename) c = 0 for pathname in image_names: c += 1 im = pv.Image(pathname) scale = options.log_scale log_im = pv.AffineScale(scale,(int(scale*im.width),int(scale*im.height))).transformImage(im) results = processFaces(im,face_detect,locate_eyes)