def addTraining(self, left_eye, right_eye, im):
    '''Train the left/right eye locators given a full image and ground-truth eye coordinates.'''
    # Ground-truth face rectangle implied by the annotated eye points.
    truth = face_from_eyes(left_eye, right_eye)
    # Use the first face detection that overlaps the truth rectangle.
    for detection in self.face_detector.detect(im):
        if not is_success(detection, truth):
            continue
        left_xform, right_xform = self.generateTransforms(detection)
        # Crop each eye region and normalize to zero mean / unit std.
        left_tile = pv.meanStd(left_xform.transformImage(im))
        right_tile = pv.meanStd(right_xform.transformImage(im))
        # Map the truth eye points into each tile's coordinate frame.
        self.left_locator.addTraining(left_tile, left_xform.transformPoint(left_eye))
        self.right_locator.addTraining(right_tile, right_xform.transformPoint(right_eye))
        # Only the first successful detection is used.
        return
    # No detection matched the annotated face.
    self.detection_failures += 1
def addTraining(self, left_eye, right_eye, im):
    '''Add one training example: find the annotated face and train both eye locators.'''
    target = face_from_eyes(left_eye, right_eye)
    # Scan detections for the first one that agrees with the annotation.
    matched = None
    for candidate in self.face_detector.detect(im):
        if is_success(candidate, target):
            matched = candidate
            break
    if matched is None:
        # Detector never found the annotated face in this image.
        self.detection_failures += 1
        return
    left_aff, right_aff = self.generateTransforms(matched)
    # Crop and normalize the two eye tiles.
    left_img = pv.meanStd(left_aff.transformImage(im))
    right_img = pv.meanStd(right_aff.transformImage(im))
    # Eye labels expressed in tile coordinates.
    left_pt = left_aff.transformPoint(left_eye)
    right_pt = right_aff.transformPoint(right_eye)
    self.left_locator.addTraining(left_img, left_pt)
    self.right_locator.addTraining(right_img, right_pt)
def findFace(self, filename, rect):
    '''
    Return the truth record for *filename* whose annotated face rectangle
    overlaps *rect*, or None when the file is unknown or nothing matches.

    The truth rectangle is stored at index 3 of each per-face record.
    '''
    fname = self._parseName(filename)
    # `in` replaces dict.has_key(), which was removed in Python 3.
    if fname in self.images:
        for each in self.images[fname]:
            truth_rect = each[3]
            if is_success(truth_rect, rect):
                return each
    return None
def findFace(self, filename, rect):
    '''
    Look up the truth record for *filename* whose face rectangle overlaps
    *rect*; return that record, or None if no overlap (or unknown file).
    '''
    fname = self._parseName(filename)
    # Membership test instead of dict.has_key() (removed in Python 3).
    if fname in self.images:
        for each in self.images[fname]:
            # Index 3 of each record holds the ground-truth rectangle.
            truth_rect = each[3]
            if is_success(truth_rect, rect):
                return each
    return None
def addTraining(self, left_eye, right_eye, im):
    '''Train the joint eye locator given a full image and ground-truth eye coordinates.'''
    truth = face_from_eyes(left_eye, right_eye)
    for detection in self.face_detector.detect(im):
        if not is_success(detection, truth):
            continue
        # Crop the detected face into the standard tile.
        xform = pv.AffineFromRect(detection, self.tile_size)
        w, h = self.tile_size
        if self.perturbations:
            # Compose a random rotate/scale/translate about the tile center
            # (RNG call order: rotate, scale, translate-x, translate-y).
            to_origin = pv.AffineTranslate(-0.5 * w, -0.5 * h, self.tile_size)
            rot = pv.AffineRotate(random.uniform(-pi / 8, pi / 8), self.tile_size)
            sc = pv.AffineScale(random.uniform(0.9, 1.1), self.tile_size)
            shift = pv.AffineTranslate(random.uniform(-0.05 * w, 0.05 * w),
                                       random.uniform(-0.05 * h, 0.05 * h),
                                       self.tile_size)
            from_origin = pv.AffineTranslate(0.5 * w, 0.5 * h, self.tile_size)
            xform = from_origin * shift * sc * rot * to_origin * xform
        # Normalized face tile.
        tile = pv.meanStd(xform.transformImage(im))
        # Record the eye locations (in tile coordinates) as the labels.
        self.training_labels.append((xform.transformPoint(left_eye),
                                     xform.transformPoint(right_eye)))
        self.normalize.addTraining(0.0, tile)
        # Only the first successful detection is used.
        return
    # The face was not detected.
    self.detection_failures += 1
def addTraining(self, left_eye, right_eye, im):
    '''Add one training example for the combined eye locator.'''
    target = face_from_eyes(left_eye, right_eye)
    # Find the first detection overlapping the annotated face.
    matched = None
    for candidate in self.face_detector.detect(im):
        if is_success(candidate, target):
            matched = candidate
            break
    if matched is None:
        self.detection_failures += 1
        return
    xform = pv.AffineFromRect(matched, self.tile_size)
    w, h = self.tile_size
    if self.perturbations:
        # Random jitter composed about the tile center; the order of the
        # random.uniform() calls (rotate, scale, translate-x, translate-y)
        # is preserved so an identical RNG state yields identical tiles.
        pre = pv.AffineTranslate(-0.5 * w, -0.5 * h, self.tile_size)
        rot = pv.AffineRotate(random.uniform(-pi / 8, pi / 8), self.tile_size)
        zoom = pv.AffineScale(random.uniform(0.9, 1.1), self.tile_size)
        jitter = pv.AffineTranslate(random.uniform(-0.05 * w, 0.05 * w),
                                    random.uniform(-0.05 * h, 0.05 * h),
                                    self.tile_size)
        post = pv.AffineTranslate(0.5 * w, 0.5 * h, self.tile_size)
        xform = post * jitter * zoom * rot * pre * xform
    tile = pv.meanStd(xform.transformImage(im))
    self.training_labels.append((xform.transformPoint(left_eye),
                                 xform.transformPoint(right_eye)))
    self.normalize.addTraining(0.0, tile)
def onFrame(self, img):
    """
    Process one video frame: detect faces, annotate the frame, handle
    enrollment bookkeeping, publish person entered/exited events, and
    publish the annotated image.

    @param img: the camera frame (pv.Image-style object with annotate* methods).
    """
    self.eye_time = 0.0
    names = []   # [face midpoint, predicted label] for recognized faces
    nFaces = 0
    if self.face_processing:
        faces = self.findFaces(img)
        nFaces = len(faces)
        if self.enrolling != None:
            # Enrollment mode: accept the first detection overlapping the
            # enrollment rectangle (blue); mark all other detections red.
            success = None
            for rect, leye, reye in faces:
                img.annotateRect(self.enrolling, color="yellow")
                if (success == None) and is_success(self.enrolling, rect):
                    success = rect
                    img.annotateRect(rect, color="blue")
                    if self.eye_processing:
                        img.annotatePoint(leye, color="blue")
                        img.annotatePoint(reye, color="blue")
                    self.enroll_list.append([img, rect, leye, reye])
                else:
                    img.annotateRect(rect, color="red")
                    if self.eye_processing:
                        img.annotatePoint(leye, color="red")
                        img.annotatePoint(reye, color="red")
                    # Cross out rejected detections.
                    img.annotateLine(pv.Point(rect.x, rect.y),
                                     pv.Point(rect.x + rect.w, rect.y + rect.h),
                                     color="red")
                    img.annotateLine(pv.Point(rect.x + rect.w, rect.y),
                                     pv.Point(rect.x, rect.y + rect.h),
                                     color="red")
            if success == None:
                # No usable detection this frame: cross out the enrollment box.
                rect = self.enrolling
                img.annotateLine(pv.Point(rect.x, rect.y),
                                 pv.Point(rect.x + rect.w, rect.y + rect.h),
                                 color="yellow")
                img.annotateLine(pv.Point(rect.x + rect.w, rect.y),
                                 pv.Point(rect.x, rect.y + rect.h),
                                 color="yellow")
            else:
                # enroll in the identification algorithm
                pass
        else:
            for rect, leye, reye in faces:
                img.annotateRect(rect, color="blue")
                if self.eye_processing:
                    img.annotatePoint(leye, color="blue")
                    img.annotatePoint(reye, color="blue")
            # NOTE(review): this second pass re-annotates every face and
            # ignores the eye_processing flag -- looks like leftover code,
            # but it is kept to preserve the existing on-screen output.
            for rect, leye, reye in faces:
                img.annotateRect(rect, color="blue")
                img.annotatePoint(leye, color="blue")
                img.annotatePoint(reye, color="blue")
        if self.isTrained:
            self.label_time = time.time()
            for rect, leye, reye in faces:
                if self.face_rec.isTrained():
                    label = self.face_rec.predict(img, leye, reye)
                    names.append([0.5 * (leye + reye), label])
            self.label_time = time.time() - self.label_time
    im = img.asAnnotated()
    # Flip to mirror image
    if self.image_flip:
        im = im.transpose(FLIP_LEFT_RIGHT)
    if self.enrolling != None:
        # Wake any thread waiting on enrollment progress.
        self.enrollCondition.acquire()
        self.enroll_count += 1
        self.enrollCondition.notify()
        self.enrollCondition.release()
        # Draw the enrollment progress text on the image.
        draw = PIL.ImageDraw.Draw(im)
        x, y = self.enrolling.x, self.enrolling.y
        if self.image_flip:
            xsize, ysize = im.size
            x = xsize - (x + self.enrolling.w)
        draw.text(
            (x + 10, y + 10),
            "Enrolling: %2d of %2d" % (self.enroll_count, self.enroll_max),
            fill="yellow",
            font=self.arialblack24,
        )
        del draw
    facesEntered = []
    if len(names) > 0:
        draw = PIL.ImageDraw.Draw(im)
        for pt, name in names:
            # Draw the name above the face midpoint.
            x, y = pt.X(), pt.Y()
            w, h = draw.textsize(name, font=self.arialblack24)
            if self.image_flip:
                xsize, ysize = im.size
                x = xsize - x - 0.5 * w
            else:
                x = x - 0.5 * w
            draw.text((x, y - 20 - h), name, fill="green", font=self.arialblack24)
            facesEntered.append(name)
            # Publish only new names.
            if name not in self.faceNames:
                # BUGFIX: the local was previously named `str`, shadowing the builtin.
                log_text = "seeing %s" % name
                rospy.loginfo(log_text)
                self.namesPub.publish(String(name))
                self.publishPersonEvent(name, "entered")
        del draw
    # Find all of the faces that are no longer detected.
    for name in self.faceNames:
        if name not in facesEntered:
            self.publishPersonEvent(name, "exited")
    # print "{0} {1} {2}".format(nFaces, self.faceCount, len(facesEntered))
    nFaces = nFaces - len(facesEntered)
    # For unidentified faces figure out how many entered/exited.
    if (nFaces - self.faceCount) > 0:
        self.publishPersonEvent("unknown", "entered")
    if (nFaces - self.faceCount) < 0:
        self.publishPersonEvent("unknown", "exited")
    # Update all for the next round.
    self.faceNames = facesEntered
    self.faceCount = nFaces
    # Publish the annotated image.
    cv_img = self.PIL_to_opencv(im)
    # cv_img = self.pyvision_to_opencv(img)
    msg = self.bridge.cv_to_imgmsg(cv_img, encoding="rgb8")
    self.imagePub.publish(msg)
def addSample(self, truth_eyes, detected_eyes, im=None, annotate=False):
    '''
    Score one image: match each ground-truth eye pair against the detected
    eye pairs and record per-face accuracy statistics in the summary table.

    @param truth_eyes: list of (left, right) ground-truth eye point pairs.
    @param detected_eyes: list of (left, right) predicted eye point pairs.
    @param im: pv.Image, filename string, or None -- used only for the
        record name and the running pixel count.
    @param annotate: unused in this method -- TODO confirm callers.
    '''
    self.images += 1
    # Resolve a display name; pixel accounting only works for pv.Image input.
    if isinstance(im,pv.Image):
        name = im.filename
        if self.pixels != None:
            self.pixels += im.asPIL().size[0] * im.asPIL().size[1]
    elif isinstance(im,str):
        name = im
        self.pixels = None
    else:
        name = "%d"%self.sample_id
        self.pixels = None
    self.sample_id += 1
    self.stop_time = time.time()
    for tl,tr in truth_eyes:
        tface = face_from_eyes(tl,tr)
        # Reset per-face statistics; None/False means "no matching detection".
        detect_face = False
        eye_dist = None
        detect_b25 = False
        detect_b10 = False
        detect_b05 = False
        detect_l25 = False
        detect_l10 = False
        detect_l05 = False
        detect_r25 = False
        detect_r10 = False
        detect_r05 = False
        # NOTE(review): eye_dist is reset twice above/below -- harmless duplicate.
        eye_dist = None
        tl_x = None
        tl_y = None
        tr_x = None
        tr_y = None
        pl_x = None
        pl_y = None
        pr_x = None
        pr_y = None
        dlx = None
        dly = None
        dl2 = None
        dl = None
        dlfrac= None
        drx = None
        dry = None
        dr2 = None
        dr = None
        drfrac= None
        deye = None
        dmean = None
        # Use the first detection whose implied face rect matches this truth face.
        for pl,pr in detected_eyes:
            dface = face_from_eyes(pl,pr)
            if not self.test_detect or is_success(tface,dface):
                tl_x = tl.X()
                tl_y = tl.Y()
                tr_x = tr.X()
                tr_y = tr.Y()
                eye_dist = math.sqrt((tl_x-tr_x)*(tl_x-tr_x) + (tl_y-tr_y)*(tl_y-tr_y))
                pl_x = pl.X()
                pl_y = pl.Y()
                pr_x = pr.X()
                pr_y = pr.Y()
                detect_face = True
                # NOTE(review): eye_dist is recomputed here with the same operands.
                eye_dist = math.sqrt((tl_x-tr_x)*(tl_x-tr_x) + (tl_y-tr_y)*(tl_y-tr_y))
                # Left-eye error, absolute and as a fraction of eye distance.
                dlx = pl_x-tl_x
                dly = pl_y-tl_y
                dl2 = dlx*dlx + dly*dly
                dl = math.sqrt(dl2)
                dlfrac = dl/eye_dist
                # Right-eye error, absolute and as a fraction of eye distance.
                drx = pr_x-tr_x
                dry = pr_y-tr_y
                dr2 = drx*drx + dry*dry
                dr = math.sqrt(dr2)
                drfrac = dr/eye_dist
                deye = max(drfrac,dlfrac)
                dmean = 0.5*(dr+dl)
                # Success flags at the 25%/10%/5% interocular-distance thresholds.
                detect_l25 = 0.25 > dlfrac
                detect_l10 = 0.10 > dlfrac
                detect_l05 = 0.05 > dlfrac
                detect_r25 = 0.25 > drfrac
                detect_r10 = 0.10 > drfrac
                detect_r05 = 0.05 > drfrac
                detect_b25 = 0.25 > deye
                detect_b10 = 0.10 > deye
                detect_b05 = 0.05 > deye
                break
        # Write one table row per truth face.
        self.table.setElement(self.faces,'name',name)
        self.table.setElement(self.faces,'detect_face',detect_face)
        self.table.setElement(self.faces,'detect_l25',detect_l25)
        self.table.setElement(self.faces,'detect_l10',detect_l10)
        self.table.setElement(self.faces,'detect_l05',detect_l05)
        self.table.setElement(self.faces,'detect_r25',detect_r25)
        self.table.setElement(self.faces,'detect_r10',detect_r10)
        self.table.setElement(self.faces,'detect_r05',detect_r05)
        self.table.setElement(self.faces,'detect_b25',detect_b25)
        self.table.setElement(self.faces,'detect_b10',detect_b10)
        self.table.setElement(self.faces,'detect_b05',detect_b05)
        self.table.setElement(self.faces,'eye_dist',eye_dist)
        self.table.setElement(self.faces,'truth_lx',tl_x)
        self.table.setElement(self.faces,'truth_ly',tl_y)
        self.table.setElement(self.faces,'truth_rx',tr_x)
        self.table.setElement(self.faces,'truth_ry',tr_y)
        self.table.setElement(self.faces,'pred_lx',pl_x)
        self.table.setElement(self.faces,'pred_ly',pl_y)
        self.table.setElement(self.faces,'pred_rx',pr_x)
        self.table.setElement(self.faces,'pred_ry',pr_y)
        self.table.setElement(self.faces,'dlx',dlx)
        self.table.setElement(self.faces,'dly',dly)
        #self.table.setElement(self.faces,'dl2',dl2)
        self.table.setElement(self.faces,'dl',dl) # BUGFIX: 20080813 This was outputing dl2.
        self.table.setElement(self.faces,'dlfrac',dlfrac)
        self.table.setElement(self.faces,'drx',drx)
        self.table.setElement(self.faces,'dry',dry)
        #self.table.setElement(self.faces,'dr2',dr2)
        self.table.setElement(self.faces,'dr',dr)
        self.table.setElement(self.faces,'drfrac',drfrac)
        self.table.setElement(self.faces,'deye',deye)
        self.table.setElement(self.faces,'dmean',dmean)
        self.faces += 1
        # Accumulate summary statistics for this run.
        if dlfrac != None:
            self.bothsse += dlfrac**2 + drfrac**2
            self.leftsse += dlfrac**2
            self.rightsse += drfrac**2
        if detect_face:
            self.face_successes += 1
        if detect_b25:
            self.both25_successes += 1
        if detect_l25:
            self.left25_successes += 1
        if detect_r25:
            self.right25_successes += 1
        if detect_b10:
            self.both10_successes += 1
        if detect_l10:
            self.left10_successes += 1
        if detect_r10:
            self.right10_successes += 1
        if detect_b05:
            self.both05_successes += 1
        if detect_l05:
            self.left05_successes += 1
        if detect_r05:
            self.right05_successes += 1
def addSample(self, truth_eyes, detected_eyes, im=None, annotate=False):
    '''
    Score one image by comparing detected eye pairs to ground truth and
    recording per-face accuracy statistics in the summary table.

    @param truth_eyes: list of (left, right) ground-truth eye point pairs.
    @param detected_eyes: list of (left, right) predicted eye point pairs.
    @param im: pv.Image, filename string, or None -- only used for the
        record name and pixel accounting.
    @param annotate: unused in this method -- TODO confirm callers.
    '''
    self.images += 1
    # Resolve a display name; pixel accounting only works for pv.Image input.
    if isinstance(im, pv.Image):
        name = im.filename
        if self.pixels != None:
            self.pixels += im.asPIL().size[0] * im.asPIL().size[1]
    elif isinstance(im, str):
        name = im
        self.pixels = None
    else:
        name = "%d" % self.sample_id
        self.pixels = None
    self.sample_id += 1
    self.stop_time = time.time()
    for tl, tr in truth_eyes:
        tface = face_from_eyes(tl, tr)
        # Reset per-face statistics; None/False means "no matching detection".
        detect_face = False
        eye_dist = None
        detect_b25 = False
        detect_b10 = False
        detect_b05 = False
        detect_l25 = False
        detect_l10 = False
        detect_l05 = False
        detect_r25 = False
        detect_r10 = False
        detect_r05 = False
        # NOTE(review): eye_dist is reset twice -- harmless duplicate.
        eye_dist = None
        tl_x = None
        tl_y = None
        tr_x = None
        tr_y = None
        pl_x = None
        pl_y = None
        pr_x = None
        pr_y = None
        dlx = None
        dly = None
        dl2 = None
        dl = None
        dlfrac = None
        drx = None
        dry = None
        dr2 = None
        dr = None
        drfrac = None
        deye = None
        dmean = None
        # Use the first detection whose implied face rect matches this truth face.
        for pl, pr in detected_eyes:
            dface = face_from_eyes(pl, pr)
            if not self.test_detect or is_success(tface, dface):
                tl_x = tl.X()
                tl_y = tl.Y()
                tr_x = tr.X()
                tr_y = tr.Y()
                eye_dist = math.sqrt((tl_x - tr_x) * (tl_x - tr_x) + (tl_y - tr_y) * (tl_y - tr_y))
                pl_x = pl.X()
                pl_y = pl.Y()
                pr_x = pr.X()
                pr_y = pr.Y()
                detect_face = True
                # NOTE(review): eye_dist is recomputed with identical operands.
                eye_dist = math.sqrt((tl_x - tr_x) * (tl_x - tr_x) + (tl_y - tr_y) * (tl_y - tr_y))
                # Left-eye error, absolute and as a fraction of eye distance.
                dlx = pl_x - tl_x
                dly = pl_y - tl_y
                dl2 = dlx * dlx + dly * dly
                dl = math.sqrt(dl2)
                dlfrac = dl / eye_dist
                # Right-eye error, absolute and as a fraction of eye distance.
                drx = pr_x - tr_x
                dry = pr_y - tr_y
                dr2 = drx * drx + dry * dry
                dr = math.sqrt(dr2)
                drfrac = dr / eye_dist
                deye = max(drfrac, dlfrac)
                dmean = 0.5 * (dr + dl)
                # Success flags at the 25%/10%/5% interocular-distance thresholds.
                detect_l25 = 0.25 > dlfrac
                detect_l10 = 0.10 > dlfrac
                detect_l05 = 0.05 > dlfrac
                detect_r25 = 0.25 > drfrac
                detect_r10 = 0.10 > drfrac
                detect_r05 = 0.05 > drfrac
                detect_b25 = 0.25 > deye
                detect_b10 = 0.10 > deye
                detect_b05 = 0.05 > deye
                break
        # Write one table row per truth face.
        self.table.setElement(self.faces, 'name', name)
        self.table.setElement(self.faces, 'detect_face', detect_face)
        self.table.setElement(self.faces, 'detect_l25', detect_l25)
        self.table.setElement(self.faces, 'detect_l10', detect_l10)
        self.table.setElement(self.faces, 'detect_l05', detect_l05)
        self.table.setElement(self.faces, 'detect_r25', detect_r25)
        self.table.setElement(self.faces, 'detect_r10', detect_r10)
        self.table.setElement(self.faces, 'detect_r05', detect_r05)
        self.table.setElement(self.faces, 'detect_b25', detect_b25)
        self.table.setElement(self.faces, 'detect_b10', detect_b10)
        self.table.setElement(self.faces, 'detect_b05', detect_b05)
        self.table.setElement(self.faces, 'eye_dist', eye_dist)
        self.table.setElement(self.faces, 'truth_lx', tl_x)
        self.table.setElement(self.faces, 'truth_ly', tl_y)
        self.table.setElement(self.faces, 'truth_rx', tr_x)
        self.table.setElement(self.faces, 'truth_ry', tr_y)
        self.table.setElement(self.faces, 'pred_lx', pl_x)
        self.table.setElement(self.faces, 'pred_ly', pl_y)
        self.table.setElement(self.faces, 'pred_rx', pr_x)
        self.table.setElement(self.faces, 'pred_ry', pr_y)
        self.table.setElement(self.faces, 'dlx', dlx)
        self.table.setElement(self.faces, 'dly', dly)
        #self.table.setElement(self.faces,'dl2',dl2)
        self.table.setElement(
            self.faces, 'dl',
            dl)  # BUGFIX: 20080813 This was outputing dl2.
        self.table.setElement(self.faces, 'dlfrac', dlfrac)
        self.table.setElement(self.faces, 'drx', drx)
        self.table.setElement(self.faces, 'dry', dry)
        #self.table.setElement(self.faces,'dr2',dr2)
        self.table.setElement(self.faces, 'dr', dr)
        self.table.setElement(self.faces, 'drfrac', drfrac)
        self.table.setElement(self.faces, 'deye', deye)
        self.table.setElement(self.faces, 'dmean', dmean)
        self.faces += 1
        # Accumulate summary statistics for this run.
        if dlfrac != None:
            self.bothsse += dlfrac**2 + drfrac**2
            self.leftsse += dlfrac**2
            self.rightsse += drfrac**2
        if detect_face:
            self.face_successes += 1
        if detect_b25:
            self.both25_successes += 1
        if detect_l25:
            self.left25_successes += 1
        if detect_r25:
            self.right25_successes += 1
        if detect_b10:
            self.both10_successes += 1
        if detect_l10:
            self.left10_successes += 1
        if detect_r10:
            self.right10_successes += 1
        if detect_b05:
            self.both05_successes += 1
        if detect_l05:
            self.left05_successes += 1
        if detect_r05:
            self.right05_successes += 1
def onFrame(self,event=None): ''' Retrieve and process a video frame. ''' self.timer.Stop() starttime = time.time() self.detect_time = 0.0 self.eye_time = 0.0 self.label_time = 0.0 img = self.webcam.query() face_processing = self.face_menuitem.IsChecked() eye_processing = self.eye_menuitem.IsChecked() names = [] if face_processing: faces = self.findFaces(img) if self.enrolling != None: success = None for rect,leye,reye in faces: img.annotateRect(self.enrolling,color='yellow') if (success == None) and is_success(self.enrolling,rect): success = rect img.annotateRect(rect,color='blue') if eye_processing: img.annotatePoint(leye,color='blue') img.annotatePoint(reye,color='blue') self.enroll_list.append([img,rect,leye,reye]) else: img.annotateRect(rect,color='red') if eye_processing: img.annotatePoint(leye,color='red') img.annotatePoint(reye,color='red') img.annotateLine(pv.Point(rect.x,rect.y),pv.Point(rect.x+rect.w,rect.y+rect.h), color='red') img.annotateLine(pv.Point(rect.x+rect.w,rect.y),pv.Point(rect.x,rect.y+rect.h), color='red') if success == None: rect = self.enrolling img.annotateLine(pv.Point(rect.x,rect.y),pv.Point(rect.x+rect.w,rect.y+rect.h), color='yellow') img.annotateLine(pv.Point(rect.x+rect.w,rect.y),pv.Point(rect.x,rect.y+rect.h), color='yellow') else: #enroll in the identification algorithm pass else: for rect,leye,reye in faces: img.annotateRect(rect,color='blue') if eye_processing: img.annotatePoint(leye,color='blue') img.annotatePoint(reye,color='blue') if self.face_rec.isTrained(): self.label_time = time.time() for rect,leye,reye in faces: label = self.face_rec.predict(img,leye,reye) names.append([0.5*(leye+reye),label]) self.label_time = time.time() - self.label_time # Displaying Annotated Frame im = img.asAnnotated() if self.mirror_menuitem.IsChecked(): im = im.transpose(FLIP_LEFT_RIGHT) if self.enrolling != None: draw = PIL.ImageDraw.Draw(im) x,y = self.enrolling.x,self.enrolling.y if self.mirror_menuitem.IsChecked(): x = 640 - (x + 
self.enrolling.w) self.enroll_count += 1 draw.text((x+10,y+10), "Enrolling: %2d of %2d"%(self.enroll_count,self.enroll_max), fill='yellow', font=self.arialblack24) del draw if self.enroll_count >= self.enroll_max: print "Count:",self.enroll_count if len(self.enroll_list) == 0: warning_dialog = wx.MessageDialog(self, "No faces were detected during the enrollment process. Please face towards the camera and keep your face in the yellow rectangle during enrollment.", style=wx.ICON_EXCLAMATION | wx.OK, caption="Enrollment Error") warning_dialog.ShowModal() else: name_dialog = wx.TextEntryDialog(self, "Please enter a name to associate with the face. (%d faces captured)"%len(self.enroll_list), caption = "Enrollment ID") result = name_dialog.ShowModal() sub_id = name_dialog.GetValue() if result == wx.ID_OK: if sub_id == "": print "Warning: Empty Subject ID" warning_dialog = wx.MessageDialog(self, "A name was entered in the previous dialog so this face will not be enrolled in the database. Please repeat the enrollment process for this person.", style=wx.ICON_EXCLAMATION | wx.OK, caption="Enrollment Error") warning_dialog.ShowModal() else: for data,rect,leye,reye in self.enroll_list: self.face_rec.addTraining(data,leye,reye,sub_id) self.setupState() self.enroll_count = 0 self.enrolling = None self.enroll_list = [] if len(names) > 0: draw = PIL.ImageDraw.Draw(im) for pt,name in names: x,y = pt.X(),pt.Y() w,h = draw.textsize(name,font=self.arialblack24) if self.mirror_menuitem.IsChecked(): x = 640 - x - 0.5*w else: x = x - 0.5*w draw.text((x,y-20-h), name, fill='green', font=self.arialblack24) del draw wxImg = wx.EmptyImage(im.size[0], im.size[1]) wxImg.SetData(im.tostring()) bm = wxImg.ConvertToBitmap() self.static_bitmap.SetBitmap(bm) # Update timing gauges full_time = time.time() - starttime if self.timing_window != None: self.timing_window.update(self.detect_time,self.eye_time,self.label_time,full_time) self.ids_text.SetLabel("%d"%(self.face_rec.n_labels,)) 
self.faces_text.SetLabel("%d"%(self.face_rec.n_faces,)) sleep_time = 1 if sys.platform.startswith("linux"): sleep_time = 10 # TODO: For macosx milliseconds should be 1 # TODO: For linux milliseconds may need to be set to a higher value 10 self.timer.Start(milliseconds = sleep_time, oneShot = 1)
def onFrame(self, event=None): ''' Retrieve and process a video frame. ''' self.timer.Stop() starttime = time.time() self.detect_time = 0.0 self.eye_time = 0.0 self.label_time = 0.0 img = self.webcam.query() face_processing = self.face_menuitem.IsChecked() eye_processing = self.eye_menuitem.IsChecked() names = [] if face_processing: faces = self.findFaces(img) if self.enrolling != None: success = None for rect, leye, reye in faces: img.annotateRect(self.enrolling, color='yellow') if (success == None) and is_success(self.enrolling, rect): success = rect img.annotateRect(rect, color='blue') if eye_processing: img.annotatePoint(leye, color='blue') img.annotatePoint(reye, color='blue') self.enroll_list.append([img, rect, leye, reye]) else: img.annotateRect(rect, color='red') if eye_processing: img.annotatePoint(leye, color='red') img.annotatePoint(reye, color='red') img.annotateLine(pv.Point(rect.x, rect.y), pv.Point(rect.x + rect.w, rect.y + rect.h), color='red') img.annotateLine(pv.Point(rect.x + rect.w, rect.y), pv.Point(rect.x, rect.y + rect.h), color='red') if success == None: rect = self.enrolling img.annotateLine(pv.Point(rect.x, rect.y), pv.Point(rect.x + rect.w, rect.y + rect.h), color='yellow') img.annotateLine(pv.Point(rect.x + rect.w, rect.y), pv.Point(rect.x, rect.y + rect.h), color='yellow') else: #enroll in the identification algorithm pass else: for rect, leye, reye in faces: img.annotateRect(rect, color='blue') if eye_processing: img.annotatePoint(leye, color='blue') img.annotatePoint(reye, color='blue') if self.face_rec.isTrained(): self.label_time = time.time() for rect, leye, reye in faces: label = self.face_rec.predict(img, leye, reye) names.append([0.5 * (leye + reye), label]) self.label_time = time.time() - self.label_time # Displaying Annotated Frame im = img.asAnnotated() if self.mirror_menuitem.IsChecked(): im = im.transpose(FLIP_LEFT_RIGHT) if self.enrolling != None: draw = PIL.ImageDraw.Draw(im) x, y = self.enrolling.x, self.enrolling.y if 
self.mirror_menuitem.IsChecked(): x = 640 - (x + self.enrolling.w) self.enroll_count += 1 draw.text( (x + 10, y + 10), "Enrolling: %2d of %2d" % (self.enroll_count, self.enroll_max), fill='yellow', font=self.arialblack24) del draw if self.enroll_count >= self.enroll_max: print "Count:", self.enroll_count if len(self.enroll_list) == 0: warning_dialog = wx.MessageDialog( self, "No faces were detected during the enrollment process. Please face towards the camera and keep your face in the yellow rectangle during enrollment.", style=wx.ICON_EXCLAMATION | wx.OK, caption="Enrollment Error") warning_dialog.ShowModal() else: name_dialog = wx.TextEntryDialog( self, "Please enter a name to associate with the face. (%d faces captured)" % len(self.enroll_list), caption="Enrollment ID") result = name_dialog.ShowModal() sub_id = name_dialog.GetValue() if result == wx.ID_OK: if sub_id == "": print "Warning: Empty Subject ID" warning_dialog = wx.MessageDialog( self, "A name was entered in the previous dialog so this face will not be enrolled in the database. 
Please repeat the enrollment process for this person.", style=wx.ICON_EXCLAMATION | wx.OK, caption="Enrollment Error") warning_dialog.ShowModal() else: for data, rect, leye, reye in self.enroll_list: self.face_rec.addTraining( data, leye, reye, sub_id) self.setupState() self.enroll_count = 0 self.enrolling = None self.enroll_list = [] if len(names) > 0: draw = PIL.ImageDraw.Draw(im) for pt, name in names: x, y = pt.X(), pt.Y() w, h = draw.textsize(name, font=self.arialblack24) if self.mirror_menuitem.IsChecked(): x = 640 - x - 0.5 * w else: x = x - 0.5 * w draw.text((x, y - 20 - h), name, fill='green', font=self.arialblack24) del draw wxImg = wx.EmptyImage(im.size[0], im.size[1]) wxImg.SetData(im.tostring()) bm = wxImg.ConvertToBitmap() self.static_bitmap.SetBitmap(bm) # Update timing gauges full_time = time.time() - starttime if self.timing_window != None: self.timing_window.update(self.detect_time, self.eye_time, self.label_time, full_time) self.ids_text.SetLabel("%d" % (self.face_rec.n_labels, )) self.faces_text.SetLabel("%d" % (self.face_rec.n_faces, )) sleep_time = 1 if sys.platform.startswith("linux"): sleep_time = 10 # TODO: For macosx milliseconds should be 1 # TODO: For linux milliseconds may need to be set to a higher value 10 self.timer.Start(milliseconds=sleep_time, oneShot=1)