class CaptureVideo(threading.Thread):
    """Capture video from a webcam and run various detections (face, edge, circle, colour).

    Runs as a thread: run() enters main_loop(), which polls the camera,
    reacts to keyboard / config.command toggles and feeds the enabled
    detectors.  When a `comm` object is supplied, detection results can
    steer a robot head via gaze/neck messages.
    """

    def __init__(self, videoDevice_index, comm=None):
        """Initialise detectors, camera and (optionally) the GUI trackbars.

        videoDevice_index -- index of the camera device to open.
        comm -- optional communication object used to send gaze/neck
                commands; when None all robot control is skipped.
        """
        threading.Thread.__init__(self)
        self.comm = comm
        self.current_colour = None
        self.face_detector = CascadeDetector(cascade_name=config.haar_casc,
                                             min_size=(50, 50), image_scale=0.5)
        self.webcam = Webcam(videoDevice_index)
        # BUG FIX: give the module-level tuning globals safe defaults (they
        # match the trackbar start positions below).  Without this the
        # detectors raise NameError whenever the GUI - and therefore the
        # trackbar callbacks - is disabled.
        global edge_threshold1, edge_threshold2, edge_threshold3, edge_threshold4
        edge_threshold1 = 50
        edge_threshold2 = 90
        edge_threshold3 = 11
        edge_threshold4 = 0
        if config.use_gui:
            # create windows
            cv.NamedWindow('Camera', cv.CV_WINDOW_AUTOSIZE)
            cv.CreateTrackbar('edge threshold', 'Camera', 50, 100, self.change_value1)
            cv.CreateTrackbar('circle threshold', 'Camera', 90, 100, self.change_value2)
            cv.CreateTrackbar('gaussian blur', 'Camera', 11, 50, self.change_value3)
            cv.CreateTrackbar('hue', 'Camera', 0, 100, self.change_value4)

    def run(self):
        """Thread entry point."""
        self.main_loop()

    def detect_face(self, img):
        """Detect faces in the given frame, annotate them and optionally
        track the closest (widest) one with gaze/neck commands."""
        faces = self.findFaces(img)
        if faces:
            close_face_rect = None
            close_face_w = 0.0
            for rect, leye, reye in faces:
                img.annotateRect(rect, color='blue')  # draw square around face
                if rect.w > close_face_w:  # remember closest face (largest width)
                    close_face_w = rect.w
                    close_face_rect = rect
                if config.eye_d:  # draw point on eyes
                    img.annotatePoint(leye, color='blue')
                    img.annotatePoint(reye, color='blue')
            if config.follow_face_gaze:
                # offset of the face centre from the image centre (640x480 frame)
                relative_x = (320 - (close_face_rect.x + (close_face_rect.w / 2.0)))
                relative_y = (240 - (close_face_rect.y + (close_face_rect.h / 2.0)))
                gaze = self.follow_face_with_gaze(relative_x, relative_y, close_face_rect.w)
                neck = self.follow_face_with_neck(relative_x, relative_y, gaze[1])
                if self.comm:
                    # only send when the previous command has been acknowledged
                    if self.comm.last_ack != "wait" and gaze:
                        self.comm.set_neck_gaze(gaze, neck)
                        self.comm.last_ack = "wait"

    def findFaces(self, im):
        """Run the cascade detector; return a list of [rect, left_eye, right_eye].

        Eye positions are estimated by mapping average eye locations through
        an affine transform of the face rectangle.
        """
        rects = self.face_detector.detect(im)
        faces = []
        for rect in rects:
            affine = pv.AffineFromRect(rect, (1, 1))
            leye = affine.invertPoint(AVE_LEFT_EYE)
            reye = affine.invertPoint(AVE_RIGHT_EYE)
            faces.append([rect, leye, reye])
        self.current_faces = faces
        return faces

    def follow_face_with_gaze(self, x, y, width):
        """Convert a face offset (pixels from centre) into a 3D gaze target.

        width -- face rectangle width in pixels, used to estimate distance.
        Returns an (x, depth, y) tuple in metres.
        """
        # TODO: change coordinates that are kept in config into something local
        if config.slow_adjust and (config.face_x is not None and config.face_y is not None):
            # low-pass filter toward the new position to avoid jitter
            config.face_x += (x - config.face_x) * config.gain
            config.face_y += (y - config.face_y) * config.gain
        else:
            config.face_x = x
            config.face_y = y
        # empirical mapping from face width (px) to distance (cm)
        face_distance = ((-88.4832801364568 * math.log(width)) + 538.378262966656)
        x_dist = ((config.face_x / 1400.6666) * face_distance) / 100
        y_dist = ((config.face_y / 700.6666) * face_distance) / 100
        if config.camera_on_projector:
            return (x_dist, (face_distance / 100.0), y_dist)  # x is inverted for compatibility
        else:
            return (-x_dist, (face_distance / 100.0), y_dist)

    def follow_face_with_neck(self, x, y, face_distance):
        """Translate a face offset into a neck rotation/translation command.

        Returns ((rot, 0, -rot), (0, trans_z, 0)) when movement is needed,
        None when the face is already inside the dead zone.
        """
        move = False
        if x > 95 or x < -95:  # horizontal dead zone in pixels
            distance_x = (x / -640.0) * 0.2 * math.pi
            move = True
        else:
            distance_x = 0.0
        if y > 60 or y < -60:  # vertical dead zone
            distance_y = (y / -480.0) * 0.1 * math.pi
            move = True
        else:
            distance_y = 0.0
        if face_distance > 1.0:  # threshold for moving forward when perceived face is far
            config.getting_closer_to_face = 1.0
        # NOTE(review): nesting reconstructed from collapsed source - confirm
        # the move-back rule is meant to apply only while stepping forward.
        if config.getting_closer_to_face > 0.05:
            # keep stepping forward until the accumulated budget is used up
            distance_z = 0.1
            config.getting_closer_to_face += -0.1
            move = True
            if face_distance < 0.2:  # threshold for moving back when face is too close
                distance_z = -0.3 + face_distance
                move = True
        else:
            distance_z = 0
        if move:
            return ((distance_y, .0, -distance_x), (.0, distance_z, .0))

    def detect_edge(self, image):
        """Return a single-channel Canny edge image of the input frame."""
        grayscale = cv.CreateImage(cv.GetSize(image), 8, 1)
        cv.CvtColor(image, grayscale, cv.CV_BGR2GRAY)
        cv.Canny(grayscale, grayscale, edge_threshold1, edge_threshold1 * 3, 3)
        return grayscale

    def detect_circle(self, image, image_org):
        """Find circles with a Hough transform, annotate and track them.

        image -- frame that is annotated in place.
        image_org -- unannotated copy used for colour sampling.
        Returns a list of [(x, y), radius, colour] entries.
        """
        grayscale = cv.CreateImage(cv.GetSize(image), 8, 1)
        grayscale_smooth = cv.CreateImage(cv.GetSize(image), 8, 1)
        cv.CvtColor(image, grayscale, cv.CV_BGR2GRAY)
        if config.edge_d_non_vision:
            cv.Canny(grayscale, grayscale, edge_threshold1, edge_threshold1 * 3, 3)
        cv.Smooth(grayscale, grayscale_smooth, cv.CV_GAUSSIAN, edge_threshold3)
        mat = cv.CreateMat(100, 1, cv.CV_32FC3)
        cv.SetZero(mat)
        cv.HoughCircles(grayscale_smooth, mat, cv.CV_HOUGH_GRADIENT, 2, 50, 200,
                        (edge_threshold2 + 150))
        circles_simple = []
        gazing = None
        if mat.rows != 0:
            for i in xrange(0, mat.rows):
                c = mat[i, 0]
                point = (int(c[0]), int(c[1]))
                radius = int(c[2])
                cv.Circle(image, point, radius, (0, 0, 255))
                if config.detect_colour:
                    # one-shot sampling of the colour under the circle
                    self.get_colour(image, image_org, [int(c[0]), int(c[1])], radius)
                    config.detect_colour = False
                colour = self.record_colour(image, image_org, [int(c[0]), int(c[1])], radius)
                circles_simple.append([point, radius, colour])
        if config.follow_ball_gaze and circles_simple:
            # BUG FIX: the circle centre is a plain tuple, so it must be
            # indexed - the original used the nonexistent .x/.y attributes.
            x_adjust = 320 - circles_simple[0][0][0]
            y_adjust = 240 - circles_simple[0][0][1]
            gazing = self.follow_ball_with_gaze(x_adjust, y_adjust)
        if config.follow_ball_neck and circles_simple:
            x_adjust = 320 - circles_simple[0][0][0]
            y_adjust = 240 - circles_simple[0][0][1]
            # BUG FIX: default both distances so they are always bound.
            # NOTE(review): x_adjust/y_adjust are offsets from the centre, so
            # comparing them against 315..325/235..245 almost always passes -
            # confirm whether raw coordinates were intended here.
            distance_x = 0.0
            distance_y = 0.0
            if x_adjust < 315 or x_adjust > 325:
                distance_x = (x_adjust / -640.0) * 0.2 * math.pi
            if y_adjust < 235 or y_adjust > 245:
                distance_y = (y_adjust / -480.0) * 0.2 * math.pi
            if self.comm.last_ack != "wait":
                if gazing:
                    pass  # placeholder: gaze-based tracking disabled
                else:
                    self.comm.set_neck_orientation(
                        "(" + str(config.neck_pos[0] + distance_y) + ",0," +
                        str(config.neck_pos[2] + distance_x) + ")", "TRACKING")
                    config.neck_pos[2] += distance_x
                    config.neck_pos[0] += distance_y
                    self.comm.last_ack = "wait"
        if config.colour_to_find and circles_simple:
            # pick the circle whose recorded colour is closest to the target
            dist = []
            for entry in circles_simple:
                if entry[2]:
                    dist.append(auks.calculate_distance(config.colour_to_find, entry[2]))
                else:
                    dist.append(999999)  # no colour recorded: effectively infinite
            index = auks.posMin(dist)
            if dist[index] < config.detect_threshold:
                # BUG FIX: cvScalar was undefined; use the cv module's Scalar.
                cv.Circle(image, circles_simple[index][0], 2, cv.Scalar(0, 100, 255), 2)
                x_adjust = 320 - circles_simple[index][0][0]
                y_adjust = 240 - circles_simple[index][0][1]
                distance_x = 0.0
                distance_y = 0.0
                if x_adjust < 315 or x_adjust > 325:
                    distance_x = (x_adjust / -640.0) * 0.2 * math.pi
                if y_adjust < 235 or y_adjust > 245:
                    distance_y = (y_adjust / -480.0) * 0.2 * math.pi
                if self.comm.last_ack != "wait":
                    if gazing:
                        pass  # placeholder: gaze-based tracking disabled
                    else:
                        self.comm.set_neck_orientation(
                            "(" + str(config.neck_pos[0] + distance_y) + ",0," +
                            str(config.neck_pos[2] + distance_x) + ")", "TRACKING")
                        config.neck_pos[2] += distance_x
                        config.neck_pos[0] += distance_y
                        self.comm.last_ack = "wait"
        return circles_simple

    def follow_ball_with_gaze(self, x, y):
        """Convert a ball offset (pixels) into a gaze-target string "x,depth,y".

        The ball is assumed to be at a fixed distance of 50 cm.
        """
        face_distance = 50.0
        x_dist = ((x / 1400.6666) * face_distance) / -100
        y_dist = ((y / 1400.6666) * face_distance) / 100
        return str(x_dist) + "," + str(face_distance / 100) + "," + str(y_dist)

    def get_colour(self, image, image_org, pos, radius):
        """Sample the average colour inside a circle and store it as
        self.current_colour ([R, G, B] ints); draw the sample square."""
        radius = int(radius * 0.7)  # shrink so the sample stays inside the ball
        rect = cv.Rect(pos[0] - radius, pos[1] - radius, radius * 2, radius * 2)
        try:
            subimage = cv.GetSubRect(image_org, rect)
            cv.SaveImage("subimage.png", subimage)
            scalar = cv.Avg(subimage)
            # cv stores BGR, so reverse into [R, G, B]
            self.current_colour = [int((scalar[2])), int(scalar[1]), int(scalar[0])]
            print("Average colour value: R:" + str(int((scalar[2]))) + " G:" + str(int(scalar[1])) + " B:" + str(int(scalar[0])))
        except RuntimeError:
            print("error")
        cv.Rectangle(image, cv.Point(pos[0] - radius, pos[1] - radius),
                     cv.Point(pos[0] + radius, pos[1] + radius),
                     cv.CV_RGB(0, 255, 0), 2, 8, 0)

    def record_colour(self, image, image_org, pos, radius):
        """Return the average colour inside a circle as a nested
        [['c', [['r', R], ['g', G], ['b', B]]]] structure.

        Returns None when the circle is partly outside the frame or when
        sampling fails.
        """
        radius = int(radius * 0.7)
        if pos[1] > radius:  # only record a square when it is in full camera view
            try:
                rect = cv.Rect(pos[0] - radius, pos[1] - radius, radius * 2, radius * 2)
                subimage = cv.GetSubRect(image_org, rect)
                scalar = cv.Avg(subimage)
                return [['c', [['r', int((scalar[2]))], ['g', int(scalar[1])], ['b', int(scalar[0])]]]]
            except RuntimeError:
                print("error radius: " + str(radius) + " position: " + str(pos))
                return None
        else:
            return None

    def find_colour(self, image, colour):
        """Highlight (in green) the pixels whose hue matches the 'hue' trackbar.

        NOTE(review): the `colour` parameter is currently unused - the hue to
        match comes from the global edge_threshold4 set by the trackbar.
        """
        # Create a 8-bit 1-channel image with same size as the frame
        color_mask = cv.CreateImage(cv.GetSize(image), 8, 1)
        image_h = cv.CreateImage(cv.GetSize(image), 8, 1)
        cv.CvtColor(image, image, cv.CV_BGR2HSV)  # convert to hsv
        cv.Split(image, image_h, None, None, None)  # isolate the hue plane
        # Find the pixels within the color-range, and put the output in the color_mask
        cv.InRangeS(image_h, cv.Scalar((edge_threshold4 * 2) - 5),
                    cv.Scalar((edge_threshold4 * 2) + 5), color_mask)
        cv.CvtColor(image, image, cv.CV_HSV2BGR)  # convert back to bgr
        cv.Set(image, cv.CV_RGB(0, 255, 0), color_mask)

    def return_colour(self):
        """Return the most recently sampled colour ([R, G, B] ints) or None."""
        return self.current_colour

    def change_value1(self, new_value):
        """Trackbar callback: Canny edge threshold."""
        global edge_threshold1
        edge_threshold1 = new_value

    def change_value2(self, new_value):
        """Trackbar callback: Hough circle accumulator threshold."""
        global edge_threshold2
        edge_threshold2 = new_value

    def change_value3(self, new_value):
        """Trackbar callback: Gaussian blur aperture (must be odd)."""
        global edge_threshold3
        if new_value % 2:
            edge_threshold3 = new_value
        else:
            edge_threshold3 = new_value + 1  # round even values up to odd

    def change_value4(self, new_value):
        """Trackbar callback: hue searched for by find_colour."""
        global edge_threshold4
        edge_threshold4 = new_value

    def main_loop(self):
        """Poll the camera, handle key/command toggles and run the detectors."""
        writer = None  # created lazily the first time video saving is enabled
        empty = None   # placeholder image shown when display is disabled
        while 1:
            im = self.webcam.query()
            # BUG FIX: check the capture result itself - the original tested
            # the processed `frame`, long after `im` had already been used.
            if im is None:
                print("error capturing frame")
                break
            # handle events
            key = cv.WaitKey(10)
            if key != -1 and key < 256:
                key = chr(key)
            if key == '1' or config.command == '1':
                if config.face_d == False:
                    config.face_d = True
            # NOTE(review): '2' pairs with command 'edge' while all other keys
            # pair with their own digit - confirm this asymmetry is intended.
            if key == '2' or config.command == 'edge':
                if config.edge_d == False:
                    config.edge_d = True
                    print("detecting edges")
            if key == '3' or config.command == '3':
                if config.save_video == False:
                    config.save_video = True
                    print("saving video")
            if key == '4' or config.command == '4':
                if config.circle_d == False:
                    config.circle_d = True
                    print("detecting circles")
            if key == '5' or config.command == '5':
                if config.edge_d_non_vision == False:
                    config.edge_d_non_vision = True
                    print("detecting circles using edge detection")
                else:
                    config.edge_d_non_vision = False
            if key == 's' or config.command == 's':
                if config.show == False:
                    config.show = True
                    print("showing video")
            if key == 'b' or config.command == 'b':
                # BUG FIX: was `self.commr` (AttributeError).  Toggles the
                # gaze between two fixed game coordinates.
                if config.game_coors == "10.0, 50.0, 0.0":
                    self.comm.set_gaze("10.0, 50.0, 0.0")
                    config.game_coors = "0.0, 50.0, 0.0"
                else:
                    self.comm.set_gaze("0.0, 50.0, 0.0")
                    config.game_coors = "10.0, 50.0, 0.0"
            if key == 'e' or config.command == 'e':
                config.face_d = False
                config.edge_d = False
                config.circle_d = False
                config.save_video = False
                config.colour_s = False
                print("stop tracking")
            if key == 'q' or config.command == 'q':
                config.quit = True
            config.command = '0'  # mark the external command as consumed
            if config.face_d:  # face detection
                if config.face_d_optimised:
                    # skip detection while a robot command is still pending
                    if self.comm.last_ack != "wait":
                        self.detect_face(im)
                else:
                    self.detect_face(im)
            if config.quit:  # quit
                print('Camera closed')
                break
            pil = im.asAnnotated()  # get image as PIL
            rgb = cv.CreateImageHeader(pil.size, cv.IPL_DEPTH_8U, 3)  # create IPL image
            cv.SetData(rgb, pil.tostring())
            frame = cv.CreateImage(cv.GetSize(rgb), cv.IPL_DEPTH_8U, 3)
            cv.CvtColor(rgb, frame, cv.CV_RGB2BGR)  # convert to bgr
            cv.Flip(frame, None, 1)  # mirror
            # BUG FIX: colour search and video saving previously ran before
            # `frame` (and `writer`) existed; both now run on the fresh frame.
            if config.colour_s:
                self.find_colour(frame, 10)
            if config.save_video:  # save
                if writer is None:
                    writer = cv.CreateVideoWriter("out.avi", cv.CV_FOURCC('P', 'I', 'M', '1'), 30, (640, 480), 1)
                cv.WriteFrame(writer, frame)
            if config.circle_d:  # circle detection
                frame_org = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 3)
                cv.Copy(frame, frame_org)  # unannotated copy for colour sampling
                self.detect_circle(frame, frame_org)
            if config.edge_d:  # edge detection
                frame_org = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 3)
                cv.Copy(frame, frame_org)
                frame = self.detect_edge(frame)
            if config.use_gui:
                if config.show:
                    cv.ShowImage('Camera', frame)  # display webcam image
                else:
                    # BUG FIX: `empty` was never defined; show a black frame.
                    if empty is None:
                        empty = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 3)
                        cv.SetZero(empty)
                    cv.ShowImage('Camera', empty)
class LiveDemoFrame(wx.Frame):
    """Simple live-demo window: shows webcam frames and applies the selected
    demo filter (from `demos`) to each frame before display."""

    def __init__(self, parent, id, name, demos=DEMO_DEFAULTS, size=(800, 550)):
        """Build the window: webcam, detectors, demo radio box and layout.

        demos -- mapping of demo name -> callable(img) -> annotated img.
        """
        wx.Frame.__init__(self, parent, id, name, size=size)
        # ---------------- Basic Data -------------------
        self.webcam = Webcam()
        self.harris = DetectorHarris()
        self.dog = DetectorDOG(n=100, selector='best')
        self.face = CascadeDetector()
        self.demos = demos
        # ------------- Other Components ----------------
        self.CreateStatusBar()
        # ------------------- Menu ----------------------
        # Creating the menubar.
        # ----------------- Image List ------------------
        # --------------- Image Display -----------------
        self.static_bitmap = wx.StaticBitmap(self, wx.NewId(),
                                             bitmap=wx.EmptyBitmap(640, 480))
        # 'None' plus one radio button per registered demo
        self.radios = wx.RadioBox(self, wx.NewId(), 'Demos',
                                  choices=['None'] + self.demos.keys(),
                                  style=wx.RA_SPECIFY_ROWS)
        self.mirror = wx.CheckBox(self, wx.NewId(), 'Mirror')
        self.mirror.SetValue(True)
        # --------------- Window Layout -----------------
        grid = wx.FlexGridSizer(2, 2)
        grid.Add(self.static_bitmap)
        grid.Add(self.radios)
        grid.Add(self.mirror)
        self.SetAutoLayout(True)
        self.SetSizer(grid)
        self.Layout()
        # -----------------------------------------------
        self.timer = FrameTimer(self)  # project-defined timer driving frame updates
        self.timer.Start(200)
        # -------------- Event Handleing ----------------
        wx.EVT_SIZE(self.static_bitmap, self.onBitmapResize)
        wx.EVT_LEFT_DOWN(self.static_bitmap, self.onClick)
        # NOTE(review): EVT_TIMER is bound to onTmp, which takes no `event`
        # parameter - confirm FrameTimer invokes handlers without an event
        # argument, otherwise this raises TypeError when the timer fires.
        wx.EVT_TIMER(self, -1, self.onTmp)
        #wx.EVT_CLOSE(self,self.onClose)

    def onTmp(self):
        """Timer callback placeholder; merely logs that the timer fired."""
        print "Notify"

    def onFrame(self, event=None):
        """Grab a frame, apply the selected demo filter, and display it."""
        self.timer.Stop()  # pause updates while processing this frame
        starttime = time.time()
        img = self.webcam.query()
        selection = self.radios.GetStringSelection()
        if selection == 'None':
            pass  # no processing: show the raw frame
        else:
            # run the demo callable whose name matches the radio selection
            for key, func in self.demos.iteritems():
                if key == selection:
                    img = func(img)
        print "Displaying Annotated Frame:", selection
        im = img.asAnnotated()  # render annotations into a PIL image
        if self.mirror.GetValue():
            im = im.transpose(FLIP_LEFT_RIGHT)
        # PIL image -> wx bitmap -> screen
        wxImg = wx.EmptyImage(im.size[0], im.size[1])
        wxImg.SetData(im.tostring())
        bm = wxImg.ConvertToBitmap()
        self.static_bitmap.SetBitmap(bm)
        print "Frame Time:", time.time() - starttime
        # re-arm the timer for the next frame
        self.timer.Start(milliseconds=1, oneShot=1)

    def onBitmapResize(self, event):
        """Keep the bitmap widget sized to its allotted area."""
        # NOTE(review): w and h are computed but unused.
        w = event.GetSize().GetWidth()
        h = event.GetSize().GetHeight()
        self.static_bitmap.SetSize(event.GetSize())

    # ------------- Event Handlers ---------------
    def onClick(self, event):
        """A click on the video triggers an immediate frame update."""
        self.onFrame(event)
class VideoWindow(wx.Frame):
    ''' This is the main FaceL window which includes the webcam video and
    enrollment and training controls. '''

    def __init__(self, parent, id, name, size=(640, 672)):
        ''' Create all the windows and controls used for the window, set up
        face detection/recognition components, and connect to the camera. '''
        wx.Frame.__init__(self, parent, id, name, size=size)
        self.CenterOnScreen(wx.HORIZONTAL)
        self.timing_window = None  # Initialize timing window
        # ------------- Face Processing -----------------
        self.face_detector = CascadeDetector(cascade_name=CASCADE_NAME,
                                             image_scale=0.5)
        self.fel = FilterEyeLocator(FEL_NAME)  # eye locator used by findFaces
        self.face_rec = SVMFaceRec()
        self.svm_mode = SVM_AUTOMATIC  # automatic vs manual SVM tuning
        self.svm_C = 4.000e+00         # manual-mode SVM parameters
        self.svm_Gamma = 9.766e-04
        self.current_faces = []   # faces from the most recent frame
        self.enrolling = None     # rect being enrolled, or None
        self.enroll_count = 0     # frames captured so far for enrollment
        self.enroll_max = 32      # frames to capture per enrollment
        self.enroll_list = []     # captured [img, rect, leye, reye] entries
        self.previous_time = time.time()
        self.arialblack24 = PIL.ImageFont.truetype(ARIAL_BLACK_NAME, 24)
        # ---------------- Basic Data -------------------
        try:
            self.webcam = Webcam()
        except SystemExit:
            raise
        except:
            # camera is essential: report the failure and exit
            trace = traceback.format_exc()
            message = TraceBackDialog(None, "Camera Error", CAMERA_ERROR, trace)
            message.ShowModal()
            sys.stderr.write(
                "FaceL Error: an error occurred while trying to connect to the camera. Details follow.\n\n"
            )
            sys.stderr.write(trace)
            sys.exit(CAMERA_ERROR_CODE)
        # ------------- Other Components ----------------
        self.CreateStatusBar()
        # ------------------- Menu ----------------------
        # Creating the menubar.
        # Menu IDs
        license_id = wx.NewId()
        mirror_id = wx.NewId()
        face_id = wx.NewId()
        svm_tune_id = wx.NewId()
        performance_id = wx.NewId()
        # Menu Items
        self.file_menu = wx.Menu()
        self.file_menu.Append(wx.ID_ABOUT, "&About...")
        self.file_menu.Append(license_id, "FaceL License...")
        self.file_menu.AppendSeparator()
        self.file_menu.Append(wx.ID_EXIT, "E&xit")
        self.options_menu = wx.Menu()
        self.face_menuitem = self.options_menu.AppendCheckItem(
            face_id, "Face Processing")
        # NOTE(review): "Eye Detection" reuses face_id instead of its own
        # wx.NewId() - confirm whether the shared menu id is intentional.
        self.eye_menuitem = self.options_menu.AppendCheckItem(
            face_id, "Eye Detection")
        self.mirror_menuitem = self.options_menu.AppendCheckItem(
            mirror_id, "Mirror Video")
        self.options_menu.AppendSeparator()
        self.options_menu.Append(svm_tune_id, "SVM Tuning...")
        self.options_menu.Append(performance_id, "Performance...")
        # Create Menu Bar
        self.menu_bar = wx.MenuBar()
        self.menu_bar.Append(self.file_menu, "&File")
        self.menu_bar.Append(self.options_menu, "&Options")
        self.SetMenuBar(self.menu_bar)
        # Menu Events
        wx.EVT_MENU(self, wx.ID_ABOUT, self.onAbout)
        wx.EVT_MENU(self, license_id, self.onLicense)
        wx.EVT_MENU(self, mirror_id, self.onNull)
        wx.EVT_MENU(self, face_id, self.onNull)
        wx.EVT_MENU(self, svm_tune_id, self.onSVMTune)
        wx.EVT_MENU(self, performance_id, self.onTiming)
        # Set up menu checks
        self.face_menuitem.Check(True)
        self.eye_menuitem.Check(True)
        self.mirror_menuitem.Check(True)
        # ----------------- Image List ------------------
        # --------------- Image Display -----------------
        self.static_bitmap = wx.StaticBitmap(self, wx.NewId(),
                                             bitmap=wx.EmptyBitmap(640, 480))
        self.controls_box = wx.StaticBox(self, wx.NewId(), "Controls")
        self.facel_logo = wx.StaticBitmap(self, wx.NewId(),
                                          bitmap=wx.Bitmap(FACEL_LOGO))
        self.csu_logo = wx.StaticBitmap(self, wx.NewId(),
                                        bitmap=wx.Bitmap(CSU_LOGO))
        # self.performance_box = wx.StaticBox(self, wx.NewId(), "Performance")
        # NOTE(review): attribute name contains a typo ("chioce"); kept as-is
        # because renaming would touch code, not just documentation.
        self.enroll_chioce_label = wx.StaticText(self, wx.NewId(),
                                                 "Enrollment Count:",
                                                 style=wx.ALIGN_LEFT)
        self.enroll_choice = wx.Choice(self, wx.NewId(), wx.Point(0, 0),
                                       wx.Size(-1, -1),
                                       ['16', '32', '48', '64', '128', '256'])
        self.enroll_choice.Select(3)  # default enrollment count: '64'
        self.train_button = wx.Button(self, wx.NewId(), 'Train Labeler')
        self.reset_button = wx.Button(self, wx.NewId(), 'Clear Labels')
        # --------------- Instrumentation ---------------
        self.enroll_label = wx.StaticText(
            self, wx.NewId(), "Click a face in the video to enroll.",
            style=wx.ALIGN_LEFT)
        self.ids_label = wx.StaticText(self, wx.NewId(), "Labels:",
                                       size=wx.Size(-1, 16),
                                       style=wx.ALIGN_LEFT)
        self.ids_text = wx.StaticText(self, wx.NewId(), size=wx.Size(30, 16),
                                      style=wx.ALIGN_RIGHT)
        self.faces_label = wx.StaticText(self, wx.NewId(), "Faces:",
                                         size=wx.Size(-1, 16),
                                         style=wx.ALIGN_LEFT)
        self.faces_text = wx.StaticText(self, wx.NewId(), size=wx.Size(30, 16),
                                        style=wx.ALIGN_RIGHT)
        # --------------- Window Layout -----------------
        enroll_sizer = wx.BoxSizer(wx.HORIZONTAL)
        enroll_sizer.Add(self.ids_label, flag=wx.ALIGN_CENTER | wx.ALL, border=4)
        enroll_sizer.Add(self.ids_text, flag=wx.ALIGN_CENTER | wx.ALL, border=4)
        enroll_sizer.AddSpacer(20)
        enroll_sizer.Add(self.faces_label, flag=wx.ALIGN_CENTER | wx.ALL, border=4)
        enroll_sizer.Add(self.faces_text, flag=wx.ALIGN_CENTER | wx.ALL, border=4)
        training_sizer = wx.BoxSizer(wx.HORIZONTAL)
        training_sizer.Add(self.train_button, flag=wx.ALIGN_CENTER | wx.ALL, border=4)
        training_sizer.Add(self.reset_button, flag=wx.ALIGN_CENTER | wx.ALL, border=4)
        enroll_choice_sizer = wx.BoxSizer(wx.HORIZONTAL)
        enroll_choice_sizer.Add(self.enroll_chioce_label,
                                flag=wx.ALIGN_CENTER | wx.ALL, border=0)
        enroll_choice_sizer.Add(self.enroll_choice,
                                flag=wx.ALIGN_CENTER | wx.ALL, border=0)
        controls_sizer = wx.StaticBoxSizer(
            self.controls_box, wx.VERTICAL)  #wx.BoxSizer(wx.VERTICAL)
        controls_sizer.Add(self.enroll_label, flag=wx.ALIGN_LEFT | wx.ALL, border=0)
        controls_sizer.Add(enroll_sizer, flag=wx.ALIGN_LEFT | wx.ALL, border=0)
        controls_sizer.Add(enroll_choice_sizer, flag=wx.ALIGN_LEFT | wx.ALL, border=4)
        controls_sizer.Add(training_sizer, flag=wx.ALIGN_LEFT | wx.ALL, border=0)
        bottom_sizer = wx.BoxSizer(wx.HORIZONTAL)
        bottom_sizer.Add(self.facel_logo, flag=wx.ALIGN_CENTER | wx.ALL, border=0)
        bottom_sizer.Add(controls_sizer, flag=wx.ALIGN_TOP | wx.ALL, border=4)
        bottom_sizer.Add(self.csu_logo, flag=wx.ALIGN_CENTER | wx.ALL, border=0)
        main_sizer = wx.BoxSizer(wx.VERTICAL)
        main_sizer.Add(self.static_bitmap, flag=wx.ALIGN_CENTER | wx.ALL, border=0)
        main_sizer.Add(bottom_sizer, flag=wx.ALIGN_CENTER | wx.ALL, border=4)
        self.SetAutoLayout(True)
        self.SetSizer(main_sizer)
        self.Layout()
        # -----------------------------------------------
        self.timer = FrameTimer(self)  # drives periodic onFrame updates
        self.timer.Start(200)
        # -------------- Event Handleing ----------------
        wx.EVT_SIZE(self.static_bitmap, self.onBitmapResize)
        wx.EVT_LEFT_DOWN(self.static_bitmap, self.onClick)
        self.Bind(wx.EVT_BUTTON, self.onTrain, id=self.train_button.GetId())
        self.Bind(wx.EVT_BUTTON, self.onReset, id=self.reset_button.GetId())
        # --------------- Setup State -------------------
        self.setupState()

    def onTrain(self, event=None):
        ''' Start the SVM training process. '''
        print "Train"
        #progress = wx.ProgressDialog(title="SVM Training", message="Training the Face Recognition Algorithm. Please Wait...")
        if self.svm_mode == SVM_AUTOMATIC:
            # Train with automatic tuning; remember the parameters it chose.
            self.face_rec.train()  #callback=progress.Update)
            self.svm_C = self.face_rec.svm.C
            self.svm_Gamma = self.face_rec.svm.gamma
        else:
            # Train with manual tuning.
            self.face_rec.train(C=[self.svm_C],
                                Gamma=[self.svm_Gamma])  # ,callback=progress.Update)
        #progress.Destroy()

    def onReset(self, event=None):
        ''' Clear the enrollment data for the SVM. '''
        self.face_rec.reset()
        self.setupState()

    def onFrame(self, event=None):
        ''' Retrieve and process a video frame.

        Handles face detection/annotation, the multi-frame enrollment
        workflow, recognition labelling, and pushing the annotated frame
        to the display. '''
        self.timer.Stop()  # pause the timer while processing
        starttime = time.time()
        self.detect_time = 0.0
        self.eye_time = 0.0
        self.label_time = 0.0
        img = self.webcam.query()
        face_processing = self.face_menuitem.IsChecked()
        eye_processing = self.eye_menuitem.IsChecked()
        names = []  # [midpoint-between-eyes, label] pairs to draw later
        if face_processing:
            faces = self.findFaces(img)
            if self.enrolling != None:
                # Enrollment mode: capture faces that overlap the selected rect.
                success = None
                for rect, leye, reye in faces:
                    img.annotateRect(self.enrolling, color='yellow')
                    if (success == None) and is_success(self.enrolling, rect):
                        # first face matching the enrollment rect: capture it
                        success = rect
                        img.annotateRect(rect, color='blue')
                        if eye_processing:
                            img.annotatePoint(leye, color='blue')
                            img.annotatePoint(reye, color='blue')
                        self.enroll_list.append([img, rect, leye, reye])
                    else:
                        # non-matching face: cross it out in red
                        img.annotateRect(rect, color='red')
                        if eye_processing:
                            img.annotatePoint(leye, color='red')
                            img.annotatePoint(reye, color='red')
                        img.annotateLine(pv.Point(rect.x, rect.y),
                                         pv.Point(rect.x + rect.w, rect.y + rect.h),
                                         color='red')
                        img.annotateLine(pv.Point(rect.x + rect.w, rect.y),
                                         pv.Point(rect.x, rect.y + rect.h),
                                         color='red')
                if success == None:
                    # no face matched this frame: cross out the enrollment rect
                    rect = self.enrolling
                    img.annotateLine(pv.Point(rect.x, rect.y),
                                     pv.Point(rect.x + rect.w, rect.y + rect.h),
                                     color='yellow')
                    img.annotateLine(pv.Point(rect.x + rect.w, rect.y),
                                     pv.Point(rect.x, rect.y + rect.h),
                                     color='yellow')
                else:
                    #enroll in the identification algorithm
                    pass
            else:
                # Normal mode: just annotate every detected face.
                for rect, leye, reye in faces:
                    img.annotateRect(rect, color='blue')
                    if eye_processing:
                        img.annotatePoint(leye, color='blue')
                        img.annotatePoint(reye, color='blue')
            if self.face_rec.isTrained():
                # Label each face with the trained recognizer; time the pass.
                self.label_time = time.time()
                for rect, leye, reye in faces:
                    label = self.face_rec.predict(img, leye, reye)
                    names.append([0.5 * (leye + reye), label])
                self.label_time = time.time() - self.label_time
        # Displaying Annotated Frame
        im = img.asAnnotated()
        if self.mirror_menuitem.IsChecked():
            im = im.transpose(FLIP_LEFT_RIGHT)
        if self.enrolling != None:
            # Overlay enrollment progress text on the (possibly mirrored) frame.
            draw = PIL.ImageDraw.Draw(im)
            x, y = self.enrolling.x, self.enrolling.y
            if self.mirror_menuitem.IsChecked():
                x = 640 - (x + self.enrolling.w)  # mirror the text anchor too
            self.enroll_count += 1
            draw.text(
                (x + 10, y + 10),
                "Enrolling: %2d of %2d" % (self.enroll_count, self.enroll_max),
                fill='yellow', font=self.arialblack24)
            del draw
            if self.enroll_count >= self.enroll_max:
                # Enrollment finished: ask for a name and train on the captures.
                print "Count:", self.enroll_count
                if len(self.enroll_list) == 0:
                    warning_dialog = wx.MessageDialog(
                        self,
                        "No faces were detected during the enrollment process. Please face towards the camera and keep your face in the yellow rectangle during enrollment.",
                        style=wx.ICON_EXCLAMATION | wx.OK,
                        caption="Enrollment Error")
                    warning_dialog.ShowModal()
                else:
                    name_dialog = wx.TextEntryDialog(
                        self,
                        "Please enter a name to associate with the face. (%d faces captured)" % len(self.enroll_list),
                        caption="Enrollment ID")
                    result = name_dialog.ShowModal()
                    sub_id = name_dialog.GetValue()
                    if result == wx.ID_OK:
                        if sub_id == "":
                            print "Warning: Empty Subject ID"
                            # NOTE(review): this message reads "A name was
                            # entered" although the branch handles an EMPTY
                            # name - the text likely lacks a "not".
                            warning_dialog = wx.MessageDialog(
                                self,
                                "A name was entered in the previous dialog so this face will not be enrolled in the database. Please repeat the enrollment process for this person.",
                                style=wx.ICON_EXCLAMATION | wx.OK,
                                caption="Enrollment Error")
                            warning_dialog.ShowModal()
                        else:
                            for data, rect, leye, reye in self.enroll_list:
                                self.face_rec.addTraining(
                                    data, leye, reye, sub_id)
                            self.setupState()
                # reset enrollment state regardless of dialog outcome
                self.enroll_count = 0
                self.enrolling = None
                self.enroll_list = []
        if len(names) > 0:
            # Draw recognition labels above the eyes of each labelled face.
            draw = PIL.ImageDraw.Draw(im)
            for pt, name in names:
                x, y = pt.X(), pt.Y()
                w, h = draw.textsize(name, font=self.arialblack24)
                if self.mirror_menuitem.IsChecked():
                    x = 640 - x - 0.5 * w
                else:
                    x = x - 0.5 * w
                draw.text((x, y - 20 - h), name, fill='green',
                          font=self.arialblack24)
            del draw
        # PIL image -> wx bitmap -> screen
        wxImg = wx.EmptyImage(im.size[0], im.size[1])
        wxImg.SetData(im.tostring())
        bm = wxImg.ConvertToBitmap()
        self.static_bitmap.SetBitmap(bm)
        # Update timing gauges
        full_time = time.time() - starttime
        if self.timing_window != None:
            self.timing_window.update(self.detect_time, self.eye_time,
                                      self.label_time, full_time)
        self.ids_text.SetLabel("%d" % (self.face_rec.n_labels, ))
        self.faces_text.SetLabel("%d" % (self.face_rec.n_faces, ))
        sleep_time = 1
        if sys.platform.startswith("linux"):
            sleep_time = 10
        # TODO: For macosx milliseconds should be 1
        # TODO: For linux milliseconds may need to be set to a higher value 10
        self.timer.Start(milliseconds=sleep_time, oneShot=1)

    def setupState(self):
        """Enable the train button only once at least two labels exist."""
        #print "state",self.face_rec.n_labels,self.IsEnabled()
        if self.face_rec.n_labels >= 2:
            self.train_button.Enable()
        else:
            self.train_button.Disable()

    def onBitmapResize(self, event):
        """Keep the bitmap widget sized to its allotted area."""
        # NOTE(review): w and h are computed but unused.
        w = event.GetSize().GetWidth()
        h = event.GetSize().GetHeight()
        self.static_bitmap.SetSize(event.GetSize())

    def onClick(self, event):
        ''' Process a click in the Video window which starts the enrollment
        process for the face under the cursor. '''
        x = event.GetX()
        y = event.GetY()
        if self.mirror_menuitem.IsChecked():
            x = 640 - x  # un-mirror the click coordinate
        for rect, leye, reye in self.current_faces:
            if rect.containsPoint(pv.Point(x, y)):
                self.enrolling = rect
                self.enroll_count = 0
                self.enroll_max = int(self.enroll_choice.GetStringSelection())

    def findFaces(self, im):
        """Detect faces (and optionally eyes); record timings; return and
        cache [rect, left_eye, right_eye] triples."""
        eye_processing = self.eye_menuitem.IsChecked()
        self.detect_time = time.time()
        rects = self.face_detector.detect(im)
        self.detect_time = time.time() - self.detect_time
        self.eye_time = time.time()
        if eye_processing:
            # precise eye localization via the eye-locator filter
            faces = self.fel.locateEyes(im, rects)
        else:
            # fall back to average eye positions inside each face rect
            faces = []
            for rect in rects:
                affine = pv.AffineFromRect(rect, (1, 1))
                leye = affine.invertPoint(AVE_LEFT_EYE)
                reye = affine.invertPoint(AVE_RIGHT_EYE)
                faces.append([rect, leye, reye])
        self.eye_time = time.time() - self.eye_time
        self.current_faces = faces
        return faces

    def onAbout(self, event):
        """Show the About dialog."""
        wx.MessageBox(ABOUT_MESSAGE, "About FaceL",
                      wx.OK | wx.ICON_INFORMATION)

    def onLicense(self, event):
        """Show the license dialog."""
        wx.MessageBox(LICENSE_MESSAGE, "FaceL License",
                      wx.OK | wx.ICON_INFORMATION)

    def onNull(self, *args, **kwargs):
        """No-op handler for menu items that toggle check state only."""
        pass

    def onSVMTune(self, event):
        """Open the SVM tuning dialog and adopt its settings on OK."""
        dialog = SVMTuningDialog(self, self.svm_mode, self.svm_C,
                                 self.svm_Gamma)
        dialog.CenterOnParent()
        result = dialog.ShowModal()
        if result == wx.ID_OK:
            self.svm_mode = dialog.mode
            self.svm_C = dialog.C
            self.svm_Gamma = dialog.Gamma
        print "SVM Tuning Info <MODE:%s; C:%0.2e; Gamma:%0.2e>" % (
            self.svm_mode, self.svm_C, self.svm_Gamma)
        dialog.Destroy()

    def onTiming(self, event):
        """Show the performance window, creating it on first use."""
        if self.timing_window == None:
            self.timing_window = TimingWindow(self, wx.NewId(), "Performance")
            self.timing_window.CenterOnParent()
            self.timing_window.Show(True)
            self.timing_window.Bind(wx.EVT_CLOSE, self.onCloseTiming,
                                    id=self.timing_window.GetId())
        else:
            self.timing_window.Show(True)
            self.timing_window.Raise()

    def onCloseTiming(self, event):
        """Destroy the performance window and forget the reference."""
        self.timing_window.Destroy()
        self.timing_window = None
class VideoWindow(wx.Frame): ''' This is the main FaceL window which includes the webcam video and enrollment and training controls. ''' def __init__(self,parent,id,name,size=(640,672)): ''' Create all the windows and controls used for the window and ''' wx.Frame.__init__(self,parent,id,name,size=size) self.CenterOnScreen(wx.HORIZONTAL) self.timing_window = None # Initialize timing window # ------------- Face Processing ----------------- self.face_detector = CascadeDetector(cascade_name=CASCADE_NAME,image_scale=0.5) self.fel = FilterEyeLocator(FEL_NAME) self.face_rec = SVMFaceRec() self.svm_mode = SVM_AUTOMATIC self.svm_C = 4.000e+00 self.svm_Gamma = 9.766e-04 self.current_faces = [] self.enrolling = None self.enroll_count = 0 self.enroll_max = 32 self.enroll_list = [] self.previous_time = time.time() self.arialblack24 = PIL.ImageFont.truetype(ARIAL_BLACK_NAME, 24) # ---------------- Basic Data ------------------- try: self.webcam = Webcam() except SystemExit: raise except: trace = traceback.format_exc() message = TraceBackDialog(None, "Camera Error", CAMERA_ERROR, trace) message.ShowModal() sys.stderr.write("FaceL Error: an error occurred while trying to connect to the camera. Details follow.\n\n") sys.stderr.write(trace) sys.exit(CAMERA_ERROR_CODE) # ------------- Other Components ---------------- self.CreateStatusBar() # ------------------- Menu ---------------------- # Creating the menubar. # Menu IDs license_id = wx.NewId() mirror_id = wx.NewId() face_id = wx.NewId() svm_tune_id = wx.NewId() performance_id = wx.NewId() # Menu Items self.file_menu = wx.Menu(); self.file_menu.Append( wx.ID_ABOUT, "&About..." ) self.file_menu.Append( license_id, "FaceL License..." 
) self.file_menu.AppendSeparator(); self.file_menu.Append( wx.ID_EXIT, "E&xit" ) self.options_menu = wx.Menu(); self.face_menuitem = self.options_menu.AppendCheckItem( face_id, "Face Processing" ) self.eye_menuitem = self.options_menu.AppendCheckItem( face_id, "Eye Detection" ) self.mirror_menuitem = self.options_menu.AppendCheckItem( mirror_id, "Mirror Video" ) self.options_menu.AppendSeparator() self.options_menu.Append( svm_tune_id, "SVM Tuning..." ) self.options_menu.Append( performance_id, "Performance..." ) # Create Menu Bar self.menu_bar = wx.MenuBar(); self.menu_bar.Append( self.file_menu, "&File" ) self.menu_bar.Append( self.options_menu, "&Options" ) self.SetMenuBar( self.menu_bar ) # Menu Events wx.EVT_MENU(self, wx.ID_ABOUT, self.onAbout ) wx.EVT_MENU(self, license_id, self.onLicense ) wx.EVT_MENU(self, mirror_id, self.onNull ) wx.EVT_MENU(self, face_id, self.onNull ) wx.EVT_MENU(self, svm_tune_id, self.onSVMTune ) wx.EVT_MENU(self, performance_id, self.onTiming ) # Set up menu checks self.face_menuitem.Check(True) self.eye_menuitem.Check(True) self.mirror_menuitem.Check(True) # ----------------- Image List ------------------ # --------------- Image Display ----------------- self.static_bitmap = wx.StaticBitmap(self,wx.NewId(), bitmap=wx.EmptyBitmap(640, 480)) self.controls_box = wx.StaticBox(self, wx.NewId(), "Controls") self.facel_logo = wx.StaticBitmap(self,wx.NewId(), bitmap=wx.Bitmap(FACEL_LOGO)) self.csu_logo = wx.StaticBitmap(self,wx.NewId(), bitmap=wx.Bitmap(CSU_LOGO)) # self.performance_box = wx.StaticBox(self, wx.NewId(), "Performance") self.enroll_chioce_label = wx.StaticText(self, wx.NewId(), "Enrollment Count:", style=wx.ALIGN_LEFT) self.enroll_choice = wx.Choice(self,wx.NewId(),wx.Point(0,0),wx.Size(-1,-1),['16','32','48','64','128','256']) self.enroll_choice.Select(3) self.train_button = wx.Button(self,wx.NewId(),'Train Labeler') self.reset_button = wx.Button(self,wx.NewId(),'Clear Labels') # --------------- Instrumentation 
--------------- self.enroll_label = wx.StaticText(self, wx.NewId(), "Click a face in the video to enroll.", style=wx.ALIGN_LEFT) self.ids_label = wx.StaticText(self, wx.NewId(), "Labels:", size=wx.Size(-1,16), style=wx.ALIGN_LEFT) self.ids_text = wx.StaticText(self, wx.NewId(), size = wx.Size(30,16), style= wx.ALIGN_RIGHT ) self.faces_label = wx.StaticText(self, wx.NewId(), "Faces:", size=wx.Size(-1,16), style=wx.ALIGN_LEFT) self.faces_text = wx.StaticText(self, wx.NewId(), size = wx.Size(30,16), style= wx.ALIGN_RIGHT ) # --------------- Window Layout ----------------- enroll_sizer = wx.BoxSizer(wx.HORIZONTAL) enroll_sizer.Add(self.ids_label, flag = wx.ALIGN_CENTER | wx.ALL, border=4) enroll_sizer.Add(self.ids_text, flag = wx.ALIGN_CENTER | wx.ALL, border=4) enroll_sizer.AddSpacer(20) enroll_sizer.Add(self.faces_label, flag = wx.ALIGN_CENTER | wx.ALL, border=4) enroll_sizer.Add(self.faces_text, flag = wx.ALIGN_CENTER | wx.ALL, border=4) training_sizer = wx.BoxSizer(wx.HORIZONTAL) training_sizer.Add(self.train_button, flag = wx.ALIGN_CENTER | wx.ALL, border=4) training_sizer.Add(self.reset_button, flag = wx.ALIGN_CENTER | wx.ALL, border=4) enroll_choice_sizer = wx.BoxSizer(wx.HORIZONTAL) enroll_choice_sizer.Add(self.enroll_chioce_label, flag = wx.ALIGN_CENTER | wx.ALL, border=0) enroll_choice_sizer.Add(self.enroll_choice, flag = wx.ALIGN_CENTER | wx.ALL, border=0) controls_sizer = wx.StaticBoxSizer(self.controls_box,wx.VERTICAL) #wx.BoxSizer(wx.VERTICAL) controls_sizer.Add(self.enroll_label, flag = wx.ALIGN_LEFT | wx.ALL, border=0) controls_sizer.Add(enroll_sizer, flag = wx.ALIGN_LEFT | wx.ALL, border=0) controls_sizer.Add(enroll_choice_sizer, flag = wx.ALIGN_LEFT | wx.ALL, border=4) controls_sizer.Add(training_sizer, flag = wx.ALIGN_LEFT | wx.ALL, border=0) bottom_sizer = wx.BoxSizer(wx.HORIZONTAL) bottom_sizer.Add(self.facel_logo, flag = wx.ALIGN_CENTER | wx.ALL, border=0) bottom_sizer.Add(controls_sizer, flag = wx.ALIGN_TOP | wx.ALL, border=4) 
bottom_sizer.Add(self.csu_logo, flag = wx.ALIGN_CENTER | wx.ALL, border=0) main_sizer = wx.BoxSizer(wx.VERTICAL) main_sizer.Add(self.static_bitmap, flag = wx.ALIGN_CENTER | wx.ALL, border=0) main_sizer.Add(bottom_sizer, flag = wx.ALIGN_CENTER | wx.ALL, border=4) self.SetAutoLayout(True) self.SetSizer(main_sizer) self.Layout() # ----------------------------------------------- self.timer = FrameTimer(self) self.timer.Start(200) # -------------- Event Handleing ---------------- wx.EVT_SIZE(self.static_bitmap, self.onBitmapResize) wx.EVT_LEFT_DOWN(self.static_bitmap, self.onClick) self.Bind(wx.EVT_BUTTON, self.onTrain, id=self.train_button.GetId()) self.Bind(wx.EVT_BUTTON, self.onReset, id=self.reset_button.GetId()) # --------------- Setup State ------------------- self.setupState() def onTrain(self,event=None): ''' Start the SVM training process. ''' print "Train" #progress = wx.ProgressDialog(title="SVM Training", message="Training the Face Recognition Algorithm. Please Wait...") if self.svm_mode == SVM_AUTOMATIC: # Train with automatic tuning. self.face_rec.train() #callback=progress.Update) self.svm_C = self.face_rec.svm.C self.svm_Gamma = self.face_rec.svm.gamma else: # Train with manual tuning. self.face_rec.train( C=[self.svm_C] , Gamma=[self.svm_Gamma])# ,callback=progress.Update) #progress.Destroy() def onReset(self,event=None): ''' Clear the enrollment data for the SVM. ''' self.face_rec.reset() self.setupState() def onFrame(self,event=None): ''' Retrieve and process a video frame. 
''' self.timer.Stop() starttime = time.time() self.detect_time = 0.0 self.eye_time = 0.0 self.label_time = 0.0 img = self.webcam.query() face_processing = self.face_menuitem.IsChecked() eye_processing = self.eye_menuitem.IsChecked() names = [] if face_processing: faces = self.findFaces(img) if self.enrolling != None: success = None for rect,leye,reye in faces: img.annotateRect(self.enrolling,color='yellow') if (success == None) and is_success(self.enrolling,rect): success = rect img.annotateRect(rect,color='blue') if eye_processing: img.annotatePoint(leye,color='blue') img.annotatePoint(reye,color='blue') self.enroll_list.append([img,rect,leye,reye]) else: img.annotateRect(rect,color='red') if eye_processing: img.annotatePoint(leye,color='red') img.annotatePoint(reye,color='red') img.annotateLine(pv.Point(rect.x,rect.y),pv.Point(rect.x+rect.w,rect.y+rect.h), color='red') img.annotateLine(pv.Point(rect.x+rect.w,rect.y),pv.Point(rect.x,rect.y+rect.h), color='red') if success == None: rect = self.enrolling img.annotateLine(pv.Point(rect.x,rect.y),pv.Point(rect.x+rect.w,rect.y+rect.h), color='yellow') img.annotateLine(pv.Point(rect.x+rect.w,rect.y),pv.Point(rect.x,rect.y+rect.h), color='yellow') else: #enroll in the identification algorithm pass else: for rect,leye,reye in faces: img.annotateRect(rect,color='blue') if eye_processing: img.annotatePoint(leye,color='blue') img.annotatePoint(reye,color='blue') if self.face_rec.isTrained(): self.label_time = time.time() for rect,leye,reye in faces: label = self.face_rec.predict(img,leye,reye) names.append([0.5*(leye+reye),label]) self.label_time = time.time() - self.label_time # Displaying Annotated Frame im = img.asAnnotated() if self.mirror_menuitem.IsChecked(): im = im.transpose(FLIP_LEFT_RIGHT) if self.enrolling != None: draw = PIL.ImageDraw.Draw(im) x,y = self.enrolling.x,self.enrolling.y if self.mirror_menuitem.IsChecked(): x = 640 - (x + self.enrolling.w) self.enroll_count += 1 draw.text((x+10,y+10), "Enrolling: %2d 
of %2d"%(self.enroll_count,self.enroll_max), fill='yellow', font=self.arialblack24) del draw if self.enroll_count >= self.enroll_max: print "Count:",self.enroll_count if len(self.enroll_list) == 0: warning_dialog = wx.MessageDialog(self, "No faces were detected during the enrollment process. Please face towards the camera and keep your face in the yellow rectangle during enrollment.", style=wx.ICON_EXCLAMATION | wx.OK, caption="Enrollment Error") warning_dialog.ShowModal() else: name_dialog = wx.TextEntryDialog(self, "Please enter a name to associate with the face. (%d faces captured)"%len(self.enroll_list), caption = "Enrollment ID") result = name_dialog.ShowModal() sub_id = name_dialog.GetValue() if result == wx.ID_OK: if sub_id == "": print "Warning: Empty Subject ID" warning_dialog = wx.MessageDialog(self, "A name was entered in the previous dialog so this face will not be enrolled in the database. Please repeat the enrollment process for this person.", style=wx.ICON_EXCLAMATION | wx.OK, caption="Enrollment Error") warning_dialog.ShowModal() else: for data,rect,leye,reye in self.enroll_list: self.face_rec.addTraining(data,leye,reye,sub_id) self.setupState() self.enroll_count = 0 self.enrolling = None self.enroll_list = [] if len(names) > 0: draw = PIL.ImageDraw.Draw(im) for pt,name in names: x,y = pt.X(),pt.Y() w,h = draw.textsize(name,font=self.arialblack24) if self.mirror_menuitem.IsChecked(): x = 640 - x - 0.5*w else: x = x - 0.5*w draw.text((x,y-20-h), name, fill='green', font=self.arialblack24) del draw wxImg = wx.EmptyImage(im.size[0], im.size[1]) wxImg.SetData(im.tostring()) bm = wxImg.ConvertToBitmap() self.static_bitmap.SetBitmap(bm) # Update timing gauges full_time = time.time() - starttime if self.timing_window != None: self.timing_window.update(self.detect_time,self.eye_time,self.label_time,full_time) self.ids_text.SetLabel("%d"%(self.face_rec.n_labels,)) self.faces_text.SetLabel("%d"%(self.face_rec.n_faces,)) sleep_time = 1 if 
sys.platform.startswith("linux"): sleep_time = 10 # TODO: For macosx milliseconds should be 1 # TODO: For linux milliseconds may need to be set to a higher value 10 self.timer.Start(milliseconds = sleep_time, oneShot = 1) def setupState(self): #print "state",self.face_rec.n_labels,self.IsEnabled() if self.face_rec.n_labels >= 2: self.train_button.Enable() else: self.train_button.Disable() def onBitmapResize(self,event): w = event.GetSize().GetWidth() h = event.GetSize().GetHeight() self.static_bitmap.SetSize(event.GetSize()) def onClick(self,event): ''' Process a click in the Video window which starts the enrollment process. ''' x = event.GetX() y = event.GetY() if self.mirror_menuitem.IsChecked(): x = 640-x for rect,leye,reye in self.current_faces: if rect.containsPoint(pv.Point(x,y)): self.enrolling = rect self.enroll_count = 0 self.enroll_max = int(self.enroll_choice.GetStringSelection()) def findFaces(self,im): eye_processing = self.eye_menuitem.IsChecked() self.detect_time = time.time() rects = self.face_detector.detect(im) self.detect_time = time.time() - self.detect_time self.eye_time = time.time() if eye_processing: faces = self.fel.locateEyes(im, rects) else: faces = [] for rect in rects: affine = pv.AffineFromRect(rect,(1,1)) leye = affine.invertPoint(AVE_LEFT_EYE) reye = affine.invertPoint(AVE_RIGHT_EYE) faces.append([rect,leye,reye]) self.eye_time = time.time() - self.eye_time self.current_faces = faces return faces def onAbout(self,event): wx.MessageBox( ABOUT_MESSAGE, "About FaceL", wx.OK | wx.ICON_INFORMATION ) def onLicense(self,event): wx.MessageBox( LICENSE_MESSAGE, "FaceL License", wx.OK | wx.ICON_INFORMATION ) def onNull(self,*args,**kwargs): pass def onSVMTune(self,event): dialog = SVMTuningDialog(self, self.svm_mode, self.svm_C, self.svm_Gamma) dialog.CenterOnParent() result = dialog.ShowModal() if result == wx.ID_OK: self.svm_mode = dialog.mode self.svm_C = dialog.C self.svm_Gamma = dialog.Gamma print "SVM Tuning Info <MODE:%s; C:%0.2e; 
Gamma:%0.2e>"%(self.svm_mode,self.svm_C,self.svm_Gamma) dialog.Destroy() def onTiming(self,event): if self.timing_window == None: self.timing_window = TimingWindow(self, wx.NewId(),"Performance") self.timing_window.CenterOnParent() self.timing_window.Show(True) self.timing_window.Bind(wx.EVT_CLOSE, self.onCloseTiming, id=self.timing_window.GetId()) else: self.timing_window.Show(True) self.timing_window.Raise() def onCloseTiming(self,event): self.timing_window.Destroy() self.timing_window = None
class LiveDemoFrame(wx.Frame): def __init__(self,parent,id,name,demos=DEMO_DEFAULTS,size=(800,550)): wx.Frame.__init__(self,parent,id,name,size=size) # ---------------- Basic Data ------------------- self.webcam = Webcam() self.harris = DetectorHarris() self.dog = DetectorDOG(n=100,selector='best') self.face = CascadeDetector() self.demos = demos # ------------- Other Components ---------------- self.CreateStatusBar() # ------------------- Menu ---------------------- # Creating the menubar. # ----------------- Image List ------------------ # --------------- Image Display ----------------- self.static_bitmap = wx.StaticBitmap(self,wx.NewId(), bitmap=wx.EmptyBitmap(640, 480)) self.radios = wx.RadioBox(self,wx.NewId(),'Demos', choices=['None'] + self.demos.keys(), style=wx.RA_SPECIFY_ROWS) self.mirror = wx.CheckBox(self,wx.NewId(),'Mirror') self.mirror.SetValue(True) # --------------- Window Layout ----------------- grid = wx.FlexGridSizer(2,2) grid.Add(self.static_bitmap) grid.Add(self.radios) grid.Add(self.mirror) self.SetAutoLayout(True) self.SetSizer(grid) self.Layout() # ----------------------------------------------- self.timer = FrameTimer(self) self.timer.Start(200) # -------------- Event Handleing ---------------- wx.EVT_SIZE(self.static_bitmap, self.onBitmapResize) wx.EVT_LEFT_DOWN(self.static_bitmap, self.onClick) wx.EVT_TIMER(self,-1,self.onTmp) #wx.EVT_CLOSE(self,self.onClose) def onTmp(self): print "Notify" def onFrame(self,event=None): self.timer.Stop() starttime = time.time() img = self.webcam.query() selection = self.radios.GetStringSelection() if selection == 'None': pass else: for key,func in self.demos.iteritems(): if key == selection: img = func(img) print "Displaying Annotated Frame:", selection im = img.asAnnotated() if self.mirror.GetValue(): im = im.transpose(FLIP_LEFT_RIGHT) wxImg = wx.EmptyImage(im.size[0], im.size[1]) wxImg.SetData(im.tostring()) bm = wxImg.ConvertToBitmap() self.static_bitmap.SetBitmap(bm) print "Frame Time:",time.time() - 
starttime self.timer.Start(milliseconds = 1, oneShot = 1) def onBitmapResize(self,event): w = event.GetSize().GetWidth() h = event.GetSize().GetHeight() self.static_bitmap.SetSize(event.GetSize()) # ------------- Event Handlers --------------- def onClick(self,event): self.onFrame(event)