コード例 #1
0
ファイル: testsuite.py プロジェクト: bolme/pyvision
    def test_ASEFEyeLocalization(self):
        '''FilterEyeLocator: Scrapshots Both10 rate == 0.5200...............'''
        # NOTE(review): the docstring previously claimed a Both10 rate of
        # 0.4800 while the assertion below expects 0.5200 -- the docstring
        # has been brought in sync with the asserted value.

        # Use a module-level image log if one was injected into globals().
        ilog = None
        if 'ilog' in globals():
            ilog = globals()['ilog']

        # Load a face database
        ssdb = ScrapShotsDatabase()

        # Create a face detector
        face_detector = CascadeDetector()

        # Create an eye locator
        eye_locator = FilterEyeLocator()

        # Create an eye detection test
        edt = EyeDetectionTest(name='asef_scraps')

        # Only the first 25 faces are scored to keep the test fast.
        for face_id in list(ssdb.keys())[:25]:
            face = ssdb[face_id]
            im = face.image

            # Annotate ground truth: circle radius is 10% of the
            # interocular distance.
            dist = face.left_eye.l2(face.right_eye)
            dist = np.ceil(0.1 * dist)
            im.annotateCircle(face.left_eye, radius=dist, color='white')
            im.annotateCircle(face.right_eye, radius=dist, color='white')

            # Detect the faces
            faces = face_detector.detect(im)

            # Detect the eyes
            pred_eyes = eye_locator(im, faces)
            for rect, leye, reye in pred_eyes:
                im.annotateRect(rect)
                im.annotateCircle(leye, radius=1, color='red')
                im.annotateCircle(reye, radius=1, color='red')

            truth_eyes = [[face.left_eye, face.right_eye]]

            pred_eyes = [[leye, reye] for rect, leye, reye in pred_eyes]

            # Add to eye detection test
            edt.addSample(truth_eyes, pred_eyes, im=im, annotate=True)
            if ilog is not None:
                ilog.log(im, label='test_ASEFEyeLocalization')

        edt.createSummary()

        # Very poor accuracy on the scrapshots database
        self.assertAlmostEqual(edt.face_rate, 1.0000, places=3)
        self.assertAlmostEqual(edt.both25_rate, 0.8800, places=3)
        self.assertAlmostEqual(edt.both10_rate, 0.5200, places=3)
        self.assertAlmostEqual(edt.both05_rate, 0.2800, places=3)
コード例 #2
0
ファイル: testsuite.py プロジェクト: wolfram2012/MOSSE
    def test_ASEFEyeLocalization(self):
        '''FilterEyeLocator: Scrapshots Both10 rate == 0.5200...............'''
        # NOTE(review): docstring previously said 0.4800 but the assertion
        # below expects 0.5200 -- kept in sync with the assertion.

        # Use a module-level image log if one was injected into globals().
        ilog = None
        if 'ilog' in globals():
            ilog = globals()['ilog']

        # Load a face database
        ssdb = ScrapShotsDatabase()

        # Create a face detector
        face_detector = CascadeDetector()

        # Create an eye locator
        eye_locator = FilterEyeLocator()

        # Create an eye detection test
        edt = EyeDetectionTest(name='asef_scraps')

        # list(...) makes the keys sliceable under both Python 2 and 3
        # (dict.keys() is a view in Python 3 and cannot be sliced).
        for face_id in list(ssdb.keys())[:25]:
            face = ssdb[face_id]
            im = face.image

            # Annotate ground truth: circle radius is 10% of the
            # interocular distance.
            dist = face.left_eye.l2(face.right_eye)
            dist = np.ceil(0.1 * dist)
            im.annotateCircle(face.left_eye, radius=dist, color='white')
            im.annotateCircle(face.right_eye, radius=dist, color='white')

            # Detect the faces
            faces = face_detector.detect(im)

            # Detect the eyes
            pred_eyes = eye_locator(im, faces)
            for rect, leye, reye in pred_eyes:
                im.annotateRect(rect)
                im.annotateCircle(leye, radius=1, color='red')
                im.annotateCircle(reye, radius=1, color='red')

            truth_eyes = [[face.left_eye, face.right_eye]]

            pred_eyes = [[leye, reye] for rect, leye, reye in pred_eyes]

            # Add to eye detection test
            edt.addSample(truth_eyes, pred_eyes, im=im, annotate=True)
            if ilog is not None:
                ilog.log(im, label='test_ASEFEyeLocalization')

        edt.createSummary()

        # Very poor accuracy on the scrapshots database
        self.assertAlmostEqual(edt.face_rate, 1.0000, places=3)
        self.assertAlmostEqual(edt.both25_rate, 0.8800, places=3)
        self.assertAlmostEqual(edt.both10_rate, 0.5200, places=3)
        self.assertAlmostEqual(edt.both05_rate, 0.2800, places=3)
コード例 #3
0
    def __init__(self,
                 face_detector=CascadeDetector(),
                 tile_size=(128, 128),
                 subtile_size=(32, 32),
                 left_center=pv.Point(39.325481787836871, 50.756936769089975),
                 right_center=pv.Point(91.461135538006289, 50.845357457309881),
                 validate=None,
                 n_iter=1,
                 annotate=False,
                 **kwargs):
        ''' 
        Create an eye locator.  This default implementation uses a 
        cascade classifier for face detection and then SVR for eye
        location. 

        NOTE(review): the ``face_detector`` default is evaluated once at
        definition time, so every instance constructed without an explicit
        detector shares the same CascadeDetector object -- confirm that
        sharing is intended.

        @param face_detector: detector used to find candidate face rects.
        @param tile_size: pixel size of the normalized face tile.
        @param subtile_size: pixel size of the per-eye sub-tiles.
        @param left_center: presumably the mean left-eye location in tile
            coordinates (see TODO below) -- verify against training code.
        @param right_center: presumably the mean right-eye location in tile
            coordinates -- verify against training code.
        @param validate: optional validation hook; stored as-is.
        @param n_iter: number of localization iterations.
        @param annotate: if True, enables annotation during processing.
        @param kwargs: forwarded to createLocators().
        '''
        #TODO: Learn the mean eye locations during training.
        self.face_detector = face_detector
        self.left_center = left_center
        self.right_center = right_center
        self.tile_size = tile_size
        self.subtile_size = subtile_size
        self.validate = validate
        self.n_iter = n_iter
        self.annotate = annotate
        # Perturbation-based training augmentation is always enabled here.
        self.perturbations = True

        # Number of training images where the face detection did not work.
        self.detection_failures = 0

        # point locators that learn to find the eyes.
        self.createLocators(**kwargs)
コード例 #4
0
ファイル: videotasks.py プロジェクト: tsoonjin/pyvision
    def execute(self, frame, detector=None):
        """Detect faces in *frame*, annotate them, and publish the results.

        Lazily constructs a CascadeDetector on first use; the detector is
        returned in the output tuples so the framework can feed the same
        instance back in for subsequent frames.

        @param frame: image to process (annotated in place).
        @param detector: face detector from a previous call, or None.
        @return: list of (tag, frame_id, payload) result tuples.
        """
        # 'is None' identity test instead of '== None'; parenthesized print
        # works under both Python 2 and Python 3.
        if detector is None:
            print("Initializing Face Detector.")
            detector = CascadeDetector(min_size=(128, 128))

        faces = detector(frame)
        for rect in faces:
            frame.annotateRect(rect)

        return [('FACES', self.getFrameId(), faces),
                ("_FACE_DETECTOR", self.getFrameId(), detector)]
コード例 #5
0
ファイル: LiveDemo.py プロジェクト: mdqyy/pyvision
    def __init__(self, parent, id, name, demos=DEMO_DEFAULTS, size=(800, 550)):
        '''Create the live-demo frame: webcam source, detectors, and widgets.

        The ``id`` parameter shadows the builtin; kept to match the
        wx.Frame constructor convention.
        '''
        wx.Frame.__init__(self, parent, id, name, size=size)

        # ---------------- Basic Data -------------------
        self.webcam = Webcam()
        self.harris = DetectorHarris()
        self.dog = DetectorDOG(n=100, selector='best')
        self.face = CascadeDetector()
        self.demos = demos

        # ------------- Other Components ----------------
        self.CreateStatusBar()

        # ------------------- Menu ----------------------

        # Creating the menubar.

        # ----------------- Image List ------------------

        # --------------- Image Display -----------------
        self.static_bitmap = wx.StaticBitmap(self,
                                             wx.NewId(),
                                             bitmap=wx.EmptyBitmap(640, 480))

        # NOTE(review): ['None'] + self.demos.keys() requires Python 2,
        # where dict.keys() returns a list; under Python 3 this raises
        # TypeError (use list(self.demos.keys()) there).
        self.radios = wx.RadioBox(self,
                                  wx.NewId(),
                                  'Demos',
                                  choices=['None'] + self.demos.keys(),
                                  style=wx.RA_SPECIFY_ROWS)

        self.mirror = wx.CheckBox(self, wx.NewId(), 'Mirror')
        self.mirror.SetValue(True)

        # --------------- Window Layout -----------------
        grid = wx.FlexGridSizer(2, 2)
        grid.Add(self.static_bitmap)
        grid.Add(self.radios)
        grid.Add(self.mirror)

        self.SetAutoLayout(True)
        self.SetSizer(grid)
        self.Layout()

        # -----------------------------------------------
        # Fire onTmp every 200 ms to refresh the video display.
        self.timer = FrameTimer(self)
        self.timer.Start(200)
        # -------------- Event Handleing ----------------
        wx.EVT_SIZE(self.static_bitmap, self.onBitmapResize)
        wx.EVT_LEFT_DOWN(self.static_bitmap, self.onClick)
        wx.EVT_TIMER(self, -1, self.onTmp)
コード例 #6
0
    def test_detect_scraps_opencv(self):
        """Run the OpenCV cascade over the scraps set and check the hit rate."""
        detector = CascadeDetector(OPENCV_CASCADE)
        detection_test = FaceDetectionTest(name='scraps')

        self.eyes = EyesFile(os.path.join(SCRAPS_FACE_DATA, "coords.txt"))
        for name in self.eyes.files():
            image_path = os.path.join(SCRAPS_FACE_DATA, name + ".pgm")
            image = pv.Image(image_path)
            detections = detector(image)
            ground_truth = self.eyes.getFaces(image.filename)
            detection_test.addSample(ground_truth, detections, im=image)

        # TODO: Version 2 performance is better
        self.assertAlmostEqual(detection_test.pos_rate,
                               0.98265895953757221, places=2)
コード例 #7
0
ファイル: __init__.py プロジェクト: Dfred/concept-robot
    def __init__(self, videoDevice_index, comm = None):
        """Initialize the capture thread.

        @param videoDevice_index: index of the camera device to open.
        @param comm: optional communication link (presumably to the robot
            controller); may be None, in which case no commands are sent.
        """
        
        threading.Thread.__init__(self)
        self.comm = comm
        # Last colour sampled from the video stream; None until detected.
        self.current_colour = None

        self.face_detector = CascadeDetector(cascade_name=config.haar_casc,min_size=(50,50), image_scale=0.5)
        self.webcam = Webcam(videoDevice_index)
        
        if config.use_gui: # create windows            
            cv.NamedWindow('Camera', cv.CV_WINDOW_AUTOSIZE)
            # Trackbars invoke the change_value* callbacks, which
            # presumably adjust module-level detection thresholds -- confirm.
            cv.CreateTrackbar ('edge threshold', 'Camera', 50, 100, self.change_value1)
            cv.CreateTrackbar ('circle threshold', 'Camera', 90, 100, self.change_value2)
            cv.CreateTrackbar ('gaussian blur', 'Camera', 11, 50, self.change_value3)
            cv.CreateTrackbar ('hue', 'Camera', 0, 100, self.change_value4)
コード例 #8
0
ファイル: facel.py プロジェクト: iliucan/ua-cs665-ros-pkg
    def __init__(self):
        '''Set up the ROS node: services, topics, vision pipeline, and state.'''
        rospy.init_node("fasel")
        # ROS services for face enrollment, recognizer training, and reset.
        self.enrollService = rospy.Service("enroll", Enroll, self.handleEnrollFace)
        self.trainService = rospy.Service("train", Train, self.handleTrain)
        self.resetService = rospy.Service("reset", Reset, self.handleReset)
        # Publishers / subscriber wiring the node into the image pipeline.
        self.namesPub = rospy.Publisher("names", String)
        self.personEventPub = rospy.Publisher("person_event", PersonEvent)
        self.imagePub = rospy.Publisher("images", Image)
        self.imageSub = rospy.Subscriber("image", Image, self.handleImage)

        self.bridge = CvBridge()

        # Vision components: face detection, eye localization, recognition.
        self.face_detector = CascadeDetector(cascade_name=CASCADE_NAME, image_scale=0.5)
        self.fel = loadFilterEyeLocator(FEL_NAME)
        self.face_rec = SVMFaceRec()

        self.arialblack24 = PIL.ImageFont.truetype(ARIAL_BLACK_NAME, 24)

        # SVM configuration; C/gamma presumably used when not in automatic
        # tuning mode -- confirm in the training handler.
        self.svm_mode = SVM_AUTOMATIC
        self.svm_C = 4.000e00
        self.svm_Gamma = 9.766e-04

        # Enrollment state: at most enroll_max samples per person.
        self.current_faces = []
        self.enrolling = None
        self.enroll_count = 0
        self.enroll_max = 64
        self.enroll_list = []
        self.enrollCondition = Condition()

        self.isTrained = False

        # Per-face tracking bookkeeping.
        self.nameList = []
        self.faceNames = []
        self.faceCount = 0
        self.faceTrack = dict()

        # Feature toggles.
        self.face_processing = True
        self.eye_processing = True
        self.image_flip = True

        self.reqQueueLock = Lock()
        self.faceTrackLock = Lock()

        # Background thread that periodically publishes person events.
        self.is_running = True
        self.eventUpdateDelay = 2.0  # seconds
        self.eventUpdateTimer = Thread(target=self.eventUpdate)
        self.eventUpdateTimer.start()
コード例 #9
0
    def test_training(self):
        '''
        This trains the FaceFinder on the scraps database.
        '''
        scraps_dir = join(pv.__path__[0], 'data', 'csuScrapShots')

        # Ground-truth eye coordinates shipped with pyvision.
        eyes_file = EyesFile(join(scraps_dir, 'coords.txt'))

        # Face detector backed by the celebdb2 cascade configuration.
        cascade_file = join(pv.__path__[0], 'config',
                            'facedetector_celebdb2.xml')
        detector = CascadeDetector(cascade_file)

        # Train the eye detector from the database with a fixed seed so the
        # resulting accuracy is reproducible.
        trained = SVMEyeDetectorFromDatabase(eyes_file,
                                             scraps_dir,
                                             image_ext=".pgm",
                                             face_detector=detector,
                                             random_seed=0)
        results = EyeDetectionTest(name='scraps')

        for img in self.images:
            detections = trained.detect(img)
            predicted = [(pleye, preye) for _, _, pleye, preye in detections]
            ground_truth = self.eyes.getEyes(img.filename)
            results.addSample(ground_truth, predicted, im=img, annotate=False)

        self.assertAlmostEqual(results.face_rate, 0.924855491329, places=3)
コード例 #10
0
    def __init__(self,
                 face_detector=CascadeDetector(),
                 tile_size=(128, 128),
                 validate=None,
                 n_iter=1,
                 annotate=False,
                 **kwargs):
        ''' 
        Create an eye locator.  This default implementation uses a 
        cascade classifier for face detection and then SVR for eye
        location. 

        NOTE(review): the ``face_detector`` default is evaluated once at
        definition time, so instances constructed without an explicit
        detector share the same CascadeDetector object -- confirm intended.

        NOTE(review): ``**kwargs`` is accepted but never used in this
        constructor -- confirm whether it should be forwarded somewhere.

        @param face_detector: detector used to find candidate face rects.
        @param tile_size: pixel size of the normalized face tile.
        @param validate: optional validation hook; stored as-is.
        @param n_iter: number of localization iterations.
        @param annotate: if True, enables annotation during processing.
        '''
        self.face_detector = face_detector
        # Learned eye locations; populated during training.
        self.left_eye = None
        self.right_eye = None
        self.tile_size = tile_size
        self.validate = validate
        self.n_iter = n_iter
        self.annotate = annotate
        # Perturbation-based training augmentation is always enabled here.
        self.perturbations = True

        # this object handles pca normalization
        self.normalize = VectorClassifier.VectorClassifier(
            VectorClassifier.TYPE_REGRESSION,
            reg_norm=VectorClassifier.REG_NORM_NONE)

        # point locators that learn to find the eyes.
        self.left_locator = SVMLocator(
            svm_type=SVM.TYPE_NU_SVR, normalization=VectorClassifier.NORM_NONE)
        self.right_locator = SVMLocator(
            svm_type=SVM.TYPE_NU_SVR, normalization=VectorClassifier.NORM_NONE)

        # Number of training images where the face detection did not work.
        self.detection_failures = 0

        self.training_labels = []
コード例 #11
0
ファイル: LiveDemo.py プロジェクト: mdqyy/pyvision
 def __init__(self):
     '''Create the demo state; a cascade face detector is the only member.'''
     self.face = CascadeDetector()
コード例 #12
0
ファイル: __init__.py プロジェクト: Dfred/concept-robot
class CaptureVideo(threading.Thread):
    """ captures video stream from camera and performs various detections (face, edge, circle)

    Python 2 code (print statements, iteritems-era OpenCV ``cv`` API).
    Runs as a daemonless thread; run() enters a blocking capture loop.
    """
    
    def __init__(self, videoDevice_index, comm = None):
        """ initiate variables

        videoDevice_index -- index of the camera device to open.
        comm -- optional communication link to the robot; when None the
                gaze/neck commands are skipped.
        """
        
        threading.Thread.__init__(self)
        self.comm = comm
        # Last colour sampled from the stream; None until one is recorded.
        self.current_colour = None

        self.face_detector = CascadeDetector(cascade_name=config.haar_casc,min_size=(50,50), image_scale=0.5)
        self.webcam = Webcam(videoDevice_index)
        
        if config.use_gui: # create windows            
            cv.NamedWindow('Camera', cv.CV_WINDOW_AUTOSIZE)
            # The trackbar callbacks write the module-level edge_threshold*
            # globals (see change_value1..change_value4 below).
            cv.CreateTrackbar ('edge threshold', 'Camera', 50, 100, self.change_value1)
            cv.CreateTrackbar ('circle threshold', 'Camera', 90, 100, self.change_value2)
            cv.CreateTrackbar ('gaussian blur', 'Camera', 11, 50, self.change_value3)
            cv.CreateTrackbar ('hue', 'Camera', 0, 100, self.change_value4)


    def run(self):
        # Thread entry point: delegate to the blocking capture loop.
        self.main_loop()
    

    def detect_face(self, img):
        """ detect faces in the given video stream

        Annotates every detected face (and optionally the eyes), then
        drives gaze/neck tracking toward the widest -- i.e. closest -- face.
        """
        
        faces = self.findFaces(img)
        
        if faces:
            close_face_rect = None
            close_face_w = 0.0
            for rect, leye, reye in faces:
                img.annotateRect(rect, color='blue')    # draw square around face
                if rect.w > close_face_w:               # get closest face coordinates
                    close_face_w = rect.w
                    close_face_rect = rect
                if config.eye_d:                             # draw point on eyes
                    img.annotatePoint(leye,color='blue')
                    img.annotatePoint(reye,color='blue')
                    
            if config.follow_face_gaze:
                # Offset of the face centre from the (320, 240) image centre.
                relative_x = (320 - (close_face_rect.x + (close_face_rect.w/2.0)))
                relative_y = (240 - (close_face_rect.y + (close_face_rect.h/2.0)))
                gaze = self.follow_face_with_gaze(relative_x, relative_y, close_face_rect.w)
                neck = self.follow_face_with_neck(relative_x, relative_y, gaze[1])
                if self.comm:
                    if self.comm.last_ack != "wait" and gaze:
                        self.comm.set_neck_gaze(gaze, neck)
                        self.comm.last_ack = "wait"
                    
                    
    def findFaces(self, im):
        """ run the face detection algorithm

        Eye positions are not detected directly: average eye locations
        (AVE_LEFT_EYE / AVE_RIGHT_EYE) are projected back into image
        coordinates through each face rect's affine transform.
        """
        rects = self.face_detector.detect(im) 
        faces = []
        for rect in rects:
            affine = pv.AffineFromRect(rect,(1,1))
            leye = affine.invertPoint(AVE_LEFT_EYE)
            reye = affine.invertPoint(AVE_RIGHT_EYE)
            faces.append([rect,leye,reye])

        self.current_faces = faces
        return faces
    
    
    def follow_face_with_gaze(self, x, y, width):
        """adjust coordinates of detected faces to mask
        """
        #TODO: change coordinates that are kept in config into something local
        if config.slow_adjust and (config.face_x is not None and config.face_y is not None):
            # Low-pass filter: move a fraction (config.gain) toward target.
            config.face_x += (x - config.face_x) * config.gain
            config.face_y += (y - config.face_y) * config.gain
        else:
            config.face_x = x
            config.face_y = y
            
        # Empirical mapping from face width (px) to distance -- presumably
        # calibrated for this specific camera; confirm before reuse.
        face_distance = ((-88.4832801364568 * math.log(width)) + 538.378262966656)
        x_dist = ((config.face_x/1400.6666)*face_distance)/100
        y_dist = ((config.face_y/700.6666)*face_distance)/100
        if config.camera_on_projector:
	    return (x_dist, (face_distance/100.0), y_dist)  # x is inverted for compatibility
	else:
	    return (-x_dist, (face_distance/100.0), y_dist)
            
            
    def follow_face_with_neck(self, x, y, face_distance):
        """adjust coordinates of detected faces to neck movement

        Returns an (orientation, translation) pair, or None (implicitly)
        when the face is inside all thresholds and no movement is needed.
        """
        move = False
        if x > 95 or x < -95: # threshold
            distance_x = (x/-640.0) * 0.2 * math.pi
            move = True
        else:
            distance_x = 0.0
            
        if y > 60 or y < -60: # threshold
            distance_y = (y/-480.0) * 0.1 * math.pi
            move = True
        else:
            distance_y = 0.0

        if face_distance > 1.0:    # threshold for moving forward when perceived face is far
            config.getting_closer_to_face = 1.0
        if config.getting_closer_to_face > 0.05:
            distance_z = 0.1
            config.getting_closer_to_face += -0.1
            move = True
        # NOTE(review): when the branch above sets distance_z = 0.1, the
        # if/else below immediately overwrites it with 0 unless
        # face_distance < 0.2 -- confirm this is intended.
        if face_distance < 0.2:    # threshold for moving back when face is too close
            distance_z = -0.3 + face_distance
            move = True
        else:
            distance_z = 0
        if move:
            return ((distance_y, .0, -distance_x), (.0,distance_z,.0))
                
    
    def detect_edge(self, image):
        # Canny edge map; low threshold from the 'edge threshold' trackbar,
        # high threshold fixed at 3x the low threshold.
        grayscale = cv.CreateImage(cv.GetSize(image), 8, 1)
        cv.CvtColor(image, grayscale, cv.CV_BGR2GRAY)
        cv.Canny(grayscale, grayscale, edge_threshold1, edge_threshold1 * 3, 3)
        return grayscale
        
        
    def detect_circle(self, image, image_org):
        """Hough-circle detection with optional gaze/neck tracking and
        colour matching on the detected circles.
        """
        grayscale = cv.CreateImage(cv.GetSize(image), 8, 1)
        grayscale_smooth = cv.CreateImage(cv.GetSize(image), 8, 1)
        cv.CvtColor(image, grayscale, cv.CV_BGR2GRAY)
        if config.edge_d_non_vision:
            cv.Canny(grayscale, grayscale, edge_threshold1, edge_threshold1 * 3, 3)
        cv.Smooth(grayscale, grayscale_smooth, cv.CV_GAUSSIAN, edge_threshold3)
        mat = cv.CreateMat(100, 1, cv.CV_32FC3 )
        cv.SetZero(mat)
        cv.HoughCircles(grayscale_smooth, mat, cv.CV_HOUGH_GRADIENT, 2, 50, 200, (edge_threshold2 + 150) )
        circles_simple = []
        gazing = None
        if mat.rows != 0:
            for i in xrange(0, mat.rows):
                c = mat[i,0]
                point = (int(c[0]), int(c[1]))
                radius = int(c[2])
                cv.Circle(image, point, radius, (0, 0, 255))
                if config.detect_colour:
                    self.get_colour(image, image_org, [int(c[0]), int(c[1])], radius)
                    config.detect_colour = False
                    colour = self.record_colour(image, image_org, [int(c[0]), int(c[1])], radius)
                    circles_simple.append([point, radius, colour])
            
        # NOTE(review): circles_simple[0][0] is an (x, y) tuple, so the
        # .x/.y attribute accesses below raise AttributeError -- confirm.
        if config.follow_ball_gaze and circles_simple:
            x_adjust = 320 - circles_simple[0][0].x
            y_adjust = 240 - circles_simple[0][0].y
            gazing = self.follow_ball_with_gaze(x_adjust, y_adjust)
        
        if config.follow_ball_neck and circles_simple:
            #self.comm.send_msg("recognizing;*;1;;;;tag_SPEECH")
            x_adjust = 320 - circles_simple[0][0].x
            y_adjust = 240 - circles_simple[0][0].y
            # NOTE(review): distance_x / distance_y stay unbound when the
            # adjustments fall inside the dead bands below -- confirm.
            if x_adjust < 315 or x_adjust > 325:
                distance_x = (x_adjust/-640.0) * 0.2 * math.pi
            if y_adjust < 235 or y_adjust > 245:
                distance_y = (y_adjust/-480.0) * 0.2 * math.pi
            if self.comm.last_ack != "wait":
                    if gazing:
#                        self.comm.set_neck_gaze(gazing, "(" + str(config.neck_pos[0] + distance_y) + ",0," + str(config.neck_pos[2] + distance_x) + ")", "TRACK_GAZE")
                        pass
                    else:
                        self.comm.set_neck_orientation( "(" + str(config.neck_pos[0] + distance_y) + ",0," + str(config.neck_pos[2] + distance_x) + ")", "TRACKING")
                    config.neck_pos[2] += distance_x
                    config.neck_pos[0] += distance_y
                    self.comm.last_ack = "wait"
        

        if config.colour_to_find and circles_simple:
            # Pick the detected circle whose recorded colour is nearest to
            # the target colour; 999999 marks circles with no colour.
            dist = []
            for i in circles_simple:
                if i[2]:
                    #dist.append(auks.calculate_distance_hsv(params.colour_to_find, i[2]))
                    dist.append(auks.calculate_distance(config.colour_to_find, i[2]))
                else:
                    dist.append(999999)
            index = auks.posMin(dist)
            #print dist
            if dist[index] < config.detect_threshold:
                #self.comm.send_msg("recognizing;*;1;;;;tag_SPEECH")
                cv.Circle(image, circles_simple[index][0], 2, cvScalar(0, 100, 255), 2)
                x_adjust = 320 - circles_simple[index][0].x
                y_adjust = 240 - circles_simple[index][0].y
                if x_adjust < 315 or x_adjust > 325:
                    distance_x = (x_adjust/-640.0) * 0.2 * math.pi
                if y_adjust < 235 or y_adjust > 245:
                    distance_y = (y_adjust/-480.0) * 0.2 * math.pi
                if self.comm.last_ack != "wait":
#                        print "x_dist:", distance_x, " y_dist:", distance_y
#                        print "x_neck:", str(config.neck_pos[2]), "   y_neck:", str(config.neck_pos[0])
#                        print "x:", str(config.neck_pos[2] + distance_x), "   y:", str(config.neck_pos[0] + distance_y)
                        if gazing:
#                            self.comm.set_neck_gaze(gazing, "(" + str(config.neck_pos[0] + distance_y) + ",0," + str(config.neck_pos[2] + distance_x) + ")", "TRACK_GAZE")
                            pass
                        else:
                            self.comm.set_neck_orientation( "(" + str(config.neck_pos[0] + distance_y) + ",0," + str(config.neck_pos[2] + distance_x) + ")", "TRACKING")
                        config.neck_pos[2] += distance_x
                        config.neck_pos[0] += distance_y
                        self.comm.last_ack = "wait"
                              
        return circles_simple
                 
                 
    def follow_ball_with_gaze(self, x, y):
        """adjust coordinates of detected faces to mask
        """
            
        #face_distance = ((-88.4832801364568 * math.log(width)) + 538.378262966656)
        # Fixed assumed distance; the width-based estimate above is disabled.
        face_distance = 50.0
        x_dist = ((x/1400.6666)*face_distance)/-100
        y_dist = ((y/1400.6666)*face_distance)/100
#        if self.comm:
#            if self.comm.last_ack != "wait":
#                self.comm.set_gaze(str(x_dist) + "," + str(face_distance/100) + "," + str(y_dist))
#                self.comm.last_ack = "wait"
        return str(x_dist) + "," + str(face_distance/100) + "," + str(y_dist)
                 
            
    def get_colour(self, image, image_org, pos, radius):
        """Sample the average RGB colour inside the circle and store it in
        self.current_colour; draws the sampled square on the image."""
        # Shrink the sample square to 70% of the radius to stay inside.
        radius = int(radius*0.7)
        rect = cv.Rect(pos[0]-radius,pos[1]-radius, radius*2, radius*2)
        try:
            subimage = cv.GetSubRect(image_org, rect)
            cv.SaveImage("subimage.png", subimage)
            #cvCvtColor(subimage, subimage, CV_BGR2HSV)  # create hsv version
            scalar = cv.Avg(subimage)
            #self.current_colour = [int((scalar[0]*2)), int(scalar[1]/255.0*100), int(scalar[2]/255.0*100)]
            #print "Average colour value: H:"+ str(int((scalar[0]*2))) + " S:"+ str(int(scalar[1]/255.0*100)) + " V:"+ str(int(scalar[2]/255.0*100))
            self.current_colour = [int((scalar[2])), int(scalar[1]), int(scalar[0])]
            print "Average colour value: R:"+ str(int((scalar[2]))) + " G:"+ str(int(scalar[1])) + " B:"+ str(int(scalar[0]))
        
        except RuntimeError:
            print "error"
        
        cv.Rectangle(image, cv.Point( pos[0]-radius, pos[1]-radius), cv.Point(pos[0]+ radius, pos[1]+radius),cv.CV_RGB(0, 255, 0), 2, 8, 0)
        
        
    def record_colour(self, image, image_org, pos, radius):
        """Return the average colour inside the circle as a nested
        [['c', [['r', ...], ['g', ...], ['b', ...]]]] structure, or None
        when the circle is partially outside the view or sampling fails."""
        radius = int(radius*0.7)
        if pos[1] > radius: # only record a square when it is in full camera view
            try:
                rect = cv.Rect(pos[0]-radius, pos[1]-radius, radius*2, radius*2)
                subimage = cv.GetSubRect(image_org, rect)
                #cvCvtColor(subimage, subimage, CV_BGR2HSV)  # create hsv version
                scalar = cv.Avg(subimage)
                #return scalar1, [['c', [['h', int((scalar[0]*2))], ['s', int(scalar[1]/255.0*100)], ['v', int(scalar[2]/255.0*100)]]]]
                return [['c', [['r', int((scalar[2]))], ['g', int(scalar[1])], ['b', int(scalar[0])]]]]
            except RuntimeError:
                print "error", "radius:", radius, "position:", pos
                return None
        else:
            return None



    def find_colour(self, image, colour):
        """ searches for the given colour in the image
            colour is in hsv

        NOTE(review): the ``colour`` parameter is unused; the hue band is
        taken from the edge_threshold4 trackbar global instead -- confirm.
        """
        # Create a 8-bit 1-channel image with same size as the frame
        color_mask = cv.CreateImage(cv.GetSize(image), 8, 1)
        image_h = cv.CreateImage(cv.GetSize(image), 8, 1)
        
        cv.CvtColor(image, image, cv.CV_BGR2HSV)  # convert to hsv
        
        cv.Split(image, image_h, None, None, None)
        
        # Find the pixels within the color-range, and put the output in the color_mask
        cv.InRangeS(image_h, cv.Scalar((edge_threshold4*2)-5), cv.Scalar((edge_threshold4*2)+5), color_mask)
        cv.CvtColor(image, image, cv.CV_HSV2BGR)  # convert to bgr
        cv.Set(image, cv.CV_RGB(0, 255, 0), color_mask)
        
        
    def return_colour(self):
        """ returns current colour
        """
        return self.current_colour
 
    
    # The four callbacks below are trackbar handlers; each stores its new
    # slider value in a module-level global read by the detectors.
    def change_value1(self, new_value):
        global edge_threshold1
        edge_threshold1 = new_value
        
    def change_value2(self, new_value):
        global edge_threshold2
        edge_threshold2 = new_value
        
    def change_value3(self, new_value):
        global edge_threshold3
        # Gaussian kernel size must be odd; round even values up.
        if new_value % 2:
            edge_threshold3 = new_value
        else:
            edge_threshold3 = new_value+1
            
    def change_value4(self, new_value):
        global edge_threshold4
        edge_threshold4 = new_value
    
        
    def main_loop(self):
        """Blocking capture loop: poll the webcam, handle key/config
        commands, run the enabled detections, and display the result.

        NOTE(review): ``frame`` is first assigned only after the
        face-detection block, so the config.colour_s and config.save_video
        branches can raise NameError on early iterations; ``writer`` is
        only created in the commented-out line and ``empty`` is presumably
        a module-level image -- confirm all three.
        """
        
        #writer = cv.CreateVideoWriter("out.avi", cv.CV_FOURCC('P','I','M','1'), 30, (640,480),1)
        
        while 1:

            im = self.webcam.query()
            
            # handle events
            key = cv.WaitKey(10)
            if key != -1 and key < 256:
                key = chr(key)
                
            if key == '1' or config.command == '1':
                if config.face_d == False:
                    config.face_d = True                   
                    
            if key == '2' or config.command == 'edge':
                if config.edge_d == False:
                    config.edge_d = True
                    print "detecting edges"
                    
            if key == '3' or config.command == '3':
                if config.save_video == False:
                    config.save_video = True
                    print "saving video"
                    
            if key  == '4' or config.command == '4':
                if config.circle_d == False:
                    config.circle_d = True
                    print "detecting circles"
                    
            if key  == '5' or config.command == '5':
                if config.edge_d_non_vision == False:
                    config.edge_d_non_vision = True
                    print "detecting circles using edge detection"
                else:
                    config.edge_d_non_vision = False
                    
            if key == 's' or config.command == 's':
                if config.show == False:
                    config.show = True
                    print "showing video"

            if key == 'b' or config.command == 'b':
                # Toggle gaze between two fixed game positions.
                if config.game_coors == "10.0, 50.0, 0.0":
                    self.commr.set_gaze("10.0, 50.0, 0.0")
                    config.game_coors = "0.0, 50.0, 0.0"
                else:
                    self.commr.set_gaze("0.0, 50.0, 0.0")
                    config.game_coors = "10.0, 50.0, 0.0"
                    
            if key == 'e' or config.command == 'e':
                config.face_d = False
                config.edge_d = False
                config.circle_d = False
                config.save_video = False
                config.colour_s = False
                print "stop tracking"
                    
            if key == 'q' or config.command == 'q':
                config.quit = True
                
            config.command = '0'
          
            if config.face_d:    # face detection
                if config.face_d_optimised:
                    # Skip detection while a previous command is unacked.
                    if self.comm.last_ack != "wait":
                        self.detect_face(im)
                else:
                    self.detect_face(im)
                
            if config.colour_s:
                self.find_colour(frame, 10)
            
            if config.save_video:    # save
                cv.WriteFrame(writer,frame)
                
            if config.quit:  # quit
                print 'Camera closed'
                break
            
            pil = im.asAnnotated()                                          # get image as PIL              
            rgb = cv.CreateImageHeader(pil.size, cv.IPL_DEPTH_8U, 3)        # create IPL image
            cv.SetData(rgb, pil.tostring())                                 
            frame = cv.CreateImage(cv.GetSize(rgb), cv.IPL_DEPTH_8U,3)      # convert to bgr
            cv.CvtColor(rgb, frame, cv.CV_RGB2BGR)    
            cv.Flip(frame, None, 1)                                         # mirror                    

            if config.circle_d: # circle detection
                frame_org = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U,3)      # convert to bgr
                cv.Copy(frame, frame_org)
                self.detect_circle(frame, frame_org)
                
            if config.edge_d:    # edge detection
                frame_org = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U,3)      # convert to bgr
                cv.Copy(frame, frame_org)
                frame = self.detect_edge(frame)
                
            if frame is None:
                print "error capturing frame"
                break
     
            if config.use_gui:
                if config.show:
                    cv.ShowImage('Camera', frame) # display webcam image
                else:
                    cv.ShowImage('Camera', empty)
コード例 #13
0
ファイル: facel.py プロジェクト: iliucan/ua-cs665-ros-pkg
class Facel:
    """
    ROS node exposing face detection, enrollment, and SVM-based face
    recognition over an image topic.

    Services: "enroll", "train", "reset".  Publishes recognized names on
    "names", debounced enter/exit events on "person_event", and the
    annotated frames on "images".
    """

    def __init__(self):
        # NOTE(review): node name "fasel" looks like a typo of "facel";
        # confirm no launch files depend on it before renaming.
        rospy.init_node("fasel")
        self.enrollService = rospy.Service("enroll", Enroll, self.handleEnrollFace)
        self.trainService = rospy.Service("train", Train, self.handleTrain)
        self.resetService = rospy.Service("reset", Reset, self.handleReset)
        self.namesPub = rospy.Publisher("names", String)
        self.personEventPub = rospy.Publisher("person_event", PersonEvent)
        self.imagePub = rospy.Publisher("images", Image)
        self.imageSub = rospy.Subscriber("image", Image, self.handleImage)

        self.bridge = CvBridge()

        # Face detection / eye location / recognition pipeline.
        self.face_detector = CascadeDetector(cascade_name=CASCADE_NAME, image_scale=0.5)
        self.fel = loadFilterEyeLocator(FEL_NAME)
        self.face_rec = SVMFaceRec()

        self.arialblack24 = PIL.ImageFont.truetype(ARIAL_BLACK_NAME, 24)

        # SVM tuning parameters (used when svm_mode is not automatic).
        self.svm_mode = SVM_AUTOMATIC
        self.svm_C = 4.000e00
        self.svm_Gamma = 9.766e-04

        # Enrollment state shared between onFrame() and enrollFace().
        self.current_faces = []
        self.enrolling = None      # rectangle currently being enrolled, or None
        self.enroll_count = 0
        self.enroll_max = 64
        self.enroll_list = []
        self.enrollCondition = Condition()

        self.isTrained = False

        # Person-tracking state for entered/exited events.
        self.nameList = []
        self.faceNames = []
        self.faceCount = 0
        self.faceTrack = dict()

        self.face_processing = True
        self.eye_processing = True
        self.image_flip = True

        self.reqQueueLock = Lock()     # serializes enroll/train/reset requests
        self.faceTrackLock = Lock()    # guards self.faceTrack

        self.is_running = True
        self.eventUpdateDelay = 2.0  # seconds
        self.eventUpdateTimer = Thread(target=self.eventUpdate)
        self.eventUpdateTimer.start()

    # Convert opencv2 image to pyvision image
    def opencv_to_pyvision(self, cv_image):
        """Convert an OpenCV RGB image to a pyvision image."""
        pil_img = PIL.Image.fromstring("RGB", (cv_image.width, cv_image.height), cv_image.tostring())
        pyimg = pv.Image(pil_img)
        return pyimg

    # Convert pyvision image to opencv2 image
    def pyvision_to_opencv(self, pyimg):
        """Convert a pyvision image to an OpenCV RGB image."""
        # Delegate to the PIL converter instead of duplicating its logic.
        return self.PIL_to_opencv(pyimg.asPIL())

    def PIL_to_opencv(self, pil_img):
        """Convert a PIL RGB image to an OpenCV image sharing its data."""
        cv_img = cv.CreateImageHeader(pil_img.size, cv.IPL_DEPTH_8U, 3)  # RGB image
        cv.SetData(cv_img, pil_img.tostring(), pil_img.size[0] * 3)
        return cv_img

    def onFrame(self, img):
        """
        Process a video frame: detect and annotate faces, collect
        enrollment samples, label known faces, publish person events,
        and publish the annotated image.
        """
        self.eye_time = 0.0

        names = []
        nFaces = 0

        if self.face_processing:

            faces = self.findFaces(img)
            nFaces = len(faces)

            if self.enrolling != None:
                success = None
                for rect, leye, reye in faces:
                    img.annotateRect(self.enrolling, color="yellow")

                    # Only the first face overlapping the enrollment
                    # rectangle is accepted as an enrollment sample.
                    if (success == None) and is_success(self.enrolling, rect):
                        success = rect
                        img.annotateRect(rect, color="blue")
                        if self.eye_processing:
                            img.annotatePoint(leye, color="blue")
                            img.annotatePoint(reye, color="blue")
                        self.enroll_list.append([img, rect, leye, reye])

                    else:
                        # Rejected face: mark it with a red X.
                        img.annotateRect(rect, color="red")
                        if self.eye_processing:
                            img.annotatePoint(leye, color="red")
                            img.annotatePoint(reye, color="red")
                        img.annotateLine(
                            pv.Point(rect.x, rect.y), pv.Point(rect.x + rect.w, rect.y + rect.h), color="red"
                        )
                        img.annotateLine(
                            pv.Point(rect.x + rect.w, rect.y), pv.Point(rect.x, rect.y + rect.h), color="red"
                        )

                if success == None:
                    # No face matched the enrollment rectangle: yellow X.
                    rect = self.enrolling
                    img.annotateLine(
                        pv.Point(rect.x, rect.y), pv.Point(rect.x + rect.w, rect.y + rect.h), color="yellow"
                    )
                    img.annotateLine(
                        pv.Point(rect.x + rect.w, rect.y), pv.Point(rect.x, rect.y + rect.h), color="yellow"
                    )
                else:
                    # enroll in the identification algorithm
                    pass
            else:
                # Not enrolling: annotate every detection in blue.
                # (A duplicated copy of this loop, which ignored the
                # eye_processing flag, was removed here.)
                for rect, leye, reye in faces:
                    img.annotateRect(rect, color="blue")
                    if self.eye_processing:
                        img.annotatePoint(leye, color="blue")
                        img.annotatePoint(reye, color="blue")

            if self.isTrained:
                self.label_time = time.time()
                for rect, leye, reye in faces:
                    # Defensive re-check: training may be reset concurrently.
                    if self.face_rec.isTrained():
                        label = self.face_rec.predict(img, leye, reye)
                        names.append([0.5 * (leye + reye), label])

                self.label_time = time.time() - self.label_time

        im = img.asAnnotated()

        # Flip to mirror image
        if self.image_flip:
            im = im.transpose(FLIP_LEFT_RIGHT)

        if self.enrolling != None:
            # Count this frame toward the enrollment quota and wake
            # enrollFace(), which waits on this condition.
            self.enrollCondition.acquire()
            self.enroll_count += 1
            self.enrollCondition.notify()
            self.enrollCondition.release()

            # Draw the enrollment progress on the image.
            draw = PIL.ImageDraw.Draw(im)
            x, y = self.enrolling.x, self.enrolling.y
            if self.image_flip:
                xsize, ysize = im.size
                x = xsize - (x + self.enrolling.w)
            draw.text(
                (x + 10, y + 10),
                "Enrolling: %2d of %2d" % (self.enroll_count, self.enroll_max),
                fill="yellow",
                font=self.arialblack24,
            )
            del draw

        facesEntered = []

        if len(names) > 0:

            draw = PIL.ImageDraw.Draw(im)

            for pt, name in names:
                # Draw the label centered above the eye midpoint.
                x, y = pt.X(), pt.Y()
                w, h = draw.textsize(name, font=self.arialblack24)

                if self.image_flip:
                    xsize, ysize = im.size
                    x = xsize - x - 0.5 * w
                else:
                    x = x - 0.5 * w

                draw.text((x, y - 20 - h), name, fill="green", font=self.arialblack24)

                facesEntered.append(name)

                # Publish only new names
                if name not in self.faceNames:
                    # Renamed from 'str', which shadowed the builtin.
                    info = "seeing %s" % name
                    rospy.loginfo(info)
                    self.namesPub.publish(String(name))
                    self.publishPersonEvent(name, "entered")

            del draw

        # Find all of the faces that are no longer detected
        for name in self.faceNames:
            if name not in facesEntered:
                self.publishPersonEvent(name, "exited")

        nFaces = nFaces - len(facesEntered)

        # For unidentified faces
        # figure out how many entered/exited
        if (nFaces - self.faceCount) > 0:
            self.publishPersonEvent("unknown", "entered")

        if (nFaces - self.faceCount) < 0:
            self.publishPersonEvent("unknown", "exited")

        # Update all for the next round
        self.faceNames = facesEntered
        self.faceCount = nFaces

        # Publish the annotated image.
        cv_img = self.PIL_to_opencv(im)
        msg = self.bridge.cv_to_imgmsg(cv_img, encoding="rgb8")

        self.imagePub.publish(msg)

    def publishPersonEvent(self, name, event):
        """
        Record an enter/exit event for *name* in the tracking dictionary.
        Actual publishing is deferred to eventUpdate() for debouncing.
        """
        now = time.time()

        # Hold the lock across the read-modify-write so a concurrent
        # eventUpdate() cannot interleave between the read and the write.
        with self.faceTrackLock:
            if name in self.faceTrack:
                pastEvent, lastEvent, recTime = self.faceTrack[name]
            else:
                # New name: seed with "undefined" so the first real event
                # differs and is published by eventUpdate().
                # NOTE(review): the original computed a back-dated recTime
                # ("force the update") but never stored it, so new events
                # still wait a full eventUpdateDelay -- confirm intent.
                lastEvent = "undefined"
                pastEvent = lastEvent

            self.faceTrack[name] = (pastEvent, event, now)

    def eventUpdate(self):
        """
        Background thread: periodically publish debounced person events
        and update the tracking dictionary.
        """
        while self.is_running:
            now = time.time()
            # Publish every n seconds to avoid hysteresis,
            # where n = self.eventUpdateDelay
            self.faceTrackLock.acquire()
            for name in self.faceTrack:
                lastEvent, event, eventTime = self.faceTrack[name]
                # Py2-only '<>' replaced with '!='.  Reassigning existing
                # keys while iterating is safe (no size change).
                if ((now - eventTime) >= self.eventUpdateDelay) and (event != lastEvent):
                    self.personEventPub.publish(event, name)
                    self.faceTrack[name] = (event, event, now)
            self.faceTrackLock.release()

            time.sleep(self.eventUpdateDelay)

    def enrollFace(self, name):
        """
        Start the enrollment process for *name* and block until
        enroll_max frames have been processed by onFrame().
        """
        with self.reqQueueLock:
            faces = self.current_faces

            if len(faces) > 1:
                return "too many faces to enroll"

            for rect, leye, reye in faces:
                print("Start Enrolling %s" % (name))
                self.enrolling = rect
                self.enroll_count = 0
                self.nameList.append(name)

                # Wait for onFrame() to collect enough samples.
                self.enrollCondition.acquire()
                while self.enroll_count < self.enroll_max:
                    self.enrollCondition.wait()
                self.enrollCondition.release()

                self.enrolling = None

                for data, rect, leye, reye in self.enroll_list:
                    self.face_rec.addTraining(data, leye, reye, name)

                self.enroll_count = 0
                self.enroll_list = []

        return "ok"

    def trainFaces(self):
        """
        Train the SVM on the enrolled faces.  Returns "ok" or "Error".
        """
        with self.reqQueueLock:
            result = "ok"

            # Prevent face prediction while training
            self.isTrained = False

            if self.svm_mode == SVM_AUTOMATIC:
                # Train with automatic tuning and remember the tuned values.
                print("Training mode: auto tuning")
                self.face_rec.train()  # callback=progress.Update)
                self.svm_C = self.face_rec.svm.C
                self.svm_Gamma = self.face_rec.svm.gamma
            else:
                # Train with manual tuning.
                print("Training mode: manual tuning")
                self.face_rec.train(C=[self.svm_C], Gamma=[self.svm_Gamma])  # ,callback=progress.Update)

            if self.face_rec.isTrained():
                result = "ok"
                self.isTrained = True
            else:
                result = "Error"

        return result

    def resetFaces(self):
        """
        Clear all enrollment data and the trained model.
        """
        with self.reqQueueLock:
            result = "ok"

            self.isTrained = False

            # BUG FIX: this previously assigned a local 'nameList', leaving
            # self.nameList stale so handleTrain() kept counting old names.
            self.nameList = []

            # Clear the enrollment data for the SVM
            self.face_rec.reset()

        return result

    def findFaces(self, img):
        """
        Detect faces and locate eyes.  Returns [[rect, leye, reye], ...],
        records detect/eye timings, and caches self.current_faces.
        """
        faces = []

        self.detect_time = time.time()
        rects = self.face_detector.detect(img)
        self.detect_time = time.time() - self.detect_time

        # Work tiles for the eye locator: 128x128 color and grayscale.
        cvtile = opencv.cvCreateMat(128, 128, opencv.CV_8UC3)
        bwtile = opencv.cvCreateMat(128, 128, opencv.CV_8U)

        cvimg = img.asOpenCV()

        self.eye_time = time.time()

        for rect in rects:
            # Crop and resize the face region to the tile size.
            faceim = opencv.cvGetSubRect(cvimg, rect.asOpenCV())
            opencv.cvResize(faceim, cvtile)

            affine = pv.AffineFromRect(rect, (128, 128))

            opencv.cvCvtColor(cvtile, bwtile, cv.CV_BGR2GRAY)

            leye, reye, lcp, rcp = self.fel.locateEyes(bwtile)
            leye = pv.Point(leye)
            reye = pv.Point(reye)

            # Map eye coordinates back to the original image frame.
            leye = affine.invertPoint(leye)
            reye = affine.invertPoint(reye)

            faces.append([rect, leye, reye])

        self.eye_time = time.time() - self.eye_time
        self.current_faces = faces

        return faces

    def handleImage(self, image_message):
        """Image topic callback: convert the message and process one frame."""
        cv_image = self.bridge.imgmsg_to_cv(image_message, desired_encoding="rgb8")
        pyimg = self.opencv_to_pyvision(cv_image)

        self.onFrame(pyimg)

    def handleEnrollFace(self, msg):
        """'enroll' service handler."""
        print("Enrolling new name:" + msg.name)

        result = self.enrollFace(msg.name)
        return EnrollResponse(result)

    def handleTrain(self, msg):
        """'train' service handler; requires at least two enrolled names."""
        print("Training...")

        if len(self.nameList) >= 2:
            result = self.trainFaces()
        else:
            result = "Error: need at least 2 faces to train"

        return TrainResponse(result)

    def handleReset(self, msg):
        """'reset' service handler."""
        print("Resetting...")

        result = self.resetFaces()

        return ResetResponse(result)
コード例 #14
0
ファイル: TutFaceEyes.py プロジェクト: sadlifealone/pyvision
from pyvision.face.FilterEyeLocator import FilterEyeLocator

if __name__ == "__main__":
    # Log used to save intermediate images produced by this tutorial.
    ilog = pv.ImageLog()

    # Load the face image file
    fname = os.path.join(pv.__path__[0], 'data', 'misc', 'FaceSample.jpg')

    # Create the annotation image in black and white so that color
    # annotations show up better.
    im = pv.Image(fname, bw_annotate=True)

    ilog(pv.Image(fname), "Original")

    # Create a OpenCV cascade face detector object
    cd = CascadeDetector()

    # Create an eye detector object
    el = FilterEyeLocator()

    # Call the face detector like a function to get a list of face rectangles
    rects = cd(im)

    # print the list of rectangles
    print("Face Detection Output:", rects)

    # Also call the eye detector like a function with the original image and
    # the list of face detections to locate the eyes.
    eyes = el(im, rects)

    # print the list of eyes.  Format [ [ face_rect, left_eye, right_eye], ...]
    # NOTE(review): this example appears truncated here -- the comment above
    # promises a print of 'eyes' that never occurs; confirm against the
    # original TutFaceEyes.py before extending.
コード例 #15
0
ファイル: FaceL.py プロジェクト: mdqyy/pyvision
class VideoWindow(wx.Frame):
    '''
    This is the main FaceL window which includes the webcam video and enrollment and training controls.
    '''
    def __init__(self, parent, id, name, size=(640, 672)):
        '''
        Create the main FaceL window: the face-processing pipeline, the
        webcam connection, menus, the video bitmap, and the
        enrollment/training controls, then start the frame timer.
        '''
        wx.Frame.__init__(self, parent, id, name, size=size)

        self.CenterOnScreen(wx.HORIZONTAL)
        self.timing_window = None  # Initialize timing window

        # ------------- Face Processing -----------------
        self.face_detector = CascadeDetector(cascade_name=CASCADE_NAME,
                                             image_scale=0.5)
        self.fel = FilterEyeLocator(FEL_NAME)
        self.face_rec = SVMFaceRec()

        # SVM tuning parameters; used when svm_mode is not automatic.
        self.svm_mode = SVM_AUTOMATIC
        self.svm_C = 4.000e+00
        self.svm_Gamma = 9.766e-04

        # Enrollment state shared between onFrame and onClick.
        self.current_faces = []
        self.enrolling = None
        self.enroll_count = 0
        self.enroll_max = 32
        self.enroll_list = []

        self.previous_time = time.time()

        self.arialblack24 = PIL.ImageFont.truetype(ARIAL_BLACK_NAME, 24)

        # ---------------- Basic Data -------------------
        # A camera failure is fatal: show the traceback dialog and exit.
        # The bare except is deliberate -- any camera error ends the app.
        try:
            self.webcam = Webcam()
        except SystemExit:
            raise
        except:
            trace = traceback.format_exc()
            message = TraceBackDialog(None, "Camera Error", CAMERA_ERROR,
                                      trace)
            message.ShowModal()

            sys.stderr.write(
                "FaceL Error: an error occurred while trying to connect to the camera.  Details follow.\n\n"
            )
            sys.stderr.write(trace)
            sys.exit(CAMERA_ERROR_CODE)

        # ------------- Other Components ----------------
        self.CreateStatusBar()

        # ------------------- Menu ----------------------
        # Creating the menubar.

        # Menu IDs
        license_id = wx.NewId()

        mirror_id = wx.NewId()
        face_id = wx.NewId()
        svm_tune_id = wx.NewId()
        performance_id = wx.NewId()

        # Menu Items
        self.file_menu = wx.Menu()

        self.file_menu.Append(wx.ID_ABOUT, "&About...")
        self.file_menu.Append(license_id, "FaceL License...")
        self.file_menu.AppendSeparator()
        self.file_menu.Append(wx.ID_EXIT, "E&xit")

        self.options_menu = wx.Menu()
        # NOTE(review): the eye menu item reuses face_id rather than its
        # own wx.NewId() -- both route to onNull below, but confirm this
        # sharing is intentional before relying on the IDs.
        self.face_menuitem = self.options_menu.AppendCheckItem(
            face_id, "Face Processing")
        self.eye_menuitem = self.options_menu.AppendCheckItem(
            face_id, "Eye Detection")
        self.mirror_menuitem = self.options_menu.AppendCheckItem(
            mirror_id, "Mirror Video")
        self.options_menu.AppendSeparator()
        self.options_menu.Append(svm_tune_id, "SVM Tuning...")
        self.options_menu.Append(performance_id, "Performance...")

        # Create Menu Bar
        self.menu_bar = wx.MenuBar()
        self.menu_bar.Append(self.file_menu, "&File")
        self.menu_bar.Append(self.options_menu, "&Options")

        self.SetMenuBar(self.menu_bar)

        # Menu Events
        wx.EVT_MENU(self, wx.ID_ABOUT, self.onAbout)
        wx.EVT_MENU(self, license_id, self.onLicense)

        wx.EVT_MENU(self, mirror_id, self.onNull)
        wx.EVT_MENU(self, face_id, self.onNull)
        wx.EVT_MENU(self, svm_tune_id, self.onSVMTune)
        wx.EVT_MENU(self, performance_id, self.onTiming)

        # Set up menu checks: all processing options start enabled.
        self.face_menuitem.Check(True)
        self.eye_menuitem.Check(True)
        self.mirror_menuitem.Check(True)

        # ----------------- Image List ------------------

        # --------------- Image Display -----------------
        self.static_bitmap = wx.StaticBitmap(self,
                                             wx.NewId(),
                                             bitmap=wx.EmptyBitmap(640, 480))

        self.controls_box = wx.StaticBox(self, wx.NewId(), "Controls")

        self.facel_logo = wx.StaticBitmap(self,
                                          wx.NewId(),
                                          bitmap=wx.Bitmap(FACEL_LOGO))
        self.csu_logo = wx.StaticBitmap(self,
                                        wx.NewId(),
                                        bitmap=wx.Bitmap(CSU_LOGO))
        #        self.performance_box = wx.StaticBox(self, wx.NewId(), "Performance")

        # NOTE(review): "chioce" is a typo for "choice"; renaming the
        # attribute would touch every use, so it is only flagged here.
        self.enroll_chioce_label = wx.StaticText(self,
                                                 wx.NewId(),
                                                 "Enrollment Count:",
                                                 style=wx.ALIGN_LEFT)
        self.enroll_choice = wx.Choice(self, wx.NewId(), wx.Point(0, 0),
                                       wx.Size(-1, -1),
                                       ['16', '32', '48', '64', '128', '256'])
        self.enroll_choice.Select(3)

        self.train_button = wx.Button(self, wx.NewId(), 'Train Labeler')
        self.reset_button = wx.Button(self, wx.NewId(), 'Clear Labels')

        # --------------- Instrumentation ---------------

        self.enroll_label = wx.StaticText(
            self,
            wx.NewId(),
            "Click a face in the video to enroll.",
            style=wx.ALIGN_LEFT)

        self.ids_label = wx.StaticText(self,
                                       wx.NewId(),
                                       "Labels:",
                                       size=wx.Size(-1, 16),
                                       style=wx.ALIGN_LEFT)
        self.ids_text = wx.StaticText(self,
                                      wx.NewId(),
                                      size=wx.Size(30, 16),
                                      style=wx.ALIGN_RIGHT)

        self.faces_label = wx.StaticText(self,
                                         wx.NewId(),
                                         "Faces:",
                                         size=wx.Size(-1, 16),
                                         style=wx.ALIGN_LEFT)
        self.faces_text = wx.StaticText(self,
                                        wx.NewId(),
                                        size=wx.Size(30, 16),
                                        style=wx.ALIGN_RIGHT)

        # --------------- Window Layout -----------------
        enroll_sizer = wx.BoxSizer(wx.HORIZONTAL)
        enroll_sizer.Add(self.ids_label,
                         flag=wx.ALIGN_CENTER | wx.ALL,
                         border=4)
        enroll_sizer.Add(self.ids_text,
                         flag=wx.ALIGN_CENTER | wx.ALL,
                         border=4)
        enroll_sizer.AddSpacer(20)
        enroll_sizer.Add(self.faces_label,
                         flag=wx.ALIGN_CENTER | wx.ALL,
                         border=4)
        enroll_sizer.Add(self.faces_text,
                         flag=wx.ALIGN_CENTER | wx.ALL,
                         border=4)

        training_sizer = wx.BoxSizer(wx.HORIZONTAL)
        training_sizer.Add(self.train_button,
                           flag=wx.ALIGN_CENTER | wx.ALL,
                           border=4)
        training_sizer.Add(self.reset_button,
                           flag=wx.ALIGN_CENTER | wx.ALL,
                           border=4)

        enroll_choice_sizer = wx.BoxSizer(wx.HORIZONTAL)
        enroll_choice_sizer.Add(self.enroll_chioce_label,
                                flag=wx.ALIGN_CENTER | wx.ALL,
                                border=0)
        enroll_choice_sizer.Add(self.enroll_choice,
                                flag=wx.ALIGN_CENTER | wx.ALL,
                                border=0)

        controls_sizer = wx.StaticBoxSizer(
            self.controls_box, wx.VERTICAL)  #wx.BoxSizer(wx.VERTICAL)
        controls_sizer.Add(self.enroll_label,
                           flag=wx.ALIGN_LEFT | wx.ALL,
                           border=0)
        controls_sizer.Add(enroll_sizer, flag=wx.ALIGN_LEFT | wx.ALL, border=0)
        controls_sizer.Add(enroll_choice_sizer,
                           flag=wx.ALIGN_LEFT | wx.ALL,
                           border=4)
        controls_sizer.Add(training_sizer,
                           flag=wx.ALIGN_LEFT | wx.ALL,
                           border=0)

        bottom_sizer = wx.BoxSizer(wx.HORIZONTAL)
        bottom_sizer.Add(self.facel_logo,
                         flag=wx.ALIGN_CENTER | wx.ALL,
                         border=0)
        bottom_sizer.Add(controls_sizer, flag=wx.ALIGN_TOP | wx.ALL, border=4)
        bottom_sizer.Add(self.csu_logo,
                         flag=wx.ALIGN_CENTER | wx.ALL,
                         border=0)

        main_sizer = wx.BoxSizer(wx.VERTICAL)
        main_sizer.Add(self.static_bitmap,
                       flag=wx.ALIGN_CENTER | wx.ALL,
                       border=0)
        main_sizer.Add(bottom_sizer, flag=wx.ALIGN_CENTER | wx.ALL, border=4)

        self.SetAutoLayout(True)
        self.SetSizer(main_sizer)
        self.Layout()

        # -----------------------------------------------
        # FrameTimer drives onFrame; restarted at the end of each frame.
        self.timer = FrameTimer(self)
        self.timer.Start(200)

        # -------------- Event Handling -----------------
        wx.EVT_SIZE(self.static_bitmap, self.onBitmapResize)
        wx.EVT_LEFT_DOWN(self.static_bitmap, self.onClick)

        self.Bind(wx.EVT_BUTTON, self.onTrain, id=self.train_button.GetId())
        self.Bind(wx.EVT_BUTTON, self.onReset, id=self.reset_button.GetId())

        # --------------- Setup State -------------------
        self.setupState()

    def onTrain(self, event=None):
        '''
        Start the SVM training process.
        '''
        print "Train"
        #progress = wx.ProgressDialog(title="SVM Training", message="Training the Face Recognition Algorithm. Please Wait...")
        if self.svm_mode == SVM_AUTOMATIC:
            # Train with automatic tuning.
            self.face_rec.train()  #callback=progress.Update)
            self.svm_C = self.face_rec.svm.C
            self.svm_Gamma = self.face_rec.svm.gamma
        else:
            # Train with manual tuning.
            self.face_rec.train(C=[self.svm_C],
                                Gamma=[self.svm_Gamma
                                       ])  # ,callback=progress.Update)

        #progress.Destroy()

    def onReset(self, event=None):
        '''
        Discard all SVM enrollment data, then refresh the
        enable/disable state of the training controls.
        '''
        self.face_rec.reset()
        self.setupState()

    def onFrame(self, event=None):
        '''
        Retrieve and process a video frame.

        Grabs a frame from the webcam, runs face/eye detection and
        (optionally) recognition, drives the enrollment state machine,
        draws annotations, and re-arms the frame timer.
        '''
        # Stop the timer while processing; it is restarted at the end.
        self.timer.Stop()
        starttime = time.time()
        self.detect_time = 0.0
        self.eye_time = 0.0
        self.label_time = 0.0
        img = self.webcam.query()

        face_processing = self.face_menuitem.IsChecked()
        eye_processing = self.eye_menuitem.IsChecked()

        names = []

        if face_processing:
            faces = self.findFaces(img)
            if self.enrolling != None:
                # Enrollment mode: accept the first face overlapping the
                # enrollment rectangle; mark all others with a red X.
                success = None
                for rect, leye, reye in faces:
                    img.annotateRect(self.enrolling, color='yellow')
                    if (success == None) and is_success(self.enrolling, rect):
                        success = rect
                        img.annotateRect(rect, color='blue')
                        if eye_processing:
                            img.annotatePoint(leye, color='blue')
                            img.annotatePoint(reye, color='blue')
                        self.enroll_list.append([img, rect, leye, reye])

                    else:
                        img.annotateRect(rect, color='red')
                        if eye_processing:
                            img.annotatePoint(leye, color='red')
                            img.annotatePoint(reye, color='red')
                        img.annotateLine(pv.Point(rect.x, rect.y),
                                         pv.Point(rect.x + rect.w,
                                                  rect.y + rect.h),
                                         color='red')
                        img.annotateLine(pv.Point(rect.x + rect.w, rect.y),
                                         pv.Point(rect.x, rect.y + rect.h),
                                         color='red')

                if success == None:
                    # No face matched the enrollment rect: draw a yellow X.
                    rect = self.enrolling
                    img.annotateLine(pv.Point(rect.x, rect.y),
                                     pv.Point(rect.x + rect.w,
                                              rect.y + rect.h),
                                     color='yellow')
                    img.annotateLine(pv.Point(rect.x + rect.w, rect.y),
                                     pv.Point(rect.x, rect.y + rect.h),
                                     color='yellow')
                else:
                    #enroll in the identification algorithm
                    pass
            else:
                # Normal mode: annotate all detections in blue.
                for rect, leye, reye in faces:
                    img.annotateRect(rect, color='blue')
                    if eye_processing:
                        img.annotatePoint(leye, color='blue')
                        img.annotatePoint(reye, color='blue')

            if self.face_rec.isTrained():
                # Label each detected face with the SVM prediction,
                # keyed by the eye midpoint for later text placement.
                self.label_time = time.time()
                for rect, leye, reye in faces:
                    label = self.face_rec.predict(img, leye, reye)
                    names.append([0.5 * (leye + reye), label])
                self.label_time = time.time() - self.label_time

        # Displaying Annotated Frame
        im = img.asAnnotated()
        if self.mirror_menuitem.IsChecked():
            im = im.transpose(FLIP_LEFT_RIGHT)

        if self.enrolling != None:
            # Overlay enrollment progress; x is re-mirrored to match the
            # flipped display.
            draw = PIL.ImageDraw.Draw(im)
            x, y = self.enrolling.x, self.enrolling.y
            if self.mirror_menuitem.IsChecked():
                x = 640 - (x + self.enrolling.w)
            self.enroll_count += 1
            draw.text(
                (x + 10, y + 10),
                "Enrolling: %2d of %2d" % (self.enroll_count, self.enroll_max),
                fill='yellow',
                font=self.arialblack24)
            del draw

            if self.enroll_count >= self.enroll_max:
                # Enrollment quota reached: prompt for a name and feed the
                # captured samples to the recognizer as training data.
                print "Count:", self.enroll_count

                if len(self.enroll_list) == 0:
                    warning_dialog = wx.MessageDialog(
                        self,
                        "No faces were detected during the enrollment process.  Please face towards the camera and keep your face in the yellow rectangle during enrollment.",
                        style=wx.ICON_EXCLAMATION | wx.OK,
                        caption="Enrollment Error")
                    warning_dialog.ShowModal()
                else:
                    name_dialog = wx.TextEntryDialog(
                        self,
                        "Please enter a name to associate with the face. (%d faces captured)"
                        % len(self.enroll_list),
                        caption="Enrollment ID")
                    result = name_dialog.ShowModal()
                    sub_id = name_dialog.GetValue()
                    if result == wx.ID_OK:
                        if sub_id == "":
                            print "Warning: Empty Subject ID"
                            # NOTE(review): this dialog text reads inverted --
                            # it is shown when NO name was entered; confirm
                            # the intended wording before changing it.
                            warning_dialog = wx.MessageDialog(
                                self,
                                "A name was entered in the previous dialog so this face will not be enrolled in the database.  Please repeat the enrollment process for this person.",
                                style=wx.ICON_EXCLAMATION | wx.OK,
                                caption="Enrollment Error")
                            warning_dialog.ShowModal()
                        else:
                            for data, rect, leye, reye in self.enroll_list:
                                self.face_rec.addTraining(
                                    data, leye, reye, sub_id)
                                self.setupState()

                # Leave enrollment mode and clear the sample buffer.
                self.enroll_count = 0
                self.enrolling = None
                self.enroll_list = []

        if len(names) > 0:
            # Draw recognized names centered above each eye midpoint,
            # mirroring x when the display is flipped.
            draw = PIL.ImageDraw.Draw(im)
            for pt, name in names:
                x, y = pt.X(), pt.Y()
                w, h = draw.textsize(name, font=self.arialblack24)
                if self.mirror_menuitem.IsChecked():
                    x = 640 - x - 0.5 * w
                else:
                    x = x - 0.5 * w
                draw.text((x, y - 20 - h),
                          name,
                          fill='green',
                          font=self.arialblack24)
            del draw

        # Convert the PIL image to a wx bitmap and display it.
        wxImg = wx.EmptyImage(im.size[0], im.size[1])
        wxImg.SetData(im.tostring())
        bm = wxImg.ConvertToBitmap()

        self.static_bitmap.SetBitmap(bm)

        # Update timing gauges
        full_time = time.time() - starttime
        if self.timing_window != None:
            self.timing_window.update(self.detect_time, self.eye_time,
                                      self.label_time, full_time)

        self.ids_text.SetLabel("%d" % (self.face_rec.n_labels, ))
        self.faces_text.SetLabel("%d" % (self.face_rec.n_faces, ))

        # Re-arm the timer for the next frame.
        sleep_time = 1
        if sys.platform.startswith("linux"):
            sleep_time = 10
        # TODO: For macosx milliseconds should be 1
        # TODO: For linux milliseconds may need to be set to a higher value 10
        self.timer.Start(milliseconds=sleep_time, oneShot=1)

    def setupState(self):
        '''
        Refresh dependent control state.  The Train button is only
        usable once at least two distinct labels have been enrolled,
        since an SVM needs two classes to train.
        '''
        #print "state",self.face_rec.n_labels,self.IsEnabled()
        trainable = self.face_rec.n_labels >= 2
        if trainable:
            self.train_button.Enable()
        else:
            self.train_button.Disable()

    def onBitmapResize(self, event):
        '''
        Keep the video display bitmap sized to fill its allotted space
        when the surrounding window is resized.
        '''
        # The original computed the width and height into unused locals;
        # only the size object itself is needed.
        self.static_bitmap.SetSize(event.GetSize())

    def onClick(self, event):
        '''
        Handle a mouse click in the video window.  Clicking inside a
        detected face rectangle starts the enrollment process for that
        face.
        '''
        click_x, click_y = event.GetX(), event.GetY()

        # Undo the horizontal mirroring so the click maps back onto the
        # coordinate frame of the raw (un-flipped) video image.
        if self.mirror_menuitem.IsChecked():
            click_x = 640 - click_x

        click_point = pv.Point(click_x, click_y)
        for face_rect, _leye, _reye in self.current_faces:
            if face_rect.containsPoint(click_point):
                self.enrolling = face_rect
                self.enroll_count = 0
                self.enroll_max = int(self.enroll_choice.GetStringSelection())

    def findFaces(self, im):
        '''
        Run face detection (and, if enabled, eye localization) on one
        frame.  Records detection and eye timing in self.detect_time /
        self.eye_time, caches the results in self.current_faces, and
        returns a list of [rect, leye, reye] entries.
        '''
        locate_eyes = self.eye_menuitem.IsChecked()

        detect_start = time.time()
        rects = self.face_detector.detect(im)
        self.detect_time = time.time() - detect_start

        eye_start = time.time()
        if locate_eyes:
            faces = self.fel.locateEyes(im, rects)
        else:
            # Eye detection is disabled: estimate eye positions from the
            # average eye locations mapped into each detection rectangle.
            faces = []
            for rect in rects:
                affine = pv.AffineFromRect(rect, (1, 1))
                faces.append([rect,
                              affine.invertPoint(AVE_LEFT_EYE),
                              affine.invertPoint(AVE_RIGHT_EYE)])
        self.eye_time = time.time() - eye_start

        self.current_faces = faces
        return faces

    def onAbout(self, event):
        '''Display the About dialog.'''
        flags = wx.OK | wx.ICON_INFORMATION
        wx.MessageBox(ABOUT_MESSAGE, "About FaceL", flags)

    def onLicense(self, event):
        '''Display the license dialog.'''
        flags = wx.OK | wx.ICON_INFORMATION
        wx.MessageBox(LICENSE_MESSAGE, "FaceL License", flags)

    def onNull(self, *args, **kwargs):
        '''No-op handler used to swallow menu events that need no action.'''
        pass

    def onSVMTune(self, event):
        '''
        Show the SVM tuning dialog and, if the user confirms with OK,
        adopt the selected tuning mode and C/Gamma hyperparameters.
        '''
        dialog = SVMTuningDialog(self, self.svm_mode, self.svm_C,
                                 self.svm_Gamma)
        dialog.CenterOnParent()

        result = dialog.ShowModal()
        if result == wx.ID_OK:
            self.svm_mode = dialog.mode
            self.svm_C = dialog.C
            self.svm_Gamma = dialog.Gamma

        # Log the (possibly updated) tuning parameters to the console.
        print "SVM Tuning Info <MODE:%s; C:%0.2e; Gamma:%0.2e>" % (
            self.svm_mode, self.svm_C, self.svm_Gamma)

        dialog.Destroy()

    def onTiming(self, event):
        '''
        Show the performance/timing window, creating it on first use.
        '''
        if self.timing_window is not None:
            # Window already exists: just show it and bring it forward.
            self.timing_window.Show(True)
            self.timing_window.Raise()
            return

        self.timing_window = TimingWindow(self, wx.NewId(), "Performance")
        self.timing_window.CenterOnParent()
        self.timing_window.Show(True)
        self.timing_window.Bind(wx.EVT_CLOSE,
                                self.onCloseTiming,
                                id=self.timing_window.GetId())

    def onCloseTiming(self, event):
        '''Destroy the timing window and drop the reference to it.'''
        window, self.timing_window = self.timing_window, None
        window.Destroy()
コード例 #16
0
ファイル: FaceL.py プロジェクト: mdqyy/pyvision
    def __init__(self, parent, id, name, size=(640, 672)):
        '''
        Create all the windows and controls used for the main FaceL
        window: face-processing components, the webcam connection, the
        menu bar, the video display, and the enrollment/training
        controls.
        '''
        wx.Frame.__init__(self, parent, id, name, size=size)

        self.CenterOnScreen(wx.HORIZONTAL)
        self.timing_window = None  # Initialize timing window

        # ------------- Face Processing -----------------
        self.face_detector = CascadeDetector(cascade_name=CASCADE_NAME,
                                             image_scale=0.5)
        self.fel = FilterEyeLocator(FEL_NAME)
        self.face_rec = SVMFaceRec()

        # SVM tuning defaults; adjustable via the SVM Tuning dialog.
        self.svm_mode = SVM_AUTOMATIC
        self.svm_C = 4.000e+00
        self.svm_Gamma = 9.766e-04

        # Enrollment state: clicked faces accumulate in enroll_list
        # until enroll_max frames have been captured.
        self.current_faces = []
        self.enrolling = None
        self.enroll_count = 0
        self.enroll_max = 32
        self.enroll_list = []

        self.previous_time = time.time()

        # Font used for on-video annotations.
        self.arialblack24 = PIL.ImageFont.truetype(ARIAL_BLACK_NAME, 24)

        # ---------------- Basic Data -------------------
        try:
            self.webcam = Webcam()
        except SystemExit:
            raise
        except:
            # Report any camera initialization failure and exit.
            trace = traceback.format_exc()
            message = TraceBackDialog(None, "Camera Error", CAMERA_ERROR,
                                      trace)
            message.ShowModal()

            sys.stderr.write(
                "FaceL Error: an error occurred while trying to connect to the camera.  Details follow.\n\n"
            )
            sys.stderr.write(trace)
            sys.exit(CAMERA_ERROR_CODE)

        # ------------- Other Components ----------------
        self.CreateStatusBar()

        # ------------------- Menu ----------------------
        # Creating the menubar.

        # Menu IDs
        license_id = wx.NewId()

        mirror_id = wx.NewId()
        face_id = wx.NewId()
        svm_tune_id = wx.NewId()
        performance_id = wx.NewId()

        # Menu Items
        self.file_menu = wx.Menu()

        self.file_menu.Append(wx.ID_ABOUT, "&About...")
        self.file_menu.Append(license_id, "FaceL License...")
        self.file_menu.AppendSeparator()
        self.file_menu.Append(wx.ID_EXIT, "E&xit")

        self.options_menu = wx.Menu()
        self.face_menuitem = self.options_menu.AppendCheckItem(
            face_id, "Face Processing")
        self.eye_menuitem = self.options_menu.AppendCheckItem(
            face_id, "Eye Detection")
        self.mirror_menuitem = self.options_menu.AppendCheckItem(
            mirror_id, "Mirror Video")
        self.options_menu.AppendSeparator()
        self.options_menu.Append(svm_tune_id, "SVM Tuning...")
        self.options_menu.Append(performance_id, "Performance...")

        # Create Menu Bar
        self.menu_bar = wx.MenuBar()
        self.menu_bar.Append(self.file_menu, "&File")
        self.menu_bar.Append(self.options_menu, "&Options")

        self.SetMenuBar(self.menu_bar)

        # Menu Events
        wx.EVT_MENU(self, wx.ID_ABOUT, self.onAbout)
        wx.EVT_MENU(self, license_id, self.onLicense)

        wx.EVT_MENU(self, mirror_id, self.onNull)
        wx.EVT_MENU(self, face_id, self.onNull)
        wx.EVT_MENU(self, svm_tune_id, self.onSVMTune)
        wx.EVT_MENU(self, performance_id, self.onTiming)

        # Set up menu checks: all processing options start enabled.
        self.face_menuitem.Check(True)
        self.eye_menuitem.Check(True)
        self.mirror_menuitem.Check(True)

        # ----------------- Image List ------------------

        # --------------- Image Display -----------------
        self.static_bitmap = wx.StaticBitmap(self,
                                             wx.NewId(),
                                             bitmap=wx.EmptyBitmap(640, 480))

        self.controls_box = wx.StaticBox(self, wx.NewId(), "Controls")

        self.facel_logo = wx.StaticBitmap(self,
                                          wx.NewId(),
                                          bitmap=wx.Bitmap(FACEL_LOGO))
        self.csu_logo = wx.StaticBitmap(self,
                                        wx.NewId(),
                                        bitmap=wx.Bitmap(CSU_LOGO))
        #        self.performance_box = wx.StaticBox(self, wx.NewId(), "Performance")

        self.enroll_chioce_label = wx.StaticText(self,
                                                 wx.NewId(),
                                                 "Enrollment Count:",
                                                 style=wx.ALIGN_LEFT)
        self.enroll_choice = wx.Choice(self, wx.NewId(), wx.Point(0, 0),
                                       wx.Size(-1, -1),
                                       ['16', '32', '48', '64', '128', '256'])
        self.enroll_choice.Select(3)

        self.train_button = wx.Button(self, wx.NewId(), 'Train Labeler')
        self.reset_button = wx.Button(self, wx.NewId(), 'Clear Labels')

        # --------------- Instrumentation ---------------

        self.enroll_label = wx.StaticText(
            self,
            wx.NewId(),
            "Click a face in the video to enroll.",
            style=wx.ALIGN_LEFT)

        self.ids_label = wx.StaticText(self,
                                       wx.NewId(),
                                       "Labels:",
                                       size=wx.Size(-1, 16),
                                       style=wx.ALIGN_LEFT)
        self.ids_text = wx.StaticText(self,
                                      wx.NewId(),
                                      size=wx.Size(30, 16),
                                      style=wx.ALIGN_RIGHT)

        self.faces_label = wx.StaticText(self,
                                         wx.NewId(),
                                         "Faces:",
                                         size=wx.Size(-1, 16),
                                         style=wx.ALIGN_LEFT)
        self.faces_text = wx.StaticText(self,
                                        wx.NewId(),
                                        size=wx.Size(30, 16),
                                        style=wx.ALIGN_RIGHT)

        # --------------- Window Layout -----------------
        enroll_sizer = wx.BoxSizer(wx.HORIZONTAL)
        enroll_sizer.Add(self.ids_label,
                         flag=wx.ALIGN_CENTER | wx.ALL,
                         border=4)
        enroll_sizer.Add(self.ids_text,
                         flag=wx.ALIGN_CENTER | wx.ALL,
                         border=4)
        enroll_sizer.AddSpacer(20)
        enroll_sizer.Add(self.faces_label,
                         flag=wx.ALIGN_CENTER | wx.ALL,
                         border=4)
        enroll_sizer.Add(self.faces_text,
                         flag=wx.ALIGN_CENTER | wx.ALL,
                         border=4)

        training_sizer = wx.BoxSizer(wx.HORIZONTAL)
        training_sizer.Add(self.train_button,
                           flag=wx.ALIGN_CENTER | wx.ALL,
                           border=4)
        training_sizer.Add(self.reset_button,
                           flag=wx.ALIGN_CENTER | wx.ALL,
                           border=4)

        enroll_choice_sizer = wx.BoxSizer(wx.HORIZONTAL)
        enroll_choice_sizer.Add(self.enroll_chioce_label,
                                flag=wx.ALIGN_CENTER | wx.ALL,
                                border=0)
        enroll_choice_sizer.Add(self.enroll_choice,
                                flag=wx.ALIGN_CENTER | wx.ALL,
                                border=0)

        controls_sizer = wx.StaticBoxSizer(
            self.controls_box, wx.VERTICAL)  #wx.BoxSizer(wx.VERTICAL)
        controls_sizer.Add(self.enroll_label,
                           flag=wx.ALIGN_LEFT | wx.ALL,
                           border=0)
        controls_sizer.Add(enroll_sizer, flag=wx.ALIGN_LEFT | wx.ALL, border=0)
        controls_sizer.Add(enroll_choice_sizer,
                           flag=wx.ALIGN_LEFT | wx.ALL,
                           border=4)
        controls_sizer.Add(training_sizer,
                           flag=wx.ALIGN_LEFT | wx.ALL,
                           border=0)

        bottom_sizer = wx.BoxSizer(wx.HORIZONTAL)
        bottom_sizer.Add(self.facel_logo,
                         flag=wx.ALIGN_CENTER | wx.ALL,
                         border=0)
        bottom_sizer.Add(controls_sizer, flag=wx.ALIGN_TOP | wx.ALL, border=4)
        bottom_sizer.Add(self.csu_logo,
                         flag=wx.ALIGN_CENTER | wx.ALL,
                         border=0)

        main_sizer = wx.BoxSizer(wx.VERTICAL)
        main_sizer.Add(self.static_bitmap,
                       flag=wx.ALIGN_CENTER | wx.ALL,
                       border=0)
        main_sizer.Add(bottom_sizer, flag=wx.ALIGN_CENTER | wx.ALL, border=4)

        self.SetAutoLayout(True)
        self.SetSizer(main_sizer)
        self.Layout()

        # -----------------------------------------------
        # Frame timer drives onFrame; restarted after each frame.
        self.timer = FrameTimer(self)
        self.timer.Start(200)

        # -------------- Event Handleing ----------------
        wx.EVT_SIZE(self.static_bitmap, self.onBitmapResize)
        wx.EVT_LEFT_DOWN(self.static_bitmap, self.onClick)

        self.Bind(wx.EVT_BUTTON, self.onTrain, id=self.train_button.GetId())
        self.Bind(wx.EVT_BUTTON, self.onReset, id=self.reset_button.GetId())

        # --------------- Setup State -------------------
        self.setupState()
コード例 #17
0
ファイル: LiveDemo.py プロジェクト: mdqyy/pyvision
 def __init__(self):
     '''Create the cascade face detector used by the live demo.'''
     self.face = CascadeDetector(image_scale=0.4)
コード例 #18
0
ファイル: FaceL.py プロジェクト: Dfred/concept-robot
class VideoWindow(wx.Frame):
    '''
    This is the main FaceL window which includes the webcam video and enrollment and training controls.
    '''

    def __init__(self,parent,id,name,size=(640,672)):
        '''
        Create all the windows and controls used for the main FaceL
        window: face-processing components, the webcam connection, the
        menus, the video display, and the enrollment/training controls.
        '''
        wx.Frame.__init__(self,parent,id,name,size=size)

        self.CenterOnScreen(wx.HORIZONTAL)
        self.timing_window = None # Initialize timing window

        # ------------- Face Processing -----------------
        self.face_detector = CascadeDetector(cascade_name=CASCADE_NAME,image_scale=0.5)
        self.fel = FilterEyeLocator(FEL_NAME)
        self.face_rec = SVMFaceRec()

        # SVM tuning defaults; adjustable via the SVM Tuning dialog.
        self.svm_mode  = SVM_AUTOMATIC
        self.svm_C     = 4.000e+00
        self.svm_Gamma = 9.766e-04

        # Enrollment state: clicked faces accumulate in enroll_list
        # until enroll_max frames have been captured.
        self.current_faces = []
        self.enrolling    = None
        self.enroll_count = 0
        self.enroll_max   = 32
        self.enroll_list  = []

        self.previous_time = time.time()

        # Font used for on-video annotations.
        self.arialblack24 = PIL.ImageFont.truetype(ARIAL_BLACK_NAME, 24)

        # ---------------- Basic Data -------------------
        try:
            self.webcam = Webcam()
        except SystemExit:
            raise
        except:
            # Report any camera initialization failure and exit.
            trace = traceback.format_exc()
            message = TraceBackDialog(None, "Camera Error", CAMERA_ERROR, trace)
            message.ShowModal()

            sys.stderr.write("FaceL Error: an error occurred while trying to connect to the camera.  Details follow.\n\n")
            sys.stderr.write(trace)
            sys.exit(CAMERA_ERROR_CODE)

        # ------------- Other Components ----------------
        self.CreateStatusBar()

        # ------------------- Menu ----------------------
        # Creating the menubar.

        # Menu IDs
        license_id = wx.NewId()

        mirror_id = wx.NewId()
        face_id = wx.NewId()
        svm_tune_id = wx.NewId()
        performance_id = wx.NewId()

        # Menu Items
        self.file_menu = wx.Menu();

        self.file_menu.Append( wx.ID_ABOUT, "&About..." )
        self.file_menu.Append( license_id, "FaceL License..." )
        self.file_menu.AppendSeparator();
        self.file_menu.Append( wx.ID_EXIT, "E&xit" )

        self.options_menu = wx.Menu();
        self.face_menuitem = self.options_menu.AppendCheckItem( face_id, "Face Processing" )
        self.eye_menuitem = self.options_menu.AppendCheckItem( face_id, "Eye Detection" )
        self.mirror_menuitem = self.options_menu.AppendCheckItem( mirror_id, "Mirror Video" )
        self.options_menu.AppendSeparator()
        self.options_menu.Append( svm_tune_id, "SVM Tuning..." )
        self.options_menu.Append( performance_id, "Performance..." )

        # Create Menu Bar
        self.menu_bar = wx.MenuBar();
        self.menu_bar.Append( self.file_menu, "&File" )
        self.menu_bar.Append( self.options_menu, "&Options" )

        self.SetMenuBar( self.menu_bar )

        # Menu Events
        wx.EVT_MENU(self, wx.ID_ABOUT, self.onAbout )
        wx.EVT_MENU(self, license_id, self.onLicense )

        wx.EVT_MENU(self, mirror_id, self.onNull )
        wx.EVT_MENU(self, face_id, self.onNull )
        wx.EVT_MENU(self, svm_tune_id, self.onSVMTune )
        wx.EVT_MENU(self, performance_id, self.onTiming )

        # Set up menu checks: all processing options start enabled.
        self.face_menuitem.Check(True)
        self.eye_menuitem.Check(True)
        self.mirror_menuitem.Check(True)


        # ----------------- Image List ------------------

        # --------------- Image Display -----------------
        self.static_bitmap = wx.StaticBitmap(self,wx.NewId(), bitmap=wx.EmptyBitmap(640, 480))

        self.controls_box = wx.StaticBox(self, wx.NewId(), "Controls")

        self.facel_logo = wx.StaticBitmap(self,wx.NewId(), bitmap=wx.Bitmap(FACEL_LOGO))
        self.csu_logo = wx.StaticBitmap(self,wx.NewId(), bitmap=wx.Bitmap(CSU_LOGO))
#        self.performance_box = wx.StaticBox(self, wx.NewId(), "Performance")

        self.enroll_chioce_label = wx.StaticText(self, wx.NewId(), "Enrollment Count:", style=wx.ALIGN_LEFT)
        self.enroll_choice = wx.Choice(self,wx.NewId(),wx.Point(0,0),wx.Size(-1,-1),['16','32','48','64','128','256'])
        self.enroll_choice.Select(3)

        self.train_button = wx.Button(self,wx.NewId(),'Train Labeler')
        self.reset_button = wx.Button(self,wx.NewId(),'Clear Labels')

        # --------------- Instrumentation ---------------


        self.enroll_label = wx.StaticText(self, wx.NewId(), "Click a face in the video to enroll.", style=wx.ALIGN_LEFT)

        self.ids_label = wx.StaticText(self, wx.NewId(), "Labels:", size=wx.Size(-1,16), style=wx.ALIGN_LEFT)
        self.ids_text = wx.StaticText(self, wx.NewId(), size = wx.Size(30,16), style= wx.ALIGN_RIGHT )

        self.faces_label = wx.StaticText(self, wx.NewId(), "Faces:", size=wx.Size(-1,16), style=wx.ALIGN_LEFT)
        self.faces_text = wx.StaticText(self, wx.NewId(), size = wx.Size(30,16), style= wx.ALIGN_RIGHT )


        # --------------- Window Layout -----------------
        enroll_sizer = wx.BoxSizer(wx.HORIZONTAL)
        enroll_sizer.Add(self.ids_label, flag = wx.ALIGN_CENTER | wx.ALL, border=4)
        enroll_sizer.Add(self.ids_text, flag = wx.ALIGN_CENTER | wx.ALL, border=4)
        enroll_sizer.AddSpacer(20)
        enroll_sizer.Add(self.faces_label, flag = wx.ALIGN_CENTER | wx.ALL, border=4)
        enroll_sizer.Add(self.faces_text, flag = wx.ALIGN_CENTER | wx.ALL, border=4)

        training_sizer = wx.BoxSizer(wx.HORIZONTAL)
        training_sizer.Add(self.train_button, flag = wx.ALIGN_CENTER | wx.ALL, border=4)
        training_sizer.Add(self.reset_button, flag = wx.ALIGN_CENTER | wx.ALL, border=4)


        enroll_choice_sizer = wx.BoxSizer(wx.HORIZONTAL)
        enroll_choice_sizer.Add(self.enroll_chioce_label, flag = wx.ALIGN_CENTER | wx.ALL, border=0)
        enroll_choice_sizer.Add(self.enroll_choice, flag = wx.ALIGN_CENTER | wx.ALL, border=0)

        controls_sizer = wx.StaticBoxSizer(self.controls_box,wx.VERTICAL) #wx.BoxSizer(wx.VERTICAL)
        controls_sizer.Add(self.enroll_label, flag = wx.ALIGN_LEFT | wx.ALL, border=0)
        controls_sizer.Add(enroll_sizer, flag = wx.ALIGN_LEFT | wx.ALL, border=0)
        controls_sizer.Add(enroll_choice_sizer, flag = wx.ALIGN_LEFT | wx.ALL, border=4)
        controls_sizer.Add(training_sizer, flag = wx.ALIGN_LEFT | wx.ALL, border=0)

        bottom_sizer = wx.BoxSizer(wx.HORIZONTAL)
        bottom_sizer.Add(self.facel_logo, flag = wx.ALIGN_CENTER | wx.ALL, border=0)
        bottom_sizer.Add(controls_sizer, flag = wx.ALIGN_TOP | wx.ALL, border=4)
        bottom_sizer.Add(self.csu_logo, flag = wx.ALIGN_CENTER | wx.ALL, border=0)

        main_sizer = wx.BoxSizer(wx.VERTICAL)
        main_sizer.Add(self.static_bitmap, flag = wx.ALIGN_CENTER | wx.ALL, border=0)
        main_sizer.Add(bottom_sizer, flag = wx.ALIGN_CENTER | wx.ALL, border=4)


        self.SetAutoLayout(True)
        self.SetSizer(main_sizer)
        self.Layout()

        # -----------------------------------------------
        # Frame timer drives onFrame; restarted after each frame.
        self.timer = FrameTimer(self)
        self.timer.Start(200)

        # -------------- Event Handleing ----------------
        wx.EVT_SIZE(self.static_bitmap, self.onBitmapResize)
        wx.EVT_LEFT_DOWN(self.static_bitmap, self.onClick)

        self.Bind(wx.EVT_BUTTON, self.onTrain, id=self.train_button.GetId())
        self.Bind(wx.EVT_BUTTON, self.onReset, id=self.reset_button.GetId())

        # --------------- Setup State -------------------
        self.setupState()

    def onTrain(self,event=None):
        '''
        Start the SVM training process.
        '''
        print "Train"
        #progress = wx.ProgressDialog(title="SVM Training", message="Training the Face Recognition Algorithm. Please Wait...")
        if self.svm_mode == SVM_AUTOMATIC:
            # Train with automatic tuning.
            self.face_rec.train() #callback=progress.Update)
            # Record the hyperparameters selected by the automatic search.
            self.svm_C = self.face_rec.svm.C
            self.svm_Gamma = self.face_rec.svm.gamma
        else:
            # Train with manual tuning.
            self.face_rec.train( C=[self.svm_C] , Gamma=[self.svm_Gamma])# ,callback=progress.Update)

        #progress.Destroy()

    def onReset(self,event=None):
        '''
        Clear the enrollment data for the SVM.
        '''
        self.face_rec.reset()
        self.setupState()


    def onFrame(self,event=None):
        '''
        Retrieve and process a video frame.  Runs detection/labeling,
        draws annotations, updates the display and timing gauges, and
        restarts the frame timer.
        '''
        self.timer.Stop()
        starttime = time.time()
        self.detect_time = 0.0
        self.eye_time = 0.0
        self.label_time = 0.0
        img = self.webcam.query()

        face_processing = self.face_menuitem.IsChecked()
        eye_processing = self.eye_menuitem.IsChecked()

        names = []

        if face_processing:
            faces = self.findFaces(img)
            if self.enrolling != None:
                # Enrollment in progress: find the detection overlapping
                # the enrollment rectangle and capture it.
                success = None
                for rect,leye,reye in faces:
                    img.annotateRect(self.enrolling,color='yellow')
                    if (success == None) and is_success(self.enrolling,rect):
                        success = rect
                        img.annotateRect(rect,color='blue')
                        if eye_processing:
                            img.annotatePoint(leye,color='blue')
                            img.annotatePoint(reye,color='blue')
                        self.enroll_list.append([img,rect,leye,reye])

                    else:
                        img.annotateRect(rect,color='red')
                        if eye_processing:
                            img.annotatePoint(leye,color='red')
                            img.annotatePoint(reye,color='red')
                        img.annotateLine(pv.Point(rect.x,rect.y),pv.Point(rect.x+rect.w,rect.y+rect.h), color='red')
                        img.annotateLine(pv.Point(rect.x+rect.w,rect.y),pv.Point(rect.x,rect.y+rect.h), color='red')

                if success == None:
                    # No matching detection this frame: cross out the
                    # enrollment rectangle.
                    rect = self.enrolling
                    img.annotateLine(pv.Point(rect.x,rect.y),pv.Point(rect.x+rect.w,rect.y+rect.h), color='yellow')
                    img.annotateLine(pv.Point(rect.x+rect.w,rect.y),pv.Point(rect.x,rect.y+rect.h), color='yellow')
                else:
                    #enroll in the identification algorithm
                    pass
            else:
                for rect,leye,reye in faces:
                    img.annotateRect(rect,color='blue')
                    if eye_processing:
                        img.annotatePoint(leye,color='blue')
                        img.annotatePoint(reye,color='blue')


            if self.face_rec.isTrained():
                # Label every detected face with the trained recognizer.
                self.label_time = time.time()
                for rect,leye,reye in faces:
                    label = self.face_rec.predict(img,leye,reye)
                    names.append([0.5*(leye+reye),label])
                self.label_time = time.time() - self.label_time


        # Displaying Annotated Frame
        im = img.asAnnotated()
        if self.mirror_menuitem.IsChecked():
            im = im.transpose(FLIP_LEFT_RIGHT)

        if self.enrolling != None:
            draw = PIL.ImageDraw.Draw(im)
            x,y = self.enrolling.x,self.enrolling.y
            if self.mirror_menuitem.IsChecked():
                x = 640 - (x + self.enrolling.w)
            self.enroll_count += 1
            draw.text((x+10,y+10), "Enrolling: %2d of %2d"%(self.enroll_count,self.enroll_max), fill='yellow', font=self.arialblack24)
            del draw

            if self.enroll_count >= self.enroll_max:
                # Enrollment capture finished: prompt for a subject name.
                print "Count:",self.enroll_count

                if len(self.enroll_list) == 0:
                    warning_dialog = wx.MessageDialog(self,
                                                      "No faces were detected during the enrollment process.  Please face towards the camera and keep your face in the yellow rectangle during enrollment.",
                                                      style=wx.ICON_EXCLAMATION | wx.OK,
                                                      caption="Enrollment Error")
                    warning_dialog.ShowModal()
                else:
                    name_dialog = wx.TextEntryDialog(self, "Please enter a name to associate with the face. (%d faces captured)"%len(self.enroll_list), caption = "Enrollment ID")
                    result = name_dialog.ShowModal()
                    sub_id = name_dialog.GetValue()
                    if result == wx.ID_OK:
                        if sub_id == "":
                            # NOTE(review): this branch fires when the name is
                            # EMPTY, but the dialog text says "A name was
                            # entered" -- message wording looks inverted.
                            print "Warning: Empty Subject ID"
                            warning_dialog = wx.MessageDialog(self,
                                                              "A name was entered in the previous dialog so this face will not be enrolled in the database.  Please repeat the enrollment process for this person.",
                                                              style=wx.ICON_EXCLAMATION | wx.OK,
                                                              caption="Enrollment Error")
                            warning_dialog.ShowModal()
                        else:
                            for data,rect,leye,reye in self.enroll_list:
                                self.face_rec.addTraining(data,leye,reye,sub_id)
                                self.setupState()


                # Reset enrollment state for the next session.
                self.enroll_count = 0
                self.enrolling    = None
                self.enroll_list  = []


        if len(names) > 0:
            # Draw the recognized name above each labeled face.
            draw = PIL.ImageDraw.Draw(im)
            for pt,name in names:
                x,y = pt.X(),pt.Y()
                w,h = draw.textsize(name,font=self.arialblack24)
                if self.mirror_menuitem.IsChecked():
                    x = 640 - x - 0.5*w
                else:
                    x = x - 0.5*w
                draw.text((x,y-20-h), name, fill='green', font=self.arialblack24)
            del draw



        # Convert the PIL image to a wx bitmap and display it.
        wxImg = wx.EmptyImage(im.size[0], im.size[1])
        wxImg.SetData(im.tostring())
        bm = wxImg.ConvertToBitmap()

        self.static_bitmap.SetBitmap(bm)

        # Update timing gauges
        full_time = time.time() - starttime
        if self.timing_window != None:
            self.timing_window.update(self.detect_time,self.eye_time,self.label_time,full_time)

        self.ids_text.SetLabel("%d"%(self.face_rec.n_labels,))
        self.faces_text.SetLabel("%d"%(self.face_rec.n_faces,))

        sleep_time = 1
        if sys.platform.startswith("linux"):
            sleep_time = 10
        # TODO: For macosx milliseconds should be 1
        # TODO: For linux milliseconds may need to be set to a higher value 10
        self.timer.Start(milliseconds = sleep_time, oneShot = 1)



    def setupState(self):
        '''
        Refresh control state: the Train button is usable only once at
        least two distinct labels have been enrolled.
        '''
        #print "state",self.face_rec.n_labels,self.IsEnabled()
        if self.face_rec.n_labels >= 2:
            self.train_button.Enable()
        else:
            self.train_button.Disable()


    def onBitmapResize(self,event):
        '''Resize the video display bitmap to fill the allotted space.'''
        # NOTE(review): w and h are computed but unused.
        w = event.GetSize().GetWidth()
        h = event.GetSize().GetHeight()

        self.static_bitmap.SetSize(event.GetSize())


    def onClick(self,event):
        '''
        Process a click in the Video window which starts the enrollment process.
        '''
        x = event.GetX()
        y = event.GetY()

        # Undo the horizontal mirroring so the click maps back onto the
        # raw (un-flipped) video coordinates.
        if self.mirror_menuitem.IsChecked():
            x = 640-x

        for rect,leye,reye in self.current_faces:
            if rect.containsPoint(pv.Point(x,y)):
                self.enrolling = rect
                self.enroll_count = 0
                self.enroll_max = int(self.enroll_choice.GetStringSelection())


    def findFaces(self,im):
        '''
        Run face detection (and, if enabled, eye localization) on a
        frame.  Records timing, caches results in self.current_faces,
        and returns a list of [rect, leye, reye] entries.
        '''
        eye_processing = self.eye_menuitem.IsChecked()

        self.detect_time = time.time()
        rects = self.face_detector.detect(im)
        self.detect_time = time.time() - self.detect_time


        self.eye_time = time.time()
        if eye_processing:
            faces = self.fel.locateEyes(im, rects)
        else:
            # Eye detection disabled: estimate eye positions from the
            # average eye locations mapped into each detection rectangle.
            faces = []
            for rect in rects:
                affine = pv.AffineFromRect(rect,(1,1))
                leye = affine.invertPoint(AVE_LEFT_EYE)
                reye = affine.invertPoint(AVE_RIGHT_EYE)
                faces.append([rect,leye,reye])

        self.eye_time = time.time() - self.eye_time

        self.current_faces = faces

        return faces

    def onAbout(self,event):
        '''Display the About dialog.'''
        wx.MessageBox( ABOUT_MESSAGE,
                  "About FaceL", wx.OK | wx.ICON_INFORMATION )

    def onLicense(self,event):
        '''Display the license dialog.'''
        wx.MessageBox( LICENSE_MESSAGE,
                  "FaceL License", wx.OK | wx.ICON_INFORMATION )

    def onNull(self,*args,**kwargs):
        '''No-op handler used to swallow menu events that need no action.'''
        pass

    def onSVMTune(self,event):
        '''
        Show the SVM tuning dialog and, if the user confirms with OK,
        adopt the selected tuning mode and C/Gamma hyperparameters.
        '''
        dialog = SVMTuningDialog(self, self.svm_mode, self.svm_C, self.svm_Gamma)
        dialog.CenterOnParent()

        result = dialog.ShowModal()
        if result == wx.ID_OK:
            self.svm_mode  = dialog.mode
            self.svm_C     = dialog.C
            self.svm_Gamma = dialog.Gamma

        # Log the (possibly updated) tuning parameters to the console.
        print "SVM Tuning Info <MODE:%s; C:%0.2e; Gamma:%0.2e>"%(self.svm_mode,self.svm_C,self.svm_Gamma)

        dialog.Destroy()


    def onTiming(self,event):
        '''Show the performance/timing window, creating it on first use.'''
        if self.timing_window == None:
            self.timing_window = TimingWindow(self, wx.NewId(),"Performance")
            self.timing_window.CenterOnParent()
            self.timing_window.Show(True)
            self.timing_window.Bind(wx.EVT_CLOSE, self.onCloseTiming, id=self.timing_window.GetId())

        else:
            self.timing_window.Show(True)
            self.timing_window.Raise()


    def onCloseTiming(self,event):
        '''Destroy the timing window and drop the reference to it.'''
        self.timing_window.Destroy()
        self.timing_window = None
コード例 #19
0
ファイル: FaceL.py プロジェクト: Dfred/concept-robot
    def __init__(self,parent,id,name,size=(640,672)):
        '''
        Create the main FaceL frame: the face-processing pipeline, the
        webcam connection, the menu bar, the video display, and the
        enrollment/training controls, then start the frame timer.
        '''
        wx.Frame.__init__(self,parent,id,name,size=size)
        
        self.CenterOnScreen(wx.HORIZONTAL)
        self.timing_window = None # Initialize timing window
        
        # ------------- Face Processing -----------------
        # Detector -> eye locator -> SVM recognizer pipeline applied to frames.
        self.face_detector = CascadeDetector(cascade_name=CASCADE_NAME,image_scale=0.5)
        self.fel = FilterEyeLocator(FEL_NAME)
        self.face_rec = SVMFaceRec()
        
        # Default SVM tuning parameters; adjustable via the SVM Tuning dialog.
        self.svm_mode  = SVM_AUTOMATIC
        self.svm_C     = 4.000e+00
        self.svm_Gamma = 9.766e-04
        
        # Enrollment state: faces found in the current frame and the
        # click-to-enroll bookkeeping.
        self.current_faces = []
        self.enrolling    = None
        self.enroll_count = 0
        self.enroll_max   = 32
        self.enroll_list  = []
        
        self.previous_time = time.time()
        
        # Font used for on-video annotations.
        self.arialblack24 = PIL.ImageFont.truetype(ARIAL_BLACK_NAME, 24)

        # ---------------- Basic Data -------------------
        # Connect to the camera; on failure show the traceback in a dialog,
        # log it to stderr, and exit with the camera error code.
        try:
            self.webcam = Webcam()
        except SystemExit:
            raise
        except:
            trace = traceback.format_exc()
            message = TraceBackDialog(None, "Camera Error", CAMERA_ERROR, trace)
            message.ShowModal()
            
            sys.stderr.write("FaceL Error: an error occurred while trying to connect to the camera.  Details follow.\n\n")
            sys.stderr.write(trace)
            sys.exit(CAMERA_ERROR_CODE)

        # ------------- Other Components ----------------
        self.CreateStatusBar()
        
        # ------------------- Menu ----------------------
        # Creating the menubar.
        
        # Menu IDs
        license_id = wx.NewId()
        
        mirror_id = wx.NewId()
        face_id = wx.NewId()
        svm_tune_id = wx.NewId()
        performance_id = wx.NewId()
        
        # Menu Items
        self.file_menu = wx.Menu();

        self.file_menu.Append( wx.ID_ABOUT, "&About..." )
        self.file_menu.Append( license_id, "FaceL License..." )
        self.file_menu.AppendSeparator();
        self.file_menu.Append( wx.ID_EXIT, "E&xit" )

        self.options_menu = wx.Menu();
        # NOTE(review): both check items below share face_id, so they map to
        # the same menu event -- the eye item probably deserves its own id.
        # Confirm before relying on these events; both are routed to onNull.
        self.face_menuitem = self.options_menu.AppendCheckItem( face_id, "Face Processing" )
        self.eye_menuitem = self.options_menu.AppendCheckItem( face_id, "Eye Detection" )
        self.mirror_menuitem = self.options_menu.AppendCheckItem( mirror_id, "Mirror Video" )
        self.options_menu.AppendSeparator()
        self.options_menu.Append( svm_tune_id, "SVM Tuning..." )
        self.options_menu.Append( performance_id, "Performance..." )
        
        # Create Menu Bar
        self.menu_bar = wx.MenuBar();
        self.menu_bar.Append( self.file_menu, "&File" )
        self.menu_bar.Append( self.options_menu, "&Options" )

        self.SetMenuBar( self.menu_bar )
        
        # Menu Events
        wx.EVT_MENU(self, wx.ID_ABOUT, self.onAbout )
        wx.EVT_MENU(self, license_id, self.onLicense )

        # The toggle items need no handler of their own (their check state is
        # presumably read elsewhere), so they go to the no-op handler.
        wx.EVT_MENU(self, mirror_id, self.onNull )
        wx.EVT_MENU(self, face_id, self.onNull )
        wx.EVT_MENU(self, svm_tune_id, self.onSVMTune )
        wx.EVT_MENU(self, performance_id, self.onTiming )
        
        # Set up menu checks -- all three options enabled by default.
        self.face_menuitem.Check(True)
        self.eye_menuitem.Check(True)
        self.mirror_menuitem.Check(True)
        
        
        # ----------------- Image List ------------------
        
        # --------------- Image Display -----------------
        # 640x480 bitmap that displays the annotated video frames.
        self.static_bitmap = wx.StaticBitmap(self,wx.NewId(), bitmap=wx.EmptyBitmap(640, 480))
        
        self.controls_box = wx.StaticBox(self, wx.NewId(), "Controls")

        self.facel_logo = wx.StaticBitmap(self,wx.NewId(), bitmap=wx.Bitmap(FACEL_LOGO))
        self.csu_logo = wx.StaticBitmap(self,wx.NewId(), bitmap=wx.Bitmap(CSU_LOGO))
#        self.performance_box = wx.StaticBox(self, wx.NewId(), "Performance")
        
        # NOTE(review): "chioce" is a typo for "choice" in the attribute name;
        # it is used consistently below, so it is left unchanged here.
        self.enroll_chioce_label = wx.StaticText(self, wx.NewId(), "Enrollment Count:", style=wx.ALIGN_LEFT)
        self.enroll_choice = wx.Choice(self,wx.NewId(),wx.Point(0,0),wx.Size(-1,-1),['16','32','48','64','128','256'])
        # Default selection is index 3 ('64'); presumably enroll_max is synced
        # from this control elsewhere -- TODO confirm (it is initialized to 32).
        self.enroll_choice.Select(3)
        
        self.train_button = wx.Button(self,wx.NewId(),'Train Labeler')
        self.reset_button = wx.Button(self,wx.NewId(),'Clear Labels')
        
        # --------------- Instrumentation ---------------
        
        
          
        self.enroll_label = wx.StaticText(self, wx.NewId(), "Click a face in the video to enroll.", style=wx.ALIGN_LEFT)

        # Counters showing the number of enrolled labels and detected faces.
        self.ids_label = wx.StaticText(self, wx.NewId(), "Labels:", size=wx.Size(-1,16), style=wx.ALIGN_LEFT)
        self.ids_text = wx.StaticText(self, wx.NewId(), size = wx.Size(30,16), style= wx.ALIGN_RIGHT )  
        
        self.faces_label = wx.StaticText(self, wx.NewId(), "Faces:", size=wx.Size(-1,16), style=wx.ALIGN_LEFT)
        self.faces_text = wx.StaticText(self, wx.NewId(), size = wx.Size(30,16), style= wx.ALIGN_RIGHT )          
        

        # --------------- Window Layout -----------------
        # Counter row: labels count beside faces count.
        enroll_sizer = wx.BoxSizer(wx.HORIZONTAL)
        enroll_sizer.Add(self.ids_label, flag = wx.ALIGN_CENTER | wx.ALL, border=4)
        enroll_sizer.Add(self.ids_text, flag = wx.ALIGN_CENTER | wx.ALL, border=4)
        enroll_sizer.AddSpacer(20)
        enroll_sizer.Add(self.faces_label, flag = wx.ALIGN_CENTER | wx.ALL, border=4)
        enroll_sizer.Add(self.faces_text, flag = wx.ALIGN_CENTER | wx.ALL, border=4)

        # Train/Clear buttons side by side.
        training_sizer = wx.BoxSizer(wx.HORIZONTAL)
        training_sizer.Add(self.train_button, flag = wx.ALIGN_CENTER | wx.ALL, border=4)
        training_sizer.Add(self.reset_button, flag = wx.ALIGN_CENTER | wx.ALL, border=4)

    
        enroll_choice_sizer = wx.BoxSizer(wx.HORIZONTAL)
        enroll_choice_sizer.Add(self.enroll_chioce_label, flag = wx.ALIGN_CENTER | wx.ALL, border=0)
        enroll_choice_sizer.Add(self.enroll_choice, flag = wx.ALIGN_CENTER | wx.ALL, border=0)

        # Stack the control rows inside the "Controls" static box.
        controls_sizer = wx.StaticBoxSizer(self.controls_box,wx.VERTICAL) #wx.BoxSizer(wx.VERTICAL)
        controls_sizer.Add(self.enroll_label, flag = wx.ALIGN_LEFT | wx.ALL, border=0)
        controls_sizer.Add(enroll_sizer, flag = wx.ALIGN_LEFT | wx.ALL, border=0)
        controls_sizer.Add(enroll_choice_sizer, flag = wx.ALIGN_LEFT | wx.ALL, border=4)
        controls_sizer.Add(training_sizer, flag = wx.ALIGN_LEFT | wx.ALL, border=0)

        # Logos flank the control box beneath the video display.
        bottom_sizer = wx.BoxSizer(wx.HORIZONTAL)
        bottom_sizer.Add(self.facel_logo, flag = wx.ALIGN_CENTER | wx.ALL, border=0)
        bottom_sizer.Add(controls_sizer, flag = wx.ALIGN_TOP | wx.ALL, border=4)
        bottom_sizer.Add(self.csu_logo, flag = wx.ALIGN_CENTER | wx.ALL, border=0)

        # Video on top, controls/logos below.
        main_sizer = wx.BoxSizer(wx.VERTICAL)
        main_sizer.Add(self.static_bitmap, flag = wx.ALIGN_CENTER | wx.ALL, border=0)
        main_sizer.Add(bottom_sizer, flag = wx.ALIGN_CENTER | wx.ALL, border=4)
        

        self.SetAutoLayout(True)
        self.SetSizer(main_sizer)
        self.Layout()
        
        # -----------------------------------------------
        # Timer that drives frame capture/processing every 200 ms.
        self.timer = FrameTimer(self)
        self.timer.Start(200)
        
        # -------------- Event Handleing ----------------
        wx.EVT_SIZE(self.static_bitmap, self.onBitmapResize)
        wx.EVT_LEFT_DOWN(self.static_bitmap, self.onClick)
                
        self.Bind(wx.EVT_BUTTON, self.onTrain, id=self.train_button.GetId())
        self.Bind(wx.EVT_BUTTON, self.onReset, id=self.reset_button.GetId())
        
        # --------------- Setup State -------------------
        self.setupState()
コード例 #20
0
import pyvision as pv
import cv2
from pyvision.face.CascadeDetector import CascadeDetector
from pyvision.face.FilterEyeLocator import FilterEyeLocator

def mouseCallback(event, x, y, flags, param):
    '''Print left-button press/release events from the demo window.'''
    button_events = (cv2.EVENT_LBUTTONDOWN, cv2.EVENT_LBUTTONUP)
    if event in button_events:
        print("Mouse Event:",event,x,y)

if __name__ == '__main__':
    
    # Setup the webcam
    webcam  = pv.Webcam(size=(640,360))
    
    # Setup the face and eye detectors
    cd = CascadeDetector(min_size=(100,100))
    el = FilterEyeLocator()
    
    # Setup the mouse callback to handle mouse events (optional)
    cv2.namedWindow("PyVision Live Demo")
    cv2.setMouseCallback("PyVision Live Demo", mouseCallback)
    
    while True:
        # Grab a frame from the webcam
        frame = webcam.query()
        
        # Run Face and Eye Detection
        rects = cd(frame)
        eyes = el(frame,rects)

        # Annotate the result
        # NOTE(review): this excerpt appears truncated -- the annotation and
        # display code that should follow inside the loop is missing here.
コード例 #21
0
    
    
    # Open the file to use as output.
    f = open(args[1],'wb')
    csv_file = csv.writer(f)
    headers = ['image_name','detect_number','detect_x','detect_y','detect_width','detect_height','eye1_x','eye1_y','eye2_x','eye2_y']
    csv_file.writerow(headers)
    
    # Create an image log if this is being saved to a file.
    ilog = None
    if options.log_dir != None:
        print("Creating Image Log...")
        ilog = pv.ImageLog(options.log_dir)
    
    # For each image run face and eye detection
    face_detect = CascadeDetector(image_scale=1.3*options.scale)
    locate_eyes = FilterEyeLocator()#locator_filename)
    
    c = 0
    for pathname in image_names:
        c += 1
        
        im = pv.Image(pathname)

        scale = options.log_scale
        log_im = pv.AffineScale(scale,(int(scale*im.width),int(scale*im.height))).transformImage(im)
        
            
 
        results = processFaces(im,face_detect,locate_eyes)