Example #1
# image_utils and ViolaJonesRoi come from the surrounding project (the demo
# below imports ViolaJonesRoi from roi_detector); IplImageNormaliser is
# assumed to be defined or imported elsewhere in the same module.
import image_utils
from roi_detector import ViolaJonesRoi


class HeadTracker(object):
    def __init__(self, i_viola_scale=0.5, i_img_resize_scale=1.0):
        self.__params = {'filter_size': 0, 'viola_scale': i_viola_scale}
        # The normaliser owns the current ROI; the Viola-Jones detector finds
        # it initially and tracks it between frames.
        self.__normaliser = IplImageNormaliser()
        self.__normaliser.setParams(i_resize_scale=i_img_resize_scale,
                                    i_filter_size=self.__params['filter_size'],
                                    i_eq=False,
                                    i_roi=None)
        self.__roi_detector = ViolaJonesRoi(
            i_scale=self.__params['viola_scale'])

    def getParams(self):
        return self.__params

    def setGain(self, i_gain):
        self.__xy_gain = float(i_gain)

    def detectRoi(self, i_data, i_roi_scale_factor=1.2, i_track=True):
        ipl_roi = self.__normaliser.getRoi()
        # A ROI is already stored and tracking is disabled: report it as-is.
        if (ipl_roi is not None) and not i_track:
            x = float(ipl_roi.x) + 0.5 * ipl_roi.width
            y = float(ipl_roi.y) + 0.5 * ipl_roi.height
            self.__roi_detector.setPrev(x, y, ipl_roi.width, ipl_roi.height)
            return (0.0, 0.0, x, y, ipl_roi.width, ipl_roi.height)
        # No ROI is stored yet: run a full Viola-Jones detection on this
        # frame and remember the result.
        if ipl_roi is None:
            face_roi = self.__roi_detector.compute([i_data], i_ipl=True)
            if face_roi is None:
                return (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
            face_roi = self.__roi_detector.scaleRoi(face_roi,
                                                    i_roi_scale_factor,
                                                    i_data.width - 1,
                                                    i_data.height - 1)
            ipl_roi = image_utils.Numpy2CvRect(i_face_roi=face_roi)
            self.__normaliser.setRoi(ipl_roi)

            x = float(ipl_roi.x) + 0.5 * ipl_roi.width
            y = float(ipl_roi.y) + 0.5 * ipl_roi.height
            self.__roi_detector.setPrev(x, y, ipl_roi.width, ipl_roi.height)

            return (0.0, 0.0, x, y, ipl_roi.width, ipl_roi.height)
        # At this point i_track is True and a ROI is already stored, so hand
        # the frame to the tracker.
        return self.__roi_detector.trackRoi(i_data)

    def update(self, i_ipl_image, i_track=False):
        (delta_x, delta_y, x, y, w, h) = self.detectRoi(i_ipl_image,
                                                        i_track=i_track)
        return (delta_x, delta_y, x, y, w, h)

    def setRoi(self, i_roi):
        self.__normaliser.setRoi(i_roi)
        # Convert the CvRect into the numpy (row/column) form before handing
        # it to the detector.
        self.__roi_detector.setRoi(image_utils.Cv2NumpyRect(i_roi))

    def getRoi(self, i_ipl=False):
        if i_ipl:
            return self.__normaliser.getRoi()
        return image_utils.Cv2NumpyRect(self.__normaliser.getRoi())

    def clearRoi(self):
        self.__normaliser.clearRoi()
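

# Usage sketch (not part of the original example): drive HeadTracker one
# frame at a time. track_sequence and ipl_frames are illustrative names, and
# it is assumed that each frame is an IplImage with .width/.height, as the
# class expects.
def track_sequence(ipl_frames):
    """Return one (dx, dy, x, y, w, h) tuple per frame."""
    tracker = HeadTracker(i_viola_scale=0.5, i_img_resize_scale=1.0)
    results = []
    for frame in ipl_frames:
        # With i_track=True the first call falls back to a full Viola-Jones
        # detection (no ROI is stored yet); later calls go to trackRoi().
        results.append(tracker.update(frame, i_track=True))
    return results
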
Example #2
        # (tail of a jitter helper; its definition begins above the excerpt
        # shown here)
        o_images = x
        o_transforms = y
        o_n_jittered = n_jittered
        return (o_images, o_transforms, o_n_jittered)


if __name__ == "__main__":
    from PyQt4 import QtCore, QtGui
    from sys import stdin, exit, argv
    from qt_image_display import ImageDisplay
    from roi_detector import ViolaJonesRoi
    # cv (the legacy OpenCV bindings), image_utils and IplImageNormaliser
    # are assumed to be imported or defined at the top of the module this
    # demo lives in; the excerpt itself does not import them.

    data = image_utils.Video2Numpy("recordings/calibration.avi", 1)
    detector = ViolaJonesRoi()
    # Compute a face region of interest automatically, then narrow it to the
    # eye region.
    face_roi = detector.compute(data)
    eye_roi = detector.convertFace2EyeRoi(face_roi)
    detector.setRoi(eye_roi)
    (min_row, min_col, max_row, max_col) = eye_roi

    roi = image_utils.Numpy2CvRect(min_row, min_col, max_row, max_col)
    # Set up the normaliser on the eye ROI (unit scale, no rotation, centred
    # on the ROI).
    normaliser = IplImageNormaliser()
    normaliser.setParams(i_resize_scale=1.,
                         i_filter_size=0,
                         i_eq=False,
                         i_roi=roi)
    center = cv.cvPoint2D32f(roi.x + roi.width / 2, roi.y + roi.height / 2)
    normaliser.setAffineTransform(center, i_scale=1., i_rot_angle=0)
    nframe = 0
    app = QtGui.QApplication(argv)
    timer = QtCore.QTimer()
    n_jitter_examples = 5
    display = ImageDisplay(n_jitter_examples + 2)
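
    # Sketch of how the demo loop might continue (not in the original
    # excerpt). advance_frame and the 40 ms interval are illustrative
    # assumptions; the real callback would normalise data[nframe] and push
    # the result (plus jittered variants) into the ImageDisplay widget.
    def advance_frame():
        global nframe
        if nframe >= len(data):  # assumes the first axis of data indexes frames
            timer.stop()
            return
        # ... normalise data[nframe] with `normaliser`, update `display` ...
        nframe += 1

    timer.timeout.connect(advance_frame)  # PyQt4 new-style signal connection
    timer.start(40)                       # roughly 25 frames per second
    exit(app.exec_())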