Example #1
        # Tail of the (truncated) jitter_video method of IplImageNormaliser:
        # return the jittered frames, their transforms, and the jitter count.
        o_transforms = y
        o_n_jittered = n_jittered
        return (o_images, o_transforms, o_n_jittered)


if __name__ == "__main__":
    from PyQt4 import QtCore, QtGui
    from sys import stdin, exit, argv
    from qt_image_display import ImageDisplay
    from roi_detector import ViolaJonesRoi

    data = image_utils.Video2Numpy("recordings/calibration.avi", 1)
    detector = ViolaJonesRoi()
    #Compute a region of interest automatically
    face_roi = detector.compute(data)
    eye_roi = detector.convertFace2EyeRoi(face_roi)
    detector.setRoi(eye_roi)
    (min_row, min_col, max_row, max_col) = eye_roi

    roi = image_utils.Numpy2CvRect(min_row, min_col, max_row, max_col)
    #Setup normaliser
    normaliser = IplImageNormaliser()
    normaliser.setParams(i_resize_scale=1.,
                         i_filter_size=0,
                         i_eq=False,
                         i_roi=roi)
    center = cv.cvPoint2D32f(roi.x + roi.width / 2, roi.y + roi.height / 2)
    normaliser.setAffineTransform(center, i_scale=1., i_rot_angle=0)
    nframe = 0
    app = QtGui.QApplication(argv)
    timer = QtCore.QTimer()
    n_jitter_examples = 5
    display = ImageDisplay(n_jitter_examples + 2)
    (jittered_images, transforms, n_jittered) = normaliser.jitter_video(data, n_jitter_examples)
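    # --- Hypothetical continuation (not part of the original listing) ---
    # A minimal sketch of how the QTimer and QApplication created above could
    # drive the demo: step through the video frames on a timer and quit once
    # the recording is exhausted.  Pushing each frame (and the jittered
    # examples) into `display` depends on ImageDisplay's API, which is not
    # shown here, so that step is only indicated by a comment.
    def on_timeout():
        global nframe
        nframe += 1
        if nframe >= data.shape[0]:      # assumes frames are stacked along axis 0
            app.quit()
            return
        # hand data[nframe] and jittered_images to `display` here

    timer.timeout.connect(on_timeout)    # PyQt4 supports new-style signal connections
    timer.start(40)                      # roughly 25 frames per second
    exit(app.exec_())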