def __init__(self, face_classifier_filepath=None, eye_classifier_filepath=None, parent=None):
    """Initialize the detection widget with face and eye Haar cascades.

    :param face_classifier_filepath: path to a Haar cascade XML for faces;
        defaults to the project's bundled face cascade.
    :param eye_classifier_filepath: path to a Haar cascade XML for eyes;
        defaults to the project's bundled 'eyes' cascade.
    :param parent: optional Qt parent widget, forwarded to the base class.
    """
    super().__init__(parent)
    if face_classifier_filepath is None:
        face_classifier_filepath = get_haarcascade_filepath()
    if eye_classifier_filepath is None:
        eye_classifier_filepath = get_haarcascade_filepath('eyes')
    # BUG FIX: the OpenCV contrib module is `cv2.face` (singular);
    # `cv2.faces` raises AttributeError at runtime.
    self.fisher_faces = cv2.face.createFisherFaceRecognizer()
    # Need an integer as the key, and image as the value.
    self._images = {}
    # BUG FIX: the resolved face cascade path was previously unused —
    # build a classifier for faces as well as for eyes.
    self._face_classifier = cv2.CascadeClassifier(face_classifier_filepath)
    self._eye_classifier = cv2.CascadeClassifier(eye_classifier_filepath)
def main():
    """Build the Qt application and main window, then run the event loop."""
    # We need to make the QApplication before our QMainWindow.
    # We also need to pass in our system argument values (sys.argv).
    app = QtWidgets.QApplication(sys.argv)
    main_window = QtWidgets.QMainWindow()
    # QMainWindow requires a central widget.
    haar_file = get_haarcascade_filepath()
    central_widget = FaceSignalWidget(haar_file)
    main_window.setCentralWidget(central_widget)
    # Show our main window.
    main_window.show()
    # Start the event loop; BUG FIX: propagate its exit status to the
    # shell instead of silently discarding it.
    sys.exit(app.exec())
def __init__(self, parent=None):
    """Compose the face-detection widget with a record button and wire signals.

    :param parent: optional Qt parent widget, forwarded to the base class.
    """
    super().__init__(parent)
    haar_filepath = get_haarcascade_filepath()
    self.face_detect_widget = FaceDetectionWidget(haar_filepath)
    self.video_recording = RecordVideo()
    record_button = QtWidgets.QPushButton('Run')
    # BUG FIX: `Qtwidgets` was a NameError typo for `QtWidgets`
    # (the correctly cased name is used on every other line here).
    layout = QtWidgets.QVBoxLayout()
    layout.addWidget(self.face_detect_widget)
    layout.addWidget(record_button)
    self.setLayout(layout)
    # Connect our signal `clicked` to our method `start_recording`.
    record_button.clicked.connect(self.video_recording.start_recording)
    # Alias out the method call `image_data_slot` to make the code
    # line shorter.
    image_data_slot = self.face_detect_widget.image_data_slot
    # Connect our signal `image_data` to our method `image_data_slot`.
    self.video_recording.image_data.connect(image_data_slot)