def __init__(self, parent=None):
    """Build the camera window: UI form, device menu, dial widget, and
    face-recognition model.

    Side effects: initialises the module-level globals ``API`` (an
    ``AlgorithmAPIs`` instance) and ``dial_value``, starts the default
    camera via ``self.setCamera``, and loads the FaceNet/SVM model from
    disk (``apis/models/...``).
    """
    super(Camera, self).__init__(parent)
    # Module-level singleton used by other functions in this file.
    global API
    API = AlgorithmAPIs(template_dir="templates",
                        threshold=0.5,
                        use_multiprocessing=False)
    self.ui = Ui_Camera()
    self.camera = None
    self.imageCapture = None
    # self.mediaRecorder = None
    self.isCapturingImage = False
    self.applicationExiting = False
    self.imageSettings = QImageEncoderSettings()
    self.audioSettings = QAudioEncoderSettings()
    self.ui.setupUi(self)
    cameraDevice = QByteArray()
    videoDevicesGroup = QActionGroup(self)
    videoDevicesGroup.setExclusive(True)
    # One checkable menu action per attached camera; the first device
    # discovered becomes the checked/default selection.
    for deviceName in QCamera.availableDevices():
        description = QCamera.deviceDescription(deviceName)
        videoDeviceAction = QAction(description, videoDevicesGroup)
        videoDeviceAction.setCheckable(True)
        videoDeviceAction.setData(deviceName)
        if cameraDevice.isEmpty():
            cameraDevice = deviceName
            videoDeviceAction.setChecked(True)
        self.ui.menuDevices.addAction(videoDeviceAction)
    videoDevicesGroup.triggered.connect(self.updateCameraDevice)
    # Dial widget drives a global threshold-like value; dial_display is
    # defined elsewhere in this class.
    self.ui.lcdNumber_2.display(0)
    self.ui.dial.valueChanged.connect(self.dial_display)
    global dial_value
    dial_value = 3
    self.ui.lcdNumber_2.display(dial_value)
    self.setCamera(cameraDevice)
    # Create and load model (FaceNet embedding graph + SVM classifier).
    # NOTE(review): paths are relative to the working directory — confirm
    # the app is always launched from the project root.
    path_pretrained = "apis/models/facenet/20180402-114759.pb"
    path_SVM = "apis/models/SVM/SVM.pkl"
    self.recognizer = Recognizer()
    self.recognizer.create_graph(path_pretrained, path_SVM)
class Camera(QMainWindow):
    """Camera main window: builds the UI, populates the Devices menu with
    every available camera, and starts the first one found."""

    def __init__(self, arg=None):
        """Initialise the window.

        :param arg: optional parent widget passed through to QMainWindow.
            (Default added for backward-compatible construction with no
            arguments, matching the other variants in this file.)
        """
        # BUG FIX: the original called super().__init__(parent) but no name
        # `parent` exists in this scope (NameError on every construction).
        # Forward the actual constructor argument instead.
        super(Camera, self).__init__(arg)
        # self.arg = arg
        # Attributes variables
        self.ui = Ui_Camera()
        self.camera = None
        self.imageCapture = None
        self.mediaRecorder = None
        self.isCapturingImage = False
        self.applicationExiting = False
        self.imageSettings = QImageEncoderSettings()
        self.audioSettings = QAudioEncoderSettings()
        self.videoSettings = QVideoEncoderSettings()
        self.videoContainerFormat = ''
        self.ui.setupUi(self)
        # get device camera
        cameraDevice = QByteArray()
        videoDevicesGroup = QActionGroup(self)
        videoDevicesGroup.setExclusive(True)
        # Get informations about available cameras; first device found is
        # pre-checked and used as the initial camera.
        for deviceName in QCamera.availableDevices():
            description = QCamera.deviceDescription(deviceName)
            videoDeviceAction = QAction(description, videoDevicesGroup)
            videoDeviceAction.setCheckable(True)
            videoDeviceAction.setData(deviceName)
            if cameraDevice.isEmpty():
                cameraDevice = deviceName
                videoDeviceAction.setChecked(True)
            self.ui.menuDevices.addAction(videoDeviceAction)
        videoDevicesGroup.triggered.connect(self.updateCameraDevice)
        self.ui.captureWidget.currentChanged.connect(self.updateCaptureMode)
        self.ui.lockButton.hide()
        self.setCamera(cameraDevice)
def __init__(self, parent=None):
    """Build the attendance-checking camera window: UI form, recognition
    state counters, device menu, camera start, and FaceNet/SVM model load.
    """
    super(Camera, self).__init__(parent)
    self.ui = Ui_Camera()
    # Recognition / attendance state used by the frame-update loop:
    # previous and current recognised student IDs, a stability counter,
    # a checked flag, audio throttling time, and the list of IDs already
    # marked present.
    self.pre_id = 0
    self.cur_id = 0
    self.count = 0
    self.checked = 0
    self.audio_settime = 0
    self.allow_flag = 1
    self.check_list = []
    self.camera = None
    self.imageCapture = None
    self.isCapturingImage = False
    self.applicationExiting = False
    self.ui.setupUi(self)
    cameraDevice = QByteArray()
    videoDevicesGroup = QActionGroup(self)
    videoDevicesGroup.setExclusive(True)
    # One checkable menu action per camera; first device found is default.
    for deviceName in QCamera.availableDevices():
        description = QCamera.deviceDescription(deviceName)
        videoDeviceAction = QAction(description, videoDevicesGroup)
        videoDeviceAction.setCheckable(True)
        videoDeviceAction.setData(deviceName)
        if cameraDevice.isEmpty():
            cameraDevice = deviceName
            videoDeviceAction.setChecked(True)
        self.ui.menuDevices.addAction(videoDeviceAction)
    videoDevicesGroup.triggered.connect(self.updateCameraDevice)
    self.setCamera(cameraDevice)
    # Create and load model (FaceNet graph + SVM classifier).
    path_pretrained = "apis/models/facenet/20180402-114759.pb"
    path_SVM = "apis/models/SVM/SVM.pkl"
    self.recognizer = Recognizer()
    self.recognizer.create_graph(path_pretrained, path_SVM)
    # Others
    self.file_path = ""
    # NOTE(review): the last entry lacks the "../data/" prefix the other
    # two have — confirm whether "look_ahead.mp3" resolves correctly.
    self.audios = [
        "../data/tone.mp3",
        "../data/face_stable.mp3",
        "look_ahead.mp3"
    ]
def __init__(self, parent=None):
    """Set up the capture window: UI form, capture state, the Devices
    menu (one entry per attached camera), and the default camera."""
    super(Camera, self).__init__(parent)
    self.ui = Ui_Camera()
    self.camera = None
    self.imageCapture = None
    self.mediaRecorder = None
    self.isCapturingImage = False
    self.applicationExiting = False
    self.imageSettings = QImageEncoderSettings()
    self.audioSettings = QAudioEncoderSettings()
    self.videoSettings = QVideoEncoderSettings()
    self.videoContainerFormat = ""
    self.ui.setupUi(self)

    defaultDevice = QByteArray()
    devicesGroup = QActionGroup(self)
    devicesGroup.setExclusive(True)
    # Build one checkable menu entry per physical camera; the first one
    # discovered is pre-checked and becomes the active device.
    for name in QCamera.availableDevices():
        entry = QAction(QCamera.deviceDescription(name), devicesGroup)
        entry.setCheckable(True)
        entry.setData(name)
        if defaultDevice.isEmpty():
            defaultDevice = name
            entry.setChecked(True)
        self.ui.menuDevices.addAction(entry)
    devicesGroup.triggered.connect(self.updateCameraDevice)

    self.ui.captureWidget.currentChanged.connect(self.updateCaptureMode)
    self.ui.lockButton.hide()
    self.setCamera(defaultDevice)
def __init__(self, parent=None):
    """Initialise the camera window: form widgets, encoder settings,
    and the camera-device menu (handles multiple attached cameras)."""
    super(Camera, self).__init__(parent)
    self.ui = Ui_Camera()
    self.camera = None
    self.imageCapture = None
    self.mediaRecorder = None
    self.isCapturingImage = False
    self.applicationExiting = False
    self.imageSettings = QImageEncoderSettings()
    self.audioSettings = QAudioEncoderSettings()
    self.videoSettings = QVideoEncoderSettings()
    self.videoContainerFormat = ''
    self.ui.setupUi(self)

    firstDevice = QByteArray()
    deviceGroup = QActionGroup(self)
    deviceGroup.setExclusive(True)
    # Populate the Devices menu — supports multiple cameras.
    for dev in QCamera.availableDevices():
        label = QCamera.deviceDescription(dev)
        act = QAction(label, deviceGroup)
        act.setCheckable(True)
        act.setData(dev)
        if firstDevice.isEmpty():
            # Remember and pre-select the first camera found.
            firstDevice = dev
            act.setChecked(True)
        self.ui.menuDevices.addAction(act)
    deviceGroup.triggered.connect(self.updateCameraDevice)

    self.ui.captureWidget.currentChanged.connect(self.updateCaptureMode)
    self.ui.lockButton.hide()
    self.setCamera(firstDevice)
class Camera(QMainWindow):
    """Qt camera window supporting still-image capture and video recording.

    Wires the generated ``Ui_Camera`` form to a ``QCamera`` /
    ``QMediaRecorder`` / ``QCameraImageCapture`` pipeline.
    """

    def __init__(self, parent=None):
        """Build the UI, enumerate camera devices into the Devices menu,
        and start the first device found."""
        super(Camera, self).__init__(parent)
        self.ui = Ui_Camera()
        self.camera = None
        self.imageCapture = None
        self.mediaRecorder = None
        self.isCapturingImage = False
        self.applicationExiting = False
        self.imageSettings = QImageEncoderSettings()
        self.audioSettings = QAudioEncoderSettings()
        self.videoSettings = QVideoEncoderSettings()
        self.videoContainerFormat = ''
        self.ui.setupUi(self)
        cameraDevice = QByteArray()
        videoDevicesGroup = QActionGroup(self)
        videoDevicesGroup.setExclusive(True)
        # One checkable menu action per detected device; the first device
        # found becomes the initially selected camera.
        for deviceName in QCamera.availableDevices():
            description = QCamera.deviceDescription(deviceName)
            videoDeviceAction = QAction(description, videoDevicesGroup)
            videoDeviceAction.setCheckable(True)
            videoDeviceAction.setData(deviceName)
            if cameraDevice.isEmpty():
                cameraDevice = deviceName
                videoDeviceAction.setChecked(True)
            self.ui.menuDevices.addAction(videoDeviceAction)
        videoDevicesGroup.triggered.connect(self.updateCameraDevice)
        self.ui.captureWidget.currentChanged.connect(self.updateCaptureMode)
        self.ui.lockButton.hide()
        self.setCamera(cameraDevice)

    def setCamera(self, cameraDevice):
        """(Re)create the capture pipeline for *cameraDevice* and start it.

        :param cameraDevice: QByteArray device id; empty means system default.
        """
        if cameraDevice.isEmpty():
            self.camera = QCamera()
        else:
            self.camera = QCamera(cameraDevice)
        self.camera.stateChanged.connect(self.updateCameraState)
        self.camera.error.connect(self.displayCameraError)
        self.mediaRecorder = QMediaRecorder(self.camera)
        self.mediaRecorder.stateChanged.connect(self.updateRecorderState)
        self.imageCapture = QCameraImageCapture(self.camera)
        self.mediaRecorder.durationChanged.connect(self.updateRecordTime)
        self.mediaRecorder.error.connect(self.displayRecorderError)
        self.mediaRecorder.setMetaData(QMediaMetaData.Title, "Test Title")
        self.ui.exposureCompensation.valueChanged.connect(
            self.setExposureCompensation)
        self.camera.setViewfinder(self.ui.viewfinder)
        # Sync UI with the initial camera/recorder/lock state.
        self.updateCameraState(self.camera.state())
        self.updateLockStatus(self.camera.lockStatus(), QCamera.UserRequest)
        self.updateRecorderState(self.mediaRecorder.state())
        self.imageCapture.readyForCaptureChanged.connect(self.readyForCapture)
        self.imageCapture.imageCaptured.connect(self.processCapturedImage)
        self.imageCapture.imageSaved.connect(self.imageSaved)
        self.camera.lockStatusChanged.connect(self.updateLockStatus)
        self.ui.captureWidget.setTabEnabled(
            0, self.camera.isCaptureModeSupported(QCamera.CaptureStillImage))
        self.ui.captureWidget.setTabEnabled(
            1, self.camera.isCaptureModeSupported(QCamera.CaptureVideo))
        self.updateCaptureMode()
        self.camera.start()

    def keyPressEvent(self, event):
        """Hardware camera keys: half-press (focus key) locks focus,
        shutter key captures a still or toggles recording."""
        if event.isAutoRepeat():
            return
        if event.key() == Qt.Key_CameraFocus:
            self.displayViewfinder()
            self.camera.searchAndLock()
            event.accept()
        elif event.key() == Qt.Key_Camera:
            if self.camera.captureMode() == QCamera.CaptureStillImage:
                self.takeImage()
            elif self.mediaRecorder.state() == QMediaRecorder.RecordingState:
                self.stop()
            else:
                self.record()
            event.accept()
        else:
            super(Camera, self).keyPressEvent(event)

    def keyReleaseEvent(self, event):
        """Releasing the focus key drops the focus lock."""
        if event.isAutoRepeat():
            return
        if event.key() == Qt.Key_CameraFocus:
            self.camera.unlock()
        else:
            super(Camera, self).keyReleaseEvent(event)

    def updateRecordTime(self):
        """Show the elapsed recording time (seconds) in the status bar."""
        # BUG FIX: '%' and '//' have equal precedence and associate left, so
        # the original `"..." % duration // 1000` formatted the string first
        # and then floor-divided a str by 1000 (TypeError). Parenthesize the
        # millisecond-to-second conversion.
        msg = "Recorded %d sec" % (self.mediaRecorder.duration() // 1000)
        self.ui.statusbar.showMessage(msg)

    def processCapturedImage(self, requestId, img):
        """Preview the captured frame, then return to the viewfinder after 4 s."""
        scaledImage = img.scaled(self.ui.viewfinder.size(),
                                 Qt.KeepAspectRatio, Qt.SmoothTransformation)
        self.ui.lastImagePreviewLabel.setPixmap(QPixmap.fromImage(scaledImage))
        self.displayCapturedImage()
        QTimer.singleShot(4000, self.displayViewfinder)

    def configureCaptureSettings(self):
        """Open the settings dialog matching the current capture mode."""
        if self.camera.captureMode() == QCamera.CaptureStillImage:
            self.configureImageSettings()
        elif self.camera.captureMode() == QCamera.CaptureVideo:
            self.configureVideoSettings()

    def configureVideoSettings(self):
        """Let the user edit audio/video encoder settings; apply on accept."""
        settingsDialog = VideoSettings(self.mediaRecorder)
        settingsDialog.setAudioSettings(self.audioSettings)
        settingsDialog.setVideoSettings(self.videoSettings)
        settingsDialog.setFormat(self.videoContainerFormat)
        if settingsDialog.exec_():
            self.audioSettings = settingsDialog.audioSettings()
            self.videoSettings = settingsDialog.videoSettings()
            self.videoContainerFormat = settingsDialog.format()
            self.mediaRecorder.setEncodingSettings(self.audioSettings,
                                                   self.videoSettings,
                                                   self.videoContainerFormat)

    def configureImageSettings(self):
        """Let the user edit still-image encoder settings; apply on accept."""
        settingsDialog = ImageSettings(self.imageCapture)
        settingsDialog.setImageSettings(self.imageSettings)
        if settingsDialog.exec_():
            self.imageSettings = settingsDialog.imageSettings()
            # BUG FIX: original referenced the bare, undefined name
            # `imageCapture` (NameError); the capture object lives on self.
            self.imageCapture.setEncodingSettings(self.imageSettings)

    def record(self):
        """Start video recording and refresh the duration display."""
        self.mediaRecorder.record()
        self.updateRecordTime()

    def pause(self):
        self.mediaRecorder.pause()

    def stop(self):
        self.mediaRecorder.stop()

    def setMuted(self, muted):
        self.mediaRecorder.setMuted(muted)

    def toggleLock(self):
        """Toggle autofocus: unlock when searching/locked, otherwise lock."""
        if self.camera.lockStatus() in (QCamera.Searching, QCamera.Locked):
            self.camera.unlock()
        elif self.camera.lockStatus() == QCamera.Unlocked:
            self.camera.searchAndLock()

    def updateLockStatus(self, status, reason):
        """Reflect the focus-lock state in the lock button's text/colour
        and the status bar."""
        indicationColor = Qt.black
        if status == QCamera.Searching:
            self.ui.statusbar.showMessage("Focusing...")
            self.ui.lockButton.setText("Focusing...")
            indicationColor = Qt.yellow
        elif status == QCamera.Locked:
            self.ui.lockButton.setText("Unlock")
            self.ui.statusbar.showMessage("Focused", 2000)
            indicationColor = Qt.darkGreen
        elif status == QCamera.Unlocked:
            self.ui.lockButton.setText("Focus")
            if reason == QCamera.LockFailed:
                self.ui.statusbar.showMessage("Focus Failed", 2000)
                indicationColor = Qt.red
        palette = self.ui.lockButton.palette()
        palette.setColor(QPalette.ButtonText, indicationColor)
        self.ui.lockButton.setPalette(palette)

    def takeImage(self):
        """Capture one still image asynchronously (imageSaved fires later)."""
        self.isCapturingImage = True
        self.imageCapture.capture()

    def startCamera(self):
        self.camera.start()

    def stopCamera(self):
        self.camera.stop()

    def updateCaptureMode(self):
        """Switch the camera between still/video mode to match the UI tab."""
        tabIndex = self.ui.captureWidget.currentIndex()
        captureMode = QCamera.CaptureStillImage if tabIndex == 0 else QCamera.CaptureVideo
        if self.camera.isCaptureModeSupported(captureMode):
            self.camera.setCaptureMode(captureMode)

    def updateCameraState(self, state):
        """Enable/disable start/stop/settings actions per camera state."""
        if state == QCamera.ActiveState:
            self.ui.actionStartCamera.setEnabled(False)
            self.ui.actionStopCamera.setEnabled(True)
            self.ui.captureWidget.setEnabled(True)
            self.ui.actionSettings.setEnabled(True)
        elif state in (QCamera.UnloadedState, QCamera.LoadedState):
            self.ui.actionStartCamera.setEnabled(True)
            self.ui.actionStopCamera.setEnabled(False)
            self.ui.captureWidget.setEnabled(False)
            self.ui.actionSettings.setEnabled(False)

    def updateRecorderState(self, state):
        """Enable/disable record/pause/stop buttons per recorder state."""
        if state == QMediaRecorder.StoppedState:
            self.ui.recordButton.setEnabled(True)
            self.ui.pauseButton.setEnabled(True)
            self.ui.stopButton.setEnabled(False)
        elif state == QMediaRecorder.PausedState:
            self.ui.recordButton.setEnabled(True)
            self.ui.pauseButton.setEnabled(False)
            self.ui.stopButton.setEnabled(True)
        elif state == QMediaRecorder.RecordingState:
            self.ui.recordButton.setEnabled(False)
            self.ui.pauseButton.setEnabled(True)
            self.ui.stopButton.setEnabled(True)

    def setExposureCompensation(self, index):
        # Slider operates in half-EV steps.
        self.camera.exposure().setExposureCompensation(index * 0.5)

    def displayRecorderError(self):
        QMessageBox.warning(self, "Capture error",
                            self.mediaRecorder.errorString())

    def displayCameraError(self):
        QMessageBox.warning(self, "Camera error", self.camera.errorString())

    def updateCameraDevice(self, action):
        """Devices-menu handler: switch to the device stored in the action."""
        self.setCamera(action.data())

    def displayViewfinder(self):
        self.ui.stackedWidget.setCurrentIndex(0)

    def displayCapturedImage(self):
        self.ui.stackedWidget.setCurrentIndex(1)

    def readyForCapture(self, ready):
        self.ui.takeImageButton.setEnabled(ready)

    def imageSaved(self, id, fileName):
        """Capture hit disk; finish a deferred application exit if pending."""
        self.isCapturingImage = False
        if self.applicationExiting:
            self.close()

    def closeEvent(self, event):
        """Defer closing until an in-flight capture finishes saving."""
        if self.isCapturingImage:
            self.setEnabled(False)
            self.applicationExiting = True
            event.ignore()
        else:
            event.accept()
class Camera(QMainWindow):
    """Camera window variant with video recording disabled (all
    QMediaRecorder code commented out) plus attendance helpers
    (Excel chooser dialog, absence LCD display)."""

    def __init__(self, parent=None):
        """Build the UI, list camera devices in the Devices menu, and
        start the first device found."""
        super(Camera, self).__init__(parent)
        self.ui = Ui_Camera()
        self.camera = None
        self.imageCapture = None
        # self.mediaRecorder = None
        self.isCapturingImage = False
        self.applicationExiting = False
        self.imageSettings = QImageEncoderSettings()
        self.audioSettings = QAudioEncoderSettings()
        # self.videoSettings = QVideoEncoderSettings()
        # self.videoContainerFormat = ''
        self.ui.setupUi(self)
        cameraDevice = QByteArray()
        videoDevicesGroup = QActionGroup(self)
        videoDevicesGroup.setExclusive(True)
        # First discovered device becomes the checked/default camera.
        for deviceName in QCamera.availableDevices():
            description = QCamera.deviceDescription(deviceName)
            videoDeviceAction = QAction(description, videoDevicesGroup)
            videoDeviceAction.setCheckable(True)
            videoDeviceAction.setData(deviceName)
            if cameraDevice.isEmpty():
                cameraDevice = deviceName
                videoDeviceAction.setChecked(True)
            self.ui.menuDevices.addAction(videoDeviceAction)
        videoDevicesGroup.triggered.connect(self.updateCameraDevice)
        self.ui.captureWidget.currentChanged.connect(self.updateCaptureMode)
        # self.ui.lockButton.hide()
        self.setCamera(cameraDevice)

    def setCamera(self, cameraDevice):
        """(Re)create the QCamera + still-image capture pipeline and start it.

        :param cameraDevice: QByteArray device id; empty means system default.
        """
        if cameraDevice.isEmpty():
            self.camera = QCamera()
        else:
            self.camera = QCamera(cameraDevice)
        self.camera.stateChanged.connect(self.updateCameraState)
        self.camera.error.connect(self.displayCameraError)
        # self.mediaRecorder = QMediaRecorder(self.camera)
        # self.mediaRecorder.stateChanged.connect(self.updateRecorderState)
        self.imageCapture = QCameraImageCapture(self.camera)
        # self.mediaRecorder.durationChanged.connect(self.updateRecordTime)
        # self.mediaRecorder.error.connect(self.displayRecorderError)
        # self.mediaRecorder.setMetaData(QMediaMetaData.Title, "Test Title")
        # self.ui.exposureCompensation.valueChanged.connect(
        #     self.setExposureCompensation)
        self.camera.setViewfinder(self.ui.viewfinder)
        self.updateCameraState(self.camera.state())
        # self.updateLockStatus(self.camera.lockStatus(), QCamera.UserRequest)
        # self.updateRecorderState(self.mediaRecorder.state())
        self.imageCapture.readyForCaptureChanged.connect(self.readyForCapture)
        self.imageCapture.imageCaptured.connect(self.processCapturedImage)
        self.imageCapture.imageSaved.connect(self.imageSaved)
        # self.camera.lockStatusChanged.connect(self.updateLockStatus)
        self.ui.captureWidget.setTabEnabled(
            0, self.camera.isCaptureModeSupported(QCamera.CaptureStillImage))
        self.ui.captureWidget.setTabEnabled(
            1, self.camera.isCaptureModeSupported(QCamera.CaptureVideo))
        self.updateCaptureMode()
        self.camera.start()

    def keyPressEvent(self, event):
        """Camera hardware keys: focus key locks focus, shutter key
        captures a still (recording branch disabled)."""
        if event.isAutoRepeat():
            return
        if event.key() == Qt.Key_CameraFocus:
            self.displayViewfinder()
            self.camera.searchAndLock()
            event.accept()
        elif event.key() == Qt.Key_Camera:
            if self.camera.captureMode() == QCamera.CaptureStillImage:
                self.takeImage()
            # elif self.mediaRecorder.state() == QMediaRecorder.RecordingState:
            #     self.stop()
            # else:
            #     self.record()
            event.accept()
        else:
            super(Camera, self).keyPressEvent(event)

    def keyReleaseEvent(self, event):
        """Releasing the focus key drops the focus lock."""
        if event.isAutoRepeat():
            return
        if event.key() == Qt.Key_CameraFocus:
            self.camera.unlock()
        else:
            super(Camera, self).keyReleaseEvent(event)

    # def updateRecordTime(self):
    #     msg = "Recorded %d sec" % (self.mediaRecorder.duration() // 1000)
    #     self.ui.statusbar.showMessage(msg)

    def processCapturedImage(self, requestId, img):
        """Scale the captured frame into the preview label, show it, and
        return to the viewfinder after 4 s."""
        scaledImage = img.scaled(self.ui.viewfinder.size(),
                                 Qt.KeepAspectRatio, Qt.SmoothTransformation)
        self.ui.lastImagePreviewLabel.setPixmap(QPixmap.fromImage(scaledImage))
        self.displayCapturedImage()
        QTimer.singleShot(4000, self.displayViewfinder)

    def configureCaptureSettings(self):
        """Open the settings dialog matching the current capture mode.

        NOTE(review): the video branch calls configureVideoSettings, which
        is commented out in this variant — would raise AttributeError if a
        video tab were ever active; confirm video mode is unreachable here.
        """
        if self.camera.captureMode() == QCamera.CaptureStillImage:
            self.configureImageSettings()
        elif self.camera.captureMode() == QCamera.CaptureVideo:
            self.configureVideoSettings()

    # def configureVideoSettings(self):
    #     settingsDialog = VideoSettings(self.mediaRecorder)
    #     settingsDialog.setAudioSettings(self.audioSettings)
    #     settingsDialog.setVideoSettings(self.videoSettings)
    #     settingsDialog.setFormat(self.videoContainerFormat)
    #     if settingsDialog.exec_():
    #         self.audioSettings = settingsDialog.audioSettings()
    #         self.videoSettings = settingsDialog.videoSettings()
    #         self.videoContainerFormat = settingsDialog.format()
    #         self.mediaRecorder.setEncodingSettings(self.audioSettings,
    #             self.videoSettings, self.videoContainerFormat)

    def configureOpenExcels(self):
        """Open the attendance Excel chooser dialog."""
        settingsopenexcelDialog = OpenExcels()
        settingsopenexcelDialog.initUI()

    def configureImageSettings(self):
        """Edit still-image encoder settings; apply them on accept."""
        settingsDialog = ImageSettings(self.imageCapture)
        settingsDialog.setImageSettings(self.imageSettings)
        if settingsDialog.exec_():
            self.imageSettings = settingsDialog.imageSettings()
            self.imageCapture.setEncodingSettings(self.imageSettings)

    # def record(self):
    #     self.mediaRecorder.record()
    #     self.updateRecordTime()

    # def pause(self):
    #     self.mediaRecorder.pause()

    # def stop(self):
    #     self.mediaRecorder.stop()

    # def setMuted(self, muted):
    #     self.mediaRecorder.setMuted(muted)

    def toggleLock(self):
        """Toggle autofocus: unlock when searching/locked, otherwise lock."""
        if self.camera.lockStatus() in (QCamera.Searching, QCamera.Locked):
            self.camera.unlock()
        elif self.camera.lockStatus() == QCamera.Unlocked:
            self.camera.searchAndLock()

    # def updateLockStatus(self, status, reason):
    #     indicationColor = Qt.black
    #     if status == QCamera.Searching:
    #         self.ui.statusbar.showMessage("Focusing...")
    #         self.ui.lockButton.setText("Focusing...")
    #         indicationColor = Qt.yellow
    #     elif status == QCamera.Locked:
    #         self.ui.lockButton.setText("Unlock")
    #         self.ui.statusbar.showMessage("Focused", 2000)
    #         indicationColor = Qt.darkGreen
    #     # elif status == QCamera.Unlocked:
    #     #     self.ui.lockButton.setText("Focus")
    #     if reason == QCamera.LockFailed:
    #         self.ui.statusbar.showMessage("Focus Failed", 2000)
    #         indicationColor = Qt.red
    #     palette = self.ui.lockButton.palette()
    #     palette.setColor(QPalette.ButtonText, indicationColor)
    #     self.ui.lockButton.setPalette(palette)

    def display_absences(self, absences):
        """Show the current absence count on the LCD widget."""
        self.ui.absenceNumber.display(absences)

    def takeImage(self):
        """Capture one still image asynchronously (imageSaved fires later)."""
        self.isCapturingImage = True
        self.imageCapture.capture()

    def startCamera(self):
        self.camera.start()

    def stopCamera(self):
        self.camera.stop()

    def updateCaptureMode(self):
        """Switch the camera between still/video mode to match the UI tab."""
        tabIndex = self.ui.captureWidget.currentIndex()
        captureMode = QCamera.CaptureStillImage if tabIndex == 0 else QCamera.CaptureVideo
        if self.camera.isCaptureModeSupported(captureMode):
            self.camera.setCaptureMode(captureMode)

    def updateCameraState(self, state):
        """Enable/disable start/stop/settings actions per camera state."""
        if state == QCamera.ActiveState:
            self.ui.actionStartCamera.setEnabled(False)
            self.ui.actionStopCamera.setEnabled(True)
            self.ui.captureWidget.setEnabled(True)
            self.ui.actionSettings.setEnabled(True)
        elif state in (QCamera.UnloadedState, QCamera.LoadedState):
            self.ui.actionStartCamera.setEnabled(True)
            self.ui.actionStopCamera.setEnabled(False)
            self.ui.captureWidget.setEnabled(False)
            self.ui.actionSettings.setEnabled(False)

    # def updateRecorderState(self, state):
    #     if state == QMediaRecorder.StoppedState:
    #         # self.ui.recordButton.setEnabled(True)
    #         self.ui.pauseButton.setEnabled(True)
    #         self.ui.stopButton.setEnabled(False)
    #     elif state == QMediaRecorder.PausedState:
    #         self.ui.recordButton.setEnabled(True)
    #         self.ui.pauseButton.setEnabled(False)
    #         self.ui.stopButton.setEnabled(True)
    #     elif state == QMediaRecorder.RecordingState:
    #         self.ui.recordButton.setEnabled(False)
    #         self.ui.pauseButton.setEnabled(True)
    #         self.ui.stopButton.setEnabled(True)

    def setExposureCompensation(self, index):
        # Slider operates in half-EV steps.
        self.camera.exposure().setExposureCompensation(index * 0.5)

    # def displayRecorderError(self):
    #     QMessageBox.warning(self, "Capture error",
    #                         self.mediaRecorder.errorString())

    def displayCameraError(self):
        QMessageBox.warning(self, "Camera error", self.camera.errorString())

    def updateCameraDevice(self, action):
        """Devices-menu handler: switch to the device stored in the action."""
        self.setCamera(action.data())

    def displayViewfinder(self):
        self.ui.stackedWidget.setCurrentIndex(0)

    def displayCapturedImage(self):
        self.ui.stackedWidget.setCurrentIndex(1)

    def readyForCapture(self, ready):
        self.ui.takeImageButton.setEnabled(ready)

    def imageSaved(self, id, fileName):
        """Capture hit disk; finish a deferred application exit if pending."""
        self.isCapturingImage = False
        if self.applicationExiting:
            self.close()

    def closeEvent(self, event):
        """Defer closing until an in-flight capture finishes saving."""
        if self.isCapturingImage:
            self.setEnabled(False)
            self.applicationExiting = True
            event.ignore()
        else:
            event.accept()
class Camera(QMainWindow):
    """Camera window variant that post-processes captured stills:
    ``detectChars`` crops a fixed centred region from the captured QImage
    (via an OpenCV round-trip) before previewing it."""

    def __init__(self, parent=None):
        """Build the UI, enumerate camera devices into the Devices menu,
        and start the first device found."""
        super(Camera, self).__init__(parent)
        self.ui = Ui_Camera()
        self.camera = None
        self.imageCapture = None
        self.mediaRecorder = None
        self.isCapturingImage = False
        self.applicationExiting = False
        self.imageSettings = QImageEncoderSettings()
        self.audioSettings = QAudioEncoderSettings()
        self.videoSettings = QVideoEncoderSettings()
        self.videoContainerFormat = ''
        self.ui.setupUi(self)
        cameraDevice = QByteArray()
        videoDevicesGroup = QActionGroup(self)
        videoDevicesGroup.setExclusive(True)
        # One checkable menu action per device; first found is default.
        for deviceName in QCamera.availableDevices():
            description = QCamera.deviceDescription(deviceName)
            videoDeviceAction = QAction(description, videoDevicesGroup)
            videoDeviceAction.setCheckable(True)
            videoDeviceAction.setData(deviceName)
            if cameraDevice.isEmpty():
                cameraDevice = deviceName
                videoDeviceAction.setChecked(True)
            self.ui.menuDevices.addAction(videoDeviceAction)
        videoDevicesGroup.triggered.connect(self.updateCameraDevice)
        self.ui.captureWidget.currentChanged.connect(self.updateCaptureMode)
        self.ui.lockButton.hide()
        self.setCamera(cameraDevice)

    def setCamera(self, cameraDevice):
        """(Re)create the capture pipeline for *cameraDevice* and start it."""
        if cameraDevice.isEmpty():
            self.camera = QCamera()
        else:
            self.camera = QCamera(cameraDevice)
        self.camera.stateChanged.connect(self.updateCameraState)
        self.camera.error.connect(self.displayCameraError)
        self.mediaRecorder = QMediaRecorder(self.camera)
        self.mediaRecorder.stateChanged.connect(self.updateRecorderState)
        self.imageCapture = QCameraImageCapture(self.camera)
        self.mediaRecorder.durationChanged.connect(self.updateRecordTime)
        self.mediaRecorder.error.connect(self.displayRecorderError)
        self.mediaRecorder.setMetaData(QMediaMetaData.Title, "Test Title")
        self.ui.exposureCompensation.valueChanged.connect(
            self.setExposureCompensation)
        self.camera.setViewfinder(self.ui.viewfinder)
        # Sync UI with the initial camera/recorder/lock state.
        self.updateCameraState(self.camera.state())
        self.updateLockStatus(self.camera.lockStatus(), QCamera.UserRequest)
        self.updateRecorderState(self.mediaRecorder.state())
        self.imageCapture.readyForCaptureChanged.connect(self.readyForCapture)
        self.imageCapture.imageCaptured.connect(self.processCapturedImage)
        self.imageCapture.imageSaved.connect(self.imageSaved)
        self.camera.lockStatusChanged.connect(self.updateLockStatus)
        self.ui.captureWidget.setTabEnabled(
            0, self.camera.isCaptureModeSupported(QCamera.CaptureStillImage))
        self.ui.captureWidget.setTabEnabled(
            1, self.camera.isCaptureModeSupported(QCamera.CaptureVideo))
        self.updateCaptureMode()
        self.camera.start()

    def keyPressEvent(self, event):
        """Hardware camera keys: focus key locks focus, shutter key
        captures a still or toggles recording."""
        if event.isAutoRepeat():
            return
        if event.key() == Qt.Key_CameraFocus:
            self.displayViewfinder()
            self.camera.searchAndLock()
            event.accept()
        elif event.key() == Qt.Key_Camera:
            if self.camera.captureMode() == QCamera.CaptureStillImage:
                self.takeImage()
            elif self.mediaRecorder.state() == QMediaRecorder.RecordingState:
                self.stop()
            else:
                self.record()
            event.accept()
        else:
            super(Camera, self).keyPressEvent(event)

    def keyReleaseEvent(self, event):
        """Releasing the focus key drops the focus lock."""
        if event.isAutoRepeat():
            return
        if event.key() == Qt.Key_CameraFocus:
            self.camera.unlock()
        else:
            super(Camera, self).keyReleaseEvent(event)

    def updateRecordTime(self):
        """Show the elapsed recording time (seconds) in the status bar."""
        msg = "Recorded %d sec" % (self.mediaRecorder.duration() // 1000)
        self.ui.statusbar.showMessage(msg)

    ###########################################################################
    def detectChars(self, qImg):
        """Crop a fixed 400x200 region from the centre of *qImg* and return
        it as a new RGB888 QImage (also stored on ``self.image``).

        The QImage is converted to format 4 (Format_RGB32, 4 bytes/pixel),
        viewed as a numpy array, centre-cropped, then converted BGR->RGB
        for display.
        """
        incomingImage = qImg.convertToFormat(4)
        width = incomingImage.width()
        height = incomingImage.height()
        ptr = incomingImage.bits()
        ptr.setsize(incomingImage.byteCount())
        cvImg = np.array(ptr).reshape(height, width, 4)  # Copies the data
        ######################
        centerx = int(cvImg.shape[1] / 2)
        centery = int(cvImg.shape[0] / 2)
        half_width = 200
        half_height = 100
        y = centery - half_height
        x = centerx - half_width
        # NOTE: its img[y: y + h, x: x + w] and *not* img[x: x + w, y: y + h]
        cvImgCroped = cvImg[y:y + half_height * 2, x:x + half_width * 2]
        # FIX: removed a leftover debug print(type(cvImg)) and a dead
        # np.zeros allocation that was immediately overwritten by .copy().
        # .copy() makes the crop C-contiguous for QImage construction.
        cvImgCContinues = cvImgCroped.copy()
        # cvImgCroped = cv2.rectangle(cvImgCroped,
        #     (centerx - half_width, centery - half_height),
        #     (centerx + half_width, centery + half_height),
        #     (0, 0, 255), 2)
        ######################
        # Convert to RGB for QImage.
        cvImgCContinues = cv2.cvtColor(cvImgCContinues, cv2.COLOR_BGR2RGB)
        # cv2.imshow("",cvImgCContinues)
        # cv2.waitKey(0)
        height, width, bytesPerComponent = cvImgCContinues.shape
        bytesPerLine = bytesPerComponent * width
        # Keep a reference on self so the numpy buffer outlives the QImage.
        self.image = QImage(cvImgCContinues.data, width, height, bytesPerLine,
                            QImage.Format_RGB888)
        return self.image

    ###########################################################################
    def processCapturedImage(self, requestId, img):
        """Run detectChars on the captured frame, preview the result, then
        return to the viewfinder.

        NOTE(review): the preview timeout is 40000 ms (40 s); sibling
        variants use 4000 — confirm whether this is intentional.
        """
        detectedImage = self.detectChars(img)
        scaledImage = detectedImage.scaled(self.ui.viewfinder.size(),
                                           Qt.KeepAspectRatio,
                                           Qt.SmoothTransformation)
        self.ui.lastImagePreviewLabel.setPixmap(QPixmap.fromImage(scaledImage))
        self.displayCapturedImage()
        QTimer.singleShot(40000, self.displayViewfinder)

    ###########################################################################
    def configureCaptureSettings(self):
        """Open the settings dialog matching the current capture mode."""
        if self.camera.captureMode() == QCamera.CaptureStillImage:
            self.configureImageSettings()
        elif self.camera.captureMode() == QCamera.CaptureVideo:
            self.configureVideoSettings()

    def configureVideoSettings(self):
        """Let the user edit audio/video encoder settings; apply on accept."""
        settingsDialog = VideoSettings(self.mediaRecorder)
        settingsDialog.setAudioSettings(self.audioSettings)
        settingsDialog.setVideoSettings(self.videoSettings)
        settingsDialog.setFormat(self.videoContainerFormat)
        if settingsDialog.exec_():
            self.audioSettings = settingsDialog.audioSettings()
            self.videoSettings = settingsDialog.videoSettings()
            self.videoContainerFormat = settingsDialog.format()
            self.mediaRecorder.setEncodingSettings(self.audioSettings,
                                                   self.videoSettings,
                                                   self.videoContainerFormat)

    def configureImageSettings(self):
        """Let the user edit still-image encoder settings; apply on accept."""
        settingsDialog = ImageSettings(self.imageCapture)
        settingsDialog.setImageSettings(self.imageSettings)
        if settingsDialog.exec_():
            self.imageSettings = settingsDialog.imageSettings()
            self.imageCapture.setEncodingSettings(self.imageSettings)

    def record(self):
        """Start video recording and refresh the duration display."""
        self.mediaRecorder.record()
        self.updateRecordTime()

    def pause(self):
        self.mediaRecorder.pause()

    def stop(self):
        self.mediaRecorder.stop()

    def setMuted(self, muted):
        self.mediaRecorder.setMuted(muted)

    def toggleLock(self):
        """Toggle autofocus: unlock when searching/locked, otherwise lock."""
        if self.camera.lockStatus() in (QCamera.Searching, QCamera.Locked):
            self.camera.unlock()
        elif self.camera.lockStatus() == QCamera.Unlocked:
            self.camera.searchAndLock()

    def updateLockStatus(self, status, reason):
        """Reflect the focus-lock state in the lock button and status bar."""
        indicationColor = Qt.black
        if status == QCamera.Searching:
            self.ui.statusbar.showMessage("Focusing...")
            self.ui.lockButton.setText("Focusing...")
            indicationColor = Qt.yellow
        elif status == QCamera.Locked:
            self.ui.lockButton.setText("Unlock")
            self.ui.statusbar.showMessage("Focused", 2000)
            indicationColor = Qt.darkGreen
        elif status == QCamera.Unlocked:
            self.ui.lockButton.setText("Focus")
            if reason == QCamera.LockFailed:
                self.ui.statusbar.showMessage("Focus Failed", 2000)
                indicationColor = Qt.red
        palette = self.ui.lockButton.palette()
        palette.setColor(QPalette.ButtonText, indicationColor)
        self.ui.lockButton.setPalette(palette)

    def takeImage(self):
        """Capture one still image asynchronously (imageSaved fires later)."""
        self.isCapturingImage = True
        self.imageCapture.capture()

    def startCamera(self):
        self.camera.start()

    def stopCamera(self):
        self.camera.stop()

    def updateCaptureMode(self):
        """Switch the camera between still/video mode to match the UI tab."""
        tabIndex = self.ui.captureWidget.currentIndex()
        captureMode = QCamera.CaptureStillImage if tabIndex == 0 else QCamera.CaptureVideo
        if self.camera.isCaptureModeSupported(captureMode):
            self.camera.setCaptureMode(captureMode)

    def updateCameraState(self, state):
        """Enable/disable start/stop/settings actions per camera state."""
        if state == QCamera.ActiveState:
            self.ui.actionStartCamera.setEnabled(False)
            self.ui.actionStopCamera.setEnabled(True)
            self.ui.captureWidget.setEnabled(True)
            self.ui.actionSettings.setEnabled(True)
        elif state in (QCamera.UnloadedState, QCamera.LoadedState):
            self.ui.actionStartCamera.setEnabled(True)
            self.ui.actionStopCamera.setEnabled(False)
            self.ui.captureWidget.setEnabled(False)
            self.ui.actionSettings.setEnabled(False)

    def updateRecorderState(self, state):
        """Enable/disable record/pause/stop buttons per recorder state."""
        if state == QMediaRecorder.StoppedState:
            self.ui.recordButton.setEnabled(True)
            self.ui.pauseButton.setEnabled(True)
            self.ui.stopButton.setEnabled(False)
        elif state == QMediaRecorder.PausedState:
            self.ui.recordButton.setEnabled(True)
            self.ui.pauseButton.setEnabled(False)
            self.ui.stopButton.setEnabled(True)
        elif state == QMediaRecorder.RecordingState:
            self.ui.recordButton.setEnabled(False)
            self.ui.pauseButton.setEnabled(True)
            self.ui.stopButton.setEnabled(True)

    def setExposureCompensation(self, index):
        # Slider operates in half-EV steps.
        self.camera.exposure().setExposureCompensation(index * 0.5)

    def displayRecorderError(self):
        QMessageBox.warning(self, "Capture error",
                            self.mediaRecorder.errorString())

    def displayCameraError(self):
        QMessageBox.warning(self, "Camera error", self.camera.errorString())

    def updateCameraDevice(self, action):
        """Devices-menu handler: switch to the device stored in the action."""
        self.setCamera(action.data())

    def displayViewfinder(self):
        self.ui.stackedWidget.setCurrentIndex(0)

    def displayCapturedImage(self):
        self.ui.stackedWidget.setCurrentIndex(1)

    def readyForCapture(self, ready):
        self.ui.takeImageButton.setEnabled(ready)

    def imageSaved(self, id, fileName):
        """Capture hit disk; finish a deferred application exit if pending."""
        self.isCapturingImage = False
        if self.applicationExiting:
            self.close()

    def closeEvent(self, event):
        """Defer closing until an in-flight capture finishes saving."""
        if self.isCapturingImage:
            self.setEnabled(False)
            self.applicationExiting = True
            event.ignore()
        else:
            event.accept()
class Camera(QMainWindow):
    """Attendance-checking camera window.

    Recognizes student faces from the default webcam, confirms IDs with the
    user, buffers them in a sqlite table (`Temp` in `.TempExcels.db`), and
    flushes them to an Excel attendance sheet on exit.
    """

    def __init__(self, parent=None):
        super(Camera, self).__init__(parent)
        self.ui = Ui_Camera()
        # Recognition state: previous/current recognized IDs and how many
        # consecutive frames produced the same ID (popup after 5).
        self.pre_id = 0
        self.cur_id = 0
        self.count = 0
        self.checked = 0
        # Audio throttle: replay the "look ahead" prompt only after
        # audio_settime reaches 40 non-frontal frames.
        self.audio_settime = 0
        self.allow_flag = 1
        self.check_list = []  # IDs already confirmed this session
        self.camera = None
        self.imageCapture = None
        self.isCapturingImage = False
        self.applicationExiting = False
        self.ui.setupUi(self)

        # Enumerate available camera devices into the Devices menu;
        # the first device found becomes the default selection.
        cameraDevice = QByteArray()
        videoDevicesGroup = QActionGroup(self)
        videoDevicesGroup.setExclusive(True)
        for deviceName in QCamera.availableDevices():
            description = QCamera.deviceDescription(deviceName)
            videoDeviceAction = QAction(description, videoDevicesGroup)
            videoDeviceAction.setCheckable(True)
            videoDeviceAction.setData(deviceName)
            if cameraDevice.isEmpty():
                cameraDevice = deviceName
                videoDeviceAction.setChecked(True)
            self.ui.menuDevices.addAction(videoDeviceAction)
        videoDevicesGroup.triggered.connect(self.updateCameraDevice)
        self.setCamera(cameraDevice)

        # Create and load the recognition model (FaceNet embedding + SVM).
        path_pretrained = "apis/models/facenet/20180402-114759.pb"
        path_SVM = "apis/models/SVM/SVM.pkl"
        self.recognizer = Recognizer()
        self.recognizer.create_graph(path_pretrained, path_SVM)

        # Others
        self.file_path = ""  # attendance Excel path; empty until user opens one
        self.audios = [
            "../data/tone.mp3", "../data/face_stable.mp3", "look_ahead.mp3"
        ]

    def setCamera(self, cameraDevice):
        """Open the default webcam at 640x480 and prepare the frame timer.

        The timer is created stopped; startCamera() starts it.

        Arguments:
            cameraDevice -- selected Qt device name (unused; cv2 index 0 is
                opened regardless — NOTE(review): confirm this is intended).
        """
        self.camera = cv2.VideoCapture(0)
        self.image = None
        self.camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        self.camera.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        self.timer = QTimer()
        self.timer.timeout.connect(self.update_frame)
        self.timer.stop()

    def check_db_table(self, filepath):
        """Flush any leftover `Temp` table to Excel, then recreate it empty.

        If a table already exists, its rows are treated as unsaved IDs from a
        previous run: they are checked into the Excel file at `filepath`, any
        failures are shown in a warning dialog, and the table is recreated.
        Otherwise a fresh `Temp` table is created.

        Arguments:
            filepath {str} -- Excel attendance file path.
        """
        mssv = []
        with sqlite3.connect('.TempExcels.db') as db:
            c = db.cursor()
            c.execute("SELECT name FROM sqlite_master WHERE type='table';")
            exist = c.fetchone()
            if exist:
                self.ui.textBrowser.append("unsolved data")
                # NOTE(review): assumes the only table is Temp; rows are
                # appended as whole tuples here, vs row[0] in Save_to_excel —
                # confirm AttendanceChecking accepts both shapes.
                c.execute("SELECT * FROM Temp")
                for row in c.fetchall():
                    mssv.append(row)
                filecheck = AttendanceChecking(filepath)
                failcases = filecheck.start_checking(mssv)
                fail_str = "Incomplete IDs:\n"
                if failcases:
                    for failcase in failcases:
                        fail_str = fail_str + str(failcase) + "\n"
                    QMessageBox.warning(self, 'Failcase list', fail_str)
                self.ui.textBrowser.append("completely solved")
                c.execute('drop table if exists Temp')
                c.execute('create table if not exists Temp(mssv INT NOT NULL)')
                self.ui.textBrowser.append("create a new table - Ready to start")
            else:
                self.ui.textBrowser.append("Data cleared - Ready to start")
                c.execute('create table if not exists Temp(mssv INT NOT NULL)')
            db.commit()
            c.close()
        db.close()

    def update_frame(self):
        """Grab one frame, run face recognition, and update the UI.

        Pipeline: reject blurred frames, require 1-3 faces and a frontal
        view, then recognize. An ID seen in 5 consecutive frames triggers a
        confirmation dialog; confirmed IDs are buffered in the Temp table.
        """
        ret, self.image = self.camera.read(0)
        self.image = cv2.flip(self.image, 1)
        # Remove motion-blur frame
        if not detect_blur(self.image, thres=5.0):
            face_locs = find_bbox(self.image)
            n_faces = len(face_locs)
            # Remove multi-face frame
            if 0 < n_faces <= 3:
                is_frontal, _ = check_front_view(self.image, face_locs)
                # Remove non-frontal-view frame
                if is_frontal:
                    self.image, _, _ = draw_bbox(self.image, face_locs,
                                                 color="green")
                    image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
                    id, score = self.recognizer.recognize(
                        image, face_locs, 0.18825)
                    self.pre_id = self.cur_id
                    self.cur_id = id
                    dis_str = "Student ID: %s, Score: %.4f" % (id, score)
                    self.ui.textBrowser.append(dis_str)
                    # Verification: ID was checked or not.
                    # FIX: recompute per frame — previously the flag was
                    # sticky, so after one checked ID every later ID was
                    # reported as already checked.
                    # NOTE(review): check_list holds ints (from correct_mssv)
                    # while `id` comes from the recognizer as a string —
                    # confirm the comparison ever matches.
                    self.checked = any(
                        check_idx == id for check_idx in self.check_list)
                    # Process if ID has not been checked
                    if not self.checked:
                        if not id == "unknown":  # positive ID
                            if self.pre_id == self.cur_id:
                                self.count += 1
                                # popup after 5 consecutive identical frames
                                if self.count == 5:
                                    id = int(id)
                                    mssv_check = self.correct_mssv(int(id))
                                    if mssv_check:
                                        self.insert_to_db(mssv_check)
                                        # display the number of absences
                                        get_total(self.file_path, id)
                                        self.check_list.append(mssv_check)
                            else:
                                self.count = 0
                    else:
                        self.ui.textBrowser.append(
                            "Student ID had been checked")
                else:
                    dis_str = "Face is not in frontal view"
                    # Throttled audio prompt: re-arm after 40 frames.
                    self.audio_settime += 1
                    if self.audio_settime >= 40:
                        self.allow_flag = 1
                    if self.allow_flag:
                        AudioPlayback(self.audios[2])
                        self.audio_settime = 0
                        self.allow_flag = 0
                    self.ui.textBrowser.append(dis_str)
            else:
                dis_str = "Require no more than 3 faces"
                self.ui.textBrowser.append(dis_str)
        else:
            dis_str = "Frame is montion-blurred"
            self.ui.textBrowser.append(dis_str)
        self.displayImage(self.image, 1)

    def displayImage(self, img, window=1):
        """Convert a BGR cv2 frame to QImage and show it in the image label.

        Arguments:
            img {numpy.ndarray} -- processed frame (BGR or BGRA).

        Keyword Arguments:
            window {int} -- target display; only 1 (main label) is handled
                (default: {1}).
        """
        qformat = QImage.Format_Indexed8
        if len(img.shape) == 3:
            if img.shape[2] == 4:
                qformat = QImage.Format_RGBA8888
            else:
                qformat = QImage.Format_RGB888
        outImage = QImage(img, img.shape[1], img.shape[0], img.strides[0],
                          qformat)
        outImage = outImage.rgbSwapped()  # BGR -> RGB for display
        if window == 1:
            self.ui.img_label.setPixmap(QPixmap.fromImage(outImage))
            self.ui.img_label.setScaledContents(True)

    def configureOpenExcels(self):
        """Open the dialog for choosing the attendance Excel file."""
        settingsopenexcelDialog = OpenExcels()
        settingsopenexcelDialog.openinitUI()

    def configureSavetemplate(self):
        """Open the dialog for saving the attendance template."""
        settingssaveexcelDialog = OpenExcels()
        settingssaveexcelDialog.saveinitUI()

    def insert_to_db(self, in_value):
        """Buffer a confirmed student ID into the `Temp` sqlite table.

        Arguments:
            in_value {int} -- confirmed student ID.
        """
        with sqlite3.connect('.TempExcels.db') as db:
            c = db.cursor()
            c.execute('insert into Temp values(?)', (in_value, ))
            db.commit()
            c.close()

    def display_absences(self, absences):
        """Show the absence count; warn at 3 absences, abort-dialog beyond.

        Arguments:
            absences {int} -- number of recorded absences.
        """
        self.ui.absenceNumber.display(absences)
        if absences == 3:
            QMessageBox.warning(self, 'Absent Warning',
                                'This is your last absence')
        elif absences > 3:
            QMessageBox.critical(
                None, 'Absent Fail',
                "Your absences exceeded the allowable threshold",
                QMessageBox.Abort)

    def startCamera(self):
        """Start frame processing; refuse until an Excel file is chosen."""
        if not self.file_path:
            QMessageBox.warning(self, "Missing Excel file",
                                "Open Excel File to Start Camera")
        else:
            self.timer.start(5)

    def stopCamera(self):
        """Stop the frame-processing timer."""
        self.timer.stop()

    def displayCameraError(self):
        """Show the camera's last error in a warning dialog."""
        QMessageBox.warning(self, "Camera error", self.camera.errorString())

    def updateCameraDevice(self, action):
        """Re-initialize the camera with the device selected from the menu.

        Arguments:
            action {QAction} -- menu action whose data() is the device name.
        """
        self.setCamera(action.data())

    def close(self):
        """Confirm quit, flush buffered IDs to Excel, and exit the process."""
        reply = QMessageBox.question(self, 'Message', "Are you sure to quit?",
                                     QMessageBox.Yes | QMessageBox.No,
                                     QMessageBox.No)
        if reply == QMessageBox.Yes:
            if self.file_path:
                self.Save_to_excel(self.file_path)
            # FIX: the original `QtCore.QCoreApplication.instance().quit`
            # (no parentheses) was a no-op attribute access; quit the event
            # loop only after the user confirms.
            QtCore.QCoreApplication.instance().quit()
            sys.exit()

    def closeEvent(self, event):
        """Window-close handler: confirm quit and save before accepting.

        Arguments:
            event -- close event from clicking (X); ignored on "No".
        """
        reply = QMessageBox.question(self, 'Message', "Are you sure to quit?",
                                     QMessageBox.Yes | QMessageBox.No,
                                     QMessageBox.No)
        if reply == QMessageBox.Yes:
            if self.file_path:
                self.Save_to_excel(self.file_path)
            event.accept()
        else:
            event.ignore()

    def Save_to_excel(self, filepath):
        """Flush buffered IDs from the `Temp` table into the Excel file.

        Failed check-ins are listed in a warning dialog; the table is then
        recreated empty.

        Arguments:
            filepath {str} -- Excel file path. NOTE(review): parameter is
                unused — self.file_path is used instead; confirm intent.
        """
        if self.file_path:
            mssv = []
            with sqlite3.connect('.TempExcels.db') as db:
                c = db.cursor()
                c.execute("SELECT * FROM Temp")
                for row in c.fetchall():
                    mssv.append(row[0])
                if mssv:
                    filecheck = AttendanceChecking(self.file_path)
                    failcases = filecheck.start_checking(mssv)
                    fail_str = "Incomplete IDs:\n"
                    if failcases:
                        for failcase in failcases:
                            fail_str = fail_str + str(failcase) + "\n"
                        QMessageBox.warning(self, 'Failcase list', fail_str)
                    c.execute('drop table if exists Temp')
                    c.execute(
                        'create table if not exists Temp(mssv INT NOT NULL)')
                else:
                    self.ui.textBrowser.append("There is nothing to save")
                db.commit()
                c.close()
            db.close()
        else:
            self.ui.textBrowser.append("Open Excel File to process")

    def correct_mssv(self, mssv):
        """Ask the user to confirm (or correct) a recognized student ID.

        Arguments:
            mssv {int} -- recognized student ID, pre-filled in the dialog.

        Returns:
            int -- the confirmed ID, or 0 if the dialog was cancelled.
        """
        mssv_check, okPressed = QInputDialog.getInt(self, "Student confirm",
                                                    "MSSV:", mssv, 0,
                                                    100000000, 1)
        if okPressed:
            return (mssv_check)
        else:
            return (0)
class Camera(QMainWindow):
    """Demo camera window: live recognition plus template register/match.

    Uses a module-level `AlgorithmAPIs` instance (global `API`) for the
    template workflow and a `Recognizer` for the live webcam loop; the
    absence threshold is taken from the UI dial (global `dial_value`).
    """

    def __init__(self, parent=None):
        super(Camera, self).__init__(parent)
        # Shared algorithm facade used by train()/starttest().
        global API
        API = AlgorithmAPIs(template_dir="templates",
                            threshold=0.5,
                            use_multiprocessing=False)
        self.ui = Ui_Camera()
        self.camera = None
        self.imageCapture = None
        # self.mediaRecorder = None
        self.isCapturingImage = False
        self.applicationExiting = False
        self.imageSettings = QImageEncoderSettings()
        self.audioSettings = QAudioEncoderSettings()
        self.ui.setupUi(self)

        # Enumerate available camera devices into the Devices menu;
        # the first device found becomes the default selection.
        cameraDevice = QByteArray()
        videoDevicesGroup = QActionGroup(self)
        videoDevicesGroup.setExclusive(True)
        for deviceName in QCamera.availableDevices():
            description = QCamera.deviceDescription(deviceName)
            videoDeviceAction = QAction(description, videoDevicesGroup)
            videoDeviceAction.setCheckable(True)
            videoDeviceAction.setData(deviceName)
            if cameraDevice.isEmpty():
                cameraDevice = deviceName
                videoDeviceAction.setChecked(True)
            self.ui.menuDevices.addAction(videoDeviceAction)
        videoDevicesGroup.triggered.connect(self.updateCameraDevice)

        # Absence-threshold dial, mirrored on the LCD (default 3).
        self.ui.lcdNumber_2.display(0)
        self.ui.dial.valueChanged.connect(self.dial_display)
        global dial_value
        dial_value = 3
        self.ui.lcdNumber_2.display(dial_value)
        self.setCamera(cameraDevice)

        # Create and load the recognition model (FaceNet embedding + SVM).
        path_pretrained = "apis/models/facenet/20180402-114759.pb"
        path_SVM = "apis/models/SVM/SVM.pkl"
        self.recognizer = Recognizer()
        self.recognizer.create_graph(path_pretrained, path_SVM)

    def setCamera(self, cameraDevice):
        """Open the default webcam at 640x480 and start the 5 ms frame timer.

        Arguments:
            cameraDevice -- selected Qt device name (unused; cv2 index 0 is
                opened regardless — NOTE(review): confirm this is intended).
        """
        self.camera = cv2.VideoCapture(0)
        self.image = None
        self.camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        self.camera.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        self.timer = QTimer()
        self.timer.timeout.connect(self.update_frame)
        self.timer.start(5)

    def dial_display(self, value):
        """Show the dial value on the LCD and publish it as the threshold.

        Arguments:
            value {int} -- new dial position.
        """
        self.ui.lcdNumber_2.display(value)
        global dial_value
        dial_value = value

    def update_frame(self):
        """Grab one frame, run single-face recognition, and display it.

        Rejects blurred frames, requires exactly one frontal face; results
        are printed to stdout only.
        """
        ret, self.image = self.camera.read(0)
        self.image = cv2.flip(self.image, 1)
        # Remove motion-blur frame
        if not detect_blur(self.image, thres=5.0):
            face_locs = find_bbox(self.image)
            n_faces = len(face_locs)
            # Remove multi-face frame
            if n_faces == 1:
                is_frontal, _ = check_front_view(self.image, face_locs)
                # Remove non-frontal-view frame
                if is_frontal:
                    self.image, _, _ = draw_bbox(self.image, face_locs,
                                                 color="green")
                    image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
                    # NOTE(review): a fixed box (0, 0, 182, 182) is passed
                    # instead of face_locs — looks like debug code; confirm.
                    id, score = self.recognizer.recognize(
                        image, (0, 0, 182, 182), 0.29)
                    print("Student ID: %s, Score: %.4f" % (id, score))
                else:
                    print("Face is not in frontal view")
            else:
                print("Many faces in a frame")
        else:
            print("Frame is montion-blur")
        self.displayImage(self.image, 1)

    def displayImage(self, img, window=1):
        """Convert a BGR cv2 frame to QImage and show it in the image label.

        Arguments:
            img {numpy.ndarray} -- processed frame (BGR or BGRA).

        Keyword Arguments:
            window {int} -- target display; only 1 is handled (default: {1}).
        """
        qformat = QImage.Format_Indexed8
        if len(img.shape) == 3:
            if img.shape[2] == 4:
                qformat = QImage.Format_RGBA8888
            else:
                qformat = QImage.Format_RGB888
        outImage = QImage(img, img.shape[1], img.shape[0], img.strides[0],
                          qformat)
        outImage = outImage.rgbSwapped()  # BGR -> RGB for display
        if window == 1:
            self.ui.img_label.setPixmap(QPixmap.fromImage(outImage))
            self.ui.img_label.setScaledContents(True)

    def configureCaptureSettings(self):
        """Open the image settings dialog if the webcam is open."""
        if self.camera.isOpened():
            self.configureImageSettings()

    def configureOpenExcels(self):
        """Open the Excel-file chooser dialog."""
        settingsopenexcelDialog = OpenExcels()
        settingsopenexcelDialog.initUI()

    def configureImageSettings(self):
        """Run the image-settings dialog and apply accepted settings."""
        settingsDialog = ImageSettings(self.imageCapture)
        settingsDialog.setImageSettings(self.imageSettings)
        if settingsDialog.exec_():
            self.imageSettings = settingsDialog.imageSettings()
            self.imageCapture.setEncodingSettings(self.imageSettings)

    def train(self):
        """Register a face template from a fixed sample image.

        Extracts an embedding from images/putin/putin1.jpg, saves it as a
        .mat template in API.template_dir, and shows the registered face.
        """
        PIL_obj = Image.open("images/putin/putin1.jpg")
        img = np.array(PIL_obj)
        # Bounding box
        face_locs = API.find_bbox(img)
        img_draw_bbox, _, _ = API.draw_bbox(img, face_locs, color="green")
        # Extract embedding
        embeddings, faces = API.extract_embedding(img, face_locs)
        n_embeddings = len(embeddings)
        print("Number of embeddings: %d" % (n_embeddings))
        # Save template
        template = {
            "name": "putin",
            "embedding": embeddings[0],
            "face": faces[0]
        }
        savemat(os.path.join(API.template_dir, "putin.mat"), template)
        plt.figure(1)
        plt.imshow(faces[0])
        plt.axis('off')
        plt.title("Registration face")
        plt.show()

    def starttest(self):
        """Match a fixed probe image against the saved templates.

        Runs detection + embedding on images/putin/putin2.jpg, matches via
        the API, and shows input vs. registered face side by side.
        """
        PIL_obj = Image.open("images/putin/putin2.jpg")
        img = np.array(PIL_obj)
        # Bounding box
        face_locs = API.find_bbox(img)
        img_draw_bbox, _, _ = API.draw_bbox(img, face_locs, color="green")
        # Extract embedding
        embeddings, faces = API.extract_embedding(img, face_locs)
        n_embeddings = len(embeddings)
        print("Number of embeddings: %d" % (n_embeddings))
        # Identify person
        results = API.matching(embeddings)
        matched, name, face_reg = results[0]
        print("Identified name: %s" % (name))
        if name != "":
            plt.figure(1)
            plt.subplot(1, 2, 1)
            plt.title("Input image")
            plt.axis('off')
            plt.imshow(img_draw_bbox)
            plt.subplot(1, 2, 2)
            plt.title("Registration face")
            plt.axis('off')
            plt.imshow(face_reg)
            plt.show()

    def display_absences(self, absences):
        """Show the absence count against the dial-selected threshold.

        Arguments:
            absences {int} -- number of recorded absences.
        """
        self.ui.absenceNumber.display(absences)
        if absences == dial_value:
            QMessageBox.warning(self, 'Absent Warning',
                                'This is your last absence')
        elif absences > dial_value:
            QMessageBox.critical(
                None, 'Absent Fail',
                "Your absences exceeded the allowable threshold",
                QMessageBox.Abort)

    def takeImage(self):
        """Grab one frame, preview it in an OpenCV window, and save it."""
        self.isCapturingImage = True
        # self.imageCapture.capture()
        s, capture_img = self.camera.read()  # 0 -> index of camera
        if s:  # Camera initialized without any errors
            # FIX: qualify with cv2. — the unqualified namedWindow/imshow/
            # WINDOW_AUTOSIZE would NameError; the rest of the file uses
            # the cv2. prefix (cv2.waitKey below).
            cv2.namedWindow("capture_image", cv2.WINDOW_AUTOSIZE)
            cv2.imshow("capture_image", capture_img)
            cv2.waitKey(0)
            cv2.destroyWindow("capture_image")
            self.imageSaved("capture_img.png", capture_img)

    def startCamera(self):
        """Start the frame-processing timer (5 ms interval)."""
        self.timer.start(5)

    def stopCamera(self):
        """Stop the frame-processing timer."""
        self.timer.stop()

    def displayCameraError(self):
        """Show the camera's last error in a warning dialog."""
        QMessageBox.warning(self, "Camera error", self.camera.errorString())

    def updateCameraDevice(self, action):
        """Re-initialize the camera with the device selected from the menu.

        Arguments:
            action {QAction} -- menu action whose data() is the device name.
        """
        self.setCamera(action.data())

    def close(self):
        """Quit the Qt event loop and exit the process.

        FIX: the original `.quit` (no parentheses) was a no-op attribute
        access; it is now actually called.
        """
        QtCore.QCoreApplication.instance().quit()
        sys.exit()

    def imageSaved(self, id, fileName):
        """Write a captured frame to disk.

        Arguments:
            id {str} -- destination file path (despite the name).
            fileName {numpy.ndarray} -- image data to write (despite the
                name). NOTE(review): parameter names are swapped relative to
                their roles; kept for interface compatibility.
        """
        cv2.imwrite(id, fileName)