def __init__(self, **traits):
    super(MainWindow, self).__init__(**traits)

    # Build the camera selection dialog box
    self.cameras_dialog.on_trait_change(self.on_cameras_response, 'closed')
    self.on_cameras_response()

    self.processing_thread = ProcessingThread(self, self.processing_queue,
                                              self.display_frame_rate)
    self.processing_thread.start()

def __init__(self, parent):
    self.parent = parent
    self.queue = Queue.Queue()
    self.processThread = ProcessingThread(self.queue)
    #self.processThread = threading.Thread(target=self.consumer.run())
    self.processThread.start()
    self.fileLoader = DicomLoader(self, "file")
    self.folderLoader = DicomLoader(self, "series")
    self.importSrc()
    self.initGUI()
    self.initToolbar()
    self.initConnections()

def __init__(self, **traits):
    super(MainWindow, self).__init__(**traits)

    # Build the camera selection dialog box
    self.cameras_dialog.on_trait_change(self.on_cameras_response, 'closed')

    # Open the default plugin
    info = self.cameras_dialog.get_plugin_info()
    try:
        self.select_plugin(*info)
    except ImportError:
        # some module was not available, select the dummy
        self.cameras_dialog.select_fallback()
        info = self.cameras_dialog.get_plugin_info()
        self.select_plugin(*info)

    self.processing_thread = ProcessingThread(self, self.processing_queue,
                                              self.display_frame_rate)
    self.processing_thread.start()

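# ---------------------------------------------------------------------------
# Hypothetical sketch (not from the original project): the __init__ variants
# above hand a frame queue and a display rate to a ProcessingThread, but the
# thread itself is not shown.  A minimal consumer with the same constructor
# shape (owner, frame queue, update frequency) could look like this; the
# `process_frame` hook is invented purely for illustration.
import queue
import threading
import time


class ProcessingThread(threading.Thread):

    def __init__(self, owner, frame_queue, update_frequency):
        threading.Thread.__init__(self)
        self.daemon = True
        self.owner = owner
        self.queue = frame_queue
        self.update_frequency = update_frequency  # target frames per second
        self.running = True

    def run(self):
        while self.running:
            start = time.time()
            try:
                frame = self.queue.get(timeout=1.0)
            except queue.Empty:
                continue
            self.process_frame(frame)
            self.queue.task_done()
            # Throttle to roughly update_frequency frames per second
            delay = 1.0 / self.update_frequency - (time.time() - start)
            if delay > 0:
                time.sleep(delay)

    def process_frame(self, frame):
        pass  # transform/display plugins would run here
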
class MainWindow(HasTraits):
    '''The main window for the Beams application.'''

    # Current folder for file dialog
    _current_folder = None

    camera = Instance(Camera)
    id_string = DelegatesTo('camera')
    resolution = DelegatesTo('camera')
    status = Str()
    screen = Instance(CameraImage, args=())
    cmap = DelegatesTo('screen')
    display_frame_rate = Range(1, 60, 15)
    transform_plugins = List(Instance(TransformPlugin))
    display_plugins = List(Instance(DisplayPlugin))
    acquisition_thread = Instance(AcquisitionThread)  # default: None
    processing_thread = Instance(ProcessingThread)  # default: None
    processing_queue = Instance(queue.Queue, kw={'maxsize': MAX_QUEUE_SIZE})
    cameras_dialog = Instance(CameraDialog, args=())

    # Actions
    about = Action(name='&About...', tooltip='About Beams',
                   image=find_icon('about'), action='action_about')
    save = Action(name='&Save Image', accelerator='Ctrl+S',
                  tooltip='Save the current image to a file',
                  image=find_icon('save'), action='action_save')
    quit = Action(name='&Quit', accelerator='Ctrl+Q',
                  tooltip='Exit the application',
                  image=find_icon('quit'), action='_on_close')
    choose_camera = Action(name='Choose &Camera...',
                           tooltip='Choose from a number of camera plugins',
                           action='action_choose_camera')
    take_video = Action(name='Take &Video', style='toggle',
                        tooltip='Start viewing the video feed from the camera',
                        image=find_icon('camera-video'),
                        action='action_take_video')
    take_photo = Action(name='Take &Photo',
                        tooltip='Take one snapshot from the camera',
                        image=find_icon('camera-photo'),
                        action='action_take_photo',
                        enabled_when='self.take_video.checked == False')
    find_resolution = Button()

    view = View(
        VGroup(
            HSplit(
                Tabbed(
                    VGroup(
                        Item('id_string', style='readonly', label='Camera'),
                        Item('resolution', style='readonly',
                             format_str=u'%i \N{multiplication sign} %i'),
                        Group(Item('camera', show_label=False, style='custom'),
                              label='Camera properties', show_border=True),
                        label='Camera'),
                    VGroup(
                        Item('cmap', label='Color scale',
                             editor=EnumEditor(values={
                                 None: '0:None (image default)',
                                 gray: '1:Grayscale',
                                 bone: '2:Bone',
                                 pink: '3:Copper',
                                 jet: '4:Rainbow (considered harmful)',
                                 isoluminant: '5:Isoluminant',
                                 awesome: '6:Low-intensity contrast'})),
                        Item('screen', show_label=False,
                             editor=ColorMapEditor(width=256)),
                        Item('display_frame_rate'),
                        label='Video'),
                    # FIXME: mutable=False means the items can't be deleted,
                    # added, or rearranged, but we do actually want them to
                    # be rearranged.
                    VGroup(Item('transform_plugins', show_label=False,
                                editor=ListEditor(style='custom', mutable=False)),
                           label='Transform'),
                    VGroup(Item('display_plugins', show_label=False,
                                editor=ListEditor(style='custom', mutable=False)),
                           label='Math')),
                Item('screen', show_label=False, width=640, height=480,
                     style='custom')),
            Item('status', style='readonly', show_label=False)),
        menubar=MenuBar(
            # vertical bar is undocumented but it seems to keep the menu
            # items in the order they were specified in
            Menu('|', save, '_', quit, name='&File'),
            Menu(name='&Edit'),
            Menu(name='&View'),
            Menu('|', choose_camera, '_', take_photo, take_video, name='&Camera'),
            Menu(name='&Math'),
            Menu(about, name='&Help')),
        toolbar=ToolBar('|', save, '_', take_photo, take_video),
        title='Beams',
        resizable=True,
        handler=MainHandler)

    def _find_resolution_fired(self):
        return self.view.handler.action_find_resolution(None)

    def _display_frame_rate_changed(self, value):
        self.processing_thread.update_frequency = value

    def _transform_plugins_default(self):
        plugins = []
        for name in ['Rotator', 'BackgroundSubtract']:
            module = __import__(name, globals(), locals(), [name])
            plugins.append(getattr(module, name)())
        return plugins

    def _display_plugins_default(self):
        plugins = []
        for name in ['BeamProfiler', 'MinMaxDisplay', 'DeltaDetector',
                     'Centroid']:
            module = __import__(name, globals(), locals(), [name])
            plugins.append(getattr(module, name)(screen=self.screen))
        return plugins

    def __init__(self, **traits):
        super(MainWindow, self).__init__(**traits)

        # Build the camera selection dialog box
        self.cameras_dialog.on_trait_change(self.on_cameras_response, 'closed')
        self.on_cameras_response()

        self.processing_thread = ProcessingThread(self, self.processing_queue,
                                                  self.display_frame_rate)
        self.processing_thread.start()

    def on_cameras_response(self):
        plugin_obj = self.cameras_dialog.get_plugin_object()
        try:
            self.select_plugin(plugin_obj)
        except ImportError:
            # some module was not available, select the dummy
            error(None, 'Loading the {} camera plugin failed. '
                  'Taking you back to the dummy plugin.'.format(
                      plugin_obj['name']))
            self.cameras_dialog.select_fallback()
            info = self.cameras_dialog.get_plugin_info()
            self.select_plugin(*info)

    # Select camera plugin
    def select_plugin(self, plugin_obj):
        # Set up image capturing
        self.camera = plugin_obj()
        try:
            self.camera.open()
        except CameraError:
            error(None, 'No camera was detected. Did you forget to plug it in?')
            sys.exit()

class MainWindow(HasTraits):
    '''The main window for the Beams application.'''

    # Current folder for file dialog
    _current_folder = None

    camera = Instance(Camera)
    id_string = DelegatesTo('camera')
    resolution = DelegatesTo('camera')
    status = Str()
    screen = Instance(CameraImage, args=())
    cmap = DelegatesTo('screen')
    display_frame_rate = Range(1, 60, 15)
    transform_plugins = List(Instance(TransformPlugin))
    display_plugins = List(Instance(DisplayPlugin))
    acquisition_thread = Instance(AcquisitionThread)  # default: None
    processing_thread = Instance(ProcessingThread)  # default: None
    processing_queue = Instance(queue.Queue, kw={'maxsize': MAX_QUEUE_SIZE})
    cameras_dialog = Instance(CameraDialog, args=())

    # Actions
    about = Action(name='&About...', tooltip='About Beams',
                   image=find_icon('about'), action='action_about')
    save = Action(name='&Save Image', accelerator='Ctrl+S',
                  tooltip='Save the current image to a file',
                  image=find_icon('save'), action='action_save')
    quit = Action(name='&Quit', accelerator='Ctrl+Q',
                  tooltip='Exit the application',
                  image=find_icon('quit'), action='_on_close')
    choose_camera = Action(name='Choose &Camera...',
                           tooltip='Choose from a number of camera plugins',
                           action='action_choose_camera')
    take_video = Action(name='Take &Video', style='toggle',
                        tooltip='Start viewing the video feed from the camera',
                        image=find_icon('camera-video'),
                        action='action_take_video')
    take_photo = Action(name='Take &Photo',
                        tooltip='Take one snapshot from the camera',
                        image=find_icon('camera-photo'),
                        action='action_take_photo',
                        enabled_when='self.take_video.checked == False')
    find_resolution = Button()

    view = View(
        VGroup(
            HSplit(
                Tabbed(
                    VGroup(
                        Item('id_string', style='readonly', label='Camera'),
                        Item('resolution', style='readonly',
                             format_str=u'%i \N{multiplication sign} %i'),
                        Group(Item('camera', show_label=False, style='custom'),
                              label='Camera properties', show_border=True),
                        label='Camera'),
                    VGroup(
                        Item('cmap', label='Color scale',
                             editor=EnumEditor(values={
                                 None: '0:None (image default)',
                                 gray: '1:Grayscale',
                                 bone: '2:Bone',
                                 pink: '3:Copper',
                                 jet: '4:Rainbow (considered harmful)',
                                 isoluminant: '5:Isoluminant',
                                 awesome: '6:Low-intensity contrast'})),
                        Item('screen', show_label=False,
                             editor=ColorMapEditor(width=256)),
                        Item('display_frame_rate'),
                        label='Video'),
                    # FIXME: mutable=False means the items can't be deleted,
                    # added, or rearranged, but we do actually want them to
                    # be rearranged.
                    VGroup(Item('transform_plugins', show_label=False,
                                editor=ListEditor(style='custom', mutable=False)),
                           label='Transform'),
                    VGroup(Item('display_plugins', show_label=False,
                                editor=ListEditor(style='custom', mutable=False)),
                           label='Math')),
                Item('screen', show_label=False, width=640, height=480,
                     style='custom')),
            Item('status', style='readonly', show_label=False)),
        menubar=MenuBar(
            # vertical bar is undocumented but it seems to keep the menu
            # items in the order they were specified in
            Menu('|', save, '_', quit, name='&File'),
            Menu(name='&Edit'),
            Menu(name='&View'),
            Menu('|', choose_camera, name='&Camera'),
            Menu(name='&Math'),
            Menu(about, name='&Help')),
        toolbar=ToolBar('|', save, '_', take_photo, take_video),
        title='Beams',
        resizable=True,
        handler=MainHandler)

    def _find_resolution_fired(self):
        return self.view.handler.action_find_resolution(None)

    def _display_frame_rate_changed(self, value):
        self.processing_thread.update_frequency = value

    def _transform_plugins_default(self):
        plugins = []
        for name in ['Rotator', 'BackgroundSubtract']:
            module = __import__(name, globals(), locals(), [name])
            plugins.append(getattr(module, name)())
        return plugins

    def _display_plugins_default(self):
        plugins = []
        for name in ['BeamProfiler', 'MinMaxDisplay', 'DeltaDetector',
                     'Centroid']:
            module = __import__(name, globals(), locals(), [name])
            plugins.append(getattr(module, name)(screen=self.screen))
        return plugins

    def __init__(self, **traits):
        super(MainWindow, self).__init__(**traits)

        # Build the camera selection dialog box
        self.cameras_dialog.on_trait_change(self.on_cameras_response, 'closed')
        self.on_cameras_response()

        self.processing_thread = ProcessingThread(self, self.processing_queue,
                                                  self.display_frame_rate)
        self.processing_thread.start()

    def on_cameras_response(self):
        plugin_obj = self.cameras_dialog.get_plugin_object()
        try:
            self.select_plugin(plugin_obj)
        except ImportError:
            # some module was not available, select the dummy
            error(None, 'Loading the {} camera plugin failed. '
                  'Taking you back to the dummy plugin.'.format(
                      plugin_obj['name']))
            self.cameras_dialog.select_fallback()
            info = self.cameras_dialog.get_plugin_info()
            self.select_plugin(*info)

    # Select camera plugin
    def select_plugin(self, plugin_obj):
        # Set up image capturing
        self.camera = plugin_obj()
        try:
            self.camera.open()
        except CameraError:
            error(None, 'No camera was detected. Did you forget to plug it in?')
            sys.exit()

class AppWindow(QtGui.QMainWindow):

    def __init__(self, parent):
        self.parent = parent
        self.queue = Queue.Queue()
        self.processThread = ProcessingThread(self.queue)
        #self.processThread = threading.Thread(target=self.consumer.run())
        self.processThread.start()
        self.fileLoader = DicomLoader(self, "file")
        self.folderLoader = DicomLoader(self, "series")
        self.importSrc()
        self.initGUI()
        self.initToolbar()
        self.initConnections()

    def importSrc(self):
        # will be shown as errors in pyCharm
        pass

    def exit(self):
        self.close()

    def initConnections(self):
        self.connect(self.ui.actionLoad, QtCore.SIGNAL("triggered()"),
                     self.loadSingleFile)
        self.connect(self.ui.actionExit, QtCore.SIGNAL("triggered()"),
                     self.exit)

    def initGUI(self):
        QtGui.QMainWindow.__init__(self)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.setWindowTitle("Simple Dicom Viewer")
        #self.dicomReader = vtk.vtkDICOMImageReader()
        #self.dicomReader = vtkgdcm.vtkGDCMImageReader()
        self.show()

        # Matplotlib canvas for the numpy view
        self.figure = plt.figure()
        self.canvas = FigureCanvas(self.figure)
        self.toolbar = NavigationToolbar(self.canvas, self)
        self.ui.numpyLayout.addWidget(self.toolbar)
        self.ui.numpyLayout.addWidget(self.canvas)

        # VTK render widget for the DICOM view
        self.vtkWidget = QVTKRenderWindowInteractor(self.ui.imageFrame)
        self.vtkWidget.setSizePolicy(QtGui.QSizePolicy.Expanding,
                                     QtGui.QSizePolicy.Expanding)
        #self.ui.imageLayout.removeWidget(self.ui.dicomSlider)
        self.ui.imageLayout.addWidget(self.vtkWidget)
        #self.ui.imageLayout.addWidget(self.ui.dicomSlider)
        self.disableSlider()

        self.viewer = vtk.vtkImageViewer()
        #self.viewer.SetupInteractor(MyInteractor())
        self.vtkWidget.GetRenderWindow().AddRenderer(self.viewer.GetRenderer())
        self.iren = self.vtkWidget.GetRenderWindow().GetInteractor()
        #self.iren.SetRenderWindow(self.vtkWidget.GetRenderWindow())

        # Mouse interaction state for the measuring tool
        self.drag = False
        self.measuring = False
        self.iren.AddObserver("LeftButtonPressEvent", self.leftClick)
        self.iren.AddObserver("LeftButtonReleaseEvent", self.leftRelease)
        self.iren.AddObserver("EnterEvent", self.mouseEntered)
        self.iren.AddObserver("LeaveEvent", self.mouseLeft)
        self.iren.AddObserver("MouseMoveEvent", self.mouseMoved)

    def mouseMoved(self, *args):
        if self.drag:
            self.temp = args[0].GetEventPosition()
            print(self.temp)
            print(self.begin)
            self.drawLine([self.begin, self.temp])
            return
        time.sleep(0.1)
        #print(args[0].GetEventPosition())

    def mouseEntered(self, *args):
        self.drag = False
        print("Entered")

    def mouseLeft(self, *args):
        self.drag = False
        print("Left")

    def leftClick(self, *args):
        if self.measuring:
            if self.drag == False:
                self.drag = True
                self.begin = args[0].GetEventPosition()
                print(self.begin)
                print(self.drag)
            else:
                self.drag = False
                self.end = args[0].GetEventPosition()
                self.drawLine([self.begin, self.end])
                self.printDistance(self.dist)

    def leftRelease(self, *args):
        #self.drag = False
        #print(self.drag)
        pass

    def printDistance(self, dist):
        box = QtGui.QMessageBox(self)
        box.setInformativeText("Distance: " + str(dist) + " cm.")
        box.show()

    def initToolbar(self):
        self.actions = Actions(self)

    def drawLine(self, points):
        # Remove the previous measuring line, if any
        try:
            self.viewer.GetRenderer().RemoveActor(self.actor)
            self.viewer.GetRenderer().Render()
        except:
            pass
        point1 = points[0]
        point2 = points[1]
        points = vtk.vtkPoints()
        points.SetNumberOfPoints(2)
        points.Allocate(2)
        points.InsertPoint(0, point1[0], point1[1], 0.001)
        points.InsertPoint(1, point2[0], point2[1], 0.001)
        dist = numpy.sqrt(numpy.square((point1[0] - point2[0]) * 0.028) +
                          numpy.square((point1[1] - point2[1]) * 0.030))
        self.cells = vtk.vtkCellArray()
        self.cells.Initialize()
        line = vtk.vtkLine()
        line.GetPointIds().SetId(0, 0)
        line.GetPointIds().SetId(1, 1)
        self.cells.InsertNextCell(line)
        self.poly = vtk.vtkPolyData()
        self.poly.Initialize()
        self.poly.SetPoints(points)
        self.poly.SetLines(self.cells)
        self.poly.Modified()
        mapper = vtk.vtkPolyDataMapper2D()
        #print(dir(mapper))
        mapper.SetInput(self.poly)
        mapper.ScalarVisibilityOn()
        mapper.SetScalarModeToUsePointData()
        mapper.Update()
        self.actor = vtk.vtkActor2D()
        self.actor.SetMapper(mapper)
        self.viewer.GetRenderer().AddActor2D(self.actor)
        self.dist = dist

    def loadSingleFile(self):
        loader = self.fileLoader
        loader.loadFile()
        if loader.accepted:
            loader.setDir(os.path.dirname(str(loader.selectedFile)))
            self.disableSlider()
            self.dicomReader = vtkgdcm.vtkGDCMImageReader()
            self.dicomReader.SetFileName(str(loader.selectedFile))
            print(dir(self.dicomReader))
            print(self.dicomReader.GetScale())
            self.dicomReader.Update()
            imageData = self.dicomReader.GetOutput()
            size = imageData.GetDimensions()
            width = size[0]
            height = size[1]
            self.vtkWidget.setMaximumSize(QtCore.QSize(width, height))
            self.vtkWidget.setMinimumSize(QtCore.QSize(width, height))

            # Convert the VTK image to a numpy array for the matplotlib view
            RefDs = dicom.read_file(str(loader.selectedFile))
            ConstPixelDims = (int(RefDs.Rows), int(RefDs.Columns), 1)
            pointData = imageData.GetPointData()
            arrayData = pointData.GetArray(0)
            arrayDicom = numpy_support.vtk_to_numpy(arrayData)
            arrayDicom = arrayDicom.reshape(ConstPixelDims, order='F')
            shape = arrayDicom.shape
            wtf = arrayDicom.reshape(shape[0], shape[1])
            wtf = numpy.fliplr(wtf).transpose()
            max = numpy.max(wtf)
            min = numpy.min(wtf)
            print(numpy.max(wtf))
            print(numpy.min(wtf))
            grad = numpy.gradient(wtf)
            print(wtf)
            computed = numpy.sqrt(numpy.square(grad[0]) + numpy.square(grad[1]))
            #self.proc.start()
            ax = self.figure.add_subplot(111)
            ax.imshow(wtf, interpolation="nearest", cmap=plt.get_cmap('gray'),
                      vmin=0, vmax=max)
            self.canvas.draw()

            # Commented-out VTK line-drawing experiments kept for reference
            #points = vtk.vtkPoints()
            #points.SetNumberOfPoints(2)
            #points.Allocate(2)
            #points.InsertPoint(0, 100, 100, 0.001)
            #points.InsertPoint(0, 200, 200, 0.001)
            #cells = vtk.vtkCellArray()
            #cells.Initialize()
            #line = vtk.vtkLine()
            #line.GetPointIds().SetId(0,0)
            #line.GetPointIds().SetId(1,1)
            #cells.InsertNextCell(line)
            #poly = vtk.vtkPolyData()
            #poly.Initialize()
            #poly.SetPoints(points)
            #poly.SetLines(cells)
            #poly.Modified()
            #mapper = vtk.vtkPolyDataMapper2D()
            #print(dir(mapper))
            #mapper.SetInput(poly)
            #mapper.ScalarVisibilityOn()
            #mapper.SetScalarModeToUsePointData()
            #mapper.Update()
            #self.drawLine([(200,200), (300,300)])
            #actor = vtk.vtkActor2D()
            #actor.SetMapper(mapper)

            blend = vtk.vtkImageBlend()
            blend.AddInputConnection(self.dicomReader.GetOutputPort())
            #blend.AddInputConnection(actor.GetOutputPort())
            self.viewer.SetInputConnection(blend.GetOutputPort())
            #print(dir(self.viewer.GetRenderer()))
            #self.viewer.GetRenderer().AddActor2D(actor)
            #self.viewer.SetInputConnection(self.dicomReader.GetOutputPort())
            self.viewer.SetZSlice(0)
            self.getMedicalData()
            self.iren.ReInitialize()
            self.iren.Render()
            self.iren.Start()
            #actor = vtk.vtkImageActor()
            #self.viewer.GetRenderer().AddActor(actor)
            self.viewer.GetRenderer().Render()

    def getMedicalData(self):
        # Parse the reader's printed "Medical Image Properties" block into a table
        #print(self.dicomReader)
        splitter = "Medical Image Properties:"
        data = str(self.dicomReader).split(splitter)
        data = [x.strip() for x in data]
        data = data[1].split('\n')
        data = [x.split(":") for x in data if x]
        self.ui.dicomData.setRowCount(len(data))
        self.ui.dicomData.setColumnCount(1)
        self.ui.dicomData.setHorizontalHeaderItem(0, QtGui.QTableWidgetItem("Data"))
        for i in xrange(0, len(data)):
            self.ui.dicomData.setVerticalHeaderItem(i, QtGui.QTableWidgetItem(data[i][0]))
            self.ui.dicomData.setItem(i, 0, QtGui.QTableWidgetItem(data[i][1]))
        self.ui.dicomData.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch)
        self.ui.dicomData.setSortingEnabled(False)
        #print(data)

    def disableSlider(self):
        self.ui.playButton.setDisabled(True)
        self.ui.dicomSlider.setDisabled(True)
        self.ui.dicomSlider.setValue(0)
        self.ui.dicomSlider.disconnect(self.ui.dicomSlider,
                                       QtCore.SIGNAL("valueChanged(int)"),
                                       self.sliderMoved)
        self.ui.playButton.disconnect(self.ui.playButton,
                                      QtCore.SIGNAL("clicked()"),
                                      self.playMovie)

    @QtCore.pyqtSlot(int)
    def sliderMoved(self, value):
        try:
            self.viewer.SetZSlice(value)
            self.iren.Render()
        except:
            raise ValueError

    @QtCore.pyqtSlot(int)
    def movieStep(self, value):
        self.ui.dicomSlider.setValue(value)
        self.ui.dicomSlider.setSliderDown(value)
        self.viewer.SetZSlice(value)
        self.iren.Render()

    @QtCore.pyqtSlot()
    def playMovie(self):
        self.ui.dicomSlider.disconnect(self.ui.dicomSlider,
                                       QtCore.SIGNAL("valueChanged(int)"),
                                       self.sliderMoved)
        self.viewer.SetZSlice(0)
        self.obj = Waiter()
        self.obj.trigger.connect(self.movieStep)
        self.obj.ended.connect(self.movieEnded)
        thr = ThreadWait(self.obj, self.ui.dicomSlider.maximum())
        self.queue.put((self.processThread.playMovie, thr, ))

    @QtCore.pyqtSlot()
    def movieEnded(self):
        self.ui.dicomSlider.connect(self.ui.dicomSlider,
                                    QtCore.SIGNAL("valueChanged(int)"),
                                    self.sliderMoved)

    def enableSlider(self, max):
        self.disableSlider()
        self.ui.playButton.setEnabled(True)
        self.ui.dicomSlider.setTracking(True)
        self.ui.dicomSlider.setEnabled(True)
        self.ui.dicomSlider.setValue(0)
        self.ui.dicomSlider.setMinimum(0)
        self.ui.dicomSlider.setMaximum(max)
        self.ui.dicomSlider.connect(self.ui.dicomSlider,
                                    QtCore.SIGNAL("valueChanged(int)"),
                                    self.sliderMoved)
        self.ui.playButton.connect(self.ui.playButton,
                                   QtCore.SIGNAL("clicked()"),
                                   self.playMovie)

    def loadFolder(self):
        loader = self.folderLoader
        loader.loadFile()
        if loader.accepted:
            loader.setDir(os.path.dirname(str(loader.selectedFolder)))
            self.dicomReader = vtkgdcm.vtkGDCMImageReader()
            regex = re.compile(r'.+\.dcm')
            files = [x for x in os.listdir(loader.selectedFolder)
                     if re.match(regex, x)]
            self.seriesSize = len(files)
            temp = vtk.vtkStringArray()
            temp.SetNumberOfValues(len(files))
            i = 0
            for file in sorted(files):
                temp.SetValue(i, os.path.join(str(loader.selectedFolder), file))
                i = i + 1
            self.dicomReader.SetFileNames(temp)
            self.dicomReader.Update()
            imageData = self.dicomReader.GetOutput()
            size = imageData.GetDimensions()
            width = size[0]
            height = size[1]
            self.vtkWidget.setMaximumSize(QtCore.QSize(width, height))
            self.vtkWidget.setMinimumSize(QtCore.QSize(width, height))
            self.viewer.SetInputConnection(self.dicomReader.GetOutputPort())
            self.iren.ReInitialize()
            self.getMedicalData()
            self.enableSlider(self.seriesSize - 1)
            self.ui.dicomSlider.setFocus()

    def undo(self):
        print("Undo")

    def redo(self):
        print("Redo")

    def magnify(self):
        print("Magnify")

    def cut(self):
        print("Cut")

    def measure(self):
        self.measuring = not self.measuring

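# ---------------------------------------------------------------------------
# Hypothetical sketch (not from the original project): the AppWindow above
# pushes work onto `self.queue` as (callable, argument) tuples, e.g.
# `self.queue.put((self.processThread.playMovie, thr,))`.  A ProcessingThread
# compatible with that call pattern could be as simple as the worker below;
# the `playMovie` body is only a guess at where the ThreadWait helper starts.
import threading


class ProcessingThread(threading.Thread):

    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.daemon = True   # do not block interpreter shutdown
        self.queue = queue

    def run(self):
        while True:
            func, arg = self.queue.get()   # blocks until work is queued
            try:
                func(arg)
            finally:
                self.queue.task_done()

    def playMovie(self, waiter):
        # Assumption: ThreadWait steps the slider via the Waiter's signals.
        waiter.start()
        waiter.wait()
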
def connectToCamera(self, dropFrameIfBufferFull, apiPreference, capThreadPrio,
                    procThreadPrio, enableFrameProcessing, width, height):
    # Set frame label text
    if self.sharedImageBuffer.isSyncEnabledForDeviceUrl(self.deviceUrl):
        self.frameLabel.setText("Camera connected. Waiting...")
    else:
        self.frameLabel.setText("Connecting to camera...")
    # Create capture thread
    self.captureThread = CaptureThread(self.sharedImageBuffer, self.deviceUrl,
                                       dropFrameIfBufferFull, apiPreference,
                                       width, height)
    # Attempt to connect to camera
    if self.captureThread.connectToCamera():
        # Create processing thread
        self.processingThread = ProcessingThread(self.sharedImageBuffer,
                                                 self.deviceUrl, self.cameraId)
        # Setup signal/slot connections
        self.processingThread.newFrame.connect(self.updateFrame)
        self.processingThread.updateStatisticsInGUI.connect(
            self.updateProcessingThreadStats)
        self.captureThread.updateStatisticsInGUI.connect(
            self.updateCaptureThreadStats)
        self.imageProcessingSettingsDialog.newImageProcessingSettings.connect(
            self.processingThread.updateImageProcessingSettings)
        self.newImageProcessingFlags.connect(
            self.processingThread.updateImageProcessingFlags)
        self.setROI.connect(self.processingThread.setROI)
        # Remove imageBuffer from shared buffer by deviceUrl after
        # captureThread stop/finished
        self.captureThread.finished.connect(self.afterCaptureThreadFinshed)
        self.processingThread.finished.connect(self.afterProcessingThreadFinshed)
        # Only enable ROI setting/resetting if frame processing is enabled
        if enableFrameProcessing:
            self.frameLabel.newMouseData.connect(self.newMouseData)
        # Set initial data in processing thread
        self.setROI.emit(QRect(0, 0, self.captureThread.getInputSourceWidth(),
                               self.captureThread.getInputSourceHeight()))
        self.newImageProcessingFlags.emit(self.imageProcessingFlags)
        self.imageProcessingSettingsDialog.updateStoredSettingsFromDialog()
        # Start capturing frames from camera
        self.captureThread.start(capThreadPrio)
        # Start processing captured frames (if enabled)
        if enableFrameProcessing:
            self.processingThread.start(procThreadPrio)
        # Setup imageBufferBar with minimum and maximum values
        self.imageBufferBar.setMinimum(0)
        self.imageBufferBar.setMaximum(
            self.sharedImageBuffer.getByDeviceUrl(self.deviceUrl).maxSize())
        # Enable "Clear Image Buffer" push button
        self.clearImageBufferButton.setEnabled(True)
        # Set text in labels
        self.deviceUrlLabel.setText(self.deviceUrl)
        self.cameraResolutionLabel.setText(
            "%dx%d" % (self.captureThread.getInputSourceWidth(),
                       self.captureThread.getInputSourceHeight()))
        # Set internal flag and return
        self.isCameraConnected = True
        # Set frame label text
        if not enableFrameProcessing:
            self.frameLabel.setText("Frame processing disabled.")
        return True
    # Failed to connect to camera
    else:
        return False

class CameraView(QWidget, Ui_CameraView):
    newImageProcessingFlags = pyqtSignal(ImageProcessingFlags)
    setROI = pyqtSignal(QRect)

    def __init__(self, parent, deviceUrl, sharedImageBuffer, cameraId):
        super(CameraView, self).__init__(parent)
        self.sharedImageBuffer = sharedImageBuffer
        self.cameraId = cameraId
        # Create image processing settings dialog
        self.imageProcessingSettingsDialog = ImageProcessingSettingsDialog(self)
        # Setup UI
        self.setupUi(self)
        # Save Device Url
        self.deviceUrl = deviceUrl
        # Initialize internal flag
        self.isCameraConnected = False
        # Set initial GUI state
        self.frameLabel.setText("No camera connected.")
        self.imageBufferBar.setValue(0)
        self.imageBufferLabel.setText("[000/000]")
        self.captureRateLabel.setText("")
        self.processingRateLabel.setText("")
        self.deviceUrlLabel.setText("")
        self.cameraResolutionLabel.setText("")
        self.roiLabel.setText("")
        self.mouseCursorPosLabel.setText("")
        self.clearImageBufferButton.setDisabled(True)
        # Initialize ImageProcessingFlags structure
        self.imageProcessingFlags = ImageProcessingFlags()
        # Connect signals/slots
        self.clearImageBufferButton.released.connect(self.clearImageBuffer)
        self.frameLabel.onMouseMoveEvent.connect(self.updateMouseCursorPosLabel)
        self.frameLabel.menu.triggered.connect(self.handleContextMenuAction)
        self.startButton.released.connect(self.startThread)
        self.pauseButton.released.connect(self.pauseThread)

    def delete(self):
        if self.isCameraConnected:
            # Stop processing thread
            if self.processingThread.isRunning():
                self.stopProcessingThread()
            # Stop capture thread
            if self.captureThread.isRunning():
                self.stopCaptureThread()
            # Automatically start frame processing (for other streams)
            if self.sharedImageBuffer.isSyncEnabledForDeviceUrl(self.deviceUrl):
                self.sharedImageBuffer.setSyncEnabled(True)
            # Disconnect camera
            if self.captureThread.disconnectCamera():
                qDebug("[%s] Camera successfully disconnected." % self.deviceUrl)
            else:
                qDebug("[%s] WARNING: Camera already disconnected." % self.deviceUrl)

    def afterCaptureThreadFinshed(self):
        # Delete Buffer
        self.sharedImageBuffer.removeByDeviceUrl(self.deviceUrl)

    def afterProcessingThreadFinshed(self):
        qDebug("[%s] WARNING: SQL already disconnected." % self.deviceUrl)

    def connectToCamera(self, dropFrameIfBufferFull, apiPreference,
                        capThreadPrio, procThreadPrio, enableFrameProcessing,
                        width, height):
        # Set frame label text
        if self.sharedImageBuffer.isSyncEnabledForDeviceUrl(self.deviceUrl):
            self.frameLabel.setText("Camera connected. Waiting...")
        else:
            self.frameLabel.setText("Connecting to camera...")
        # Create capture thread
        self.captureThread = CaptureThread(self.sharedImageBuffer,
                                           self.deviceUrl,
                                           dropFrameIfBufferFull,
                                           apiPreference, width, height)
        # Attempt to connect to camera
        if self.captureThread.connectToCamera():
            # Create processing thread
            self.processingThread = ProcessingThread(self.sharedImageBuffer,
                                                     self.deviceUrl,
                                                     self.cameraId)
            # Setup signal/slot connections
            self.processingThread.newFrame.connect(self.updateFrame)
            self.processingThread.updateStatisticsInGUI.connect(
                self.updateProcessingThreadStats)
            self.captureThread.updateStatisticsInGUI.connect(
                self.updateCaptureThreadStats)
            self.imageProcessingSettingsDialog.newImageProcessingSettings.connect(
                self.processingThread.updateImageProcessingSettings)
            self.newImageProcessingFlags.connect(
                self.processingThread.updateImageProcessingFlags)
            self.setROI.connect(self.processingThread.setROI)
            # Remove imageBuffer from shared buffer by deviceUrl after
            # captureThread stop/finished
            self.captureThread.finished.connect(self.afterCaptureThreadFinshed)
            self.processingThread.finished.connect(self.afterProcessingThreadFinshed)
            # Only enable ROI setting/resetting if frame processing is enabled
            if enableFrameProcessing:
                self.frameLabel.newMouseData.connect(self.newMouseData)
                # Set initial data in processing thread
                self.setROI.emit(QRect(0, 0,
                                       self.captureThread.getInputSourceWidth(),
                                       self.captureThread.getInputSourceHeight()))
                self.newImageProcessingFlags.emit(self.imageProcessingFlags)
                self.imageProcessingSettingsDialog.updateStoredSettingsFromDialog()
            # Start capturing frames from camera
            self.captureThread.start(capThreadPrio)
            # Start processing captured frames (if enabled)
            if enableFrameProcessing:
                self.processingThread.start(procThreadPrio)
            # Setup imageBufferBar with minimum and maximum values
            self.imageBufferBar.setMinimum(0)
            self.imageBufferBar.setMaximum(
                self.sharedImageBuffer.getByDeviceUrl(self.deviceUrl).maxSize())
            # Enable "Clear Image Buffer" push button
            self.clearImageBufferButton.setEnabled(True)
            # Set text in labels
            self.deviceUrlLabel.setText(self.deviceUrl)
            self.cameraResolutionLabel.setText(
                "%dx%d" % (self.captureThread.getInputSourceWidth(),
                           self.captureThread.getInputSourceHeight()))
            # Set internal flag and return
            self.isCameraConnected = True
            # Set frame label text
            if not enableFrameProcessing:
                self.frameLabel.setText("Frame processing disabled.")
            return True
        # Failed to connect to camera
        else:
            return False

    def stopCaptureThread(self):
        qDebug("[%s] About to stop capture thread..." % self.deviceUrl)
        self.captureThread.stop()
        # This allows the thread to be stopped if it is in a wait-state
        self.sharedImageBuffer.wakeAll()
        # Take one frame off a FULL queue to allow the capture thread to finish
        if self.sharedImageBuffer.getByDeviceUrl(self.deviceUrl).isFull():
            self.sharedImageBuffer.getByDeviceUrl(self.deviceUrl).get()
        self.captureThread.wait()
        qDebug("[%s] Capture thread successfully stopped." % self.deviceUrl)

    def stopProcessingThread(self):
        qDebug("[%s] About to stop processing thread..." % self.deviceUrl)
        self.processingThread.stop()
        # This allows the thread to be stopped if it is in a wait-state
        self.sharedImageBuffer.wakeAll()
        self.processingThread.wait()
        qDebug("[%s] Processing thread successfully stopped." % self.deviceUrl)

    def startThread(self):
        pass

    def pauseThread(self):
        pass

    def updateCaptureThreadStats(self, statData):
        imageBuffer = self.sharedImageBuffer.getByDeviceUrl(self.deviceUrl)
        # Show [number of images in buffer / image buffer size] in imageBufferLabel
        self.imageBufferLabel.setText(
            "[%d/%d]" % (imageBuffer.size(), imageBuffer.maxSize()))
        # Show percentage of image buffer full in imageBufferBar
        self.imageBufferBar.setValue(imageBuffer.size())
        # Show processing rate in captureRateLabel
        self.captureRateLabel.setText("{:>6,.2f} fps".format(statData.averageFPS))
        # Show number of frames captured in nFramesCapturedLabel
        self.nFramesCapturedLabel.setText("[%d]" % statData.nFramesProcessed)

    def updateProcessingThreadStats(self, statData):
        # Show processing rate in processingRateLabel
        self.processingRateLabel.setText("{:>6,.2f} fps".format(statData.averageFPS))
        # Show ROI information in roiLabel
        self.roiLabel.setText(
            "(%d,%d) %dx%d" % (self.processingThread.getCurrentROI().x(),
                               self.processingThread.getCurrentROI().y(),
                               self.processingThread.getCurrentROI().width(),
                               self.processingThread.getCurrentROI().height()))
        # Show number of frames processed in nFramesProcessedLabel
        self.nFramesProcessedLabel.setText("[%d]" % statData.nFramesProcessed)

    def updateFrame(self, frame):
        # Display frame
        self.frameLabel.setPixmap(
            QPixmap.fromImage(frame).scaled(self.frameLabel.width(),
                                            self.frameLabel.height(),
                                            Qt.KeepAspectRatio))

    def clearImageBuffer(self):
        if self.sharedImageBuffer.getByDeviceUrl(self.deviceUrl).clear():
            qDebug("[%s] Image buffer successfully cleared." % self.deviceUrl)
        else:
            qDebug("[%s] WARNING: Could not clear image buffer." % self.deviceUrl)

    def setImageProcessingSettings(self):
        # Prompt user:
        # If user presses OK button on dialog, update image processing settings
        if self.imageProcessingSettingsDialog.exec() == QDialog.Accepted:
            self.imageProcessingSettingsDialog.updateStoredSettingsFromDialog()
        # Else, restore dialog state
        else:
            self.imageProcessingSettingsDialog.updateDialogSettingsFromStored()

    def updateMouseCursorPosLabel(self):
        # Update mouse cursor position in mouseCursorPosLabel
        self.mouseCursorPosLabel.setText(
            "(%d,%d)" % (self.frameLabel.getMouseCursorPos().x(),
                         self.frameLabel.getMouseCursorPos().y()))
        # Show pixel cursor position if camera is connected (image is being shown)
        if self.frameLabel.pixmap():
            # Scaling factor calculation depends on whether frame is scaled
            # to fit label or not
            if not self.frameLabel.hasScaledContents():
                xScalingFactor = (self.frameLabel.getMouseCursorPos().x() -
                                  (self.frameLabel.width() -
                                   self.frameLabel.pixmap().width()) / 2) / \
                    self.frameLabel.pixmap().width()
                yScalingFactor = (self.frameLabel.getMouseCursorPos().y() -
                                  (self.frameLabel.height() -
                                   self.frameLabel.pixmap().height()) / 2) / \
                    self.frameLabel.pixmap().height()
            else:
                xScalingFactor = (self.frameLabel.getMouseCursorPos().x() /
                                  self.frameLabel.width())
                yScalingFactor = (self.frameLabel.getMouseCursorPos().y() /
                                  self.frameLabel.height())
            self.mouseCursorPosLabel.setText(
                '%s [%d,%d]' % (self.mouseCursorPosLabel.text(),
                                xScalingFactor *
                                self.processingThread.getCurrentROI().width(),
                                yScalingFactor *
                                self.processingThread.getCurrentROI().height()))

    def newMouseData(self, mouseData):
        # Local variable(s)
        selectionBox = QRect()
        # Set ROI
        if mouseData.leftButtonRelease and self.frameLabel.pixmap():
            # Selection box calculation depends on whether frame is scaled
            # to fit label or not
            if not self.frameLabel.hasScaledContents():
                xScalingFactor = (mouseData.selectionBox.x() -
                                  (self.frameLabel.width() -
                                   self.frameLabel.pixmap().width()) / 2) / \
                    self.frameLabel.pixmap().width()
                yScalingFactor = (mouseData.selectionBox.y() -
                                  (self.frameLabel.height() -
                                   self.frameLabel.pixmap().height()) / 2) / \
                    self.frameLabel.pixmap().height()
                wScalingFactor = (self.processingThread.getCurrentROI().width() /
                                  self.frameLabel.pixmap().width())
                hScalingFactor = (self.processingThread.getCurrentROI().height() /
                                  self.frameLabel.pixmap().height())
            else:
                xScalingFactor = mouseData.selectionBox.x() / self.frameLabel.width()
                yScalingFactor = mouseData.selectionBox.y() / self.frameLabel.height()
                wScalingFactor = (self.processingThread.getCurrentROI().width() /
                                  self.frameLabel.width())
                hScalingFactor = (self.processingThread.getCurrentROI().height() /
                                  self.frameLabel.height())
            # Set selection box properties (new ROI)
            selectionBox.setX(xScalingFactor *
                              self.processingThread.getCurrentROI().width() +
                              self.processingThread.getCurrentROI().x())
            selectionBox.setY(yScalingFactor *
                              self.processingThread.getCurrentROI().height() +
                              self.processingThread.getCurrentROI().y())
            selectionBox.setWidth(wScalingFactor * mouseData.selectionBox.width())
            selectionBox.setHeight(hScalingFactor * mouseData.selectionBox.height())
            # Check if selection box has NON-ZERO dimensions
            if selectionBox.width() != 0 and selectionBox.height() != 0:
                # Selection box can also be drawn from bottom-right to top-left corner
                if selectionBox.width() < 0:
                    x_temp = selectionBox.x()
                    width_temp = selectionBox.width()
                    selectionBox.setX(x_temp + selectionBox.width())
                    selectionBox.setWidth(width_temp * -1)
                if selectionBox.height() < 0:
                    y_temp = selectionBox.y()
                    height_temp = selectionBox.height()
                    selectionBox.setY(y_temp + selectionBox.height())
                    selectionBox.setHeight(height_temp * -1)
                # Check if selection box is not outside window
                if (selectionBox.x() < 0 or selectionBox.y() < 0 or
                        selectionBox.x() + selectionBox.width() >
                        self.processingThread.getCurrentROI().x() +
                        self.processingThread.getCurrentROI().width() or
                        selectionBox.y() + selectionBox.height() >
                        self.processingThread.getCurrentROI().y() +
                        self.processingThread.getCurrentROI().height() or
                        selectionBox.x() < self.processingThread.getCurrentROI().x() or
                        selectionBox.y() < self.processingThread.getCurrentROI().y()):
                    # Display error message
                    QMessageBox.warning(
                        self, "ERROR:",
                        "Selection box outside range. Please try again.")
                # Set ROI
                else:
                    self.setROI.emit(selectionBox)

    def handleContextMenuAction(self, action):
        if action.text() == "Reset ROI":
            self.setROI.emit(QRect(0, 0,
                                   self.captureThread.getInputSourceWidth(),
                                   self.captureThread.getInputSourceHeight()))
        elif action.text() == "Scale to Fit Frame":
            self.frameLabel.setScaledContents(action.isChecked())
        elif action.text() == "Grayscale":
            self.imageProcessingFlags.grayscaleOn = action.isChecked()
            self.newImageProcessingFlags.emit(self.imageProcessingFlags)
        elif action.text() == "Smooth":
            self.imageProcessingFlags.smoothOn = action.isChecked()
            self.newImageProcessingFlags.emit(self.imageProcessingFlags)
        elif action.text() == "Dilate":
            self.imageProcessingFlags.dilateOn = action.isChecked()
            self.newImageProcessingFlags.emit(self.imageProcessingFlags)
        elif action.text() == "Erode":
            self.imageProcessingFlags.erodeOn = action.isChecked()
            self.newImageProcessingFlags.emit(self.imageProcessingFlags)
        elif action.text() == "Flip":
            self.imageProcessingFlags.flipOn = action.isChecked()
            self.newImageProcessingFlags.emit(self.imageProcessingFlags)
        elif action.text() == "Canny":
            self.imageProcessingFlags.cannyOn = action.isChecked()
            self.newImageProcessingFlags.emit(self.imageProcessingFlags)
        elif action.text() == "Settings...":
            self.setImageProcessingSettings()

class CameraView(QWidget, Ui_CameraView):
    newImageProcessingFlags = pyqtSignal(ImageProcessingFlags)
    setROI = pyqtSignal(QRect)

    def __init__(self, parent, deviceUrl, sharedImageBuffer, cameraId):
        super(CameraView, self).__init__(parent)
        self.sharedImageBuffer = sharedImageBuffer
        self.cameraId = cameraId
        # Create image processing settings dialog
        self.imageProcessingSettingsDialog = ImageProcessingSettingsDialog(self)
        # Setup UI
        self.setupUi(self)
        # Save Device Url
        self.deviceUrl = deviceUrl
        # Initialize internal flag
        self.isCameraConnected = False
        # Set initial GUI state
        self.frameLabel.setText("No camera connected.")
        self.imageBufferBar.setValue(0)
        self.imageBufferLabel.setText("[000/000]")
        self.captureRateLabel.setText("")
        self.processingRateLabel.setText("")
        self.deviceUrlLabel.setText("")
        self.cameraResolutionLabel.setText("")
        self.roiLabel.setText("")
        self.mouseCursorPosLabel.setText("")
        self.clearImageBufferButton.setDisabled(True)
        # Initialize ImageProcessingFlags structure
        self.imageProcessingFlags = ImageProcessingFlags()
        # Connect signals/slots
        self.clearImageBufferButton.released.connect(self.clearImageBuffer)
        self.frameLabel.onMouseMoveEvent.connect(self.updateMouseCursorPosLabel)
        self.frameLabel.menu.triggered.connect(self.handleContextMenuAction)
        self.startButton.released.connect(self.startThread)
        self.pauseButton.released.connect(self.pauseThread)
        self.add_vehicle.released.connect(self.addVehicle)
        self.remove_vehicle.released.connect(self.remove_from_vehicle_list)
        self.save_and_quit.released.connect(self.save_quit)
        self.vehicle_up.toggled.connect(lambda: self.btnstate(self.vehicle_up))
        self.vehicle_down.toggled.connect(lambda: self.btnstate(self.vehicle_down))
        self.startButton.setEnabled(False)
        self.pauseButton.setEnabled(True)
        self.direction = "Up"
        self.add_vehicle_to_vehicle_list()
        self.group = QtWidgets.QButtonGroup()
        self.group.addButton(self.vehicle_up)
        self.group.addButton(self.vehicle_down)
        self.video_speed.currentIndexChanged.connect(self.change_speed)
        self.draw_polygon.released.connect(self.draw_polygon_f)
        self.polygon = PolygonDrawing()

    def draw_polygon_f(self):
        self.polygon.updateImage(self.frameLabel.pixmap())
        if self.polygon.exec() == QDialog.Accepted:
            print("Ok Karo")
        else:
            print("Not Okay")

    def change_speed(self):
        self.processingThread.speed = int(self.video_speed.currentText())

    def add_vehicle_to_vehicle_list(self):
        self.vehicle_select.addItems([
            "2 Wheeler", "Auto Rick", "Car-PVT", "Car-Taxi", "Car-Share",
            "Bus-Govt.", "Bus-2 Axle Private", "Bus-3 Axle Private",
            "Bus-Institution", "Bus-Mini Bus", "Bus-2 Axle", "Bus-MAV",
            "Govt. Cars/ Jeep/ Vans", "Army Vehicles/ Ambulance", "Govt trucks",
            "Cycle", "Animal Drawn", "Truck-2 Axle", "Truck-3 Axle",
            "Truck-4 Axle", "Truck-5 Axle", "Truck-Axle>=6", "Truck-HC,EME",
            "LCV-4 Tyre", "LCV-6 Tyre", "LCV-Tata Ace", "LCV-Mini LCV",
            "LCV-Goods Auto", "Const.-2 Axle Truck", "Const.-3 Axle Truck",
            "Const.-MAV up to 6 Axle", "Tractor & Trailer", "Chakra"
        ])

    def remove_from_vehicle_list(self):
        self.vehicle_list.takeItem(self.vehicle_list.currentRow())

    def save_quit(self):
        opt_list = []
        n = self.vehicle_list.count()
        i = 0
        while i < n:
            opt_list.append(self.vehicle_list.item(i).text())
            i += 1
        # print(opt_list)
        list_df = pd.DataFrame(opt_list)
        path = f"{time.time()}.xlsx"
        with pd.ExcelWriter(path) as writer:
            list_df.to_excel(writer, index=False, header=False)
        self.delete()
        self.close()

    def addVehicle(self):
        dt_string = self.sharedImageBuffer.video_date_time.strftime(
            '%Y-%m-%d_%H:%M:%S')
        item = QtWidgets.QListWidgetItem()
        font = QtGui.QFont()
        font.setBold(False)
        font.setItalic(False)
        font.setUnderline(False)
        font.setWeight(50)
        font.setStrikeOut(False)
        font.setKerning(False)
        item.setFont(font)
        brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
        brush.setStyle(QtCore.Qt.NoBrush)
        item.setBackground(brush)
        brush = QtGui.QBrush(QtGui.QColor(0, 0, 255))
        brush.setStyle(QtCore.Qt.NoBrush)
        item.setForeground(brush)
        item.setText(
            f"{dt_string}_{self.direction}_{self.vehicle_select.currentText()}")
        self.vehicle_list.insertItem(0, item)

    def btnstate(self, b):
        self.direction = b.text()

    def delete(self):
        if self.isCameraConnected:
            # Stop processing thread
            if self.processingThread.isRunning():
                self.stopProcessingThread()
            # Stop capture thread
            if self.captureThread.isRunning():
                self.stopCaptureThread()
            # Automatically start frame processing (for other streams)
            if self.sharedImageBuffer.isSyncEnabledForDeviceUrl(self.deviceUrl):
                self.sharedImageBuffer.setSyncEnabled(True)
            # Disconnect camera
            if self.captureThread.disconnectCamera():
                qDebug("[%s] Camera successfully disconnected." % self.deviceUrl)
            else:
                qDebug("[%s] WARNING: Camera already disconnected." % self.deviceUrl)

    def afterCaptureThreadFinshed(self):
        # Delete Buffer
        self.sharedImageBuffer.removeByDeviceUrl(self.deviceUrl)

    def afterProcessingThreadFinshed(self):
        qDebug("[%s] WARNING: SQL already disconnected." % self.deviceUrl)

    def connectToCamera(self, dropFrameIfBufferFull, apiPreference,
                        capThreadPrio, procThreadPrio, enableFrameProcessing,
                        width, height, setting):
        # Set frame label text
        if self.sharedImageBuffer.isSyncEnabledForDeviceUrl(self.deviceUrl):
            self.frameLabel.setText("Camera connected. Waiting...")
        else:
            self.frameLabel.setText("Connecting to camera...")
        # Create capture thread
        self.captureThread = CaptureThread(self.sharedImageBuffer,
                                           self.deviceUrl,
                                           dropFrameIfBufferFull,
                                           apiPreference, width, height,
                                           setting)
        # Attempt to connect to camera
        if self.captureThread.connectToCamera():
            # Create processing thread
            self.processingThread = ProcessingThread(self.sharedImageBuffer,
                                                     self.deviceUrl,
                                                     self.cameraId, self)
            self.roi = [
                (0, self.processingThread.currentROI.height() * 50 / 100),
                (self.processingThread.currentROI.width(),
                 self.processingThread.currentROI.height() * 50 / 100),
                (self.processingThread.currentROI.width(),
                 self.processingThread.currentROI.height() * 70 / 100),
                (0, self.processingThread.currentROI.height() * 70 / 100),
            ]
            self.processingThread.app.setRoi(self.roi)
            self.polygon.processingThread = self.processingThread
            # Setup signal/slot connections
            self.processingThread.newFrame.connect(self.updateFrame)
            self.processingThread.updateStatisticsInGUI.connect(
                self.updateProcessingThreadStats)
            self.captureThread.updateStatisticsInGUI.connect(
                self.updateCaptureThreadStats)
            self.imageProcessingSettingsDialog.newImageProcessingSettings.connect(
                self.processingThread.updateImageProcessingSettings)
            self.newImageProcessingFlags.connect(
                self.processingThread.updateImageProcessingFlags)
            self.setROI.connect(self.processingThread.setROI)
            # Remove imageBuffer from shared buffer by deviceUrl after
            # captureThread stop/finished
            self.captureThread.finished.connect(self.afterCaptureThreadFinshed)
            self.processingThread.finished.connect(self.afterProcessingThreadFinshed)
            # Only enable ROI setting/resetting if frame processing is enabled
            if enableFrameProcessing:
                self.frameLabel.newMouseData.connect(self.newMouseData)
                # Set initial data in processing thread
                self.setROI.emit(QRect(0, 0,
                                       self.captureThread.getInputSourceWidth(),
                                       self.captureThread.getInputSourceHeight()))
                self.newImageProcessingFlags.emit(self.imageProcessingFlags)
                self.imageProcessingSettingsDialog.updateStoredSettingsFromDialog()
            # Start capturing frames from camera
            self.captureThread.start(capThreadPrio)
            # Start processing captured frames (if enabled)
            if enableFrameProcessing:
                self.processingThread.start(procThreadPrio)
            # Setup imageBufferBar with minimum and maximum values
            self.imageBufferBar.setMinimum(0)
            self.imageBufferBar.setMaximum(
                self.sharedImageBuffer.getByDeviceUrl(self.deviceUrl).maxSize())
            # Enable "Clear Image Buffer" push button
            self.clearImageBufferButton.setEnabled(True)
            # Set text in labels
            self.deviceUrlLabel.setText(self.deviceUrl)
            self.cameraResolutionLabel.setText(
                "%dx%d" % (self.captureThread.getInputSourceWidth(),
                           self.captureThread.getInputSourceHeight()))
            # Set internal flag and return
            self.isCameraConnected = True
            # Set frame label text
            if not enableFrameProcessing:
                self.frameLabel.setText("Frame processing disabled.")
            return True
        # Failed to connect to camera
        else:
            return False

    def stopCaptureThread(self):
        qDebug("[%s] About to stop capture thread..." % self.deviceUrl)
        self.captureThread.stop()
        # This allows the thread to be stopped if it is in a wait-state
        self.sharedImageBuffer.wakeAll()
        # Take one frame off a FULL queue to allow the capture thread to finish
        if self.sharedImageBuffer.getByDeviceUrl(self.deviceUrl).isFull():
            self.sharedImageBuffer.getByDeviceUrl(self.deviceUrl).get()
        self.captureThread.wait()
        qDebug("[%s] Capture thread successfully stopped." % self.deviceUrl)

    def stopProcessingThread(self):
        qDebug("[%s] About to stop processing thread..." % self.deviceUrl)
        self.processingThread.stop()
        # This allows the thread to be stopped if it is in a wait-state
        self.sharedImageBuffer.wakeAll()
        self.processingThread.wait()
        qDebug("[%s] Processing thread successfully stopped." % self.deviceUrl)

    def startThread(self):
        self.processingThread.pause = False
        self.captureThread.pause = False
        self.startButton.setEnabled(False)
        self.pauseButton.setEnabled(True)

    def pauseThread(self):
        self.processingThread.pause = True
        self.captureThread.pause = True
        self.startButton.setEnabled(True)
        self.pauseButton.setEnabled(False)

    def updateCaptureThreadStats(self, statData):
        imageBuffer = self.sharedImageBuffer.getByDeviceUrl(self.deviceUrl)
        # Show [number of images in buffer / image buffer size] in imageBufferLabel
        self.imageBufferLabel.setText(
            "[%d/%d]" % (imageBuffer.size(), imageBuffer.maxSize()))
        # Show percentage of image buffer full in imageBufferBar
        self.imageBufferBar.setValue(imageBuffer.size())
        # Show processing rate in captureRateLabel
        self.captureRateLabel.setText("{:>6,.2f} fps".format(statData.averageFPS))
        # Show number of frames captured in nFramesCapturedLabel
        self.nFramesCapturedLabel.setText("[%d]" % statData.nFramesProcessed)

    def updateProcessingThreadStats(self, statData):
        # Show processing rate in processingRateLabel
        self.processingRateLabel.setText("{:>6,.2f} fps".format(statData.averageFPS))
        # Show ROI information in roiLabel
        self.roiLabel.setText(
            "(%d,%d) %dx%d" % (self.processingThread.getCurrentROI().x(),
                               self.processingThread.getCurrentROI().y(),
                               self.processingThread.getCurrentROI().width(),
                               self.processingThread.getCurrentROI().height()))
        # Show number of frames processed in nFramesProcessedLabel
        self.nFramesProcessedLabel.setText("[%d]" % statData.nFramesProcessed)

    def updateFrame(self, frame):
        # Display frame
        pixmap = QPixmap.fromImage(frame).scaled(self.frameLabel.width(),
                                                 self.frameLabel.height(),
                                                 Qt.KeepAspectRatio)
        pixmap = self.draw_something(pixmap)
        self.frameLabel.setPixmap(pixmap)

    def clearImageBuffer(self):
        if self.sharedImageBuffer.getByDeviceUrl(self.deviceUrl).clear():
            qDebug("[%s] Image buffer successfully cleared." % self.deviceUrl)
        else:
            qDebug("[%s] WARNING: Could not clear image buffer." % self.deviceUrl)

    def setImageProcessingSettings(self):
        # Prompt user:
        # If user presses OK button on dialog, update image processing settings
        if self.imageProcessingSettingsDialog.exec() == QDialog.Accepted:
            self.imageProcessingSettingsDialog.updateStoredSettingsFromDialog()
        # Else, restore dialog state
        else:
            self.imageProcessingSettingsDialog.updateDialogSettingsFromStored()

    def updateMouseCursorPosLabel(self):
        # Update mouse cursor position in mouseCursorPosLabel
        self.mouseCursorPosLabel.setText(
            "(%d,%d)" % (self.frameLabel.getMouseCursorPos().x(),
                         self.frameLabel.getMouseCursorPos().y()))
        # Show pixel cursor position if camera is connected (image is being shown)
        if self.frameLabel.pixmap():
            # Scaling factor calculation depends on whether frame is scaled
            # to fit label or not
            if not self.frameLabel.hasScaledContents():
                xScalingFactor = (self.frameLabel.getMouseCursorPos().x() -
                                  (self.frameLabel.width() -
                                   self.frameLabel.pixmap().width()) / 2) / \
                    self.frameLabel.pixmap().width()
                yScalingFactor = (self.frameLabel.getMouseCursorPos().y() -
                                  (self.frameLabel.height() -
                                   self.frameLabel.pixmap().height()) / 2) / \
                    self.frameLabel.pixmap().height()
            else:
                xScalingFactor = (self.frameLabel.getMouseCursorPos().x() /
                                  self.frameLabel.width())
                yScalingFactor = (self.frameLabel.getMouseCursorPos().y() /
                                  self.frameLabel.height())
            self.mouseCursorPosLabel.setText(
                '%s [%d,%d]' % (self.mouseCursorPosLabel.text(),
                                xScalingFactor *
                                self.processingThread.getCurrentROI().width(),
                                yScalingFactor *
                                self.processingThread.getCurrentROI().height()))

    def newMouseData(self, mouseData):
        # Local variable(s)
        selectionBox = QRect()
        # Set ROI
        if mouseData.leftButtonRelease and self.frameLabel.pixmap():
            # Selection box calculation depends on whether frame is scaled
            # to fit label or not
            if not self.frameLabel.hasScaledContents():
                xScalingFactor = (mouseData.selectionBox.x() -
                                  (self.frameLabel.width() -
                                   self.frameLabel.pixmap().width()) / 2) / \
                    self.frameLabel.pixmap().width()
                yScalingFactor = (mouseData.selectionBox.y() -
                                  (self.frameLabel.height() -
                                   self.frameLabel.pixmap().height()) / 2) / \
                    self.frameLabel.pixmap().height()
                wScalingFactor = (self.processingThread.getCurrentROI().width() /
                                  self.frameLabel.pixmap().width())
                hScalingFactor = (self.processingThread.getCurrentROI().height() /
                                  self.frameLabel.pixmap().height())
            else:
                xScalingFactor = mouseData.selectionBox.x() / self.frameLabel.width()
                yScalingFactor = mouseData.selectionBox.y() / self.frameLabel.height()
                wScalingFactor = (self.processingThread.getCurrentROI().width() /
                                  self.frameLabel.width())
                hScalingFactor = (self.processingThread.getCurrentROI().height() /
                                  self.frameLabel.height())
            # Set selection box properties (new ROI)
            selectionBox.setX(xScalingFactor *
                              self.processingThread.getCurrentROI().width() +
                              self.processingThread.getCurrentROI().x())
            selectionBox.setY(yScalingFactor *
                              self.processingThread.getCurrentROI().height() +
                              self.processingThread.getCurrentROI().y())
            selectionBox.setWidth(wScalingFactor * mouseData.selectionBox.width())
            selectionBox.setHeight(hScalingFactor * mouseData.selectionBox.height())
            # Check if selection box has NON-ZERO dimensions
            if selectionBox.width() != 0 and selectionBox.height() != 0:
                # Selection box can also be drawn from bottom-right to top-left corner
                if selectionBox.width() < 0:
                    x_temp = selectionBox.x()
                    width_temp = selectionBox.width()
                    selectionBox.setX(x_temp + selectionBox.width())
                    selectionBox.setWidth(width_temp * -1)
                if selectionBox.height() < 0:
                    y_temp = selectionBox.y()
                    height_temp = selectionBox.height()
                    selectionBox.setY(y_temp + selectionBox.height())
                    selectionBox.setHeight(height_temp * -1)
                # Check if selection box is not outside window
                if (selectionBox.x() < 0 or selectionBox.y() < 0 or
                        selectionBox.x() + selectionBox.width() >
                        self.processingThread.getCurrentROI().x() +
                        self.processingThread.getCurrentROI().width() or
                        selectionBox.y() + selectionBox.height() >
                        self.processingThread.getCurrentROI().y() +
                        self.processingThread.getCurrentROI().height() or
                        selectionBox.x() < self.processingThread.getCurrentROI().x() or
                        selectionBox.y() < self.processingThread.getCurrentROI().y()):
                    # Display error message
                    QMessageBox.warning(
                        self, "ERROR:",
                        "Selection box outside range. Please try again.")
                # Set ROI
                else:
                    self.setROI.emit(selectionBox)

    def handleContextMenuAction(self, action):
        if action.text() == "Reset ROI":
            self.setROI.emit(QRect(0, 0,
                                   self.captureThread.getInputSourceWidth(),
                                   self.captureThread.getInputSourceHeight()))
        elif action.text() == "Scale to Fit Frame":
            self.frameLabel.setScaledContents(action.isChecked())
        elif action.text() == "Grayscale":
            self.imageProcessingFlags.grayscaleOn = action.isChecked()
            self.newImageProcessingFlags.emit(self.imageProcessingFlags)
        elif action.text() == "Smooth":
            self.imageProcessingFlags.smoothOn = action.isChecked()
            self.newImageProcessingFlags.emit(self.imageProcessingFlags)
        elif action.text() == "Dilate":
            self.imageProcessingFlags.dilateOn = action.isChecked()
            self.newImageProcessingFlags.emit(self.imageProcessingFlags)
        elif action.text() == "Erode":
            self.imageProcessingFlags.erodeOn = action.isChecked()
            self.newImageProcessingFlags.emit(self.imageProcessingFlags)
        elif action.text() == "Flip":
            self.imageProcessingFlags.flipOn = action.isChecked()
            self.newImageProcessingFlags.emit(self.imageProcessingFlags)
        elif action.text() == "Canny":
            self.imageProcessingFlags.cannyOn = action.isChecked()
            self.newImageProcessingFlags.emit(self.imageProcessingFlags)
        elif action.text() == "Yolo":
            self.imageProcessingFlags.yoloOn = action.isChecked()
            self.newImageProcessingFlags.emit(self.imageProcessingFlags)
        elif action.text() == "Settings...":
            self.setImageProcessingSettings()

    def draw_something(self, pixmap):
        # Overlay the counting ROI polygon on the frame before display
        painter = QPainter(pixmap)
        painter.setPen(QPen(Qt.black, 5, Qt.SolidLine))
        painter.setBrush(QBrush(Qt.red, Qt.VerPattern))
        points = QPolygon([QPoint(*i) for i in self.roi])
        painter.drawPolygon(points)
        return pixmap