def __save_snapshot(self):
    ''' Schedules a background job that saves the currently selected snapshot slot. '''
    slot_id = int(self.snapSlotSelector.currentText())
    saver = SnapshotSaver(slot_id,
                          self.preferences,
                          lambda: self.__capture_snap(convert=False),
                          self.__measurement_store,
                          self.snapshot_saved)
    QThreadPool.globalInstance().start(saver)
def change_pool_size(self, size):
    '''
    Resizes the global thread pool.
    :param size: the new maximum thread count.
    '''
    logger.info(f"Changing thread pool size to {size}")
    pool = QThreadPool.globalInstance()
    pool.setMaxThreadCount(size)
def load_workspaces(self, row_index_pair, get_states_func):
    ''' Loads workspaces for the given rows on a background worker. '''
    worker = Worker(self._load_workspaces_on_thread, row_index_pair, get_states_func)
    worker.signals.finished.connect(self.on_finished)
    worker.signals.error.connect(self.on_error)
    self._worker = worker
    QThreadPool.globalInstance().start(worker)
def set_max_thread_count(num: int):
    """Cap the global thread pool at *num* threads.

    Note: Qt enforces a floor of one worker thread, so a zero or
    negative limit still leaves a single thread available.
    """
    pool = QThreadPool.globalInstance()
    pool.setMaxThreadCount(num)
def start(self):
    """Start this worker in a thread and add it to the global threadpool.

    The order of method calls when starting a worker is:

    .. code-block:: none

       calls QThreadPool.globalInstance().start(worker)
       |  triggered by the QThreadPool.start() method
       |  |  called by worker.run
       |  |  |
       V  V  V
       worker.start -> worker.run -> worker.work

    Raises
    ------
    RuntimeError
        If this worker has already been started (still present in
        ``WorkerBase._worker_set``), or if the underlying Qt object
        has already been deleted.
    """
    # a worker may only be started once; membership in the class-level
    # set marks it as running until its `finished` signal fires
    if self in WorkerBase._worker_set:
        raise RuntimeError(
            trans._(
                'This worker is already started!',
                deferred=True,
            ))

    # This will raise a RunTimeError if the worker is already deleted
    repr(self)

    # register the worker and arrange for it to deregister itself once
    # it signals completion, then hand it to the global pool
    WorkerBase._worker_set.add(self)
    self.finished.connect(lambda: WorkerBase._worker_set.discard(self))
    QThreadPool.globalInstance().start(self)
def process_states(self, states, use_optimizations, output_mode, plot_results, output_graph, save_can=False):
    ''' Processes the given states on a background worker. '''
    self._worker = Worker(self._process_states_on_thread,
                          states, use_optimizations, output_mode,
                          plot_results, output_graph, save_can)
    worker_signals = self._worker.signals
    worker_signals.finished.connect(self.on_finished)
    worker_signals.error.connect(self.on_error)
    QThreadPool.globalInstance().start(self._worker)
def _compute_in_background(self, func, slot, *args, **kwargs):
    """
    Run function `func` in a background thread. Send the signal
    `self.computations_complete` once computation is finished.

    Parameters
    ----------
    func: function
        Reference to a function that is supposed to be executed at the background.
        The function return value is passed as a signal parameter once computation
        is complete.
    slot: qtpy.QtCore.Slot or None
        Reference to a slot. If not None, then the signal `self.computation_complete`
        is connected to this slot.
    args, kwargs
        arguments of the function `func`.
    """
    # capture the signal in a local so the QRunnable closure below does
    # not need to hold a reference to `self`
    signal_complete = self.computations_complete

    # builds a throwaway QRunnable that invokes `func` and emits the
    # completion signal with its result
    def func_to_run(func, *args, **kwargs):

        class RunTask(QRunnable):

            def run(self):
                result_dict = func(*args, **kwargs)
                # emit the result so the GUI thread receives it via the
                # connected slot (if any)
                signal_complete.emit(result_dict)

        return RunTask()

    # connect the slot BEFORE the task starts so no completion is missed
    if slot is not None:
        self.computations_complete.connect(slot)

    # flag the GUI as busy before scheduling the work
    self.gui_vars["gui_state"]["running_computations"] = True
    self.update_global_state.emit()

    QThreadPool.globalInstance().start(func_to_run(func, *args, **kwargs))
def test_that_process_states_emits_row_failed_signal_after_each_failed_row(self):
    self.batch_process_runner.row_processed_signal = mock.MagicMock()
    self.batch_process_runner.row_failed_signal = mock.MagicMock()
    self.sans_batch_instance.side_effect = Exception('failure')
    get_states_mock = mock.MagicMock()
    get_states_mock.return_value = {0: mock.MagicMock()}, {}

    self.batch_process_runner.process_states(row_index_pair=self._mock_rows,
                                             get_states_func=get_states_mock,
                                             use_optimizations=False,
                                             output_mode=OutputMode.BOTH,
                                             plot_results=False,
                                             output_graph='')
    QThreadPool.globalInstance().waitForDone()

    failed_emit = self.batch_process_runner.row_failed_signal.emit
    self.assertEqual(3, failed_emit.call_count)
    for row in range(3):
        failed_emit.assert_any_call(row, 'failure')
    self.assertEqual(self.batch_process_runner.row_processed_signal.emit.call_count, 0)
def test_that_load_workspaces_emits_row_processed_signal_after_each_row(self):
    self.batch_process_runner.row_processed_signal = mock.MagicMock()
    self.batch_process_runner.row_failed_signal = mock.MagicMock()
    get_states_mock = mock.MagicMock()
    get_states_mock.return_value = {0: mock.MagicMock()}, {}

    self.batch_process_runner.load_workspaces(row_index_pair=self._mock_rows,
                                              get_states_func=get_states_mock)
    QThreadPool.globalInstance().waitForDone()

    processed_emit = self.batch_process_runner.row_processed_signal.emit
    self.assertEqual(3, processed_emit.call_count)
    for row in range(3):
        processed_emit.assert_any_call(row, [], [])
    self.assertEqual(self.batch_process_runner.row_failed_signal.emit.call_count, 0)
def show_release_notes(self):
    ''' Shows the release notes '''
    checker = VersionChecker(self.preferences.get(SYSTEM_CHECK_FOR_BETA_UPDATES),
                             self.__alert_on_old_version,
                             self.__alert_on_version_check_fail,
                             self.__version,
                             signal_anyway=True)
    QThreadPool.globalInstance().start(checker)
def disconnect(self):
    ''' Tears down the listener, if any, and returns its reserved thread to the pool. '''
    if self.__listener is None:
        return
    logger.info(f"Disconnecting from {self.ip_address}")
    self.__listener.kill()
    self.__listener = None
    QThreadPool.globalInstance().releaseThread()
    logger.info(f"Disconnected from {self.ip_address}")
def updateBeq(self):
    ''' Pulls or clones the named repository '''
    from model.minidsp import RepoRefresher
    target_dir = self.beqFiltersDir.text()
    refresher = RepoRefresher(target_dir, self.__get_beq_repos())
    on_complete = lambda: self.__count_and_return_cursor()
    refresher.signals.on_end.connect(on_complete)
    QThreadPool.globalInstance().start(refresher)
def clear_filter_from_minidsp(self, slot=None):
    ''' Sets the config to bypass. '''
    on_event = lambda c: self.__on_send_filter_event(c, self.bypassMinidspButton)
    publisher = FilterPublisher([], slot, self.__minidsp_rs_exe, self.__minidsp_rs_options, on_event)
    QThreadPool.globalInstance().start(publisher)
def __init__(self, parent, prefs, filter_loader):
    """Catalogue dialog: locates the minidsp-rs binary, kicks off the
    database download and populates the year/content-type filters from
    the configured BEQ repositories.

    :param parent: the parent widget.
    :param prefs: the preferences store.
    :param filter_loader: callable used to load a selected filter.
    """
    super(CatalogueDialog, self).__init__(parent=parent)
    self.__filter_loader = filter_loader
    self.__preferences = prefs
    # locate the minidsp-rs executable; try the unix name first then the
    # windows .exe variant
    minidsp_rs_path = prefs.get(BINARIES_MINIDSP_RS)
    self.__minidsp_rs_exe = None
    if minidsp_rs_path:
        minidsp_rs_exe = os.path.join(minidsp_rs_path, 'minidsp')
        if os.path.isfile(minidsp_rs_exe):
            self.__minidsp_rs_exe = minidsp_rs_exe
        else:
            minidsp_rs_exe = os.path.join(minidsp_rs_path, 'minidsp.exe')
            if os.path.isfile(minidsp_rs_exe):
                self.__minidsp_rs_exe = minidsp_rs_exe
    # options only apply when an executable was found
    self.__minidsp_rs_options = None
    if self.__minidsp_rs_exe:
        self.__minidsp_rs_options = prefs.get(MINIDSP_RS_OPTIONS)
    self.__catalogue = {}
    self.setupUi(self)
    self.sendToMinidspButton.setMenu(
        self.__make_minidsp_menu(self.send_filter_to_minidsp))
    self.bypassMinidspButton.setMenu(
        self.__make_minidsp_menu(self.clear_filter_from_minidsp))
    self.__beq_dir = self.__preferences.get(BEQ_DOWNLOAD_DIR)
    self.__db_csv_file = os.path.join(self.__beq_dir, 'database.csv')
    self.__db_csv = {}
    # disabled until the database download completes
    self.browseCatalogueButton.setEnabled(False)
    self.browseCatalogueButton.setIcon(qta.icon('fa5s.folder-open'))
    # download the database csv in the background
    QThreadPool.globalInstance().start(
        DatabaseDownloader(self.__on_database_load,
                           self.__alert_on_database_load_error,
                           self.__db_csv_file))
    # disabled until a catalogue entry is selected
    self.loadFilterButton.setEnabled(False)
    self.showInfoButton.setEnabled(False)
    self.openAvsButton.setEnabled(False)
    self.openCatalogueButton.setEnabled(False)
    # BEQ_REPOS is a '|'-delimited list of repositories
    for r in self.__preferences.get(BEQ_REPOS).split('|'):
        self.__populate_catalogue(r)
    # derive the year filter bounds from the catalogue; min/max spinners
    # are offset by 1 so min can never exceed max
    years = SortedSet({c.year for c in self.__catalogue.values()})
    for y in reversed(years):
        self.yearFilter.addItem(y)
    self.yearMinFilter.setMinimum(int(years[0]))
    self.yearMinFilter.setMaximum(int(years[-1]) - 1)
    self.yearMaxFilter.setMinimum(int(years[0]) + 1)
    self.yearMaxFilter.setMaximum(int(years[-1]))
    self.yearMinFilter.setValue(int(years[0]))
    self.filter_min_year(self.yearMinFilter.value())
    self.yearMaxFilter.setValue(int(years[-1]))
    self.filter_max_year(self.yearMaxFilter.value())
    # content-type filter, '' means show everything
    content_types = SortedSet(
        {c.content_type for c in self.__catalogue.values()})
    for c in content_types:
        self.contentTypeFilter.addItem(c)
    self.filter_content_type('')
    self.totalCount.setValue(len(self.__catalogue))
def process_states(self, row_index_pair, get_states_func, use_optimizations, output_mode, plot_results,
                   output_graph, save_can=False):
    ''' Processes the given rows on a background worker. '''
    self._worker = Worker(self._process_states_on_thread,
                          row_index_pair=row_index_pair,
                          get_states_func=get_states_func,
                          use_optimizations=use_optimizations,
                          output_mode=output_mode,
                          plot_results=plot_results,
                          output_graph=output_graph,
                          save_can=save_can)
    worker_signals = self._worker.signals
    worker_signals.finished.connect(self.on_finished)
    worker_signals.error.connect(self.on_error)
    QThreadPool.globalInstance().start(self._worker)
def send_filter_to_minidsp(self, slot=None):
    ''' Sends the currently selected filter to the filter publisher. '''
    selected_beq = self.__get_beq_from_results()
    loaded_filter = load_filter_file(selected_beq.filename, 96000)
    on_event = lambda c: self.__on_send_filter_event(c, self.sendToMinidspButton)
    publisher = FilterPublisher(loaded_filter, slot, self.__minidsp_rs_exe, self.__minidsp_rs_options, on_event)
    QThreadPool.globalInstance().start(publisher)
def test_that_process_states_emits_row_processed_signal_after_each_row(self):
    self.batch_process_runner.row_processed_signal = mock.MagicMock()
    self.batch_process_runner.row_failed_signal = mock.MagicMock()

    self.batch_process_runner.process_states(self.states, False, OutputMode.Both, False, '')
    QThreadPool.globalInstance().waitForDone()

    processed_emit = self.batch_process_runner.row_processed_signal.emit
    self.assertEqual(3, processed_emit.call_count)
    for row in (0, 1, 2):
        processed_emit.assert_any_call(row, [], [])
    self.assertEqual(self.batch_process_runner.row_failed_signal.emit.call_count, 0)
def test_that_process_states_emits_row_processed_signal_after_each_row(self):
    self.batch_process_runner.row_processed_signal = mock.MagicMock()
    self.batch_process_runner.row_failed_signal = mock.MagicMock()

    self.batch_process_runner.process_states(self.states, False, OutputMode.Both, False, '')
    QThreadPool.globalInstance().waitForDone()

    processed_emit = self.batch_process_runner.row_processed_signal.emit
    self.assertEqual(3, processed_emit.call_count)
    for row in (0, 1, 2):
        processed_emit.assert_any_call(row, [], [])
    self.assertEqual(self.batch_process_runner.row_failed_signal.emit.call_count, 0)
def test_that_load_workspaces_emits_row_processed_signal_after_each_row(self):
    self.batch_process_runner.row_processed_signal = mock.MagicMock()
    self.batch_process_runner.row_failed_signal = mock.MagicMock()

    self.batch_process_runner.load_workspaces(self.states)
    QThreadPool.globalInstance().waitForDone()

    processed_emit = self.batch_process_runner.row_processed_signal.emit
    self.assertEqual(3, processed_emit.call_count)
    for row in (0, 1, 2):
        processed_emit.assert_any_call(row, [], [])
    self.assertEqual(self.batch_process_runner.row_failed_signal.emit.call_count, 0)
def execute(self):
    ''' Executes the command. '''
    if self.__ffmpeg_cmd is None:
        return
    self.__extractor = AudioExtractor(self.__ffmpeg_cmd,
                                      port=self.__progress_port,
                                      cancel=self.__cancel,
                                      progress_handler=self.progress_handler,
                                      is_remux=self.__is_remux)
    QThreadPool.globalInstance().start(self.__extractor)
def __init__(self, parent, prefs, cache_dir, repo, img):
    ''' Image viewer dialog: downloads the image in the background and shows it scaled. '''
    super(ImageViewerDialog, self).__init__(parent=parent)
    self.__img = img
    self.__preferences = prefs
    self.__pm = None
    self.setupUi(self)
    self.scrollArea.setWidgetResizable(True)
    expanding = QSizePolicy.Expanding
    self.label.setSizePolicy(expanding, expanding)
    self.label.setScaledContents(True)
    downloader = ImgDownloader(cache_dir, repo, self.__on_load, self.__on_error, self.__img)
    QThreadPool.globalInstance().start(downloader)
def test_that_load_workspaces_emits_row_processed_signal_after_each_row(self):
    self.batch_process_runner.row_processed_signal = mock.MagicMock()
    self.batch_process_runner.row_failed_signal = mock.MagicMock()

    self.batch_process_runner.load_workspaces(self.states)
    QThreadPool.globalInstance().waitForDone()

    processed_emit = self.batch_process_runner.row_processed_signal.emit
    self.assertEqual(3, processed_emit.call_count)
    for row in (0, 1, 2):
        processed_emit.assert_any_call(row, [], [])
    self.assertEqual(self.batch_process_runner.row_failed_signal.emit.call_count, 0)
def test_that_process_states_calls_batch_reduce_for_specified_row(self):
    get_states_mock = mock.MagicMock()
    get_states_mock.return_value = {0: mock.MagicMock()}, {}

    self.batch_process_runner.process_states(row_index_pair=self._mock_rows,
                                             get_states_func=get_states_mock,
                                             use_optimizations=False,
                                             output_mode=OutputMode.BOTH,
                                             plot_results=False,
                                             output_graph='')
    QThreadPool.globalInstance().waitForDone()

    self.assertEqual(3, self.sans_batch_instance.call_count)
def __start_analysers(self):
    ''' Starts the processor for every analyser and registers a shutdown hook,
    per analyser, which stops its processor and releases the reserved pool
    thread when the application quits. '''
    for a in self.__analysers.values():
        logger.info(f"Starting processor for {a.__class__.__name__}")
        # each processor permanently occupies a thread, so reserve one to
        # stop the global pool from starving other jobs
        QThreadPool.globalInstance().reserveThread()
        a.processor.start()

        # BUG FIX: bind the current analyser via a default argument. The
        # original closed over the loop variable `a`, which is late-bound,
        # so every aboutToQuit handler stopped only the LAST analyser (and
        # released its thread multiple times) while the others never stopped.
        def stop_processor(analyser=a):
            logger.info(f"Stopping processor for {analyser.__class__.__name__}")
            analyser.processor.stop()
            QThreadPool.globalInstance().releaseThread()
            logger.info(f"Stopped processor for {analyser.__class__.__name__}")

        self.app.aboutToQuit.connect(stop_processor)
        logger.info(f"Started processor for {a.__class__.__name__}")
def search(self):
    ''' Searches for files matching the filter in the input directory. '''
    self.filter.setEnabled(False)
    self.__candidates = ExtractCandidates(self, self.__preferences.get(ANALYSIS_TARGET_FS))
    self.__preferences.set(EXTRACTION_BATCH_FILTER, self.filter.text())
    patterns = self.filter.text().split(';')
    search_job = FileSearch(patterns)
    wiring = [
        (search_job.signals.started, self.__on_search_start),
        (search_job.signals.on_error, self.__on_search_error),
        (search_job.signals.on_match, self.__on_search_match),
        (search_job.signals.finished, self.__on_search_complete),
    ]
    for signal, handler in wiring:
        signal.connect(handler)
    QThreadPool.globalInstance().start(search_job)
def doAction(self, mode):
    ''' Runs the agilent command asynchronously unless one is already in flight. '''
    if self.commandRunning:
        return
    agilent_async = AgilentAsync()
    agilent_async.timerStatus.connect(self.debug)
    agilent_async.started.connect(self.started)
    agilent_async.finished.connect(self.finished)
    runnable = AgilentAsyncRunnable(agilent_async,
                                    mode=mode,
                                    voltage=self.parameters.voltage,
                                    step_to_fixed_delay=self.parameters.delay,
                                    devices_selection=self.devices.getSelectedDevices())
    QThreadPool.globalInstance().start(runnable)
def run(self):
    ''' Saves the snapshot to preferences. '''
    start = time.time()
    dat = self.__capturer()
    logger.info(f"Captured in {to_millis(start, time.time())}ms")
    prefs = []
    # one SetPreference job per captured ip, emitting as we go
    for ip, v in dat.items():
        prefs.append(SetPreference(self.__preferences, f"{SNAPSHOT_GROUP}/{self.__id}/{ip}", v))
        self.__signal.emit(self.__id, ip, v)
    # persist at low priority so interactive work is not delayed
    for p in prefs:
        QThreadPool.globalInstance().start(p, priority=-1)
    logger.info(f"Saved snapshot in {to_millis(start, time.time())}ms")
def wait_for_workers_to_quit(msecs: int = None):
    """Ask all workers to quit, and wait up to `msec` for quit.

    Attempts to clean up all running workers by calling ``worker.quit()``
    method.  Any workers in the ``WorkerBase._worker_set`` set will have this
    method.

    By default, this function will block indefinitely, until worker threads
    finish.  If a timeout is provided, a ``RuntimeError`` will be raised if
    the workers do not gracefully exit in the time requests, but the threads
    will NOT be killed.  It is (currently) left to the user to use their OS
    to force-quit rogue threads.

    .. important::

        If the user does not put any yields in their function, and the function
        is super long, it will just hang... For instance, there's no graceful
        way to kill this thread in python:

        .. code-block:: python

            @thread_worker
            def ZZZzzz():
                time.sleep(10000000)

        This is why it's always advisable to use a generator that periodically
        yields for long-running computations in another thread.

        See `this stack-overflow post
        <https://stackoverflow.com/questions/323972/is-there-any-way-to-kill-a-thread>`_
        for a good discussion on the difficulty of killing a rogue python thread:

    Parameters
    ----------
    msecs : int, optional
        Waits up to msecs milliseconds for all threads to exit and removes all
        threads from the thread pool. If msecs is `None` (the default), the
        timeout is ignored (waits for the last thread to exit).

    Raises
    ------
    RuntimeError
        If a timeout is provided and workers do not quit successfully within
        the time allotted.
    """
    # NOTE(review): this iterates the live worker set; workers discard
    # themselves from it on their `finished` signal — presumably that fires
    # on another thread's event processing, but confirm there is no
    # mutation-during-iteration hazard here.
    for worker in WorkerBase._worker_set:
        worker.quit()

    # Qt's waitForDone treats a negative timeout as "wait forever"
    msecs = msecs if msecs is not None else -1
    if not QThreadPool.globalInstance().waitForDone(msecs):
        raise RuntimeError(
            trans._(
                "Workers did not quit gracefully in the time allotted ({msecs} ms)",
                deferred=True,
                msecs=msecs,
            ))
def process_files(self):
    ''' Creates the output content.

    Persists the current UI selections to preferences, optionally asks the
    user to enable experimental filter optimisation (fixed point hardware
    only), resets the error/optimised panes and then runs the XmlProcessor
    on the global thread pool.
    '''
    # remember the current selections for next time
    self.__preferences.set(BEQ_CONFIG_FILE, self.configFile.text())
    self.__preferences.set(BEQ_MERGE_DIR, self.outputDirectory.text())
    self.__preferences.set(BEQ_MINIDSP_TYPE, self.minidspType.currentText())
    self.__preferences.set(BEQ_EXTRA_DIR, self.userSourceDir.text())
    # only proceed if the output directory could be cleared
    if self.__clear_output_directory():
        self.filesProcessed.setValue(0)
        optimise_filters = False
        # optimisation only applies to fixed point hardware and is opt-in
        # because it is experimental
        if MinidspType.parse(self.minidspType.currentText()).is_fixed_point_hardware():
            result = QMessageBox.question(self,
                                          'Are you feeling lucky?',
                                          f"Do you want to automatically optimise filters to fit in the 6 biquad limit? \n\n"
                                          f"Note this feature is experimental. \n"
                                          f"You are strongly encouraged to review the generated filters to ensure they are safe to use.\n"
                                          f"USE AT YOUR OWN RISK!\n\n"
                                          f"Are you sure you want to continue?",
                                          QMessageBox.Yes | QMessageBox.No,
                                          QMessageBox.No)
            optimise_filters = result == QMessageBox.Yes
        self.__start_spinning()
        # reset the output panes; the optimised pane is only visible when
        # optimisation was requested
        self.errors.clear()
        self.errors.setEnabled(False)
        self.copyErrorsButton.setEnabled(False)
        self.optimised.clear()
        self.optimised.setEnabled(False)
        self.copyOptimisedButton.setEnabled(False)
        self.optimised.setVisible(optimise_filters)
        self.copyOptimisedButton.setVisible(optimise_filters)
        self.optimisedLabel.setVisible(optimise_filters)
        # run the merge in the background, reporting per-file outcomes via
        # the supplied callbacks
        QThreadPool.globalInstance().start(XmlProcessor(self.__beq_dir,
                                                        self.userSourceDir.text(),
                                                        self.outputDirectory.text(),
                                                        self.configFile.text(),
                                                        self.minidspType.currentText(),
                                                        self.__on_file_fail,
                                                        self.__on_file_ok,
                                                        self.__on_complete,
                                                        self.__on_optimised,
                                                        optimise_filters))
def __init__(self, parent, preferences):
    ''' Batch extract dialog: restores the saved output directory and filter
    from preferences and sizes the thread count spinner from the global
    thread pool.

    :param parent: the parent widget.
    :param preferences: the preferences store.
    '''
    super(BatchExtractDialog, self).__init__(parent)
    self.setupUi(self)
    self.setWindowFlags(self.windowFlags() | Qt.WindowSystemMenuHint | Qt.WindowMinMaxButtonsHint)
    self.__candidates = None
    self.__preferences = preferences
    self.__search_spinner = None
    # restore the last used output dir if it still exists
    default_output_dir = self.__preferences.get(EXTRACTION_OUTPUT_DIR)
    if os.path.isdir(default_output_dir):
        self.outputDir.setText(default_output_dir)
    # restore the last used filter, if any
    filt = self.__preferences.get(EXTRACTION_BATCH_FILTER)
    if filt is not None:
        self.filter.setText(filt)
    self.outputDirPicker.setIcon(qta.icon('fa5s.folder-open'))
    self.statusBar = QStatusBar()
    self.verticalLayout.addWidget(self.statusBar)
    try:
        core_count = QThreadPool.globalInstance().maxThreadCount()
        self.threads.setMaximum(core_count)
        self.threads.setValue(core_count)
    except Exception as e:
        # BUG FIX: the original passed the exception as a positional
        # %-format argument with no placeholder in the message
        # (logger.warning(f"...", e)), which breaks log record formatting;
        # attach the traceback via exc_info instead
        logger.warning("Unable to get cpu_count()", exc_info=e)
def active_thread_count() -> int:
    """Report how many threads in the global thread pool are currently busy."""
    pool = QThreadPool.globalInstance()
    return pool.activeThreadCount()
def load_workspaces(self, states):
    ''' Loads workspaces for the given states on a background worker. '''
    worker = Worker(self._load_workspaces_on_thread, states)
    worker.signals.finished.connect(self.on_finished)
    worker.signals.error.connect(self.on_error)
    self._worker = worker
    QThreadPool.globalInstance().start(worker)
def test_that_process_states_calls_batch_reduce_for_each_row(self):
    self.batch_process_runner.process_states(self.states, False, OutputMode.Both, False, '')
    QThreadPool.globalInstance().waitForDone()

    self.assertEqual(3, self.sans_batch_instance.call_count)
def __init__(self, app, prefs, parent=None):
    """Main QVibe window: reads the app version, sizes the thread pool,
    starts the twisted reactor and version check, wires up the measurement
    and recorder stores, the analyser charts and all menu/button actions.

    :param app: the QApplication.
    :param prefs: the preferences store.
    :param parent: optional parent widget.
    """
    super(QVibe, self).__init__(parent)
    self.logger = logging.getLogger('qvibe')
    self.app = app
    self.preferences = prefs
    # basic setup and version checking
    # when frozen by pyinstaller, resources live under sys._MEIPASS
    if getattr(sys, 'frozen', False):
        self.__style_path_root = sys._MEIPASS
    else:
        self.__style_path_root = os.path.dirname(__file__)
    self.__version = '0.0.0-alpha.1'
    v_path = os.path.abspath(
        os.path.join(self.__style_path_root, 'VERSION'))
    try:
        with open(v_path) as version_file:
            self.__version = version_file.read().strip()
    except:
        # NOTE(review): bare except, but the failure is logged and the
        # fallback version above is used; narrowing to OSError looks safe
        # but is left as-is here
        logger.exception(f"Unable to read {v_path}")
    # oversize the pool: long-lived runnables below (reactor, processors)
    # occupy threads permanently
    global_thread_pool = QThreadPool.globalInstance()
    global_thread_pool.setMaxThreadCount(QThread.idealThreadCount() + 4)
    if self.preferences.get(SYSTEM_CHECK_FOR_UPDATES):
        global_thread_pool.start(
            VersionChecker(
                self.preferences.get(SYSTEM_CHECK_FOR_BETA_UPDATES),
                self.__alert_on_old_version,
                self.__alert_on_version_check_fail,
                self.__version))
    # UI initialisation
    self.setupUi(self)
    # run a twisted reactor as its responsiveness is embarrassingly better than QTcpSocket
    from twisted.internet import reactor
    self.__reactor = reactor
    runner = ReactorRunner(self.__reactor)
    # the reactor runnable blocks its thread for the app lifetime, hence
    # the explicit reserveThread
    global_thread_pool.reserveThread()
    global_thread_pool.start(runner)
    self.app.aboutToQuit.connect(runner.stop)
    # core domain stores
    self.__timer = None
    self.__start_time = None
    self.__target_config = self.__load_config()
    self.__display_target_config()
    self.__measurement_store = MeasurementStore(self.measurementLayout,
                                                self.measurementBox,
                                                self.bufferSize,
                                                self.preferences,
                                                self.__target_config)
    self.__measurement_store.signals.data_changed.connect(
        self.__display_measurement)
    self.__measurement_store.signals.measurement_added.connect(
        self.__display_measurement)
    self.__measurement_store.signals.visibility_changed.connect(
        self.__set_visible_measurements)
    self.__recorder_store = RecorderStore(self.__target_config,
                                          self.recordersLayout,
                                          self.centralwidget,
                                          self.__reactor,
                                          self.__measurement_store)
    self.__recorder_store.signals.on_status_change.connect(
        self.__handle_recorder_connect_event)
    target_resolution = f"{self.preferences.get(ANALYSIS_RESOLUTION)} Hz"
    self.resolutionHz.setCurrentText(target_resolution)
    # menus
    self.log_viewer = RollingLogger(self.preferences, parent=self)
    self.actionShow_Logs.triggered.connect(self.log_viewer.show_logs)
    self.action_Preferences.triggered.connect(self.show_preferences)
    self.actionSave_Chart.triggered.connect(self.export_chart)
    self.actionExport_Wav.triggered.connect(self.export_wav)
    # buffer
    self.bufferSize.setValue(self.preferences.get(BUFFER_SIZE))
    # magnitude range; the min/max spinners are kept at least 20 apart
    self.magMin.setValue(self.preferences.get(CHART_MAG_MIN))
    self.magMax.setValue(self.preferences.get(CHART_MAG_MAX))

    def keep_min_mag_range():
        keep_range(self.magMin, self.magMax, 20)

    self.magMin.valueChanged['int'].connect(lambda v: keep_min_mag_range())
    self.magMax.valueChanged['int'].connect(lambda v: keep_min_mag_range())
    # frequency range; same 20 unit minimum separation
    self.freqMin.setValue(self.preferences.get(CHART_FREQ_MIN))
    self.freqMax.setValue(self.preferences.get(CHART_FREQ_MAX))

    def keep_min_freq_range():
        keep_range(self.freqMin, self.freqMax, 20)

    self.freqMin.valueChanged['int'].connect(
        lambda v: keep_min_freq_range())
    self.freqMax.valueChanged['int'].connect(
        lambda v: keep_min_freq_range())
    # charts, keyed by tab index
    colour_provider = ColourProvider()
    self.__analysers = {
        0:
        Vibration(self.liveVibrationChart, self.preferences,
                  self.targetSampleRate, self.fps, self.actualFPS,
                  self.resolutionHz, self.targetAccelSens, self.bufferSize,
                  self.vibrationAnalysis, self.leftMarker, self.rightMarker,
                  self.timeRange, self.zoomInButton, self.zoomOutButton,
                  self.findPeaksButton, colour_provider),
        1:
        RTA(self.rtaLayout, self.rtaTab, self.rtaChart, self.preferences,
            self.targetSampleRate, self.resolutionHz, self.fps,
            self.actualFPS, self.magMin, self.magMax, self.freqMin,
            self.freqMax, self.refCurve, self.showValueFor,
            self.__measurement_store.signals, colour_provider),
        2:
        Spectrogram(self.spectrogramView, self.preferences,
                    self.targetSampleRate, self.fps, self.actualFPS,
                    self.resolutionHz, self.bufferSize, self.magMin,
                    self.magMax, self.freqMin, self.freqMax,
                    self.visibleCurves, self.__measurement_store),
    }
    self.__start_analysers()
    self.set_visible_chart(self.chartTabs.currentIndex())
    self.applyTargetButton.setIcon(qta.icon('fa5s.check', color='green'))
    self.resetTargetButton.setIcon(qta.icon('fa5s.undo'))
    self.visibleCurves.selectAll()
    # load saved recorders; RECORDER_SAVED_IPS is a '|'-delimited list
    saved_recorders = self.preferences.get(RECORDER_SAVED_IPS)
    warn_on_no_recorders = False
    if saved_recorders is not None:
        self.__recorder_store.load(saved_recorders.split('|'))
    else:
        warn_on_no_recorders = True
    # show preferences if we have no IPs
    if warn_on_no_recorders is True:
        result = QMessageBox.question(
            self, 'No Recorders',
            f"No qvibe-recorders have been added. \n\nUse the preferences screen to add then.\n\nWould you like to add one now?",
            QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        if result == QMessageBox.Yes:
            self.show_preferences()
    self.saveSnapshotButton.setIcon(qta.icon('fa5s.save'))
    self.saveSnapshotButton.clicked.connect(self.__save_snapshot)
    self.zoomInButton.setIcon(qta.icon('fa5s.compress-arrows-alt'))
    self.zoomOutButton.setIcon(qta.icon('fa5s.expand-arrows-alt'))
    self.loadMeasurementButton.setIcon(qta.icon('fa5s.folder-open'))
    self.actionSave_Signal.triggered.connect(self.__save_signal)
    self.actionLoad_Signal.triggered.connect(self.__load_signal)
    self.loadMeasurementButton.clicked.connect(self.__load_signal)
    self.connectAllButton.clicked.connect(self.__recorder_store.connect)
    self.disconnectAllButton.clicked.connect(
        self.__recorder_store.disconnect)
    self.snapshot_saved.connect(self.__add_snapshot)
    self.__measurement_store.load_snapshots()