def __save_pixel_perfect(self):
    '''
    Saves an image based on passing the image through directly.

    Prompts for an output file, validates the extension against VALID_IMG_FORMATS
    and writes the preview figure via matplotlib, concatenating the source image
    afterwards via __concat_images.
    '''
    if self.image.text():
        file_name = QFileDialog.getSaveFileName(parent=self, caption='Export Report',
                                                filter='Report File (*.jpg *.png *.jpeg)')
        if file_name:
            output_file = str(file_name[0]).strip()
            if not output_file:
                return
            # fmt rather than format to avoid shadowing the builtin
            fmt = os.path.splitext(output_file)[1][1:].strip()
            if fmt in VALID_IMG_FORMATS:
                from app import wait_cursor
                with wait_cursor():
                    self.__status_bar.showMessage(f"Saving report to {output_file}", 5000)
                    self.preview.canvas.figure.savefig(output_file, format=fmt, dpi=self.__dpi)
                    # only report success if the concatenation also succeeded
                    if self.__concat_images(fmt, output_file):
                        self.__status_bar.showMessage(f"Saved report to {output_file}", 5000)
            else:
                msg_box = QMessageBox()
                msg_box.setText(f"Invalid output file format - {output_file} is not one of {VALID_IMG_FORMATS}")
                msg_box.setIcon(QMessageBox.Critical)
                msg_box.setWindowTitle('Unexpected Error')
                msg_box.exec()
    else:
        msg_box = QMessageBox()
        msg_box.setText('Unable to create report, no image selected')
        msg_box.setIcon(QMessageBox.Information)
        msg_box.setWindowTitle('No Image')
        msg_box.exec()
def load_file(self):
    ''' Loads a signal from the given file. '''
    start_millis = self.startTime.time().msecsSinceStartOfDay()
    # a zero start time means "from the beginning" so leave start unset
    start = start_millis if start_millis > 0 else None
    end_millis = self.endTime.time().msecsSinceStartOfDay()
    # only pass an explicit end if it trims the file or an explicit start was given
    end = end_millis if (end_millis < self.__duration or start is not None) else None
    channel = int(self.channelSelector.currentText())
    from model.preferences import ANALYSIS_TARGET_FS
    from app import wait_cursor
    with wait_cursor(f"Loading {self.__info.name}"):
        self.__signal = readWav('analysis', self.__preferences, self.__info.name,
                                channel=channel, start=start, end=end,
                                target_fs=self.__preferences.get(ANALYSIS_TARGET_FS))
        self.__filtered_signals = {}
        self.__init_resolution_selector(self.__signal)
        self.show_chart()
def refresh(self):
    '''
    Pulls or clones the named repositories.

    Each repo is pulled if a .git directory already exists locally, otherwise
    cloned; a directory with a broken .git is wiped and re-cloned.
    '''
    from app import wait_cursor
    import shutil
    try:
        with wait_cursor():
            os.makedirs(self.repo_dir, exist_ok=True)
            for repo in self.repos:
                subdir = get_repo_subdir(repo)
                local_dir = os.path.join(self.repo_dir, subdir)
                git_metadata_dir = os.path.abspath(os.path.join(local_dir, '.git'))
                if os.path.exists(git_metadata_dir):
                    from dulwich.errors import NotGitRepository
                    try:
                        self.__pull_beq(repo, local_dir)
                    except NotGitRepository:
                        logger.exception(
                            '.git exists but is not a git repo, attempting to delete .git directory and clone'
                        )
                        # os.rmdir only removes empty directories and a .git dir never is,
                        # so it would raise OSError; rmtree removes the whole tree
                        shutil.rmtree(git_metadata_dir)
                        self.__clone_beq(repo, local_dir)
                else:
                    self.__clone_beq(repo, local_dir)
    except Exception:
        # best-effort refresh; log and carry on rather than crash the UI
        logger.exception('fail')
def __create_signals(self):
    '''
    Creates signals from the output file just created.
    :return: True if we created the signals.
    '''
    loader = AutoWavLoader(self.__preferences)
    output_file = self.__executor.get_output_path()
    if not os.path.exists(output_file):
        # extraction must have failed, tell the user and bail
        msg_box = QMessageBox()
        msg_box.setText(f"Extracted audio file does not exist at: \n\n {output_file}")
        msg_box.setIcon(QMessageBox.Critical)
        msg_box.setWindowTitle('Unexpected Error')
        msg_box.exec()
        return False
    from app import wait_cursor
    with wait_cursor(f"Creating signals for {output_file}"):
        logger.info(f"Creating signals for {output_file}")

        def name_provider(channel, channel_count):
            # derive per channel names from the user supplied signal name
            return get_channel_name(self.signalName.text(), channel, channel_count,
                                    channel_layout_name=self.__executor.channel_layout_name)

        loader.load(output_file)
        signal = loader.auto_load(name_provider, self.decimateAudio.isChecked())
        self.__signal_model.add(signal)
    return True
def __save_report(self):
    '''
    Writes the figure to the specified format.

    Prompts for an output file, validates the extension against VALID_IMG_FORMATS
    and saves the preview figure scaled to the requested pixel width.
    '''
    formats = "Report Files (*.png *.jpg *.jpeg)"
    file_name = QFileDialog.getSaveFileName(parent=self, caption='Export Report', filter=formats)
    if not file_name:
        return
    output_file = str(file_name[0]).strip()
    if not output_file:
        return
    # fmt rather than format to avoid shadowing the builtin
    fmt = os.path.splitext(output_file)[1][1:].strip()
    if fmt in VALID_IMG_FORMATS:
        # scale dpi so the rendered image matches the requested width in pixels
        scale_factor = self.widthPixels.value() / self.__x
        from app import wait_cursor
        with wait_cursor():
            self.__status_bar.showMessage(f"Saving report to {output_file}", 5000)
            self.preview.canvas.figure.savefig(output_file, format=fmt,
                                               dpi=self.__dpi * scale_factor,
                                               pad_inches=0, bbox_inches='tight')
            self.__status_bar.showMessage(f"Saved report to {output_file}", 5000)
    else:
        msg_box = QMessageBox()
        msg_box.setText(
            f"Invalid output file format - {output_file} is not one of {VALID_IMG_FORMATS}"
        )
        msg_box.setIcon(QMessageBox.Critical)
        msg_box.setWindowTitle('Unexpected Error')
        msg_box.exec()
def analyse(self):
    ''' Calculates the spectrum view. '''
    from app import wait_cursor
    with wait_cursor(f"Analysing"):
        step = 1.0 / self.signal.fs
        x = np.arange(0, self.signal.duration_seconds, step)
        y = self.signal.samples
        is_db = self.__ui.magnitudeDecibels.isChecked()
        if is_db:
            # copy before mutating so the underlying samples are untouched
            y = np.copy(y)
            # replace exact zeros to avoid log10(0)
            y[y == 0.0] = 0.000000001
            y = 20 * np.log10(np.abs(y))
            self.__limits.y1_max = 0.0
            self.__limits.y1_min = math.floor(np.min(y))
        else:
            self.__limits.y1_min = -1.0
            self.__limits.y1_max = 1.0
        self.__waveform_range.is_db = is_db
        if self.__curve is not None:
            self.__curve.set_data(x, y)
        else:
            self.__curve = self.__axes.plot(x, y, linewidth=1, color='cyan')[0]
        self.__limits.on_data_change((self.__limits.y1_min, self.__limits.y1_max), [])
        self.__limits.propagate_to_axes(draw=True)
def send_filters_to_device(self):
    ''' Sends the selected filters to the device '''
    if self.__in_complex_mode():
        ops, unsupported_filter_types_per_channel = self.__convert_filter_mappings_to_ops()
        channels_to_update = [i.text().split(' ')[1] for i in self.filterMapping.selectedItems()]
        unsynced_channels = []
        # find channels whose local filter differs from the device but which are
        # not selected for update in this sync
        for channel, sig in self.__channel_to_signal.items():
            if sig is None or channel in channels_to_update:
                continue
            if channel in self.__current_device_filters_by_channel:
                if sig.filter != self.__current_device_filters_by_channel[channel]:
                    unsynced_channels.append(channel)
        if unsynced_channels:
            result = QMessageBox.question(self,
                                          'Sync all changed channels?',
                                          f"Filters in {len(unsynced_channels)} channels have changed but will not be "
                                          f"synced to the HTP-1."
                                          f"\n\nChannels: {', '.join(sorted([k for k in unsynced_channels]))}"
                                          f"\n\nDo you want to sync all changed channels? ",
                                          QMessageBox.Yes | QMessageBox.No,
                                          QMessageBox.No)
            if result == QMessageBox.Yes:
                # select every out of sync channel in the mapping list then resend
                for channel in unsynced_channels:
                    for idx in range(self.filterMapping.count()):
                        item: QListWidgetItem = self.filterMapping.item(idx)
                        if channel == item.text().split(' ')[1]:
                            item.setSelected(True)
                self.send_filters_to_device()
                return
    else:
        ops, unsupported_filter_types_per_channel = self.__convert_current_filter_to_ops()
    do_send = True
    if unsupported_filter_types_per_channel:
        printed = '\n'.join([f"{k} - {', '.join(v)}" for k, v in unsupported_filter_types_per_channel.items()])
        result = QMessageBox.question(self,
                                      'Unsupported Filters Detected',
                                      f"Unsupported filter types found in the filter set:"
                                      f"\n\n{printed}"
                                      f"\n\nDo you want sync the supported filters only? ",
                                      QMessageBox.Yes | QMessageBox.No,
                                      QMessageBox.No)
        do_send = result == QMessageBox.Yes
    if do_send:
        from app import wait_cursor
        with wait_cursor():
            # flatten the per slot op lists into a single changemso payload
            all_ops = [op for slot_ops in ops for op in slot_ops]
            self.__last_requested_msoupdate = all_ops
            msg = f"changemso {json.dumps(self.__last_requested_msoupdate)}"
            logger.debug(f"Sending to {self.ipAddress.text()} -> {msg}")
            # spin the sync icon until the device acknowledges the update
            self.__spinner = StoppableSpin(self.syncStatus, 'sync')
            spin_icon = qta.icon('fa5s.spinner', color='green', animation=self.__spinner)
            self.syncStatus.setIcon(spin_icon)
            self.__ws_client.sendTextMessage(msg)
def apply(self):
    ''' Updates the parameters and reanalyses the model. '''
    # capture the widget state up front then hand it to the display model in one call
    colour_map = self.colourMapSelector.currentText()
    y_range = self.yAxisRange.value()
    normalised = self.normaliseCheckBox.isChecked()
    angle = self.normalisationAngle.currentText()
    polar = self.polarRange.isChecked()
    from app import wait_cursor
    with wait_cursor():
        self.__display_model.accept(colour_map, y_range, normalised, angle, polar)
def analyse(self):
    ''' Calculates the spectrum view. '''
    from app import wait_cursor
    with wait_cursor(f"Analysing"):
        # refresh both cached datasets before recomputing the magnitude range
        for sig, cache in ((self.__left_signal, self.__left_cache),
                           (self.__right_signal, self.__right_cache)):
            self.__cache_xyz(sig, cache)
        self.__init_mag_range()
        self.update_chart()
def __init__(self, signal, rms, level_adjust=0.0, parent=None):
    '''
    Initialises the stats dialog for the given signal.

    :param signal: the signal to analyse.
    :param rms: the rms level to display.
    :param level_adjust: optional level adjustment applied to the stats.
    :param parent: optional parent widget.
    '''
    from app import wait_cursor
    with wait_cursor():
        # QDialog treats parent=None the same as no parent so a single call
        # replaces the old conditional-expression double super() invocation
        super().__init__(parent=parent)
        self.__level_adjust = level_adjust
        self.__signal = signal
        self.setupUi(self)
        self.rms.setValue(rms)
        self.__set_peaks()
        self.set_extension()
def findFirstPeak(self):
    ''' Searches for the 1st reflection and updates the right window to that location. '''
    if len(self._measurementModel) > 0:
        from app import wait_cursor
        with wait_cursor('Finding peak in first measurement'):
            started_at = time.time()
            peak_idx = self._measurementModel[0].firstReflectionIndex()
            self._rightWindow['position'].setValue(peak_idx)
            finished_at = time.time()
            logger.debug(f"Found peaks in {to_millis(started_at, finished_at)}ms")
            self.updateRightWindow()
def apply(self):
    '''
    Updates the parameters and reanalyses the model.

    Locks the display model while applying all changes so a single redraw
    occurs on unlock.
    '''
    from app import wait_cursor
    with wait_cursor():
        self.__display_model.lock()
        try:
            self.__display_model.smoothing_type = self.smoothingType.currentText()
            self.__display_model.dBRange = self.yAxisRange.value()
            self.__display_model.normalised = self.normaliseCheckBox.isChecked()
            self.__display_model.normalisationAngle = self.normalisationAngle.currentText()
            self.__display_model.colour_map = self.colourMapSelector.currentText()
        finally:
            # always unlock, even if a setter raises, so the model is never left locked
            self.__display_model.unlock()
def update_chart(self):
    ''' Updates the chart for the cached data'''
    from app import wait_cursor
    with wait_cursor(f"Updating"):
        self.__clear_on_layout_change()
        # render both panes when a left signal is present, otherwise just one
        if self.__left_signal is not None:
            self.__render_both()
        else:
            self.__render_one_only()
        if self.__cb is None:
            # lazily attach the colour bar alongside the right hand axes
            divider = make_axes_locatable(self.__right_axes)
            colour_axes = divider.append_axes("right", size="5%", pad=0.05)
            self.__cb = self.__right_axes.figure.colorbar(self.__right_scatter, cax=colour_axes)
        self.__redraw()
def calculate_gain_adjustment(self):
    '''
    Based on the filters applied, calculates the gain adjustment that is required
    to avoid clipping.
    '''
    filts = list(set(self.__executor.channel_to_filter.values()))
    # any() guards the empty case too; the old `len(filts) > 1 or filts[0] is not None`
    # raised IndexError when no channel had a filter. A set holds at most one None so
    # len > 1 always implied a non-None entry, making this check equivalent otherwise.
    if any(f is not None for f in filts):
        from app import wait_cursor
        with wait_cursor():
            # headroom is the most negative headroom across all filtered channels, capped at 0
            headroom = min(
                min(self.__calc_headroom(f.filter_signal(filt=True, clip=False).samples), 0.0)
                for f in filts if f is not None
            )
            self.remuxedAudioOffset.setValue(headroom)
def analyse(self):
    ''' Calculates the spectrum view. '''
    from app import wait_cursor
    with wait_cursor(f"Analysing"):
        sample_count = len(self.signal.samples)
        x = np.linspace(0, self.signal.duration_seconds, endpoint=False, num=sample_count)
        y = self.signal.samples
        if self.__curve is not None:
            self.__curve.setData(x, y)
        else:
            # first render: create the curve and fit the view to it
            self.__curve = self.__chart.plot(x, y, pen=mkPen('c', width=1))
            self.zoom_out()
def toggle_hard_clip(self, state):
    ''' Applies or removes the hard clip option from the visible waveform '''
    selected_name = self.__selector.currentText()
    selected_data = self.__get_signal_data(selected_name)
    if selected_data is None:
        return
    from app import wait_cursor
    with wait_cursor():
        refiltered = selected_data.filter_signal(filt=self.__is_filtered.isChecked(),
                                                 clip=state == Qt.Checked,
                                                 post_filt=self.__get_post_filt_hpf())
        self.__active_signal = refiltered
        self.__waveform_chart_model.signal = refiltered
        # selector has a leading non-signal entry hence the -1 offset
        self.__waveform_chart_model.idx = self.__selector.currentIndex() - 1
        self.__waveform_chart_model.analyse()
        if self.__magnitude_model.is_visible():
            self.__magnitude_model.redraw()
def apply(self):
    ''' Updates the parameters and reanalyses the model. '''
    logger.info('Updating modal parameters')
    params = self.__parameters
    params.measurementDistance = self.measurementDistance.value()
    # convert cm to m
    params.driverRadius = self.driverRadius.value() / 100
    params.modalCoeffs = self.modalCoeffs.value()
    params.f0 = self.f0.value()
    params.q0 = self.q0.value()
    params.transFreq = self.transFreq.value()
    params.lfGain = self.lfGain.value()
    params.boxRadius = self.boxRadius.value()
    params.save()
    from app import wait_cursor
    with wait_cursor('Applying modal parameter change'):
        self.__measurement_model.reanalyse()
        self.__display_model.redrawVisible()
def __probe_file(self):
    '''
    Probes the specified file using ffprobe in order to discover the audio streams.

    On success the stream selectors are populated and the extraction controls
    enabled; otherwise a status bar message reports the absence of audio.
    '''
    file_name = self.inputFile.text()
    self.__executor = Executor(file_name, self.targetDir.text(),
                               mono_mix=self.monoMix.isChecked(),
                               decimate_audio=self.decimateAudio.isChecked(),
                               audio_format=self.audioFormat.currentText(),
                               audio_bitrate=self.eacBitRate.value(),
                               include_original=self.includeOriginalAudio.isChecked(),
                               include_subtitles=self.includeSubtitles.isChecked(),
                               signal_model=self.__signal_model if self.__is_remux else None,
                               decimate_fs=self.__preferences.get(ANALYSIS_TARGET_FS),
                               bm_fs=self.__preferences.get(BASS_MANAGEMENT_LPF_FS))
    self.__executor.progress_handler = self.__handle_ffmpeg_process
    from app import wait_cursor
    with wait_cursor(f"Probing {file_name}"):
        self.__executor.probe_file()
        self.showProbeButton.setEnabled(True)
        if self.__executor.has_audio():
            self.__show_probed_streams()
            self.__enable_stream_controls()
            self.__fit_options_to_selected()
        else:
            self.statusBar.showMessage(f"{file_name} contains no audio streams!")

def __show_probed_streams(self):
    ''' Populates the audio and video stream selectors from the probe results. '''
    for a in self.__executor.audio_stream_data:
        text, duration_micros = parse_audio_stream(self.__executor.probe, a)
        self.audioStreams.addItem(text)
        self.__stream_duration_micros.append(duration_micros)
    self.videoStreams.addItem('No Video')
    for v in self.__executor.video_stream_data:
        self.videoStreams.addItem(parse_video_stream(self.__executor.probe, v))
    # NOTE(review): the remux-only handling below is reconstructed from flattened
    # source; grouping of the gain controls under this branch assumed — confirm
    if self.__is_remux and self.videoStreams.count() > 1:
        if self.audioFormat.findText(COMPRESS_FORMAT_EAC3) == -1:
            self.audioFormat.addItem(COMPRESS_FORMAT_EAC3)
        if self.__preferences.get(EXTRACTION_COMPRESS):
            self.audioFormat.setCurrentText(COMPRESS_FORMAT_EAC3)
            self.eacBitRate.setVisible(True)
        else:
            self.audioFormat.setCurrentText(COMPRESS_FORMAT_NATIVE)
            self.eacBitRate.setVisible(False)
        self.videoStreams.setCurrentIndex(1)
        self.adjustRemuxedAudio.setEnabled(True)
        self.remuxedAudioOffset.setEnabled(True)
        self.gainOffsetLabel.setEnabled(True)
        self.calculateGainAdjustment.setEnabled(True)

def __enable_stream_controls(self):
    ''' Enables the user controls that operate on the probed streams. '''
    for widget in (self.audioStreams, self.videoStreams, self.channelCount,
                   self.lfeChannelIndex, self.monoMix, self.bassManage,
                   self.decimateAudio, self.audioFormat, self.eacBitRate,
                   self.includeOriginalAudio, self.outputFilename,
                   self.ffmpegCommandLine, self.filterMapping, self.limitRange,
                   self.showRemuxCommand):
        widget.setEnabled(True)
def refresh_repo(self):
    ''' Kicks off a background refresh of the configured BEQ repositories. '''
    from app import wait_cursor
    with wait_cursor():
        worker = RepoRefresher(self.__beq_dir, self.__beq_repos)
        # notify this instance once the background refresh completes
        worker.signals.on_end.connect(self.__refresh_complete)
        QThreadPool.globalInstance().start(worker)
def __count_and_return_cursor(self):
    ''' Counts the BEQ files while showing a busy cursor. '''
    from app import wait_cursor
    with wait_cursor():
        self.__count_beq_files()