Code example #1
class MainWindow(QWidget):
    sig_sound_play_at = pyqtSignal(float)
    sig_sound_pause = pyqtSignal()
    sig_sound_stop = pyqtSignal()
    sig_record_stop = pyqtSignal()

    def __init__(self):
        super().__init__()

        self.sound = None
        self.signal = None

        self.plotbackground = None

        self.playing = False
        self.sound_paused = False
        self.sound_start_at = 0

        self.recording = False
        self.doppler = False  # Doppler simulation

        self.initUI()

    def initUI(self):
        spacer = QSpacerItem(50, 0, QSizePolicy.Minimum)

        # File selector
        lbl_file = QLabel("File:")
        self.txt_file = QLineEdit()
        self.txt_file.setPlaceholderText("Select file ...")
        btn_file = QPushButton("Select")
        btn_file.clicked.connect(self.show_open_dialog)

        # Save
        self.btn_save = QPushButton("Save")
        self.btn_save.setDisabled(True)
        self.btn_save.clicked.connect(self.show_save_dialog)

        # Audio controls
        self.btn_pause = QPushButton("Pause")
        self.btn_pause.setDisabled(True)
        self.btn_pause.clicked.connect(self.sound_pause)
        self.sound_mutex = QMutex()
        self.sound_pause_cond = QWaitCondition()

        self.btn_play = QPushButton("Play")
        self.btn_play.setDisabled(True)
        self.btn_play.clicked.connect(self.sound_play)

        self.btn_stop = QPushButton("Stop")
        self.btn_stop.setDisabled(True)
        self.btn_stop.clicked.connect(self.sound_stop)

        # Doppler Shift simulation
        self.cb_source_speed = QComboBox()
        self.cb_source_speed.setToolTip("Source speed")
        self.cb_source_speed.addItems(
            ["20 km/h", "50 km/h", "100 km/h", "150 km/h", "200 km/h"])
        self.cb_source_speed.setCurrentIndex(2)
        self.source_speeds = [5.56, 13.89, 27.78, 41.67,
                              55.56]  # Same indexes as text above (in m/s)
        self.btn_doppler = QPushButton("Simulate Doppler")
        self.btn_doppler.setToolTip("Apply simple Doppler Shift simulation")
        self.btn_doppler.setDisabled(True)
        self.btn_doppler.clicked.connect(self.doppler_simulate)

        # Effects
        self.cb_effect = QComboBox()
        self.cb_effect.setToolTip("Preset effects")
        self.cb_effect.setMaximumWidth(150)
        self.effects = []
        for root, dirs, files in os.walk("resources/impulses"):
            for file in files:
                if file.endswith(".wav") or file.endswith(".mp3"):
                    self.cb_effect.addItem(file.split(".")[0])
                    self.effects.append(os.path.join(root, file))
        self.btn_effect = QPushButton("Apply Effect")
        self.btn_effect.clicked.connect(self.effect_apply)
        self.btn_effect_load = QPushButton("Load Effect")
        self.btn_effect_load.clicked.connect(self.effect_load)

        # Recording
        self.cb_sample_rate = QComboBox()
        self.cb_sample_rate.setToolTip("Sampling rate")
        self.cb_sample_rate.addItems(
            ["8.000 Hz", "11.025 Hz", "22.050 Hz", "44.100 Hz"])
        self.cb_sample_rate.setCurrentIndex(3)
        self.sampling_rates = [8000, 11025, 22050,
                               44100]  # Same indexes as text above
        self.btn_record = QPushButton("Record")
        self.btn_record.setMinimumWidth(100)
        self.btn_record.clicked.connect(self.record)

        self.cb_bit_depth = QComboBox()
        self.cb_bit_depth.setToolTip("Bit depth")
        self.cb_bit_depth.addItems(["8 b", "16 b"])
        self.cb_bit_depth.setCurrentIndex(1)
        self.bit_depths = [pyaudio.paUInt8,
                           pyaudio.paInt16]  # Same indexes as text above

        # Analysis (ST-DFT)
        self.stdft_window = QLineEdit()
        self.stdft_window.setText("256")
        self.stdft_window.setToolTip("Window length")
        self.stdft_window.setMaximumWidth(35)
        self.stdft_window.setValidator(QIntValidator(0, 2147483647))
        self.stdft_noverlap = QLineEdit()
        self.stdft_noverlap.setText("128")
        self.stdft_noverlap.setMaximumWidth(35)
        self.stdft_noverlap.setValidator(QIntValidator(0, 2147483647))
        self.stdft_noverlap.setToolTip(
            "Overlap between windows (must be smaller than window length)")
        self.btn_analyse = QPushButton("Analyse")
        self.btn_analyse.setToolTip(
            "Perform Short Time Discrete Fourier Transform analysis (spectrogram)"
        )
        self.btn_analyse.setDisabled(True)
        self.btn_analyse.clicked.connect(lambda: self.analyse())

        # Filter
        self.filter_order = QLineEdit()
        self.filter_order.setText("5")
        self.filter_order.setToolTip("Filter order")
        self.filter_order.setMaximumWidth(25)
        self.filter_order.setValidator(QIntValidator(0, 100))
        self.filter_cut_low = QLineEdit()
        self.filter_cut_low.setText("500")
        self.filter_cut_low.setToolTip("Low critical frequency")
        self.filter_cut_low.setMaximumWidth(35)
        self.filter_cut_low.setValidator(QIntValidator(0, 2147483647))
        self.filter_cut_high = QLineEdit()
        self.filter_cut_high.setText("5000")
        self.filter_cut_high.setToolTip("High critical frequency")
        self.filter_cut_high.setMaximumWidth(35)
        self.filter_cut_high.setValidator(QIntValidator(0, 2147483647))
        self.btn_filter = QPushButton("Filter")
        self.btn_filter.setToolTip("Filter frequencies")
        self.btn_filter.setDisabled(True)
        self.btn_filter.clicked.connect(self.filter)

        # Graph space
        self.figure = Figure()
        FigureCanvas(self.figure)  # canvas registers itself as self.figure.canvas
        self.figure.canvas.setMinimumHeight(400)
        self.figure.canvas.mpl_connect("button_press_event",
                                       self.on_plot_click)
        self.figure.canvas.mpl_connect("motion_notify_event",
                                       self.on_plot_over)

        # Graph toolbar
        self.plotnav = NavigationToolbar(self.figure.canvas,
                                         self.figure.canvas)
        self.plotnav.setStyleSheet("QToolBar { border: 0px }")
        self.plotnav.setOrientation(Qt.Vertical)

        # Layout
        hbox_top = QHBoxLayout()
        hbox_top.addWidget(lbl_file)
        hbox_top.addWidget(self.txt_file)
        hbox_top.addWidget(btn_file)
        hbox_top.addWidget(self.btn_save)
        hbox_top.addStretch()
        hbox_top.addSpacerItem(spacer)
        hbox_top.addWidget(self.btn_pause)
        hbox_top.addWidget(self.btn_play)
        hbox_top.addWidget(self.btn_stop)

        hbox_bot = QHBoxLayout()
        hbox_bot.addWidget(self.cb_source_speed)
        hbox_bot.addWidget(self.btn_doppler)
        hbox_bot.addStretch()
        hbox_bot.addSpacerItem(spacer)
        hbox_bot.addWidget(self.cb_effect)
        hbox_bot.addWidget(self.btn_effect)
        hbox_bot.addWidget(self.btn_effect_load)
        hbox_bot.addStretch()
        hbox_bot.addSpacerItem(spacer)
        hbox_bot.addWidget(self.cb_sample_rate)
        hbox_bot.addWidget(self.cb_bit_depth)
        hbox_bot.addWidget(self.btn_record)

        hbox_bot2 = QHBoxLayout()
        hbox_bot2.addWidget(self.stdft_window)
        hbox_bot2.addWidget(self.stdft_noverlap)
        hbox_bot2.addWidget(self.btn_analyse)
        hbox_bot2.addStretch()
        hbox_bot2.addWidget(self.filter_order)
        hbox_bot2.addWidget(self.filter_cut_low)
        hbox_bot2.addWidget(self.filter_cut_high)
        hbox_bot2.addWidget(self.btn_filter)

        vbox = QVBoxLayout()
        vbox.addLayout(hbox_top)
        vbox.addWidget(self.figure.canvas)
        vbox.addLayout(hbox_bot)
        vbox.addLayout(hbox_bot2)

        # Window
        self.setLayout(vbox)
        self.setGeometry(300, 300, 1000, 500)
        self.setWindowTitle("Signal Processor - Sound")
        self.show()

    # Overridden resize event
    def resizeEvent(self, resizeEvent):
        if self.is_sound_loaded():
            self.on_plot_change(None)
        self.plotnav.move(self.width() - 55, 0)

    def update_ui(self):
        block_general = self.playing or self.sound_paused or self.recording

        self.btn_save.setDisabled(not self.is_sound_loaded())

        self.btn_pause.setDisabled(not self.playing)
        self.btn_pause.setText("Resume" if self.sound_paused else "Pause")
        self.btn_play.setDisabled(self.playing or self.recording)
        self.btn_stop.setDisabled(not self.playing or self.recording)

        self.plotnav.setDisabled(self.playing and not self.sound_paused)

        self.btn_doppler.setDisabled(not self.is_sound_loaded()
                                     or self.doppler)

        self.btn_effect.setDisabled(block_general)
        self.btn_effect_load.setDisabled(block_general)

        self.btn_record.setDisabled(self.playing or self.sound_paused)
        self.btn_record.setText(
            "Stop Recording" if self.recording else "Record")

        self.btn_analyse.setDisabled(block_general)
        self.btn_filter.setDisabled(block_general)

    def show_open_dialog(self):
        fname = QFileDialog.getOpenFileName(self,
                                            "Open file",
                                            filter="Audio (*.wav *.mp3)")
        if fname[0] and self.load_sound(fname[0]):
            self.txt_file.setText(fname[0])

    def show_save_dialog(self):
        fname = QFileDialog.getSaveFileName(self,
                                            "Save file",
                                            filter="Audio (*.wav *.mp3)")
        if fname[0] and self.is_sound_loaded():
            ext = fname[0].rsplit(".", 1)[-1]
            try:
                self.sound.export(fname[0], format=ext)
            except exceptions.CouldntEncodeError:
                print("Failed to save signal!")
            else:
                self.txt_file.setText(fname[0])

    def load_sound(self, file):
        self.sound_stop()
        self.doppler = False

        try:
            self.sound = AudioSegment.from_file(file)
            self.signal = np.array(self.sound.get_array_of_samples())
        except exceptions.CouldntDecodeError:
            print("Failed to load sound!")
            self.sound = None
            self.signal = None
            return False
        else:
            self.update_ui()
            self.plot(self.signal, self.sound)
            return True

    def is_sound_loaded(self):
        return self.sound is not None and self.signal is not None

    def load_signal(self, data, sample_width, rate, channels):
        self.sound = AudioSegment(
            data=data,
            sample_width=sample_width,  # 3 (24-bit) not supported by pydub
            frame_rate=rate,
            channels=channels)
        self.signal = np.array(self.sound.get_array_of_samples())
        self.update_ui()
        self.plot(self.signal, self.sound)

    def effect_load(self):
        feffect = self.effects[self.cb_effect.currentIndex()]
        if self.load_sound(feffect):
            self.txt_file.setText(feffect)
            self.plot(self.signal, self.sound)

    def effect_apply(self):
        if not self.is_sound_loaded():
            print("Failed to apply effect! No sound loaded!")
            return

        if self.sound.channels > 2:
            print("Failed to apply effect! Sound has more than 2 channels!")
            return

        feffect = self.effects[self.cb_effect.currentIndex()]
        try:
            effect_sound = AudioSegment.from_file(feffect)
            effect_signal = np.array(effect_sound.get_array_of_samples())
        except exceptions.CouldntDecodeError:
            print("Failed to load effect!")
            return

        if effect_sound.frame_rate != self.sound.frame_rate:
            print(
                "Failed to apply effect! Effect rate ({}) not same as sound rate ({})!"
                .format(effect_sound.frame_rate, self.sound.frame_rate))
            return

        # Create stereo in case original sound is mono
        sound_channels = self.sound.channels
        if self.sound.channels < 2:
            self.sound = AudioSegment.from_mono_audiosegments(
                self.sound, self.sound)
            self.signal = np.array(self.sound.get_array_of_samples())

        # Convolve signals using fast fourier transform (into stereo, each channel separately)
        step = effect_sound.channels
        left = None
        right = None

        for i in range(0, sound_channels):
            if MANUAL_CONVOLVE:
                # Manual convolve
                n = fftpack.helper.next_fast_len(
                    len(self.signal[i::step]) + len(effect_signal[i::step]) -
                    1)
                x = np.fft.rfft(
                    np.append(self.signal[i::step],
                              np.zeros(len(effect_signal[i::step]) - 1)), n)
                y = np.fft.rfft(
                    np.append(effect_signal[i::step],
                              np.zeros(len(self.signal[i::step]) - 1)), n)
                ch = np.fft.irfft(x * y)
            else:
                # SciPy fftconvolve
                ch = signal.fftconvolve(self.signal[i::step],
                                        effect_signal[i::step])

            # Normalize and amplify
            ch = np.array(ch / np.linalg.norm(ch))
            ch = np.multiply(ch, 65535)  # float to int
            volume_diff = np.max(self.signal[i::step]) / np.max(ch)
            ch = np.multiply(ch, volume_diff)

            if i == 0:
                left = ch
                if sound_channels == 1:
                    right = left  # Mono input, copy channel
            else:
                right = ch

        # Join channels back together and load signal
        final = np.empty(left.size + right.size, np.int16)
        final[0::step] = left.astype(np.int16)
        final[1::step] = right.astype(np.int16)
        self.load_signal(b''.join(final), 2, self.sound.frame_rate,
                         effect_sound.channels)

    def doppler_simulate(self):
        self.doppler = True
        self.update_ui()

        speed_source = self.source_speeds[self.cb_source_speed.currentIndex()]

        # Frequency manipulation
        speed_sound = constants.speed_of_sound
        freq_in = speed_sound / (speed_sound -
                                 speed_source) * self.sound.frame_rate
        freq_out = speed_sound / (speed_sound +
                                  speed_source) * self.sound.frame_rate

        half1 = self.sound[0:int(len(self.sound) * 0.5)]
        half1 = AudioSegment(data=half1.get_array_of_samples(),
                             sample_width=self.sound.sample_width,
                             frame_rate=int(freq_in),
                             channels=self.sound.channels)

        half2 = self.sound[int(len(self.sound) * 0.5):]
        half2 = AudioSegment(data=half2.get_array_of_samples(),
                             sample_width=self.sound.sample_width,
                             frame_rate=int(freq_out),
                             channels=self.sound.channels)

        self.sound = half1.append(half2, crossfade=100)
        self.signal = np.array(self.sound.get_array_of_samples())

        # Volume manipulation (decrease with distance)
        half_time = half1.duration_seconds
        dist_max = speed_source * half_time
        print("Maximum distance: {} m".format(dist_max))

        distances = np.linspace(
            0.0,
            speed_source *
            (len(self.signal) / self.sound.frame_rate / self.sound.channels),
            num=int(len(self.signal) / self.sound.channels))  # Plot distances
        distances -= dist_max  # Take away maximum distance to get relative from center
        distances = np.absolute(
            distances)  # Make positive in both directions (_/^\_)
        distances = np.maximum(distances, 1.0)  # Prevent center clipping

        new_volumes = np.power(distances, -1.0)  # Scale volume with distance
        for i in range(0, self.sound.channels):  # Apply to all channels
            self.signal[i::self.sound.channels] = np.multiply(
                self.signal[i::self.sound.channels], new_volumes)

        self.signal = self.signal.astype(np.int16)

        # Load and plot new signal with doppler and visualization subplot
        self.load_signal(b''.join(self.signal), self.sound.sample_width,
                         self.sound.frame_rate, self.sound.channels)
        self.plot(self.signal, self.sound, doppler_max=half_time)

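    # Added note on doppler_simulate() above: for a source approaching the
    # listener at speed v the perceived frequency is f * c / (c - v), and for
    # a receding source it is f * c / (c + v), with c the speed of sound; the
    # method realizes this by re-tagging the frame rate of the first half
    # (approach) and the second half (recede) of the clip.
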
    def analyse(self,
                filter_w=[],
                filter_h=[],
                filter_cl=-1.0,
                filter_ch=-1.0):
        if not self.stdft_window.text() or not self.stdft_noverlap.text():
            print("Failed to analyse! Invalid input (must be integers)!")
            return

        window = int(self.stdft_window.text())
        noverlap = int(self.stdft_noverlap.text())

        if window <= 0 or noverlap <= 0:
            print(
                "Failed to analyse! Invalid input (must be integers greater than 0)!"
            )
            return

        if noverlap >= window:
            print("Failed to analyse! Overlap must be less than window size!")
            return

        if self.sound.channels > 1:
            print("Warning! Analysing only first channel!")

        self.plot(self.signal,
                  self.sound,
                  stdft_window=window,
                  stdft_noverlap=noverlap,
                  filter_w=filter_w,
                  filter_h=filter_h,
                  filter_cl=filter_cl,
                  filter_ch=filter_ch)

    def filter(self):
        if not self.filter_order.text() or not self.filter_cut_low.text(
        ) or not self.filter_cut_high.text():
            print("Failed to filter! Invalid input (must be integers)!")
            return

        order = int(self.filter_order.text())
        cut_low = int(self.filter_cut_low.text())
        cut_high = int(self.filter_cut_high.text())

        if order < 0 or cut_low < 0 or cut_high < 0:
            print(
                "Failed to filter! Invalid input (must be integers greater than or equal to 0)!"
            )
            return

        # Normalize critical frequencies (Nyquist as 1) into separate variables
        # so cut_low/cut_high stay in Hz for plotting the cutoff lines
        nyquist = self.sound.frame_rate * 0.5
        wn_low = cut_low / nyquist
        wn_high = cut_high / nyquist

        # Design filter
        b, a = signal.butter(order, [wn_low, wn_high], "bandstop")
        w, h = signal.freqz(b, a)

        # Filter each channel
        for i in range(0, self.sound.channels):
            x = np.array(self.signal[i::self.sound.channels])  # Original
            y = np.zeros(len(x))  # Filtered

            if MANUAL_FILTER:
                # Manual filter
                for n in range(len(x)):
                    y[n] = 0
                    for k in range(len(b)):
                        if n - k >= 0:
                            y[n] = y[n] + b[k] * x[n - k]

                    for k in range(1, len(a)):
                        if n - k >= 0:
                            y[n] = y[n] - a[k] * y[n - k]

                if MANUAL_FILTER_TEST:
                    y_sp = signal.lfilter(b, a, x)
                    if np.allclose(y, y_sp, rtol=1e-02, atol=1e-08):
                        print("Manual filter test passed!")
                    else:
                        print("Manual filter test failed!")
            else:
                # SciPy lfilter
                y = signal.lfilter(b, a, x)

            self.signal[i::self.sound.channels] = y

        # Load and analyse filtered signal
        self.load_signal(b''.join(self.signal), self.sound.sample_width,
                         self.sound.frame_rate, self.sound.channels)
        self.analyse(filter_w=w,
                     filter_h=h,
                     filter_cl=cut_low,
                     filter_ch=cut_high)

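    # Added note on filter() above: the MANUAL_FILTER loop is the direct-form I
    # difference equation of the designed IIR band-stop filter,
    #   y[n] = sum_k b[k] * x[n-k] - sum_{k>=1} a[k] * y[n-k],
    # which is what signal.lfilter(b, a, x) evaluates (signal.butter returns
    # coefficients normalized so that a[0] == 1).
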
    def plot(self,
             sig,
             sound,
             doppler_max=-1.0,
             stdft_window=-1,
             stdft_noverlap=-1,
             filter_w=[],
             filter_h=[],
             filter_cl=-1.0,
             filter_ch=-1.0):
        self.figure.clear()
        self.subplots = []
        self.lclick = []
        self.lclick_pos = 0
        self.lover = []
        self.lover_pos = 0
        self.lframe = []
        self.lframe_pos = 0
        self.sound_start_at = 0

        doppler = doppler_max != -1.0
        analysis = stdft_window != -1 and stdft_noverlap != -1
        filter_fr = len(filter_w) != 0 and len(
            filter_h) != 0 and filter_cl != -1.0 and filter_ch != -1.0
        subplots = sound.channels + doppler + analysis + filter_fr + (
            1 if filter_fr else 0)

        # X axis as time in seconds
        time = np.linspace(0, sound.duration_seconds, num=len(sig))

        for i in range(0, sound.channels):
            ax = self.figure.add_subplot(subplots, 1, i + 1)

            # Plot current channel, slicing it away
            ax.plot(time[i::sound.channels],
                    sig[i::sound.channels])  # [samp1L, samp1R, samp2L, samp2R]
            ax.margins(0)

            # Hide X axis on all but last channel
            if i + 1 < subplots - filter_fr:
                ax.get_xaxis().set_visible(False)
            # Display Y label somewhere in the middle
            if i == max(int(sound.channels / 2) - 1, 0):
                ax.set_ylabel("Amplitude")

            self.subplots.append(ax)

        if doppler:
            ax = self.figure.add_subplot(subplots, 1,
                                         sound.channels + analysis + 1)
            ax.margins(0)
            ax.plot(time, sig * [0])
            ax.axhline(0, linewidth=2, color="black")
            ax.axvline(doppler_max,
                       ymin=0.25,
                       ymax=0.75,
                       linewidth=2,
                       color="blue")
            ax.set_ylim([-1, 1])
            ax.get_yaxis().set_ticks([])
            ax.set_ylabel("Doppler Sim")
            self.subplots.append(ax)

        if analysis:
            ax = self.figure.add_subplot(subplots, 1,
                                         sound.channels + doppler + 1)
            ax.margins(0)
            ax.specgram(sig[0::sound.channels],
                        Fs=self.sound.frame_rate,
                        NFFT=stdft_window,
                        noverlap=stdft_noverlap)
            ax.set_ylabel("Freq (Hz)")
            self.subplots.append(ax)

        self.figure.subplots_adjust(hspace=0.0)
        ax.set_xlabel("Time (s)")

        if filter_fr:
            ax = self.figure.add_subplot(subplots, 1,
                                         sound.channels + analysis + 2)
            ax.margins(0, 0.1)
            ax.plot(filter_w / np.pi * self.sound.frame_rate * 0.5,
                    abs(filter_h) * max(sig[0::sound.channels]))
            ax.set_xlabel("Frequency (Hz)")
            ax.set_ylabel("Amplitude")
            ax.axvline(filter_cl, color="green")  # Cutoff frequency start
            ax.axvline(filter_ch, color="green")  # Cutoff frequency stop
            self.subplots.append(ax)

        # Handle zoom/pan events
        for ax in self.subplots:
            ax.callbacks.connect("xlim_changed", self.on_plot_change)
            ax.callbacks.connect("ylim_changed", self.on_plot_change)

        self.figure.canvas.draw()

        # Save background for updating on the fly
        self.plotbackground = self.figure.canvas.copy_from_bbox(
            self.figure.bbox)

        # Create lines (for later use, hidden until first update)
        for ax in self.subplots:
            line = ax.axvline(0, linewidth=1, color="black")
            self.lclick.append(line)
            line = ax.axvline(0, linewidth=1, color="grey")
            self.lover.append(line)
            line = ax.axvline(0, linewidth=1, color="blue")
            self.lframe.append(line)

    def on_plot_change(self, axes):
        # Hide all lines to not save them as part of background
        for line in itertools.chain(self.lclick, self.lover, self.lframe):
            line.set_visible(False)

        # Redraw and resave new layout background
        self.figure.canvas.draw()
        self.plotbackground = self.figure.canvas.copy_from_bbox(
            self.figure.bbox)

        # Reshow all lines
        for line in itertools.chain(self.lclick, self.lover, self.lframe):
            line.set_visible(True)

    def is_plotnav_active(self):
        # True when no toolbar tool (pan/zoom) is engaged, so clicks on the
        # plot should be handled here (relies on the private
        # NavigationToolbar2._active attribute of older Matplotlib versions)
        return self.plotnav._active is None

    def on_plot_click(self, event):
        if not self.is_plotnav_active():
            return

        if event.xdata is not None and event.ydata is not None:
            self.sound_start_at = event.xdata
            self.sound_play()
            self.update_ui()

            # Update lines
            self.lclick_pos = event.xdata
            self.plot_update()

    def on_plot_over(self, event):
        if not self.is_plotnav_active():
            return

        # Update lines
        if event.xdata is not None and event.ydata is not None:
            self.lover_pos = event.xdata
        else:
            self.lover_pos = 0

        if self.plotbackground is not None:
            self.plot_update()

    def plot_frame(self, x):
        # Update lines
        self.lframe_pos = x
        self.plot_update()

    def plot_update(self):
        self.figure.canvas.restore_region(self.plotbackground)
        for i, (lclick, lover,
                lframe) in enumerate(zip(self.lclick, self.lover,
                                         self.lframe)):
            lclick.set_xdata([self.lclick_pos])
            lover.set_xdata([self.lover_pos])
            lframe.set_xdata([self.lframe_pos])
            self.subplots[i].draw_artist(lclick)
            self.subplots[i].draw_artist(lover)
            self.subplots[i].draw_artist(lframe)
        self.figure.canvas.blit(self.figure.bbox)

    def sound_play(self):
        if self.playing:
            self.sig_sound_play_at.emit(self.sound_start_at)
        elif self.is_sound_loaded():
            self.sound_thread = SoundThread(self.sound, self.sound_start_at,
                                            self.sound_mutex,
                                            self.sound_pause_cond)
            self.sig_sound_play_at.connect(self.sound_thread.play_at)
            self.sig_sound_pause.connect(self.sound_thread.pause)
            self.sig_sound_stop.connect(self.sound_thread.stop)
            self.sound_thread.sig_frame.connect(self.plot_frame)
            self.sound_thread.finished.connect(self.on_sound_done)
            self.sound_thread.start()

            self.playing = True
            self.update_ui()

    def sound_stop(self):
        self.sig_sound_stop.emit()
        self.sound_mutex.lock()
        self.sound_pause_cond.wakeAll()
        self.sound_mutex.unlock()

    def sound_pause(self):  # Toggle
        if self.sound_paused:
            self.sig_sound_pause.emit()
            self.sound_mutex.lock()
            self.sound_pause_cond.wakeAll()
            self.sound_mutex.unlock()
        else:
            self.sig_sound_pause.emit()
        self.sound_paused = not self.sound_paused
        self.update_ui()

    def on_sound_done(self):
        self.playing = False
        self.sound_paused = False
        self.update_ui()
        self.lframe_pos = 0
        self.plot_update()

    def record(self):  # Toggle
        if self.recording:
            self.sig_record_stop.emit()
        else:
            self.recording = True
            bit_depth = self.bit_depths[self.cb_bit_depth.currentIndex()]
            rate = self.sampling_rates[self.cb_sample_rate.currentIndex()]
            self.record_thread = RecordThread(
                bit_depth, rate, 2)  # Always record in stereo (2 channels)
            self.sig_record_stop.connect(self.record_thread.stop)
            self.record_thread.sig_return.connect(self.on_record_return)
            self.record_thread.start()
            self.update_ui()

    def on_record_return(self, data, sample_width, rate, channels):
        self.load_signal(data, sample_width, rate, channels)
        self.recording = False
        self.update_ui()
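
A standalone sketch (not part of the original listing) of the technique effect_apply() uses when MANUAL_CONVOLVE is set: zero-pad both signals to a common fast FFT length, multiply their real FFTs and invert. Under that assumption it should agree with scipy.signal.fftconvolve up to floating-point error; the random arrays below merely stand in for one audio channel and one impulse-response channel.

import numpy as np
from scipy import fftpack, signal

x = np.random.randn(1000)   # stand-in for one audio channel
h = np.random.randn(200)    # stand-in for one impulse-response channel

# Pad both sequences to a fast FFT length that covers the full convolution
n = fftpack.helper.next_fast_len(len(x) + len(h) - 1)
X = np.fft.rfft(np.append(x, np.zeros(len(h) - 1)), n)
H = np.fft.rfft(np.append(h, np.zeros(len(x) - 1)), n)
manual = np.fft.irfft(X * H, n)[:len(x) + len(h) - 1]

print(np.allclose(manual, signal.fftconvolve(x, h)))  # expected: True
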
Code example #2
class MainWindow(QWidget):
    def __init__(self):
        super().__init__()

        self.ax = None

        self.orig_img = None
        self.img = None

        self.initUI()

    def initUI(self):
        spacer = QSpacerItem(50, 0, QSizePolicy.Minimum)
        spacer_small = QSpacerItem(10, 0, QSizePolicy.Minimum)

        # File selector
        lbl_file = QLabel("File:")
        self.txt_file = QLineEdit()
        self.txt_file.setPlaceholderText("Select file ...")
        btn_file = QPushButton("Select")
        btn_file.clicked.connect(self.show_open_dialog)

        # Save
        self.btn_save = QPushButton("Save")
        self.btn_save.clicked.connect(self.show_save_dialog)

        # Reset
        self.btn_reset = QPushButton("Reset")
        self.btn_reset.setToolTip(
            "Show originally loaded image (reset all modifications)")
        self.btn_reset.clicked.connect(lambda: self.plot_image(self.orig_img))

        # Histogram
        self.btn_hist = QPushButton("Histogram")
        self.btn_hist.setToolTip("Draw histogram of current image")
        self.btn_hist.clicked.connect(self.histogram)

        # Graph space
        self.figure = Figure()
        FigureCanvas(self.figure)  # canvas registers itself as self.figure.canvas
        self.figure.canvas.setMinimumHeight(300)

        # Conversion to Grayscale
        self.cb_gray = QComboBox()
        self.cb_gray.setToolTip("Grayscale conversion method")
        self.cb_gray.addItems(["Average", "Red", "Green", "Blue"])
        self.btn_gray = QPushButton("Grayscale")
        self.btn_gray.setToolTip("Convert loaded image to grayscale image")
        self.btn_gray.clicked.connect(
            lambda: self.grayscale(self.cb_gray.currentIndex() - 1))

        # Segmentation / Binarization
        self.segment_thresh = QLineEdit()
        self.segment_thresh.setText("100")
        self.segment_thresh.setToolTip("Segmentation threshold")
        self.segment_thresh.setMaximumWidth(30)
        self.segment_thresh.setValidator(QIntValidator(0, 255))
        self.btn_segment = QPushButton("Binarize")
        self.btn_segment.setToolTip(
            "Convert loaded image to binary image using segmentation")
        self.btn_segment.clicked.connect(
            lambda: self.binarize(int(self.segment_thresh.text())))

        # Graph toolbar
        self.plotnav = NavigationToolbar(self.figure.canvas,
                                         self.figure.canvas)
        self.plotnav.setStyleSheet("QToolBar { border: 0px }")
        self.plotnav.setOrientation(Qt.Vertical)

        # Image processing implementation
        self.cb_imgproc_impl = QComboBox()
        self.cb_imgproc_impl.setToolTip("Processing implementation")
        self.cb_imgproc_impl.addItems(["OpenCV", "SciPy", "Manual"])

        # Smooth / Blur
        self.smooth_intensity = QLineEdit()
        self.smooth_intensity.setText("5")
        self.smooth_intensity.setToolTip(
            "Smooth intensity (must be at least 3 and odd)")
        self.smooth_intensity.setMaximumWidth(30)
        self.smooth_intensity.setValidator(QIntValidator(0, 255))
        self.btn_smooth = QPushButton("Smooth")
        self.btn_smooth.setToolTip("Smooth (blur) current image")
        self.btn_smooth.clicked.connect(
            lambda: self.smooth(int(self.smooth_intensity.text())))

        # Sharpen
        self.sharpen_intensity = QLineEdit()
        self.sharpen_intensity.setText("5")
        self.sharpen_intensity.setToolTip(
            "Sharpen intensity (must be at least 5)")
        self.sharpen_intensity.setMaximumWidth(30)
        self.sharpen_intensity.setValidator(QIntValidator(0, 255))
        self.btn_sharpen = QPushButton("Sharpen")
        self.btn_sharpen.setToolTip("Sharpen current image")
        self.btn_sharpen.clicked.connect(
            lambda: self.sharpen(int(self.sharpen_intensity.text())))

        # Edge detection
        self.edge_intensity = QLineEdit()
        self.edge_intensity.setText("4")
        self.edge_intensity.setToolTip(
            "Edge detection intensity (must be at least 4)")
        self.edge_intensity.setMaximumWidth(30)
        self.edge_intensity.setValidator(QIntValidator(0, 255))
        self.btn_edge = QPushButton("Detect Edges")
        self.btn_edge.setToolTip("Detect edges on current image")
        self.btn_edge.clicked.connect(
            lambda: self.detect_edges(int(self.edge_intensity.text())))

        # Dilate
        self.dilate_intensity = QLineEdit()
        self.dilate_intensity.setText("5")
        self.dilate_intensity.setToolTip(
            "Dilation intensity (must be at least 2)")
        self.dilate_intensity.setMaximumWidth(30)
        self.dilate_intensity.setValidator(QIntValidator(0, 255))
        self.btn_dilate = QPushButton("Dilate")
        self.btn_dilate.setToolTip("Dilate current image")
        self.btn_dilate.clicked.connect(
            lambda: self.dilate(int(self.dilate_intensity.text())))

        # Erode
        self.erode_intensity = QLineEdit()
        self.erode_intensity.setText("5")
        self.erode_intensity.setToolTip(
            "Erosion intensity (must be at least 2)")
        self.erode_intensity.setMaximumWidth(30)
        self.erode_intensity.setValidator(QIntValidator(0, 255))
        self.btn_erode = QPushButton("Erode")
        self.btn_erode.setToolTip("Erode current image")
        self.btn_erode.clicked.connect(
            lambda: self.erode(int(self.erode_intensity.text())))

        # Layout
        hbox_top = QHBoxLayout()
        hbox_top.addWidget(lbl_file)
        hbox_top.addWidget(self.txt_file)
        hbox_top.addWidget(btn_file)
        hbox_top.addWidget(self.btn_save)
        hbox_top.addWidget(self.btn_reset)
        hbox_top.addStretch()
        hbox_top.addSpacerItem(spacer)
        hbox_top.addWidget(self.btn_hist)
        hbox_top.addStretch()
        hbox_top.addSpacerItem(spacer)
        hbox_top.addWidget(self.cb_gray)
        hbox_top.addWidget(self.btn_gray)
        hbox_top.addSpacerItem(spacer_small)
        hbox_top.addWidget(self.segment_thresh)
        hbox_top.addWidget(self.btn_segment)

        hbox_bot = QHBoxLayout()
        hbox_bot.addWidget(self.cb_imgproc_impl)
        hbox_bot.addStretch()
        hbox_bot.addSpacerItem(spacer)
        hbox_bot.addWidget(self.smooth_intensity)
        hbox_bot.addWidget(self.btn_smooth)
        hbox_bot.addWidget(self.sharpen_intensity)
        hbox_bot.addWidget(self.btn_sharpen)
        hbox_bot.addWidget(self.edge_intensity)
        hbox_bot.addWidget(self.btn_edge)
        hbox_bot.addStretch()
        hbox_bot.addSpacerItem(spacer)
        hbox_bot.addWidget(self.dilate_intensity)
        hbox_bot.addWidget(self.btn_dilate)
        hbox_bot.addWidget(self.erode_intensity)
        hbox_bot.addWidget(self.btn_erode)

        vbox = QVBoxLayout()
        vbox.addLayout(hbox_top)
        vbox.addWidget(self.figure.canvas)
        vbox.addLayout(hbox_bot)

        self.update_ui()

        # Window
        self.setLayout(vbox)
        self.setGeometry(300, 300, 1000, 500)
        self.setWindowTitle("Signal Processor - Image")
        self.show()

    # Overridden resize event
    def resizeEvent(self, resizeEvent):
        self.plotnav.move(self.width() - 55, 0)

    def update_ui(self):
        block_general = not self.is_image_loaded()

        self.btn_save.setDisabled(block_general)
        self.btn_reset.setDisabled(block_general)
        self.btn_hist.setDisabled(block_general)
        self.btn_gray.setDisabled(block_general)
        self.btn_segment.setDisabled(block_general)
        self.btn_smooth.setDisabled(block_general)
        self.btn_sharpen.setDisabled(block_general)
        self.btn_dilate.setDisabled(block_general)
        self.btn_erode.setDisabled(block_general)
        self.btn_edge.setDisabled(block_general)

    def show_open_dialog(self):
        fname, ext = QFileDialog.getOpenFileName(
            self, "Open file", filter="Image (*.png *.jpg *.bmp)")
        if fname and self.load_image(fname):
            self.txt_file.setText(fname)

    def show_save_dialog(self):
        fname, ext = QFileDialog.getSaveFileName(
            self, "Save file", filter="Image (*.png *.jpg *.bmp)")
        if fname and self.is_image_loaded():
            # Save as PNG if not set
            if '.' not in fname:
                fname += ".png"

            cv2.imwrite(fname, cv2.cvtColor(self.img, cv2.COLOR_RGB2BGR))
            self.txt_file.setText(fname)

    def load_image(self, file):
        if not os.path.isfile(file):
            return False

        # Read image and convert from BGR (OpenCV default) to RGB
        self.orig_img = cv2.imread(file)
        self.orig_img = cv2.cvtColor(self.orig_img, cv2.COLOR_BGR2RGB)
        self.img = self.orig_img

        self.plot_image(self.orig_img)
        self.update_ui()
        return True

    def is_image_loaded(self):
        return self.img is not None

    def reset_plot(self):
        self.figure.clear()
        self.ax = self.figure.add_subplot(1, 1, 1)

    def plot_image(self, img):
        self.reset_plot()
        self.ax.axis("off")

        self.ax.imshow(img, cmap='gray' if len(img.shape) < 3 else None)
        self.figure.canvas.draw()

        self.img = img

    # Draw histogram of current image
    def histogram(self):
        self.reset_plot()
        self.ax.margins(0)

        # Plot each channel on RGB image or only first channel on grayscale image
        colors = ('r', 'g', 'b') if len(self.img.shape) > 2 else ('b', )
        for i, color in enumerate(colors):
            hist = cv2.calcHist([self.img], [i], None, [256], [0, 256])
            self.ax.plot(hist, color=color)

        self.figure.canvas.draw()

    # Convert current image to grayscale
    def grayscale(self, type=-1):  # -1 - Average, 0 - Red, 1 - Green, 2 - Blue
        # Do nothing if already grayscale
        if len(self.img.shape) < 3:
            return self.img

        if type < 0:
            # Convert to grayscale by averaging all channels
            img_gray = cv2.cvtColor(self.img, cv2.COLOR_RGB2GRAY)
        else:
            # Convert to grayscale by taking one channel
            img_gray = self.img[:, :, type]

        self.plot_image(img_gray)

    # Binarize current image
    def binarize(self, threshold=0):
        # Make sure we are operating on grayscale image (applied to original image)
        self.grayscale()
        _, img_bin = cv2.threshold(self.img, threshold, 255,
                                   cv2.THRESH_BINARY_INV)

        self.plot_image(img_bin)

    # Get convolution implementation from combo box (lower-case text)
    def get_imgproc_impl(self):
        return self.cb_imgproc_impl.currentText().lower()

    # Smooth (blur) current image
    def smooth(self, intensity=5):
        if intensity < 3 or intensity % 2 == 0:
            print(
                "Error! Smooth intensity must be at least 3 and an odd integer!"
            )
            return

        kernel = np.ones((intensity, intensity)) / intensity**2
        img_smooth = self.convolve2d(kernel)

        self.plot_image(img_smooth)

    # Sharpen current image
    def sharpen(self, intensity=5):
        if intensity < 5:
            print(
                "Warning! Sharpen intensity should be at least 5! Defaulting to 5!"
            )

        kernel = np.array([[0, -1, 0],
                           [-1, max(intensity, 5), -1],
                           [0, -1, 0]])
        img_sharp = self.convolve2d(kernel)

        self.plot_image(img_sharp)

    # Detect edges on current image
    def detect_edges(self, intensity=5):
        if intensity < 4:
            print(
                "Warning! Edge detection intensity should be at least 4! Defaulting to 4!"
            )

        kernel = np.array(([0, 1, 0], [1, -max(intensity, 4), 1], [0, 1, 0]))
        img_edges = self.convolve2d(kernel)

        self.plot_image(img_edges)

    # Dilate current image
    def dilate(self, intensity=5):
        if intensity < 2:
            print(
                "Warning! Dilation intensity should be at least 2! Defaulting to 2!"
            )
            intensity = 2

        kernel = np.full((intensity, intensity), 255, dtype=np.uint8)

        imgproc = self.get_imgproc_impl()
        if imgproc == "opencv":
            # OpenCV dilate
            img_dilate = cv2.dilate(self.img, kernel)
        elif imgproc == "scipy":
            # SciPy grey_dilation
            img_dilate = self.morph2d_scipy(
                self.img, kernel, morph_func=morphology.grey_dilation)
        elif imgproc == "manual":
            # Manual morphology
            img_dilate = self.convolve2d_manual(
                self.img,
                kernel,
                func=lambda roi, kernel: np.max(roi[kernel.astype(bool)]))
        else:
            print("Error! Unknown image processing implementation!")
            img_dilate = self.img

        self.plot_image(img_dilate)

    # Erode current image
    def erode(self, intensity=5):
        if intensity < 2:
            print(
                "Warning! Erosion intensity should be at least 2! Defaulting to 2!"
            )
            intensity = 2

        kernel = np.full((intensity, intensity), 255, dtype=np.uint8)

        imgproc = self.get_imgproc_impl()
        if imgproc == "opencv":
            img_erode = cv2.erode(self.img, kernel)
        elif imgproc == "scipy":
            img_erode = self.morph2d_scipy(self.img,
                                           kernel,
                                           morph_func=morphology.grey_erosion)
        elif imgproc == "manual":
            img_erode = self.convolve2d_manual(
                self.img,
                kernel,
                func=lambda roi, kernel: np.min(roi[kernel.astype(bool)]))
        else:
            print("Error! Unknown image processing implementation!")
            img_erode = self.img

        self.plot_image(img_erode)

    # Convolve given image
    def convolve2d(self, kernel):
        imgproc = self.get_imgproc_impl()
        if imgproc == "opencv":
            return cv2.filter2D(self.img, -1, kernel)
        elif imgproc == "scipy":
            return self.convolve2d_scipy(self.img, kernel)
        elif imgproc == "manual":
            return self.convolve2d_manual(self.img,
                                          kernel,
                                          func=lambda roi, kernel:
                                          (roi * kernel).sum())

        print("Error! Unknown image processing implementation!")
        return self.img

    # Convolve given image with SciPy
    def convolve2d_scipy(self, img, kernel):
        if len(img.shape) < 3:
            # Grayscale
            return signal.convolve2d(img, kernel, mode="same", boundary="symm")
        else:
            # Color - convolve each channel
            img_conv = []
            for ch in range(img.shape[2]):
                img_conv_ch = signal.convolve2d(img[:, :, ch],
                                                kernel,
                                                mode="same",
                                                boundary="symm")
                img_conv.append(img_conv_ch)

            # Stack channels, clip to [0, 255] and cast back to the original image dtype
            return np.clip(np.stack(img_conv, axis=2), 0,
                           255).astype(img.dtype)

    # Convolve given image with manual implementation and given pixel functor
    def convolve2d_manual(self, img, kernel, func=None):
        if func is None:
            print("Error! Invalid convolution functor!")
            return img

        # Get spatial dimensions of the image and kernel
        (img_h, img_w) = img.shape[:2]
        (kern_h, kern_w) = kernel.shape[:2]

        # Pad border
        pad = int((kern_w - 1) / 2)
        img = cv2.copyMakeBorder(img, pad, pad, pad, pad, cv2.BORDER_REPLICATE)

        if len(img.shape) < 3:
            # Grayscale
            return self.convolve2d_manual_channel(img,
                                                  kernel, (img_h, img_w),
                                                  pad,
                                                  func=func)
        else:
            # Color - convolve each channel
            img_conv = []
            for ch in range(img.shape[2]):
                img_conv_ch = self.convolve2d_manual_channel(img[:, :, ch],
                                                             kernel,
                                                             (img_h, img_w),
                                                             pad,
                                                             func=func)
                img_conv.append(img_conv_ch)

            # Stack channels, clip to [0, 255] and cast back to the original image dtype
            return np.clip(np.stack(img_conv, axis=2), 0,
                           255).astype(img.dtype)

    # Convolve one channel of given image with manual implementation
    def convolve2d_manual_channel(self, img, kernel, img_size, pad, func):
        (img_h, img_w) = img_size

        # Slide the kernel over the image from left to right and top to bottom
        img_conv = np.zeros((img_h, img_w))
        for y in np.arange(pad, img_h + pad):
            for x in np.arange(pad, img_w + pad):
                # Extract region of interest (ROI) of the image by extracting the center region
                roi = img[y - pad:y + pad + 1, x - pad:x + pad + 1]
                # Perform convolution (element-wise multiplication between ROI and kernel and sum of matrix)
                k = func(roi, kernel)
                # Store convolved value in the current coordinate
                img_conv[y - pad, x - pad] = k

        # Rescale convolved image to be in range [0, 255]
        return rescale_intensity(img_conv, in_range=(0, 255)) * 255

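    # Added note: convolve2d_manual() above doubles as a grey-scale morphology
    # engine: convolve2d() passes the element-wise multiply-and-sum functor for
    # filtering, while dilate() and erode() pass np.max / np.min over the
    # kernel footprint, i.e. grey-scale dilation / erosion with a flat
    # structuring element.
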
    # Morph current image with SciPy
    def morph2d_scipy(self, img, kernel, morph_func=None):
        if morph_func is None:
            print("Error! Invalid morphology functor!")
            return img

        # SciPy grey morphology adds/subtracts the structure values, so use a
        # flat (all-zero) structuring element of the same shape
        kernel = np.zeros(kernel.shape)

        if len(img.shape) < 3:
            # Grayscale
            return morph_func(img, structure=kernel)
        else:
            # Color - morph each channel
            img_morph = []
            for ch in range(img.shape[2]):
                img_morph_ch = morph_func(img[:, :, ch],
                                          structure=kernel).astype(img.dtype)
                img_morph.append(img_morph_ch)

            # Stack channels, clip to [0, 255] and cast back to the original image dtype
            return np.clip(np.stack(img_morph, axis=2), 0,
                           255).astype(img.dtype)
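
A standalone sketch (not part of the original listing) that applies the three kernels built by smooth(), sharpen() and detect_edges() above to a toy grayscale array with scipy.signal.convolve2d, mirroring the class's "scipy" code path; the toy image and the chosen intensities are illustrative assumptions.

import numpy as np
from scipy import signal

img = np.random.randint(0, 256, (8, 8)).astype(float)  # toy grayscale image

kernels = {
    "smooth": np.ones((3, 3)) / 9.0,                             # box blur, intensity 3
    "sharpen": np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]]),  # intensity 5
    "edges": np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]]),       # intensity 4
}

for name, kernel in kernels.items():
    out = signal.convolve2d(img, kernel, mode="same", boundary="symm")
    out = np.clip(out, 0, 255).astype(np.uint8)  # same clipping as convolve2d_scipy
    print(name, out.min(), out.max())
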
Code example #3
File: MeasureGui.py  Project: pawel21/Py3LabDevice
class App(QMainWindow, MeasureDeviceConnect):
    STOP_CURRENT = 0
    POINTS_TO_MEASURE = 0

    def __init__(self):
        super(App, self).__init__()
        self.left = 10
        self.top = 10
        self.title = 'Measure Laser'
        self.width = 1340
        self.height = 900
        self.initUI()

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)

        self.plot_canvas = PlotCanvas(self, width=10, height=6)
        self.plot_canvas.move(0, 0)
        self.matplotlib_toolbar = NavigationToolbar(self.plot_canvas, self)
        self.matplotlib_toolbar.move(300, 0)
        self.matplotlib_toolbar.resize(500, 50)

        button_to_save_data = QPushButton("Save data", self)
        button_to_save_data.clicked.connect(self.save_data)
        button_to_save_data.move(1020, 100)
        button_to_save_data.resize(140, 50)

        button_to_start_measure = QPushButton('Start', self)
        button_to_start_measure.move(1020, 10)
        button_to_start_measure.resize(140, 50)
        button_to_start_measure.clicked.connect(self.click_to_start_measure)

        button_to_stop_measure = QPushButton('Stop', self)
        button_to_stop_measure.move(1180, 10)
        button_to_stop_measure.resize(140, 50)
        button_to_stop_measure.clicked.connect(self.click_to_stop_measure)

        button_to_set_current = QPushButton('Set stop current [mA]', self)
        button_to_set_current.move(30, 650)
        button_to_set_current.resize(150, 30)
        button_to_set_current.clicked.connect(self.set_current)
        self.line_to_enter_stop_current = QLineEdit(self)
        self.line_to_enter_stop_current.setText("0")
        self.line_to_enter_stop_current.move(200, 650)

        button_to_set_number_of_points_to_measure = QPushButton(
            'Set number of points to measure', self)
        button_to_set_number_of_points_to_measure.move(350, 650)
        button_to_set_number_of_points_to_measure.resize(220, 30)
        button_to_set_number_of_points_to_measure.clicked.connect(
            self.set_points_to_measure)
        self.POINTS_TO_MEASURE = 0
        self.line_to_enter_points_to_measure = QLineEdit(self)
        self.line_to_enter_points_to_measure.setText("0")
        self.line_to_enter_points_to_measure.move(600, 650)

        self.label_info = QPlainTextEdit(self)
        self.label_info.setReadOnly(True)
        self.label_info.move(100, 750)
        self.label_info.resize(500, 100)
        self.OUT_MSG += "\nPlease set parameters to measure"
        self.label_info.setPlainText(self.OUT_MSG)
        self.show()

    def click_to_start_measure(self):
        try:
            _thread.start_new_thread(Measure.do_measure, (
                float(self.STOP_CURRENT) * 1e-3,
                float(self.POINTS_TO_MEASURE),
            ))
            self.OUT_MSG += "\nstart new measure"
            self.label_info.setPlainText(self.OUT_MSG)
        except Exception as err:
            print(err)
            print("Error, unable to start thread")

        time.sleep(4)
        self.plot_canvas.real_time_plot()

    def click_to_stop_measure(self):
        MeasureDeviceConnect.ldc.off()

    def set_current(self):
        self.STOP_CURRENT = self.line_to_enter_stop_current.text()
        self.OUT_MSG = self.OUT_MSG + "\nstop current set to " + str(
            self.STOP_CURRENT) + "mA"
        self.label_info.setPlainText(self.OUT_MSG)

    def set_points_to_measure(self):
        self.POINTS_TO_MEASURE = self.line_to_enter_points_to_measure.text()
        self.OUT_MSG = self.OUT_MSG + "\nnumbers of points to measure set to " + self.POINTS_TO_MEASURE
        self.label_info.setPlainText(self.OUT_MSG)

    def save_data(self):
        fname = QFileDialog.getSaveFileName(self, 'Save file', '\home',
                                            "Text files (*.txt)")
        with open(str(fname[0]), "w") as f:
            f.write("ii")
Code example #4
class App(QMainWindow, MeasureDeviceConnect):
    START_CURRENT = 0
    STOP_CURRENT = 0
    POINTS_TO_MEASURE = 0

    def __init__(self):
        super(App, self).__init__()
        self.left = 10
        self.top = 10
        self.title = 'Measure Laser'
        self.width = 1340
        self.height = 900
        self.initUI()

    def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)

        self.plot_canvas = PlotCanvas(self, width=10, height=6)
        self.plot_canvas.move(0, 0)
        self.matplotlib_toolbar = NavigationToolbar(self.plot_canvas, self)
        self.matplotlib_toolbar.move(300, 0)
        self.matplotlib_toolbar.resize(500, 50)

        self.open_ldc_settings_window = QPushButton(self)
        self.open_ldc_settings_window.setText("Ldc settings")
        self.open_ldc_settings_window.move(1150, 750)
        self.open_ldc_settings_window.resize(120, 80)
        self.open_ldc_settings_window.clicked.connect(
            self.click_to_open_ldc_settings_window)
        self.ldc_settings_window = LdcSettingsWindow(self)

        button_to_start_measure = QPushButton('Start', self)
        button_to_start_measure.setStyleSheet(
            'QPushButton {background-color: #A3C1DA; color: red;}')
        button_to_start_measure.move(1020, 10)
        button_to_start_measure.resize(140, 50)
        button_to_start_measure.clicked.connect(self.click_to_start_measure)

        button_to_stop_measure = QPushButton('Stop', self)
        button_to_stop_measure.setStyleSheet(
            'QPushButton {background-color: #A3C1DA; color: red;}')
        button_to_stop_measure.move(1180, 10)
        button_to_stop_measure.resize(140, 50)
        button_to_stop_measure.clicked.connect(self.click_to_stop_measure)

        button_to_save_data = QPushButton("Save data", self)
        button_to_save_data.setStyleSheet(
            'QPushButton {background-color: #A3C1DA; color: red;}')
        button_to_save_data.clicked.connect(self.save_data)
        button_to_save_data.move(1020, 100)
        button_to_save_data.resize(140, 50)

        self.label_with_current_wavelength = QLabel(
            'Current wavelength: ' + str(self.WAVELENGTH) + " nm", self)
        self.label_with_current_wavelength.move(1020, 180)
        self.label_with_current_wavelength.resize(200, 80)
        button_to_set_wavelength = QPushButton('Set wavelength [nm]', self)
        button_to_set_wavelength.move(1020, 250)
        button_to_set_wavelength.resize(150, 30)
        button_to_set_wavelength.setStyleSheet(
            'QPushButton {background-color: #6dad49; color: red;}')
        button_to_set_wavelength.clicked.connect(self.set_wavelength)
        self.line_to_set_wavelength = QLineEdit(self)
        self.line_to_set_wavelength.setText(str(self.WAVELENGTH))
        self.line_to_set_wavelength.move(1180, 250)

        button_to_set_start_current = QPushButton('Set start current [mA]',
                                                  self)
        button_to_set_start_current.move(30, 650)
        button_to_set_start_current.resize(150, 30)
        button_to_set_start_current.clicked.connect(self.set_start_current)
        self.line_to_enter_start_current = QLineEdit(self)
        self.line_to_enter_start_current.setText("0")
        self.line_to_enter_start_current.move(200, 650)

        button_to_set_stop_current = QPushButton('Set stop current [mA]', self)
        button_to_set_stop_current.move(30, 700)
        button_to_set_stop_current.resize(150, 30)
        button_to_set_stop_current.clicked.connect(self.set_stop_current)
        self.line_to_enter_stop_current = QLineEdit(self)
        self.line_to_enter_stop_current.setText("0")
        self.line_to_enter_stop_current.move(200, 700)

        button_to_set_number_of_points_to_measure = QPushButton(
            'Set points to measure', self)
        button_to_set_number_of_points_to_measure.move(350, 650)
        button_to_set_number_of_points_to_measure.resize(220, 30)
        button_to_set_number_of_points_to_measure.clicked.connect(
            self.set_points_to_measure)
        self.POINTS_TO_MEASURE = 0
        self.line_to_enter_points_to_measure = QLineEdit(self)
        self.line_to_enter_points_to_measure.setText("0")
        self.line_to_enter_points_to_measure.move(600, 650)

        button_to_set_timeout_measure = QPushButton('Set timeout in seconds',
                                                    self)
        button_to_set_timeout_measure.move(350, 700)
        button_to_set_timeout_measure.resize(220, 30)
        button_to_set_timeout_measure.clicked.connect(self.set_timeout)
        self.TIMEOUT_SECONDS_MEASURE = 0
        self.line_to_enter_timeout = QLineEdit(self)
        self.line_to_enter_timeout.setText("0")
        self.line_to_enter_timeout.move(600, 700)

        self.label_info = QPlainTextEdit(self)
        self.label_info.setReadOnly(True)
        self.label_info.move(100, 750)
        self.label_info.resize(500, 100)
        self.OUT_MSG += "\nPlease set parameters to measure"
        self.label_info.setPlainText(self.OUT_MSG)
        self.label_info.moveCursor(QTextCursor.End)
        self.label_info.ensureCursorVisible()
        self.show()

    @pyqtSlot()
    def click_to_open_ldc_settings_window(self):
        self.ldc_settings_window.exec_()

    def click_to_start_measure(self):
        try:
            measure_instance = Measure()
            measure_thread = threading.Thread(
                target=measure_instance.do_measure,
                args=(
                    float(self.START_CURRENT) * 1e-3,
                    float(self.STOP_CURRENT) * 1e-3,
                    float(self.POINTS_TO_MEASURE),
                    float(self.TIMEOUT_SECONDS_MEASURE),
                ))
            measure_thread.start()
            data_now = datetime.datetime.now()
            self.OUT_MSG += "\n"
            self.OUT_MSG += data_now.strftime("%Y-%m-%d %H:%M:%S")
            self.OUT_MSG += "  Start new measure"
            self.label_info.setPlainText(self.OUT_MSG)
        except Exception as err:
            print(err)
            print("Error, unable to start thread")
        time.sleep(4)
        self.plot_canvas.real_time_plot()

    def click_to_stop_measure(self):
        MeasureDeviceConnect.ldc.off()

    def save_data(self):
        file_name_to_save = QFileDialog.getSaveFileName(
            self, "Save file", "", "Text files (*.txt)")
        current_working_directory = os.getcwd()
        shutil.copy(os.path.join(current_working_directory, "data.txt"),
                    str(file_name_to_save[0]))

    def set_wavelength(self):
        self.WAVELENGTH = self.line_to_set_wavelength.text()
        MeasureDeviceConnect.pm100.set_wavelength_in_nm(self.WAVELENGTH)
        self.WAVELENGTH = MeasureDeviceConnect.pm100.get_current_wavelength_in_nm(
        )
        self.label_with_current_wavelength.setText("Current wavelength: " +
                                                   str(self.WAVELENGTH) +
                                                   " nm")

    def set_start_current(self):
        self.START_CURRENT = self.line_to_enter_start_current.text()
        self.OUT_MSG = self.OUT_MSG + "\nstart current set to " + str(
            self.START_CURRENT) + " mA"
        self.label_info.setPlainText(self.OUT_MSG)

    def set_stop_current(self):
        self.STOP_CURRENT = self.line_to_enter_stop_current.text()
        self.OUT_MSG = self.OUT_MSG + "\nstop current set to " + str(
            self.STOP_CURRENT) + " mA"
        self.label_info.setPlainText(self.OUT_MSG)

    def set_points_to_measure(self):
        self.POINTS_TO_MEASURE = self.line_to_enter_points_to_measure.text()
        self.OUT_MSG = self.OUT_MSG + "\nnumbers of points to measure set to " + self.POINTS_TO_MEASURE
        self.label_info.setPlainText(self.OUT_MSG)

    def set_timeout(self):
        self.TIMEOUT_SECONDS_MEASURE = self.line_to_enter_timeout.text()
        self.OUT_MSG = self.OUT_MSG + "\ntimeout set to " + self.TIMEOUT_SECONDS_MEASURE + " s"
        self.label_info.setPlainText(self.OUT_MSG)
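
A standalone sketch (not from the original file) of the thread-launch pattern used in click_to_start_measure() above, with a hypothetical stand-in for Measure.do_measure so it can run without the lab hardware. It shows how the QLineEdit strings are converted to floats and the currents scaled from mA to A before being handed to the worker thread.

import threading
import time

def do_measure(start_current, stop_current, points, timeout):
    # hypothetical stand-in for Measure.do_measure
    print("sweep {:.3f} A -> {:.3f} A, {:.0f} points, timeout {:.0f} s".format(
        start_current, stop_current, points, timeout))
    time.sleep(1)

# Values as they would arrive from the QLineEdit widgets (strings)
start, stop, points, timeout = "0", "50", "100", "5"
measure_thread = threading.Thread(target=do_measure,
                                  args=(float(start) * 1e-3,  # mA -> A
                                        float(stop) * 1e-3,   # mA -> A
                                        float(points),
                                        float(timeout)))
measure_thread.start()
measure_thread.join()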