Example #1
class MainWindow(QMainWindow):
    def __init__(self, device):
        super(MainWindow, self).__init__()

        self.series = QtCharts.QLineSeries()
        self.chart = QtCharts.QChart()
        self.chart.addSeries(self.series)
        self.axisX = QtCharts.QValueAxis()
        self.axisX.setRange(0, sampleCount)
        self.axisX.setLabelFormat("%g")
        self.axisX.setTitleText("Samples")
        self.axisY = QtCharts.QValueAxis()
        self.axisY.setRange(-1, 1)
        self.axisY.setTitleText("Audio level")
        self.chart.setAxisX(self.axisX, self.series)
        self.chart.setAxisY(self.axisY, self.series)
        self.chart.legend().hide()
        self.chart.setTitle("Data from the microphone ({})".format(device.deviceName()))

        formatAudio = QAudioFormat()
        formatAudio.setSampleRate(8000)
        formatAudio.setChannelCount(1)
        formatAudio.setSampleSize(8)
        formatAudio.setCodec("audio/pcm")
        formatAudio.setByteOrder(QAudioFormat.LittleEndian)
        formatAudio.setSampleType(QAudioFormat.UnSignedInt)

        self.audioInput = QAudioInput(device, formatAudio, self)
        self.ioDevice = self.audioInput.start()
        self.ioDevice.readyRead.connect(self._readyRead)

        self.chartView = QtCharts.QChartView(self.chart)
        self.setCentralWidget(self.chartView)

        self.buffer = [QPointF(x, 0) for x in range(sampleCount)]
        self.series.append(self.buffer)
Example #2
class Dialog(QWidget):
    def __init__(self, inference_thread):
        super(Dialog, self).__init__()
        self._btn = QPushButton('Record', self)
        self._label = QLabel('', self)

        layout = QVBoxLayout()
        layout.addWidget(self._btn)
        layout.addWidget(self._label)
        self.setLayout(layout)

        self._btn.clicked.connect(self._btn_clicked)
        self._is_recording = False

        self._inference_thread = inference_thread
        self._inference_thread.finished.connect(
            self._on_transcription_finished)

        audio_format = QAudioFormat()
        audio_format.setCodec('audio/pcm')
        audio_format.setChannelCount(1)
        audio_format.setSampleSize(16)
        audio_format.setSampleRate(16000)
        audio_format.setByteOrder(QAudioFormat.LittleEndian)
        audio_format.setSampleType(QAudioFormat.SignedInt)

        input_device_info = QAudioDeviceInfo.defaultInputDevice()
        if not input_device_info.isFormatSupported(audio_format):
            print('Can\'t record audio in 16kHz 16-bit signed PCM format.')
            exit(1)

        self._audio_input = QAudioInput(audio_format)
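
A note on the hard failure above: instead of exiting when the exact format is unsupported, QAudioDeviceInfo.nearestFormat() can fall back to the closest supported format, as Example #8 below does. A minimal sketch reusing the variable names above:

        # Hedged alternative to exit(1): fall back to the nearest supported format.
        input_device_info = QAudioDeviceInfo.defaultInputDevice()
        if not input_device_info.isFormatSupported(audio_format):
            print("16 kHz 16-bit signed PCM is not supported; using nearest format.")
            audio_format = input_device_info.nearestFormat(audio_format)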
Example #3
class MainWindow(QMainWindow):
    def __init__(self, device):
        super(MainWindow, self).__init__()

        self.series = QtCharts.QLineSeries()
        self.chart = QtCharts.QChart()
        self.chart.addSeries(self.series)
        self.axisX = QtCharts.QValueAxis()
        self.axisX.setRange(0, sampleCount)
        self.axisX.setLabelFormat("%g")
        self.axisX.setTitleText("Samples")
        self.axisY = QtCharts.QValueAxis()
        self.axisY.setRange(-1, 1)
        self.axisY.setTitleText("Audio level")
        self.chart.setAxisX(self.axisX, self.series)
        self.chart.setAxisY(self.axisY, self.series)
        self.chart.legend().hide()
        self.chart.setTitle("Data from the microphone ({})".format(
            device.deviceName()))

        formatAudio = QAudioFormat()
        formatAudio.setSampleRate(8000)
        formatAudio.setChannelCount(1)
        formatAudio.setSampleSize(8)
        formatAudio.setCodec("audio/pcm")
        formatAudio.setByteOrder(QAudioFormat.LittleEndian)
        formatAudio.setSampleType(QAudioFormat.UnSignedInt)

        self.audioInput = QAudioInput(device, formatAudio, self)
        self.ioDevice = self.audioInput.start()
        self.ioDevice.readyRead.connect(self._readyRead)

        self.chartView = QtCharts.QChartView(self.chart)
        self.setCentralWidget(self.chartView)

        self.buffer = [QPointF(x, 0) for x in range(sampleCount)]
        self.series.append(self.buffer)

    def closeEvent(self, event):
        if self.audioInput is not None:
            self.audioInput.stop()
        event.accept()

    def _readyRead(self):
        data = self.ioDevice.readAll()
        availableSamples = data.size() // resolution
        start = 0
        if (availableSamples < sampleCount):
            start = sampleCount - availableSamples
            for s in range(start):
                self.buffer[s].setY(self.buffer[s + availableSamples].y())

        dataIndex = 0
        for s in range(start, sampleCount):
            value = (ord(data[dataIndex]) - 128) / 128
            self.buffer[s].setY(value)
            dataIndex = dataIndex + resolution
        self.series.replace(self.buffer)
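
The class above relies on module-level sampleCount and resolution constants and on an application entry point that are not part of this snippet. A plausible driver, offered only as a sketch with illustrative values:

import sys
from PySide2.QtWidgets import QApplication
from PySide2.QtMultimedia import QAudioDeviceInfo

# Assumed module-level constants used by MainWindow (values are illustrative).
sampleCount = 2000
resolution = 4

if __name__ == "__main__":
    app = QApplication(sys.argv)
    input_device = QAudioDeviceInfo.defaultInputDevice()
    if input_device.isNull():
        print("There is no audio input device available.")
        sys.exit(-1)
    main_win = MainWindow(input_device)
    main_win.resize(600, 300)
    main_win.show()
    sys.exit(app.exec_())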
Example #4
class MainWindow(QMainWindow):
    def __init__(self, device):
        super(MainWindow, self).__init__()

        self.series = QtCharts.QLineSeries()
        self.chart = QtCharts.QChart()
        self.chart.addSeries(self.series)
        self.axisX = QtCharts.QValueAxis()
        self.axisX.setRange(0, sampleCount)
        self.axisX.setLabelFormat("%g")
        self.axisX.setTitleText("Samples")
        self.axisY = QtCharts.QValueAxis()
        self.axisY.setRange(-1, 1)
        self.axisY.setTitleText("Audio level")
        self.chart.setAxisX(self.axisX, self.series)
        self.chart.setAxisY(self.axisY, self.series)
        self.chart.legend().hide()
        self.chart.setTitle("Data from the microphone ({})".format(device.deviceName()))

        formatAudio = QAudioFormat()
        formatAudio.setSampleRate(8000)
        formatAudio.setChannelCount(1)
        formatAudio.setSampleSize(8)
        formatAudio.setCodec("audio/pcm")
        formatAudio.setByteOrder(QAudioFormat.LittleEndian)
        formatAudio.setSampleType(QAudioFormat.UnSignedInt)

        self.audioInput = QAudioInput(device, formatAudio, self)
        self.ioDevice = self.audioInput.start()
        self.ioDevice.readyRead.connect(self._readyRead)

        self.chartView = QtCharts.QChartView(self.chart)
        self.setCentralWidget(self.chartView)

        self.buffer = [QPointF(x, 0) for x in range(sampleCount)]
        self.series.append(self.buffer)

    def closeEvent(self, event):
        if self.audioInput is not None:
            self.audioInput.stop()
        event.accept()

    def _readyRead(self):
        data = self.ioDevice.readAll()
        availableSamples = data.size() // resolution
        start = 0
        if (availableSamples < sampleCount):
            start = sampleCount - availableSamples
            for s in range(start):
                self.buffer[s].setY(self.buffer[s + availableSamples].y())

        dataIndex = 0
        for s in range(start, sampleCount):
            value = (ord(data[dataIndex]) - 128) / 128
            self.buffer[s].setY(value)
            dataIndex = dataIndex + resolution
        self.series.replace(self.buffer)
Example #5
class MainWindow(QMainWindow):
    def __init__(self, device):
        super(MainWindow, self).__init__()

        self.series = QtCharts.QLineSeries()
        self.chart = QtCharts.QChart()
        self.chart.addSeries(self.series)
        self.axisX = QtCharts.QValueAxis()
        self.axisX.setRange(0, sampleCount)
        self.axisX.setLabelFormat("%g")
        self.axisX.setTitleText("Samples")
        self.axisY = QtCharts.QValueAxis()
        self.axisY.setRange(-1, 1)
        self.axisY.setTitleText("Audio level")
        self.chart.setAxisX(self.axisX, self.series)
        self.chart.setAxisY(self.axisY, self.series)
        self.chart.legend().hide()
        self.chart.setTitle("Data from the microphone ({})".format(device.deviceName()))

        formatAudio = QAudioFormat()
        formatAudio.setSampleRate(8000)
        formatAudio.setChannelCount(1)
        formatAudio.setSampleSize(8)
        formatAudio.setCodec("audio/pcm")
        formatAudio.setByteOrder(QAudioFormat.LittleEndian)
        formatAudio.setSampleType(QAudioFormat.UnSignedInt)

        self.audioInput = QAudioInput(device, formatAudio, self)
        self.ioDevice = self.audioInput.start()
        self.ioDevice.readyRead.connect(self._readyRead)

        self.chartView = QtCharts.QChartView(self.chart)
        self.setCentralWidget(self.chartView)

        self.buffer = [QPointF(x, 0) for x in range(sampleCount)]
        self.series.append(self.buffer)
Example #6
class AudioRecorder(QObject):
    def __init__(self, dialog, inference_thread):
        super(AudioRecorder, self).__init__()

        self.dialog = dialog
        self.inference_thread = inference_thread

        self.inference_thread.finished.connect(self.on_transcription_finished)

        self.format = QAudioFormat()
        self.format.setSampleRate(16000)
        self.format.setChannelCount(1)
        self.format.setSampleSize(16)
        self.format.setCodec("audio/pcm")
        self.format.setByteOrder(QAudioFormat.LittleEndian)
        self.format.setSampleType(QAudioFormat.SignedInt)
        self.recorder = QAudioInput(self.format, self)

        self.is_recording = False

    @Slot()
    def toggle_record(self):
        if not self.is_recording:
            logging.info("Capturing sound")
            self.is_recording = True
            self.inference_thread.send_cmd(("start",))
            self.recorded_message = self.recorder.start()
            self.recorded_message.readyRead.connect(self.read_from_io_device)
        else:
            logging.info("Finished sound capturing")
            self.is_recording = False
            self.recorder.stop()
            self.inference_thread.send_cmd(("finish",))

    @Slot()
    def read_from_io_device(self):
        ''' Forward available audio data to the inference thread. '''
        # self.sender() is the IO device returned by QAudioInput.start()
        self.inference_thread.send_cmd(("data", self.sender().readAll()))

    @Slot(str)
    def on_transcription_finished(self, result):
        logging.info(f"Transcription: {result}")

        self.dialog.set_user_message(result)
        self.dialog.process_user_message()
        self.dialog.process_machine_message()
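
A hypothetical wiring of this recorder into a UI; record_button, dialog and inference_thread come from the surrounding project and are assumed here:

recorder = AudioRecorder(dialog, inference_thread)
record_button.clicked.connect(recorder.toggle_record)  # toggles start/stop on each click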
Example #7
class AudioRecorder(QObject):
    def __init__(self, dialog, inference_thread):
        super(AudioRecorder, self).__init__()

        self.dialog = dialog
        self.inference_thread = inference_thread

        self.inference_thread.finished.connect(self.on_transcription_finished)

        self.format = QAudioFormat()
        self.format.setSampleRate(16000)
        self.format.setChannelCount(1)
        self.format.setSampleSize(16)
        self.format.setCodec("audio/pcm")
        self.format.setByteOrder(QAudioFormat.LittleEndian)
        self.format.setSampleType(QAudioFormat.SignedInt)
        self.recorder = QAudioInput(self.format, self)

        self.is_recording = False
Example #8
    def initialize_audio(self):
        """Set up parameters for audio recording."""
        self._format = QAudioFormat()
        self._format.setSampleRate(44100)
        self._format.setChannelCount(1)
        self._format.setSampleSize(8)
        self._format.setSampleType(QAudioFormat.UnSignedInt)
        self._format.setByteOrder(QAudioFormat.LittleEndian)
        self._format.setCodec("audio/pcm")

        device_info = QAudioDeviceInfo(self._device)
        if not device_info.isFormatSupported(self._format):
            print("Default format not supported - trying to use nearest.")
            self._format = device_info.nearestFormat(self._format)

        self._audio_data_handler = AudioDataHandler(self._format)

        self._audio_input = QAudioInput(self._device, self._format)
        self._audio_data_handler.data_ready.connect(self.data_ready)
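
AudioDataHandler is referenced but not defined in these snippets. The sketch below shows one way such a QIODevice sink could look, assuming the 8-bit unsigned PCM format configured above; the real class in the source project may differ:

import numpy as np
from PySide2.QtCore import QIODevice, Signal

class AudioDataHandler(QIODevice):
    """Write-only sink that decodes incoming PCM bytes and publishes them."""
    data_ready = Signal(np.ndarray)

    def __init__(self, audio_format):
        super().__init__()
        self._format = audio_format

    def start(self):
        # Open for writing so audio bytes can be pushed into writeData().
        self.open(QIODevice.WriteOnly)

    def stop(self):
        self.close()

    def readData(self, maxlen):
        return bytes()  # nothing to read back out

    def writeData(self, data, maxlen=None):
        # Accept either a QByteArray or raw bytes.
        raw = data.data() if hasattr(data, "data") else bytes(data)
        # Interpret 8-bit unsigned samples as floats in [-1, 1] and publish them.
        samples = (np.frombuffer(raw, dtype=np.uint8).astype(np.float32) - 128.0) / 128.0
        self.data_ready.emit(samples)
        return len(raw)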
Example #9
class ZeStreamWidget(QtWidgets.QWidget):
    def __init__(self, inputDevice, title, xlim, ylim):
        super(ZeStreamWidget, self).__init__()

        self.inputDevice = inputDevice

        # Creating QChart
        self.series = QtCharts.QLineSeries()
        self.chart = QtCharts.QChart()
        self.chart.addSeries(self.series)

        self.axisX = QtCharts.QValueAxis()
        self.axisX.setRange(0, sampleCount)
        self.axisX.setLabelFormat("%g")
        self.axisX.setTitleText("Samples")
        self.chart.setAxisX(self.axisX, self.series)

        self.axisY = QtCharts.QValueAxis()
        self.axisY.setRange(-1, 1)
        self.chart.setAxisY(self.axisY, self.series)

        self.chart.legend().hide()

        self.chart.setTitle(title)

        formatAudio = QAudioFormat()
        formatAudio.setSampleRate(8000)
        formatAudio.setChannelCount(1)
        formatAudio.setSampleSize(8)
        formatAudio.setCodec("audio/pcm")
        formatAudio.setByteOrder(QAudioFormat.LittleEndian)
        formatAudio.setSampleType(QAudioFormat.UnSignedInt)

        self.audioInput = QAudioInput(self.inputDevice, formatAudio, self)
        self.ioDevice = self.audioInput.start()
        self.ioDevice.readyRead.connect(self._readyRead)

        self.buffer = [QPointF(x, 0) for x in range(sampleCount)]
        self.series.append(self.buffer)
Example #10
class ZeStreamWidget(QtWidgets.QWidget):
    def __init__(self, inputDevice, title, xlim, ylim):
        super(ZeStreamWidget, self).__init__()

        self.inputDevice = inputDevice

        # Creating QChart
        self.series = QtCharts.QLineSeries()
        self.chart = QtCharts.QChart()
        self.chart.addSeries(self.series)

        self.axisX = QtCharts.QValueAxis()
        self.axisX.setRange(0, sampleCount)
        self.axisX.setLabelFormat("%g")
        self.axisX.setTitleText("Samples")
        self.chart.setAxisX(self.axisX, self.series)

        self.axisY = QtCharts.QValueAxis()
        self.axisY.setRange(-1, 1)
        self.chart.setAxisY(self.axisY, self.series)

        self.chart.legend().hide()

        self.chart.setTitle(title)

        formatAudio = QAudioFormat()
        formatAudio.setSampleRate(8000)
        formatAudio.setChannelCount(1)
        formatAudio.setSampleSize(8)
        formatAudio.setCodec("audio/pcm")
        formatAudio.setByteOrder(QAudioFormat.LittleEndian)
        formatAudio.setSampleType(QAudioFormat.UnSignedInt)

        self.audioInput = QAudioInput(self.inputDevice, formatAudio, self)
        self.ioDevice = self.audioInput.start()
        self.ioDevice.readyRead.connect(self._readyRead)

        self.buffer = [QPointF(x, 0) for x in range(sampleCount)]
        self.series.append(self.buffer)


    def _readyRead(self):

        data = self.ioDevice.readAll()
        availableSamples = data.size() // resolution
        start = 0
        if (availableSamples < sampleCount):
            start = sampleCount - availableSamples
            for s in range(start):
                self.buffer[s].setY(self.buffer[s + availableSamples].y())

        dataIndex = 0
        for s in range(start, sampleCount):
            value = (ord(data[dataIndex]) - 128) / 128
            self.buffer[s].setY(value)
            dataIndex = dataIndex + resolution
        self.series.replace(self.buffer)


    def closeEvent(self, event):
        if self.audioInput is not None:
            self.audioInput.stop()
        event.accept()
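
The widget above builds a QChart but never creates a view for it, so this snippet alone shows nothing on screen. One assumed way to display it (sampleCount and a running Qt application are expected to exist already):

stream_widget = ZeStreamWidget(QAudioDeviceInfo.defaultInputDevice(),
                               "Microphone", xlim=(0, sampleCount), ylim=(-1, 1))
layout = QtWidgets.QVBoxLayout(stream_widget)
layout.addWidget(QtCharts.QChartView(stream_widget.chart))  # embed the chart in the widget
stream_widget.show()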
Example #11
class AudioDevice(QObject):
    """Class for storing computer's audio system information."""
    data_ready = Signal(np.ndarray)
    audio_inputs = Signal(object)

    def __init__(self, default_device_name):
        super().__init__()
        self.default_device_name = default_device_name
        self._pull_timer = QTimer()
        self._pull_timer.setInterval(3000)
        self._pull_timer.timeout.connect(self.write_to_buffer)

        self._audio_input = None
        self._input = None
        self._audio_data_handler = None

        devices = QAudioDeviceInfo.availableDevices(QAudio.AudioInput)
        self._device = None
        self.monitors = []

        for item in devices:
            dev_name = item.deviceName()
            if dev_name.endswith(".monitor"):
                self.monitors.append(item)
            if self.default_device_name and self.default_device_name == dev_name:
                self._device = item
        if not self._device:
            try:
                self._device = self.monitors[0]
            except IndexError:
                self._device = QAudioDeviceInfo.defaultInputDevice()

        self.initialize_audio()

        self.start_audio()

    def initialize_audio(self):
        """Set up parameters for audio recording."""
        self._format = QAudioFormat()
        self._format.setSampleRate(44100)
        self._format.setChannelCount(1)
        self._format.setSampleSize(8)
        self._format.setSampleType(QAudioFormat.UnSignedInt)
        self._format.setByteOrder(QAudioFormat.LittleEndian)
        self._format.setCodec("audio/pcm")

        device_info = QAudioDeviceInfo(self._device)
        if not device_info.isFormatSupported(self._format):
            print("Default format not supported - trying to use nearest.")
            self._format = device_info.nearestFormat(self._format)

        self._audio_data_handler = AudioDataHandler(self._format)

        self._audio_input = QAudioInput(self._device, self._format)
        self._audio_data_handler.data_ready.connect(self.data_ready)

    def start_audio(self):
        """Start running all audio objects."""
        self._audio_data_handler.start()
        # Pull mode: start() without an argument returns a QIODevice; the
        # timer-driven write_to_buffer() slot reads it and forwards the bytes
        # to the data handler, so a second push-mode start() call is not needed.
        self._input = self._audio_input.start()
        self._pull_timer.start()

    def get_input_devices(self):
        devices = []
        if self._device:
            devices.append(self._device)
        for item in self.monitors:
            if item.deviceName() not in [x.deviceName() for x in devices]:
                devices.append(item)
        system_default = QAudioDeviceInfo.defaultInputDevice()
        if system_default.deviceName() not in [x.deviceName() for x in devices]:
            devices.append(system_default)
        return devices

    def get_input_device_names(self):
        devices = self.get_input_devices()
        names = [x.deviceName() for x in devices]
        return names

    @Slot(str)
    def change_audio_input(self, input_name):
        # Copy so the default input device is not appended to self.monitors itself.
        input_devices = list(self.monitors)
        input_devices.append(QAudioDeviceInfo.defaultInputDevice())
        for i in range(len(input_devices)):
            if input_devices[i].deviceName() == input_name:
                self._device = input_devices[i]
                self.initialize_audio()
                self.start_audio()
                break

    @Slot()
    def write_to_buffer(self):
        """Write data to buffer for later analysis."""
        len_ = self._audio_input.bytesReady()
        if len_ > 0:
            self._audio_data_handler.writeData(self._input.readAll(), len_)
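
A minimal usage sketch for AudioDevice; the consumer function is a stand-in for whatever analysis code the surrounding project connects to data_ready, and a running Qt event loop is assumed so the pull timer can fire:

def on_samples(samples):
    # samples is the numpy array emitted by AudioDataHandler.data_ready
    print("received", samples.shape[0], "samples")

audio = AudioDevice(default_device_name="")  # empty name -> first *.monitor or default input
audio.data_ready.connect(on_samples)
print(audio.get_input_device_names())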
Example #12
class Dialog(QWidget):
    def __init__(self, inference_thread):
        super(Dialog, self).__init__()
        self._btn = QPushButton('Record', self)
        self._label = QLabel('', self)

        layout = QVBoxLayout()
        layout.addWidget(self._btn)
        layout.addWidget(self._label)
        self.setLayout(layout)

        self._btn.clicked.connect(self._btn_clicked)
        self._is_recording = False

        self._inference_thread = inference_thread
        self._inference_thread.finished.connect(
            self._on_transcription_finished)

        audio_format = QAudioFormat()
        audio_format.setCodec('audio/pcm')
        audio_format.setChannelCount(1)
        audio_format.setSampleSize(16)
        audio_format.setSampleRate(16000)
        audio_format.setByteOrder(QAudioFormat.LittleEndian)
        audio_format.setSampleType(QAudioFormat.SignedInt)

        input_device_info = QAudioDeviceInfo.defaultInputDevice()
        if not input_device_info.isFormatSupported(audio_format):
            print('Can\'t record audio in 16kHz 16-bit signed PCM format.')
            exit(1)

        self._audio_input = QAudioInput(audio_format)

    @Slot()
    def _btn_clicked(self):
        if self._is_recording:
            # Was recording -> Button clicked -> Stop recording
            self._is_recording = False
            self._btn.setText('Record')
            self._audio_input.stop()
            self._inference_thread.send_cmd(('finish', ))
        else:
            # Was not recording -> Button clicked -> Start recording
            self._is_recording = True
            self._label.setText('...')
            self._btn.setText('Stop')
            self._buffer = bytearray()
            self._inference_thread.send_cmd(('start', ))
            # QAudioInput retains the QIODevice returned here internally so
            # there's no need to keep a reference to it
            io_device = self._audio_input.start()
            io_device.readyRead.connect(self._read_from_io_device)

    @Slot()
    def _read_from_io_device(self):
        ''' Forward available audio data to the inference thread. '''
        # self.sender() is the IO device returned by QAudioInput.start()
        data = self.sender().readAll()
        self._inference_thread.send_cmd(('data', data))
        self._buffer += data.data()

    @Slot(str)
    def _on_transcription_finished(self, result):
        print('Transcription:', result)
        self._label.setText(result)
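
A hypothetical driver for this dialog; the real inference thread class is not shown in these snippets, and it only needs a send_cmd() method and a finished(str) signal:

import sys
from PySide2.QtWidgets import QApplication

if __name__ == "__main__":
    app = QApplication(sys.argv)
    inference_thread = InferenceThread()  # assumed class from the surrounding project
    inference_thread.start()
    dialog = Dialog(inference_thread)
    dialog.show()
    sys.exit(app.exec_())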