def __init__(self, chunksize=512, rate=44100, channel=2, sample_size=8,
             codec="audio/pcm", threshold=500, save_dir=None):
    self.chunksize = chunksize
    self.rate = rate
    self.sample_size = sample_size
    self.channel = channel
    self.sampleWidth = 2
    #
    self.format = QAudioFormat()
    self.format.setChannelCount(self.channel)
    self.format.setSampleRate(self.rate)
    self.format.setSampleSize(self.sample_size)
    self.format.setCodec(codec)
    self.format.setByteOrder(QAudioFormat.LittleEndian)  # 1
    self.format.setSampleType(QAudioFormat.UnSignedInt)  # 2 this setting determines the recording quality; a wrong sample type gives loud buzzing
    #
    self.block = b""  # of type bytes
    self.record_buffer = QBuffer()
    self.play_buffer = QBuffer()
    # QIODevice() can't be instantiated directly, since it is an abstract C++ class
    # (no Python instantiation), so the so-called QAudioBuffer class isn't needed either
    self.pos = 0
    self.duration = 0
    #
    self.threshold = threshold
    self.save_dir = save_dir
    self.save_path = "./sound/test.wav"
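A minimal sketch of how the format and buffer above might be wired to QAudioInput for in-memory recording; the method names start_record/stop_record and the audio_input attribute are hypothetical, not part of the original class:

def start_record(self):
    # open the in-memory buffer and feed it from the default input device
    self.record_buffer.open(QIODevice.WriteOnly)
    self.audio_input = QAudioInput(self.format)
    self.audio_input.start(self.record_buffer)

def stop_record(self):
    self.audio_input.stop()
    self.record_buffer.close()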
def initAudio(self):
    self.audioformat = QAudioFormat()
    self.audioformat.setSampleRate(self.samplerate)
    self.audioformat.setChannelCount(2)
    self.audioformat.setSampleSize(32)
    self.audioformat.setCodec('audio/pcm')
    self.audioformat.setByteOrder(QAudioFormat.LittleEndian)
    self.audioformat.setSampleType(QAudioFormat.Float)
    self.audiooutput = QAudioOutput(self.audioformat)
    self.audiooutput.setVolume(1.0)
def __init__(self):
    super(JackStreamListen, self).__init__()
    self.ip = '127.0.0.1'
    self.port = '23'
    self.state = 'disconnected'
    self.channel_select = -1
    self.channel_count = -1
    # self.tsfmt = '%y%m%d-%H%M%S:'
    # self.textFgColor = 'rgb(0,0,0)'
    # self.textBgColor = 'rgb(255,255,255)'
    # self.fontstr = 'Helvetica'
    logging.debug('DBG: state = ' + self.state)
    # self.insFile = ""; self.insDir = ""
    # self.insSleepAmount = 0
    self.qsock = QtNetwork.QTcpSocket(self)
    self.qsock.readyRead.connect(self.onReadyRead)
    # create None/empty variables to be created/updated later
    self.prevpkt = bytearray()  # not a QByteArray, b/c reusing msgify_pkt
    self.audiodata = bytearray()
    self.clips = []
    self.rms = []
    self.audiofmt = QAudioFormat()
    self.audioout = QAudioOutput()
    self.iodevice = self.createIoDevice()
    # FIXME self.tcpSocket.error.connect(self.displayError)
    # self.font = QtGui.QFont()
    self.loadSettings()
    self.initUI()
def initializeAudio(self):
    self.m_pullTimer = QTimer(self, timeout=self.pullTimerExpired)
    self.m_pullMode = True

    self.m_format = QAudioFormat()
    self.m_format.setSampleRate(self.DataSampleRateHz)
    self.m_format.setChannelCount(1)
    self.m_format.setSampleSize(16)
    self.m_format.setCodec('audio/pcm')
    self.m_format.setByteOrder(QAudioFormat.LittleEndian)
    self.m_format.setSampleType(QAudioFormat.SignedInt)

    info = QAudioDeviceInfo(QAudioDeviceInfo.defaultOutputDevice())
    if not info.isFormatSupported(self.m_format):
        qWarning("Default format not supported - trying to use nearest")
        self.m_format = info.nearestFormat(self.m_format)

    self.m_generator = Generator(self.m_format,
                                 self.DurationSeconds * 1000000,
                                 self.ToneSampleRateHz, self)
    self.createAudioOutput()
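The pull-mode timer above needs a slot that feeds the output device. A sketch modeled on the Qt audiooutput example; it assumes createAudioOutput() sets self.m_audioOutput (a QAudioOutput) and self.m_output (the QIODevice returned by its start()):

def pullTimerExpired(self):
    # drain the generator into the device in period-sized chunks
    if self.m_audioOutput is not None and self.m_audioOutput.state() != QAudio.StoppedState:
        chunks = self.m_audioOutput.bytesFree() // self.m_audioOutput.periodSize()
        for _ in range(chunks):
            data = self.m_generator.read(self.m_audioOutput.periodSize())
            if len(data) != self.m_audioOutput.periodSize():
                break
            self.m_output.write(data)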
def __init__(self, parent=None):
    super().__init__(parent)
    # start new thread that constantly polls MIDI input
    self.create_MIDI()
    # create GUI (windows, slider, etc...)
    self.create_UI(parent)
    format = QAudioFormat()
    self.create_AUDIO(format)
    self.generator = Flute(format, self)
    self.generator.start()
    self.output.start(self.generator)
def __init__(self, parent: QWidget = None) -> None:
    super().__init__(parent)
    self.m_deviceInfo: QAudioDeviceInfo = QAudioDeviceInfo()
    self.m_settings: QAudioFormat = QAudioFormat()

    self.testButton.clicked.connect(self.test)
    self.modeBox.activated[int].connect(self.modeChanged)
    self.deviceBox.activated[int].connect(self.deviceChanged)
    self.sampleRateBox.activated[int].connect(self.sampleRateChanged)
    self.channelsBox.activated[int].connect(self.channelChanged)
    self.codecsBox.activated[int].connect(self.codecChanged)
    self.sampleSizesBox.activated[int].connect(self.sampleSizeChanged)
    self.sampleTypesBox.activated[int].connect(self.sampleTypeChanged)
    self.endianBox.activated[int].connect(self.endianChanged)
    self.populateTableButton.clicked.connect(self.populateTable)

    self.modeBox.setCurrentIndex(0)
    self.modeChanged(0)
    self.deviceBox.setCurrentIndex(0)
    self.deviceChanged(0)
class MayRenderer(QWidget):

    shouldSave = pyqtSignal()

    texsize = 512
    samplerate = 44100

    shaderHeader = '#version 130\nuniform float iTexSize;\nuniform float iBlockOffset;\nuniform float iSampleRate;\n\n'

    def __init__(self, parent):
        super().__init__()
        self.parent = parent
        self.blocksize = (self.texsize * self.texsize) / self.samplerate
        self.initState()
        self.initUI()
        self.initAudio()

    def initState(self):
        self.playing = False
        self.initVolume = 1
        self.useWatchFile = False
        self.watchFileName = ''
        self.storeCodeIfNotWatching = ''
        self.useSynDump = False
        self.synDrumName = ''
        self.synFileName = ''

    def initUI(self):
        self.mainLayout = QVBoxLayout()
        self.codeLayout = QVBoxLayout()
        self.codeButtonBar = QHBoxLayout()
        self.codeWatchFileBar = QHBoxLayout()
        self.renderBar = QHBoxLayout()
        self.playbackBar = QHBoxLayout()

        self.renderGroup = QGroupBox()
        self.renderGroupLayout = QVBoxLayout()
        self.renderGroupLayout.addLayout(self.renderBar)
        self.renderGroupLayout.addLayout(self.playbackBar)
        self.renderGroup.setLayout(self.renderGroupLayout)
        self.renderGroup.setObjectName("renderGroup")

        self.synGroupLayout = QHBoxLayout()
        self.synGroup = QGroupBox()
        self.synGroup.setLayout(self.synGroupLayout)
        self.synDumpCheckBox = QCheckBox('Dump as')
        self.synDrumNameBox = QLineEdit(self)
        self.synFileNameBox = QLineEdit(self)
        self.synFileButton = QPushButton('...')
        self.synGroupLayout.addWidget(self.synDumpCheckBox, 1)
        self.synGroupLayout.addWidget(self.synDrumNameBox, 2)
        self.synGroupLayout.addWidget(QLabel('in'))
        self.synGroupLayout.addWidget(self.synFileNameBox, 5)
        self.synGroupLayout.addWidget(self.synFileButton)
        self.synDumpCheckBox.stateChanged.connect(self.toggleSynDump)
        self.synDrumNameBox.setPlaceholderText('drumname')
        self.synDrumNameBox.textChanged.connect(self.setSynDrumName)
        self.synFileNameBox.setPlaceholderText('some aMaySyn .syn file')
        self.synFileNameBox.textChanged.connect(self.setSynFileName)
        self.synFileButton.setMaximumWidth(40)
        self.synFileButton.clicked.connect(self.chooseSynFile)

        self.codeGroup = QGroupBox()
        self.buttonCopy = QPushButton('↬ Clipboard', self)
        self.buttonCopy.clicked.connect(self.copyToClipboard)
        self.buttonPaste = QPushButton('Paste ↴', self)
        self.buttonPaste.clicked.connect(self.pasteClipboard)
        self.buttonClear = QPushButton('×', self)
        self.buttonClear.clicked.connect(self.clearEditor)
        self.codeEditor = QPlainTextEdit(self)
        self.codeEditor.setLineWrapMode(QPlainTextEdit.WidgetWidth)
        # self.codeEditor.setCenterOnScroll(True)
        # self.codeEditor.textChanged.connect(self.formatEditor)
        # this gives a recursion problem, but how to filter e.g. tabs?
        self.codeEditor.cursorPositionChanged.connect(self.updatePosLabel)
        self.codeEditor.setTabStopWidth(14)

        self.watchFileCheckBox = QCheckBox('watch file:', self)
        self.watchFileCheckBox.stateChanged.connect(self.toggleWatchFile)
        self.watchFileNameBox = QLineEdit(self)
        self.watchFileNameBox.setPlaceholderText('use GLSL code file instead of the above editor...')
        self.watchFileNameBox.textChanged.connect(self.setWatchFileName)
        self.buttonWatchFile = QPushButton('...', self)
        self.buttonWatchFile.setMaximumWidth(40)
        self.buttonWatchFile.clicked.connect(self.chooseWatchFile)

        self.renderButton = QPushButton(self)
        self.renderButton.clicked.connect(self.pressRenderShader)
        self.renderLengthBox = QDoubleSpinBox(self)
        self.renderLengthBox.setMinimum(0)
        self.renderLengthBox.setValue(4 * self.blocksize - .01)
        self.renderLengthBox.setSingleStep(self.blocksize)
        self.renderLengthBox.setSuffix(' sec')
        self.renderLengthBox.setToolTip('render length')
        self.renderBpmBox = QSpinBox(self)
        self.renderBpmBox.setRange(1, 999)
        self.renderBpmBox.setValue(160)
        self.renderBpmBox.setPrefix('BPM ')
        self.renderBpmBox.setToolTip('--> determines SPB')
        self.playbackVolumeSlider = QSlider(Qt.Horizontal)
        self.playbackVolumeSlider.setMaximum(100)
        self.playbackVolumeSlider.setValue(self.initVolume * 100)
        self.playbackVolumeSlider.setToolTip('volume')
        self.playbackVolumeSlider.sliderMoved.connect(self.setVolume)

        self.renderBar.addWidget(self.renderButton, 60)
        self.renderBar.addWidget(self.renderBpmBox, 20)
        self.renderBar.addWidget(self.renderLengthBox, 20)

        self.progressBar = QProgressBar(self)
        self.progressBar.setEnabled(False)
        self.pauseButton = QPushButton(self)
        self.pauseButton.setEnabled(False)
        self.pauseButton.clicked.connect(self.pressPauseButton)
        self.playbackBar.addWidget(self.progressBar, 60)
        self.playbackBar.addWidget(self.playbackVolumeSlider, 20)
        self.playbackBar.addWidget(self.pauseButton, 20)

        self.codeButtonBar.addWidget(self.buttonCopy)
        self.codeButtonBar.addWidget(self.buttonPaste)
        self.codeButtonBar.addWidget(self.buttonClear)
        self.codeWatchFileBar.addWidget(self.watchFileCheckBox)
        self.codeWatchFileBar.addWidget(self.watchFileNameBox)
        self.codeWatchFileBar.addWidget(self.buttonWatchFile)

        self.codeHeader = QHBoxLayout()
        self.codePosLabel = QLabel('(0,0)')
        self.codeHeader.addWidget(QLabel('GLSL code'))
        self.codeHeader.addStretch()
        self.codeHeader.addWidget(self.codePosLabel)

        self.codeLayout.addLayout(self.codeHeader)
        self.codeLayout.addLayout(self.codeButtonBar)
        self.codeLayout.addWidget(self.codeEditor)
        self.codeLayout.addLayout(self.codeWatchFileBar)
        self.codeGroup.setLayout(self.codeLayout)

        self.mainLayout.addWidget(self.synGroup)
        self.mainLayout.addWidget(self.codeGroup)
        self.mainLayout.addWidget(self.renderGroup)

        self.updatePlayingUI()
        self.setLayout(self.mainLayout)

    def updatePlayingUI(self, keepActive=False):
        self.renderButton.setText('shut the f**k up' if self.playing else 'send to hell')
        if not self.playing and not keepActive:
            self.progressBar.setValue(0)
        self.progressBar.setEnabled(self.playing if not keepActive else True)
        self.pauseButton.setEnabled(self.playing if not keepActive else True)
        self.pauseButton.setText('||' if (self.playing and self.audiooutput.state() != QAudio.SuspendedState) else '▶')

    def initAudio(self):
        self.audioformat = QAudioFormat()
        self.audioformat.setSampleRate(self.samplerate)
        self.audioformat.setChannelCount(2)
        self.audioformat.setSampleSize(32)
        self.audioformat.setCodec('audio/pcm')
        self.audioformat.setByteOrder(QAudioFormat.LittleEndian)
        self.audioformat.setSampleType(QAudioFormat.Float)
        # self.audiodeviceinfo = QAudioDeviceInfo(QAudioDeviceInfo.defaultOutputDevice())
        self.audiooutput = QAudioOutput(self.audioformat)
        self.audiooutput.setVolume(self.initVolume)

    def paste(self, source):
        self.codeEditor.clear()
        source = source.replace(4 * ' ', '\t').replace(3 * ' ', '\t')
        self.codeEditor.insertPlainText(source)
        # self.codeEditor.setFocus()  # TODO: think about whether we want this
        self.codeEditor.ensureCursorVisible()

    def pasteClipboard(self):
        self.paste(self.shaderHeader + QApplication.clipboard().text())

    def copyToClipboard(self):
        text = self.codeEditor.toPlainText().replace('\t', 4 * ' ')
        QApplication.clipboard().setText(text)

    def clearEditor(self):
        self.codeEditor.setPlainText('')
        self.codeEditor.setFocus()

    def updatePosLabel(self):
        cursor = self.codeEditor.textCursor()
        self.codePosLabel.setText(f'({cursor.blockNumber()},{cursor.positionInBlock()})')

    # def formatEditor(self):
    #     plainText = self.codeEditor.toPlainText().replace('\t', 4*' ')
    #     self.codeEditor.setPlainText(plainText)

    def toggleSynDump(self, state):
        self.useSynDump = (state == Qt.Checked)
        self.shouldSave.emit()

    def chooseSynFile(self):
        dialogResult, _ = QFileDialog.getSaveFileName(self, 'Choose SYN definition file', '',
                                                      'aMaySyn definition files (*.syn);;All files (*)')
        print(dialogResult)
        self.synFileNameBox.setText(dialogResult)
        self.synDumpCheckBox.setCheckState(Qt.Checked)
        if self.synFileName == '':
            self.synFileNameBox.setFocus()
        self.shouldSave.emit()

    def setSynFileName(self):
        self.synFileName = self.synFileNameBox.text()
        self.shouldSave.emit()

    def setSynDrumName(self):
        self.synDrumName = self.synDrumNameBox.text()
        self.shouldSave.emit()

    def setSynDumpParameters(self, useSynDump, synFileName, synDrumName):
        self.synDumpCheckBox.setChecked(useSynDump)
        self.synFileNameBox.setText(synFileName)
        self.synDrumNameBox.setText(synDrumName)

    def toggleWatchFile(self, state):
        if not self.useWatchFile and state == Qt.Checked:
            self.storeCodeIfNotWatching = self.codeEditor.toPlainText()
        self.useWatchFile = (state == Qt.Checked)
        self.codeEditor.setEnabled(not self.useWatchFile)
        if self.useWatchFile:
            self.showWatchFileInfo()
        else:
            self.codeEditor.setPlainText(self.storeCodeIfNotWatching)

    def chooseWatchFile(self):
        dialogResult = QFileDialog.getOpenFileName(self, 'Choose file with GLSL code', '',
                                                   'GLSL files (*.glsl);;All files (*)')
        print(dialogResult)
        self.watchFileNameBox.setText(dialogResult[0])
        self.watchFileCheckBox.setCheckState(Qt.Checked)
        self.shouldSave.emit()

    def setWatchFileName(self):
        self.watchFileName = self.watchFileNameBox.text()
        self.showWatchFileInfo()

    def showWatchFileInfo(self):
        if self.useWatchFile:
            fileInfo = QFileInfo(self.watchFileName)
            infoText = 'use code from file:\n' + self.watchFileName + '\n' + \
                       ('(exists)' if fileInfo.exists() else '(doesn\'t exist)')
            self.codeEditor.setPlainText(infoText)

    def pressRenderShader(self):
        self.playing = not self.playing
        if self.playing:
            self.renderShaderAndPlay()
        else:
            self.stopShader()

    def pressPauseButton(self):
        state = self.audiooutput.state()
        if state == QAudio.ActiveState:
            self.audiooutput.suspend()
        elif state == QAudio.SuspendedState:
            self.audiooutput.resume()
        self.updatePlayingUI(keepActive=True)

    def stopShader(self):
        self.audiooutput.stop()
        self.updatePlayingUI()

    def renderShaderAndPlay(self, file=None):
        self.playing = True
        self.updatePlayingUI()

        shaderSource = self.shaderHeader + """
        uniform float SPB;
        void main()
        {
            float t = (iBlockOffset + gl_FragCoord.x + gl_FragCoord.y*iTexSize) / iSampleRate;
            t = floor(t*BITS.) / BITS.;
            vec2 s = .2 * vec2(sin(2.*3.14159*49.*t*(1.+t)*SPB*2.667)); // let's make it fun and squeaky
            vec2 v = floor((0.5+0.5*s)*65535.0);
            vec2 vl = mod(v,256.0)/255.0;
            vec2 vh = floor(v/256.0)/255.0;
            gl_FragColor = vec4(vl.x,vh.x,vl.y,vh.y);
        }
        """
        # this is the SUPER FUN BITCRUSHER for the test shader
        nr_bits = randint(128, 8192)
        shaderSource = shaderSource.replace('BITS', str(nr_bits))
        print(nr_bits, 'bits for the SUPER FUN BITCRUSHER in the test shader.')

        starttime = datetime.now()

        try:
            if self.useWatchFile:
                watchFile = QFile(self.watchFileName)
                if not watchFile.open(QFile.ReadOnly | QFile.Text):
                    QMessageBox.warning(self, "Um... awkward.",
                                        "Couldn't open the file. Is there enough pepper on it?")
                    self.playing = False
                    self.updatePlayingUI()
                    return
                textStream = QTextStream(watchFile)
                textStream.setCodec('utf-8')
                shaderSource = self.shaderHeader + textStream.readAll()
            else:
                code = self.codeEditor.toPlainText()
                if code:
                    shaderSource = code
        except:
            raise

        uniforms = {}
        SPB = 60 / float(self.renderBpmBox.value())
        uniforms.update({'SPB': SPB})

        print(self.renderLengthBox.value())
        try:
            duration = self.renderLengthBox.value()
        except:
            print('couldn\'t read duration field. take 10secs.')
            duration = 10

        glwidget = SFXGLWidget(self, self.audioformat.sampleRate(), duration, self.texsize,
                               moreUniforms=uniforms)
        glwidget.show()
        log = glwidget.newShader(shaderSource)
        print(log)
        self.music = glwidget.music
        floatmusic = glwidget.floatmusic
        glwidget.hide()
        glwidget.destroy()
        if self.music is None:
            return

        self.renderLengthBox.setValue(round(glwidget.duration_real, 2) - .01)

        self.bytearray = QByteArray(self.music)
        self.audiobuffer = QBuffer(self.bytearray)
        self.audiobuffer.open(QIODevice.ReadOnly)

        endtime = datetime.now()
        el = endtime - starttime
        print("Compile time: {:.3f}s".format(el.total_seconds()))

        self.audiooutput.stop()
        self.audiooutput.start(self.audiobuffer)
        self.audiooutput.setNotifyInterval(100)
        self.audiooutput.stateChanged.connect(self.updatePlayingUI)
        self.progressBar.setMaximum(self.audiobuffer.size())
        self.audiooutput.notify.connect(self.proceedAudio)

        if file is not None:
            floatmusic_L = []
            floatmusic_R = []
            for n, sample in enumerate(floatmusic):
                if n % 2 == 0:
                    floatmusic_L.append(sample)
                else:
                    floatmusic_R.append(sample)
            floatmusic_stereo = np.transpose(np.array([floatmusic_L, floatmusic_R], dtype=np.float32))
            wavfile.write(file, self.samplerate, floatmusic_stereo)

    def proceedAudio(self):
        # print(self.audiobuffer.pos() / self.audioformat.sampleRate())
        self.progressBar.setValue(self.audiobuffer.pos())
        if self.audiobuffer.atEnd():
            self.audiooutput.stop()
            self.playing = False
            self.updatePlayingUI()

    def setVolume(self):
        self.audiooutput.setVolume(self.playbackVolumeSlider.value() * .01)

    def dumpInSynFile(self, drumatizeL, drumatizeR, envCode, releaseTime):
        if not self.synDumpCheckBox.isChecked():
            return
        if self.synDrumName == '':
            print("specify a valid drum name!!")
            return
        if self.synFileName == '':
            print("specify a valid .syn filename!!")
            return
        if not path.exists(self.synFileName):
            open(self.synFileName, 'a').close()

        uniqueEnv = f'_{self.synDrumName}ENV'
        drumatizeL = drumatizeL.replace('__ENV', uniqueEnv)
        drumatizeR = drumatizeR.replace('__ENV', uniqueEnv)
        envCode = envCode.replace('__ENV', uniqueEnv).replace('\n', ' ')
        parLine = f'param include src="{envCode}"\n'
        synLine = f'maindrum {self.synDrumName} src="{drumatizeL}" srcr="{drumatizeR}" release={releaseTime}\n'
        print(parLine, '\n', synLine)

        tmpSynFile = 'tmp.syn'
        copyfile(self.synFileName, tmpSynFile)
        parWritten, synWritten = False, False
        with open(tmpSynFile, 'r') as synFileHandle:
            synFileLines = synFileHandle.readlines()
        with open(self.synFileName, 'w') as synFileHandle:
            for line in synFileLines:
                parseLine = line.strip('\n').split()
                if parseLine[0:2] == ['maindrum', self.synDrumName]:
                    synFileHandle.write(synLine)
                    synWritten = True
                elif parseLine[0:2] == ['param', 'include'] and line.find(uniqueEnv) != -1:
                    synFileHandle.write(parLine)
                    parWritten = True
                else:
                    synFileHandle.write(line)
            if not parWritten:
                synFileHandle.write('\n' + parLine)
            if not synWritten:
                synFileHandle.write('\n' + synLine)
def __init__(self, parent=None):
    QWidget.__init__(self, parent)
    format = QAudioFormat()
    format.setChannelCount(AUDIO_CHANS)
    format.setSampleRate(SAMPLE_RATE)
    format.setSampleSize(SAMPLE_SIZE)
    format.setCodec("audio/pcm")
    format.setByteOrder(QAudioFormat.LittleEndian)
    format.setSampleType(QAudioFormat.SignedInt)
    self.output = QAudioOutput(format, self)
    output_buffer_size = int(2 * SAMPLE_RATE * CTRL_INTERVAL / 1000)
    self.output.setBufferSize(output_buffer_size)
    self.generator = Meep(format, self)
    self.generator.start()
    self.output.start(self.generator)

    # Create the port reader object
    self.midiListener = MidiPortReader()
    # Create a thread which will read it
    self.listenerThread = QThread()
    # Take the object and move it to the new thread (it isn't running yet)
    self.midiListener.moveToThread(self.listenerThread)
    self.midiListener.newNoteFrequency.connect(self.generator.changeFreq)
    # Tell Qt the function to call when it starts the thread
    self.listenerThread.started.connect(self.midiListener.listener)
    # Fingers in ears, eyes tight shut...
    self.listenerThread.start()
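MidiPortReader itself is not shown in this snippet. A rough skeleton of what it is assumed to do (the signal name matches the connect call above; the MIDI polling here uses mido as one plausible backend):

class MidiPortReader(QObject):
    newNoteFrequency = pyqtSignal(float)

    def listener(self):
        import mido
        with mido.open_input() as port:      # default MIDI input
            for msg in port:                 # blocking iteration; runs on listenerThread
                if msg.type == 'note_on' and msg.velocity > 0:
                    # MIDI note number -> frequency in Hz (A4 = note 69 = 440 Hz)
                    self.newNoteFrequency.emit(440.0 * 2 ** ((msg.note - 69) / 12))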
def __init__(self, output_path: str, mw: aqt.AnkiQt, parent: QWidget) -> None:
    super().__init__(output_path)
    self.mw = mw
    self._parent = parent

    from PyQt5.QtMultimedia import QAudioDeviceInfo, QAudioFormat, QAudioInput

    format = QAudioFormat()
    format.setChannelCount(1)
    format.setSampleRate(44100)
    format.setSampleSize(16)
    format.setCodec("audio/pcm")
    format.setByteOrder(QAudioFormat.LittleEndian)
    format.setSampleType(QAudioFormat.SignedInt)

    device = QAudioDeviceInfo.defaultInputDevice()
    if not device.isFormatSupported(format):
        format = device.nearestFormat(format)
        print("format changed")
        print("channels", format.channelCount())
        print("rate", format.sampleRate())
        print("size", format.sampleSize())
    self._format = format

    self._audio_input = QAudioInput(device, format, parent)
def changeinput(self, val):
    audio = QAudioFormat()
    audio.setSampleRate(44100)
    audio.setSampleType(QAudioFormat.UnSignedInt)
    audio.setSampleSize(8)
    audio.setCodec('audio/pcm')
    audio.setChannelCount(1)
    self.input = QAudioInput(self.inputdevices[val], audio)
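One way the self.inputdevices list used above could be populated (an assumption, since that code is not shown; inputCombo is a hypothetical combo box whose index drives changeinput):

from PyQt5.QtMultimedia import QAudio, QAudioDeviceInfo

self.inputdevices = QAudioDeviceInfo.availableDevices(QAudio.AudioInput)
for dev in self.inputdevices:
    self.inputCombo.addItem(dev.deviceName())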
def __init__(self):
    super().__init__()

    self.m_chart = QChart()
    chart_view = QChartView(self.m_chart)
    chart_view.setMinimumSize(800, 600)
    self.m_series = QLineSeries()
    self.m_chart.addSeries(self.m_series)

    axis_x = QValueAxis()
    axis_x.setRange(0, 2000)
    axis_x.setLabelFormat("%g")
    axis_x.setTitleText("Samples")
    axis_y = QValueAxis()
    axis_y.setRange(-1, 1)
    axis_y.setTitleText("Audio level")
    self.m_chart.setAxisX(axis_x, self.m_series)
    self.m_chart.setAxisY(axis_y, self.m_series)
    self.m_chart.setTitle("Data from the microphone")

    main_layout = QVBoxLayout()
    main_layout.addWidget(chart_view)
    self.setLayout(main_layout)

    format_audio = QAudioFormat()
    format_audio.setSampleRate(48000)
    format_audio.setChannelCount(1)
    format_audio.setSampleSize(8)
    format_audio.setCodec("audio/pcm")
    format_audio.setByteOrder(QAudioFormat.LittleEndian)
    format_audio.setSampleType(QAudioFormat.UnSignedInt)

    input_device = QAudioDeviceInfo.defaultInputDevice()
    self.m_audio_input = QAudioInput(input_device, format_audio)

    self.m_device = XYSeriesIODevice(self.m_series)
    self.m_device.open(QIODevice.WriteOnly)
    self.m_audio_input.start(self.m_device)

    self.init_ui()
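XYSeriesIODevice is referenced but not defined in this snippet. A minimal sketch of what it might do, given the format above (mono, 8-bit unsigned PCM): treat each incoming byte as one sample, rescale it to [-1, 1], and keep a rolling window on the series:

from PyQt5.QtCore import QIODevice, QPointF

class XYSeriesIODevice(QIODevice):
    SAMPLE_COUNT = 2000   # matches the 0..2000 x-axis range above

    def __init__(self, series, parent=None):
        super().__init__(parent)
        self.series = series
        self.points = [QPointF(x, 0.0) for x in range(self.SAMPLE_COUNT)]

    def readData(self, maxlen):
        return b''   # write-only device

    def writeData(self, data):
        # shift new samples into the rolling window, rescaling 0..255 -> -1..1
        for b in data:
            self.points.pop(0)
            self.points.append(QPointF(0, (b - 128) / 128.0))
        for x, p in enumerate(self.points):
            p.setX(x)
        self.series.replace(self.points)
        return len(data)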
def on_actStart_triggered(self):
    audioFormat = QAudioFormat()            # use a fixed format
    audioFormat.setSampleRate(8000)
    audioFormat.setChannelCount(1)
    audioFormat.setSampleSize(8)
    audioFormat.setCodec("audio/pcm")
    audioFormat.setByteOrder(QAudioFormat.LittleEndian)
    audioFormat.setSampleType(QAudioFormat.UnSignedInt)

    index = self.ui.comboDevices.currentIndex()
    deviceInfo = self.__deviceList[index]   # currently selected audio device
    if not deviceInfo.isFormatSupported(audioFormat):
        QMessageBox.critical(self, "Error", "Test failed: the input device does not support these settings")
        return

    self.audioDevice = QAudioInput(deviceInfo, audioFormat)  # audio input device
    self.audioDevice.setBufferSize(self.BUFFER_SIZE)  # requested buffer size in bytes; not necessarily the actual block size
    self.audioDevice.stateChanged.connect(self.do_stateChanged)  # react to state changes

    ## 1. start() -> QIODevice: pull mode using the built-in IO device;
    ##    read the data out via the readyRead() signal
    if self.ui.radioSaveMode_Inner.isChecked():
        self.ioDevice = self.audioDevice.start()  # returns the built-in QIODevice
        self.ioDevice.readyRead.connect(self.do_IO_readyRead)

    ## 2. custom stream device QWAudioBlockReader, push mode via start(QIODevice) -- doesn't work
    ##  if self.ui.radioSaveMode_External.isChecked():
    ##      self.externalReader = QmyAudioReader()
    ##      self.externalReader.open(QIODevice.WriteOnly)
    ##      self.externalReader.updateBlockInfo.connect(self.do_updateBlockInfo)
    ##      self.audioDevice.start(self.externalReader)  # use an external IO device

    ## 3. write to a file, started with start(QIODevice)
    if self.ui.radioSaveMode_QFile.isChecked():
        self.recordFile.setFileName("test.raw")
        self.recordFile.open(QIODevice.WriteOnly)
        self.audioDevice.start(self.recordFile)
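A sketch of the pull-mode slot assumed above; do_IO_readyRead just drains whatever the built-in device has buffered (processBlock is a hypothetical handler, e.g. for saving or level metering):

def do_IO_readyRead(self):
    data = self.ioDevice.readAll()        # QByteArray with the newest samples
    if data.size() > 0:
        self.processBlock(bytes(data))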
class SignalProc:
    """ This class reads and holds the audiodata and spectrogram, to be used in the main interface.
    Inverse, denoise, and other processing algorithms are provided here.
    Also bandpass and Butterworth bandpass filters.
    Primary parameters are the width of a spectrogram window (window_width)
    and the shift between them (incr)
    """

    def __init__(self, window_width=256, incr=128, minFreqShow=0, maxFreqShow=0):
        # maxFreq = 0 means fall back to Fs/2 for any file.
        self.window_width = window_width
        self.incr = incr
        self.minFreqShow = minFreqShow
        self.maxFreqShow = maxFreqShow
        self.data = []

        # only accepting wav files of this format
        self.audioFormat = QAudioFormat()
        self.audioFormat.setCodec("audio/pcm")
        self.audioFormat.setByteOrder(QAudioFormat.LittleEndian)
        self.audioFormat.setSampleType(QAudioFormat.SignedInt)

    def readWav(self, file, len=None, off=0, silent=False):
        """ Args the same as for wavio.read: filename, length in seconds, offset in seconds. """
        wavobj = wavio.read(file, len, off)
        self.data = wavobj.data

        # take only left channel
        if np.shape(np.shape(self.data))[0] > 1:
            self.data = self.data[:, 0]
        self.audioFormat.setChannelCount(1)

        # force float type
        if self.data.dtype != 'float':
            self.data = self.data.astype('float')
        self.audioFormat.setSampleSize(wavobj.sampwidth * 8)

        # total file length in s read from header (useful for paging)
        self.fileLength = wavobj.nframes

        self.sampleRate = wavobj.rate
        self.audioFormat.setSampleRate(self.sampleRate)

        # *Freq sets hard bounds, *Show can limit the spec display
        self.minFreq = 0
        self.maxFreq = self.sampleRate // 2
        self.minFreqShow = max(self.minFreq, self.minFreqShow)
        self.maxFreqShow = min(self.maxFreq, self.maxFreqShow)

        if not silent:
            print("Detected format: %d channels, %d Hz, %d bit samples" %
                  (self.audioFormat.channelCount(), self.audioFormat.sampleRate(),
                   self.audioFormat.sampleSize()))

    def resample(self, target):
        if len(self.data) == 0:
            print("Warning: no data set to resample")
            return
        if target == self.sampleRate:
            print("No resampling needed")
            return

        self.data = librosa.core.audio.resample(self.data, self.sampleRate, target)

        self.sampleRate = target
        self.audioFormat.setSampleRate(target)
        self.minFreq = 0
        self.maxFreq = self.sampleRate // 2
        self.fileLength = len(self.data)

    def convertAmpltoSpec(self, x):
        """ Unit conversion, for easier use wherever spectrograms are needed """
        return x*self.sampleRate/self.incr

    def setWidth(self, window_width, incr):
        # Does what it says. Called when the user modifies the spectrogram parameters
        self.window_width = window_width
        self.incr = incr

    def setData(self, audiodata, sampleRate=None):
        self.data = audiodata
        if sampleRate is not None:
            self.sampleRate = sampleRate

    def SnNR(self, startSignal, startNoise):
        # Compute the estimated signal-to-noise ratio
        pS = np.sum(self.data[startSignal:startSignal+self.length]**2)/self.length
        pN = np.sum(self.data[startNoise:startNoise+self.length]**2)/self.length
        return 10.*np.log10(pS/pN)

    def equalLoudness(self, data):
        # TODO: Assumes 16000 sampling rate, fix!
        # Basically, save a few more sets of filter coefficients...

        # Basic equal loudness curve.
        # This is for humans, NOT birds (there is a paper that claims to have some,
        # but I can't access it: https://doi.org/10.1121/1.428951)

        # The filter weights were obtained from Matlab (using yulewalk) for the standard
        # 80 dB ISO curve for a sampling rate of 16000

        # 10 coefficient Yule-Walker fit for
        # [0,120;20,113;30,103;40,97;50,93;60,91;70,89;80,87;90,86;100,85;200,78;300,76;400,76;500,76;600,76;700,77;800,78;900,79.5;1000,80;1500,79;2000,77;2500,74;3000,71.5;3700,70;4000,70.5;5000,74;6000,79;7000,84;8000,86]
        # Or at least, EL80(:,1)./(fs/2) and m = 10.^((70-EL80(:,2))/20);
        ay = np.array([1.0000, -0.6282, 0.2966, -0.3726, 0.0021, -0.4203, 0.2220, 0.0061, 0.0675, 0.0578, 0.0322])
        by = np.array([0.4492, -0.1435, -0.2278, -0.0142, 0.0408, -0.1240, 0.0410, 0.1048, -0.0186, -0.0319, 0.0054])

        # Butterworth highpass
        ab = np.array([1.0000, -1.9167, 0.9201])
        bb = np.array([0.9592, -1.9184, 0.9592])

        data = signal.lfilter(by, ay, data)
        data = signal.lfilter(bb, ab, data)
        return data

    # from memory_profiler import profile
    # fp = open('memory_profiler_sp.log', 'w+')
    # @profile(stream=fp)
    def spectrogram(self, window_width=None, incr=None, window='Hann', equal_loudness=False,
                    mean_normalise=True, onesided=True, multitaper=False, need_even=False):
        """ Compute the spectrogram from amplitude data
        Returns the power spectrum, not the density -- compute 10.*log10(sg) before plotting.
        Uses absolute value of the FT, not FT*conj(FT), 'cos it seems to give better discrimination
        Options: multitaper version, but it's slow, mean normalised, even, one-sided.
        This version is faster than the default versions in pylab and scipy.signal
        Assumes that the values are not normalised.
        """
        if self.data is None or len(self.data) == 0:
            print("ERROR: attempted to calculate spectrogram without audiodata")
            return

        if window_width is None:
            window_width = self.window_width
        if incr is None:
            incr = self.incr

        # clean handling of very short segments:
        if len(self.data) <= window_width:
            window_width = len(self.data) - 1

        self.sg = np.copy(self.data)
        if self.sg.dtype != 'float':
            self.sg = self.sg.astype('float')

        # Set of window options
        if window == 'Hann':
            # This is the Hann window
            window = 0.5 * (1 - np.cos(2 * np.pi * np.arange(window_width) / (window_width - 1)))
        elif window == 'Parzen':
            # Parzen (window_width even)
            n = np.arange(window_width) - 0.5*window_width
            window = np.where(np.abs(n) < 0.25*window_width,
                              1 - 6*(n/(0.5*window_width))**2*(1-np.abs(n)/(0.5*window_width)),
                              2*(1-np.abs(n)/(0.5*window_width))**3)
        elif window == 'Welch':
            # Welch
            window = 1.0 - ((np.arange(window_width) - 0.5*(window_width-1))/(0.5*(window_width-1)))**2
        elif window == 'Hamming':
            # Hamming
            alpha = 0.54
            beta = 1. - alpha
            window = alpha - beta*np.cos(2 * np.pi * np.arange(window_width) / (window_width - 1))
        elif window == 'Blackman':
            # Blackman
            alpha = 0.16
            a0 = 0.5*(1-alpha)
            a1 = 0.5
            a2 = 0.5*alpha
            window = a0 - a1*np.cos(2 * np.pi * np.arange(window_width) / (window_width - 1)) \
                        + a2*np.cos(4 * np.pi * np.arange(window_width) / (window_width - 1))
        elif window == 'BlackmanHarris':
            # Blackman-Harris
            a0 = 0.358375
            a1 = 0.48829
            a2 = 0.14128
            a3 = 0.01168
            window = a0 - a1*np.cos(2 * np.pi * np.arange(window_width) / (window_width - 1)) \
                        + a2*np.cos(4 * np.pi * np.arange(window_width) / (window_width - 1)) \
                        - a3*np.cos(6 * np.pi * np.arange(window_width) / (window_width - 1))
        elif window == 'Ones':
            window = np.ones(window_width)
        else:
            print("Unknown window, using Hann")
            window = 0.5 * (1 - np.cos(2 * np.pi * np.arange(window_width) / (window_width - 1)))

        if equal_loudness:
            self.sg = self.equalLoudness(self.sg)

        if mean_normalise:
            self.sg -= self.sg.mean()

        starts = range(0, len(self.sg) - window_width, incr)
        if multitaper:
            [tapers, eigen] = dpss(window_width, 2.5, 4)
            counter = 0
            out = np.zeros((len(starts), window_width // 2))
            for start in starts:
                Sk, weights, eigen = pmtm(self.sg[start:start + window_width], v=tapers, e=eigen, show=False)
                Sk = abs(Sk)**2
                Sk = np.mean(Sk.T * weights, axis=1)
                out[counter:counter + 1, :] = Sk[window_width // 2:].T
                counter += 1
            self.sg = np.fliplr(out)
        else:
            if need_even:
                starts = np.hstack((starts, np.zeros((window_width - len(self.sg) % window_width), dtype=int)))

            # this mode is optimized for speed, but reportedly sometimes
            # results in crashes when lots of large files are batch processed.
            # The FFTs here could be causing this, but I'm not sure.
            # hi_mem = False should switch FFTs to go over smaller vectors
            # and possibly use less caching, at the cost of 1.5x longer CPU time.
            hi_mem = True
            if hi_mem:
                ft = np.zeros((len(starts), window_width))
                for i in starts:
                    ft[i // incr, :] = self.sg[i:i + window_width]
                ft = np.multiply(window, ft)

                if onesided:
                    self.sg = np.absolute(fft.fft(ft)[:, :window_width//2])
                else:
                    self.sg = np.absolute(fft.fft(ft))
            else:
                if onesided:
                    ft = np.zeros((len(starts), window_width//2))
                    for i in starts:
                        winddata = window * self.sg[i:i + window_width]
                        ft[i // incr, :] = fft.fft(winddata)[:window_width//2]
                else:
                    ft = np.zeros((len(starts), window_width))
                    for i in starts:
                        winddata = window * self.sg[i:i + window_width]
                        ft[i // incr, :] = fft.fft(winddata)
                self.sg = np.absolute(ft)

            del ft
            gc.collect()
            # sg = (ft*np.conj(ft))[:, window_width // 2:].T

        return self.sg

    def bandpassFilter(self, data=None, sampleRate=None, start=0, end=None):
        """ FIR bandpass filter
        128 taps, Hamming window, very basic.
        """
        if data is None:
            data = self.data
        if sampleRate is None:
            sampleRate = self.sampleRate
        if end is None:
            end = sampleRate/2
        start = max(start, 0)
        end = min(end, sampleRate/2)

        if start == 0 and end == sampleRate/2:
            print("No filter needed!")
            return data

        nyquist = sampleRate/2
        ntaps = 129

        if start == 0:
            # Low pass
            taps = signal.firwin(ntaps, cutoff=[end / nyquist], window=('hamming'), pass_zero=True)
        elif end == sampleRate/2:
            # High pass
            taps = signal.firwin(ntaps, cutoff=[start / nyquist], window=('hamming'), pass_zero=False)
        else:
            # Bandpass
            taps = signal.firwin(ntaps, cutoff=[start / nyquist, end / nyquist], window=('hamming'), pass_zero=False)
        # ntaps, beta = signal.kaiserord(ripple_db, width)
        # taps = signal.firwin(ntaps, cutoff=[500/nyquist, 8000/nyquist], window=('kaiser', beta), pass_zero=False)
        return signal.lfilter(taps, 1.0, data)

    def ButterworthBandpass(self, data, sampleRate, low=0, high=None, band=0.005):
        """ Basic IIR bandpass filter.
        Identifies order of filter, max 10. If single-stage polynomial is unstable,
        switches to order 30, second-order filter.
        Args:
        1-2. data and sample rate.
        3-4. Low and high pass frequencies in Hz
        5. difference between stopband and passband, in fraction of Nyquist.
        Filter will lose no more than 3 dB in freqs [low, high], and attenuate
        at least 40 dB outside [low-band*Fn, high+band*Fn].
        Does double-pass filtering - slower, but keeps original phase.
        """
        if data is None:
            data = self.data
        if sampleRate is None:
            sampleRate = self.sampleRate
        nyquist = sampleRate/2
        if high is None:
            high = nyquist
        low = max(low, 0)
        high = min(high, nyquist)

        # convert freqs to fractions of Nyquist:
        lowPass = low/nyquist
        highPass = high/nyquist
        lowStop = lowPass - band
        highStop = highPass + band
        # safety checks for values near edges
        if lowStop < 0:
            lowStop = lowPass/2
        if highStop > 1:
            highStop = (1+highPass)/2

        if lowPass == 0 and highPass == 1:
            print("No filter needed!")
            return data
        elif lowPass == 0:
            # Low pass; calculate the best order
            order, wN = signal.buttord(highPass, highStop, 3, 40)
            if order > 10:
                order = 10
            b, a = signal.butter(order, wN, btype='lowpass')
        elif highPass == 1:
            # High pass; calculate the best order
            order, wN = signal.buttord(lowPass, lowStop, 3, 40)
            if order > 10:
                order = 10
            b, a = signal.butter(order, wN, btype='highpass')
        else:
            # Band pass; calculate the best order
            order, wN = signal.buttord([lowPass, highPass], [lowStop, highStop], 3, 40)
            if order > 10:
                order = 10
            b, a = signal.butter(order, wN, btype='bandpass')

        # check if filter is stable
        filterUnstable = np.any(np.abs(np.roots(a)) > 1)
        if filterUnstable:
            # redesign to SOS and filter.
            # uses order=30 because why not
            print("single-stage filter unstable, switching to SOS filtering")
            if lowPass == 0:
                sos = signal.butter(30, wN, btype='lowpass', output='sos')
            elif highPass == 1:
                sos = signal.butter(30, wN, btype='highpass', output='sos')
            else:
                sos = signal.butter(30, wN, btype='bandpass', output='sos')
            # do the actual filtering
            data = signal.sosfiltfilt(sos, data)
        else:
            # do the actual filtering
            data = signal.filtfilt(b, a, data)
        return data

    def FastButterworthBandpass(self, data, low=0, high=None):
        """ Basic IIR bandpass filter.
        Streamlined to be fast - for use in antialiasing etc.
        Tries to construct a filter of order 7, with critical bands at +-0.002 Fn.
        This corresponds to +-16 Hz or so. If single-stage polynomial is unstable,
        switches to order 30, second-order filter.
        Args:
        1-2. data and sample rate.
        3-4. Low and high pass frequencies in fraction of Nyquist
        Does single-pass filtering, so does not retain phase.
        """
        if data is None:
            data = self.data

        # convert freqs to fractions of Nyquist:
        lowPass = max(low - 0.002, 0)
        highPass = min(high + 0.002, 1)

        if lowPass == 0 and highPass == 1:
            print("No filter needed!")
            return data
        elif lowPass == 0:
            # Low pass
            b, a = signal.butter(7, highPass, btype='lowpass')
        elif highPass == 1:
            # High pass
            b, a = signal.butter(7, lowPass, btype='highpass')
        else:
            # Band pass
            b, a = signal.butter(7, [lowPass, highPass], btype='bandpass')

        # check if filter is stable
        filterUnstable = np.any(np.abs(np.roots(a)) > 1)
        if filterUnstable:
            # redesign to SOS and filter.
            # uses order=30 because why not
            print("single-stage filter unstable, switching to SOS filtering")
            if lowPass == 0:
                sos = signal.butter(30, highPass, btype='lowpass', output='sos')
            elif highPass == 1:
                sos = signal.butter(30, lowPass, btype='highpass', output='sos')
            else:
                sos = signal.butter(30, [lowPass, highPass], btype='bandpass', output='sos')
            # do the actual filtering
            data = signal.sosfilt(sos, data)
        else:
            data = signal.lfilter(b, a, data)
        return data

    # The next functions perform spectrogram inversion
    def invertSpectrogram(self, sg, window_width=256, incr=64, nits=10):
        # Assumes that this is the plain (not power) spectrogram
        # Make the spectrogram two-sided and make the values small
        sg = np.concatenate([sg, sg[:, ::-1]], axis=1)

        sg_best = copy.deepcopy(sg)
        for i in range(nits):
            invertedSgram = self.inversion_iteration(sg_best, incr, calculate_offset=True,
                                                     set_zero_phase=(i == 0))
            self.setData(invertedSgram)
            est = self.spectrogram(window_width, incr, onesided=False, need_even=True)
            phase = est / np.maximum(np.max(sg)/1E8, np.abs(est))
            sg_best = sg * phase[:len(sg)]
        invertedSgram = self.inversion_iteration(sg_best, incr, calculate_offset=True,
                                                 set_zero_phase=False)
        return np.real(invertedSgram)

    def inversion_iteration(self, sg, incr, calculate_offset=True, set_zero_phase=True, window='Hann'):
        """
        Under MSR-LA License
        Based on MATLAB implementation from Spectrogram Inversion Toolbox
        References
        ----------
        D. Griffin and J. Lim. Signal estimation from modified short-time Fourier transform.
        IEEE Trans. Acoust. Speech Signal Process., 32(2):236-243, 1984.
        Malcolm Slaney, Daniel Naar and Richard F. Lyon. Auditory Model Inversion for Sound
        Separation. Proc. IEEE-ICASSP, Adelaide, 1994, II.77-80.
        Xinglei Zhu, G. Beauregard, L. Wyse. Real-Time Signal Estimation from Modified
        Short-Time Fourier Transform Magnitude Spectra. IEEE Transactions on Audio Speech
        and Language Processing, 08/2007.
        """
        size = int(np.shape(sg)[1] // 2)
        wave = np.zeros((np.shape(sg)[0] * incr + size), dtype='float64')
        # Getting overflow warnings with 32 bit...
        # wave = wave.astype('float64')
        total_windowing_sum = np.zeros((np.shape(sg)[0] * incr + size))

        # Virginia: adding different windows
        if window == 'Hann':
            # Hann window
            window = 0.5 * (1 - np.cos(2 * np.pi * np.arange(size) / (size - 1)))
        elif window == 'Blackman':
            # Blackman
            alpha = 0.16
            a0 = 0.5 * (1 - alpha)
            a1 = 0.5
            a2 = 0.5 * alpha
            window = a0 - a1 * np.cos(2 * np.pi * np.arange(size) / (size - 1)) \
                        + a2 * np.cos(4 * np.pi * np.arange(size) / (size - 1))

        est_start = int(size // 2) - 1
        est_end = est_start + size
        for i in range(sg.shape[0]):
            wave_start = int(incr * i)
            wave_end = wave_start + size
            if set_zero_phase:
                spectral_slice = sg[i].real + 0j
            else:
                # already complex
                spectral_slice = sg[i]

            wave_est = np.real(fft.ifft(spectral_slice))[::-1]
            if calculate_offset and i > 0:
                offset_size = size - incr
                if offset_size <= 0:
                    print("WARNING: Large step size >50% detected! "
                          "This code works best with high overlap - try "
                          "with 75% or greater")
                    offset_size = incr
                offset = self.xcorr_offset(wave[wave_start:wave_start + offset_size],
                                           wave_est[est_start:est_start + offset_size])
            else:
                offset = 0

            wave[wave_start:wave_end] += window * wave_est[est_start - offset:est_end - offset]
            total_windowing_sum[wave_start:wave_end] += window**2  # Virginia: needed square
        wave = np.real(wave) / (total_windowing_sum + 1E-6)
        return wave

    def xcorr_offset(self, x1, x2):
        x1 = x1 - x1.mean()
        x2 = x2 - x2.mean()
        frame_size = len(x2)
        half = frame_size // 2
        corrs = np.convolve(x1.astype('float32'), x2[::-1].astype('float32'))
        corrs[:half] = -1E30
        corrs[-half:] = -1E30
        return corrs.argmax() - len(x1)

    def medianFilter(self, data=None, width=11):
        # Median Filtering
        # Uses smaller width windows at edges to remove edge effects
        # TODO: Use abs rather than pure median?
        if data is None:
            data = self.data
        mData = np.zeros(len(data))
        for i in range(width, len(data)-width):
            mData[i] = np.median(data[i-width:i+width])
        for i in range(len(data)):
            wid = min(i, len(data)-i, width)
            mData[i] = np.median(data[i - wid:i + wid])
        return mData

    # Could be either features or signal processing things. Anyway, they are here --
    # spectral derivatives and extensions
    def wiener_entropy(self, sg):
        return np.sum(np.log(sg), 1)/np.shape(sg)[1] - np.log(np.sum(sg, 1)/np.shape(sg)[1])

    def mean_frequency(self, sampleRate, timederiv, freqderiv):
        freqs = sampleRate//2 / np.shape(timederiv)[1] * (np.arange(np.shape(timederiv)[1])+1)
        mfd = np.sum(timederiv**2 + freqderiv**2, axis=1)
        mfd = np.where(mfd == 0, 1, mfd)
        mf = np.sum(freqs * (timederiv**2 + freqderiv**2), axis=1)/mfd
        return freqs, mf

    def goodness_of_pitch(self, spectral_deriv, sg):
        return np.max(np.abs(fft.fft(spectral_deriv/sg, axis=0)), axis=0)

    def spectral_derivative(self, window_width, incr, K=2, threshold=0.5, returnAll=False):
        """ Compute the spectral derivative """
        if self.data is None or len(self.data) == 0:
            print("ERROR: attempted to calculate spectrogram without audiodata")
            return

        # Compute the set of multi-tapered spectrograms
        starts = range(0, len(self.data) - window_width, incr)
        [tapers, eigen] = dpss(window_width, 2.5, K)
        sg = np.zeros((len(starts), window_width, K), dtype=complex)
        for k in range(K):
            for i in starts:
                sg[i // incr, :, k] = tapers[:, k] * self.data[i:i + window_width]
            sg[:, :, k] = fft.fft(sg[:, :, k])
        sg = sg[:, window_width//2:, :]

        # Spectral derivative is the real part of exp(i*phi) * sum_k s_k conj(s_{k+1}),
        # where s_k is the k-th tapered spectrogram and phi is the direction of maximum change
        # (tan inverse of the ratio of pure time and pure frequency components)
        S = np.sum(sg[:, :, :-1] * np.conj(sg[:, :, 1:]), axis=2)
        timederiv = np.real(S)
        freqderiv = np.imag(S)

        # Frequency modulation is the angle pi/2 - (direction of max change)
        mfd = np.max(freqderiv**2, axis=0)
        mfd = np.where(mfd == 0, 1, mfd)
        fm = np.arctan(np.max(timederiv**2, axis=0) / mfd)
        spectral_deriv = -timederiv*np.sin(fm) + freqderiv*np.cos(fm)

        sg = np.sum(np.real(sg*np.conj(sg)), axis=2)
        sg /= np.max(sg)

        # Suppress the noise (spectral continuity)
        # Compute the zero crossings of the spectral derivative in all directions
        # Pixel is a contour pixel if it is at a zero crossing and both neighbouring
        # pixels in that direction are > threshold
        sdt = spectral_deriv * np.roll(spectral_deriv, 1, 0)
        sdf = spectral_deriv * np.roll(spectral_deriv, 1, 1)
        sdtf = spectral_deriv * np.roll(spectral_deriv, 1, (0, 1))
        sdft = spectral_deriv * np.roll(spectral_deriv, (1, -1), (0, 1))
        indt, indf = np.where(((sdt < 0) | (sdf < 0) | (sdtf < 0) | (sdft < 0)) & (spectral_deriv < 0))

        # Noise reduction using a threshold
        we = np.abs(self.wiener_entropy(sg))
        freqs, mf = self.mean_frequency(self.sampleRate, timederiv, freqderiv)

        # Given a time and frequency bin
        contours = np.zeros(np.shape(spectral_deriv))
        for i in range(len(indf)):
            f = indf[i]
            t = indt[i]
            if (t > 0) & (t < (np.shape(sg)[0]-1)) & (f > 0) & (f < (np.shape(sg)[1]-1)):
                thr = threshold*we[t]/np.abs(freqs[f] - mf[t])
                if (sdt[t, f] < 0) & (sg[t-1, f] > thr) & (sg[t+1, f] > thr):
                    contours[t, f] = 1
                if (sdf[t, f] < 0) & (sg[t, f-1] > thr) & (sg[t, f+1] > thr):
                    contours[t, f] = 1
                if (sdtf[t, f] < 0) & (sg[t-1, f-1] > thr) & (sg[t+1, f+1] > thr):
                    contours[t, f] = 1
                if (sdft[t, f] < 0) & (sg[t-1, f+1] > thr) & (sg[t+1, f-1] > thr):
                    contours[t, f] = 1

        if returnAll:
            return spectral_deriv, sg, fm, we, mf, np.fliplr(contours)
        else:
            return np.fliplr(contours)

    def drawSpectralDeriv(self):
        # helper function to parse output for plotting spectral derivs.
        sd = self.spectral_derivative(self.window_width, self.incr, 2, 5.0)
        x, y = np.where(sd > 0)

        # remove points beyond frq range to show
        y1 = [i * self.sampleRate//2/np.shape(self.sg)[1] for i in y]
        y1 = np.asarray(y1)
        valminfrq = self.minFreqShow/(self.sampleRate//2/np.shape(self.sg)[1])

        inds = np.where((y1 >= self.minFreqShow) & (y1 <= self.maxFreqShow))
        x = x[inds]
        y = y[inds]
        y = [i - valminfrq for i in y]

        return x, y

    def drawFundFreq(self, seg):
        # produces marks of fundamental freq to be drawn on the spectrogram.
        pitch, starts, _, W = seg.yin()

        # find out which marks should be visible
        ind = np.logical_and(pitch > self.minFreqShow+50, pitch < self.maxFreqShow)
        if not np.any(ind):
            print("Warning: no fund. freq. identified in this page")
            return

        # ffreq is calculated over windows of size W
        # first, identify segments using that original scale:
        segs = seg.convert01(ind)
        segs = seg.deleteShort(segs, 2)
        segs = seg.joinGaps(segs, 2)
        # extra round to delete those which didn't merge with any longer segments
        segs = seg.deleteShort(segs, 4)

        yadjfact = 2/self.sampleRate*np.shape(self.sg)[1]
        # then map starts from samples to spec windows
        starts = starts / self.incr

        # then convert segments back to positions in each array:
        out = []
        for s in segs:
            # convert [s, e] to [s s+1 ... e-1 e]
            i = np.arange(s[0], s[1])
            # retrieve all pitch and start positions corresponding to this segment
            pitchSeg = pitch[i]
            # Adjust pitch marks to the visible freq range on the spec
            y = ((pitchSeg-self.minFreqShow)*yadjfact).astype('int')
            # smooth the pitch lines
            medfiltsize = min((len(y)-1)//2*2+1, 15)
            y = medfilt(y, medfiltsize)
            # joinGaps can introduce no-pitch pixels, which cause
            # smoothed segments to have 0 ends. Trim those:
            trimst = 0
            while y[trimst] == 0 and trimst < medfiltsize//2:
                trimst += 1
            trime = len(y)-1
            while y[trime] == 0 and trime > len(y)-medfiltsize//2:
                trime -= 1
            y = y[trimst:trime]
            i = i[trimst:trime]
            out.append((starts[i], y))
        return out

    def max_energy(self, sg, thr=1.2):
        # Remember that spectrogram is actually rotated!
        colmaxinds = np.argmax(sg, axis=1)
        points = np.zeros(np.shape(sg))

        # If one wants to show only some colmaxs:
        # sg = sg/np.max(sg)
        # colmedians = np.median(sg, axis=1)
        # colmax = np.max(sg, axis=1)
        # inds = np.where(colmax > thr*colmedians)
        # print(len(inds))
        # points[inds, colmaxinds[inds]] = 1

        # just mark the argmax position in each column
        points[range(points.shape[0]), colmaxinds] = 1
        x, y = np.where(points > 0)

        # convert points y coord from spec units to Hz
        yfr = [i * self.sampleRate//2/np.shape(self.sg)[1] for i in y]
        yfr = np.asarray(yfr)
        # remove points beyond frq range to show
        inds = np.where((yfr >= self.minFreqShow) & (yfr <= self.maxFreqShow))
        x = x[inds]
        y = y[inds]
        # adjust y pos for when spec doesn't start at 0
        specstarty = self.minFreqShow / (self.sampleRate // 2 / np.shape(self.sg)[1])
        y = [i - specstarty for i in y]

        return x, y

    def denoiseImage(self, sg, thr=1.2):
        from skimage.restoration import (denoise_tv_chambolle, denoise_bilateral,
                                         denoise_wavelet, estimate_sigma)
        sigma_est = estimate_sigma(sg, multichannel=False, average_sigmas=True)
        sgnew = denoise_tv_chambolle(sg, weight=0.2, multichannel=False)
        # sgnew = denoise_bilateral(sg, sigma_color=0.05, sigma_spatial=15, multichannel=False)
        # sgnew = denoise_wavelet(sg, multichannel=False)
        return sgnew

    def denoiseImage2(self, sg, filterSize=5):
        # Filter size is odd
        [x, y] = np.shape(sg)
        width = filterSize//2
        sgnew = np.zeros(np.shape(sg))
        sgnew[0:width+1, :] = sg[0:width+1, :]
        sgnew[-width:, :] = sg[-width:, :]
        sgnew[:, 0:width+1] = sg[:, 0:width+1]
        sgnew[:, -width:] = sg[:, -width:]

        for i in range(width, x-width):
            for j in range(width, y-width):
                sgnew[i, j] = np.median(sg[i-width:i+width+1, j-width:j+width+1])
        print(sgnew)
        return sgnew

    def mark_rain(self, sg, thr=0.9):
        row, col = np.shape(sg.T)
        print(row, col)
        inds = np.where(sg > thr * np.max(sg))
        longest = np.zeros(col)
        start = np.zeros(col)
        for c in range(col):
            r = 0
            l = 0
            s = 0
            j = 0
            while inds[0][r] == c:
                if inds[1][r + 1] == inds[1][r] + 1:
                    l += 1
                else:
                    if l > longest[c]:
                        longest[c] = l
                        start[c] = s
                    l = 0
                    s = j + 1
                r += 1
        newsg = np.zeros(np.shape(sg))
        newsg = newsg.T
        for c in range(col):
            if longest[c] > 10:
                newsg[c, start[c]:start[c] + longest[c]] = 1
        print(longest)
        return newsg.T

    def denoise(self, alg, start=None, end=None, width=None):
        """
        alg - string, algorithm type from the Denoise dialog
        start, end - filtering limits, from Denoise dialog
        width - median parameter, from Denoise dialog
        """
        if str(alg) == "Wavelets":
            print("Don't use this interface for wavelets")
            return
        elif str(alg) == "Bandpass":
            self.data = self.bandpassFilter(self.data, self.sampleRate, start=start, end=end)
        elif str(alg) == "Butterworth Bandpass":
            self.data = self.ButterworthBandpass(self.data, self.sampleRate, low=start, high=end)
        else:
            # Median Filter
            self.data = self.medianFilter(self.data, int(str(width)))

    def impMask(self, engp=90, fp=0.75):
        """
        Impulse mask
        :param engp: energy percentile (for rows of the spectrogram)
        :param fp: frequency proportion to consider it as an impulse (cols of the spectrogram)
        :return: audiodata
        """
        print('Impulse masking...')
        imps = self.impulse_cal(fs=self.sampleRate, engp=engp, fp=fp)
        print('Samples to mask: ', len(self.data) - np.sum(imps))
        # Mask only the affected samples
        return np.multiply(self.data, imps)

    def impulse_cal(self, fs, engp=90, fp=0.75, blocksize=10):
        """
        Find sections where impulse sounds occur, e.g. clicks
        window - window length (no overlap)
        engp - energy percentile (thr), the percentile of energy to inform that a section
               got high energy across frequency bands
        fp - frequency percentage (thr), the percentage of frequency bands that must have
             high energy to mark a section as having impulse noise
        blocksize - max number of consecutive blocks; 10 consecutive blocks (~1/25 sec) is a
                    good value, so as not to mask very close-range calls
        :return: a binary list of length len(data) indicating presence of impulsive noise (0)
                 otherwise (1)
        """
        # Calculate window length
        w1 = np.floor(fs/250)  # Window length of 1/250 sec selected experimentally
        arr = [2 ** i for i in range(5, 11)]
        pos = np.abs(arr - w1).argmin()
        window = arr[pos]

        sp = SignalProc(window, window)  # No overlap
        sp.data = self.data
        sp.sampleRate = self.sampleRate
        sg = sp.spectrogram(multitaper=False)

        # For each frq band get sections where energy exceeds some (90%) percentile, engp,
        # and generate a binary spectrogram
        sgb = np.zeros((np.shape(sg)))
        ep = np.percentile(sg, engp, axis=0)  # note thr - 90% for energy percentile
        for y in range(np.shape(sg)[1]):
            ey = sg[:, y]
            sgb[np.where(ey > ep[y]), y] = 1

        # If lots of frq bands got 1 then predict a click
        # 1 - presence of impulse noise, 0 - otherwise here
        impulse = np.where(np.count_nonzero(sgb, axis=1) > np.shape(sgb)[1] * fp, 1, 0)  # note thr fp

        # When an impulsive noise is detected, it's better to check neighbours to make sure
        # it's not a bird call very close to the microphone.
        imp_inds = np.where(impulse > 0)[0].tolist()
        imp = self.countConsecutive(imp_inds, len(impulse))

        impulse = []
        for item in imp:
            if item > blocksize or item == 0:  # note threshold - blocksize, 10 consecutive blocks ~1/25 sec
                impulse.append(1)
            else:
                impulse.append(0)

        impulse = list(chain.from_iterable(repeat(e, window) for e in impulse))  # make it the same length as self.data

        if len(impulse) > len(self.data):  # sanity check
            impulse = impulse[0:len(self.data)]
        elif len(impulse) < len(self.data):
            gap = len(self.data) - len(impulse)
            impulse = np.pad(impulse, (0, gap), 'constant')

        return impulse

    def countConsecutive(self, nums, length):
        gaps = [[s, e] for s, e in zip(nums, nums[1:]) if s + 1 < e]
        edges = iter(nums[:1] + sum(gaps, []) + nums[-1:])
        edges = list(zip(edges, edges))
        edges_reps = [item[1] - item[0] + 1 for item in edges]
        res = np.zeros((length)).tolist()
        t = 0
        for item in edges:
            for i in range(item[0], item[1]+1):
                res[i] = edges_reps[t]
            t += 1
        return res
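Typical use of SignalProc, for reference (the file name here is hypothetical):

sp = SignalProc(window_width=256, incr=128)
sp.readWav("recording.wav")                 # fills sp.data, sp.sampleRate
sg = sp.spectrogram(window='Hann')          # power spectrum; plot 10.*np.log10(sg), per the docstring
filtered = sp.ButterworthBandpass(sp.data, sp.sampleRate, low=500, high=8000)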
class AudioWidget(QWidget):

    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.format = None
        self.output = None
        self.buffer = QBuffer()

        self.volumeSlider = QSlider(Qt.Horizontal)
        self.volumeSlider.setMaximum(10)
        self.volumeSlider.setPageStep(1)
        self.volumeSlider.setValue(5)

        self.playButton = QPushButton()
        self.playButton.setIcon(QIcon("icons/play.png"))
        self.stopButton = QPushButton()
        self.stopButton.setIcon(QIcon("icons/stop.png"))

        self.volumeSlider.valueChanged.connect(self.change_volume)
        self.playButton.clicked.connect(self.play_pause)
        self.stopButton.clicked.connect(self.stop)

        layout = QHBoxLayout(self)
        layout.addWidget(self.playButton)
        layout.addWidget(self.stopButton)
        layout.addWidget(self.volumeSlider)
        layout.addStretch()

    def stop(self):
        if self.output:
            if self.output.state() != QAudio.StoppedState:
                self.output.stop()

    def set_data(self, mono_sig, sr):
        # if not self.format:
        self.format = QAudioFormat()
        self.format.setChannelCount(1)
        self.format.setSampleRate(sr)
        # numpy reports the item size in bytes, Qt wants bits
        self.format.setSampleSize(mono_sig.dtype.itemsize * 8)
        self.format.setCodec("audio/pcm")
        self.format.setByteOrder(QAudioFormat.LittleEndian)
        self.format.setSampleType(QAudioFormat.Float)
        self.output = QAudioOutput(self.format, self)
        self.output.stateChanged.connect(self.audio_state_changed)
        # change the content without stopping playback
        p = self.buffer.pos()
        if self.buffer.isOpen():
            self.buffer.close()
        self.data = mono_sig.tobytes()
        self.buffer.setData(self.data)
        self.buffer.open(QIODevice.ReadWrite)
        self.buffer.seek(p)

    def audio_state_changed(self, new_state):
        # adjust the button icon
        if new_state != QAudio.ActiveState:
            self.playButton.setIcon(QIcon("icons/play.png"))
        else:
            self.playButton.setIcon(QIcon("icons/pause.png"))

    def cursor(self, t):
        # seek towards the time t
        # todo: handle EOF case
        try:
            if self.format:
                t = max(0, t)
                # bytesForDuration expects microseconds
                b = self.format.bytesForDuration(t * 1000000)
                self.buffer.seek(b)
        except:
            print("cursor error")

    def play_pause(self):
        if self.output:
            # (un)pause the audio output, keeps the buffer intact
            if self.output.state() == QAudio.ActiveState:
                self.output.suspend()
            elif self.output.state() == QAudio.SuspendedState:
                self.output.resume()
            else:
                self.buffer.seek(0)
                self.output.start(self.buffer)

    def change_volume(self, value):
        if self.output:
            # the slider emits ints; setVolume wants a float in [0, 1]
            self.output.setVolume(value / 10)
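A minimal usage sketch for AudioWidget: hand it a float32 mono numpy signal and a sample rate, inside a running QApplication (the variable names here are illustrative):

import numpy as np

sr = 44100
tone = (0.5 * np.sin(2 * np.pi * 440 * np.arange(sr) / sr)).astype(np.float32)

widget = AudioWidget()
widget.set_data(tone, sr)   # builds the float QAudioFormat and fills the buffer
widget.play_pause()         # starts playback from the top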
class AudioTest(AudioDevicesBase):
    def __init__(self, parent=None):
        super(AudioTest, self).__init__(parent)

        self.deviceInfo = QAudioDeviceInfo()
        self.settings = QAudioFormat()
        self.mode = QAudio.AudioOutput

        self.testButton.clicked.connect(self.test)
        self.modeBox.activated.connect(self.modeChanged)
        self.deviceBox.activated.connect(self.deviceChanged)
        self.sampleRateBox.activated.connect(self.sampleRateChanged)
        self.channelsBox.activated.connect(self.channelChanged)
        self.codecsBox.activated.connect(self.codecChanged)
        self.sampleSizesBox.activated.connect(self.sampleSizeChanged)
        self.sampleTypesBox.activated.connect(self.sampleTypeChanged)
        self.endianBox.activated.connect(self.endianChanged)
        self.populateTableButton.clicked.connect(self.populateTable)

        self.modeBox.setCurrentIndex(0)
        self.modeChanged(0)
        self.deviceBox.setCurrentIndex(0)
        self.deviceChanged(0)

    def test(self):
        self.testResult.clear()
        if not self.deviceInfo.isNull():
            if self.deviceInfo.isFormatSupported(self.settings):
                self.testResult.setText("Success")
                self.nearestSampleRate.setText("")
                self.nearestChannel.setText("")
                self.nearestCodec.setText("")
                self.nearestSampleSize.setText("")
                self.nearestSampleType.setText("")
                self.nearestEndian.setText("")
            else:
                nearest = self.deviceInfo.nearestFormat(self.settings)
                self.testResult.setText("Failed")
                self.nearestSampleRate.setText(str(nearest.sampleRate()))
                self.nearestChannel.setText(str(nearest.channelCount()))
                self.nearestCodec.setText(nearest.codec())
                self.nearestSampleSize.setText(str(nearest.sampleSize()))
                self.nearestSampleType.setText(
                        self.sampleTypeToString(nearest.sampleType()))
                self.nearestEndian.setText(
                        self.endianToString(nearest.byteOrder()))
        else:
            self.testResult.setText("No Device")

    sampleTypeMap = {
        QAudioFormat.SignedInt: "SignedInt",
        QAudioFormat.UnSignedInt: "UnSignedInt",
        QAudioFormat.Float: "Float"
    }

    @classmethod
    def sampleTypeToString(cls, sampleType):
        return cls.sampleTypeMap.get(sampleType, "Unknown")

    endianMap = {
        QAudioFormat.LittleEndian: "LittleEndian",
        QAudioFormat.BigEndian: "BigEndian"
    }

    @classmethod
    def endianToString(cls, endian):
        return cls.endianMap.get(endian, "Unknown")

    def modeChanged(self, idx):
        self.testResult.clear()
        if idx == 0:
            self.mode = QAudio.AudioInput
        else:
            self.mode = QAudio.AudioOutput
        self.deviceBox.clear()
        for deviceInfo in QAudioDeviceInfo.availableDevices(self.mode):
            self.deviceBox.addItem(deviceInfo.deviceName(), deviceInfo)
        self.deviceBox.setCurrentIndex(0)
        self.deviceChanged(0)

    def deviceChanged(self, idx):
        self.testResult.clear()
        if self.deviceBox.count() == 0:
            return

        self.deviceInfo = self.deviceBox.itemData(idx)

        self.sampleRateBox.clear()
        sampleRatez = self.deviceInfo.supportedSampleRates()
        self.sampleRateBox.addItems([str(sr) for sr in sampleRatez])
        if len(sampleRatez) != 0:
            self.settings.setSampleRate(sampleRatez[0])

        self.channelsBox.clear()
        chz = self.deviceInfo.supportedChannelCounts()
        self.channelsBox.addItems([str(ch) for ch in chz])
        if len(chz) != 0:
            self.settings.setChannelCount(chz[0])

        self.codecsBox.clear()
        codecs = self.deviceInfo.supportedCodecs()
        self.codecsBox.addItems([str(c) for c in codecs])
        if len(codecs) != 0:
            self.settings.setCodec(codecs[0])
        # Add an unsupported codec so the "Failed" path can be exercised.
        self.codecsBox.addItem("audio/test")

        self.sampleSizesBox.clear()
        sampleSizez = self.deviceInfo.supportedSampleSizes()
        self.sampleSizesBox.addItems([str(ss) for ss in sampleSizez])
        if len(sampleSizez) != 0:
            self.settings.setSampleSize(sampleSizez[0])

        self.sampleTypesBox.clear()
        sampleTypez = self.deviceInfo.supportedSampleTypes()
        self.sampleTypesBox.addItems(
                [self.sampleTypeToString(st) for st in sampleTypez])
        if len(sampleTypez) != 0:
            self.settings.setSampleType(sampleTypez[0])

        self.endianBox.clear()
        endianz = self.deviceInfo.supportedByteOrders()
        self.endianBox.addItems([self.endianToString(e) for e in endianz])
        if len(endianz) != 0:
            self.settings.setByteOrder(endianz[0])

        self.allFormatsTable.clearContents()

    def populateTable(self):
        row = 0
        format = QAudioFormat()
        for codec in self.deviceInfo.supportedCodecs():
            format.setCodec(codec)
            for sampleRate in self.deviceInfo.supportedSampleRates():
                format.setSampleRate(sampleRate)
                for channels in self.deviceInfo.supportedChannelCounts():
                    format.setChannelCount(channels)
                    for sampleType in self.deviceInfo.supportedSampleTypes():
                        format.setSampleType(sampleType)
                        for sampleSize in self.deviceInfo.supportedSampleSizes():
                            format.setSampleSize(sampleSize)
                            for endian in self.deviceInfo.supportedByteOrders():
                                format.setByteOrder(endian)
                                if self.deviceInfo.isFormatSupported(format):
                                    self.allFormatsTable.setRowCount(row + 1)
                                    self.setFormatValue(row, 0, format.codec())
                                    self.setFormatValue(row, 1, str(format.sampleRate()))
                                    self.setFormatValue(row, 2, str(format.channelCount()))
                                    self.setFormatValue(row, 3,
                                            self.sampleTypeToString(format.sampleType()))
                                    self.setFormatValue(row, 4, str(format.sampleSize()))
                                    self.setFormatValue(row, 5,
                                            self.endianToString(format.byteOrder()))
                                    row += 1

    def setFormatValue(self, row, column, value):
        self.allFormatsTable.setItem(row, column, QTableWidgetItem(value))

    def sampleRateChanged(self, idx):
        self.settings.setSampleRate(int(self.sampleRateBox.itemText(idx)))

    def channelChanged(self, idx):
        self.settings.setChannelCount(int(self.channelsBox.itemText(idx)))

    def codecChanged(self, idx):
        self.settings.setCodec(self.codecsBox.itemText(idx))

    def sampleSizeChanged(self, idx):
        self.settings.setSampleSize(int(self.sampleSizesBox.itemText(idx)))

    def sampleTypeChanged(self, idx):
        # The box holds the display strings produced by sampleTypeToString(),
        # so map the text back to the enum instead of calling int() on it
        # (int("SignedInt") would raise ValueError).
        text = self.sampleTypesBox.itemText(idx)
        for sampleType, name in self.sampleTypeMap.items():
            if name == text:
                self.settings.setSampleType(sampleType)
                break

    def endianChanged(self, idx):
        # Same reverse lookup as sampleTypeChanged().
        text = self.endianBox.itemText(idx)
        for endian, name in self.endianMap.items():
            if name == text:
                self.settings.setByteOrder(endian)
                break
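# Standalone sketch of the negotiation AudioTest.test() performs: ask a device
# whether a format is supported and fall back to nearestFormat() otherwise.
# Device enumeration wants an application object, so one is created here.
from PyQt5.QtCore import QCoreApplication
from PyQt5.QtMultimedia import QAudioDeviceInfo, QAudioFormat

app = QCoreApplication([])

fmt = QAudioFormat()
fmt.setSampleRate(44100)
fmt.setChannelCount(2)
fmt.setSampleSize(16)
fmt.setCodec("audio/pcm")
fmt.setByteOrder(QAudioFormat.LittleEndian)
fmt.setSampleType(QAudioFormat.SignedInt)

info = QAudioDeviceInfo.defaultOutputDevice()
if not info.isFormatSupported(fmt):
    fmt = info.nearestFormat(fmt)   # the "Failed" path of test()
print(fmt.sampleRate(), fmt.channelCount(), fmt.sampleSize())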
def setup(self):
    # self.output is assumed to be a QFile created in __init__;
    # raw PCM samples are written straight into it.
    self.output.setFileName("record.pcm")
    self.output.open(QIODevice.WriteOnly | QIODevice.Truncate)

    settings = QAudioFormat()
    settings.setCodec("audio/pcm")
    settings.setSampleRate(16000)
    settings.setSampleSize(16)
    settings.setChannelCount(1)
    settings.setByteOrder(QAudioFormat.LittleEndian)
    settings.setSampleType(QAudioFormat.SignedInt)

    # capture is presumably started elsewhere with self.audio.start(...)
    self.audio = QAudioInput(settings)
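# Hedged sketch of the capture lifecycle around setup(); the method names
# start_recording/stop_recording are illustrative, the attributes are the
# ones setup() creates:
def start_recording(self):
    self.audio.start(self.output)   # QAudioInput streams PCM into the QFile

def stop_recording(self):
    self.audio.stop()
    self.output.close()             # flush and release record.pcm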
def __init__(self, parent=None):
    QWidget.__init__(self, parent)

    format = QAudioFormat()
    format.setChannelCount(AUDIO_CHANS)
    format.setSampleRate(SAMPLE_RATE)
    format.setSampleSize(SAMPLE_SIZE)
    format.setCodec("audio/pcm")
    format.setByteOrder(QAudioFormat.LittleEndian)
    format.setSampleType(QAudioFormat.SignedInt)

    self.output = QAudioOutput(format, self)
    # 2 bytes per sample times the samples played per control interval;
    # without SAMPLE_RATE in the product the size rounds down to zero bytes
    output_buffer_size = int(2 * SAMPLE_RATE * CTRL_INTERVAL / 1000)
    self.output.setBufferSize(output_buffer_size)

    self.generator = Generator(format, self)

    self.midiListener = MidiPortReader()
    self.listenerThread = QThread()
    self.midiListener.moveToThread(self.listenerThread)
    self.listenerThread.started.connect(self.midiListener.listener)

    self.midiListener.addVoice.connect(self.generator.addVoice)
    self.midiListener.removeVoice.connect(self.generator.removeVoice)

    self.createUI()

    self.pslider.valueChanged.connect(self.generator.changeP)
    self.fslider.valueChanged.connect(self.generator.qCalc)
    self.semiDown.clicked.connect(self.smDown)
    self.semiUp.clicked.connect(self.smUp)
    self.octaveDown.clicked.connect(self.ovDown)
    self.octaveUp.clicked.connect(self.ovUp)

    self.listenerThread.start()
    self.generator.start()
    self.output.start(self.generator)
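# Worked example of the buffer sizing above (illustrative numbers): 16-bit
# mono at 44100 Hz buffered for a 20 ms control interval needs
#   int(2 * 44100 * 20 / 1000) = 1764 bytes,
# i.e. 2 bytes per sample times the samples played per interval.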
class SleaZynth(QMainWindow):

    autoSaveFile = 'auto.save'
    texsize = 512
    samplerate = 44100

    def __init__(self):
        QMainWindow.__init__(self)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.show()

        self.initModelView()
        self.initSignals()
        self.initState()
        self.autoLoad()
        self.initAMaySyn()
        self.initAudio()

    def keyPressEvent(self, event):
        if event.key() == Qt.Key_Escape:
            self.close()
        elif event.key() == Qt.Key_F1:
            self.debugOutput()
        if event.modifiers() & Qt.ControlModifier:
            if event.key() == Qt.Key_S:
                self.autoSave()
            elif event.key() == Qt.Key_L:
                self.autoLoad()
            elif event.key() == Qt.Key_T:
                self.renderWhateverWasLast()

    def closeEvent(self, event):
        QApplication.quit()

    def initSignals(self):
        self.ui.btnChooseFilename.clicked.connect(self.loadAndImportMayson)
        self.ui.btnImport.clicked.connect(self.importMayson)
        self.ui.btnExport.clicked.connect(self.exportChangedMayson)
        self.ui.checkAutoReimport.clicked.connect(self.toggleAutoReimport)
        self.ui.checkAutoRender.clicked.connect(self.toggleAutoRender)
        self.ui.editFilename.editingFinished.connect(partial(self.updateStateFromUI, only='maysonFile'))
        self.ui.editBPM.editingFinished.connect(partial(self.updateStateFromUI, only='BPM'))
        self.ui.spinBOffset.valueChanged.connect(partial(self.updateStateFromUI, only='B_offset'))
        self.ui.spinBStop.valueChanged.connect(partial(self.updateStateFromUI, only='B_stop'))
        self.ui.spinLevelSyn.valueChanged.connect(partial(self.updateStateFromUI, only='level_syn'))
        self.ui.spinLevelDrum.valueChanged.connect(partial(self.updateStateFromUI, only='level_drum'))
        self.ui.checkWriteWAV.clicked.connect(partial(self.updateStateFromUI, only='writeWAV'))
        self.ui.spinTimeShift.valueChanged.connect(partial(self.updateStateFromUI, only='extraTimeShift'))

        self.ui.editTrackName.textChanged.connect(self.trackSetName)
        self.ui.spinTrackVolume.valueChanged.connect(self.trackSetVolume)
        self.ui.checkTrackMute.stateChanged.connect(self.trackSetMute)

        self.ui.spinModOn.valueChanged.connect(self.moduleSetModOn)
        self.ui.spinModTranspose.valueChanged.connect(self.moduleSetTranspose)
        self.ui.btnApplyPattern.clicked.connect(self.moduleSetPattern)
        self.ui.btnApplyNote.clicked.connect(self.noteApplyChanges)

        self.ui.btnTrackClone.clicked.connect(self.trackClone)
        self.ui.btnTrackDelete.clicked.connect(self.trackDelete)
        self.ui.btnRandomSynth.clicked.connect(self.trackSetRandomSynth)
        self.ui.btnRandomizeSynth.clicked.connect(self.synthRandomize)
        self.ui.btnSaveSynth.clicked.connect(self.synthHardClone)
        self.ui.btnApplySynthName.clicked.connect(self.synthChangeName)
        self.ui.btnReloadSyn.clicked.connect(self.loadSynthsFromSynFile)

        self.ui.btnRenderModule.clicked.connect(self.renderModule)
        self.ui.btnRenderTrack.clicked.connect(self.renderTrack)
        self.ui.btnRenderSong.clicked.connect(self.renderSong)
        self.ui.btnStopPlayback.clicked.connect(self.stopPlayback)

        # model/view signals
        self.ui.trackList.selectionModel().currentChanged.connect(self.trackLoad)
        self.ui.patternCombox.currentIndexChanged.connect(self.patternLoad)
        self.ui.moduleList.selectionModel().currentChanged.connect(self.moduleLoad)
        self.ui.synthList.selectionModel().currentChanged.connect(self.trackSetSynth)
        self.ui.noteList.selectionModel().currentChanged.connect(self.noteLoad)
        self.ui.drumList.selectionModel().currentChanged.connect(self.noteSetDrum)

    def initModelView(self):
        self.trackModel = TrackModel()
        self.ui.trackList.setModel(self.trackModel)
        self.moduleModel = ModuleModel()
        self.ui.moduleList.setModel(self.moduleModel)
        self.patternModel = PatternModel()
        self.ui.patternCombox.setModel(self.patternModel)
        self.noteModel = NoteModel()
        self.ui.noteList.setModel(self.noteModel)
        self.synthModel = QStringListModel()
        self.ui.synthList.setModel(self.synthModel)
        self.drumModel = QStringListModel()
        self.ui.drumList.setModel(self.drumModel)

        self.noteModel.dataChanged.connect(self.updateModulesWithChangedPattern)
        self.noteModel.reloadNoteParameters.connect(self.noteLoad)

    def initState(self):
        self.state = {
            'maysonFile': '',
            'autoReimport': False,
            'autoRender': False,
            'lastRendered': '',
            'writeWAV': False,
            'selectedTrack': 0,
            'selectedModule': 0,
            'extraTimeShift': 0,
        }
        self.info = {}
        self.patterns = []
        self.synths = []
        self.drumkit = []
        self.amaysyn = None
        self.fileObserver = None

    def loadAndImportMayson(self):
        name, _ = QFileDialog.getOpenFileName(self, 'Load MAYSON file', '',
                                              'aMaySyn export *.mayson(*.mayson)')
        if name == '':
            return
        self.state['maysonFile'] = name
        self.state['title'], self.state['synFile'] = self.getTitleAndSynFromMayson(name)
        self.autoSave()
        self.importMayson()

    def importMayson(self):
        maysonData = {}
        # the context manager closes the handle on every path, even when
        # open() or json.load() raises
        try:
            with open(self.state['maysonFile'], 'r') as file:
                maysonData = json.load(file)
        except FileNotFoundError:
            print(f"{self.state['maysonFile']} could not be imported. make sure that it exists, or choose another one.")
            self.loadAndImportMayson()
        except json.decoder.JSONDecodeError:
            print(f"{self.state['maysonFile']} is changing right now, pause for 1 sec...")
            sleep(1)
            self.importMayson()
        if maysonData == {}:
            return

        self.info = maysonData['info']
        self.info.update({'title': self.state['title']})
        if self.amaysyn is not None:
            self.amaysyn.updateState(info=self.info)
        self.trackModel.setTracks(maysonData['tracks'])
        self.patternModel.setPatterns(maysonData['patterns'])
        self.synthModel.setStringList(maysonData['synths'])
        self.drumModel.setStringList(maysonData['drumkit'])

        self.trackModel.layoutChanged.emit()
        if self.state['selectedTrack'] >= self.trackModel.rowCount():
            self.state['selectedTrack'] = 0
        self.selectIndex(self.ui.trackList, self.trackModel, self.state['selectedTrack'])
        if self.state['selectedModule'] >= self.moduleModel.rowCount():
            self.state['selectedModule'] = 0
        self.selectIndex(self.ui.moduleList, self.moduleModel, self.state['selectedModule'])
        self.noteModel.layoutChanged.emit()
        if self.noteModel.rowCount() > 0:
            self.selectIndex(self.ui.noteList, self.noteModel, 0)
        self.synthModel.layoutChanged.emit()
        self.drumModel.layoutChanged.emit()
        if self.drumIndex().isValid():
            self.selectIndex(self.ui.drumList, self.drumModel, self.drumIndex().row())

        self.applyStateToUI()

    def exportChangedMayson(self):
        name, _ = QFileDialog.getSaveFileName(self, 'Export with Changes',
                                              self.state['maysonFile'],
                                              'aMaySyn export *.mayson(*.mayson)')
        if name == '':
            return
        data = {
            'info': self.info,
            'tracks': self.trackModel.tracks,
            'patterns': self.patternModel.patterns,
            'synths': self.synthModel.stringList(),
            'drumkit': self.drumModel.stringList(),
        }
        with open(name, 'w') as file:
            json.dump(data, file)

    def updateStateFromUI(self, only=None):
        if only is None or only == 'maysonFile':
            self.state.update({'maysonFile': self.ui.editFilename.text()})
            title, synFile = self.getTitleAndSynFromMayson(self.state['maysonFile'])
            self.state.update({'synFile': synFile})
            self.state.update({'title': title})
            self.info['title'] = title
        if only is None or only == 'BPM':
            self.info['BPM'] = self.ui.editBPM.text()
        if only is None or only == 'B_offset':
            self.info['B_offset'] = self.ui.spinBOffset.value()
        if only is None or only == 'B_stop':
            self.info['B_stop'] = self.ui.spinBStop.value()
        if only is None or only == 'level_syn':
            # assignment, not comparison (was '==', which silently did nothing)
            self.info['level_syn'] = self.ui.spinLevelSyn.value()
        if only is None or only == 'level_drum':
            self.info['level_drum'] = self.ui.spinLevelDrum.value()
        if only is None or only == 'writeWAV':
            self.state['writeWAV'] = self.ui.checkWriteWAV.isChecked()
        if only is None or only == 'extraTimeShift':
            self.state['extraTimeShift'] = self.ui.spinTimeShift.value()

        if self.amaysyn is not None:
            # synFile is only rebuilt in the 'maysonFile' branch above, so
            # read it from the state instead of a possibly-undefined local
            self.amaysyn.updateState(info=self.info, synFile=self.state['synFile'])

    def applyStateToUI(self):
        self.ui.editFilename.setText(self.state['maysonFile'])
        # TODO: think about - do I want self.state['override']['BPM'] etc.??
        self.ui.editBPM.setText(self.info['BPM'])
        self.ui.spinBOffset.setValue(self.info['B_offset'])
        self.ui.spinBStop.setValue(self.info['B_stop'])
        self.ui.spinLevelSyn.setValue(self.info['level_syn'])
        self.ui.spinLevelDrum.setValue(self.info['level_drum'])
        self.ui.checkAutoReimport.setChecked(self.state['autoReimport'])
        self.ui.checkAutoRender.setChecked(self.state['autoRender'])
        self.ui.checkWriteWAV.setChecked(self.state['writeWAV'])
        self.ui.spinTimeShift.setValue(self.state['extraTimeShift'])

    def autoSave(self):
        with open(self.autoSaveFile, 'w') as file:
            json.dump(self.state, file)

    def autoLoad(self):
        loadState = {}
        try:
            with open(self.autoSaveFile, 'r') as file:
                loadState = json.load(file)
        except FileNotFoundError:
            pass
        for key in loadState:
            self.state[key] = loadState[key]
        if 'autoReimport' in self.state:
            self.toggleAutoReimport(self.state['autoReimport'])
        if 'maysonFile' not in self.state or self.state['maysonFile'] == '':
            self.loadAndImportMayson()
        else:
            self.importMayson()

    def toggleAutoRender(self, checked):
        self.state['autoRender'] = checked
        self.autoSave()

    def toggleAutoReimport(self, checked):
        self.state['autoReimport'] = checked
        self.autoSave()
        if self.fileObserver is not None:
            self.fileObserver.stop()
            self.fileObserver.join()
            self.fileObserver = None
        if checked:
            file = self.state['maysonFile']
            eventHandler = FileModifiedHandler(file)
            eventHandler.fileChanged.connect(
                self.importAndRender if self.state['autoRender'] else self.importMayson)
            self.fileObserver = Observer()
            self.fileObserver.schedule(eventHandler, path=path.dirname(file), recursive=False)
            self.fileObserver.start()

    def importAndRender(self):
        self.importMayson()
        if self.amaysyn is None:
            print("You want to Reimport&Render, but why is aMaySyn not initialized? do some rendering first!")
            return
        self.renderWhateverWasLast()

#################################### GENERAL HELPERS ###########################################

    def selectIndex(self, list, model, index):
        list.selectionModel().setCurrentIndex(model.createIndex(index, 0),
                                              QItemSelectionModel.SelectCurrent)

    def patternIndexOfName(self, name):
        patternNames = [p['name'] for p in self.patternModel.patterns]
        if name in patternNames:
            return patternNames.index(name)
        else:
            return None

    def getTitleAndSynFromMayson(self, maysonFile):
        synFile = '.'.join(maysonFile.split('.')[:-1]) + '.syn'
        title = '.'.join(path.basename(maysonFile).split('.')[:-1])
        return title, synFile

    def placeholder(self):
        print("FUNCTION NOT IMPLEMENTED. Sorrriiiiiiieee! (not sorry.)")
(not sorry.)") #################################### TRACK FUNCTIONALITY ####################################### def track(self): return self.trackModel.tracks[self.trackIndex().row( )] if self.trackModel.rowCount() > 0 else None def trackIndex(self): return self.ui.trackList.currentIndex() def trackModelChanged(self): self.trackModel.dataChanged.emit(self.trackIndex(), self.trackIndex()) def trackLoad(self, currentIndex): cTrack = self.trackModel.tracks[currentIndex.row()] self.ui.editTrackName.setText(cTrack['name']) self.ui.spinTrackVolume.setValue(100 * cTrack['par_norm']) self.ui.checkTrackMute.setChecked(not cTrack['mute']) self.moduleModel.setModules(cTrack['modules']) if len(cTrack['modules']) > 0: self.selectIndex(self.ui.moduleList, self.moduleModel, cTrack['current_module']) self.moduleLoad() self.selectIndex(self.ui.synthList, self.synthModel, cTrack['current_synth']) self.state['selectedTrack'] = currentIndex.row() def trackClone(self): self.trackModel.cloneRow(self.trackIndex().row()) def trackDelete(self): self.trackModel.removeRow(self.trackIndex().row()) def trackSetName(self, name): self.track()['name'] = name self.trackModelChanged() def trackSetVolume(self, value): self.track()['par_norm'] = round(value * .01, 3) self.trackModelChanged() def trackSetMute(self, state): self.track()['mute'] = (state != Qt.Checked) self.trackModelChanged() def trackSetSynth(self, index): self.track()['current_synth'] = self.synthModel.stringList().index( self.synthModel.data(index, Qt.DisplayRole)) self.ui.editSynthName.setText(self.synthName()) self.trackModelChanged() if self.synth()[0] == 'D': self.noteModel.useDrumkit(self.drumModel.stringList()) else: self.noteModel.useDrumkit(None) def trackSetRandomSynth(self): randomIndex = self.synthModel.createIndex( randint(0, len(self.instrumentSynths()) - 1), 0) self.ui.synthList.setCurrentIndex(randomIndex) #################################### MODULE FUNCTIONALITY ###################################### def module(self): return self.moduleModel.modules[self.moduleIndex().row( )] if self.moduleModel.rowCount() > 0 else None def moduleIndex(self): return self.ui.moduleList.currentIndex() def moduleModelChanged(self): self.moduleModel.dataChanged.emit(self.moduleIndex(), self.moduleIndex()) def moduleLoad(self, currentIndex=None): if currentIndex is None: cModule = self.module() else: cModule = self.moduleModel.modules[currentIndex.row()] self.state['selectedModule'] = currentIndex.row() self.ui.patternCombox.setCurrentIndex( self.patternIndexOfName(cModule['pattern']['name'])) self.ui.spinModOn.setValue(cModule['mod_on']) self.ui.spinModTranspose.setValue(cModule['transpose']) def moduleAssignPattern(self, pattern): self.module()['pattern'] = pattern # deepcopy(pattern) def moduleSetPattern(self): self.moduleAssignPattern(self.pattern()) self.moduleModelChanged() def moduleSetModOn(self, value): self.module()['mod_on'] = self.ui.spinModOn.value() self.moduleModelChanged() def moduleSetTranspose(self, value): self.module()['transpose'] = self.ui.spinModTranspose.value() self.moduleModelChanged() #################################### PATTERN FUNCTIONALITY ##################################### def pattern(self): return self.patternModel.patterns[ self.patternIndex()] if self.patternModel.rowCount() > 0 else None def patternIndex(self): return self.ui.patternCombox.currentIndex() def patternLoad(self, currentIndex): cPattern = self.patternModel.patterns[currentIndex] self.noteModel.setNotes(cPattern['notes']) def updateModulesWithChangedPattern(self, 
        self.moduleAssignPattern(self.pattern())
        self.moduleModelChanged()
        self.trackModel.updateModulesWithChangedPattern(self.pattern())
        self.trackModelChanged()

#################################### NOTE FUNCTIONALITY ########################################

    def note(self):
        return self.noteModel.notes[self.noteIndex().row()] if self.noteModel.rowCount() > 0 else None

    def noteIndex(self):
        return self.ui.noteList.currentIndex()

    def noteModelChanged(self):
        self.noteModel.dataChanged.emit(self.noteIndex(), self.noteIndex())

    def noteLoad(self, currentIndex):
        self.ui.editNote.setText(self.noteModel.data(currentIndex, Qt.DisplayRole))
        self.ui.editNote.setCursorPosition(0)

    def noteApplyChanges(self):
        self.noteModel.changeByString(self.noteIndex(), self.ui.editNote.text())

    def noteSetDrum(self, currentIndex):
        if self.drum() is not None:
            self.noteModel.changeDrumTo(self.noteIndex(), self.drum())

################################ SYNTH / DRUM FUNCTIONALITY ####################################

    def synth(self):
        return self.synthModel.data(self.ui.synthList.currentIndex(), Qt.DisplayRole)

    def synthName(self):
        return self.synth()[2:]

    def instrumentSynths(self):
        return [I_synth for I_synth in self.synthModel.stringList() if I_synth[0] == 'I']

    def drum(self):
        if not self.drumIndex():
            print("LOLOLOL DRUM INDEX IS NONE (should never happen)")
        if not self.drumIndex().isValid():
            print("LOLOLOL DRUM INDEX NOT VALID")
        return self.drumModel.data(self.drumIndex(), Qt.DisplayRole)

    def drumIndex(self):
        return self.ui.drumList.currentIndex()

    def synthRandomize(self):
        self.amaysyn.aMaySynatize(reshuffle_randoms=True)

    def synthHardClone(self):
        if self.synth()[0] == 'D':
            # drums take their own (stub) path; synthHardDrum is an instance
            # method, so call it without an extra self argument
            self.synthHardDrum()
            return
        count = 0
        oldID = self.synthName()
        synths = self.instrumentSynths()
        # find the first free numbered ID
        while True:
            formID = oldID + '.' + str(count)
            print("TRYING", formID, synths)
            if 'I_' + formID not in synths:
                break
            count += 1
        try:
            formTemplate = next(form for form in self.amaysyn.last_synatized_forms
                                if form['id'] == oldID)
            formType = formTemplate['type']
            formMode = formTemplate['mode']
            formBody = ' '.join(key + '=' + formTemplate[key] for key in formTemplate
                                if key not in ['type', 'id', 'mode'])
            if formMode:
                formBody += ' mode=' + ','.join(formMode)
        except StopIteration:
            print("Current synth is not compiled yet. Do so and try again.")
            return
        except:
            print("could not CLONE HARD:", formID)   # formTemplate may be unbound here
            raise
        else:
            with open(self.state['synFile'], mode='a') as filehandle:
                filehandle.write('\n' + formType + 4 * ' ' + formID + 4 * ' ' + formBody)
            self.loadSynthsFromSynFile()

    def synthHardDrum(self):
        print("NOT IMPLEMENTED YET")
        return
        # dead code below: mirror of synthHardClone, kept as a stub
        count = 0
        oldID = self.synthName()
        synths = self.instrumentSynths()
        while True:
            formID = oldID + '.' + str(count)
            print("TRYING", formID, synths)
            if 'I_' + formID not in synths:
                break
            count += 1
        try:
            formTemplate = next(form for form in self.amaysyn.last_synatized_forms
                                if form['id'] == oldID)
            formType = formTemplate['type']
            formMode = formTemplate['mode']
            formBody = ' '.join(key + '=' + formTemplate[key] for key in formTemplate
                                if key not in ['type', 'id', 'mode'])
            if formMode:
                formBody += ' mode=' + ','.join(formMode)
        except StopIteration:
            print("Current synth is not compiled yet. Do so and try again.")
Do so and try again.") return except: print("could not CLONE HARD:", formID, formTemplate) raise else: with open(self.state['synFile'], mode='a') as filehandle: filehandle.write('\n' + formType + 4 * ' ' + formID + 4 * ' ' + formBody) self.loadSynthsFromSynFile() def synthChangeName(self): if self.synth()[0] != 'I': print("Nah. Select an instrument synth (I_blabloo)") return newID = self.ui.editSynthName.text() if newID == '': return formID = self.synthName() tmpFile = self.state['synFile'] + '.tmp' move(self.state['synFile'], tmpFile) with open(tmpFile, mode='r') as tmp_handle: with open(self.state['synFile'], mode='w') as new_handle: for line in tmp_handle.readlines(): lineparse = line.split() if len(lineparse) > 2 and lineparse[0] in [ 'main', 'maindrum' ] and lineparse[1] == formID: new_handle.write( line.replace(' ' + formID + ' ', ' ' + newID + ' ')) else: new_handle.write(line) self.loadSynthsFromSynFile() def loadSynthsFromSynFile(self): self.amaysyn.aMaySynatize() self.synthModel.setStringList(self.amaysyn.synths) self.synthModel.dataChanged.emit( self.synthModel.createIndex(0, 0), self.synthModel.createIndex(self.synthModel.rowCount(), 0)) self.drumModel.setStringList(self.amaysyn.drumkit) self.drumModel.dataChanged.emit( self.drumModel.createIndex(0, 0), self.drumModel.createIndex(self.drumModel.rowCount(), 0)) self.trackModel.setSynthList(self.amaysyn.synths) self.trackModelChanged() # TODO: function to change drumkit order / assignment? ######################################## SleaZYNTHesizer ####################################### def initAMaySyn(self): self.amaysyn = aMaySynBuilder(self, self.state['synFile'], self.info) def initAudio(self): self.audioformat = QAudioFormat() self.audioformat.setSampleRate(self.samplerate) self.audioformat.setChannelCount(2) self.audioformat.setSampleSize(32) self.audioformat.setCodec('audio/pcm') self.audioformat.setByteOrder(QAudioFormat.LittleEndian) self.audioformat.setSampleType(QAudioFormat.Float) self.audiooutput = QAudioOutput(self.audioformat) self.audiooutput.setVolume(1.0) def stopPlayback(self): self.audiooutput.stop() def renderWhateverWasLast(self): if self.state['lastRendered'] == 'module': self.renderModule() elif self.state['lastRendered'] == 'track': self.renderTrack() else: self.renderSong() def renderModule(self): print(self.track(), self.module()) self.state['lastRendered'] = 'module' restoreMute = self.track()['mute'] self.track()['mute'] = False modInfo = deepcopy(self.info) modInfo['B_offset'] = self.module()['mod_on'] modInfo['B_stop'] = self.module()['mod_on'] + self.module( )['pattern']['length'] self.amaysyn.info = modInfo self.amaysyn.extra_time_shift = self.state['extraTimeShift'] shader = self.amaysyn.build(tracks=[self.track()], patterns=[self.module()['pattern']]) self.amaysyn.info = self.info self.track()['mute'] = restoreMute self.executeShader(shader) def renderTrack(self): self.state['lastRendered'] = 'track' restoreMute = self.track()['mute'] self.track()['mute'] = False self.amaysyn.extra_time_shift = self.state['extraTimeShift'] shader = self.amaysyn.build(tracks=[self.track()], patterns=self.patternModel.patterns) self.track()['mute'] = restoreMute self.executeShader(shader) def renderSong(self): self.state['lastRendered'] = 'song' self.amaysyn.extra_time_shift = self.state['extraTimeShift'] shader = self.amaysyn.build(tracks=self.trackModel.tracks, patterns=self.patternModel.patterns) self.executeShader(shader) def executeShader(self, shader): self.ui.codeEditor.clear() 
        self.ui.codeEditor.insertPlainText(shader.replace(4 * ' ', '\t').replace(3 * ' ', '\t'))
        self.ui.codeEditor.ensureCursorVisible()

        sequenceLength = len(self.amaysyn.sequence) if self.amaysyn.sequence is not None else 0
        if not self.amaysyn.useSequenceTexture and sequenceLength > pow(2, 14):
            QMessageBox.critical(
                self, "I CAN'T",
                f"Either switch to using the Sequence Texture (ask QM), or reduce the sequence size by limiting the offset/stop positions or muting tracks.\nCurrent sequence length is:\n{sequenceLength} > {pow(2,14)}")
            return

        self.bytearray = self.amaysyn.executeShader(
            shader, self.samplerate, self.texsize, renderWAV=self.state['writeWAV'])
        self.audiobuffer = QBuffer(self.bytearray)
        self.audiobuffer.open(QIODevice.ReadOnly)
        self.audiooutput.stop()
        self.audiooutput.start(self.audiobuffer)

###################################### DEBUG STUFF #############################################

    def debugOutput(self):
        print("TRACKS:", self.trackModel.rowCount())
        print("===== TRACK ACCUMULATION =====")
        track_accumulate = 0
        for t in self.trackModel.tracks:
            delta = len(t['modules'])
            print(f"{t['name']:>20} {track_accumulate:>10} {track_accumulate + delta:>10}")
            track_accumulate += delta
        print("END AT", track_accumulate)
        print()
        print("PATTERNS:", self.patternModel.rowCount())
        print("===== PATTERN ACCUMULATION =====")
        pattern_accumulate = 0
        for p in self.patternModel.patterns:
            delta = len(p['notes'])
            print(f"{p['name']:>20} {pattern_accumulate:>10} {pattern_accumulate + delta:>10}")
            pattern_accumulate += delta
        print("END AT", pattern_accumulate)
        print()
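# Generic form of the playback tail of executeShader(), assuming raw PCM in a
# QByteArray (pcm_bytes) matching the format given to some QAudioOutput (out);
# both names are illustrative. QBuffer wraps the bytes as the QIODevice that
# out.start() consumes:
from PyQt5.QtCore import QBuffer, QIODevice

buf = QBuffer(pcm_bytes)      # keep a reference: the output reads it lazily
buf.open(QIODevice.ReadOnly)
out.stop()                    # restart cleanly if something is already playing
out.start(buf)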
def initializeAudio(self, deviceInfo):
    """ Make a QAudioInput from the given device """
    # make buffers of 40 ms of samples
    self.refRate = 0.04
    # mono, 32-bit float audio
    fmt = QAudioFormat()
    fmt.setSampleRate(self.getSampleRate())
    fmt.setChannelCount(1)
    fmt.setSampleSize(32)
    fmt.setSampleType(QAudioFormat.Float)
    fmt.setByteOrder(QAudioFormat.LittleEndian)
    fmt.setCodec("audio/pcm")

    if not deviceInfo.isFormatSupported(fmt):
        fmt = deviceInfo.nearestFormat(fmt)

    self.audioInput = QAudioInput(deviceInfo, fmt)
    # self.buflen is assumed to be set elsewhere from refRate and the rate;
    # the size here is in bytes (4 bytes per float32 sample)
    self.audioInput.setBufferSize(4 * self.buflen)
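# Sketch of consuming the stream set up in initializeAudio(); start() without
# an argument returns a pull-mode QIODevice. The method names and the numpy
# conversion are illustrative, not part of the original class:
def startListening(self):
    self.ioDevice = self.audioInput.start()
    self.ioDevice.readyRead.connect(self.onAudioReady)

def onAudioReady(self):
    raw = self.ioDevice.readAll()   # QByteArray of float32 mono PCM
    samples = np.frombuffer(bytes(raw), dtype=np.float32)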
def __init__(self, parent=None):
    print("Tone Widget inst")
    QWidget.__init__(self, parent)
    self.activeGen = ActiveGen(sampleRate=SAMPLE_RATE, samplePerRead=Meep.SAMPLES_PER_READ)
    self.createUI(self)

    # Meep playback format initialization
    format = QAudioFormat()
    format.setChannelCount(AUDIO_CHANS)
    format.setSampleRate(SAMPLE_RATE)
    format.setSampleSize(SAMPLE_SIZE)
    format.setCodec("audio/pcm")
    format.setByteOrder(QAudioFormat.LittleEndian)
    format.setSampleType(QAudioFormat.SignedInt)

    # check compatibility of format with device
    info = QAudioDeviceInfo(QAudioDeviceInfo.defaultOutputDevice())
    if not info.isFormatSupported(format):
        print("Raw audio format not supported by backend, cannot play audio.")
        return

    # Audio Output init
    self.output = QAudioOutput(format, self)
    output_buffer_size = int(2 * SAMPLE_RATE * CTRL_INTERVAL / 1000)
    self.output.setBufferSize(output_buffer_size)

    # initialize and start the audio playback
    self.generator = Meep(format, self.activeGen, self)
    self.generator.start()
    self.output.start(self.generator)

    # Create the port reader object
    self.midiListener = MidiPortReader()
    # Create a thread which will read it
    self.listenerThread = QThread()
    # move the QObject to the new thread
    self.midiListener.moveToThread(self.listenerThread)

    # Connect the signals to slot functions
    self.midiListener.newNoteFrequency.connect(self.activeGen.setFreq)
    self.midiListener.newNotePress.connect(self.activeGen.setNote)

    # Tell Qt the function to call when it starts the thread
    self.listenerThread.started.connect(self.midiListener.listener)
    # start the thread
    self.listenerThread.start()
def __init__(self, parent=None):
    # UI
    QWidget.__init__(self, parent)
    self.create_UI(parent)

    # audio formatting
    format = QAudioFormat()
    format.setChannelCount(AUDIO_CHANS)
    format.setSampleRate(SAMPLE_RATE)
    format.setSampleSize(SAMPLE_SIZE)
    format.setCodec("audio/pcm")
    format.setByteOrder(QAudioFormat.LittleEndian)
    format.setSampleType(QAudioFormat.SignedInt)

    self.output = QAudioOutput(format, self)
    output_buffer_size = int(2 * SAMPLE_RATE * CTRL_INTERVAL / 1000)
    self.output.setBufferSize(output_buffer_size)

    self.generator = Generator(format, self)

    # THREADS
    self.midiListener = MidiPortReader()
    self.listenerThread = QThread()
    self.midiListener.moveToThread(self.listenerThread)
    self.listenerThread.started.connect(self.midiListener.listener)
    self.listenerThread.start()

    self.midiListener.noteOff.connect(self.generator.noteOff)
    self.midiListener.noteVelocity.connect(self.generator.noteVelocity)
    self.midiListener.noteOn.connect(self.generator.noteOn)
    self.volumeSlider.valueChanged.connect(self.generator.volSlide)
    self.qfacSlider.valueChanged.connect(self.generator.qFactor)

    self.generator.start()
    self.output.start(self.generator)
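# Illustrative skeleton of the contract Generator/Meep are assumed to satisfy:
# QAudioOutput.start(device) pulls audio by calling readData() repeatedly.
# This is not the authors' implementation, just the minimal QIODevice shape:
from PyQt5.QtCore import QIODevice

class SilenceGenerator(QIODevice):
    def start(self):
        self.open(QIODevice.ReadOnly)

    def readData(self, maxlen):
        return bytes(maxlen)    # all-zero bytes = silence; a synth renders here

    def writeData(self, data):
        return 0                # read-only device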
class AudioTest(QMainWindow):

    PUSH_MODE_LABEL = "Enable push mode"
    PULL_MODE_LABEL = "Enable pull mode"
    SUSPEND_LABEL = "Suspend playback"
    RESUME_LABEL = "Resume playback"

    DurationSeconds = 1
    ToneSampleRateHz = 600
    DataSampleRateHz = 44100

    def __init__(self):
        super(AudioTest, self).__init__()
        self.m_device = QAudioDeviceInfo.defaultOutputDevice()
        self.m_output = None
        self.initializeWindow()
        self.initializeAudio()

    def initializeWindow(self):
        layout = QVBoxLayout()

        self.m_deviceBox = QComboBox(activated=self.deviceChanged)
        for deviceInfo in QAudioDeviceInfo.availableDevices(QAudio.AudioOutput):
            self.m_deviceBox.addItem(deviceInfo.deviceName(), deviceInfo)
        layout.addWidget(self.m_deviceBox)

        self.m_modeButton = QPushButton(clicked=self.toggleMode)
        self.m_modeButton.setText(self.PUSH_MODE_LABEL)
        layout.addWidget(self.m_modeButton)

        self.m_suspendResumeButton = QPushButton(clicked=self.toggleSuspendResume)
        self.m_suspendResumeButton.setText(self.SUSPEND_LABEL)
        layout.addWidget(self.m_suspendResumeButton)

        volumeBox = QHBoxLayout()
        volumeLabel = QLabel("Volume:")
        self.m_volumeSlider = QSlider(Qt.Horizontal, minimum=0, maximum=100,
                                      singleStep=10, valueChanged=self.volumeChanged)
        volumeBox.addWidget(volumeLabel)
        volumeBox.addWidget(self.m_volumeSlider)
        layout.addLayout(volumeBox)

        window = QWidget()
        window.setLayout(layout)
        self.setCentralWidget(window)

    def initializeAudio(self):
        self.m_pullTimer = QTimer(self, timeout=self.pullTimerExpired)
        self.m_pullMode = True

        self.m_format = QAudioFormat()
        self.m_format.setSampleRate(self.DataSampleRateHz)
        self.m_format.setChannelCount(1)
        self.m_format.setSampleSize(16)
        self.m_format.setCodec('audio/pcm')
        self.m_format.setByteOrder(QAudioFormat.LittleEndian)
        self.m_format.setSampleType(QAudioFormat.SignedInt)

        info = QAudioDeviceInfo(QAudioDeviceInfo.defaultOutputDevice())
        if not info.isFormatSupported(self.m_format):
            qWarning("Default format not supported - trying to use nearest")
            self.m_format = info.nearestFormat(self.m_format)

        self.m_generator = Generator(self.m_format,
                                     self.DurationSeconds * 1000000,
                                     self.ToneSampleRateHz, self)
        self.createAudioOutput()

    def createAudioOutput(self):
        self.m_audioOutput = QAudioOutput(self.m_device, self.m_format)
        self.m_audioOutput.notify.connect(self.notified)
        self.m_audioOutput.stateChanged.connect(self.handleStateChanged)
        self.m_generator.start()
        self.m_audioOutput.start(self.m_generator)
        # setValue wants an int; volume() reports a 0.0-1.0 float
        self.m_volumeSlider.setValue(int(self.m_audioOutput.volume() * 100))

    def deviceChanged(self, index):
        self.m_pullTimer.stop()
        self.m_generator.stop()
        self.m_audioOutput.stop()
        self.m_device = self.m_deviceBox.itemData(index)
        self.createAudioOutput()

    def volumeChanged(self, value):
        if self.m_audioOutput is not None:
            self.m_audioOutput.setVolume(value / 100.0)

    def notified(self):
        qWarning("bytesFree = %d, elapsedUSecs = %d, processedUSecs = %d" % (
                self.m_audioOutput.bytesFree(),
                self.m_audioOutput.elapsedUSecs(),
                self.m_audioOutput.processedUSecs()))

    def pullTimerExpired(self):
        # the timer drives push mode: drain the generator into the device
        # returned by start() as long as there is free space in the buffer
        if self.m_audioOutput is not None and self.m_audioOutput.state() != QAudio.StoppedState:
            chunks = self.m_audioOutput.bytesFree() // self.m_audioOutput.periodSize()
            for _ in range(chunks):
                data = self.m_generator.read(self.m_audioOutput.periodSize())
                if data is None or len(data) != self.m_audioOutput.periodSize():
                    break
                self.m_output.write(data)

    def toggleMode(self):
        self.m_pullTimer.stop()
        self.m_audioOutput.stop()

        if self.m_pullMode:
            # switch to push mode: start() without a device hands us one to write to
            self.m_modeButton.setText(self.PULL_MODE_LABEL)
            self.m_output = self.m_audioOutput.start()
            self.m_pullMode = False
            self.m_pullTimer.start(20)
        else:
            # back to pull mode: hand the generator QIODevice to Qt
            self.m_modeButton.setText(self.PUSH_MODE_LABEL)
            self.m_pullMode = True
            self.m_audioOutput.start(self.m_generator)

        self.m_suspendResumeButton.setText(self.SUSPEND_LABEL)

    def toggleSuspendResume(self):
        if self.m_audioOutput.state() == QAudio.SuspendedState:
            qWarning("status: Suspended, resume()")
            self.m_audioOutput.resume()
            self.m_suspendResumeButton.setText(self.SUSPEND_LABEL)
        elif self.m_audioOutput.state() == QAudio.ActiveState:
            qWarning("status: Active, suspend()")
            self.m_audioOutput.suspend()
            self.m_suspendResumeButton.setText(self.RESUME_LABEL)
        elif self.m_audioOutput.state() == QAudio.StoppedState:
            qWarning("status: Stopped, resume()")
            self.m_audioOutput.resume()
            self.m_suspendResumeButton.setText(self.SUSPEND_LABEL)
        elif self.m_audioOutput.state() == QAudio.IdleState:
            qWarning("status: IdleState")

    stateMap = {
        QAudio.ActiveState: "ActiveState",
        QAudio.SuspendedState: "SuspendedState",
        QAudio.StoppedState: "StoppedState",
        QAudio.IdleState: "IdleState"
    }

    def handleStateChanged(self, state):
        qWarning("state = " + self.stateMap.get(state, "Unknown"))
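# The two modes of the example side by side (m_audioOutput and m_generator as
# above). Pull mode: Qt drains the generator QIODevice on its own schedule:
#     self.m_audioOutput.start(self.m_generator)
# Push mode: start() returns a device that pullTimerExpired() must feed:
#     self.m_output = self.m_audioOutput.start()
#     self.m_output.write(self.m_generator.read(self.m_audioOutput.periodSize()))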
def get_audio_output(params):
    """ Create and return a QAudioOutput from wave-module params
        (the namedtuple returned by wave.open(...).getparams()). """
    enc = 'audio/pcm'
    fmt = QAudioFormat()
    fmt.setChannelCount(params.nchannels)
    fmt.setSampleRate(params.framerate)
    fmt.setSampleSize(params.sampwidth * 8)   # sampwidth is in bytes
    fmt.setCodec(enc)
    fmt.setByteOrder(QAudioFormat.LittleEndian)
    fmt.setSampleType(QAudioFormat.SignedInt)
    return QAudioOutput(fmt)
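# Hedged usage sketch: play "test.wav" (path illustrative) through the helper
# above. A running Qt event loop is assumed, and the QBuffer/QByteArray must
# outlive playback, so keep references to them.
import wave
from PyQt5.QtCore import QBuffer, QByteArray, QIODevice

w = wave.open("test.wav", "rb")
out = get_audio_output(w.getparams())
data = QByteArray(w.readframes(w.getnframes()))
buf = QBuffer(data)
buf.open(QIODevice.ReadOnly)
out.start(buf)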
def initAudioInput(self, filepath):
    self.outputFile = QFile()
    self.outputFile.setFileName(filepath)
    self.outputFile.open(QIODevice.WriteOnly | QIODevice.Truncate)

    format = QAudioFormat()
    format.setSampleType(QAudioFormat.Float)
    format.setSampleRate(44100)
    format.setChannelCount(1)
    format.setSampleSize(32)
    format.setCodec("audio/pcm")
    format.setByteOrder(QAudioFormat.LittleEndian)
    print(format.codec())

    # self.audio_input = QAudioInput(QAudioDeviceInfo.defaultInputDevice(), format)
    self.audio_input = QAudioInput(format)
    print(self.audio_input.error())
    print(self.audio_input.state())