def savewav(self, filename):
    # Write the recorded samples as a mono, 16-bit WAV file.
    wf = wave.open(filename, 'wb')
    wf.setnchannels(1)
    wf.setsampwidth(2)
    wf.setframerate(self.SAMPLING_RATE)
    # tobytes() replaces the deprecated tostring(); int16 matches the 2-byte sample width
    wf.writeframes(np.array(self.Voice_String, dtype=np.int16).tobytes())
    wf.close()
def capture_audio(callback, final_callback):
    import time
    import pyaudio
    import wave as wv
    from array import array

    FORMAT = pyaudio.paInt16
    RATE = 16000
    CHUNK = 1024
    # Number of chunks in roughly one second of audio (integer ceiling division)
    COUNTCHUNK = (RATE + CHUNK - 1) // CHUNK
    RECORD_SECONDS = 1
    FILE_NAME = "/tmp/mic_rec.wav"

    audio = pyaudio.PyAudio()
    stream = audio.open(format=FORMAT, rate=RATE, channels=1, input=True,
                        frames_per_buffer=CHUNK)

    # Start recording: keep a rolling window of the last COUNTCHUNK chunks
    wave = []
    callback_free = [True]
    print("started")
    last = time.time()
    lastloudsoundtime = time.time() - 100000
    while True:
        data = stream.read(CHUNK)
        data_chunk = array('h', data)
        vol = max(data_chunk)
        # print("Volume {}".format(vol))
        wave.append(data)
        if len(wave) > COUNTCHUNK:
            wave.pop(0)
        if vol >= 2500:
            lastloudsoundtime = time.time()
        # Fire at most every 50 ms, and only within 0.8 s of the last loud sound
        if time.time() - last > 0.05 and time.time() - lastloudsoundtime < 0.8:
            last = time.time()
            if len(wave) == COUNTCHUNK:
                # Write the rolling window to a temporary WAV file
                wavfile = wv.open(FILE_NAME, 'wb')
                wavfile.setnchannels(1)
                wavfile.setsampwidth(audio.get_sample_size(FORMAT))
                wavfile.setframerate(RATE)
                wavfile.writeframes(b''.join(wave))
                wavfile.close()
                if callback_free[0]:
                    callback_free[0] = False
                    callback(callback_free, FILE_NAME, final_callback)
                    # print("Something is said")
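# A minimal sketch of how capture_audio could be driven; the real callbacks are not shown
# in this section, so transcribe() and on_result() below are hypothetical names. The
# callback must set callback_free[0] back to True when it finishes, or capture_audio
# will never trigger again.
def on_result(text):
    # hypothetical final callback: just print whatever the recognizer returned
    print("recognized:", text)

def transcribe(callback_free, wav_path, final_callback):
    # hypothetical callback: hand the temporary WAV file to some recognizer
    try:
        final_callback("<transcription of %s>" % wav_path)
    finally:
        callback_free[0] = True  # allow capture_audio to trigger again

# capture_audio(transcribe, on_result)  # blocks forever, reading the default microphone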
def save_wave_file(filename, data):
    '''Save the recorded data to a WAV file.'''
    # Recording parameters
    framerate = 8000
    channels = 1
    sampwidth = 2
    wf = wave.open(filename, 'wb')
    wf.setnchannels(channels)
    wf.setsampwidth(sampwidth)
    wf.setframerate(framerate)
    # data is a list of byte chunks, so join with a bytes separator
    wf.writeframes(b"".join(data))
    wf.close()
    return 0
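# A usage sketch for save_wave_file, assuming PyAudio is available and the capture
# parameters match the hard-coded ones above (8000 Hz, mono, 16-bit). record_seconds()
# is a hypothetical helper, not part of the original code.
import pyaudio

def record_seconds(seconds=3, rate=8000, chunk=1024):
    pa = pyaudio.PyAudio()
    stream = pa.open(format=pyaudio.paInt16, channels=1, rate=rate,
                     input=True, frames_per_buffer=chunk)
    frames = [stream.read(chunk) for _ in range(int(rate / chunk * seconds))]
    stream.stop_stream()
    stream.close()
    pa.terminate()
    return frames

# save_wave_file("demo.wav", record_seconds())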
def stop(self, path='output.wav'):
    sd.stop()
    duration = int(math.ceil(time.time() - self._start_time))
    # Trim the preallocated buffer to the samples actually recorded
    data = self._recording[:duration * int(sd.default.samplerate)]
    # Save the audio as a 16-bit WAV file
    audio = pyaudio.PyAudio()
    format = pyaudio.paInt16
    wavfile = wave.open(path, 'wb')
    wavfile.setnchannels(CHANNELS)
    wavfile.setsampwidth(audio.get_sample_size(format))
    wavfile.setframerate(SAMPLERATE)
    # tobytes() replaces the deprecated tostring()
    wavfile.writeframes(data.tobytes())
    wavfile.close()
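# stop() above assumes a matching start() that pre-allocates a buffer and records into it
# with sounddevice. A minimal sketch, assuming SAMPLERATE and CHANNELS are module-level
# constants and a hypothetical 60-second cap on recording length:
import time
import sounddevice as sd

SAMPLERATE = 44100
CHANNELS = 1
MAX_SECONDS = 60  # assumed maximum recording length

class Recorder:
    def start(self):
        sd.default.samplerate = SAMPLERATE
        self._start_time = time.time()
        # sd.rec() returns immediately and fills the array in the background
        self._recording = sd.rec(int(MAX_SECONDS * SAMPLERATE),
                                 samplerate=SAMPLERATE,
                                 channels=CHANNELS,
                                 dtype='int16')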
def run(self):
    audio = PyAudio()
    # The wave module only supports read ('rb') and write ('wb') modes
    wavfile = wave.open(self.audiofile, 'wb')
    wavfile.setnchannels(self.channels)
    wavfile.setsampwidth(audio.get_sample_size(self.format))
    wavfile.setframerate(self.rate)
    wavstream = audio.open(format=self.format,
                           channels=self.channels,
                           rate=self.rate,
                           input=True,
                           frames_per_buffer=self.chunk)
    # Keep appending frames to the file until recording is stopped
    while self.bRecord:
        wavfile.writeframes(wavstream.read(self.chunk))
    wavstream.stop_stream()
    wavstream.close()
    wavfile.close()
    audio.terminate()
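# run() above reads instance attributes that are not shown in this section. One way the
# surrounding thread class might look; this is a sketch, and every name not referenced in
# run() (constructor arguments, stop()) is an assumption:
import threading
import pyaudio

class RecorderThread(threading.Thread):
    def __init__(self, audiofile, rate=16000, channels=1, chunk=1024):
        super().__init__()
        self.audiofile = audiofile
        self.format = pyaudio.paInt16
        self.rate = rate
        self.channels = channels
        self.chunk = chunk
        self.bRecord = True  # run() keeps recording while this stays True

    def stop(self):
        self.bRecord = False

# rec = RecorderThread("session.wav"); rec.start(); ...; rec.stop(); rec.join()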
def run(self):
    audio = PyAudio()
    print("Sound device:", self.dev_idx)
    device_info = audio.get_device_info_by_index(self.dev_idx)
    # Use the larger of the input/output channel counts reported by the device
    self.channels = device_info["maxInputChannels"] if (
        device_info["maxOutputChannels"] < device_info["maxInputChannels"]
    ) else device_info["maxOutputChannels"]
    self.rate = int(device_info["defaultSampleRate"])
    print(color.yellow(str(device_info)))
    # as_loopback requires a WASAPI-loopback-capable PyAudio build
    wavstream = audio.open(format=self.format,
                           channels=self.channels,
                           rate=self.rate,
                           input=True,
                           frames_per_buffer=self.chunk,
                           input_device_index=device_info["index"],
                           as_loopback=True)
    # wavstream = audio.open(format=self.format,
    #                        channels=self.channels,
    #                        rate=self.rate,
    #                        input=True,
    #                        frames_per_buffer=self.chunk)
    # If nothing is playing back, the loopback stream yields no data and read() blocks.
    # Read the input stream in a loop.
    while self.bRecord:
        data = wavstream.read(self.chunk)
        self._frames.append(data)
        self._status = 1
    wavstream.stop_stream()
    wavstream.close()
    # Save the captured frames to a file
    print("Saving ....", self.audiofile)
    with wave.open(self.audiofile, 'wb') as wavfile:
        wavfile.setnchannels(self.channels)
        wavfile.setsampwidth(audio.get_sample_size(self.format))
        wavfile.setframerate(self.rate)
        wavfile.writeframes(b''.join(self._frames))
    audio.terminate()
    self._status = 2
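# A sketch of how dev_idx for the loopback recorder above might be chosen, by listing
# the host's audio devices; device names and indices vary per machine:
import pyaudio

pa = pyaudio.PyAudio()
for i in range(pa.get_device_count()):
    info = pa.get_device_info_by_index(i)
    print(i, info["name"],
          "in:", info["maxInputChannels"],
          "out:", info["maxOutputChannels"])
pa.terminate()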
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)

frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
    data = stream.read(CHUNK)
    frames.append(data)
stream.stop_stream()
stream.close()
audio.terminate()

# Write the captured frames to a WAV file
wavfile = wave.open(FILE_NAME, 'wb')
wavfile.setnchannels(CHANNELS)
wavfile.setsampwidth(audio.get_sample_size(FORMAT))
wavfile.setframerate(RATE)
wavfile.writeframes(b''.join(frames))
wavfile.close()

# Read the recording back and plot the waveform and spectrogram
wavfile = wave.open(FILE_NAME, 'r')
samplingFrequency, signalData = scipy.io.wavfile.read('ses.wav')
plot.subplot(211)
plot.plot(signalData)
plot.subplot(212)
plot.specgram(signalData, Fs=samplingFrequency)
plot.show()

# Part 3 - real-time signal processing with live microphone data
# XOR the received bit string with the key and decode the result as hex
binVal = int(bitList, 2) ^ int(key, 2)
hexString = ""
hexString += hex(binVal)[2:]  # strip only the '0x' prefix (no trailing 'L' in Python 3)
decryptMess = binascii.unhexlify(hexString)
print("original text: " + decryptMess.decode())

stream.stop_stream()
stream.close()
p.terminate()

wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()

# ------------------------------------------------------
# plotting sound below
# ------------------------------------------------------
spf = wave.open('input.wav', 'r')
signal = spf.readframes(-1)
# frombuffer replaces the deprecated np.fromstring
signal = np.frombuffer(signal, dtype=np.int16)
fs = spf.getframerate()

# If stereo
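# A self-contained sketch of the XOR decryption step above, assuming bitList and key are
# '0'/'1' strings of equal length and the plaintext is ASCII; xor_bits_to_text() is a
# hypothetical helper, not part of the original code.
import binascii

def xor_bits_to_text(bitList, key):
    binVal = int(bitList, 2) ^ int(key, 2)
    hexString = format(binVal, 'x')
    if len(hexString) % 2:            # unhexlify needs an even number of hex digits
        hexString = '0' + hexString
    return binascii.unhexlify(hexString).decode()

message = "hi"
bits = format(int(binascii.hexlify(message.encode()), 16), 'b')
key = "1" * len(bits)                 # toy key, same length as the message bits
cipher_bits = format(int(bits, 2) ^ int(key, 2), 'b').zfill(len(bits))
print(xor_bits_to_text(cipher_bits, key))  # -> "hi"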