def play(frames):
    """Send raw 8-bit mono sample frames to the default output at 10 kHz."""
    audio = PyAudio()
    out = audio.open(
        format=audio.get_format_from_width(1),
        channels=1,
        rate=10000,
        output=True,
    )
    out.write(frames)
    out.stop_stream()
    out.close()
    audio.terminate()
def Audio_play(filepath):
    """Play up to ~15 seconds of the WAV file at *filepath*.

    Fixes: ``readframes()`` returns bytes, so the old ``data == " "``
    end-of-file test never matched under Python 3; the wave file is now
    closed and cleanup runs even if playback raises.
    """
    CHUNK = 1024
    wf = wave.open(filepath, 'rb')
    pa = PyAudio()
    default_output = pa.get_default_host_api_info().get('defaultOutputDevice')
    stream = pa.open(format=pa.get_format_from_width(wf.getsampwidth()),
                     channels=wf.getnchannels(),
                     rate=wf.getframerate(),
                     output=True,
                     output_device_index=default_output)
    # Number of CHUNK-sized reads that covers roughly 15 seconds.
    num = int(wf.getframerate() / CHUNK * 15)
    logging.info(">> START TO PLAY AUDIO")
    try:
        while num:
            data = wf.readframes(CHUNK)
            if not data:  # b"" marks end of file
                break
            stream.write(data)
            num -= 1
    finally:
        stream.stop_stream()
        stream.close()
        pa.terminate()
        wf.close()
def run(self):  # Function executed by each playback thread.
    """Play the WAV file at self.prams, then post-process it.

    type 0 means a user recording: transcribe it with main1().
    Any other type is machine-generated speech: delete the file after
    playback so generated audio does not pile up.
    """
    chunk = 2014  # NOTE(review): probably intended 1024 -- confirm
    wf = wave.open(self.prams, 'rb')
    p = PyAudio()
    stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                    channels=wf.getnchannels(),
                    rate=wf.getframerate(),
                    output=True)
    print("开始播放")
    while True:
        data = wf.readframes(chunk)
        if data == b"":  # end of file
            break
        stream.write(data)
        # print("正在读呢")
    print("播放完毕")
    stream.close()
    p.terminate()
    wf.close()
    # main1 converts the recording to text; machine speech files are
    # removed after playback to avoid accumulating.
    if self.type == 0:
        main1(self.prams, 0)
    else:
        os.remove(self.prams)
def play(frequency, length):
    """Play a sine tone of *frequency* Hz for *length* seconds (8-bit, 16 kHz).

    Fixes: the sample buffer is assembled as bytes (a str cannot be
    written to a PyAudio stream under Python 3) and built with a
    bytearray instead of quadratic string concatenation.
    """
    print("Playing impulse at %sHz" % frequency)
    frequency = float(frequency)
    BITRATE = 16000  # frames per second
    NUMBEROFFRAMES = int(BITRATE * length)
    RESTFRAMES = NUMBEROFFRAMES % BITRATE
    wavedata = bytearray()
    for x in range(NUMBEROFFRAMES):
        wavedata.append(int(math.sin(x / ((BITRATE / frequency) / math.pi)) * 127 + 128))
    # Pad the final frameset with silence (128 = unsigned 8-bit midpoint).
    wavedata.extend(b'\x80' * RESTFRAMES)
    p = PyAudio()
    stream = p.open(
        format=p.get_format_from_width(1),
        channels=1,
        rate=BITRATE,
        output=True,
    )
    stream.write(bytes(wavedata))
    stream.stop_stream()
    stream.close()
    p.terminate()
class pybeeptone:
    """Simple beep-tone generator on the default audio output.

    Fixes: the *rate* constructor argument was previously ignored
    (self.rate was hard-coded to 44100), and sample buffers were built
    as Python 2 str objects (xrange/chr-concat) -- they are now bytes so
    the class also works on Python 3. The default rate is unchanged.
    """

    def __init__(self, rate=44100):
        self.rate = rate  # honour the caller-supplied sample rate
        self.pyaudio = PyAudio()
        self.stream = self.pyaudio.open(
            format=self.pyaudio.get_format_from_width(1),  # 8-bit samples
            channels=1,
            rate=self.rate,
            output=True)

    def play_tone(self, freq=1000, duration=0.3):
        """Play a sine tone of *freq* Hz lasting *duration* seconds."""
        rate = self.rate
        length = int(math.ceil(rate * duration))
        data = bytes(
            int(math.sin(x / ((rate / freq) / math.pi)) * 127 + 128)
            for x in range(length))
        self.stream.write(data)

    def play_rest(self, duration):
        """Play silence (the 8-bit midpoint, 128) for *duration* seconds."""
        length = int(math.ceil(self.rate * duration))
        self.stream.write(b'\x80' * length)

    def close(self):
        """Stop and release the stream and the PyAudio session."""
        self.stream.stop_stream()
        self.stream.close()
        self.pyaudio.terminate()
def Play_sound(self, File_name="resources/speak.wav"): try: wf = wave.open(File_name, 'rb') except: try: os.system("mpg123 " + File_name) #这里的mpg123是一个程序插件,可以用来播放MP3文件 print("*" * 5, "播放成功!播放格式非wav!播放了本地文件!") return True except: print("*" * 5, "播放失败!没有此目录或播放格式非wav ") return False p = PyAudio() stream = p.open(format=p.get_format_from_width(wf.getsampwidth()), channels=wf.getnchannels(), rate=wf.getframerate(), output=True) # 读数据 data = wf.readframes(self.CHUNK) print("*" * 5, "正在播放", "*" * 5) # 播放流 while len(data) > 0: stream.write(data) data = wf.readframes(self.CHUNK) print(".") stream.stop_stream() # 暂停播放/录制 stream.close() # 终止播放 p.terminate() # 终止portaudio会话 return True
def sine_tone(frequency, duration, volume=1, sample_rate=22050):
    """Alternately play ZERO_FREQUENCY / ONE_FREQUENCY tones ten times.

    16-bit stereo output. The *frequency* argument is overwritten inside
    the loop, so only the module-level ZERO_FREQUENCY / ONE_FREQUENCY
    values are audible. NOTE(review): Python 2 code (xrange, izip).
    """
    n_samples = int(sample_rate * duration)
    restframes = n_samples % sample_rate
    p = PyAudio()
    stream = p.open(format=p.get_format_from_width(2),  # 16 bit
                    channels=2,
                    rate=sample_rate,
                    output=True)
    for i in xrange(0, 10):
        # Even iterations encode a 0-bit tone, odd ones a 1-bit tone.
        if i % 2 == 0:
            frequency = ZERO_FREQUENCY
        else:
            frequency = ONE_FREQUENCY
        s = lambda t: volume * math.sin(2 * math.pi * frequency * t / sample_rate)
        samples = (int(s(t) * 0x7f + 0x80) for t in xrange(n_samples))
        for buf in izip(*[samples]*sample_rate):  # write several samples at a time
            stream.write(bytes(bytearray(buf)))
        # fill remainder of frameset with silence
        # NOTE(review): reconstructed indentation places this inside the
        # ten-iteration loop -- confirm against the original layout.
        stream.write(b'\x80' * restframes)
    stream.stop_stream()
    stream.close()
    p.terminate()
def play():
    """Play the recorded file, then print gender and age predictions.

    Relies on module-level ``filename`` and ``chunk`` plus the external
    ``deploy_gender`` / ``deploy_age`` classifiers.
    """
    wf = wave.open(filename, 'rb')
    p = PyAudio()
    stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                    channels=wf.getnchannels(),
                    rate=wf.getframerate(),
                    output=True)
    data = wf.readframes(chunk)
    print('Playing your record')
    # Empty bytes at end of file ends the loop.
    while len(data) > 0:
        stream.write(data)
        data = wf.readframes(chunk)
    stream.stop_stream()
    stream.close()
    p.terminate()
    # gender classifier
    if deploy_gender(filename) == 1:
        print('You are man')
    else:
        print('You are woman')
    # age classifier
    age_list = ['teens', 'twenties', 'thirties', 'fourties']
    age = deploy_age(filename)
    print('Your age:', age_list[age])
def sine_tone(frequencies, amplitudes, duration, volume=1.0, sample_rate=22050):
    """Mix several sine waves and play them as 8-bit mono audio."""
    n_samples = int(sample_rate * duration)
    restframes = n_samples % sample_rate
    p = PyAudio()
    stream = p.open(format=p.get_format_from_width(1),  # 8bit
                    channels=1,  # mono
                    rate=sample_rate,
                    output=True)

    def mixed(t):
        # Sum of the component sinusoids at sample index t.
        total = 0
        for idx, freq in enumerate(frequencies):
            total += volume * amplitudes[idx] * math.sin(
                2 * math.pi * freq * t / sample_rate)
        return total

    samples = (int(mixed(t) * 0x7f + 0x80) for t in range(n_samples))
    # Emit one second of samples per write.
    for buf in zip(*[samples] * sample_rate):
        stream.write(bytes(bytearray(buf)))
    # Pad the trailing partial frameset with silence.
    stream.write(b'\x80' * restframes)
    stream.stop_stream()
    stream.close()
    p.terminate()
def play(): wavName = 'test.wav' print "play %s" % (wavName) wf = wave.open(wavName, 'rb') pa = PyAudio() stream = pa.open(format=pa.get_format_from_width(wf.getsampwidth()), channels=wf.getnchannels(), rate=wf.getframerate(), output=True) data = wf.readframes(CHUNK) td = threading.Thread(target=startGame) td.start() while data != '': stream.write(data) data = wf.readframes(CHUNK) audio_data = np.fromstring(data, dtype=np.short) print data stream.stop_stream() stream.close() pa.terminate()
def play_voice(audio_file):
    """Play *audio_file* until it ends or the global signal1 event is set.

    Fixes: the chunk read before the loop was immediately overwritten,
    so the first 1024 frames were never played; and end-of-file compared
    bytes against the str "", which never matches under Python 3.
    """
    chunk = 1024
    wf = wave.open(audio_file, 'rb')
    p = PyAudio()
    stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                    channels=wf.getnchannels(),
                    rate=wf.getframerate(),
                    output=True)
    while True:
        data = wf.readframes(chunk)
        if signal1.is_set():  # set when the user clicks stop
            break
        if not data:  # end of file
            break
        stream.write(data)
    stream.stop_stream()  # stop the output stream
    stream.close()
    p.terminate()  # shut down PyAudio
    print('play函数结束!')
def _playback(self): """ Playback function for thread. Plays a segment until running is set to false or end is reached """ # open stream p = PyAudio() stream = p.open( format=p.get_format_from_width(self._segment.sample_width), channels=self._segment.channels, rate=self._segment.frame_rate, output=True, output_device_index=self._audio_device, ) # output the file in millisecond steps while self._pos < len(self._segment) and self._running: stream.write(self._segment[self._pos]._data) self._pos += 1 # close stream time.sleep(stream.get_output_latency()) stream.stop_stream() stream.close() p.terminate()
def get_microphone_working(
    self,
    device_name: str,
    device_index: int,
    audio: pyaudio.PyAudio,
    chunk_size: int = 1024,
) -> bool:
    """Record some audio from a microphone and check its energy.

    Returns True when the captured chunk's debiased energy exceeds 30,
    i.e. the device is probably delivering real audio.

    Fix: the exception log message was a plain string literally
    containing "{device_name}" (missing f-prefix); it now logs the
    actual device name via lazy %-formatting.
    """
    try:
        # read audio
        pyaudio_stream = audio.open(
            input_device_index=device_index,
            channels=self.channels,
            format=audio.get_format_from_width(self.sample_width),
            rate=self.sample_rate,
            input=True,
        )
        try:
            audio_data = pyaudio_stream.read(chunk_size)
            if not pyaudio_stream.is_stopped():
                pyaudio_stream.stop_stream()
        finally:
            pyaudio_stream.close()

        debiased_energy = AudioSummary.get_debiased_energy(audio_data)

        # probably actually audio
        return debiased_energy > 30
    except Exception:
        _LOGGER.exception("get_microphone_working (%s)", device_name)

    return False
def run(self):  # Function executed by each playback thread.
    """Play the WAV file at self.prams, then post-process by type.

    typ 1 = prompt sound: left untouched.
    typ 2 = sprite (machine) speech: deleted after playback so generated
    files do not accumulate.
    """
    chunk = 2014  # NOTE(review): probably intended 1024 -- confirm
    try:
        wf = wave.open(self.prams, 'rb')
        p = PyAudio()
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        # print("开始播放")
    except FileNotFoundError:
        print("录音文件未找到!")
        return
    while True:
        data = wf.readframes(chunk)
        if data == b"":  # end of file
            break
        stream.write(data)
        # print("正在读呢")
    # print("播放完毕")
    stream.close()
    p.terminate()
    wf.close()
    # main1 (elsewhere) converts recordings to text.
    if self.typ == 1:
        pass  # prompt sound: keep the file
    elif self.typ == 2:
        os.remove(self.prams)  # sprite speech: delete to save space
def ps(lng, fq):
    """Play a sine tone of *fq* Hz for *lng* seconds.

    NOTE(review): Python 2 code -- builds the sample buffer as a str via
    chr() concatenation and uses xrange.
    """
    # See http://en.wikipedia.org/wiki/Bit_rate#Audio
    BITRATE = 16000  # number of frames per second/frameset.
    # See http://www.phy.mtu.edu/~suits/notefreqs.html
    FREQUENCY = fq  # Hz, waves per second, 261.63=C4-note.
    LENGTH = lng  # seconds to play sound
    NUMBEROFFRAMES = int(BITRATE * LENGTH)
    RESTFRAMES = NUMBEROFFRAMES % BITRATE
    WAVEDATA = ''
    for x in xrange(NUMBEROFFRAMES):
        WAVEDATA += chr(
            int(math.sin(x / ((BITRATE / FREQUENCY) / math.pi)) * 127 + 128))
    # fill remainder of frameset with silence
    for x in xrange(RESTFRAMES):
        WAVEDATA += chr(128)
    p = PyAudio()
    stream = p.open(
        format=p.get_format_from_width(1),
        channels=1,
        rate=BITRATE,
        output=True,
    )
    stream.write(WAVEDATA)
    stream.stop_stream()
    stream.close()
    p.terminate()
def main():
    """Average the RMS of the first block of every WAV under *path* (Python 2)."""
    # read in some block data from pyaudio
    RATE=44100
    INPUT_BLOCK_TIME=0.2
    INPUT_FRAMES_PER_BLOCK=int(RATE*INPUT_BLOCK_TIME)
    pa=PyAudio()
    data=True
    fmt="%dh"%INPUT_FRAMES_PER_BLOCK
    total_rms=0
    total_blocks=0
    while data:
        for dr,subdr,fnames in os.walk(path):
            for filename in fnames:
                try:
                    print filename
                    wf=wave.open("%s/%s"%(path,filename),'rb')
                    # NOTE(review): an *input* stream is opened and closed
                    # without being used -- purpose unclear, confirm intent.
                    strm=pa.open(format=pa.get_format_from_width(wf.getsampwidth()),
                                 channels=wf.getnchannels(),
                                 rate=wf.getframerate(),
                                 input=True)
                    strm.stop_stream()
                    strm.close()
                    d=wf.readframes(INPUT_FRAMES_PER_BLOCK)
                    d=struct.unpack(fmt,d)
                    wf.close()
                    total_rms+=calc_rms(d)
                    total_blocks+=1
                except:
                    #print e
                    print "*** ERROR ***"
                    # NOTE(review): reconstructed placement -- the first
                    # error ends the outer while loop; confirm.
                    data=False
    avg=total_rms/total_blocks
    print "The average is %f"%avg
class Stream(Thread):
    """Threaded audio player: decodes a file with pydub and streams it
    through PyAudio in 100 ms chunks, supporting pause/seek/kill."""

    def __init__(self, f, on_terminated):
        self.__active = True          # False once kill() is called
        self.__path = f               # file to decode and play
        self.__paused = True          # starts paused; call play() to begin
        self.on_terminated = on_terminated  # callback fired at natural end
        self.__position = 0           # index into the 100 ms chunk list
        self.__chunks = []
        self.__pyaudio = PyAudio()
        Thread.__init__(self)
        self.start()

    def play(self):
        self.__paused = False

    def seek(self, seconds):
        # 10 chunks per second (100 ms each)
        self.__position = int(seconds * 10)

    def is_playing(self):
        return self.__active and not self.__paused

    def get_position(self):
        # current position, in whole seconds
        return int(self.__position / 10)

    def get_duration(self):
        # total duration, in whole seconds
        return int(len(self.__chunks) / 10)

    def pause(self):
        self.__paused = True

    def kill(self):
        self.__active = False

    def __get_stream(self):
        """Decode the file and open an output stream matching its format."""
        self.__segment = AudioSegment.from_file(self.__path)
        self.__chunks = make_chunks(self.__segment, 100)
        return self.__pyaudio.open(format=self.__pyaudio.get_format_from_width(self.__segment.sample_width),
                                   channels=self.__segment.channels,
                                   rate=self.__segment.frame_rate,
                                   output=True)

    def run(self):
        stream = self.__get_stream()
        while self.__position < len(self.__chunks):
            if not self.__active:
                break
            if not self.__paused:
                # noinspection PyProtectedMember
                data = self.__chunks[self.__position]._data
                self.__position += 1
            else:
                # While paused, feed silence so the device stays fed.
                # NOTE(review): chr(0)*free is str, so this branch assumes
                # Python 2 -- confirm target interpreter.
                free = stream.get_write_available()
                data = chr(0) * free
            stream.write(data)
        stream.stop_stream()
        self.__pyaudio.terminate()
        if self.__active:
            self.on_terminated()
def worker():
    """Endless playback loop: repeatedly writes self.wavdata (16-bit
    mono, 44.1 kHz) to the output. Captures *self* from the enclosing
    scope; runs until the process exits."""
    p = PyAudio()
    stream = p.open(format=p.get_format_from_width(2),
                    channels=1,
                    rate=44100,
                    output=True)
    while True:
        # Hold the lock during the write so wavdata cannot be swapped
        # out mid-buffer by another thread.
        self.lock.acquire()
        stream.write(self.wavdata.tostring())
        self.lock.release()
def play(filepath):
    """Decode the MP3 at *filepath* with pydub and play it in one write.

    Writing the entire raw buffer in a single ``stream.write()`` call
    lets PortAudio compute the frame count and do the buffering itself,
    avoiding the CPU-sensitive glitches of frame-by-frame writes.

    Fixes: the input file is opened with a context manager so the handle
    is released even if reading fails, and a large block of dead
    commented-out code was removed.
    """
    with open(filepath, 'rb') as fp:
        data = fp.read()
    aud = BytesIO(data)
    sound = AudioSegment.from_file(aud, format='mp3')  # LONG
    global isPlaying
    p = PyAudio()
    stream = p.open(format=p.get_format_from_width(sound.sample_width),
                    channels=sound.channels,
                    rate=sound.frame_rate,
                    output=True)
    print("[PLAYAUDIO INTERVAL]%f" % (time.time()))
    datas = sound.raw_data
    isPlaying = True
    # One blocking write of the whole buffer (num_frames defaults to
    # None, so PyAudio derives it from the buffer length).
    stream.write(datas)
    stream.stop_stream()  # stop the stream
    stream.close()
    p.terminate()  # shut down PyAudio
def play_audio(self):
    """Play the raw bytes in self.response using the stored stream parameters."""
    pa = PyAudio()
    out = pa.open(
        format=pa.get_format_from_width(self.sampwidth),
        channels=self.nchannels,
        rate=self.rate,
        output=True,
    )
    out.write(self.response)
    out.stop_stream()
    out.close()
    pa.terminate()
def playWaveData(self, waveData):
    """Play 8-bit mono waveData at the instance's bit rate."""
    audio = PyAudio()
    out = audio.open(
        format=audio.get_format_from_width(1),
        channels=1,
        rate=self.bitRate,
        output=True,
    )
    out.write(waveData)
    out.stop_stream()
    out.close()
    audio.terminate()
def play(self, filename):
    """Play the WAV file at self.locpla + filename.

    Fixes resource handling: the wave file is now closed and the stream
    stopped (draining buffered audio) before being closed.
    """
    # os.popen(self.locpla+filename )
    wf = wave.open(self.locpla + filename, 'rb')
    p = PyAudio()
    stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                    channels=wf.getnchannels(),
                    rate=wf.getframerate(),
                    output=True)
    data = wf.readframes(self.chunk)
    while data != b'':
        stream.write(data)
        data = wf.readframes(self.chunk)
    stream.stop_stream()  # added: stop before closing
    stream.close()
    wf.close()  # added: release the file handle
    p.terminate()
def play(): wf=wave.open(r"C:\Users\pengfy\PycharmProjects\smart-audio\record_files\2019-07-30-11-52-51.wav",'rb') p=PyAudio() stream=p.open(format=p.get_format_from_width(wf.getsampwidth()),channels= wf.getnchannels(),rate=wf.getframerate(),output=True) while True: data=wf.readframes(chunk) if data=="":break stream.write(data) stream.close() p.terminate()
def play():
    """Stream the module-level *file* to the default output in 8000-frame chunks."""
    wf = wave.open(file, 'r')
    pa = PyAudio()
    out = pa.open(format=pa.get_format_from_width(wf.getsampwidth()),
                  channels=wf.getnchannels(),
                  rate=wf.getframerate(),
                  output=True)
    frames = wf.readframes(8000)
    while frames:
        out.write(frames)
        frames = wf.readframes(8000)
    out.close()
    pa.terminate()
def play(): wf=wave.open(r"01.wav",'rb') p=PyAudio() stream=p.open(format=p.get_format_from_width(wf.getsampwidth()),channels= wf.getnchannels(),rate=wf.getframerate(),output=True) while True: data=wf.readframes(chunk) if data=="":break stream.write(data) stream.close() p.terminate()
def play(self, event):
    """Event handler: play the WAV file named by self.name."""
    wav = wave.open(self.name, 'rb')
    pa = PyAudio()
    out = pa.open(format=pa.get_format_from_width(wav.getsampwidth()),
                  channels=wav.getnchannels(),
                  rate=wav.getframerate(),
                  output=True)
    while True:
        frames = wav.readframes(1024)
        if not frames:
            break
        out.write(frames)
    out.close()
    pa.terminate()
def play(): wf = wave.open(r"01.wav", 'rb') #以二进制的格式打开 p = PyAudio() #创建一个对象 stream = p.open(format=p.get_format_from_width(wf.getsampwidth()), channels=wf.getnchannels(), rate=wf.getframerate(), output=True) #这里是output while True: data = wf.readframes(chunk) #进行读取 if data == "": break stream.write(data) #进行输出 stream.close() #关闭输出流 p.terminate() # 关闭文件的输入流
def play():
    """Play SOUND_FILE once.

    Fixes: the loop condition ``data != '' or data != " "`` was always
    true (and compared bytes to str), so playback never terminated; and
    ``start_stream()`` was called after the loop where ``stop_stream()``
    was clearly intended. The PyAudio session is terminated as before.
    """
    wf = wave.open(SOUND_FILE, 'rb')
    p = PyAudio()
    stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                    channels=wf.getnchannels(),
                    rate=wf.getframerate(),
                    output=True)
    data = wf.readframes(CHUNK)
    while data:  # empty bytes marks end of file
        stream.write(data)
        data = wf.readframes(CHUNK)
    stream.stop_stream()
    stream.close()
    p.terminate()
def pyaudio_stream(sample_rate=22050):
    """Generator context: yield an open 8-bit mono output stream, then clean up."""
    pa = PyAudio()
    out = pa.open(
        format=pa.get_format_from_width(1),  # 8bit
        channels=1,  # mono
        rate=sample_rate,
        output=True,
    )
    yield out
    # Resumed after the consumer finishes: tear everything down.
    out.stop_stream()
    out.close()
    pa.terminate()
class PySine(object):
    # Output sample rate in frames/second (float so divisions stay float).
    BITRATE = 96000.

    def __init__(self):
        """Open an 8-bit mono output stream; when no audio device is
        available, substitute a mock whose write() just sleeps for the
        buffer's playback duration."""
        self.pyaudio = PyAudio()
        try:
            self.stream = self.pyaudio.open(
                format=self.pyaudio.get_format_from_width(1),
                channels=1,
                rate=int(self.BITRATE),
                output=True)
        except:
            logger.error("No audio output is available. Mocking audio stream to simulate one...")
            # output stream simulation with magicmock
            try:
                from mock import MagicMock
            except:  # python > 3.3
                from unittest.mock import MagicMock
            from time import sleep
            self.stream = MagicMock()
            def write(array):
                # Simulate real-time playback of the written buffer.
                duration = len(array)/float(self.BITRATE)
                sleep(duration)
            self.stream.write = write

    def __del__(self):
        # Best-effort cleanup when the object is garbage-collected.
        self.stream.stop_stream()
        self.stream.close()
        self.pyaudio.terminate()

    def sine(self, frequency=440.0, duration=1.0):
        """
        This will generate a sine wave with minimum of 1 second duration.
        """
        # Snap the duration to a whole number of wave periods so the
        # signal ends at a zero crossing (avoids clicks).
        grain = round(self.BITRATE / frequency)
        points = grain * round(self.BITRATE * duration / grain)
        duration = points / self.BITRATE
        data = np.zeros(int(self.BITRATE * max(duration, 1.0)))
        try:
            times = np.linspace(0, duration, points, endpoint=False)
            data[:points] = np.sin(times * frequency * 2 * np.pi)
            # Scale [-1, 1] to the unsigned 8-bit range [0, 255].
            data = np.array((data + 1.0) * 127.5, dtype=np.int8).tostring()
        except:  # do it without numpy
            data = ''
            omega = 2.0*pi*frequency/self.BITRATE
            for i in range(points):
                data += chr(int(127.5*(1.0+sin(float(i)*omega))))
        self.stream.write(data)
def play(file):
    """Play the WAV file at *file*.

    Fix: end-of-file is now detected with ``not data`` -- under Python 3
    ``readframes()`` returns bytes, which never equal the str ''.
    """
    wf = wave.open(file, 'rb')
    pa = PyAudio()
    stream = pa.open(format=pa.get_format_from_width(wf.getsampwidth()),
                     channels=wf.getnchannels(),
                     rate=wf.getframerate(),
                     output=True)
    while True:
        data = wf.readframes(1024)
        if not data:  # end of file
            break
        stream.write(data)
    stream.close()
    pa.terminate()
def out_audio(data):
    """Play 8-bit mono samples; ndarray input is offset into the unsigned range first."""
    from pyaudio import PyAudio
    if isinstance(data, np.ndarray):
        # Shift signed samples up by 0x80 into unsigned 8-bit territory.
        data = list(data.astype(int) + 0x80)
    player = PyAudio()
    out = player.open(
        format=player.get_format_from_width(1),  # 8bit
        channels=1,  # mono
        rate=Player.sample_rate,
        output=True)
    out.write(bytes(data))
    out.stop_stream()
    out.close()
    player.terminate()
class Voice2Midi(object):
    """Streams a vocal WAV through a PyAudio callback, detects note
    onsets and fundamental frequency per window, and drives a synth
    with the resulting MIDI notes."""

    def __init__(self):
        self._synth = Synth()
        self._onset_detector = OnsetDetector()
        self._f0_detector = F0Detector()
        self._wf = wave.open('audio_files/vocal2.wav', 'rb')
        self._p = PyAudio()
        self._e = threading.Event()
        self._input_buffer = []
        # Non-blocking output stream: PortAudio invokes _process_frame
        # for every WINDOW_SIZE frames.
        self._stream = self._p.open(format=self._p.get_format_from_width(
            self._wf.getsampwidth()),
            channels=self._wf.getnchannels(),
            rate=self._wf.getframerate(),
            output=True,
            frames_per_buffer=WINDOW_SIZE,
            stream_callback=self._process_frame)

    def run(self):
        print(self._wf.getframerate())
        self._synth.run()  # main loop of the synth
        self._stream.stop_stream()
        self._stream.close()
        self._p.terminate()

    def _process_frame(self, in_data, frame_count, time_info, status_flag):
        """PyAudio callback: analyse one window and pass it through.

        Returns the frames unchanged (so the file remains audible) with
        paContinue, or paComplete once a window is short or silent.
        """
        data = self._wf.readframes(frame_count)
        data_array = np.frombuffer(data, dtype=np.int32)
        if np.shape(data_array)[0] == WINDOW_SIZE and np.shape(
                np.nonzero(data_array))[1] > 0:
            # print(np.shape(np.nonzero(data_array))[1])
            data_array = data_array.astype('float32')
            data_array = data_array / np.max(data_array)  # normalise to peak
            # onset = self._onset_detector.find_onset(data_array)
            if self._onset_detector.find_onset(data_array):
                freq0 = self._f0_detector.find_f0(data_array)
                if freq0:
                    # Onset detected
                    print("Note detected; fundamental frequency: ", freq0)
                    midi_note_value = int(hz_to_midi(freq0)[0])
                    print("Midi note value: ", midi_note_value)
                    note = RTNote(midi_note_value, 100, 0.5)
                    self._synth.set_new_note(note)
                    self._synth.e.set()
            return data, paContinue
        else:
            print("Sending over")
            return data, paComplete
class AudioStream(object):
    """Context-managed microphone capture stream built on PyAudio."""

    def __init__(self, sample_rate=44100, channels=1, width=2, chunk=1024,
                 input_device_index=None):
        # width is the sample width in bytes (2 = 16-bit)
        self.sample_rate = sample_rate
        self.channels = channels
        self.width = width
        self.chunk = chunk
        self.input_device_index = input_device_index

    def __enter__(self):
        """Open and start the input stream (default device if none given)."""
        self._pa = PyAudio()
        if self.input_device_index is None:
            self.input_device_index = \
                self._pa.get_default_input_device_info()['index']
        self._stream = self._pa.open(
            format=self._pa.get_format_from_width(self.width),
            channels=self.channels,
            rate=self.sample_rate,
            input=True,
            frames_per_buffer=self.chunk,
            input_device_index=self.input_device_index)
        self._stream.start_stream()
        return self

    def read(self):
        '''
        On a buffer overflow this returns 0 bytes.
        '''
        try:
            return self._stream.read(self.chunk)
        except IOError:
            # overflow: drop this chunk
            return ''
        except AttributeError:
            # _stream only exists after __enter__ has run
            raise Exception('Must be used as a context manager.')

    def stream(self):
        """Read chunks forever, passing non-empty ones to handle()."""
        try:
            while True:
                bytes = self.read()
                if bytes:
                    self.handle(bytes)
        except (KeyboardInterrupt, SystemExit):
            pass

    def __exit__(self, type, value, traceback):
        self._stream.stop_stream()
        self._stream.close()
        self._pa.terminate()

    def handle(self, bytes):
        # Hook for subclasses; the default discards the data.
        pass
def play_wave_data(self):
    """Play self.WAVE_DATA as 8-bit audio with the instance's channels and bit rate."""
    audio = PyAudio()
    out = audio.open(
        format=audio.get_format_from_width(1),
        channels=self.CHANNELS,
        rate=self.BITRATE,
        output=True,
    )
    out.write(self.WAVE_DATA)
    out.stop_stream()
    out.close()
    audio.terminate()
def openWav(self):
    """Play result.wav from the working directory.

    Fix: the old ``data == ""`` test compared bytes with str and never
    matched, so the loop spun forever after end of file.
    """
    chunk = 1024
    wf = wave.open(r"result.wav", 'rb')
    p = PyAudio()
    stream = p.open(format = p.get_format_from_width(wf.getsampwidth()),
                    channels = wf.getnchannels(),
                    rate = wf.getframerate(),
                    output = True)
    while True:
        data = wf.readframes(chunk)
        if not data:  # end of file
            break
        stream.write(data)
    stream.close()
    p.terminate()
def play(filename):
    """Play the WAV file *filename* in NUM_SAMPLES-frame chunks."""
    wav = wave.open(filename, 'rb')
    pa = PyAudio()
    out = pa.open(format=pa.get_format_from_width(wav.getsampwidth()),
                  channels=wav.getnchannels(),
                  rate=wav.getframerate(),
                  output=True)
    while True:
        frames = wav.readframes(NUM_SAMPLES)
        if frames == b'':
            break
        out.write(frames)
    out.close()
    pa.terminate()
def play(wave_data):
    """Play up to BITRATE // 10 chunks drawn from the *wave_data* iterable.

    Fix: ``itertools.islice`` requires an integer stop, but ``BITRATE/10``
    is a float under Python 3 -- floor division gives the same value and
    a valid type on both Python 2 and 3.
    """
    chunk_size = BITRATE // 10
    p = PyAudio()
    stream = p.open(format = p.get_format_from_width(1),
                    channels = 1,
                    rate = BITRATE,
                    output = True)
    for chunk in itertools.islice(wave_data, chunk_size):
        stream.write(chunk)
    stream.stop_stream()
    stream.close()
    p.terminate()
def go(): filepath = "/home/pi/tmp/talkynotes/" + "resources/audio/" + file + ".wav" wf = wave.open(filepath, "rb") chunk = 1024 p = PyAudio() stream = p.open(format=p.get_format_from_width(wf.getsampwidth()), channels=wf.getnchannels(), rate=wf.getframerate(), output=True) data = wf.readframes(chunk) while data != b'': stream.write(data) data = wf.readframes(chunk) stream.close() p.terminate()
def play(wave_data):
    """Play up to BITRATE // 10 chunks from the *wave_data* iterable.

    Fix: ``itertools.islice`` needs an integer stop; ``BITRATE / 10`` is
    a float under Python 3, so floor division is used instead (same
    value, valid type on both Python 2 and 3).
    """
    chunk_size = BITRATE // 10
    p = PyAudio()
    stream = p.open(format=p.get_format_from_width(1),
                    channels=1,
                    rate=BITRATE,
                    output=True)
    for chunk in itertools.islice(wave_data, chunk_size):
        stream.write(chunk)
    stream.stop_stream()
    stream.close()
    p.terminate()
def read_file():
    """Wait two seconds, then play the module-level *filename* WAV file."""
    wf = wave.open(filename, 'rb')
    p = PyAudio()
    out = p.open(format = p.get_format_from_width(wf.getsampwidth()),
                 channels = wf.getnchannels(),
                 rate = wf.getframerate(),
                 output = True,)
    time.sleep(2)
    print('开始放音')
    while True:
        frames = wf.readframes(chunk)
        if frames == b'':
            break
        out.write(frames)
        print('...')
    wf.close()
    out.close()
    p.terminate()
def Audio_play():  # play audio
    """Play up to ~15 seconds of audio_lib/source1.wav.

    Fixes: end-of-file compared the bytes from ``readframes()`` against
    the str " " (never true), and the PyAudio session was never
    terminated (resource leak).
    """
    CHUNK = 8192
    filename = current_directory + '/audio_lib/source1.wav'
    wf = wave.open(filename, 'rb')
    pa = PyAudio()
    stream = pa.open(format = pa.get_format_from_width(wf.getsampwidth()),
                     channels = wf.getnchannels(),
                     rate = wf.getframerate(),
                     output = True)
    NUM = int(wf.getframerate()/CHUNK * 15)  # chunk count covering ~15 s
    logging.info("-> START PLAY AUDIO")
    while NUM:
        data = wf.readframes(CHUNK)
        if not data:  # end of file
            break
        stream.write(data)
        NUM -= 1
    stream.stop_stream()
    stream.close()
    pa.terminate()  # added: release PortAudio resources
    logging.info(">> PLAY DONE")
def getWavFeature(wavName): print "processing %s" % (wavName) wf = wave.open(wavName, 'rb') pa = PyAudio() stream = pa.open(format=pa.get_format_from_width(wf.getsampwidth()), channels=wf.getnchannels(), rate=wf.getframerate(), output=True) data = wf.readframes(CHUNK) while data != '': stream.write(data) data = wf.readframes(CHUNK) stream.stop_stream() stream.close() pa.terminate()
def play(wavName): print "play %s" % (wavName) wf = wave.open(wavName, 'rb') pa = PyAudio() stream = pa.open(format=pa.get_format_from_width(wf.getsampwidth()), channels=wf.getnchannels(), rate=wf.getframerate(), output=True) # td = threading.Thread(target=startGame) # td.start() data = wf.readframes(CHUNK) while data != '': stream.write(data) data = wf.readframes(CHUNK) stream.stop_stream() stream.close() pa.terminate()
def button_play_Click():
    """Tk handler for the Play button: switch to "playing" state,
    disable the other transport buttons, open the current wave file and
    hand off to play_wave() for the actual streaming."""
    global state, wav_out, stream_out, data_out, wave_form, wavefile_name, script_count, script_list
    state = 2  # change the state flag into "playing"
    # update button states
    button_stop.configure(state = NORMAL)
    button_record.configure(state = DISABLED)
    button_back.configure(state = DISABLED)
    button_next.configure(state = DISABLED)
    button_play.configure(state = DISABLED)
    wav_out = PyAudio()
    wave_form = wave.open(wavefile_name, 'rb')
    stream_out = wav_out.open(format = wav_out.get_format_from_width(wave_form.getsampwidth()),
                              channels = wave_form.getnchannels(),
                              rate = wave_form.getframerate(),
                              output = True)
    # Prime the first chunk; play_wave() consumes/refills data_out.
    data_out = wave_form.readframes(CHUNK)
    echo_text.configure(text = "Playing...", bg = 'blue', fg = 'white', font = ("Helvetica", 50))
    play_wave()
def Play_WAV(self, filename):
    """Play the WAV file *filename* in 1024-frame chunks.

    Fix: the loop now tests ``while data:`` so it terminates on the
    empty bytes Python 3's ``readframes()`` returns at end of file (the
    old ``data != ''`` str comparison never matched; the new form also
    behaves identically on Python 2).
    """
    chunk = 1024
    f = wave.open(filename, "rb")
    pa = PyAudio()
    # open stream
    stream = pa.open(format = pa.get_format_from_width(f.getsampwidth()),
                     channels = f.getnchannels(),
                     rate = f.getframerate(),
                     output = True)
    # read data
    data = f.readframes(chunk)
    # play stream
    while data:
        stream.write(data)
        data = f.readframes(chunk)
    # stop stream
    stream.stop_stream()
    stream.close()
    # close PyAudio
    pa.terminate()
data = waves[instrument][pitch].readframes(frame_count) raw = fromstring(data, dtype=int16) # print(raw) gainlvl = vectorize(lambda x: x * volume) raw = gainlvl(raw) data = raw.astype(int16).tostring() if len(data) < 2048 * swidth : # If file is over then rewind. waves[instrument][pitch].rewind() data = waves[instrument][pitch].readframes(frame_count) data_pos = waves[instrument][pitch].tell() return (data, paContinue) print "Hey, Parent Process, Opening Stream..." stream = pa.open( format = pa.get_format_from_width(waves[instrument][1].getsampwidth()), channels = waves[instrument][1].getnchannels(), rate = waves[instrument][1].getframerate(), output = True, stream_callback = loopaudio) print "Hey, Parent Process1" stream.start_stream() print "Hey, Parent Process2" while stream.is_active(): #print "Hey, Parent Process3" data=r.readline() if not data: break str_vals = data.strip().split() int_list = [int(i) for i in str_vals]
sc=[1,0,2,3,0,2,4,5,5,6,7,5,5,6,8,5,5,6,7,5,9,7,5,5,6,7,5,5,6,8,5,5,6,7,5,9,10,0,2,3,0,2,4,0,2,3,0,2,4,5,5,6,7,5,5,6,8,5,5,6,7,5,9,11,12,13,12,13,14,11,12,13,12,13,14,13,12,13,12,13,14,13,12,13,12,15] gtN=[[g],[-k,-22,-20,-19],[-22,-20,-k],[-22,-20,-k,g,-13,-15],[-k,-20,-k,g,-k,-20,-k],[-k,-k,-15,g,-15,-13],[g,-15,-13,g,-k,-15],[g,-15,-13,g,-k,-15],[-12,-10,-12,-10,-12,-10],[-21,-19,-17]] gtD=[[96],[8,4,2,2],[4,4,8],[4,4,4,2,1,1],[c]+[b]*6,[c,b,b,c,b,b],[c,b,b,c,b,b],[a]*6,[b]*6,[4,4,8]] gtL=[[96],[8,4,2,2],[4,4,8],gtD[3],gtD[4],gtD[5],gtD[6],gtD[7],[b,c,b,c,b,b],[4,4,8]] gt=[0,1,1]+[2]*7+[3,4,4,4,5,4,4,4,6,4,4,4,5,4,4,4,7]+[1]*4+[2]*4+[9]*4+[8]*8 sg["bd"]=[bd,bdN,bdD,bdL] sg["mn"]=[mn,mnN,mnD,mnL] sg["cr"]=[cr,crN,crD,crL] sg["rc"]=[rc,rcN,rcD,rcL] sg["bs"]=[bs,bsN,bsD,bsL] sg["sy"]=[sy,syN,syD,syL] sg["sc"]=[sc,scN,scD,scL] sg["gt"]=[gt,gtN,gtD,gtL] z=[[] for i in X(8)] p=PyAudio() st=p.open(format=p.get_format_from_width(1),channels=1,rate=R,output=True) os.system("clear") for i in X(R):sD.append(math.sin(tp*(float(i)/R))) tP=0 for tk in sg: A=z[tP].append for pt in sg[tk][0]: pP=nP=0 for n in sg[tk][1][pt]: ss=int(qL*sg[tk][3][pt][nP]*R) du=int(qL*sg[tk][2][pt][nP]*R) if n!=g: F=440*pow(1.059463094,n) for x in X(int(du)): if n==g:A(0.) else:
## LOADING GLOBAL VARIABLES # Pre-Loading Music data names = ['500', '505', '510', '515', '520', '525', '530', '535', '540', '545', '550'] sounds = map(lambda i: wave.open("stimuli/" + names[i] + ".wav"), range(0,len(names))) data = map(lambda x: x.readframes(x.getnframes()),sounds) # Pre-loading yes/no data ynnames = ['no_f', 'yes_m'] ynsounds = map(lambda i: wave.open("stimuli_yesno/" + ynnames[i] + ".wav"), range(0,len(ynnames))) yndata = map(lambda x: x.readframes(x.getnframes()),ynsounds) # Opening Audio Stream print("tone width " + str(sounds[0].getsampwidth()) + " framerate " + str(sounds[0]._framerate) + " nch " + str(sounds[0].getnchannels())); print("yesno width " + str(ynsounds[0].getsampwidth()) + " framerate " + str(ynsounds[0]._framerate) + " nch " + str(ynsounds[0].getnchannels())); stream = p.open(format=p.get_format_from_width(sounds[0].getsampwidth()), channels=sounds[0].getnchannels(), rate=sounds[0].getframerate(), output=True) # set up the colors BLACK = (0, 0, 0) WHITE = (255, 255, 255) # set up fonts basicFont = pygame.font.SysFont(None, 48) basicBigFont = pygame.font.SysFont(None, 48*2) updateframe(["Welcome to the BCI Music Experiment"]) #
# Forward optional CLI overrides into the config before connecting.
if options.link is not None:
    config.set("messages", "encrypted_link", options.link)
if options.section is not None:
    config.set("messages", "encrypted_section", options.section)
msg_decode = options.msgdecode
espeak_voice = options.voice
if espeak_voice is not None:
    # Text-to-speech requested: route synchronous espeak synthesis
    # through a 16-bit mono PyAudio output stream.
    import struct
    from pyaudio import PyAudio
    import espeak
    rate = espeak.initialize(espeak.AUDIO_OUTPUT_SYNCHRONOUS)
    espeak.set_voice(espeak_voice)
    pa = PyAudio()
    snd = pa.open(format=pa.get_format_from_width(2),
                  channels=1,
                  rate=rate,
                  output=True)
    snd.start_stream()
    def synth_cb(samples, nsamples, events):
        # Pack each 16-bit sample in native byte order and push it to
        # the device; returning 0 tells espeak to continue synthesis.
        result = bytes()
        for sample in samples[:nsamples]:
            result += struct.pack("=h", sample)
        snd.write(result)
        return 0
    espeak.set_synth_callback(synth_cb)
jid = config.get("xmpp", "jid")
try:
    password = config.get("xmpp", "password")
except:
    password = getpass("Password: ")
class AudioTool:
    '''
    This function include record and play, if you want to play and record,
    please set the play is True. The sample rate is 44100 Bit:16
    '''
    def __init__(self):
        # capture/playback stream parameters
        self.chunk = 1024
        self.channels = 2
        self.samplerate = 44100
        self.format = paInt16
        #open audio stream
        self.pa = PyAudio()
        self.save_buffer = []  # recorded chunks pending save

    def record_play(self,seconds,play=False,file_play_path=None,file_save_path=None):
        """Record *seconds* of stereo audio (optionally playing
        file_play_path at the same time) and save it to file_save_path.
        Python 2 code (print statements, str frame buffers)."""
        NUM = int((self.samplerate/float(self.chunk)) * seconds)
        if play is True:
            swf = wave.open(file_play_path, 'rb')
        stream = self.pa.open(
            format = self.format,
            channels = self.channels,
            rate = self.samplerate,
            input = True,
            output = play,
            frames_per_buffer = self.chunk
        )
        # wave_data = []
        while NUM:
            data = stream.read(self.chunk)
            self.save_buffer.append(data)
            wave_data=np.fromstring(data, dtype = np.short)
            wave_data.shape = -1,2
            wave_data = wave_data.T #transpose
            multiprocessing.Process  # NOTE(review): no-op expression -- confirm intent
            # print int(data)
            print wave_data
            NUM -= 1
            if play is True:
                data = swf.readframes(self.chunk)
                stream.write(data)
                if data == " ":  # NOTE(review): never true (frames vs " ") -- confirm
                    break
        if play is True:
            swf.close()
        #stop stream
        stream.stop_stream()
        stream.close()
        # save wav file
        def _save_wave_file(filename,data):
            # write the accumulated chunks out as a 16-bit stereo WAV
            wf_save = wave.open(filename, 'wb')
            wf_save.setnchannels(self.channels)
            wf_save.setsampwidth(self.pa.get_sample_size(self.format))
            wf_save.setframerate(self.samplerate)
            wf_save.writeframes("".join(data))
            wf_save.close()
        _save_wave_file(file_save_path, self.save_buffer)
        del self.save_buffer[:]
        print file_save_path," Record Sucessful!"

    def play(self,filepath):
        """Play up to ~15 seconds of the WAV file at *filepath*."""
        wf = wave.open(filepath, 'rb')
        stream =self.pa.open(
            format = self.pa.get_format_from_width(wf.getsampwidth()),
            channels = wf.getnchannels(),
            rate = wf.getframerate(),
            output = True,
        )
        NUM = int(wf.getframerate()/self.chunk * 15)  # ~15 s of chunks
        print "playing.."
        while NUM:
            data = wf.readframes(self.chunk)
            if data == " ":  # NOTE(review): never matches; loop ends via NUM
                break
            stream.write(data)
            NUM -= 1
        stream.stop_stream()
        del data
        stream.close()

    def close(self):
        # end the PyAudio session
        self.pa.terminate()
#plot.add_img_plot(mag_vec, yok, linspace(0, len(anr)-1, len(anr))) #zplot.add_img_plot(zname="blah", zdata=mag_vec)#z, ydata=linspace(0, len(anr)-1, len(anr)), xdata=linspace(0, len(yok)-1, len(yok))) #plot.add_plot("cross_sec", yname="Macvec1", ydata=c) # plot.add_plot("cross_se2", yname="Macvec2", ydata=mag_vec[:, 75]) plot.show() for x in xrange(NUMBEROFFRAMES): WAVEDATA = WAVEDATA+chr(c[x]) #fill remainder of frameset with silence for x in xrange(RESTFRAMES): WAVEDATA = WAVEDATA+chr(128) p = PyAudio() FORMAT=p.get_format_from_width(1) stream = p.open(format = p.get_format_from_width(1), channels = 1, rate = BITRATE, output = True) stream.write(WAVEDATA) stream.stop_stream() stream.close() p.terminate() if 0: import wave wf = wave.open('short_pulse.wav', 'wb') wf.setnchannels(1) wf.setsampwidth(p.get_sample_size(FORMAT)) wf.setframerate(BITRATE)
class InterfaceAport(Thread):
    """Threaded 8-bit audio output queue (Python 2: xrange, Queue).

    Sample chunks are queued with a repetition count; the run loop plays
    them, repeating the last chunk forever when its count is None.
    """

    def __init__(self, bitrate):
        Thread.__init__(self)
        # Stream parameters
        self.bitrate_ = bitrate
        self.channels_ = 1
        # create an audio object
        self.p_ = PyAudio()
        self.stream_ = None
        self.createStream()
        self.q = Queue.Queue()   # pending [data, repetitions] pairs
        self.lastq = [None,0]    # chunk currently being played/repeated
        self.play_ = True
        self.stop_ = False

    def __del__(self):
        self.p_.terminate()
        self.stream_.close()

    @property
    def bitrate(self):
        return self.bitrate_

    @bitrate.setter
    def bitrate(self, x):
        # changing the rate requires reopening the stream
        self.bitrate_=x
        self.createStream()

    @property
    def channels(self):
        return self.channels_

    @channels.setter
    def channels(self, x):
        self.channels_=x
        self.createStream()

    def createStream(self):
        """(Re)open the output stream with the current parameters."""
        if self.stream_ != None:
            self.stream_.close()
        self.stream_ = self.p_.open(
            format = self.p_.get_format_from_width(1),
            channels = self.channels_,
            rate = self.bitrate_,
            output = True)

    def playData(self, dataIn, repetitions=None):
        """Queue normalised [-1, 1] samples, encoded to unsigned 8-bit.

        repetitions=None means "repeat until new data arrives or
        stopLast() is called".
        """
        # Encode samples to the 8-bit unsigned range
        data=''
        for x in xrange(int(len(dataIn))):
            data+=chr(int(dataIn[x]*127+128))
        self.q.put([data,repetitions])

    def run(self):
        while (self.stop_ == False):  # stop_ True aborts everything
            if (self.q.qsize()!=0 and self.play_ == True):  # data queued and not paused
                self.lastq = self.q.get()
                if self.lastq[1] == None:  # None repetitions: loop forever
                    while self.lastq[1] == None and self.q.qsize()==0:
                        self.stream_.write(self.lastq[0])  # play endlessly
                else :
                    for x in xrange(int(self.lastq[1])):
                        self.stream_.write(self.lastq[0])

    def stopLast(self,repetitions=0):
        # break an infinite repeat by giving it a finite count
        self.lastq[1] = repetitions

    def play(self):
        self.play_ = True
        self.stop_ = False

    def pause(self):
        self.play_ = False
        self.stop_ = False

    def stop(self):
        self.play_ = False
        self.stop_ = True
xrange = range volume=0.1 sample_rate = 22050 duration = 0.1 print(sample_rate*duration) # Set the root root = 37 n_samples = int(sample_rate*duration) restframes = n_samples % sample_rate p = PyAudio() stream = p.open(format=p.get_format_from_width(1), # 8bit channels=1, # mono rate = sample_rate, output = True) for i in range(0, length): if melody[i] == 1: play = 0 if melody[i] == 2: play = 2 if melody[i] == 3: play = 4 if melody[i] == 4: play = 5 if melody[i] == 5: play = 7
gtN=[[g],[-k,-22,-20,-19],[-22,-20,-k],[-22,-20,-k,g,-13,-15],[-k,-20,-k,g,-k,-20,-k],[-k,-k,-15,g,-15,-13],[g,-15,-13,g,-k,-15],[g,-15,-13,g,-k,-15],[-12,-10,-12,-10,-12,-10],[-21,-19,-17]] gtD=[[96],[8,4,2,2],[4,4,8],[4,4,4,2,1,1],[c,b,b,b,b,b,b],[c,b,b,c,b,b],[c,b,b,c,b,b],[a,a,a,a,a,a],[b,b,b,b,b,b],[4,4,8]] gtL=[[96],[8,4,2,2],[4,4,8],gtD[3],gtD[4],gtD[5],gtD[6],gtD[7],[b,c,b,c,b,b],[4,4,8]] gt=[0,1,1,2,2,2,2,2,2,2,3,4,4,4,5,4,4,4,6,4,4,4,5,4,4,4,7,1,1,1,1,2,2,2,2,9,9,9,9,8,8,8,8,8,8,8,8] sg["bd"]=[bd,bdN,bdD,bdL] sg["mn"]=[mn,mnN,mnD,mnL] sg["cr"]=[cr,crN,crD,crL] sg["rc"]=[rc,rcN,rcD,rcL] sg["bs"]=[bs,bsN,bsD,bsL] sg["sy"]=[sy,syN,syD,syL] sg["sc"]=[sc,scN,scD,scL] sg["gt"]=[gt,gtN,gtD,gtL] z=[[] for i in X(8)] p=PyAudio() st=p.open(format = p.get_format_from_width(1), channels=1, rate=R, output=True) os.system("clear") for i in X(R): sD.append(math.sin(tp*(float(i)/R))) tP=0 for tk in sg: A=z[tP].append for pt in sg[tk][0]: pP=0 nP=0 for n in sg[tk][1][pt]: ss=int(qL*sg[tk][3][pt][nP]*R); du=int(qL*sg[tk][2][pt][nP]*R);
import wave
import sys

from pyaudio import PyAudio

CHUNK = 1024

# Play the WAV file named on the command line through the default output.
wf = wave.open(sys.argv[1], 'rb')
p = PyAudio()
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                channels=wf.getnchannels(),
                rate=wf.getframerate(),
                output=True)
data = wf.readframes(CHUNK)
# Fix: readframes() returns bytes under Python 3; comparing against the
# str '' never terminated the loop. ``while data:`` works on 2 and 3.
while data:
    stream.write(data)
    data = wf.readframes(CHUNK)
stream.stop_stream()
stream.close()
p.terminate()