Example #1
def record():

	pa = PyAudio()
	in_stream = pa.open(format=paInt16, channels=1, rate=SAMPLING_RATE, input=True, frames_per_buffer=BUFFER_SIZE)
	out_stream = pa.open(format=paInt16, channels=1, rate=SAMPLING_RATE,output=True)
	save_count = 0
	save_buffer = []
	save_data   = []

	save_count = SAVE_LENGTH
	
	print 'start recording'

	while save_count>0:
		string_audio_data = in_stream.read(BUFFER_SIZE)
		audio_data = np.fromstring(string_audio_data, dtype=np.short)
		
		print type(audio_data)
		save_buffer.append( string_audio_data )
		save_data.append( audio_data )

		save_count = save_count - 1

	# save once the loop has finished, then release PyAudio
	#print 'save %s' % (wav.fileName)
	#save_wave_file(wav.fileName, save_buffer)
	save_wave_file("test.wav", save_buffer)
	pa.terminate()
Example #2
	def record(self):
		pa = PyAudio()
		in_stream = pa.open(format=paInt16, channels=1, rate=SAMPLING_RATE, input=True, frames_per_buffer=BUFFER_SIZE)
		save_count = 0
		save_buffer = []

		save_count = SAVE_LENGTH
		
		print 'start recording'
		while save_count>0:
			string_audio_data = in_stream.read(BUFFER_SIZE)
			audio_data = np.fromstring(string_audio_data, dtype=np.short)
			save_buffer.append( string_audio_data )
			save_count = save_count - 1

		print 'save %s' % (self.fileName)
		pa.terminate()
		
		wf = wave.open(self.fileName, 'wb')
		wf.setnchannels(1)
		wf.setsampwidth(2)
		wf.setframerate(SAMPLING_RATE)
		wf.writeframes("".join(save_buffer))
		wf.close()
		
		self.stringAudioData = "".join(save_buffer)
		save_data = np.fromstring(self.stringAudioData, dtype=np.short)
		self.audioData = save_data[10000:10000+4608*4]
		self.stringAudioData = self.audioData.tostring()
		self.cutAudio = self.audioData
		# self.cut2()
		self.getFeature()
Example #3
 def record(self, time):
     audio = PyAudio()
     stream = audio.open(input_device_index=self.device_index,
                         output_device_index=self.device_index,
                         format=self.format,
                         channels=self.channel,
                         rate=self.rate,
                         input=True,
                         frames_per_buffer=self.chunk
                         )
     print "Recording..."
     frames = []
     for i in range(0, self.rate / self.chunk * time):
         data = stream.read(self.chunk)
         frames.append(data)
     stream.stop_stream()
     print "Recording Complete"
     stream.close()
     audio.terminate()
     write_frames = open_audio(self.file, 'wb')
     write_frames.setnchannels(self.channel)
     write_frames.setsampwidth(audio.get_sample_size(self.format))
     write_frames.setframerate(self.rate)
     write_frames.writeframes(''.join(frames))
     write_frames.close()
     self.convert()
Example #4
 def record(self, time):
     audio = PyAudio()
     stream = audio.open(input_device_index=self.device_index,
                         output_device_index=self.device_index,
                         format=self.format,
                         channels=self.channel,
                         rate=self.rate,
                         input=True,
                         frames_per_buffer=self.chunk)
     print "Recording..."
     frames = []
     for i in range(0, self.rate / self.chunk * time):
         data = stream.read(self.chunk)
         frames.append(data)
     stream.stop_stream()
     print "Recording Complete"
     stream.close()
     audio.terminate()
     write_frames = open_audio(self.file, 'wb')
     write_frames.setnchannels(self.channel)
     write_frames.setsampwidth(audio.get_sample_size(self.format))
     write_frames.setframerate(self.rate)
     write_frames.writeframes(''.join(frames))
     write_frames.close()
     self.convert()
Example #5
def play(frequency, length):
    print("Playing impulse at %sHz" % frequency);
    frequency = float(frequency);
    BITRATE = 16000

    FREQUENCY = frequency
    LENGTH = length

    NUMBEROFFRAMES = int(BITRATE * LENGTH)
    RESTFRAMES = NUMBEROFFRAMES % BITRATE
    WAVEDATA = ''

    for x in range(NUMBEROFFRAMES):
       WAVEDATA += chr(int(math.sin(x / ((BITRATE / FREQUENCY) / math.pi)) * 127 + 128))

    for x in range(RESTFRAMES):
        WAVEDATA += chr(128)

    p = PyAudio()
    stream = p.open(
        format=p.get_format_from_width(1),
        channels=1,
        rate=BITRATE,
        output=True,
        )
    stream.write(WAVEDATA)
    stream.stop_stream()
    stream.close()
    p.terminate()
Example #6
def Audio_play(filepath):
    '''
    play audio
    '''
    CHUNK = 1024

    wf = wave.open(filepath, 'rb')
    pa = PyAudio()
    default_output = pa.get_default_host_api_info().get('defaultOutputDevice')
    stream =pa.open(format   = pa.get_format_from_width(wf.getsampwidth()), 
                    channels = wf.getnchannels(), 
                    rate     = wf.getframerate(), 
                    output   = True,
                    output_device_index = default_output)

    NUM = int(wf.getframerate()/CHUNK * 15)
    logging.info(">> START TO  PLAY  AUDIO")
    while NUM:
        data = wf.readframes(CHUNK)
        if data == " ": break
        stream.write(data)
        NUM -= 1
    stream.stop_stream()
    stream.close()
    del data
    pa.terminate()
class BD_API():
    def __init__(self):
        # super(BD_API, self).__init__(parent)
        # self.initUi()
        self.initariateV()

    def initariateV(self):
        self.pa = None
        self.stream = None

    def save_wave_file(self, filename, data):
        '''save the data to the wav file'''
        self.wf = wave.open(filename, 'wb')
        self.wf.setnchannels(channels)  # number of channels
        self.wf.setsampwidth(sampwidth)  # sample width in bytes, 1 or 2
        self.wf.setframerate(framerate)  # sampling rate, 8000 or 16000
        self.wf.writeframes(
            b"".join(data)
        )  # https://stackoverflow.com/questions/32071536/typeerror-sequence-item-0-expected-str-instance-bytes-found
        self.wf.close()

    def my_record(self):
        self.pa = PyAudio()
        self.stream = self.pa.open(format=paInt16,
                                   channels=1,
                                   rate=framerate,
                                   input=True,
                                   frames_per_buffer=NUM_SAMPLES)
        print("开始录音。。。")
        frames = []
        temp1 = []
        temp2 = 0
        # t1=time.time()
        while (True):
            print('begin ')
            temp2 = temp2 + 1
            for i in range(0, int(framerate / NUM_SAMPLES)):
                data = self.stream.read(NUM_SAMPLES)
                frames.append(data)
            audio_data = np.fromstring(data, dtype=np.short)
            large_sample_count = np.sum(audio_data > 800)
            temp = np.max(audio_data)
            if temp2 >= 60:
                print("超过百度调用最大时长")
                break
            if temp < 800:
                # print("未检测到信号")
                temp1.append(temp)
                if len(temp1) == 2:
                    print('未说话超过2s')
                    break
        print("录音结束")
        self.stream.stop_stream()
        self.stream.close()
        self.pa.terminate()
        self.save_wave_file('01.pcm', frames)

    def get_file_content(self, filePath):
        with open(filePath, 'rb') as fp:
            return fp.read()
Example #8
def LuYin(b):  # recording function

    # sampling rate 16 kHz
    #RECORD_SECONDS = Time    # recording duration in seconds
    WAVE_OUTPUT_FILENAME = 'recode.wav'  # output file name
    a = 'b'
    if eval(a) == 0:
        a = '5'
    if eval(a) != 0:
        p = PyAudio()
        # a check could be added here so that recording only starts on a trigger value,
        # ideally when a key is pressed
        stream = p.open(format=FORMAT,
                        channels=CHANNELS,
                        rate=RATE,
                        input=True,
                        frames_per_buffer=CHUNK)
        print("*录音开始*")

        frames = []
        for i in range(0, int(RATE / CHUNK * (eval(a) + 1))):
            data = stream.read(CHUNK)
            frames.append(data)
        print(data)
        print("* 录音结束")

        stream.stop_stream()
        stream.close()
        p.terminate()  # close the stream and shut down PyAudio

    wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')  # open for writing in binary mode
    wf.setnchannels(CHANNELS)
    wf.setsampwidth(sampwidth)
    wf.setframerate(RATE)
    wf.writeframes(b''.join(frames))
    wf.close()
Example #9
def _pyaudio() -> Iterator[PyAudio]:
    p = PyAudio()
    try:
        yield p
    finally:
        logging.info('Terminating PyAudio object')
        p.terminate()
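The generator above only becomes useful once it is wrapped as a context manager; a minimal usage sketch, assuming the original module applies contextlib.contextmanager to it (paInt16 and the 16 kHz / 1024-frame parameters below are placeholder assumptions, not taken from the snippet):

# Hypothetical usage (not part of the original example).
from contextlib import contextmanager
from pyaudio import paInt16

managed_pyaudio = contextmanager(_pyaudio)   # or decorate _pyaudio directly

with managed_pyaudio() as p:
    stream = p.open(format=paInt16, channels=1, rate=16000, input=True,
                    frames_per_buffer=1024)
    frames = [stream.read(1024) for _ in range(16)]   # roughly one second of audio
    stream.stop_stream()
    stream.close()
# p.terminate() has already run at this point, courtesy of the finally block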
Example #10
def _pyaudio() -> Generator[PyAudio, None, None]:
    p = PyAudio()
    try:
        yield p
    finally:
        print('Terminating PyAudio object')
        p.terminate()
class PyAudioSense:
    def __init__(self):
        self.rate = MIN_RECOMMANDED_SAMPLING_RATE
        chunk = int(self.rate / 2)
        self.buff = queue.Queue()
        self.audio_interface = PyAudio()
        self.audio_stream = self.audio_interface.open(
            format=paFloat32,
            channels=1,
            rate=self.rate,
            input=True,
            frames_per_buffer=chunk,
            stream_callback=self._fill_buffer)

    def stop(self):
        self.audio_stream.stop_stream()
        self.audio_stream.close()
        self.buff.put(None)
        self.audio_interface.terminate()

    def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
        self.buff.put(in_data)
        return None, paContinue

    def generator(self):
        while True:
            chunk = self.buff.get()
            if chunk is None:
                return
            yield chunk
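A short usage sketch for the PyAudioSense class above; the consumer thread and the two-second duration are assumptions for illustration, and MIN_RECOMMANDED_SAMPLING_RATE is expected to be defined in the original module:

# Hypothetical usage (not part of the original example).
import threading
import time

sense = PyAudioSense()               # opens the callback-driven input stream

def consume():
    for chunk in sense.generator():  # yields raw float32 buffers until stop()
        print("captured %d bytes" % len(chunk))

t = threading.Thread(target=consume)
t.start()
time.sleep(2)                        # record for roughly two seconds
sense.stop()                         # closes the stream and unblocks the generator
t.join()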
Example #12
def Microphone(File, Seconds):
 CHUNK = 1024
 FORMAT = paInt16
 CHANNELS = 2
 RATE = 44100
 RECORD_SECONDS = float(Seconds)
 WAVE_OUTPUT_FILENAME = File
 p = PyAudio()
 stream = p.open(format=FORMAT,
                channels=CHANNELS,
                rate=RATE,
                input=True,
                frames_per_buffer=CHUNK)
 frames = []
 for i in range(0, int(RATE/CHUNK * RECORD_SECONDS)):
    data = stream.read(CHUNK)
    frames.append(data)
 stream.stop_stream()
 stream.close()
 p.terminate()
 wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
 wf.setnchannels(CHANNELS)
 wf.setsampwidth(p.get_sample_size(FORMAT))
 wf.setframerate(RATE)
 wf.writeframes(b''.join(frames))
 wf.close()
Example #13
def playvoice():
    # chunk = 1024  # bytes per chunk
    # wf is the opened audio file
    p = PyAudio()
    # chunk = 1024, channels = 1, rate = 8000
    stream = p.open(format=pyaudio.paInt16, channels=1,
                    rate=8000, output=True)
    # create a stream object; frames are played back through it directly
    # the client acts as the receiving end of the request
    client = socket.socket()
    client.connect(('localhost', 6969))
    # client connection established
    # wait for the data sent by the server
    while not signal_time.is_set():
        # the event is only set (True) once the call should end
        # a hang-up signal is needed here; this one is only local, the remote side needs its own too
        data = client.recv(1024)
        stream.write(data)
        print(data)
        if data == b'':
            break
        # an end-of-stream signal still needs to be defined
        # receive the data, then play it
    client.close()
    # done, clean up
    stream.stop_stream()  # stop the stream
    stream.close()
    p.terminate()
Example #14
def play():
	
	wavName = 'test.wav'
	print "play %s" % (wavName)
	wf = wave.open(wavName, 'rb')

	pa = PyAudio()

	stream = pa.open(format=pa.get_format_from_width(wf.getsampwidth()),
					channels=wf.getnchannels(),
					rate=wf.getframerate(),
					output=True)

	data = wf.readframes(CHUNK)
	td = threading.Thread(target=startGame)
	td.start()
	while data != '':
		stream.write(data)
		data = wf.readframes(CHUNK)
		
		audio_data = np.fromstring(data, dtype=np.short)
		print data

	stream.stop_stream()
	stream.close()

	pa.terminate()
def run_predictor():

    # range(1) means ensamble learning is not used to get faster inference
    # model_0.h5 is the best one chosen among models
    # range(5) means we have 5 models and want to apply ensamble learning into these 5 models

    model = get_2d_conv_model(config)
    model.load_weights(MODEL_FOLDER + '/model_0.h5')
    #model.load_weights(MODEL_FOLDER + '/model_%d.h5'%i)

    # realtime recording
    audio = PyAudio()
    stream = audio.open(
        format=FORMAT,
        channels=CHANNELS,
        rate=RATE,
        input=True,
        #input_device_index=0,
        frames_per_buffer=SAMPLES_PER_CHUNK,
        start=False,
        stream_callback=callback)

    # print(audio.get_sample_size(FORMAT))
    # sample_width = audio.get_sample_size(FORMAT)

    # main loop
    stream.start_stream()
    while stream.is_active():
        main_process(model, on_predicted)
        sleep(0.001)
    stream.stop_stream()
    stream.close()
    # finish
    audio.terminate()
    exit(0)
Example #16
def audioTask(_running, _queue, _queue_1):
    print("audioTaskStart")
    pa = PyAudio()
    audioStream = pa.open(format=paInt16,
                          channels=channels,
                          rate=framerate,
                          input=True,
                          frames_per_buffer=CHUNK,
                          output=True)

    while audioStream.is_active() and _running.value:
        # capture a block of audio
        string_audio_data = audioStream.read(CHUNK)
        # play the block back immediately
        audioStream.write(string_audio_data)
        try:
            _queue.put(string_audio_data, False)
            _queue_1.put(string_audio_data, False)
        except Exception as e:
            pass

    audioStream.close()
    pa.terminate()

    return "audio finish"
Example #17
def pyrec(file_name,
          CHUNK=1024,
          FORMAT=paInt16,
          CHANNELS=2,
          RATE=16000,
          RECORD_SECONDS=2):
    p = PyAudio()

    stream = p.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)

    print("开始录音,请说话......")

    frames = []

    for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
        data = stream.read(CHUNK)
        frames.append(data)

    print("录音结束,请闭嘴!")

    stream.stop_stream()
    stream.close()
    p.terminate()

    wf = wave.open(file_name, 'wb')
    wf.setnchannels(CHANNELS)
    wf.setsampwidth(p.get_sample_size(FORMAT))
    wf.setframerate(RATE)
    wf.writeframes(b''.join(frames))
    wf.close()
    return
Example #18
    def record(self):
        #open the input of wave
        pa = PyAudio()
        stream = pa.open(format = paInt16, channels = 1,
            rate = self.getRate(pa), input = True,
            frames_per_buffer = self.NUM_SAMPLES)
        save_buffer = []

        record_start = False
        record_end = False

        no_record_times = 0


        while 1:
            #read NUM_SAMPLES sampling data
            string_audio_data = stream.read(self.NUM_SAMPLES)
            if record_start == True :save_buffer.append(string_audio_data)
            print max(array('h', string_audio_data))
            if max(array('h', string_audio_data)) >5000:
                record_start = True
                no_record_times = 0
            else:
                no_record_times += 1
            if record_start == False:continue

            if no_record_times >10:
                break
        stream.close()
        pa.terminate()
        return save_buffer
Example #19
def play_voice(audio_file):

    chunk = 1024  # 2014kb
    wf = wave.open(audio_file, 'rb')
    p = PyAudio()
    stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                    channels=wf.getnchannels(),
                    rate=wf.getframerate(),
                    output=True)

    data = wf.readframes(chunk)  # read the first chunk of data

    while True:
        data = wf.readframes(chunk)
        if signal1.is_set():
            # set to True once the stop button is clicked
            break
        if not data:  # readframes() returns an empty buffer at the end of the file
            break
        stream.write(data)

    stream.stop_stream()  # stop the stream
    stream.close()
    p.terminate()  # shut down PyAudio
    print('play() finished!')
Example #20
class AudioRecorder():

    # Audio class based on pyAudio and Wave
    def __init__(self, filename):
        self.open = True
        self.rate = 44100
        self.frames_per_buffer = 1024
        self.channels = 2
        self.startTime = 0
        self.endTime = 0
        self.duration = 0
        self.format = 8
        self.audio_filename = filename+".wav"
        self.audio = PyAudio()
        self.stream = self.audio.open(format=self.format,
                                      channels=self.channels,
                                      rate=self.rate,
                                      input=True,
                                      frames_per_buffer=self.frames_per_buffer)
        self.audio_frames = []

    # Audio starts being recorded in separate thread

    def record(self):

        self.stream.start_stream()
        while(self.open == True):
            data = self.stream.read(self.frames_per_buffer)
            self.audio_frames.append(data)
            if self.open == False:
                break

    # Finishes the audio recording therefore the thread too

    def stop(self):

        if self.open == True:
            self.open = False
            time.sleep(0.5)
            self.stream.stop_stream()
            self.stream.close()
            self.audio.terminate()
            self.endTime = time.time()
            self.duration = self.endTime - self.startTime
            time.sleep(0.5)
            waveFile = wave.open(self.audio_filename, 'wb')
            waveFile.setnchannels(self.channels)
            waveFile.setsampwidth(self.audio.get_sample_size(self.format))
            waveFile.setframerate(self.rate)
            waveFile.writeframes(b''.join(self.audio_frames)) # Audio write out at this point
            waveFile.close()
        time.sleep(0.5)
        return self.duration

    # Launches the audio recording function using a thread
    def start(self):
        self.startTime = time.time()
        self.open = True
        self.audio_thread = threading.Thread(target=self.record)
        self.audio_thread.start()
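A brief usage sketch for the AudioRecorder class above; the file name and the five-second duration are placeholders:

# Hypothetical usage (not part of the original example).
import time

recorder = AudioRecorder("clip")   # opens the input stream, will write clip.wav
recorder.start()                   # launches the recording thread
time.sleep(5)
duration = recorder.stop()         # stops the stream, writes the file, returns elapsed seconds
print("recorded %.1f s" % duration)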
def sine_tone(freq, duration, bitrate):
    #See http://en.wikipedia.org/wiki/Bit_rate#Audio
    BITRATE = bitrate #number of frames per second/frameset.

    #See http://www.phy.mtu.edu/~suits/notefreqs.html
    FREQUENCY = freq #Hz, waves per second, 261.63=C4-note.
    LENGTH = duration #seconds to play sound

    NUMBEROFFRAMES = int(BITRATE * LENGTH)
    RESTFRAMES = NUMBEROFFRAMES % BITRATE
    WAVEDATA = ''
    # print (type(FREQUENCY))

    for x in xrange(NUMBEROFFRAMES):
        WAVEDATA += chr(int(math.sin(x / ((BITRATE / FREQUENCY) / math.pi)) * 127 + 128))
    #fill remainder of frameset with silence
    for x in xrange(RESTFRAMES):
        WAVEDATA += chr(128)

    p = PyAudio()
    stream = p.open(
        format=p.get_format_from_width(1),
        channels=1,
        rate=BITRATE,
        output=True,
        )
    stream.write(WAVEDATA)
    stream.stop_stream()
    stream.close()
    p.terminate()
Example #22
class pybeeptone:
    def __init__(self, rate=44100):
        self.rate = rate  # honour the requested sample rate (was hard-coded to 44100)
        self.pyaudio = PyAudio()
        self.stream = self.pyaudio.open(
                                format = self.pyaudio.get_format_from_width(1),
                                channels = 1, rate = self.rate, output = True)

    def play_tone(self, freq=1000, duration=0.3):
        rate = self.rate
        length = int(math.ceil(self.rate*duration))
        data = ''.join( [chr(int(math.sin(x/((rate/freq)/math.pi))*127+128)) 
                            for x in xrange(length)] )
        self.stream.write(data)

    def play_rest(self, duration):
        rate = self.rate
        length = int(math.ceil(self.rate*duration))
        data = ''.join( [chr(int(128)) for x in xrange(length)] )
        self.stream.write(data)

    def close(self):
        self.stream.stop_stream()
        self.stream.close()
        self.pyaudio.terminate()
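A short usage sketch for the pybeeptone class above; the frequencies and durations are arbitrary, and the class itself is Python 2 code (xrange, str sample buffers):

# Hypothetical usage (not part of the original example).
beeper = pybeeptone()
beeper.play_tone(freq=880, duration=0.2)   # high beep
beeper.play_rest(0.1)                      # short pause
beeper.play_tone(freq=440, duration=0.2)   # low beep
beeper.close()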
Example #23
def sine_tone(frequencies, amplitudes, duration, volume=1.0, sample_rate=22050):
    n_samples = int(sample_rate * duration)
    restframes = n_samples % sample_rate

    p = PyAudio()
    stream = p.open(format=p.get_format_from_width(1), # 8bit
                    channels=1, # mono
                    rate=sample_rate,
                    output=True)

    def s(t):
        r = 0
        for i in range(0, len(frequencies)):
            r += volume * amplitudes[i] * math.sin(2 * math.pi * frequencies[i] * t / sample_rate)
        return r

    samples = (int(s(t) * 0x7f + 0x80) for t in range(n_samples))
    for buf in zip(*[samples]*sample_rate): # write several samples at a time
        stream.write(bytes(bytearray(buf)))

    # fill remainder of frameset with silence
    stream.write(b'\x80' * restframes)

    stream.stop_stream()
    stream.close()
    p.terminate()
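A short usage sketch for the sine_tone() variant above; the three partials and the 0.3 volume are arbitrary choices that keep the summed 8-bit samples from clipping:

# Hypothetical usage (not part of the original example): play a 2-second chord.
sine_tone(frequencies=[440.0, 554.37, 659.25],
          amplitudes=[1.0, 0.8, 0.6],
          duration=2.0,
          volume=0.3)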
Example #24
def sine_tone(frequency, duration, volume=1, sample_rate=22050):
  n_samples = int(sample_rate * duration)
  restframes = n_samples % sample_rate

  p = PyAudio()
  stream = p.open(format=p.get_format_from_width(2), # 16 bit
                  channels=2,
                  rate=sample_rate,
                  output=True)

  for i in xrange(0, 10):
    if i % 2 == 0:
      frequency = ZERO_FREQUENCY
    else:
      frequency = ONE_FREQUENCY

    s = lambda t: volume * math.sin(2 * math.pi * frequency * t / sample_rate)
    samples = (int(s(t) * 0x7f + 0x80) for t in xrange(n_samples))
    for buf in izip(*[samples]*sample_rate): # write several samples at a time
      stream.write(bytes(bytearray(buf)))

  # fill remainder of frameset with silence
  stream.write(b'\x80' * restframes)

  stream.stop_stream()
  stream.close()
  p.terminate()
Example #25
    def record_voice(self):
        #open the input of wave
        pa = PyAudio()
        stream = pa.open(format=paInt16,
                         channels=self._channels,
                         rate=self._sample_rate,
                         input=True,
                         input_device_index=2,
                         frames_per_buffer=self._chuck)
        save_buffer = []
        print('recording...')
        for i in range(
                0, int(self._sample_rate / self._chuck * self._sample_time)):
            data = stream.read(self._chuck, exception_on_overflow=False)
            save_buffer.append(data)

        stream.stop_stream()
        stream.close()
        pa.terminate()

        self.save_wave_file(self._record_speech_name, save_buffer)
        save_buffer = []
        print(self._record_speech_name, "saved")
        print('speech record OK!')
        os.system(
            "ffmpeg -y -f s16le -ac 1 -ar 44100 -i recorded_speech.wav -acodec pcm_s16le -f s16le -ac 1 -ar 16000 recorded_speech.pcm -loglevel -8"
        )
    def _playback(self):
        """
        Playback function for thread. Plays a segment until running is set to
        false or end is reached
        """
        # open stream
        p = PyAudio()
        stream = p.open(
            format=p.get_format_from_width(self._segment.sample_width),
            channels=self._segment.channels,
            rate=self._segment.frame_rate,
            output=True,
            output_device_index=self._audio_device,
        )

        # output the file in millisecond steps
        while self._pos < len(self._segment) and self._running:
            stream.write(self._segment[self._pos]._data)
            self._pos += 1

        # close stream
        time.sleep(stream.get_output_latency())
        stream.stop_stream()
        stream.close()

        p.terminate()
Example #27
    def run(self):
        pa = PyAudio()

        self.stream = pa.open(format=paFloat32,
                              channels=CHANNELS,
                              rate=SAMPLE_RATE,
                              output=False,
                              input=True,
                              stream_callback=self.callback,
                              input_device_index=INPUT_DEVICE_INDEX,
                              frames_per_buffer=CHUNK)

        self.stream.start_stream()

        pyquit = False
        while self.stream.is_active() and not pyquit:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pyquit = True
            sleep(0.76)  # 24 fps

        self.stream.stop_stream()
        self.stream.close()
        pa.terminate()
        pygame.quit()
class BD_API():
    def __init__(self):
        self.initariateV()
    def initariateV(self):
        self.APP_ID = '16143259'
        self.API_KEY = 'Xy3G5WD6E8kEFwSy3FuCgSlm'
        self.SECRET_KEY = '94QyVX3gFvNjTq3fwKGSNmPgApdBFR6u'
        self.client = AipSpeech(self.APP_ID, self.API_KEY, self.SECRET_KEY)
        self.pa = None
        self.stream = None
    def save_wave_file(self,filename,data):
        '''save the data to the wav file'''
        self.wf = wave.open(filename, 'wb')
        self.wf.setnchannels(channels)  # number of channels
        self.wf.setsampwidth(sampwidth)  # sample width in bytes, 1 or 2
        self.wf.setframerate(framerate)  # sampling rate, 8000 or 16000
        self.wf.writeframes(b"".join(data))  # https://stackoverflow.com/questions/32071536/typeerror-sequence-item-0-expected-str-instance-bytes-found
        self.wf.close()
    def my_record(self):
        self.pa = PyAudio()
        self.stream = self.pa.open(format=paInt16, channels=1,
                         rate=framerate, input=True,
                         frames_per_buffer=NUM_SAMPLES)
        print("开始录音")
        frames = []
        for i in range(0, int(framerate / NUM_SAMPLES * RECORD_SECONDS)):
            data = self.stream.read(NUM_SAMPLES)
            frames.append(data)
        print("录音结束!")
        self.stream.stop_stream()
        self.stream.close()
        self.pa.terminate()
        self.save_wave_file('01.wav', frames)
    def get_file_content(self,filePath):
         with open(filePath, 'rb') as fp:
              return fp.read()
    def send_request(self, words):
        result = self.client.synthesis(words, 'zh', 1, {'vol': 5, 'per': 4})
        if not isinstance(result, dict):
            with tempfile.NamedTemporaryFile(suffix='.mp3', delete=False) as f:
                f.write(result)
                tmpfile = f.name
                return tmpfile,result,f
    def say(self, words):
        tmpfile, result,f = self.send_request(words)
        # time.sleep(0.5)
        print("file name is"+ tmpfile)
        subprocess.call("play -q %s" % tmpfile, shell=True)

    def InitDevice():
        audioDev = pyaudio.PyAudio()
        stream = audioDev.open(format=pyaudio.paInt8,
                               channels=1,
                               rate=16000,
                               output=True)
        return stream

    def play(stream, Result):
        stream.write(Result)
Example #29
class Stream(Thread):
    def __init__(self, f, on_terminated):
        self.__active = True
        self.__path = f
        self.__paused = True
        self.on_terminated = on_terminated
        self.__position = 0
        self.__chunks = []
        self.__pyaudio = PyAudio()
        Thread.__init__(self)
        self.start()

    def play(self):
        self.__paused = False

    def seek(self, seconds):
        self.__position = int(seconds * 10)

    def is_playing(self):
        return self.__active and not self.__paused

    def get_position(self):
        return int(self.__position / 10)

    def get_duration(self):
        return int(len(self.__chunks) / 10)

    def pause(self):
        self.__paused = True

    def kill(self):
        self.__active = False

    def __get_stream(self):
        self.__segment = AudioSegment.from_file(self.__path)
        self.__chunks = make_chunks(self.__segment, 100)
        return self.__pyaudio.open(format=self.__pyaudio.get_format_from_width(self.__segment.sample_width),
                                   channels=self.__segment.channels,
                                   rate=self.__segment.frame_rate,
                                   output=True)

    def run(self):
        stream = self.__get_stream()
        while self.__position < len(self.__chunks):
            if not self.__active:
                break
            if not self.__paused:
                # noinspection PyProtectedMember
                data = self.__chunks[self.__position]._data
                self.__position += 1
            else:
                free = stream.get_write_available()
                data = chr(0) * free
            stream.write(data)

        stream.stop_stream()
        self.__pyaudio.terminate()
        if self.__active:
            self.on_terminated()
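A short usage sketch for the Stream thread above (it relies on pydub's AudioSegment and make_chunks); the file name and timings are placeholders, and on_terminated only fires if playback reaches the end of the file:

# Hypothetical usage (not part of the original example).
import time

player = Stream("song.mp3", on_terminated=lambda: print("playback finished"))
player.play()        # the thread starts paused, so un-pause it
time.sleep(10)
player.seek(30)      # jump to 0:30 (chunks are 100 ms each)
time.sleep(5)
player.kill()        # stop early; on_terminated is not called in this case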
Example #30
def Audio_record_play(seconds,play,filename):
    '''
    This function records audio and can optionally play a source file at the
    same time; to play while recording, set play to True.
    Sample rate: 44100 Hz, 16 bit.
    '''
    CHUNK = 1024
    CHANNELS = 2
    SAMPLING_RATE = 44100
    FORMAT = paInt16
    NUM = int(SAMPLING_RATE/CHUNK * seconds)

    save_buffer = []

    if play is True:
        source_file = autohandle_directory + '/audio_lib/'+'source1.wav'
        swf = wave.open(source_file, 'rb')
    
    #open audio stream
    pa = PyAudio()
    default_input = pa.get_default_host_api_info().get('defaultInputDevice')
    stream = pa.open(
                    format   = FORMAT, 
                    channels = CHANNELS, 
                    rate     = SAMPLING_RATE, 
                    input    = True,
                    output   = play,
                    frames_per_buffer  = CHUNK,
                    input_device_index = default_input
                    )

    logging.info(">> START TO RECORD AUDIO")
    while NUM:
        save_buffer.append(stream.read(CHUNK))
        NUM -= 1
        if play is True:
            data = swf.readframes(CHUNK)
            stream.write(data)
            if data == " ": break

    #close stream
    stream.stop_stream()
    stream.close()
    pa.terminate()

    # save wav file
    def save_wave_file(filename,data):
        wf_save = wave.open(filename, 'wb')
        wf_save.setnchannels(CHANNELS)
        wf_save.setsampwidth(pa.get_sample_size(FORMAT))
        wf_save.setframerate(SAMPLING_RATE)
        wf_save.writeframes(b"".join(data))  # frames are bytes
        wf_save.close()

    save_wave_file(filename, save_buffer)

    del save_buffer[:]
Example #31
class AudioServer(Thread):
    """Server class for receiving audio data from client.
    Keep running as a thread until self.sock is deleted.
    """
    def __init__(self, port):
        super(AudioServer, self).__init__()

        self.setDaemon(True)
        self.address = ('', port)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.recoder = PyAudio()
        self.audio_stream = None

        # parameters for recording
        self.chunk = config.VOICE_CHUNK
        self.format = config.VOICE_FORMAT
        self.channels = config.VOICE_CHANNELS
        self.rate = config.VOICE_RATE
        self.max_record_seconds = config.MAX_RECORD_SECONDS * 50

        self.is_alive = True

    def __del__(self):
        self.sock.close()
        if self.audio_stream is not None:
            self.audio_stream.stop_stream()
            self.audio_stream.close()
        self.recoder.terminate()

    def run(self):
        self.sock.bind(self.address)
        self.sock.listen(1)
        client_sock, _ = self.sock.accept()

        payload_size = struct.calcsize('L')
        self.audio_stream = self.recoder.open(format=self.format,
                                              channels=self.channels,
                                              rate=self.rate,
                                              output=True,
                                              frames_per_buffer=self.chunk)
        audio_data = ''.encode('utf-8')
        while self.is_alive:  # get one piece and output per iteration
            packed_data = client_sock.recv(payload_size)
            msg_size = struct.unpack('L', packed_data)[0]
            while len(audio_data) < msg_size:
                next_size = config.MAX_PACKAGE_SIZE if \
                    len(audio_data) + config.MAX_PACKAGE_SIZE <= msg_size else \
                    msg_size - len(audio_data)
                next_data = client_sock.recv(next_size)
                audio_data += next_data
            audio_frames = loads(audio_data)
            for frame in audio_frames:
                self.audio_stream.write(frame, self.chunk)
            audio_data = ''.encode('utf-8')

    def kill(self):
        """Kill the thread."""
        self.is_alive = False
class MicrophoneStream(object):
    """Opens a recording stream as a generator yielding the audio chunks."""
    def __init__(self, rate, chunk):
        self._rate = rate
        self._chunk = chunk
        # Create a thread-safe buffer of audio data
        self._buff = queue.Queue()
        self.closed = True

    def __enter__(self):
        self._audio_interface = PyAudio()
        self._audio_stream = self._audio_interface.open(
            format = paInt16,
            channels = 1,
            rate = self._rate,
            input = True,
            frames_per_buffer = self._chunk,
            # Run the audio stream asynchronously to fill the buffer object.
            # This is necessary so that the input device's buffer doesn't
            # overflow while the calling thread makes network requests, etc.
            stream_callback=self._fill_buffer,
        )
        self.closed = False
        return self

    def __exit__(self, type, value, traceback):
        self._audio_stream.stop_stream()
        self._audio_stream.close()
        self.closed = True
        # Signal the generator to terminate so that the client's
        # streaming_recognize method will not block the process termination.
        self._buff.put(None)
        self._audio_interface.terminate()

    def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
        """Continuously collect data from the audio stream, into the buffer."""
        self._buff.put(in_data)
        return None, paContinue

    def generator(self):
        while not self.closed:
            # Use a blocking get() to ensure there's at least one chunk of
            # data, and stop iteration if the chunk is None
            chunk = self._buff.get()
            if chunk is None:
                return
            data = [chunk]
            # Now consume whatever other data's still buffered.
            while True:
                try:
                    chunk = self._buff.get(block=False)
                    if chunk is None:
                        return
                    data.append(chunk)
                except queue.Empty:
                    break
            yield b''.join(data)
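A short usage sketch for the MicrophoneStream context manager above; the 16 kHz rate, 100 ms chunk size, and five-second cut-off are assumptions for illustration:

# Hypothetical usage (not part of the original example).
RATE = 16000
CHUNK = RATE // 10                    # 100 ms per buffer

with MicrophoneStream(RATE, CHUNK) as mic:
    captured = 0
    for chunk in mic.generator():     # yields joined byte buffers as they arrive
        captured += len(chunk)
        if captured >= RATE * 2 * 5:  # 16-bit mono -> 2 bytes per sample, ~5 seconds
            break
print("captured %d bytes" % captured)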
Example #33
def play(filepath):
    fp = open(filepath, 'rb')
    data = fp.read()
    fp.close()
    aud = BytesIO(data)
    sound = AudioSegment.from_file(aud, format='mp3')  # LONG

    global isPlaying
    p = PyAudio()
    stream = p.open(format=p.get_format_from_width(sound.sample_width),
                    channels=sound.channels,
                    rate=sound.frame_rate,
                    output=True)
    print("[PLAYAUDIO INTERVAL]%f" % (time.time()))
    datas = sound.raw_data

    isPlaying = True

    stream.write(datas)  # ------------ write the whole buffer in one call
    '''
    Here is the widely shared version of this code from the web (slightly modified,
    but the idea is the same):
    # framewidth = sound.frame_width
    # i = 0
    # while True:
    #     data = datas[i*framewidth:(i+1)*framewidth]
    #     if data == b"":
    #         break
    #     i += 1
    #     stream.write(data)
    That version writes the audio one frame at a time, which looks harmless at first.
    The consequence is that the CPU has to keep some headroom free for this thread at
    all times; as soon as the load rises a little, the audio starts to crackle.
    It turns out the whole buffer can be written in a single call (see the line above).
    Here is the official docstring of write():
        def write(self, frames, num_frames=None,
              exception_on_underflow=False):

        """
        Write samples to the stream.  Do not call when using
        *non-blocking* mode.

        :param frames:  # the audio data frames
           The frames of data.
        :param num_frames:  # the number of frames
           The number of frames to write.
           Defaults to None, in which this value will be
           automatically computed.  # if omitted, the value is computed automatically
    So the frame-by-frame version recomputes the frame count (always 1) on every
    single call, which is what wastes the CPU.
    Still, credit to that author for pointing out the pydub library.
    After this change the playback no longer stutters, even if the main thread is
    busy running while True: pass.
    '''

    stream.stop_stream()  # stop the stream
    stream.close()
    p.terminate()  # shut down PyAudio
Example #34
class RadioServer:
	def __init__(self):
		self.pa=PyAudio()
		self.input_names={}
		self.output_names={}
		self.listeners=[]

	def registerDevices(self,inputDevice=None,outputDevice=None):
		if inputDevice==None:
			self.inputDevice=InputDevice(self.pa)
		else:
			self.inputDevice=inputDevice

		if outputDevice==None:
			self.outputDevice=OutputDevice(self.pa)
		else:
			self.outputDevice=outputDevice



	def registerInput(self,descriptor,name):
		self.input_names[name]=descriptor

	def registerOutput(self,descriptor,name):
		self.output_names[name]=descriptor

	def subscribeToInput(self,name,queue):
		self.inputDevice.subscribe(queue,self.input_names[name])

	def subscribeToOutput(self,name,queue):
		self.outputDevice.subscribe(queue,self.output_names[name])

	def addListener(self,listener):
		self.listeners.append(listener)
		listener.bind(self)

	def start(self):
		for l in self.listeners:
			l.start()
		self.inputDevice.start()
		self.outputDevice.start()

	def stop(self):
		self.inputDevice.stop()
		self.outputDevice.stop()
		for l in self.listeners:
			l.stop()
		self.pa.terminate()

	def sigint(self,signal,frame):
		self.stop()

	def run_forever(self):
		self.start()
		signal.signal(signal.SIGINT,lambda s,f:self.sigint(s,f))
		signal.pause()
 def playWaveData(self, waveData):
     p = PyAudio()
     stream = p.open(format = p.get_format_from_width(1),
                     channels = 1,
                     rate = self.bitRate,
                     output = True)
     stream.write(waveData)
     stream.stop_stream()
     stream.close()
     p.terminate()
Example #36
 def play_audio(self):
     audio = PyAudio()
     stream = audio.open(format=audio.get_format_from_width(self.sampwidth),
                         channels=self.nchannels,
                         rate=self.rate,
                         output=True)
     stream.write(self.response)
     stream.stop_stream()
     stream.close()
     audio.terminate()
Example #37
 def play(self,filename):
     #os.popen(self.locpla+filename )
     wf=wave.open(self.locpla+filename,'rb')
     p=PyAudio()
     stream=p.open(format=p.get_format_from_width(wf.getsampwidth()),channels=wf.getnchannels(),rate=wf.getframerate(),output=True)
     data = wf.readframes(self.chunk)
     while data != b'':
         stream.write(data)
         data = wf.readframes(self.chunk)
     stream.close()
     p.terminate()
Example #38
    def Record(self):
        global CHANNELS

        # open audio input
        pa = PyAudio()
        stream = pa.open(format=paInt16, channels=CHANNELS, rate=self.sampling_rate, input=True, 
                        frames_per_buffer=self.cacheblock_size)

        save_count = 0          # number of sample blocks saved so far
        silence_count = 0       # number of consecutive silent blocks
        save_buffer = []        # audio buffer

        try:
            print "start recording"
            while True:
                # record one block of samples
                string_audio_data = stream.read(self.cacheblock_size)
                # convert the raw bytes to an array
                audio_data = np.fromstring(string_audio_data, dtype=np.short)
                # samples above self.level count as valid; count how many there are
                large_sample_count = np.sum(audio_data > self.level)
                print "Peak:",np.max(audio_data),"    Sum:",large_sample_count
                # if more than self.sampling_num samples are valid, the block counts as voiced
                if large_sample_count > self.sampling_num:
                    # a voiced block was captured, so increment the saved-block counter
                    save_count += 1
                else:
                    # after at least one voiced block, a failed block probably means silence
                    if(save_count > 0):
                        silence_count += 1

                # keep saving until either counter exceeds its maximum
                if (save_count <= self.max_save_length) and (silence_count <= self.max_silence_length):
                    # store the block in save_buffer
                    save_buffer.append(string_audio_data)
                else:
                    # write save_buffer to a WAV file named after the current time
                    if len(save_buffer) > 0:
                        self.filename = datetime.now().strftime("%Y-%m-%d_%H_%M_%S") + ".wav"
                        self.__Save_wave_file(self.filename, save_buffer)
                        save_buffer = []
                        print self.filename, "saved"
                    break
        except KeyboardInterrupt:
            print "manual exit"
        finally:
            # stop stream
            stream.stop_stream()  
            stream.close()
            # close PyAudio  
            pa.terminate() 
            print "exit recording"

        return self.filename
Example #39
def play():
    wf=wave.open(r"C:\Users\pengfy\PycharmProjects\smart-audio\record_files\2019-07-30-11-52-51.wav",'rb')
    p=PyAudio()
    stream=p.open(format=p.get_format_from_width(wf.getsampwidth()),channels=
    wf.getnchannels(),rate=wf.getframerate(),output=True)
    while True:
        data=wf.readframes(chunk)
        if data=="":break
        stream.write(data)
    stream.close()
    p.terminate()
Example #40
	def play(self):
		print "play %s" % (self.fileName)
		pa = PyAudio()
		stream = pa.open(format=paInt16, channels=1, rate=SAMPLING_RATE, output=True, frames_per_buffer=BUFFER_SIZE)
		
		stream.write(self.stringAudioData)
		# stream.write(self.cutAudio)
		
		stream.stop_stream()
		stream.close()
		pa.terminate()
Example #41
def play():
    wf=wave.open(file,'r')
    p=PyAudio()
    stream=p.open(format=p.get_format_from_width(wf.getsampwidth()),channels=
    wf.getnchannels(),rate=wf.getframerate(),output=True)
    data=wf.readframes(8000)
    while len(data)>0:
        stream.write(data)
        data=wf.readframes(8000)
    stream.close()
    p.terminate()
def play():
    wf=wave.open(r"01.wav",'rb')
    p=PyAudio()
    stream=p.open(format=p.get_format_from_width(wf.getsampwidth()),channels=
    wf.getnchannels(),rate=wf.getframerate(),output=True)
    while True:
        data=wf.readframes(chunk)
        if data=="":break
        stream.write(data)
    stream.close()
    p.terminate()
Example #43
 def play(self,event):
     wf=wave.open(self.name,'rb')
     p=PyAudio()
     stream=p.open(format=p.get_format_from_width(wf.getsampwidth()),channels=
                 wf.getnchannels(),rate=wf.getframerate(),output=True)
     while True:
       data=wf.readframes(1024)
       if not data:
         break
       stream.write(data)
     stream.close()
     p.terminate()
Example #44
class AudioStream(object):

    def __init__(self, sample_rate=44100, channels=1, width=2, chunk=1024,
                 input_device_index=None):
        self.sample_rate = sample_rate
        self.channels = channels
        self.width = width
        self.chunk = chunk
        self.input_device_index = input_device_index

    def __enter__(self):
        self._pa = PyAudio()
        if self.input_device_index is None:
            self.input_device_index = \
                self._pa.get_default_input_device_info()['index']
        self._stream = self._pa.open(
            format=self._pa.get_format_from_width(self.width),
            channels=self.channels,
            rate=self.sample_rate,
            input=True,
            frames_per_buffer=self.chunk,
            input_device_index=self.input_device_index)
        self._stream.start_stream()
        return self

    def read(self):
        ''' On a buffer overflow this returns 0 bytes. '''
        try:
            return self._stream.read(self.chunk)
        except IOError:
            return ''
        except AttributeError:
            raise Exception('Must be used as a context manager.')

    def stream(self):
        try:
            while True:
                bytes = self.read()
                if bytes:
                    self.handle(bytes)
        except (KeyboardInterrupt, SystemExit):
            pass

    def __exit__(self, type, value, traceback):
        self._stream.stop_stream()
        self._stream.close()
        self._pa.terminate()

    def handle(self, bytes):
        pass
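A short usage sketch for the AudioStream context manager above; the subclass name and parameters are placeholders:

# Hypothetical usage (not part of the original example).
class PrintingStream(AudioStream):
    def handle(self, bytes):
        print("got %d bytes" % len(bytes))

with PrintingStream(sample_rate=16000, chunk=1024) as s:
    s.stream()   # reads chunks and calls handle() until Ctrl-C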
Example #45
    def openWav(self):

        chunk = 1024
        wf = wave.open(r"result.wav", 'rb')
        p = PyAudio()

        stream = p.open(format = p.get_format_from_width(wf.getsampwidth()), channels = wf.getnchannels(), rate = wf.getframerate(), output = True)
        while True:
            data = wf.readframes(chunk)
            if data == "":break
            stream.write(data)

        stream.close()
        p.terminate()
Example #46
	def loaddb(self):
		print 'load', self.fileName
		pa = PyAudio()
		wf = wave.open(self.fileName, 'rb')
		save_buffer = []
		string_audio_data = wf.readframes(BUFFER_SIZE)
		while string_audio_data != '':
			audio_data = np.fromstring(string_audio_data, dtype=np.short)
			save_buffer.append( string_audio_data )
			string_audio_data = wf.readframes(BUFFER_SIZE)

		pa.terminate()
		self.stringAudioData = "".join(save_buffer)
		save_data = np.fromstring(self.stringAudioData, dtype=np.short)
		self.audioData = save_data
Example #47
def play(wave_data):
    chunk_size = BITRATE/10

    p = PyAudio()
    stream = p.open(format = p.get_format_from_width(1), 
                channels = 1, 
                rate = BITRATE, 
                output = True)

    for chunk in itertools.islice(wave_data, chunk_size):
        stream.write(chunk)

    stream.stop_stream()
    stream.close()
    p.terminate()
Example #48
    def throw_process_loop(self, q: Queue):
        """ A sound loop. """

        import sounddevice
        p = PyAudio()
        stream = p.open(
            format=self.formatting,
            channels=self.channels,
            rate=self.rate,
            output=True
        )
        while q.empty():
            stream.write(self.data)
        stream.close()
        p.terminate()  # terminate PyAudio only after the stream is closed
Example #49
    def throw_process(self, q):
        """ Plays a sound. """

        import sounddevice
        p = PyAudio()
        stream = p.open(
            format=self.formatting,
            channels=self.channels,
            rate=self.rate,
            output=True
        )
        while True:
            if not q.get():
                break
            stream.write(self.data)
        stream.close()
        p.terminate()  # terminate PyAudio only after the stream is closed
Example #50
    def run(self):
        pya = PyAudio()
        self._stream = pya.open(
            format=paInt16,
            channels=1,
            rate=SAMPLE_RATE,
            input=True,
            frames_per_buffer=WINDOW_SIZE,
            stream_callback=self._process_frame,
        )
        self._stream.start_stream()

        while self._stream.is_active() and not raw_input():
            time.sleep(0.1)

        self._stream.stop_stream()
        self._stream.close()
        pya.terminate()
Example #51
def getWavFeature(wavName):
	print "processing %s" % (wavName)
	wf = wave.open(wavName, 'rb')
	pa = PyAudio()
	stream = pa.open(format=pa.get_format_from_width(wf.getsampwidth()),
					channels=wf.getnchannels(),
					rate=wf.getframerate(),
					output=True)

	data = wf.readframes(CHUNK)
	while data != '':
		stream.write(data)
		data = wf.readframes(CHUNK)

	stream.stop_stream()
	stream.close()
	pa.terminate()

	
Example #52
def play(wavName):
	print "play %s" % (wavName)
	wf = wave.open(wavName, 'rb')
	pa = PyAudio()
	stream = pa.open(format=pa.get_format_from_width(wf.getsampwidth()),
					channels=wf.getnchannels(),
					rate=wf.getframerate(),
					output=True)

	# td = threading.Thread(target=startGame)
	# td.start()
	
	data = wf.readframes(CHUNK)
	while data != '':
		stream.write(data)
		data = wf.readframes(CHUNK)

	stream.stop_stream()
	stream.close()
	pa.terminate()
Example #53
	def load(self):
		print 'load', self.fileName
		pa = PyAudio()
		wf = wave.open(self.fileName, 'rb')
		save_buffer = []
		string_audio_data = wf.readframes(BUFFER_SIZE)
		while string_audio_data != '':
			audio_data = np.fromstring(string_audio_data, dtype=np.short)
			save_buffer.append( string_audio_data )
			string_audio_data = wf.readframes(BUFFER_SIZE)

		pa.terminate()
		self.stringAudioData = "".join(save_buffer)
		save_data = np.fromstring(self.stringAudioData, dtype=np.short)
		self.audioData = save_data[10000:10000+4608*4]
		self.stringAudioData = self.audioData.tostring()
		self.cutAudio = self.audioData
			
		# self.cut2()
		self.getFeature()
 def record(self, time=5):
     audio = PyAudio()
     stream = audio.open(format=self.format, channels=self.channel,
                         rate=self.rate, input=True,
                         frames_per_buffer=self.chunk)
     print "RECORDING START"
     frames = []
     for i in range(0, self.rate / self.chunk * time):
         data = stream.read(self.chunk)
         frames.append(data)
     stream.stop_stream()
     stream.close()
     audio.terminate()
     print "RECORDING STOP"
     write_frames = open_audio(self.audio_file, 'wb')
     write_frames.setnchannels(self.channel)
     write_frames.setsampwidth(audio.get_sample_size(self.format))
     write_frames.setframerate(self.rate)
     write_frames.writeframes(''.join(frames))
     write_frames.close()
     self.convert()
Example #55
    def Play_WAV(self, filename):
        chunk = 1024
        f = wave.open(filename,"rb")
        pa = PyAudio()

        # open stream
        stream = pa.open(format = pa.get_format_from_width(f.getsampwidth()),  
                        channels = f.getnchannels(),  
                        rate = f.getframerate(),  
                        output = True)

        # read data
        data = f.readframes(chunk)
        # play stream
        while data != '':
            stream.write(data)
            data = f.readframes(chunk)

        # stop stream
        stream.stop_stream() 
        stream.close()
        # close PyAudio  
        pa.terminate() 
Example #56
 def record(self, time, device_i=None):
     audio = PyAudio()
     print audio.get_device_info_by_index(1)
     stream = audio.open(input_device_index=device_i,output_device_index=device_i,format=self.format, channels=self.channel,
                         rate=self.rate, input=True,
                         frames_per_buffer=self.chunk)
     playDing()
     print "REC: "
     frames = []
     for i in range(0, self.rate / self.chunk * time):
         data = stream.read(self.chunk)
         frames.append(data)
     stream.stop_stream()
     print "END"
     stream.close()
     playDing()
     audio.terminate()
     write_frames = open_audio(self.file, 'wb')
     write_frames.setnchannels(self.channel)
     write_frames.setsampwidth(audio.get_sample_size(self.format))
     write_frames.setframerate(self.rate)
     write_frames.writeframes(''.join(frames))
     write_frames.close()
Example #57
							m=c
						elif tk=="sc":
							m=0.6
						for n in X(1,9):
							h=2*n-1
							sS+=m*math.sin((tp*(float(x)/R)*F)*h)/h
						A(sS)
			if ss>du:
				for x in X(int(du),int(ss)):
					A(0.0)
			nP+=1
			pP+=1
	tP+=1
lD=0
for x in z:
	lD=len(x) if len(x)>lD else lD
for x in X(lD):
	sum=0.0
	for y in X(len(z)):
		try:
			sum+=z[y][x]
		except IndexError:
			pass
	sample=min(int(((max(min(sum/len(z),1.0),-1.0)) + 1.0)*128.0),255)
	sgRaw+=chr(sample)
for D in X(1):
	st.write(sgRaw)
st.stop_stream()
st.close()
p.terminate()
Example #58
# play
def InputFromMIC():
    while True:
        string_audio_data = streamin.read(FRAME_SIZE)
        encdata = e.encode(string_audio_data)
        q.put(encdata)


t1 = Thread(target=InputFromMIC)
t1.start()

def OutputToSpeaker():
    while True:
        if q.empty() == False:
            decdata = d.decode(q.get())
            streamout.write(decdata)

t2 = Thread(target=OutputToSpeaker)
t2.start()

# control
while True:
    quality=input('Please enter a quality [0-10]:')
    e.control(SPEEX_SET_QUALITY, int(quality))

streamin.close()
streamout.close()
pa.terminate()
p.terminate()