def callback(in_data, frame_count, time_info, status):
    # Read the next block of frames from the wave file
    data = wf.readframes(frame_count)
    # Convert the byte stream to a 1-D float array in [-1, 1]
    audio_clip = Codec.decode_bytes_to_audio(data, wf.getnchannels(),
                                             wf.getsampwidth() * 8)
    # AM-modulate the clip
    audio_clip_md = Modulate.am_modulate(audio_clip, 2)
    # Convert back to a 2-channel, 16-bit byte stream for output
    data = Codec.encode_audio_to_bytes(audio_clip_md, 2, 16)
    return (data, pyaudio.paContinue)
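# Codec.decode_bytes_to_audio / encode_audio_to_bytes are called throughout with a
# (data, nchannels, bit_depth) signature. Below is a minimal sketch of an equivalent
# 16-bit PCM codec; it is an illustration under that assumption, not the project's
# actual Codec class (channel handling in particular is a guess).
import numpy as np

class PCM16Codec:
    @staticmethod
    def decode_bytes_to_audio(data, nchannels, bit_depth):
        assert bit_depth == 16, "sketch only handles 16-bit PCM"
        samples = np.frombuffer(data, dtype=np.int16).astype(np.float64)
        samples /= 32768.0                      # scale to [-1, 1]
        if nchannels > 1:                       # average interleaved channels to mono
            samples = samples.reshape(-1, nchannels).mean(axis=1)
        return samples

    @staticmethod
    def encode_audio_to_bytes(audio, nchannels, bit_depth):
        assert bit_depth == 16, "sketch only handles 16-bit PCM"
        pcm = (np.clip(audio, -1.0, 1.0) * 32767).astype(np.int16)
        if nchannels > 1:                       # duplicate mono samples across channels
            pcm = np.repeat(pcm, nchannels)
        return pcm.tobytes()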
def run(self):
    self.stream.start_stream()
    while not self.exit_flag:
        # 1. Read input audio data. This call blocks until enough frames are available
        bytes_buffer = self.stream.read(self.chirp_nosie_frames_count)
        # 2. Convert the input byte stream to a 1-D float array in [-1, 1]
        frames = Codec.decode_bytes_to_audio(bytes_buffer, self.in_channels,
                                             self.in_bit_depth)
        # 3. Locate the chirp in the input
        located_frames = ActiveNoiseControl.location(
            frames, self.noise_lib.get_down_chirp(self.in_fs))
        # 4. Channel estimation or noise elimination
        if global_var.run_time < self.simulation_length:
            ActiveNoiseControl.channel_simulation(
                located_frames, self.noise_lib.get_chirp_noise(self.in_fs))
        else:
            processed_input_frames = ActiveNoiseControl.eliminate_noise(
                located_frames, self.noise_lib.get_chirp_noise(self.in_fs))
            global_var.processed_input_pool.put(processed_input_frames)
        # 5. Update the system run time
        global_var.run_time += self.chirp_nosie_frames_count / self.in_fs
        print(global_var.run_time)
    self.stream.stop_stream()
    self.stream.close()
def run(self):
    for stream in self.streams:
        stream.start_stream()
    while not self.exit_flag:
        # 1. If the keyword pool is not empty, read from it. Go to 3
        if not global_var.keyword_pool.is_empty():
            raw_output_frames = global_var.keyword_pool.get(
                self.frames_per_buffer)
        # 2. If the keyword pool is empty, read directly from the noise pool. Go to 3
        else:
            raw_output_frames = global_var.noise_pool.get(
                self.frames_per_buffer)
        # 3. Modulate
        modulated_output_frames = Modulate.am_modulate(
            raw_output_frames, 2, self.out_fs)
        # 4. Convert the 1-D float array in [-1, 1] to a byte stream for output
        out_data = Codec.encode_audio_to_bytes(modulated_output_frames,
                                               self.out_channels,
                                               self.out_bit_depth)
        # 5. Write to each output stream
        for i, stream in enumerate(self.streams):
            # data = bytes(
            #     int(random.random() * 256)
            #     for _ in range(len(raw_output_frames)))
            stream.write(out_data)  # This call blocks until the written data has been consumed
    for stream in self.streams:
        stream.stop_stream()
        stream.close()
def read_audio(in_data, frame_count, time_info, status):
    # 1. Convert the input byte stream to a 1-D float array in [-1, 1]
    raw_input_frames = Codec.decode_bytes_to_audio(in_data)
    # 2. Put the data into the raw_input pool; this call does not block
    global_var.raw_input_pool.put(raw_input_frames)
    return (None, pyaudio.paContinue)
def run():
    anc_thread = ActiveNoiseControl(
        settings.OUT_FS, settings.IN_FS,
        settings.CHIRP_LENGTH + settings.NOISE_LENGTH,
        settings.SIMULATION_LENGTH)
    # Actual received training signal
    reality = np.load(
        'C:/Users/Tango/Documents/GitHub/Jamming/test_data/train_x0.npy')
    # Noise from the noise library
    ideal = np.load(
        'C:/Users/Tango/Documents/GitHub/Jamming/test_data/train_y0.npy')
    # Channel estimation
    anc_thread.channel_simulation(reality, ideal)

    ideal_audio = np.array([])
    for i in range(0, 10):
        # Actual received test signal
        receive_path = "C:/Users/Tango/Documents/GitHub/Jamming/test_data/test_x" + str(i) + ".npy"
        receive = np.load(receive_path)
        # Noise from the noise library
        noise_path = "C:/Users/Tango/Documents/GitHub/Jamming/test_data/test_y" + str(i) + ".npy"
        noise = np.load(noise_path)
        # Noise elimination
        temp = anc_thread.eliminate_noise(receive, noise)
        ideal_audio = np.append(ideal_audio, temp)

    print(ideal_audio)
    audio = Codec.encode_audio_to_bytes(ideal_audio, 1, 16)
    f = wave.open("test.wav", "wb")
    f.setnchannels(1)
    f.setsampwidth(2)
    f.setframerate(48000)
    f.writeframes(audio)
    f.close()
def load_wave(self, filename):
    # Read an audio file given its filename
    try:
        wf = wave.open(filename, "rb")
        print(wf.getparams())
        nchannels = wf.getparams().nchannels
        sampwidth = wf.getparams().sampwidth
        framerate = wf.getparams().framerate
        nframes = wf.getparams().nframes
        bytes_buffer = wf.readframes(nframes)  # Read all frames at once
        audio_clip = Codec.decode_bytes_to_audio(bytes_buffer, nchannels,
                                                 sampwidth * 8)
        audio_clip = signal.resample(
            audio_clip, int(self.out_fs / framerate * nframes))
        self.test_wave = [filename, 2, sampwidth, self.out_fs, audio_clip]
    except Exception:
        raise TypeError("Can't read wave file!")
def read_audio(in_data, frame_count, time_info, status):
    # 1. Convert the input byte stream to a 1-D float array in [-1, 1]
    raw_input_frames = Codec.decode_bytes_to_audio(
        in_data, InputOutPut.input_channel, InputOutPut.input_format)
    # 2. Put the data into the raw_input pool; this call does not block
    global_var.raw_input_pool.put(raw_input_frames)
    # 3. Update the system run time
    global_var.run_time = global_var.run_time + InputOutPut.frames_per_buffer / InputOutPut.input_fs
    return (None, pyaudio.paContinue)
def run(self):
    self.stream.start_stream()
    save_frames = np.array([])
    while not self.exit_flag:
        data = self.stream.read(12000)  # This call blocks until enough frames are available
        # frames = np.frombuffer(data, dtype=np.int16)
        frames = Codec.decode_bytes_to_audio(data, 1, 16)
        save_frames = np.concatenate((save_frames, frames))
        if save_frames.size > 480000:
            np.save("./tests/save_frames.npy", save_frames)
            print("**")
            break
    self.stream.stop_stream()
    self.stream.close()
def _load_wave(self, filename):
    # Read an audio file given its filename
    try:
        wf = wave.open(filename, "rb")
        nchannels = wf.getparams().nchannels
        sampwidth = wf.getparams().sampwidth
        framerate = wf.getparams().framerate
        nframes = wf.getparams().nframes
        bytes_buffer = wf.readframes(nframes)  # Read all frames at once
        audio_clip = Codec.decode_bytes_to_audio(bytes_buffer, nchannels,
                                                 sampwidth * 8)
        audio_clip = Resampler.resample(audio_clip, framerate, self.out_fs)
        self.test_wave = [filename, 2, sampwidth, self.out_fs, audio_clip]
    except Exception:
        raise TypeError("Can't read wave file!")
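# _load_wave hands rate conversion to Resampler.resample(audio_clip, framerate, self.out_fs).
# A minimal sketch built on scipy.signal.resample (an assumption; the project's actual
# Resampler may use a different method):
from scipy import signal

class Resampler:
    @staticmethod
    def resample(audio_clip, in_fs, out_fs):
        # Resample to the number of samples proportional to the new rate
        n_out = int(round(len(audio_clip) * out_fs / in_fs))
        return signal.resample(audio_clip, n_out)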
def write_audio(in_data, frame_count, time_info, status):
    # 1. If the keyword pool is not empty, read from it. If that does not yield enough
    #    data for this output block, read the remainder from the noise pool and
    #    concatenate. Go to 3
    if not global_var.keyword_pool.is_empty():
        raw_output_frames = global_var.keyword_pool.get(frame_count)
        if raw_output_frames.size < frame_count:
            raw_output_frames = np.concatenate(
                (raw_output_frames,
                 global_var.noise_pool.get(frame_count -
                                           raw_output_frames.size)))
    # 2. If the keyword pool is empty, read directly from the noise pool. Go to 3
    else:
        raw_output_frames = global_var.noise_pool.get(frame_count)
    # 3. Modulate
    modulated_output_frames = Modulate.am_modulate(raw_output_frames)
    # 4. Convert the 1-D float array in [-1, 1] to a byte stream for output
    out_data = Codec.encode_audio_to_bytes(modulated_output_frames)
    return (out_data, pyaudio.paContinue)
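# write_audio and read_audio assume pools with a non-blocking put() and a get(n) that
# returns at most n samples (padding is handled by the caller). A minimal thread-safe
# sketch of such a pool (a hypothetical AudioPool helper, not the actual global_var pools):
import threading
import numpy as np

class AudioPool:
    def __init__(self):
        self._lock = threading.Lock()
        self._buffer = np.array([], dtype=np.float64)

    def put(self, frames):
        # Append samples; never blocks the audio callback
        with self._lock:
            self._buffer = np.concatenate((self._buffer, frames))

    def get(self, n):
        # Return up to n samples; fewer if the pool runs short
        with self._lock:
            out, self._buffer = self._buffer[:n], self._buffer[n:]
            return out

    def is_empty(self):
        with self._lock:
            return self._buffer.size == 0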
def save_wave(self):
    modulated_wave_dir = os.path.join(".", "waves", "modulated")
    for raw_wave in self.raw_waves:
        filename = raw_wave[0]
        nchannels = raw_wave[1]
        sampwidth = raw_wave[2]
        framerate = raw_wave[3]
        audio_clip = raw_wave[4]
        bytes_buffer = Codec.encode_audio_to_bytes(audio_clip, 2, 16)
        wf = wave.open(os.path.join(modulated_wave_dir, filename), "wb")
        wf.setnchannels(nchannels)
        wf.setsampwidth(sampwidth)
        wf.setframerate(framerate)
        wf.writeframes(bytes_buffer)
        wf.close()

# print(r"\x" + r" \x".join(format(x, '02x') for x in NoiseLib.raw_noise_data[0][7200:7220]))  # Raw hexadecimal representation

# # Get the audio signal as bytes
# @classmethod
# def get_wave_bytes_buffer(cls, index, frame_count=-1):
#     if frame_count == -1:
#         return cls.raw_noise_data[index][-1]
#     read_index, sampwidth, nframes, data = cls.raw_noise_data[index]
#     start_index = read_index
#     end_index = read_index + frame_count * sampwidth
#     if end_index >= nframes:
#         re = data[start_index:-1] + data[0:end_index % nframes]
#     else:
#         re = data[start_index:end_index]
#     cls.raw_noise_data[index][0] = end_index % nframes
#     return re