class Replayer:

    def __init__(self, wav_file, start_pos=None):
        """
        Args:
            wav_file (str): File path
            start_pos (int): Starting chunk index
        """
        self.file_path = wav_file
        self.file = WaveReader(wav_file, config.SAMPLE_RATE, config.NUM_CHANNELS,
                               Format.WAV | Format.PCM_16)
        if start_pos is None:
            self.playback_pos = random.randint(0, self.file.frames)
        else:
            self.playback_pos = start_pos

        # Lower the amplitude so that older files (indicated in the file name)
        # are softer.
        # Time the file was created, in seconds
        file_creation_time = int(os.path.basename(self.file_path)[:-6])
        age = int(time.time()) - file_creation_time
        adjusted_age = age / 200
        if adjusted_age == 0:
            adjusted_age = 0.0001
        amp = (1 / adjusted_age) * 0.9
        if amp > 0.3:
            amp = 0.3
        self.amplitude = Amplitude(amp)

    def get_chunk(self):
        """
        Returns:
            numpy.ndarray: The next chunk of samples, scaled by the current
            amplitude.
        """
        # TODO: Document me!
        self.file.seek(self.playback_pos)
        # TODO: Is this zeros bit necessary?
        chunk = numpy.zeros((config.NUM_CHANNELS, config.CHUNK_SIZE),
                            numpy.int16, order='F')
        # chunk = next(self.file.read_iter(shared.CHUNK_SIZE))
        # chunk = self.data[self.playback_pos] * self.amplitude.value
        # Multiply by amplitude here?
        chunk = chunk * self.amplitude.value
        # print(chunk.shape)
        self.playback_pos += config.CHUNK_SIZE
        if self.playback_pos > self.file.frames:
            self.playback_pos = 0
        return chunk
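For reference, a hedged sketch of how the commented-out read in get_chunk could be wired up so it returns real samples instead of silence. It reuses only WaveReader calls that already appear in these examples (seek, buffer, read) and the same config constants; it is a sketch, not the original project's implementation, and is untested:

# Hedged sketch, not the original project's code: fill the chunk from the
# file instead of returning zeros, reusing seek()/buffer()/read().
def get_chunk(self):
    self.file.seek(self.playback_pos)
    buf = self.file.buffer(config.CHUNK_SIZE)        # (channels, CHUNK_SIZE) buffer
    nframes = self.file.read(buf)                    # frames actually read
    chunk = buf[:, :nframes] * self.amplitude.value  # scale by current amplitude
    self.playback_pos += config.CHUNK_SIZE
    if self.playback_pos > self.file.frames:
        self.playback_pos = 0
    return chunk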
def addSound(self, sound_name):
    sound_file = WaveReader(sound_name)
    self.playing_files.append(sound_file)
    self.max_channels = max(self.max_channels, sound_file.channels)
    self.playing_files_buffers_for_read[sound_file] = numpy.zeros(
        (sound_file.channels, 512), numpy.float32, order='F')
    self.store_buffers.append([None])
    self.data_list = numpy.zeros((len(self.playing_files), self.max_channels, 512),
                                 numpy.float32, order='F')
def __init__(self, sound_name):
    self.sound_name = sound_name
    sound_file = WaveReader(sound_name)
    self.playing_files = []
    self.playing_files.append(sound_file)
    print("playing_files = {}".format(self.playing_files))
    self.player_lib = pyaudio.PyAudio()
    self._volume = 1
    self.stream = None
def get_peak_volume(filepath):
    max_volume = 0.0
    try:
        with WaveReader(filepath) as r:
            for data in r.read_iter(size=streamChunk):
                # "Volume" here is the L2 norm of the left channel over the
                # block, not a true per-sample peak.
                left_channel = data[0]
                volume = np.linalg.norm(left_channel)
                if volume > max_volume:
                    max_volume = volume
    except Exception as e:
        logging.error("Could not get peak volume, assuming 0. Exception: %s", e)
    return max_volume
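A small usage sketch for the helper above, assuming a hypothetical recordings/ folder and a streamChunk of 512 frames (neither comes from the original snippet):

# Hedged usage sketch; streamChunk and the "recordings" folder are assumptions
# for illustration, not part of the original code.
import os

streamChunk = 512
wav_paths = [os.path.join("recordings", name)
             for name in os.listdir("recordings") if name.endswith(".wav")]
loudest = max(wav_paths, key=get_peak_volume) if wav_paths else None
print("Loudest file:", loudest)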
class SoundFile:

    def __init__(self, name, frame_size=512):
        self.file_pointer = WaveReader(name)
        self.name = name
        self.channels = self.file_pointer.channels
        self.data = numpy.zeros((self.channels, frame_size), numpy.float32,
                                order='F')
        self.nframes = 1
        self.frame_size = frame_size
        self.samplerate = self.file_pointer.samplerate

    def read(self):
        self.nframes = self.file_pointer.read(self.data)
        return self.data[:self.channels, :self.nframes]
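A minimal usage sketch for the wrapper above; the file name and the process() consumer are placeholders, not names from the original code:

# Hedged usage sketch for SoundFile ("example.wav" and process() are assumed).
sound = SoundFile("example.wav", frame_size=512)
while True:
    block = sound.read()       # (channels, nframes) view into the buffer
    if block.shape[1] == 0:    # zero frames read means end of file
        break
    process(block)             # stand-in for whatever consumes the audio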
def test_record_200ms():
    filename = tempfile.gettempdir() + "/unittest_record.wav"
    recording_duration = 0.2
    removeFileIfItExists(filename)
    sound_input.record(filename, recording_duration)
    assert os.path.isfile(
        filename), "Expected recording to be present at {}.".format(filename)
    with WaveReader(filename) as f:
        file_duration = f.frames / f.samplerate
        assert file_duration == pytest.approx(
            recording_duration
        ), "Expected file recording to be of length {}s. It is {}s.".format(
            recording_duration, file_duration)
def run(self):
    while not self._destroy:
        if self.active and self.filepath:
            with WaveReader(self.filepath) as wav:
                print("Title:", wav.metadata.title)
                print("Artist:", wav.metadata.artist)
                print("Channels:", wav.channels)
                print("Format: 0x%x" % wav.format)
                print("Sample Rate:", wav.samplerate)

                # Set device attributes
                self.device.setchannels(wav.channels)
                self.device.setrate(wav.samplerate)

                data = wav.buffer(self.periodsize)
                nframes = wav.read(data)
                while nframes and self.active:
                    self.device.write(data[:, :nframes])
                    nframes = wav.read(data)
                # The context manager closes the reader on exit.
            self.active = False
        time.sleep(0.1)
import threading
import wave
import time

import numpy as np
from wavefile import WaveReader
from pyaudio import PyAudio, paInt16

recordFile = 'audio_record/temp0.wav'

with WaveReader(recordFile) as r:
    for data in r.read_iter(size=512):
        left_channel = data[0]
        volume = np.linalg.norm(left_channel)
        print(volume)
def __init__(self, sound_name):
    self.sound_name = sound_name
    self.sound_file = WaveReader(sound_name)
    self.channels = self.sound_file.channels
    self.player_lib = pyaudio.PyAudio()
    self._volume = 1
class Sound:
    CHUNK = 512

    def __init__(self, sound_name):
        self.sound_name = sound_name
        self.sound_file = WaveReader(sound_name)
        self.channels = self.sound_file.channels
        self.player_lib = pyaudio.PyAudio()
        self._volume = 1
        # self.wf = wave.open(sound_name, 'rb')
        # self.p = pyaudio.PyAudio()
        # self.device_index, self.max_channels = self.get_valid_device_info(self.p)

    def callback(self, in_data, frame_count, time_info, status):
        # Note: this callback is never registered (open_stream() does not pass
        # stream_callback), so playback happens in the blocking play() loop.
        # data = self.wf.readframes(frame_count)
        # numpy_array = numpy.fromstring(data, 'int16') * self._volume
        # new_data = numpy_array.astype('int16').tostring()
        new_data = self.sound_file.read_iter(size=512)
        # new_data = self.sound_file.read(frame_count)
        return (new_data, pyaudio.paContinue)

    def open_stream(self):
        # stream = self.p.open(format=self.p.get_format_from_width(self.wf.getsampwidth()),
        #                      channels=min(self.wf.getnchannels(), self.max_channels),
        #                      output_device_index=1,
        #                      rate=self.wf.getframerate(),
        #                      output=True)
        # return stream
        stream = self.player_lib.open(format=pyaudio.paFloat32,
                                      channels=self.channels,
                                      rate=self.sound_file.samplerate,
                                      frames_per_buffer=512,
                                      output=True)
        return stream

    @run_in_thread
    def play(self, repeat_count=1):
        stream = self.open_stream()
        stream.start_stream()
        while repeat_count != 0:
            repeat_count -= 1
            # data = self.wf.readframes(self.CHUNK)
            for frame in self.sound_file.read_iter(size=512):
                stream.write(frame, frame.shape[1])
            # self.wf.rewind()
            self.sound_file.seek(0)
            # data = self.wf.readframes(self.CHUNK)
        stream.stop_stream()
        stream.close()
        self.sound_file.close()
        self.player_lib.terminate()

    @property
    def volume(self):
        return self._volume

    @volume.setter
    def volume(self, value):
        self._volume = value
        self._volume_fraction = Fraction(self._volume).limit_denominator()
"\t\tDOLBY SMPTE 5.1 Channel order (L R C LFE Ls Rs)\n" "\t\t\t\tto\n" "\t\tProTools & Film Channel order (L C R Ls Rs LFE)\n"), formatter_class=RawTextHelpFormatter) parser.add_argument("-i", help="File/Folder for processing") args = parser.parse_args() # Initialize variables item = args.i wav_ext = '.wav' # figure out if arg is folder or file if os.path.isdir(item): for file in os.listdir(item): if file.endswith(wav_ext): # check to see if file is 5.1 or 7.1 for processing read_wav = WaveReader(os.path.join(item, file)) if read_wav.channels == 6: in_file = os.path.join(item, file) out_file = os.path.join(item, file[:-4]) reorderFolder = ffmpy3.FFmpeg( inputs={in_file: None}, # set Film order outputs={out_file + '_film.wav': "-rf64 auto -filter " "'channelmap=FL-FL|FR-FC|FC-FR|LFE-SR|SL-LFE|SR-SL'"} ) reorderFolder.run() elif read_wav.channels == 8: in_file = os.path.join(item, file) out_file = os.path.join(item, file[:-4]) reorderFolder = ffmpy3.FFmpeg( inputs={in_file: None},
                target_gain = (1 + threshold - self.envelope)
            else:
                target_gain = 1.0
            self.gain = (self.gain * self.attack_coeff +
                         target_gain * (1 - self.attack_coeff))
            # limit the delayed signal
            signal[i] = self.delay_line[self.delay_index] * self.gain


if len(sys.argv) < 2:
    print("Plays a wave file.\n\nUsage: %s filename.wav" % sys.argv[0])
    sys.exit(-1)

wf = WaveReader(sys.argv[1])
# wf = wave.open(sys.argv[1], 'rb')

# instantiate PyAudio (1)
p = pyaudio.PyAudio()

limiter = Limiter(attack_coeff, release_coeff, delay, dtype)


def callback(in_data, frame_count, time_info, flag):
    if flag:
        print("Playback Error: %i" % flag)
    data = np.zeros((1, block_length), np.float32, order='F')
    nframes = wf.read(data)
    played_frames = callback.counter
    callback.counter += nframes
import numpy
import pyfftw
import os
from base import *
from wavefile import WaveReader, Format
from time import sleep

SAMPLING_RATE = 44100
CHANNELS = 1
FIFO = "/tmp/mpd.fifo"

os.nice(-20)
sleep(1)  # wait for mpd to come up

w = WaveReader(FIFO, SAMPLING_RATE, CHANNELS,
               Format.PCM_16 | Format.RAW | Format.ENDIAN_LITTLE)

m = 8
colors = bytearray(b'\x00' * 900)
i = 0
for a in reversed(range(33, 154, 20)):
    for b in reversed(range(33, 154, 20)):
        if a != b:
            for c in reversed(range(33, 154, 20)):
                if i < 900 and b != c:
                    colors[i] = a; i += 1
                    colors[i] = b; i += 1
                    colors[i] = c; i += 1

pyfftw.interfaces.cache.enable()
a = pyfftw.empty_aligned(1024)

for data in w.read_iter(size=1024):
class Sound:
    CHUNK = 512

    def __init__(self, sound_name):
        self.sound_name = sound_name
        self.sound_file = WaveReader(sound_name)
        self.channels = self.sound_file.channels
        self.player_lib = pyaudio.PyAudio()
        self._volume = 1
        # self.wf = wave.open(sound_name, 'rb')
        # self.p = pyaudio.PyAudio()
        # self.device_index, self.max_channels = self.get_valid_device_info(self.p)

    def callback(self, in_data, frame_count, time_info, status):
        # data = self.wf.readframes(frame_count)
        # numpy_array = numpy.fromstring(data, 'int16') * self._volume
        # new_data = numpy_array.astype('int16').tostring()
        data = self.sound_file.buffer(512)
        nframes = self.sound_file.read(data)
        new_data = data[:, :nframes]
        # new_data = self.sound_file.read(frame_count)
        # new_data = self.sound_file.read_iter(size=frame_count)
        # new_data = self.sound_file.read(frame_count)
        return (new_data, pyaudio.paContinue)

    def open_stream(self):
        # stream = self.p.open(format=self.p.get_format_from_width(self.wf.getsampwidth()),
        #                      channels=min(self.wf.getnchannels(), self.max_channels),
        #                      output_device_index=1,
        #                      rate=self.wf.getframerate(),
        #                      output=True)
        # return stream
        stream = self.player_lib.open(format=pyaudio.paFloat32,
                                      channels=self.channels,
                                      rate=self.sound_file.samplerate,
                                      frames_per_buffer=512,
                                      output=True,
                                      stream_callback=self.callback)
        return stream

    @run_in_thread
    def play(self, repeat_count=1):
        stream = self.open_stream()
        stream.start_stream()
        print("File: {} playing\n".format(self.sound_name))
        # while stream.is_active():
        #     time.sleep(0.1)
        #     sys.stdout.write("."); sys.stdout.flush()
        # stream.stop_stream()
        # stream.close()
        # self.sound_file.close()
        # self.player_lib.terminate()

    @property
    def volume(self):
        return self._volume

    @volume.setter
    def volume(self, value):
        self._volume = value
        self._volume_fraction = Fraction(self._volume).limit_denominator()
#!/usr/bin/python
import serial
import numpy
from wavefile import WaveReader, Format
from time import sleep

SAMPLING_RATE = 44100
CHANNELS = 2
FIFO = "/tmp/mpd_stereo.fifo"
NUM_LEDS = 300

ser = serial.Serial('/dev/ttyUSB0', 1000000)

w = WaveReader(FIFO, SAMPLING_RATE, CHANNELS,
               Format.PCM_16 | Format.RAW | Format.ENDIAN_LITTLE)

m = 16
colors = bytearray(b'\x00' * 900)
i = 0
for a in reversed(range(33, 254, 40)):
    for b in reversed(range(33, 254, 40)):
        if a != b:
            for c in reversed(range(33, 254, 40)):
                if i < 900 and b != c:
                    colors[i] = a
                    i += 1
                    colors[i] = b
                    i += 1
                    colors[i] = c
                    i += 1
#!/usr/bin/env python
### Processing example
import sys
from wavefile import WaveReader, WaveWriter

with WaveReader(sys.argv[1]) as r:
    with WaveWriter(
            'output.wav',
            channels=r.channels,
            samplerate=r.samplerate,
            ) as w:
        w.metadata.title = r.metadata.title + " II"
        w.metadata.artist = r.metadata.artist

        for data in r.read_iter(size=512):
            sys.stdout.write("."); sys.stdout.flush()
            w.write(.8 * data)

# vim: noet ts=4 sw=4
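Invoked with the input file as its only command-line argument, the script above copies the audio to output.wav at 80% amplitude, 512 frames at a time, carrying the title and artist metadata across and printing a dot per block as a progress indicator.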