def __init__(self, filename):
    """Load a wav file into a new OpenAL buffer.

    Uses the file named on the command line when one is given;
    otherwise falls back to *filename* next to this module.
    """
    self.name = filename
    # load/set wav file
    if len(sys.argv) < 2:
        print("Usage: %s wavefile" % os.path.basename(sys.argv[0]))
        print(" Using an example wav file...")
        dirname = os.path.dirname(os.path.realpath(__file__))
        fname = os.path.join(dirname, filename)
    else:
        fname = sys.argv[1]
    wavefp = wave.open(fname)
    try:
        channels = wavefp.getnchannels()
        bitrate = wavefp.getsampwidth() * 8
        samplerate = wavefp.getframerate()
        nframes = wavefp.getnframes()
        wavbuf = wavefp.readframes(nframes)
    finally:
        # Fix: the original leaked the wave file handle.
        wavefp.close()
    # Duration in seconds is frames / framerate.  The original formula,
    # (len(wavbuf) / samplerate) / 2, was only correct for 16-bit mono
    # data (it ignored the channel count and sample width).
    self.duration = nframes / float(samplerate)
    self.length = len(wavbuf)
    formatmap = {
        (1, 8): al.AL_FORMAT_MONO8,
        (2, 8): al.AL_FORMAT_STEREO8,
        (1, 16): al.AL_FORMAT_MONO16,
        (2, 16): al.AL_FORMAT_STEREO16,
    }
    alformat = formatmap[(channels, bitrate)]
    self.buf = al.ALuint(0)
    al.alGenBuffers(1, self.buf)
    # allocate buffer space to: buffer, format, data, len(data), and samplerate
    al.alBufferData(self.buf, alformat, wavbuf, len(wavbuf), samplerate)
def __init__(self, filename):
    """Load a wav file into a new OpenAL buffer.

    Uses the file named on the command line when one is given;
    otherwise falls back to *filename* next to this module.
    """
    self.name = filename
    # load/set wav file
    if len(sys.argv) < 2:
        print("Usage: %s wavefile" % os.path.basename(sys.argv[0]))
        print(" Using an example wav file...")
        dirname = os.path.dirname(os.path.realpath(__file__))
        fname = os.path.join(dirname, filename)
    else:
        fname = sys.argv[1]
    wavefp = wave.open(fname)
    try:
        channels = wavefp.getnchannels()
        bitrate = wavefp.getsampwidth() * 8
        samplerate = wavefp.getframerate()
        nframes = wavefp.getnframes()
        wavbuf = wavefp.readframes(nframes)
    finally:
        # Fix: the original leaked the wave file handle.
        wavefp.close()
    # Duration in seconds is frames / framerate.  The original formula,
    # (len(wavbuf) / samplerate) / 2, was only correct for 16-bit mono
    # data (it ignored the channel count and sample width).
    self.duration = nframes / float(samplerate)
    self.length = len(wavbuf)
    formatmap = {
        (1, 8): al.AL_FORMAT_MONO8,
        (2, 8): al.AL_FORMAT_STEREO8,
        (1, 16): al.AL_FORMAT_MONO16,
        (2, 16): al.AL_FORMAT_STEREO16,
    }
    alformat = formatmap[(channels, bitrate)]
    self.buf = al.ALuint(0)
    al.alGenBuffers(1, self.buf)
    # allocate buffer space to: buffer, format, data, len(data), and samplerate
    al.alBufferData(self.buf, alformat, wavbuf, len(wavbuf), samplerate)
def write(self, audio_data):
    """Queue one packet of audio data onto the AL source.

    Generates a fresh AL buffer for the packet, fills it, queues it on
    the source, and records the packet's timestamp/duration before
    consuming the data from *audio_data*.
    """
    al_buffer = al.ALuint()
    al.alGenBuffers(1, al_buffer)
    al.alBufferData(al_buffer,
                    self._al_format,
                    audio_data.data,
                    audio_data.length,
                    self.audio_format.sample_rate)
    al.alSourceQueueBuffers(self._al_source, 1, ctypes.byref(al_buffer))
    # Track how much audio is queued and when each packet starts.
    self._buffered_time += audio_data.duration
    self._timestamps.append((audio_data.timestamp, audio_data.duration))
    audio_data.consume(audio_data.length, self.audio_format)
def __init__(self, file):
    """Load the wav file *file* into a new OpenAL buffer."""
    self.name = file
    wavefp = wave.open(file)
    try:
        channels = wavefp.getnchannels()
        bitrate = wavefp.getsampwidth() * 8
        samplerate = wavefp.getframerate()
        nframes = wavefp.getnframes()
        wavbuffer = wavefp.readframes(nframes)
    finally:
        # Fix: the original leaked the wave file handle.
        wavefp.close()
    # Duration in seconds is frames / framerate.  The original formula,
    # (len(wavbuffer) / samplerate) / 2, was only correct for 16-bit
    # mono data (it ignored the channel count and sample width).
    self.duration = nframes / float(samplerate)
    self.length = len(wavbuffer)
    formatmap = {
        (1, 8): al.AL_FORMAT_MONO8,
        (2, 8): al.AL_FORMAT_STEREO8,
        (1, 16): al.AL_FORMAT_MONO16,
        (2, 16): al.AL_FORMAT_STEREO16,
    }
    alformat = formatmap[(channels, bitrate)]
    self.buffer = al.ALuint(0)
    al.alGenBuffers(1, self.buffer)
    # allocate buffer space to: buffer, format, data, len(data), and samplerate
    al.alBufferData(self.buffer, alformat, wavbuffer, self.length, samplerate)
def load(self, data):
    """Replace the contents of this object's existing AL buffer with *data*.

    Relies on self.buf, self.alformat and self.samplerate having been
    set up previously.
    """
    self.wavbuf = data
    self.length = len(data)
    # allocate buffer space to: buffer, format, data, len(data), and samplerate
    al.alBufferData(self.buf, self.alformat, self.wavbuf,
                    self.length, self.samplerate)
def dispatch_events(self):
    """Pump the OpenAL playback state machine once.

    Releases AL buffers the source has finished with (dispatching
    'on_eos' for end-of-stream buffers), refills the source's queue up
    to self._min_buffer_time seconds of audio, updates any video
    texture, and restarts the AL source after a buffer underrun.
    Intended to be called repeatedly from the application's event loop.
    """
    if not self._sources:
        return
    if not self._playing:
        # If paused, just update the video texture.
        if self._texture:
            self._sources[0]._update_texture(self, self.time)
        return
    # Calculate once only for this method.
    self_time = self.time
    # Update state of AL source
    state = al.ALint()
    al.alGetSourcei(self._al_source, al.AL_SOURCE_STATE, state)
    self._al_playing = state.value == al.AL_PLAYING
    # A falsy al_format marks a "silent" source with no audio data;
    # only real audio sources have AL buffers to recycle.
    if self._sources[0].al_format:
        # Find out how many buffers are done
        processed = al.ALint()
        al.alGetSourcei(self._al_source, al.AL_BUFFERS_PROCESSED, processed)
        processed = processed.value
        queued = al.ALint()
        al.alGetSourcei(self._al_source, al.AL_BUFFERS_QUEUED, queued)
        # Release spent buffers
        if processed:
            buffers = (al.ALuint * processed)()
            al.alSourceUnqueueBuffers(self._al_source, len(buffers), buffers)
            # If any buffers were EOS buffers, dispatch appropriate
            # event.
            # Buffers are unqueued in FIFO order, so each unqueued
            # buffer should correspond to the head of _queued_buffers.
            for buffer in buffers:
                info = self._queued_buffers.pop(0)
                # NOTE(review): elsewhere the pool is keyed as
                # buffer_pool.info[buffer.value]; here the raw array
                # element is used as the key — confirm both index the
                # pool the same way.
                assert info is buffer_pool.info[buffer]
                if info.is_eos:
                    if self._eos_action == self.EOS_NEXT:
                        self.next()
                    elif self._eos_action == self.EOS_STOP:
                        # For ManagedSoundPlayer only.
                        self.stop()
                    self.dispatch_event('on_eos')
                buffer_pool.release(buffer)
    else:
        # Check for EOS on silent source
        if self_time > self._sources[0].duration:
            if self._eos_action == self.EOS_NEXT:
                self.next()
            self.dispatch_event('on_eos')
    # Determine minimum duration of audio already buffered (current buffer
    # is ignored, as this could be just about to be dequeued).
    # NOTE(review): info.length is summed as a duration here and
    # compared against _min_buffer_time (seconds) — presumably the pool
    # stores per-buffer durations under .length; verify.
    buffer_time = sum([b.length for b in self._queued_buffers[1:]])
    # Ensure audio buffers are full
    try:
        source = self._sources[self._source_read_index]
    except IndexError:
        source = None
    while source and buffer_time < self._min_buffer_time:
        # Read next packet of audio data
        if source.al_format:
            max_bytes = int(
                self._min_buffer_time * source.audio_format.bytes_per_second)
            max_bytes = min(max_bytes, self._max_buffer_size)
            audio_data = source._get_audio_data(max_bytes)
        # If there is audio data, create and queue a buffer
        if source.al_format and audio_data:
            # NOTE(review): third argument to buffer_pool.get is
            # buffer_pool itself — looks suspicious; confirm against
            # the pool's get() signature.
            buffer = buffer_pool.get(audio_data.timestamp,
                                     audio_data.duration,
                                     buffer_pool,
                                     audio_data.is_eos)
            al.alBufferData(buffer, source.al_format,
                            audio_data.data, audio_data.length,
                            source.audio_format.sample_rate)
            # TODO consolidate info and audio_data
            info = buffer_pool.info[buffer.value]
            self._queued_buffers.append(info)
            buffer_time += info.length
            # Queue this buffer onto the AL source.
            al.alSourceQueueBuffers(self._al_source, 1,
                                    ctypes.byref(buffer))
        else:
            # No more data from source, check eos behaviour
            if self._eos_action == self.EOS_NEXT:
                self._source_read_index += 1
                try:
                    source = self._sources[self._source_read_index]
                    source._play()  # Preroll source ahead of buffering
                except IndexError:
                    source = None
            elif self._eos_action == self.EOS_LOOP:
                source._seek(0)
            elif self._eos_action == self.EOS_PAUSE:
                source = None
            elif self._eos_action == self.EOS_STOP:
                source = None
            else:
                assert False, 'Invalid eos_action'
                source = None
    # Update video texture
    if self._texture:
        self._sources[0]._update_texture(self, self_time)
    # Ensure the AL source is playing (if there is a buffer underrun
    # this restarts the AL source). This needs to be at the end of the
    # function to ensure it catches newly queued sources without needing
    # a second iteration of dispatch_events.
    if (self._sources and self._sources[0].al_format and
            self._queued_buffers and self._playing and
            not self._al_playing):
        al.alSourcePlay(self._al_source)
        self._al_playing = True