def __init__(self, filename):
    """Load a wav file into a new OpenAL buffer.

    The file played is ``sys.argv[1]`` when given; otherwise *filename*,
    resolved relative to this module's directory, is used as an example.
    """
    self.name = filename
    # load/set wav file: command-line argument wins, else bundled example
    if len(sys.argv) < 2:
        print("Usage: %s wavefile" % os.path.basename(sys.argv[0]))
        print(" Using an example wav file...")
        dirname = os.path.dirname(os.path.realpath(__file__))
        fname = os.path.join(dirname, filename)
    else:
        fname = sys.argv[1]

    # Read the whole file up front, then close the handle — the original
    # version leaked the open wave file.
    wavefp = wave.open(fname)
    try:
        channels = wavefp.getnchannels()
        bitrate = wavefp.getsampwidth() * 8
        samplerate = wavefp.getframerate()
        nframes = wavefp.getnframes()
        wavbuf = wavefp.readframes(nframes)
    finally:
        wavefp.close()

    # Duration in seconds is frames / framerate.  The previous hard-coded
    # "len(wavbuf) / samplerate / 2" only held for 16-bit mono audio.
    self.duration = nframes / float(samplerate)
    self.length = len(wavbuf)

    formatmap = {
        (1, 8): al.AL_FORMAT_MONO8,
        (2, 8): al.AL_FORMAT_STEREO8,
        (1, 16): al.AL_FORMAT_MONO16,
        (2, 16): al.AL_FORMAT_STEREO16,
    }
    alformat = formatmap[(channels, bitrate)]

    self.buf = al.ALuint(0)
    al.alGenBuffers(1, self.buf)
    # allocate buffer space to: buffer, format, data, len(data), and samplerate
    al.alBufferData(self.buf, alformat, wavbuf, len(wavbuf), samplerate)
def __init__(self, audio_format):
    """Create an OpenAL source for *audio_format* and reset playback state."""
    super(OpenALAudioPlayer, self).__init__(audio_format)

    key = (audio_format.channels, audio_format.sample_size)
    try:
        self._al_format = format_map[key]
    except KeyError:
        raise OpenALException('Unsupported audio format.')

    self._al_source = al.ALuint()
    al.alGenSources(1, self._al_source)

    # Estimated seconds of audio queued but not yet processed.
    self._buffered_time = 0.0
    # Seconds already consumed of the current (head) buffer.
    self._current_buffer_time = 0.0
    # One (timestamp, duration) pair per AL buffer currently queued.
    self._timestamps = []
    # System time reference for timestamp interpolation on OpenAL 1.0.
    self._timestamp_system_time = 0.0
    # Desired play state; remains True even when stopped by an underrun.
    self._playing = False
    # Timestamp captured at the moment playback was paused.
    self._pause_timestamp = 0.0
    self._eos_count = 0
def write(self, audio_data):
    """Upload *audio_data* into a fresh AL buffer and queue it on the source."""
    al_buffer = al.ALuint()
    al.alGenBuffers(1, al_buffer)
    al.alBufferData(al_buffer,
                    self._al_format,
                    audio_data.data,
                    audio_data.length,
                    self.audio_format.sample_rate)
    al.alSourceQueueBuffers(self._al_source, 1, ctypes.byref(al_buffer))

    # Track queued audio time and where each buffer starts, then mark
    # the data as consumed.
    self._buffered_time += audio_data.duration
    self._timestamps.append((audio_data.timestamp, audio_data.duration))
    audio_data.consume(audio_data.length, self.audio_format)
def __init__(self):
    """Create an OpenAL source with rolloff and relative mode disabled."""
    self.source = al.ALuint(0)
    al.alGenSources(1, self.source)
    # No distance attenuation; positions are absolute, not listener-relative.
    al.alSourcef(self.source, al.AL_ROLLOFF_FACTOR, 0)
    al.alSourcei(self.source, al.AL_SOURCE_RELATIVE, 0)
    # Scratch variable used when querying the source's play state.
    self.state = al.ALint(0)
    # Cached playback parameters tracked on the Python side.
    self._volume = 1.0
    self._pitch = 1.0
    self._position = [0, 0, 0]
    self._rolloff = 1.0
    self._loop = False
    self.queue = []
def __init__(self):
    """Build the source player: one AL source plus cached state."""
    # load source player
    self.source = al.ALuint(0)
    al.alGenSources(1, self.source)
    # rolloff factor and source-relative positioning both start disabled
    al.alSourcef(self.source, al.AL_ROLLOFF_FACTOR, 0)
    al.alSourcei(self.source, al.AL_SOURCE_RELATIVE, 0)
    # buffer that receives the player state when queried
    self.state = al.ALint(0)
    # internal tracking of playback parameters
    self._loop = False
    self._volume = 1.0
    self._pitch = 1.0
    self._rolloff = 1.0
    self._position = [0, 0, 0]
    self.queue = []
def __init__(self):
    """Initialise default capture parameters (8 kHz, 16-bit mono) and an AL buffer.

    ``wavbuf`` and ``length`` stay ``None`` until audio data is captured.
    """
    self.channels = 1
    self.bitrate = 16
    self.samplerate = 8000
    self.wavbuf = None
    # Format is fixed to match the defaults above (1 channel, 16-bit).
    # (Removed a stale commented-out formatmap lookup that duplicated
    # the loader classes' logic but was never executed here.)
    self.alformat = al.AL_FORMAT_MONO16
    self.length = None
    self.buf = al.ALuint(0)
    al.alGenBuffers(1, self.buf)
def __init__(self, file):
    """Load the wav file at path *file* into a new OpenAL buffer."""
    self.name = file
    # Read everything up front, then close the handle — the original
    # version leaked the open wave file.
    wavefp = wave.open(file)
    try:
        channels = wavefp.getnchannels()
        bitrate = wavefp.getsampwidth() * 8
        samplerate = wavefp.getframerate()
        nframes = wavefp.getnframes()
        wavbuffer = wavefp.readframes(nframes)
    finally:
        wavefp.close()

    # Duration in seconds is frames / framerate.  The previous hard-coded
    # "len(wavbuffer) / samplerate / 2" only held for 16-bit mono audio.
    self.duration = nframes / float(samplerate)
    self.length = len(wavbuffer)

    formatmap = {
        (1, 8): al.AL_FORMAT_MONO8,
        (2, 8): al.AL_FORMAT_STEREO8,
        (1, 16): al.AL_FORMAT_MONO16,
        (2, 16): al.AL_FORMAT_STEREO16,
    }
    alformat = formatmap[(channels, bitrate)]

    self.buffer = al.ALuint(0)
    al.alGenBuffers(1, self.buffer)
    al.alBufferData(self.buffer, alformat, wavbuffer, self.length, samplerate)