def dispatch_events(self):
    super(QuickTimeStreamingSound, self).dispatch_events()

    # Top up the queue so that `_buffers_ahead` buffers are always pending
    # on the source.
    needed_buffers = max(0, self._buffers_ahead - self._queued_buffers)
    buffers = []
    for i in range(needed_buffers):
        buffer = self._extraction_session.get_buffer(self._buffer_size)
        if not buffer:
            break
        self.finished = False
        buffers.append(buffer)

    # Queue the freshly extracted buffers on the OpenAL source.
    buffers = (al.ALuint * len(buffers))(*buffers)
    al.alSourceQueueBuffers(self.source, len(buffers), buffers)
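# A minimal sketch of the complementary unqueue step, assuming the same `al`
# binding and that `self._queued_buffers` is the counter consumed by
# dispatch_events(); the helper and attribute names are assumptions, only
# alGetSourcei/alSourceUnqueueBuffers and AL_BUFFERS_PROCESSED are standard OpenAL.
import ctypes

def _unqueue_processed_buffers(self):
    # Reclaim buffers the source has finished playing so that
    # dispatch_events() can queue fresh data in their place.
    processed = al.ALint(0)
    al.alGetSourcei(self.source, al.AL_BUFFERS_PROCESSED,
                    ctypes.byref(processed))
    if processed.value:
        discarded = (al.ALuint * processed.value)()
        al.alSourceUnqueueBuffers(self.source, processed.value, discarded)
        self._queued_buffers -= processed.value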
def get_sound(self):
    if not self.has_audio:
        raise MediaException('No audio in media file')

    if self.streaming:
        # Hand the cached extraction session over to the streaming sound;
        # clear the cache so a later call creates a fresh session.
        extraction_session = self.extraction_session
        if not extraction_session:
            extraction_session = ExtractionSession(self.movie)
        self.extraction_session = None
        sound = QuickTimeStreamingSound(extraction_session)
        return sound
    else:
        # Static media is already fully decoded; queue every buffer up front.
        sound = openal.OpenALStaticSound(self)
        al.alSourceQueueBuffers(
            sound.source, len(self.static_buffers), self.static_buffers)
        return sound
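# Hypothetical call site for the method above; the `medium` variable and the
# Sound.play() call are assumptions about the surrounding API, not taken from
# the source.
if medium.has_audio:
    sound = medium.get_sound()   # streaming or static, depending on the medium
    sound.play()                 # assumed: starts playback on the OpenAL source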
def get_sound(self):
    if not self.has_audio:
        raise InvalidMediumException('No audio in medium')

    if self._buffers is None:
        # Wait until the sound has finished decoding.
        self._element.buffers_semaphore.acquire()
        self._element.buffers_semaphore.release()

        # Save the buffers here, then throw away the decoding pipeline.
        self._buffers = self._element.buffers
        self._destroy_pipeline(self._pipeline)
        self._pipeline = None

    sound = GstreamerOpenALSound()
    al.alSourceQueueBuffers(sound.source, len(self._buffers), self._buffers)
    return sound
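# For the acquire/release pair above to act as a "decoding finished" barrier,
# the decoding element presumably starts with the semaphore unavailable and
# releases it once the last buffer has been produced.  A minimal sketch under
# that assumption; the class name and callback names are hypothetical.
import threading

class DecodingElement(object):
    def __init__(self):
        self.buffers = []
        # Held (count 0) until decoding completes.
        self.buffers_semaphore = threading.Semaphore(0)

    def on_buffer(self, buffer):
        # Called by the pipeline for each decoded audio buffer.
        self.buffers.append(buffer)

    def on_eos(self):
        # End of stream: let any waiting get_sound() proceed; get_sound()
        # releases again immediately so later callers do not block.
        self.buffers_semaphore.release()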
def _add_buffer(self, buffer, buffer_time):
    al.alSourceQueueBuffers(self.sound.source, 1, buffer)

    # If decoding has run further ahead of playback than necessary,
    # back off for a while so the buffer queue does not grow unboundedly.
    if buffer_time - self.sound.time > self.sound._buffer_time:
        time.sleep(self.sound._buffer_time - SLEEP_UNDERSHOOT)
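# alSourceQueueBuffers takes a pointer to ALuint, so if `buffer` arrives as a
# plain integer buffer name rather than a ctypes value, it would need wrapping
# before the call above.  A hedged sketch, assuming the same `al` binding:
buffer_array = (al.ALuint * 1)(buffer)          # single-element ALuint array
al.alSourceQueueBuffers(self.sound.source, 1, buffer_array)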