def __init__(self, filename, file=None):
    if file is None:
        file = open(filename, 'rb')
    self._file = file

    # Read RIFF format, get format and data chunks
    riff = RIFFFile(file)
    wave_form = riff.get_wave_form()
    if wave_form:
        format = wave_form.get_format_chunk()
        data_chunk = wave_form.get_data_chunk()
    if not wave_form or not format or not data_chunk:
        raise WAVEFormatException('Not a WAVE file')

    if format.wFormatTag != WAVE_FORMAT_PCM:
        raise WAVEFormatException('Unsupported WAVE format category')

    self.audio_format = AudioFormat(channels=format.wChannels,
                                    sample_size=format.wBitsPerSample,
                                    sample_rate=format.dwSamplesPerSec)

    self._duration = \
        float(data_chunk.length) / self.audio_format.bytes_per_second

    self._start_offset = data_chunk.offset
    self._max_offset = data_chunk.length
    self._offset = 0
    self._file.seek(self._start_offset)
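# Worked example of the duration arithmetic above (illustrative figures only,
# not taken from the source): a PCM WAVE file with 2 channels, 16-bit samples
# and a 44100 Hz rate has bytes_per_second = 44100 * 2 * 2 = 176400, so a data
# chunk of 1764000 bytes gives a duration of 1764000 / 176400 = 10.0 seconds.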
def _as_static(data, fs):
    """Helper to get data into the Pyglet audio format"""
    fs = int(fs)
    if data.ndim not in (1, 2):
        raise ValueError('Data must have one or two dimensions')
    n_ch = data.shape[0] if data.ndim == 2 else 1
    audio_format = AudioFormat(channels=n_ch,
                               sample_size=16,
                               sample_rate=fs)
    # Interleave channels and clip to the [-1, 1] range
    data = data.T.ravel('C')
    data[data < -1] = -1
    data[data > 1] = 1
    # Scale by 2 ** 15 - 1 so a full-scale +1.0 sample does not overflow int16;
    # tobytes() replaces the deprecated tostring()
    data = (data * (2 ** 15 - 1)).astype('int16').tobytes()
    return StaticMemorySourceFixed(data, audio_format)
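# Minimal usage sketch for _as_static, assuming NumPy is available and that
# AudioFormat / StaticMemorySourceFixed are the classes defined alongside it.
# The 44100 Hz rate, the 440 Hz tone and the (2, n) array shape are
# illustrative values, not requirements taken from the source.
import numpy as np

fs = 44100
t = np.arange(int(fs * 0.5)) / fs            # half a second of sample times
tone = 0.5 * np.sin(2 * np.pi * 440.0 * t)   # 440 Hz sine, well inside [-1, 1]
stereo = np.vstack([tone, tone])             # shape (2, n): channels on axis 0
source = _as_static(stereo, fs)              # 16-bit, 2-channel static source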
def __init__(self, duration, sample_rate=44800, sample_size=16):
    self._duration = float(duration)
    self.audio_format = AudioFormat(channels=1,
                                    sample_size=sample_size,
                                    sample_rate=sample_rate)
    self._offset = 0
    self._bytes_per_sample = sample_size >> 3
    self._bytes_per_second = self._bytes_per_sample * sample_rate
    self._max_offset = int(self._bytes_per_second * self._duration)

    if self._bytes_per_sample == 2:
        self._max_offset &= 0xfffffffe
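# Worked example of the offset arithmetic above (illustrative figures): with
# duration=2.5 and the defaults sample_rate=44800, sample_size=16, we get
# _bytes_per_sample = 16 >> 3 = 2, _bytes_per_second = 2 * 44800 = 89600 and
# _max_offset = int(89600 * 2.5) = 224000. The final mask (& 0xfffffffe)
# clears the low bit so the end offset always lands on a whole 16-bit sample.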
def __init__(self, api, freq, channels, bytes_per_sample) -> None:
    """
    :param api: Instance of the SunVox API to use for audio rendering.
    :param freq: Frequency, as frames per second.
    :param channels: Channels (1=mono, 2=stereo).
    :param bytes_per_sample: Bytes per sample (2=16-bit int, 4=32-bit float).
    """
    super().__init__()
    self.audio_format = AudioFormat(channels, bytes_per_sample * 8, freq)
    self.sunvox = api
    self._duration = 0.
    self._tell = 0
    self._freq = freq
    self._channels = channels
    self._bytes_per_sample = bytes_per_sample
    self._bytes_per_second = freq * bytes_per_sample * channels
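# Worked example of the derived figures above (illustrative parameters): with
# freq=44100, channels=2 and bytes_per_sample=2, audio_format becomes
# AudioFormat(2, 16, 44100) and _bytes_per_second = 44100 * 2 * 2 = 176400.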
def __init__(self, filename, file=None):
    if file is not None:
        raise NotImplementedError('TODO: Load from file stream')

    self._file = av.avbin_open_filename(filename)
    if not self._file:
        raise AVbinException('Could not open "%s"' % filename)

    self._video_stream = None
    self._video_stream_index = -1
    self._audio_stream = None
    self._audio_stream_index = -1

    file_info = AVbinFileInfo()
    file_info.structure_size = ctypes.sizeof(file_info)
    av.avbin_file_info(self._file, ctypes.byref(file_info))
    self._duration = timestamp_from_avbin(file_info.duration)

    self.info = SourceInfo()
    self.info.title = file_info.title
    self.info.author = file_info.author
    self.info.copyright = file_info.copyright
    self.info.comment = file_info.comment
    self.info.album = file_info.album
    self.info.year = file_info.year
    self.info.track = file_info.track
    self.info.genre = file_info.genre

    # Pick the first video and audio streams found, ignore others.
    for i in range(file_info.n_streams):
        info = AVbinStreamInfo8()
        info.structure_size = ctypes.sizeof(info)
        av.avbin_stream_info(self._file, i, info)

        if (info.type == AVBIN_STREAM_TYPE_VIDEO and
                not self._video_stream):
            stream = av.avbin_open_stream(self._file, i)
            if not stream:
                continue

            self.video_format = VideoFormat(
                width=info.u.video.width,
                height=info.u.video.height)
            if info.u.video.sample_aspect_num != 0:
                self.video_format.sample_aspect = (
                    float(info.u.video.sample_aspect_num) /
                    info.u.video.sample_aspect_den)
            if _have_frame_rate:
                self.video_format.frame_rate = (
                    float(info.u.video.frame_rate_num) /
                    info.u.video.frame_rate_den)
            self._video_stream = stream
            self._video_stream_index = i

        elif (info.type == AVBIN_STREAM_TYPE_AUDIO and
              info.u.audio.sample_bits in (8, 16) and
              info.u.audio.channels in (1, 2) and
              not self._audio_stream):
            stream = av.avbin_open_stream(self._file, i)
            if not stream:
                continue

            self.audio_format = AudioFormat(
                channels=info.u.audio.channels,
                sample_size=info.u.audio.sample_bits,
                sample_rate=info.u.audio.sample_rate)
            self._audio_stream = stream
            self._audio_stream_index = i

    self._packet = AVbinPacket()
    self._packet.structure_size = ctypes.sizeof(self._packet)
    self._packet.stream_index = -1

    self._events = []

    # Timestamp of last video packet added to decoder queue.
    self._video_timestamp = 0
    self._buffered_audio_data = []

    if self.audio_format:
        self._audio_buffer = \
            (ctypes.c_uint8 * av.avbin_get_audio_buffer_size())()

    if self.video_format:
        self._video_packets = []
        self._decode_thread = WorkerThread()
        self._decode_thread.start()
        self._condition = threading.Condition()
def __init__(self, filename, file=None):
    if file is not None:
        raise NotImplementedError('TODO: Load from file stream')

    self._file = av.avbin_open_filename(filename)
    if not self._file:
        raise AVbinException('Could not open "%s"' % filename)

    self._video_stream = None
    self._audio_stream = None

    file_info = AVbinFileInfo()
    file_info.structure_size = ctypes.sizeof(file_info)
    av.avbin_file_info(self._file, ctypes.byref(file_info))
    self._duration = timestamp_from_avbin(file_info.duration)

    # Pick the first video and audio streams found, ignore others.
    for i in range(file_info.n_streams):
        info = AVbinStreamInfo()
        info.structure_size = ctypes.sizeof(info)
        av.avbin_stream_info(self._file, i, info)

        if (info.type == AVBIN_STREAM_TYPE_VIDEO and
                not self._video_stream):
            stream = av.avbin_open_stream(self._file, i)
            if not stream:
                continue

            self.video_format = VideoFormat(width=info.u.video.width,
                                            height=info.u.video.height)
            if info.u.video.sample_aspect_num != 0:
                self.video_format.sample_aspect = (
                    float(info.u.video.sample_aspect_num) /
                    info.u.video.sample_aspect_den)
            self._video_stream = stream
            self._video_stream_index = i

        elif (info.type == AVBIN_STREAM_TYPE_AUDIO and
              info.u.audio.sample_bits in (8, 16) and
              info.u.audio.channels in (1, 2) and
              not self._audio_stream):
            stream = av.avbin_open_stream(self._file, i)
            if not stream:
                continue

            self.audio_format = AudioFormat(
                channels=info.u.audio.channels,
                sample_size=info.u.audio.sample_bits,
                sample_rate=info.u.audio.sample_rate)
            self._audio_stream = stream
            self._audio_stream_index = i

    self._packet = AVbinPacket()
    self._packet.structure_size = ctypes.sizeof(self._packet)
    self._packet.stream_index = -1

    self._buffered_packets = []
    self._buffer_streams = []
    self._buffered_images = []

    if self.audio_format:
        self._audio_packet_ptr = 0
        self._audio_packet_size = 0
        self._audio_packet_timestamp = 0
        self._audio_buffer = \
            (ctypes.c_uint8 * av.avbin_get_audio_buffer_size())()
        self._buffer_streams.append(self._audio_stream_index)

    if self.video_format:
        self._buffer_streams.append(self._video_stream_index)
        self._force_next_video_image = True
        self._last_video_timestamp = None