Example #1
    def __init__(self, duration, sample_rate=44800, sample_size=16):
        self._duration = float(duration)
        self.audio_format = AudioFormat(channels=1,
                                        sample_size=sample_size,
                                        sample_rate=sample_rate)

        self._offset = 0
        # sample_size is in bits; derive byte counts for offset arithmetic.
        self._bytes_per_sample = sample_size >> 3
        self._bytes_per_second = self._bytes_per_sample * sample_rate
        self._max_offset = int(self._bytes_per_second * self._duration)

        # Align the end offset to a whole 16-bit sample.
        if self._bytes_per_sample == 2:
            self._max_offset &= 0xfffffffe
Example #2
    def __init__(self, duration, sample_rate=44800, sample_size=16, envelope=None):

        self._duration = float(duration)
        self.audio_format = AudioFormat(
            channels=1,
            sample_size=sample_size,
            sample_rate=sample_rate)

        self._offset = 0
        self._sample_rate = sample_rate
        self._sample_size = sample_size
        self._bytes_per_sample = sample_size >> 3
        self._bytes_per_second = self._bytes_per_sample * sample_rate
        self._max_offset = int(self._bytes_per_second * self._duration)
        self._envelope = envelope

        # Align the end offset to a whole 16-bit sample.
        if self._bytes_per_sample == 2:
            self._max_offset &= 0xfffffffe

        # Fall back to a constant, full-amplitude envelope.
        if not self._envelope:
            self._envelope = FlatEnvelope(amplitude=1.0)

        self._envelope_array = self._envelope.build_envelope(self._sample_rate, self._duration)
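
The constructors above only prepare bookkeeping (duration, audio format, byte offsets, envelope); the PCM data itself comes from a concrete generator subclass. A minimal usage sketch, assuming these examples belong to pyglet's procedural/synthesis sources (the Sine class and module path are assumptions; FlatEnvelope appears in the example above):

    # Hypothetical usage sketch -- the module path and Sine are assumed,
    # not shown in the examples above.
    import pyglet
    from pyglet.media.synthesis import Sine, FlatEnvelope

    # A 1-second 440 Hz tone at half amplitude, using the default sample rate.
    tone = Sine(duration=1.0, frequency=440, envelope=FlatEnvelope(amplitude=0.5))
    tone.play()
    pyglet.app.run()
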
Example #3
    def __init__(self, filename, file=None):
        if file is None:
            file = open(filename, 'rb')

        self._file = file

        # Read RIFF format, get format and data chunks
        riff = RIFFFile(file)
        wave_form = riff.get_wave_form()
        if wave_form:
            format = wave_form.get_format_chunk()
            data_chunk = wave_form.get_data_chunk()

        if not wave_form or not format or not data_chunk:
            if not filename or filename.lower().endswith('.wav'):
                raise WAVEFormatException('Not a WAVE file')
            else:
                raise WAVEFormatException(
                    'FFmpeg is required to decode compressed media')

        if format.wFormatTag != WAVE_FORMAT_PCM:
            raise WAVEFormatException('Unsupported WAVE format category')

        if format.wBitsPerSample not in (8, 16):
            raise WAVEFormatException('Unsupported sample bit size: %d' %
                                      format.wBitsPerSample)

        self.audio_format = AudioFormat(channels=format.wChannels,
                                        sample_size=format.wBitsPerSample,
                                        sample_rate=format.dwSamplesPerSec)
        self._duration = \
            float(data_chunk.length) / self.audio_format.bytes_per_second

        self._start_offset = data_chunk.offset
        self._max_offset = data_chunk.length
        self._offset = 0
        self._file.seek(self._start_offset)
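
This constructor validates the RIFF/WAVE structure up front and leaves the file positioned at the start of the PCM data, so later reads can stream samples directly. A sketch of how it is typically reached, assuming this is the WAVE decoder behind pyglet.media.load (the call and filename below are illustrative):

    # Hypothetical usage sketch: loading a PCM .wav through pyglet's loader.
    # Non-PCM or non-8/16-bit files raise WAVEFormatException; a non-.wav
    # extension yields the "FFmpeg is required" hint instead.
    import pyglet

    source = pyglet.media.load('beep.wav', streaming=True)
    print(source.audio_format.sample_rate, source.duration)
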
Example #4
    def __init__(self, filename, file=None):
        if file is not None:
            raise NotImplementedError('Loading from file stream is not supported')

        self._file = ffmpeg_open_filename(asbytes_filename(filename))
        if not self._file:
            raise FFmpegException('Could not open "{0}"'.format(filename))

        self._video_stream = None
        self._video_stream_index = -1
        self._audio_stream = None
        self._audio_stream_index = -1
        self._audio_format = None

        self.img_convert_ctx = POINTER(SwsContext)()
        self.audio_convert_ctx = POINTER(SwrContext)()

        file_info = ffmpeg_file_info(self._file)

        self.info = SourceInfo()
        self.info.title = file_info.title
        self.info.author = file_info.author
        self.info.copyright = file_info.copyright
        self.info.comment = file_info.comment
        self.info.album = file_info.album
        self.info.year = file_info.year
        self.info.track = file_info.track
        self.info.genre = file_info.genre

        # Pick the first video and audio streams found, ignore others.
        for i in range(file_info.n_streams):
            info = ffmpeg_stream_info(self._file, i)

            if (isinstance(info, StreamVideoInfo) and 
                self._video_stream is None):

                stream = ffmpeg_open_stream(self._file, i)

                self.video_format = VideoFormat(
                    width=info.width,
                    height=info.height)
                if info.sample_aspect_num != 0:
                    self.video_format.sample_aspect = (
                        float(info.sample_aspect_num) /
                            info.sample_aspect_den)
                self.video_format.frame_rate = (
                    float(info.frame_rate_num) / 
                        info.frame_rate_den)
                self._video_stream = stream
                self._video_stream_index = i

            elif (isinstance(info, StreamAudioInfo) and
                  info.sample_bits in (8, 16) and
                  self._audio_stream is None):

                stream = ffmpeg_open_stream(self._file, i)

                self.audio_format = AudioFormat(
                    channels=min(2, info.channels),
                    sample_size=info.sample_bits,
                    sample_rate=info.sample_rate)
                self._audio_stream = stream
                self._audio_stream_index = i

                channel_input = avutil.av_get_default_channel_layout(info.channels)
                channels_out = min(2, info.channels)
                channel_output = avutil.av_get_default_channel_layout(channels_out)

                sample_rate = stream.codec_context.contents.sample_rate
                sample_format = stream.codec_context.contents.sample_fmt
                # Choose a packed (interleaved) target format: planar inputs
                # map to their packed equivalents, and float audio is
                # down-converted to signed 16-bit.
                if sample_format in (AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8P):
                    self.tgt_format = AV_SAMPLE_FMT_U8
                elif sample_format in (AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P):
                    self.tgt_format = AV_SAMPLE_FMT_S16
                elif sample_format in (AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32P):
                    self.tgt_format = AV_SAMPLE_FMT_S32
                elif sample_format in (AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP):
                    self.tgt_format = AV_SAMPLE_FMT_S16
                else:
                    raise FFmpegException('Audio format not supported.')

                # Allocate a libswresample context that converts channel
                # layout and sample format; the sample rate is unchanged.
                self.audio_convert_ctx = swresample.swr_alloc_set_opts(
                    None,
                    channel_output, self.tgt_format, sample_rate,
                    channel_input, sample_format, sample_rate,
                    0, None)
                if (not self.audio_convert_ctx or
                        swresample.swr_init(self.audio_convert_ctx) < 0):
                    swresample.swr_free(self.audio_convert_ctx)
                    raise FFmpegException('Cannot create sample rate converter.')

        self._packet = ffmpeg_init_packet()
        self._events = [] # They don't seem to be used!

        self.audioq = deque()
        # Make queue big enough to accommodate 1.2 sec?
        self._max_len_audioq = 50  # Need to figure out a correct amount
        if self.audio_format:
            # Buffer 1 sec worth of audio
            self._audio_buffer = \
                (c_uint8 * ffmpeg_get_audio_buffer_size(self.audio_format))()
        
        self.videoq = deque()
        self._max_len_videoq = 50 # Need to figure out a correct amount
        
        self.start_time = self._get_start_time()
        self._duration = timestamp_from_ffmpeg(file_info.duration)
        self._duration -= self.start_time

        # Flag to determine if the _fillq method was already scheduled
        self._fillq_scheduled = False
        self._fillq()
        # It is not clear why, but for some files, seeking before reading the
        # first few packets loses many packets at the beginning of the media.
        # We only seek back to 0 for media which have a start_time > 0.
        if self.start_time > 0:
            self.seek(0.0)
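
A sketch of exercising this constructor directly, assuming the class is pyglet's FFmpeg-based source (the class name FFmpegSource and the filename are assumptions; the attributes read below are all set in the code above):

    # Hypothetical usage sketch; file objects are rejected, only paths work.
    source = FFmpegSource('movie.mp4')
    if source.video_format:
        print('video:', source.video_format.width, source.video_format.height)
    if source.audio_format:
        print('audio:', source.audio_format.channels, 'ch @',
              source.audio_format.sample_rate, 'Hz')
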
Example #5
    def __init__(self, filename, file=None):
        if file is not None:
            raise NotImplementedError(
                'Loading from file stream is not supported')

        self._file = av.avbin_open_filename(asbytes_filename(filename))
        if not self._file:
            raise AVbinException('Could not open "%s"' % filename)

        self._video_stream = None
        self._video_stream_index = -1
        self._audio_stream = None
        self._audio_stream_index = -1

        file_info = AVbinFileInfo()
        file_info.structure_size = ctypes.sizeof(file_info)
        av.avbin_file_info(self._file, ctypes.byref(file_info))
        self._duration = timestamp_from_avbin(file_info.duration)

        self.info = SourceInfo()
        self.info.title = file_info.title
        self.info.author = file_info.author
        self.info.copyright = file_info.copyright
        self.info.comment = file_info.comment
        self.info.album = file_info.album
        self.info.year = file_info.year
        self.info.track = file_info.track
        self.info.genre = file_info.genre

        # Pick the first video and audio streams found, ignore others.
        for i in range(file_info.n_streams):
            info = AVbinStreamInfo8()
            info.structure_size = ctypes.sizeof(info)
            av.avbin_stream_info(self._file, i, info)

            if (info.type == AVBIN_STREAM_TYPE_VIDEO
                    and not self._video_stream):

                stream = av.avbin_open_stream(self._file, i)
                if not stream:
                    continue

                self.video_format = VideoFormat(width=info.u.video.width,
                                                height=info.u.video.height)
                if info.u.video.sample_aspect_num != 0:
                    self.video_format.sample_aspect = (
                        float(info.u.video.sample_aspect_num) /
                        info.u.video.sample_aspect_den)
                if _have_frame_rate:
                    self.video_format.frame_rate = (
                        float(info.u.video.frame_rate_num) /
                        info.u.video.frame_rate_den)
                self._video_stream = stream
                self._video_stream_index = i

            elif (info.type == AVBIN_STREAM_TYPE_AUDIO
                  and info.u.audio.sample_bits in (8, 16)
                  and info.u.audio.channels in (1, 2)
                  and not self._audio_stream):

                stream = av.avbin_open_stream(self._file, i)
                if not stream:
                    continue

                self.audio_format = AudioFormat(
                    channels=info.u.audio.channels,
                    sample_size=info.u.audio.sample_bits,
                    sample_rate=info.u.audio.sample_rate)
                self._audio_stream = stream
                self._audio_stream_index = i

        self._packet = AVbinPacket()
        self._packet.structure_size = ctypes.sizeof(self._packet)
        self._packet.stream_index = -1

        self._events = []

        # Timestamp of last video packet added to decoder queue.
        self._video_timestamp = 0
        self._buffered_audio_data = []

        if self.audio_format:
            self._audio_buffer = \
                (ctypes.c_uint8 * av.avbin_get_audio_buffer_size())()

        if self.video_format:
            self._video_packets = []
            self._decode_thread = WorkerThread()
            self._decode_thread.start()
            self._condition = threading.Condition()
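
Unlike the FFmpeg version, this backend simply skips audio streams that are not 8/16-bit with one or two channels rather than resampling them. A short sketch of checking what was accepted, assuming the class name AVbinSource (the name and filename are illustrative):

    # Hypothetical usage sketch. When a video stream is present, __init__ above
    # also starts a WorkerThread and a Condition for off-thread decoding.
    source = AVbinSource('clip.ogv')
    if source.audio_format is None:
        print('no playable audio stream (must be 8/16-bit, mono or stereo)')
    if source.video_format:
        print('video decoding runs on a worker thread')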