Example #1
0
    def setUp(self) -> None:
        """Prepare shared fixtures: a long source, a logo and a short preroll."""
        super().setUp()

        # Hour-long main source with one video and one audio stream.
        self.source = inputs.input_file(
            'source.mp4',
            inputs.Stream(VIDEO, video_meta_data(duration=3600.0,
                                                 width=640, height=360)),
            inputs.Stream(AUDIO, audio_meta_data(duration=3600.0)))

        # Small static image, video stream only.
        self.logo = inputs.input_file(
            'logo.png',
            inputs.Stream(VIDEO, video_meta_data(width=64, height=64)))

        # Ten-second clip to be played before the main source.
        self.preroll = inputs.input_file(
            'preroll.mp4',
            inputs.Stream(VIDEO, video_meta_data(duration=10.0,
                                                 width=640, height=360)),
            inputs.Stream(AUDIO, audio_meta_data(duration=10.0)))

        # Default output codecs and destination file.
        self.video_codec = X264(bitrate=3600000)
        self.audio_codec = AAC(bitrate=192000)
        self.output = outputs.output_file(
            'output.mp4',
            self.video_codec,
            self.audio_codec)

        self.ffmpeg = FFMPEG()
Example #2
0
    def test_detect_concat_buffering(self):
        """
        When single source is used for multiple outputs, and one of outputs
        has a preroll, buffering occurs, because to output first frame for a
        non-preroll output, we need to buffer all preroll frames.
        """

        # Each case is (raises, split_pre, split_src): whether buffering is
        # expected, and which of the two inputs also feeds the second output.
        cases = [
            (False, True, True),  # preroll + source / preroll + source
            (False, True, False),  # preroll + source / preroll
            (True, False, True),  # preroll + source / source
        ]
        for case in cases:
            with self.subTest(case):
                raises, split_pre, split_src = case
                ff = FFMPEG()
                # Fresh streams carrying metadata from the setUp fixtures.
                v1 = inputs.Stream(VIDEO, self.preroll.streams[0].meta)
                a1 = inputs.Stream(AUDIO, self.preroll.streams[1].meta)
                v2 = inputs.Stream(VIDEO, self.source.streams[0].meta)
                a2 = inputs.Stream(AUDIO, self.source.streams[1].meta)
                ff < inputs.input_file('preroll.mp4', v1, a1)
                ff < inputs.input_file('source.mp4', v2, a2)
                # Split each stream into one extra edge only when it is also
                # consumed by the second output.
                vf1 = v1 | filters.Split(VIDEO, output_count=int(split_pre) + 1)
                vf2 = v2 | filters.Split(VIDEO, output_count=int(split_src) + 1)
                af1 = a1 | filters.Split(AUDIO, output_count=int(split_pre) + 1)
                af2 = a2 | filters.Split(AUDIO, output_count=int(split_src) + 1)

                # First output always receives preroll + source concatenated.
                vc1 = vf1 | filters.Concat(VIDEO, input_count=2)
                vf2 | vc1
                ac1 = af1 | filters.Concat(AUDIO, input_count=2)
                af2 | ac1

                # Second output receives preroll and/or source per case flags.
                vc2 = filters.Concat(VIDEO, int(split_pre) + int(split_src))
                if split_pre:
                    vf1 | vc2
                if split_src:
                    vf2 | vc2

                ac2 = filters.Concat(AUDIO, int(split_pre) + int(split_src))
                if split_pre:
                    af1 | ac2
                if split_src:
                    af2 | ac2

                o1 = outputs.output_file("o1.mp4", X264(), AAC())
                o2 = outputs.output_file("o2.mp4", X264(), AAC())

                vc1 > o1
                ac1 > o1
                vc2 > o2
                ac2 > o2

                ff > o1
                ff > o2
                # check_buffering() must raise exactly when `raises` is set.
                try:
                    ff.check_buffering()
                except BufferError as e:
                    self.assertTrue(raises, e)
                else:
                    self.assertFalse(raises)
Example #3
0
    def test_shortcut_outputs_with_codec(self):
        """ Check ff > output shortcut if codecs list specified."""
        ff = FFMPEG(input=inputs.input_file("input.mp4"))
        scaled = ff.video | filters.Scale(width=1280, height=720)

        with self.assertRaises(RuntimeError):
            # Connecting ffmpeg to the output first wires the codec straight
            # to the input video stream, so the scaled stream can no longer
            # be attached to the same output.
            bad_codec = codecs.VideoCodec("libx264")
            out = ff > outputs.output_file("output.mp4", bad_codec)
            scaled > out

        # Correct order: connect the filtered stream to the codec first,
        # then let ffmpeg pick up the remaining streams.
        good_codec = codecs.VideoCodec("libx264")
        out = scaled > outputs.output_file("output.mp4", good_codec)
        ff > out
Example #4
0
    def test_fix_trim_buffering(self):
        """
        Trim buffering can be fixed by decoding the source file multiple
        times.
        """
        ff = FFMPEG()
        # Two independent demuxers for the very same file: each one can be
        # read strictly forward, so no frames need to be buffered.
        first = ff < inputs.input_file(
            'input.mp4',
            inputs.Stream(VIDEO, self.source.streams[0].meta),
            inputs.Stream(AUDIO, self.source.streams[1].meta))
        second = ff < inputs.input_file(
            'input.mp4',
            inputs.Stream(VIDEO, self.source.streams[0].meta),
            inputs.Stream(AUDIO, self.source.streams[1].meta))

        # Later scene is cut from the first input, earlier scene from the
        # second one.
        late_scene = (first.video
                      | filters.Trim(VIDEO, 2.0, 3.0)
                      | filters.SetPTS(VIDEO))
        early_scene = (second.video
                       | filters.Trim(VIDEO, 1.0, 2.0)
                       | filters.SetPTS(VIDEO))

        output = outputs.output_file('output.mp4',
                                     codecs.VideoCodec('libx264'))

        concat = late_scene | filters.Concat(VIDEO)
        early_scene | concat
        concat > output

        ff > output
        ff.check_buffering()
    def transcode(self) -> None:
        """ Transcodes video

        * checks source mediainfo
        * runs `ffmpeg`
        * validates result
        """
        audio_meta, video_meta = self.get_meta_data(self.source)

        # Source mediainfo is kept around to validate the result against.
        source_media_info = self.get_media_info(video_meta, audio_meta)

        # Preserve original FPS and audio sampling rate in the output.
        frame_rate = source_media_info[VIDEO_FRAME_RATE]
        sampling_rate = source_media_info[AUDIO_SAMPLING_RATE]

        # Common ffmpeg flags
        ff = FFMPEG(overwrite=True, loglevel='repeat+level+info')
        # Init source file
        ff < input_file(self.source, Stream(VIDEO, video_meta),
                        Stream(AUDIO, audio_meta))

        # Video codec: group-of-pictures length is aligned to segment size.
        cv0 = VideoCodec(
            gop=math.floor(frame_rate * GOP_DURATION),
            rate=frame_rate,
            **cast(Dict[str, Any], TRANSCODING_OPTIONS[VIDEO_CODEC]))
        # Audio codec keeps the source sampling rate.
        ca0 = AudioCodec(
            rate=sampling_rate,
            **cast(Dict[str, Any], TRANSCODING_OPTIONS[AUDIO_CODEC]))

        # Scale the video stream and feed it to the video codec.
        ff.video | Scale(**TRANSCODING_OPTIONS[SCALE]) > cv0

        # codecs, muxer and output path
        ff > output_file(self.destination, cv0, ca0, format='mp4')

        # Run ffmpeg
        self.run(ff)

        # Validate ffmpeg result against the source mediainfo.
        audio_meta, video_meta = self.get_meta_data(self.destination)
        dest_media_info = self.get_media_info(video_meta, audio_meta)
        self.validate(source_media_info, dest_media_info)
Example #6
0
    def test_concat_audio_metadata(self):
        """
        Concat filter sums samples count for audio streams.
        """
        second_meta = audio_meta_data(duration=1000.0,
                                      sampling_rate=24000,
                                      samples_count=24000 * 1000)
        second_stream = inputs.Stream(AUDIO, meta=second_meta)
        self.input_list.append(inputs.input_file('second.mp4', second_stream))

        # Second stream goes into concat first; the source stream follows.
        concat = second_stream | Concat(AUDIO)
        self.source | concat
        concat > self.output

        am = cast(AudioMeta, self.output.codecs[-1].get_meta_data())
        # Durations are summed over concatenated streams...
        self.assertEqual(self.audio_metadata.duration + second_meta.duration,
                         am.duration)
        # ...and so are samples (duration * sampling rate).
        self.assertEqual(round(am.duration * second_meta.sampling_rate),
                         am.samples)
Example #7
0
    def test_overlay_metadata(self):
        """
        overlay takes bottom stream metadata

        $ ffmpeg -y -i source.mp4 -i logo.mp4 -t 1 \
         -filter_complex '[0:v][1:v]overlay=x=100:y=100' test.mp4
        """
        logo_stream = inputs.Stream(
            VIDEO, meta=video_meta_data(width=100, height=100))
        self.input_list.append(inputs.input_file('logo.png', logo_stream))

        # Place the logo 2px away from the bottom-right source corner.
        overlay = self.source | Overlay(x=self.video_metadata.width - 2,
                                        y=self.video_metadata.height - 2)
        logo_stream | overlay
        overlay > self.output

        self.assertEqual('[0:v][1:v]overlay=x=1918:y=1078[vout0]',
                         self.fc.render())

        vm = cast(VideoMeta, self.output.codecs[0].get_meta_data())
        # Output dimensions come from the bottom (source) stream.
        self.assertEqual(vm.width, self.video_metadata.width)
        self.assertEqual(vm.height, self.video_metadata.height)
Example #8
0
    def test_concat_video_metadata(self):
        """
        Concat filter sums stream duration

        $ ffmpeg -y -i first.mp4 -i second.mp4 -filter_complex concat test.mp4
        """
        second_meta = video_meta_data(duration=1000.0,
                                      frame_count=10000,
                                      frame_rate=10.0)
        second_stream = inputs.Stream(VIDEO, meta=second_meta)
        self.input_list.append(inputs.input_file('second.mp4', second_stream))

        # Second stream goes into concat first; the source stream follows.
        concat = second_stream | Concat(VIDEO)
        self.source | concat
        concat > self.output

        vm = cast(VideoMeta, self.output.codecs[0].get_meta_data())
        # Both durations and frame counters are summed.
        self.assertEqual(self.video_metadata.duration + second_meta.duration,
                         vm.duration)
        self.assertEqual(self.video_metadata.frames + second_meta.frames,
                         vm.frames)
Example #9
0
    def test_codec_metadata_transform(self):
        """
        Codecs parameters applied to stream metadata when using transform.
        """
        with self.subTest('codec with transform'):
            # Codec transform overrides the source bitrate with the target.
            self.source.audio > self.output
            meta = cast(AudioMeta, self.output.codecs[1].meta)
            self.assertEqual(meta.bitrate, self.target_audio_bitrate)

        with self.subTest('no input metadata'):
            # Without input metadata a codec has no metadata either.
            src = inputs.input_file('input.mp4')
            dst = outputs.output_file('output.mp4',
                                      codecs.AudioCodec('aac'))
            src.audio > dst.audio
            self.assertIsNone(dst.codecs[0].meta)

        with self.subTest('no transform'):
            # Without a transform the source metadata passes through as-is.
            dst = outputs.output_file('output.mp4',
                                      codecs.AudioCodec('aac'))
            self.source.audio > dst.audio
            meta = cast(AudioMeta, dst.codecs[0].meta)
            self.assertEqual(meta.bitrate, self.audio_metadata.bitrate)
Example #10
0
    def test_detect_trim_buffering(self):
        """
        When trim and concat filters are used for editing timeline, buffering
        may occur if order of scenes in output file does not match order of same
        scenes in input file.

        Each case is (raises, first, second): whether buffering is expected,
        and the [start, end] trim ranges for the two output scenes.
        """
        cases = [
            (False, [1.0, 2.0], [2.0, 3.0]),  # output order matches source
            (True, [2.0, 3.0], [1.0, 2.0]),   # reversed order buffers
            (True, [2.0, 3.0], [2.0, 4.0]),   # overlapping scenes buffer
        ]
        for case in cases:
            with self.subTest(case):
                raises, first, second = case
                ff = FFMPEG()
                s1 = inputs.Stream(VIDEO, self.source.streams[0].meta)
                # streams[1] carries the audio metadata (see setUp), so this
                # must be an AUDIO stream; the original VIDEO kind here was a
                # copy-paste slip (compare test_fix_trim_buffering).
                s2 = inputs.Stream(AUDIO, self.source.streams[1].meta)

                ff < inputs.input_file('input.mp4', s1, s2)
                # Cut two scenes from the single video stream and glue them
                # back in the order given by the case.
                split = ff.video | filters.Split(VIDEO)
                t1 = split | filters.Trim(VIDEO, *first)
                p1 = t1 | filters.SetPTS(VIDEO)
                t2 = split | filters.Trim(VIDEO, *second)
                p2 = t2 | filters.SetPTS(VIDEO)

                concat = p1 | filters.Concat(VIDEO)
                output = outputs.output_file('output.mp4',
                                             codecs.VideoCodec('libx264'))
                p2 | concat > output

                ff > output
                # check_buffering() must raise exactly when `raises` is set.
                try:
                    ff.check_buffering()
                except BufferError as e:
                    self.assertTrue(raises, e)
                else:
                    self.assertFalse(raises)
Example #11
0
    def test_filter_graph(self):
        """ Filter complex smoke test and features demo.

        [I-1/Logo]---<Scale>-------
                                  |
        [I-0/input]--<Deint>--<Overlay>--<Split>--<Scale>--[O/480p]
                                            |
                                            ------<Scale>--[O/720p]
        """
        # Second input: logo picture with a single video stream.
        vs = inputs.Stream(VIDEO)
        logo = inputs.input_file('logo.png', vs)
        self.input_list.append(logo)
        out1 = outputs.output_file('out1.mp4')
        self.output_list.append(out1)

        deint = Deint()
        deint.enabled = False  # deinterlace is skipped

        # first video stream is deinterlaced
        next_node = self.source | deint

        left, top = 20, 20  # logo position

        # first overlay input is deinterlaced source (or source itself as
        # deint filter is disabled)
        over = next_node | Overlay(left, top)

        logo_width, logo_height = 200, 50  # logo scaled

        # second input stream is connected to logo scaler, followed by overlay
        # filter
        next_node = vs | Scale(logo_width, logo_height) | over

        # audio is split to two streams
        asplit = self.source | Split(AUDIO)

        for out in self.output_list:
            asplit > out

        # video split to two steams

        # connect split filter to overlayed video stream
        split = next_node | Split(VIDEO)

        # intermediate video stream scaling
        sizes = [(640, 480), (1280, 720)]

        for out, size in zip(self.output_list, sizes):
            # add scale filters to video streams
            w, h = size
            scale = Scale(w, h)
            # connect scaled streams to video destinations
            split | scale > out

        # Render the complete graph and compare against the expected
        # filter_complex string (note: the disabled Deint does not appear).
        result = self.fc.render()

        expected = ';'.join([
            # overlay logo
            '[0:v][v:scale0]overlay=x=20:y=20[v:overlay0]',
            # split video to two streams
            '[v:overlay0]split[v:split0][v:split1]',
            # each video is scaled to own size
            '[v:split0]scale=w=640:h=480[vout0]',
            '[v:split1]scale=w=1280:h=720[vout1]',

            # split audio to two streams
            '[0:a]asplit[aout0][aout1]',

            # logo scaling
            '[1:v]scale=w=200:h=50[v:scale0]',
        ])

        # Newlines inserted only to make assertion diffs readable.
        self.assertEqual(expected.replace(';', ';\n'),
                         result.replace(';', ';\n'))
Example #12
0
 def fc_factory():
     """Create a FilterComplex over a single video input and one output."""
     source = inputs.input_file("input.mp4", inputs.Stream(VIDEO))
     destination = outputs.output_file('output.mp4')
     graph = FilterComplex(inputs.InputList((source,)),
                           outputs.OutputList((destination,)))
     return graph, source, destination