def test_detect_concat_buffering(self):
    """
    When single source is used for multiple outputs, and one of outputs
    has a preroll, buffering occurs, because to output first frame
    for a non-preroll output, we need to buffer all preroll frames.
    """
    # each case: (raises, split_pre, split_src) - whether BufferError is
    # expected, and which of the inputs feed the second concat filter
    cases = [
        (False, True, True),  # preroll + source / preroll + source
        (False, True, False),  # preroll + source / preroll
        (True, False, True),  # preroll + source / source
    ]
    for case in cases:
        with self.subTest(case):
            raises, split_pre, split_src = case
            ff = FFMPEG()
            # streams initialized from fixture inputs' metadata
            v1 = inputs.Stream(VIDEO, self.preroll.streams[0].meta)
            a1 = inputs.Stream(AUDIO, self.preroll.streams[1].meta)
            v2 = inputs.Stream(VIDEO, self.source.streams[0].meta)
            a2 = inputs.Stream(AUDIO, self.source.streams[1].meta)

            ff < inputs.input_file('preroll.mp4', v1, a1)
            ff < inputs.input_file('source.mp4', v2, a2)

            # split each stream only if it is used by both outputs
            vf1 = v1 | filters.Split(VIDEO, output_count=int(split_pre) + 1)
            vf2 = v2 | filters.Split(VIDEO, output_count=int(split_src) + 1)
            af1 = a1 | filters.Split(AUDIO, output_count=int(split_pre) + 1)
            af2 = a2 | filters.Split(AUDIO, output_count=int(split_src) + 1)

            # first output always concatenates preroll + source
            vc1 = vf1 | filters.Concat(VIDEO, input_count=2)
            vf2 | vc1
            ac1 = af1 | filters.Concat(AUDIO, input_count=2)
            af2 | ac1

            # second output concatenates only flagged inputs
            vc2 = filters.Concat(VIDEO, int(split_pre) + int(split_src))
            if split_pre:
                vf1 | vc2
            if split_src:
                vf2 | vc2
            ac2 = filters.Concat(AUDIO, int(split_pre) + int(split_src))
            if split_pre:
                af1 | ac2
            if split_src:
                af2 | ac2

            o1 = outputs.output_file("o1.mp4", X264(), AAC())
            o2 = outputs.output_file("o2.mp4", X264(), AAC())

            vc1 > o1
            ac1 > o1
            vc2 > o2
            ac2 > o2

            ff > o1
            ff > o2

            try:
                ff.check_buffering()
            except BufferError as e:
                self.assertTrue(raises, e)
            else:
                self.assertFalse(raises)
def test_fix_preroll_buffering_with_trim(self):
    """
    We can fix buffering occurred from preroll by using trim filter.
    """
    ff = self.ffmpeg
    ff < self.preroll
    ff < self.source

    # FIX: both outputs were created with the same file name
    # ('original.mp4'); the concatenated output now gets a distinct
    # destination, matching its variable name.
    output = outputs.output_file('output.mp4',
                                 codecs.VideoCodec("libx264"))
    # output with original source content restored via trim
    original = outputs.output_file('original.mp4',
                                   codecs.VideoCodec("libx264"))

    preroll_stream = self.preroll.streams[0]
    source_stream = self.source.streams[0]

    # concatenate preroll and source video streams
    concat = preroll_stream | filters.Concat(VIDEO)
    source_stream | concat

    # split the concatenated stream to feed both outputs
    split = concat | filters.Split(VIDEO)
    split > output

    pd = preroll_stream.meta.duration
    sd = source_stream.meta.duration

    # cut the source part back out of the concatenated stream; this
    # compensates preroll buffering for the second output
    trim = split | filters.Trim(VIDEO, start=pd, end=pd + sd)
    trim | filters.SetPTS(VIDEO) > original

    ff > original
    ff > output

    ff.check_buffering()
def test_handle_codec_copy_with_other_filters(self):
    """ vcodec=copy with separate transcoded output."""
    ff = self.ffmpeg
    ff < self.source

    # first output copies both source streams without transcoding
    copy_video = codecs.VideoCodec('copy')
    copy_audio = codecs.AudioCodec('copy')
    ff > outputs.output_file('/tmp/copy.flv', copy_video, copy_audio)

    # second output transcodes scaled video and original audio
    x264 = codecs.VideoCodec('libx264')
    aac = codecs.AudioCodec('aac')
    self.source | filters.Scale(640, 360) > x264
    self.source > aac
    ff > outputs.output_file('/tmp/out.flv', x264, aac)

    self.assert_ffmpeg_args(
        '-i', 'source.mp4',
        '-filter_complex',
        '[0:v]scale=w=640:h=360[vout0]',
        '-map', '0:v', '-c:v', 'copy',
        '-map', '0:a', '-c:a', 'copy',
        '/tmp/copy.flv',
        '-map', '[vout0]', '-c:v', 'libx264',
        '-map', '0:a', '-c:a', 'aac',
        '/tmp/out.flv')
def test_shortcut_outputs_with_codec(self):
    """ Check ff > output shortcut if codecs list specified."""
    ff = FFMPEG(input=inputs.input_file("input.mp4"))
    scaled = ff.video | filters.Scale(width=1280, height=720)

    with self.assertRaises(RuntimeError):
        video_codec = codecs.VideoCodec("libx264")
        destination = ff > outputs.output_file("output.mp4", video_codec)
        # at this moment codec is connected to ffmpeg input stream
        # directly, so scaled video stream could not be connected to
        # output
        scaled > destination

    # correct order: connect filtered stream first, then the output
    video_codec = codecs.VideoCodec("libx264")
    destination = scaled > outputs.output_file("output.mp4", video_codec)
    ff > destination
def setUp(self) -> None:
    super().setUp()
    # source video: FullHD, 10 fps, 5 minutes long
    self.video_metadata = video_meta_data(
        width=1920,
        height=1080,
        dar=1.777777778,
        par=1.0,
        duration=300.0,
        frame_rate=10.0,
        frame_count=3000)
    # source audio: 200 seconds of 48 kHz @ 128 kbit/s
    self.source_audio_duration = 200.0
    self.source_sampling_rate = 48000
    self.source_samples_count = (self.source_audio_duration *
                                 self.source_sampling_rate)
    self.source_audio_bitrate = 128000
    self.audio_metadata = audio_meta_data(
        duration=self.source_audio_duration,
        sampling_rate=self.source_sampling_rate,
        samples_count=self.source_samples_count,
        bit_rate=self.source_audio_bitrate,
    )
    # audio is transcoded to a lower bitrate
    self.target_audio_bitrate = 64000

    self.source = inputs.Input(
        input_file='input.mp4',
        streams=(inputs.Stream(VIDEO, meta=self.video_metadata),
                 inputs.Stream(AUDIO, meta=self.audio_metadata)))
    self.output = outputs.output_file(
        'output.mp4',
        codecs.VideoCodec('libx264'),
        FdkAAC(bitrate=self.target_audio_bitrate))
    self.input_list = inputs.InputList((self.source,))
    self.output_list = outputs.OutputList((self.output,))
    self.fc = FilterComplex(self.input_list, self.output_list)
def setUp(self) -> None:
    super().setUp()
    # hour-long main file with a video and an audio stream
    self.source = inputs.input_file(
        'source.mp4',
        inputs.Stream(VIDEO, video_meta_data(
            duration=3600.0, width=640, height=360)),
        inputs.Stream(AUDIO, audio_meta_data(duration=3600.0)))
    # static logo image: a single video stream
    self.logo = inputs.input_file(
        'logo.png',
        inputs.Stream(VIDEO, video_meta_data(width=64, height=64)))
    # 10-second preroll with the same dimensions as the source
    self.preroll = inputs.input_file(
        'preroll.mp4',
        inputs.Stream(VIDEO, video_meta_data(
            duration=10.0, width=640, height=360)),
        inputs.Stream(AUDIO, audio_meta_data(duration=10.0)))

    self.video_codec = X264(bitrate=3600000)
    self.audio_codec = AAC(bitrate=192000)
    self.output = outputs.output_file(
        'output.mp4', self.video_codec, self.audio_codec)
    self.ffmpeg = FFMPEG()
def test_ffmpeg(self):
    """ Smoke test and feature demo."""
    ff = self.ffmpeg
    # global ffmpeg flags
    ff.loglevel = 'info'
    ff.realtime = True
    # per-input flags: fast seek (-ss) and duration limit (-t),
    # see expected args below
    self.source.fast_seek = 123.2
    self.source.duration = TS(321.2)
    ff < self.source

    cv0 = self.video_codec
    ca0 = self.audio_codec
    # extra mp3 audio codec for the second output
    ca1 = codecs.AudioCodec('libmp3lame', bitrate=394000)

    # audio is used by both outputs, so it must be split
    asplit = self.source.audio | filters.Split(AUDIO)

    self.source.video | filters.Scale(640, 360) > cv0

    # connect both split audio streams to corresponding codecs
    asplit.connect_dest(ca0)
    asplit.connect_dest(ca1)

    out0 = self.output
    # audio-only output: note '-vn' in expected args
    out1 = outputs.output_file('/tmp/out.mp3', ca1)

    ff > out0
    ff > out1

    self.assert_ffmpeg_args(
        '-loglevel', 'info',
        '-re',
        '-ss', '123.2',
        '-t', '321.2',
        '-i', 'source.mp4',
        '-filter_complex',
        '[0:v]scale=w=640:h=360[vout0];[0:a]asplit[aout0][aout1]',
        '-map', '[vout0]', '-c:v', 'libx264', '-b:v', '3600000',
        '-map', '[aout0]', '-c:a', 'aac', '-b:a', '192000',
        'output.mp4',
        '-map', '[aout1]', '-c:a', 'libmp3lame', '-b:a', '394000',
        '-vn',
        '/tmp/out.mp3')
def test_reuse_input_files(self):
    """ Reuse input files multiple times."""
    ff = self.ffmpeg
    ff < self.source
    video_stream = self.source.streams[0]
    audio_stream = self.source.streams[1]

    # first output transcodes source streams
    ff > self.output

    # second output maps the very same streams again, copying them
    copy_video = codecs.VideoCodec('copy')
    copy_audio = codecs.AudioCodec('copy')
    copy_output = outputs.output_file('/tmp/out1.flv',
                                      copy_video, copy_audio)
    video_stream > copy_video
    audio_stream > copy_audio
    ff > copy_output

    self.assert_ffmpeg_args(
        '-i', 'source.mp4',
        '-map', '0:v', '-c:v', 'libx264', '-b:v', '3600000',
        '-map', '0:a', '-c:a', 'aac', '-b:a', '192000',
        'output.mp4',
        '-map', '0:v', '-c:v', 'copy',
        '-map', '0:a', '-c:a', 'copy',
        '/tmp/out1.flv',
    )
def test_transcoding_without_graph(self):
    """ Transcoding works without filter graph."""
    ff = self.ffmpeg
    ff < self.source
    ff > outputs.output_file('/dev/null', format='null')
    # no streams are mapped, so both video and audio are disabled
    self.assert_ffmpeg_args(
        '-i', 'source.mp4',
        '-vn', '-an',
        '-f', 'null',
        '/dev/null')
def test_no_audio_if_no_codecs_found(self):
    """ If no audio codecs specified, set -an flag for an output."""
    ff = self.ffmpeg
    ff < self.source
    # output declares a video codec only, no audio codec
    video_only = outputs.output_file('out.mp4',
                                     codecs.VideoCodec('libx264'))
    ff.video | filters.Scale(640, 360) > video_only
    ff > video_only
    self.assert_ffmpeg_args(
        '-i', 'source.mp4',
        '-filter_complex', '[0:v]scale=w=640:h=360[vout0]',
        '-map', '[vout0]', '-c:v', 'libx264',
        '-an',
        'out.mp4')
def test_codec_metadata_transform(self):
    """
    Codecs parameters applied to stream metadata when using transform.
    """
    with self.subTest('codec with transform'):
        self.source.audio > self.output
        meta = cast(AudioMeta, self.output.codecs[1].meta)
        self.assertEqual(meta.bitrate, self.target_audio_bitrate)

    with self.subTest('no input metadata'):
        meta_less_input = inputs.input_file('input.mp4')
        destination = outputs.output_file('output.mp4',
                                          codecs.AudioCodec('aac'))
        meta_less_input.audio > destination.audio
        self.assertIsNone(destination.codecs[0].meta)

    with self.subTest('no transform'):
        destination = outputs.output_file('output.mp4',
                                          codecs.AudioCodec('aac'))
        self.source.audio > destination.audio
        meta = cast(AudioMeta, destination.codecs[0].meta)
        self.assertEqual(meta.bitrate, self.audio_metadata.bitrate)
def test_handle_codec_copy(self):
    """ vcodec=copy connects source directly to muxer."""
    ff = self.ffmpeg
    ff < self.source
    # video is copied as-is, audio goes through the volume filter
    video_copy = codecs.Copy(kind=VIDEO)
    aac = codecs.AudioCodec('aac', bitrate=128000)
    ff.audio | Volume(20) > aac
    ff > outputs.output_file('/tmp/out.flv', video_copy, aac)
    self.assert_ffmpeg_args(
        '-i', 'source.mp4',
        '-filter_complex', '[0:a]volume=20.00[aout0]',
        '-map', '0:v', '-c:v', 'copy',
        '-map', '[aout0]', '-c:a', 'aac', '-b:a', '128000',
        '/tmp/out.flv')
def transcode(self) -> None:
    """ Transcodes video

    * checks source mediainfo
    * runs `ffmpeg`
    * validates result
    """
    audio_meta, video_meta = self.get_meta_data(self.source)

    # Get source mediainfo to use in validation
    source_media_info = self.get_media_info(video_meta, audio_meta)

    # set group of pixels length to segment size
    gop = math.floor(source_media_info[VIDEO_FRAME_RATE] * GOP_DURATION)
    # preserve original video FPS
    vrate = source_media_info[VIDEO_FRAME_RATE]
    # preserve source audio sampling rate
    arate = source_media_info[AUDIO_SAMPLING_RATE]

    # Common ffmpeg flags
    ff = FFMPEG(overwrite=True, loglevel='repeat+level+info')
    # Init source file
    ff < input_file(self.source,
                    Stream(VIDEO, video_meta),
                    Stream(AUDIO, audio_meta))

    # Output codecs, initialized from module-level transcoding options
    video_opts = cast(Dict[str, Any], TRANSCODING_OPTIONS[VIDEO_CODEC])
    cv0 = VideoCodec(gop=gop, rate=vrate, **video_opts)
    audio_opts = cast(Dict[str, Any], TRANSCODING_OPTIONS[AUDIO_CODEC])
    ca0 = AudioCodec(rate=arate, **audio_opts)

    # Scaling
    ff.video | Scale(**TRANSCODING_OPTIONS[SCALE]) > cv0

    # codecs, muxer and output path
    ff > output_file(self.destination, cv0, ca0, format='mp4')

    # Run ffmpeg
    self.run(ff)

    # Get result mediainfo
    audio_meta, video_meta = self.get_meta_data(self.destination)
    dest_media_info = self.get_media_info(video_meta, audio_meta)

    # Validate ffmpeg result
    self.validate(source_media_info, dest_media_info)
def setUp(self) -> None:
    super().setUp()
    # FullHD video at 10 fps, 5 minutes long
    self.video_metadata = video_meta_data(
        width=1920,
        height=1080,
        dar=1.777777778,
        par=1.0,
        duration=300.0,
        frame_rate=10.0,
        frame_count=3000)
    # 48 kHz audio, 200 seconds long
    self.audio_metadata = audio_meta_data(
        duration=200.0,
        sampling_rate=48000,
        samples_count=200 * 48000)

    self.source = inputs.Input(
        input_file='input.mp4',
        streams=(inputs.Stream(VIDEO, meta=self.video_metadata),
                 inputs.Stream(AUDIO, meta=self.audio_metadata)))
    self.output = outputs.output_file(
        'output.mp4',
        codecs.VideoCodec('libx264'),
        codecs.AudioCodec('libfdk_aac'))
    self.input_list = inputs.InputList((self.source,))
    self.output_list = outputs.OutputList((self.output,))
    self.fc = FilterComplex(self.input_list, self.output_list)
def test_detect_trim_buffering(self):
    """
    When trim and concat filters are used for editing timeline,
    buffering may occur if order of scenes in output file does not
    match order of same scenes in input file.
    """
    # each case: (raises, first, second) - whether BufferError is
    # expected for given trim intervals of the two scenes
    cases = [
        (False, [1.0, 2.0], [2.0, 3.0]),
        (True, [2.0, 3.0], [1.0, 2.0]),
        (True, [2.0, 3.0], [2.0, 4.0]),
    ]
    for case in cases:
        with self.subTest(case):
            raises, first, second = case
            ff = FFMPEG()
            s1 = inputs.Stream(VIDEO, self.source.streams[0].meta)
            # FIX: streams[1] is the source's audio stream, so the
            # stream kind must be AUDIO (was VIDEO, mismatching the
            # attached metadata).
            s2 = inputs.Stream(AUDIO, self.source.streams[1].meta)
            ff < inputs.input_file('input.mp4', s1, s2)

            # cut two scenes from the same video stream and glue them
            # together in the order given by the test case
            split = ff.video | filters.Split(VIDEO)
            t1 = split | filters.Trim(VIDEO, *first)
            p1 = t1 | filters.SetPTS(VIDEO)
            t2 = split | filters.Trim(VIDEO, *second)
            p2 = t2 | filters.SetPTS(VIDEO)

            concat = p1 | filters.Concat(VIDEO)
            output = outputs.output_file('output.mp4',
                                         codecs.VideoCodec('libx264'))
            p2 | concat > output
            ff > output

            try:
                ff.check_buffering()
            except BufferError as e:
                self.assertTrue(raises, e)
            else:
                self.assertFalse(raises)
def test_fix_trim_buffering(self):
    """
    Trim buffering could be fixed with multiple source file decoding.
    """
    ff = FFMPEG()
    # the same file is opened twice so each scene is decoded
    # independently
    first_video = inputs.Stream(VIDEO, self.source.streams[0].meta)
    first_audio = inputs.Stream(AUDIO, self.source.streams[1].meta)
    second_video = inputs.Stream(VIDEO, self.source.streams[0].meta)
    second_audio = inputs.Stream(AUDIO, self.source.streams[1].meta)

    first = ff < inputs.input_file('input.mp4', first_video, first_audio)
    second = ff < inputs.input_file('input.mp4', second_video, second_audio)

    # a later scene is cut from the first input, an earlier one from
    # the second input
    scene1 = (first.video |
              filters.Trim(VIDEO, 2.0, 3.0) |
              filters.SetPTS(VIDEO))
    scene2 = (second.video |
              filters.Trim(VIDEO, 1.0, 2.0) |
              filters.SetPTS(VIDEO))

    output = outputs.output_file('output.mp4',
                                 codecs.VideoCodec('libx264'))

    concat = scene1 | filters.Concat(VIDEO)
    scene2 | concat > output

    ff > output
    ff.check_buffering()
def test_filter_graph(self):
    """ Filter complex smoke test and features demo.

    [I-1/Logo]---<Scale>-------+
                               |
    [I-0/input]--<Deint>--<Overlay>--<Split>--<Scale>--[O/480p]
                                        |
                                        +-----<Scale>--[O/720p]
    """
    vs = inputs.Stream(VIDEO)
    logo = inputs.input_file('logo.png', vs)
    self.input_list.append(logo)
    out1 = outputs.output_file('out1.mp4')
    self.output_list.append(out1)

    deint = Deint()
    deint.enabled = False  # deinterlace is skipped

    # first video stream is deinterlaced
    next_node = self.source | deint

    left, top = 20, 20  # logo position

    # first overlay input is deinterlaced source (or source itself as
    # deint filter is disabled)
    over = next_node | Overlay(left, top)

    logo_width, logo_height = 200, 50  # logo scaled

    # second input stream is connected to logo scaler, followed by
    # overlay filter
    next_node = vs | Scale(logo_width, logo_height) | over

    # audio is split to two streams
    asplit = self.source | Split(AUDIO)

    for out in self.output_list:
        asplit > out

    # video split to two steams

    # connect split filter to overlayed video stream
    split = next_node | Split(VIDEO)

    # intermediate video stream scaling
    sizes = [(640, 480), (1280, 720)]

    for out, size in zip(self.output_list, sizes):
        # add scale filters to video streams
        w, h = size
        scale = Scale(w, h)
        # connect scaled streams to video destinations
        split | scale > out

    result = self.fc.render()

    expected = ';'.join([
        # overlay logo
        '[0:v][v:scale0]overlay=x=20:y=20[v:overlay0]',
        # split video to two streams
        '[v:overlay0]split[v:split0][v:split1]',
        # each video is scaled to own size
        '[v:split0]scale=w=640:h=480[vout0]',
        '[v:split1]scale=w=1280:h=720[vout1]',
        # split audio to two streams
        '[0:a]asplit[aout0][aout1]',
        # logo scaling
        '[1:v]scale=w=200:h=50[v:scale0]',
    ])

    self.assertEqual(expected.replace(';', ';\n'),
                     result.replace(';', ';\n'))
def fc_factory():
    """ Create a FilterComplex with a single input and output file."""
    source = inputs.input_file("input.mp4", inputs.Stream(VIDEO))
    destination = outputs.output_file('output.mp4')
    fc = FilterComplex(
        inputs.InputList((source,)),
        outputs.OutputList((destination,)),
    )
    return fc, source, destination