def test_detect_concat_buffering(self):
    """
    When single source is used for multiple outputs, and one of outputs
    has a preroll, buffering occurs, because to output first frame for a
    non-preroll output, we need to buffer all preroll frames.
    """
    # Each case: (buffering expected, preroll stream split to both
    # outputs, source stream split to both outputs).
    cases = [
        (False, True, True),  # preroll + source / preroll + source
        (False, True, False),  # preroll + source / preroll
        (True, False, True),  # preroll + source / source
    ]
    for case in cases:
        with self.subTest(case):
            raises, split_pre, split_src = case
            ff = FFMPEG()
            # Fresh streams per subtest, reusing fixture metadata.
            v1 = inputs.Stream(VIDEO, self.preroll.streams[0].meta)
            a1 = inputs.Stream(AUDIO, self.preroll.streams[1].meta)
            v2 = inputs.Stream(VIDEO, self.source.streams[0].meta)
            a2 = inputs.Stream(AUDIO, self.source.streams[1].meta)
            ff < inputs.input_file('preroll.mp4', v1, a1)
            ff < inputs.input_file('source.mp4', v2, a2)
            # Split each input stream; output_count=2 when a stream
            # feeds both outputs, 1 when it feeds only the first.
            vf1 = v1 | filters.Split(VIDEO, output_count=int(split_pre) + 1)
            vf2 = v2 | filters.Split(VIDEO, output_count=int(split_src) + 1)
            af1 = a1 | filters.Split(AUDIO, output_count=int(split_pre) + 1)
            af2 = a2 | filters.Split(AUDIO, output_count=int(split_src) + 1)
            # First output always concatenates preroll + source.
            vc1 = vf1 | filters.Concat(VIDEO, input_count=2)
            vf2 | vc1
            ac1 = af1 | filters.Concat(AUDIO, input_count=2)
            af2 | ac1
            # Second output concatenates whichever streams the case
            # routes to it (preroll, source, or both).
            vc2 = filters.Concat(VIDEO, int(split_pre) + int(split_src))
            if split_pre:
                vf1 | vc2
            if split_src:
                vf2 | vc2
            ac2 = filters.Concat(AUDIO, int(split_pre) + int(split_src))
            if split_pre:
                af1 | ac2
            if split_src:
                af2 | ac2
            o1 = outputs.output_file("o1.mp4", X264(), AAC())
            o2 = outputs.output_file("o2.mp4", X264(), AAC())
            vc1 > o1
            ac1 > o1
            vc2 > o2
            ac2 > o2
            ff > o1
            ff > o2
            # check_buffering() raises BufferError iff the graph forces
            # frame buffering; assert it matches the case expectation.
            try:
                ff.check_buffering()
            except BufferError as e:
                self.assertTrue(raises, e)
            else:
                self.assertFalse(raises)
def setUp(self) -> None:
    """Prepare source, logo and preroll inputs plus a default output."""
    super().setUp()
    # Hour-long main source with both video and audio streams.
    source_vm = video_meta_data(duration=3600.0, width=640, height=360)
    source_am = audio_meta_data(duration=3600.0)
    self.source = inputs.input_file(
        'source.mp4',
        inputs.Stream(VIDEO, source_vm),
        inputs.Stream(AUDIO, source_am))
    # Still image logo: video stream only.
    self.logo = inputs.input_file(
        'logo.png',
        inputs.Stream(VIDEO, video_meta_data(width=64, height=64)))
    # Short preroll clip with the same frame size as the source.
    preroll_vm = video_meta_data(duration=10.0, width=640, height=360)
    preroll_am = audio_meta_data(duration=10.0)
    self.preroll = inputs.input_file(
        'preroll.mp4',
        inputs.Stream(VIDEO, preroll_vm),
        inputs.Stream(AUDIO, preroll_am))
    self.video_codec = X264(bitrate=3600000)
    self.audio_codec = AAC(bitrate=192000)
    self.output = outputs.output_file(
        'output.mp4', self.video_codec, self.audio_codec)
    self.ffmpeg = FFMPEG()
def setUp(self) -> None:
    """Initialize source metadata, codecs and a filter complex."""
    super().setUp()
    self.video_metadata = video_meta_data(
        width=1920,
        height=1080,
        dar=1.777777778,
        par=1.0,
        duration=300.0,
        frame_rate=10.0,
        frame_count=3000)
    # Audio track is shorter than video and keeps its own parameters
    # around for assertions in tests.
    self.source_audio_duration = 200.0
    self.source_sampling_rate = 48000
    self.source_samples_count = (self.source_audio_duration *
                                 self.source_sampling_rate)
    self.source_audio_bitrate = 128000
    self.audio_metadata = audio_meta_data(
        duration=self.source_audio_duration,
        sampling_rate=self.source_sampling_rate,
        samples_count=self.source_samples_count,
        bit_rate=self.source_audio_bitrate,
    )
    self.target_audio_bitrate = 64000
    video_stream = inputs.Stream(VIDEO, meta=self.video_metadata)
    audio_stream = inputs.Stream(AUDIO, meta=self.audio_metadata)
    self.source = inputs.Input(
        input_file='input.mp4',
        streams=(video_stream, audio_stream))
    self.output = outputs.output_file(
        'output.mp4',
        codecs.VideoCodec('libx264'),
        FdkAAC(bitrate=self.target_audio_bitrate))
    self.input_list = inputs.InputList((self.source,))
    self.output_list = outputs.OutputList((self.output,))
    self.fc = FilterComplex(self.input_list, self.output_list)
def setUp(self) -> None:
    """Create bare video/audio streams and two inputs holding them."""
    self.v1 = inputs.Stream(StreamType.VIDEO)
    self.a1 = inputs.Stream(StreamType.AUDIO)
    self.v2 = inputs.Stream(StreamType.VIDEO)
    self.a2 = inputs.Stream(StreamType.AUDIO)
    self.a3 = inputs.Stream(StreamType.AUDIO)
    # First input: video + audio; second input: audio/video/audio mix.
    self.i1 = inputs.Input(streams=(self.v1, self.a1))
    self.i2 = inputs.Input(streams=(self.a2, self.v2, self.a3))
def setUp(self) -> None:
    """Prepare an mp4 source and a cuda-encoded output."""
    self.video_metadata = meta.video_meta_data(
        width=1920,
        height=1080,
        dar=1.777777778,
        par=1.0,
        duration=300.0,
    )
    self.audio_metadata = meta.audio_meta_data()
    video_stream = inputs.Stream(VIDEO, meta=self.video_metadata)
    audio_stream = inputs.Stream(AUDIO, meta=self.audio_metadata)
    self.source = inputs.Input(
        input_file='input.mp4',
        streams=(video_stream, audio_stream))
    self.output = outputs.Output(
        output_file='output.mp4',
        codecs=[H264Cuda(), codecs.AudioCodec('aac')])
def test_validate_stream_kind(self):
    """ Stream without proper StreamType can't be added to input. """
    # noinspection PyTypeChecker
    with self.assertRaises(ValueError):
        inputs.Input(streams=(inputs.Stream(kind=None),),
                     input_file='input.mp4')
def test_append_source(self):
    """ Source file streams receive indices when appended to input list. """
    stream = inputs.Stream(StreamType.VIDEO)
    input_list = inputs.InputList()
    # Appending an input assigns ffmpeg-style names to its streams.
    input_list.append(inputs.Input(streams=(stream,)))
    self.assertEqual('0:v', stream.name)
def setUp(self) -> None:
    """Build a single-source filter complex with cpu codecs."""
    super().setUp()
    self.video_metadata = video_meta_data(
        width=1920,
        height=1080,
        dar=1.777777778,
        par=1.0,
        duration=300.0,
        frame_rate=10.0,
        frame_count=3000)
    # Audio is shorter than video on purpose.
    self.audio_metadata = audio_meta_data(
        duration=200.0,
        sampling_rate=48000,
        samples_count=200 * 48000)
    video_stream = inputs.Stream(VIDEO, meta=self.video_metadata)
    audio_stream = inputs.Stream(AUDIO, meta=self.audio_metadata)
    self.source = inputs.Input(
        input_file='input.mp4',
        streams=(video_stream, audio_stream))
    self.output = outputs.output_file(
        'output.mp4',
        codecs.VideoCodec('libx264'),
        codecs.AudioCodec('libfdk_aac'))
    self.input_list = inputs.InputList((self.source,))
    self.output_list = outputs.OutputList((self.output,))
    self.fc = FilterComplex(self.input_list, self.output_list)
def test_concat_scenes(self):
    """ Concat shifts scenes start/end timestamps. """
    video_meta = video_meta_data(duration=1000.0,
                                 frame_count=10000,
                                 frame_rate=10.0)
    streams = [inputs.Stream(VIDEO, meta=video_meta) for _ in range(3)]
    concat = Concat(VIDEO, input_count=3)
    for stream in streams:
        stream | concat
    # Build expected scene list from copies of each input's scenes.
    expected = []
    for stream in streams:
        expected.extend(deepcopy(stream.meta.scenes))
    assert len(expected) == 3
    # Each scene is shifted by the total duration of preceding scenes.
    offset = TS(0)
    for scene in expected:
        scene.position += offset
        offset += scene.duration
    self.assertListEqual(concat.meta.scenes, expected)
def test_detect_trim_buffering(self):
    """
    When trim and concat filters are used for editing timeline,
    buffering may occur if order of scenes in output file does not match
    order of same scenes in input file.
    """
    # Each case: (buffering expected, trim range of first concat part,
    # trim range of second concat part). Buffering happens when the
    # first part starts later in the source than the second, or when
    # the parts overlap.
    cases = [
        (False, [1.0, 2.0], [2.0, 3.0]),
        (True, [2.0, 3.0], [1.0, 2.0]),
        (True, [2.0, 3.0], [2.0, 4.0]),
    ]
    for case in cases:
        with self.subTest(case):
            raises, first, second = case
            ff = FFMPEG()
            # Fresh streams per subtest, reusing fixture metadata.
            s1 = inputs.Stream(VIDEO, self.source.streams[0].meta)
            s2 = inputs.Stream(VIDEO, self.source.streams[1].meta)
            ff < inputs.input_file('input.mp4', s1, s2)
            # Single decoded video stream is split into two branches,
            # each trimmed to its own range and timestamp-reset.
            split = ff.video | filters.Split(VIDEO)
            t1 = split | filters.Trim(VIDEO, *first)
            p1 = t1 | filters.SetPTS(VIDEO)
            t2 = split | filters.Trim(VIDEO, *second)
            p2 = t2 | filters.SetPTS(VIDEO)
            concat = p1 | filters.Concat(VIDEO)
            output = outputs.output_file('output.mp4',
                                         codecs.VideoCodec('libx264'))
            p2 | concat > output
            ff > output
            # check_buffering() raises BufferError iff the scene order
            # forces frame buffering; assert it matches the case.
            try:
                ff.check_buffering()
            except BufferError as e:
                self.assertTrue(raises, e)
            else:
                self.assertFalse(raises)
def test_fix_trim_buffering(self):
    """ Trim buffering could be fixed with multiple source file decoding. """
    ff = FFMPEG()
    first_video = inputs.Stream(VIDEO, self.source.streams[0].meta)
    first_audio = inputs.Stream(AUDIO, self.source.streams[1].meta)
    second_video = inputs.Stream(VIDEO, self.source.streams[0].meta)
    second_audio = inputs.Stream(AUDIO, self.source.streams[1].meta)
    # Same file is added twice, so each trimmed part gets its own
    # decoder and no buffering is needed.
    in1 = ff < inputs.input_file('input.mp4', first_video, first_audio)
    in2 = ff < inputs.input_file('input.mp4', second_video, second_audio)
    late_part = in1.video | filters.Trim(VIDEO, 2.0, 3.0) | filters.SetPTS(VIDEO)
    early_part = in2.video | filters.Trim(VIDEO, 1.0, 2.0) | filters.SetPTS(VIDEO)
    output = outputs.output_file('output.mp4',
                                 codecs.VideoCodec('libx264'))
    concat = late_part | filters.Concat(VIDEO)
    early_part | concat > output
    ff > output
    # Must not raise BufferError.
    ff.check_buffering()
def test_concat_audio_metadata(self):
    """ Concat filter sums samples count for audio streams. """
    second_meta = audio_meta_data(duration=1000.0,
                                  sampling_rate=24000,
                                  samples_count=24000 * 1000)
    second_stream = inputs.Stream(AUDIO, meta=second_meta)
    self.input_list.append(inputs.input_file('second.mp4', second_stream))
    concat = second_stream | Concat(AUDIO)
    self.source | concat
    concat > self.output
    am = cast(AudioMeta, self.output.codecs[-1].get_meta_data())
    # Durations are summed...
    self.assertEqual(self.audio_metadata.duration + second_meta.duration,
                     am.duration)
    # ...and samples count matches total duration at source rate.
    self.assertEqual(round(am.duration * second_meta.sampling_rate),
                     am.samples)
def test_overlay_metadata(self):
    """
    overlay takes bottom stream metadata

    $ ffmpeg -y -i source.mp4 -i logo.mp4 -t 1 \
    -filter_complex '[0:v][1:v]overlay=x=100:y=100' test.mp4
    """
    logo_stream = inputs.Stream(VIDEO,
                                meta=video_meta_data(width=100, height=100))
    self.input_list.append(inputs.input_file('logo.png', logo_stream))
    # Logo is placed near the bottom-right corner of the source frame.
    overlay = self.source | Overlay(x=self.video_metadata.width - 2,
                                    y=self.video_metadata.height - 2)
    logo_stream | overlay
    overlay > self.output
    self.assertEqual('[0:v][1:v]overlay=x=1918:y=1078[vout0]',
                     self.fc.render())
    vm = cast(VideoMeta, self.output.codecs[0].get_meta_data())
    # Output keeps the bottom (source) stream dimensions.
    self.assertEqual(vm.width, self.video_metadata.width)
    self.assertEqual(vm.height, self.video_metadata.height)
def test_validate_input_hardware(self):
    """
    Hardware-decoded input could not be passed to CPU codec and so on.
    """
    video_stream = inputs.Stream(StreamType.VIDEO,
                                 meta=video_meta_data(width=640, height=360))
    src = inputs.Input(streams=(video_stream,),
                       hardware='cuda',
                       device='foo')

    @dataclass
    class X264(VideoCodec):
        codec = 'libx264'
        hardware = None  # cpu only

    # cuda-decoded stream can't go straight to a cpu codec...
    with self.assertRaises(ValueError):
        src.video > X264()
    # ...nor to a cpu filter.
    with self.assertRaises(ValueError):
        src.video | filters.Scale(640, 360)
    # cuda filter chained to cuda codec is accepted.
    src.video | ScaleNPP(640, 360) > H264Cuda()
def test_concat_video_metadata(self):
    """
    Concat filter sums stream duration

    $ ffmpeg -y -i first.mp4 -i second.mp4 -filter_complex concat test.mp4
    """
    second_meta = video_meta_data(duration=1000.0,
                                  frame_count=10000,
                                  frame_rate=10.0)
    second_stream = inputs.Stream(VIDEO, meta=second_meta)
    self.input_list.append(inputs.input_file('second.mp4', second_stream))
    concat = second_stream | Concat(VIDEO)
    self.source | concat
    concat > self.output
    vm = cast(VideoMeta, self.output.codecs[0].get_meta_data())
    # Both duration and frame count are summed by concat.
    self.assertEqual(self.video_metadata.duration + second_stream.meta.duration,
                     vm.duration)
    self.assertEqual(self.video_metadata.frames + second_meta.frames,
                     vm.frames)
def test_filter_graph(self):
    """
    Filter complex smoke test and features demo.

    [I-1/Logo]---<Scale>-------
                              |
    [I-0/input]--<Deint>--<Overlay>--<Split>--<Scale>--[O/480p]
                                       |
                                       ------<Scale>--[O/720p]
    """
    vs = inputs.Stream(VIDEO)
    logo = inputs.input_file('logo.png', vs)
    self.input_list.append(logo)
    out1 = outputs.output_file('out1.mp4')
    self.output_list.append(out1)
    deint = Deint()
    deint.enabled = False  # deinterlace is skipped

    # first video stream is deinterlaced
    next_node = self.source | deint

    left, top = 20, 20  # logo position

    # first overlay input is deinterlaced source (or source itself as
    # deint filter is disabled)
    over = next_node | Overlay(left, top)

    logo_width, logo_height = 200, 50  # logo scaled

    # second input stream is connected to logo scaler, followed by overlay
    # filter
    next_node = vs | Scale(logo_width, logo_height) | over

    # audio is split to two streams
    asplit = self.source | Split(AUDIO)

    for out in self.output_list:
        asplit > out

    # video split to two steams

    # connect split filter to overlayed video stream
    split = next_node | Split(VIDEO)

    # intermediate video stream scaling
    sizes = [(640, 480), (1280, 720)]

    for out, size in zip(self.output_list, sizes):
        # add scale filters to video streams
        w, h = size
        scale = Scale(w, h)
        # connect scaled streams to video destinations
        split | scale > out

    result = self.fc.render()

    # Expected render: overlay, split, per-output scaling, audio split
    # and logo scaling, joined with ';' in graph traversal order.
    expected = ';'.join([
        # overlay logo
        '[0:v][v:scale0]overlay=x=20:y=20[v:overlay0]',
        # split video to two streams
        '[v:overlay0]split[v:split0][v:split1]',
        # each video is scaled to own size
        '[v:split0]scale=w=640:h=480[vout0]',
        '[v:split1]scale=w=1280:h=720[vout1]',
        # split audio to two streams
        '[0:a]asplit[aout0][aout1]',
        # logo scaling
        '[1:v]scale=w=200:h=50[v:scale0]',
    ])

    # Newlines added only to make assertion failure diffs readable.
    self.assertEqual(expected.replace(';', ';\n'),
                     result.replace(';', ';\n'))
def fc_factory():
    """Return a fresh filter complex along with its input and output."""
    source = inputs.input_file("input.mp4", inputs.Stream(VIDEO))
    destination = outputs.output_file('output.mp4')
    filter_complex = FilterComplex(
        inputs.InputList((source,)),
        outputs.OutputList((destination,)))
    return filter_complex, source, destination