def _make(images, scene_duration, dir, ffmpeg, width, height, audio, effect,
          transition, batch_mode):
    """Assemble a slideshow video from *images* with ffmpeg.

    Builds a ``-filter_complex`` graph combining the requested per-slide
    *effect* (e.g. ``zoompan``) and *transition* (e.g. ``fadeinout``,
    ``slidein``), optionally mixes in an audio track, and runs ffmpeg.

    :param images: ordered list of image paths; falsy -> no work, returns None
    :param scene_duration: seconds each slide is shown
    :param dir: output/working directory (may be falsy for CWD)
    :param ffmpeg: path to the ffmpeg executable
    :param width/height: target size; None -> derive (-2 keeps aspect ratio)
    :param audio: truthy to add a looped, trimmed, faded-out audio track
    :param effect: per-slide effect name or None
    :param transition: transition name or None
    :param batch_mode: BatchMode value; non_initial_batch means the first
        image only seeds the transition from the previous batch
    :return: path of the rendered video file, or None if *images* is empty
    """
    # exit if no images were found
    if not images:
        return None

    scene_duration_f = scene_duration * FPS
    # Round the requested size DOWN to an even number (libx264 requires even
    # dimensions).  Floor division is essential: plain '/' under Python 3
    # would produce a float and never actually drop the odd pixel.
    # -2 tells ffmpeg to derive this dimension while preserving aspect ratio.
    w = width // 2 * 2 if width is not None \
        else -2 if height is not None else OUTPUT_VIDEO_WIDTH
    h = height // 2 * 2 if height is not None \
        else -2 if width is not None else OUTPUT_VIDEO_HEIGHT

    # animation key -> (filter graph, "preserve first slide" flag)
    animations = {
        "zoompan": (CombiningFilter(
            [ZoompanEffectFilter(maxzoom=MAX_ZOOM, frames=scene_duration_f),
             ImageSlideFilter(duration=scene_duration, width=w, height=h)],
            outstreamprefix="zpaf"), False),
        "fadeinout": (CombiningFilter(
            [FadeTransitionFilter(transition_duration=TRANSITION_T,
                                  total_duration=scene_duration),
             ImageSlideFilter(duration=scene_duration, width=w, height=h)],
            outstreamprefix="faf"), False),
        "zoompanfadeinout": (CombiningFilter(
            [ZoompanEffectFilter(maxzoom=MAX_ZOOM, frames=scene_duration_f),
             FadeTransitionFilter(transition_duration=TRANSITION_T,
                                  total_duration=scene_duration),
             ImageSlideFilter(duration=scene_duration, width=w, height=h)],
            outstreamprefix="zpfaf"), False),
        "slidein": (FilterChain(
            [ImageSlideFilter(duration=scene_duration, width=w, height=h),
             SlideTransitionFilter(
                 transition_duration=TRANSITION_T,
                 preserve_first=batch_mode != BatchMode.non_initial_batch)]),
            True),
        "zoompanslidein": (ZoompanSlideInTransitionFilter(
            transition_duration=TRANSITION_T,
            total_duration=scene_duration,
            fps=FPS, width=w, height=h, maxzoom=MAX_ZOOM,
            preserve_first=batch_mode != BatchMode.non_initial_batch),
            True),
    }
    animationkey = (effect if effect else "") + (transition if transition else "")
    animation = animations.get(animationkey)

    # determines if transition is requested and how to interpret the inputs list
    preserve_first_slide = animation[1] if animation else False
    if batch_mode != BatchMode.non_initial_batch:
        slides = images
        length_t = scene_duration * len(slides)
    elif preserve_first_slide:
        # first slide only seeds the transition; it contributes no scene time
        slides = images
        length_t = scene_duration * (len(slides) - 1)
    else:
        slides = images[1:]
        length_t = scene_duration * len(slides)
    inputs = OrderedDict([(i, "-loop 1") for i in slides])

    # create the video filter chain
    videoseq = FilterChain()
    if animation:
        videoseq.append(animation[0])
    else:
        videoseq.append(
            ImageSlideFilter(duration=scene_duration, width=w, height=h))
    videoseq.append(ConcatFilter(True, "video"))
    applied_filters = videoseq.generate(
        ["%d:v" % i for i in range(len(inputs))])[0]

    # load audio track if requested
    if audio:
        audio_track = _get_audio(length_t, dir)
        # loop the track often enough to cover the video, then trim and fade
        audioseq = FilterChain([
            ReplicateAudioFilter(
                repetitions=int(math.ceil(length_t / float(audio_track[1])))),
            ConcatFilter(is_video=False, outputtag="caf"),
            TrimAudioFilter(length=length_t),
            FadeOutAudioFilter(start=length_t - AUDIO_FADE_OUT_T,
                               length=AUDIO_FADE_OUT_T,
                               outstreamprefix="audio"),
        ])
        # the audio file is appended after all images, hence index len(inputs)
        applied_filters += audioseq.generate(["%d:a" % len(inputs)])[0]
        # add the audio track to the inputs collection
        inputs.update({audio_track[0]: None})

    # build the video
    output = "video.mp4"
    output = dir + "/" + output if dir else output
    ff = FFmpeg(
        executable=ffmpeg,
        global_options=["-y"],
        inputs=inputs,
        outputs={output: "-filter_complex \"" + ";".join(applied_filters)
                 + "\" -map \"[video]\""
                 + (" -map \"[audio]\"" if audio else "")
                 + " -c:v libx264 -pix_fmt yuvj420p -q:v 1"})
    ff.run()
    return output
def _make(images, scene_duration, dir, ffmpeg, width, height, audio, effect,
          transition, batch_mode):
    """Render a slideshow video from *images* via an ffmpeg filter graph.

    The requested *effect*/*transition* pair selects a prebuilt filter chain;
    an optional audio track is looped, trimmed to the video length and faded
    out.  Returns the output file path, or None when *images* is empty.

    :param images: ordered image paths
    :param scene_duration: seconds per slide
    :param dir: output/working directory (falsy -> current directory)
    :param ffmpeg: ffmpeg executable path
    :param width/height: target frame size; None lets ffmpeg derive it (-2)
    :param audio: truthy to attach a background audio track
    :param effect: slide effect name or None
    :param transition: transition name or None
    :param batch_mode: BatchMode; in non_initial_batch the first image is
        only the hand-off frame from the previous batch
    """
    # exit if no images were found
    if not images:
        return None

    scene_duration_f = scene_duration * FPS
    # Snap dimensions down to even values (required by libx264).  Use floor
    # division: on Python 3, '/' returns a float and would not truncate.
    # -2 means "derive from the other dimension, keeping aspect ratio".
    w = width // 2 * 2 if width is not None \
        else -2 if height is not None else OUTPUT_VIDEO_WIDTH
    h = height // 2 * 2 if height is not None \
        else -2 if width is not None else OUTPUT_VIDEO_HEIGHT

    # build the animation dictionary: key -> (filters, first-slide flag)
    animations = {
        "zoompan": (
            CombiningFilter(
                [ZoompanEffectFilter(maxzoom=MAX_ZOOM,
                                     frames=scene_duration_f),
                 ImageSlideFilter(duration=scene_duration,
                                  width=w, height=h)],
                outstreamprefix="zpaf"),
            False),
        "fadeinout": (
            CombiningFilter(
                [FadeTransitionFilter(transition_duration=TRANSITION_T,
                                      total_duration=scene_duration),
                 ImageSlideFilter(duration=scene_duration,
                                  width=w, height=h)],
                outstreamprefix="faf"),
            False),
        "zoompanfadeinout": (
            CombiningFilter(
                [ZoompanEffectFilter(maxzoom=MAX_ZOOM,
                                     frames=scene_duration_f),
                 FadeTransitionFilter(transition_duration=TRANSITION_T,
                                      total_duration=scene_duration),
                 ImageSlideFilter(duration=scene_duration,
                                  width=w, height=h)],
                outstreamprefix="zpfaf"),
            False),
        "slidein": (
            FilterChain(
                [ImageSlideFilter(duration=scene_duration,
                                  width=w, height=h),
                 SlideTransitionFilter(
                     transition_duration=TRANSITION_T,
                     preserve_first=batch_mode != BatchMode.non_initial_batch)]),
            True),
        "zoompanslidein": (
            ZoompanSlideInTransitionFilter(
                transition_duration=TRANSITION_T,
                total_duration=scene_duration,
                fps=FPS, width=w, height=h, maxzoom=MAX_ZOOM,
                preserve_first=batch_mode != BatchMode.non_initial_batch),
            True),
    }
    animationkey = (effect if effect else "") + (transition if transition else "")
    animation = animations.get(animationkey)

    # determines if transition is requested and how to interpret the inputs list
    preserve_first_slide = animation[1] if animation else False
    if batch_mode != BatchMode.non_initial_batch:
        slides = images
        length_t = scene_duration * len(slides)
    elif preserve_first_slide:
        # the first slide is transition-only and adds no scene time
        slides = images
        length_t = scene_duration * (len(slides) - 1)
    else:
        slides = images[1:]
        length_t = scene_duration * len(slides)
    inputs = OrderedDict([(i, "-loop 1") for i in slides])

    # create the video filter chain
    videoseq = FilterChain()
    if animation:
        videoseq.append(animation[0])
    else:
        videoseq.append(
            ImageSlideFilter(duration=scene_duration, width=w, height=h))
    videoseq.append(ConcatFilter(True, "video"))
    applied_filters = videoseq.generate(
        ["%d:v" % i for i in range(len(inputs))])[0]

    # load audio track if requested
    if audio:
        audio_track = _get_audio(length_t, dir)
        # repeat the track to cover the full video, then trim and fade out
        audioseq = FilterChain([
            ReplicateAudioFilter(
                repetitions=int(math.ceil(length_t / float(audio_track[1])))),
            ConcatFilter(is_video=False, outputtag="caf"),
            TrimAudioFilter(length=length_t),
            FadeOutAudioFilter(start=length_t - AUDIO_FADE_OUT_T,
                               length=AUDIO_FADE_OUT_T,
                               outstreamprefix="audio"),
        ])
        # audio is appended after all image inputs, hence stream len(inputs)
        applied_filters += audioseq.generate(["%d:a" % len(inputs)])[0]
        # add the audio track to the inputs collection
        inputs.update({audio_track[0]: None})

    # build the video
    output = "video.mp4"
    output = dir + "/" + output if dir else output
    ff = FFmpeg(
        executable=ffmpeg,
        global_options=["-y"],
        inputs=inputs,
        outputs={output: "-filter_complex \"" + ";".join(applied_filters)
                 + "\" -map \"[video]\""
                 + (" -map \"[audio]\"" if audio else "")
                 + " -c:v libx264 -pix_fmt yuvj420p -q:v 1"})
    ff.run()
    return output