Example #1
import datetime
import math
import os
import re
import time
import uuid
from collections import OrderedDict

# FFmpeg is the project's command wrapper (its run() returns ffmpeg's console
# output, which is parsed below); FilterChain, the *Filter classes,
# _download_file_list, _get_audio and AUDIO_FADE_OUT_T are further
# module-level names from the source project.

def concat_videos(video_list, outdir=None, ffmpeg='ffmpeg', audio=True):
    out_dir = outdir if outdir else os.path.dirname(os.path.realpath(__file__))
    videos = _download_file_list(video_list, out_dir)
    if not videos:
        return None
    
    # write the list file consumed by ffmpeg's concat demuxer,
    # one "file '<path>'" line per downloaded clip
    file_name = os.path.normpath(os.path.join(out_dir, str(uuid.uuid4())))
    with open(file_name, 'w') as list_file:
        for video in videos:
            list_file.write("file '" + video + "'\n")

    # concatenate the clips without re-encoding (-c copy)
    output = os.path.normpath(os.path.join(out_dir, "video.mp4"))
    ff = FFmpeg(
        executable=ffmpeg,
        global_options=["-y", "-f", "concat", "-safe", "0",
                        "-protocol_whitelist", "file,http,https,tcp,tls"],
        inputs={file_name: None},
        outputs={output: "-c copy"}
    )
    # print(ff.cmd)  # uncomment to inspect the generated command line
    out = ff.run()

    # if a background audio track is requested, measure the total movie
    # duration and pick a matching audio file
    if audio:
        # the last "time=" stamp in ffmpeg's progress output is the total duration
        length = time.strptime(re.findall(r"(?<=time=)[0-9.:]+", out)[-1], "%H:%M:%S.%f")
        length_t = datetime.timedelta(hours=length.tm_hour,
                                      minutes=length.tm_min,
                                      seconds=length.tm_sec).total_seconds()
        inputs = OrderedDict([(output, None)])
        applied_filters = ["[0:v]null[video]"]
        audio_track = _get_audio(length_t, out_dir)
        # build the audio filter chain: loop the track until it covers the
        # video, trim it to the exact length and fade it out at the end
        audioseq = FilterChain([
            ReplicateAudioFilter(repetitions=int(math.ceil(length_t / float(audio_track[1])))),
            ConcatFilter(is_video=False, outputtag="caf"),
            TrimAudioFilter(length=length_t),
            FadeOutAudioFilter(start=length_t - AUDIO_FADE_OUT_T,
                               length=AUDIO_FADE_OUT_T,
                               outstreamprefix="audio")
        ])
        applied_filters += audioseq.generate(["1:a"])[0]
        # add the audio track to the inputs collection
        inputs.update({audio_track[0]: None})

        # mux the concatenated video with the prepared audio track
        output = os.path.normpath(os.path.join(out_dir, "videoa.mp4"))
        ff = FFmpeg(
            executable=ffmpeg,
            global_options=["-y"],
            inputs=inputs,
            outputs={output: '-filter_complex "' + ";".join(applied_filters)
                             + '" -map "[video]" -map "[audio]"'}
        )
        # print(ff.cmd)  # uncomment to inspect the generated command line
        ff.run()

    return output
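
A minimal usage sketch (not part of the source listing): the clip URLs and the output directory below are illustrative placeholders, and _download_file_list is assumed to accept them. The intermediate list file written above follows ffmpeg's concat-demuxer syntax, one "file '<path>'" line per clip.

# Hypothetical call; every value below is a placeholder.
clips = [
    "https://example.com/clips/intro.mp4",
    "https://example.com/clips/main.mp4",
]
result = concat_videos(clips, outdir="/tmp/render", audio=False)
# result is the path to video.mp4 inside outdir, or None if no clip was downloaded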
Example #2
# Same stdlib imports as in Example #1. FPS, MAX_ZOOM, TRANSITION_T,
# OUTPUT_VIDEO_WIDTH, OUTPUT_VIDEO_HEIGHT, BatchMode and the filter classes
# are module-level names from the source project.

def _make(images, scene_duration, out_dir, ffmpeg, width, height, audio,
          effect, transition, batch_mode):
    # exit if no images were found
    if not images:
        return None

    scene_duration_f = scene_duration * FPS
    # round the requested size down to an even number (libx264 requires even
    # dimensions); -2 lets ffmpeg derive the missing side, keeping the aspect ratio
    w = width // 2 * 2 if width is not None else -2 if height is not None else OUTPUT_VIDEO_WIDTH
    h = height // 2 * 2 if height is not None else -2 if width is not None else OUTPUT_VIDEO_HEIGHT

    # build the animation dictionary of filters and first-slide handling flag
    animations = {
        "zoompan": (
            CombiningFilter([
                ZoompanEffectFilter(maxzoom=MAX_ZOOM, frames=scene_duration_f),
                ImageSlideFilter(duration=scene_duration, width=w, height=h)
            ], outstreamprefix="zpaf"),
            False),
        "fadeinout": (
            CombiningFilter([
                FadeTransitionFilter(transition_duration=TRANSITION_T,
                                     total_duration=scene_duration),
                ImageSlideFilter(duration=scene_duration, width=w, height=h)
            ], outstreamprefix="faf"),
            False),
        "zoompanfadeinout": (
            CombiningFilter([
                ZoompanEffectFilter(maxzoom=MAX_ZOOM, frames=scene_duration_f),
                FadeTransitionFilter(transition_duration=TRANSITION_T,
                                     total_duration=scene_duration),
                ImageSlideFilter(duration=scene_duration, width=w, height=h)
            ], outstreamprefix="zpfaf"),
            False),
        "slidein": (
            FilterChain([
                ImageSlideFilter(duration=scene_duration, width=w, height=h),
                SlideTransitionFilter(transition_duration=TRANSITION_T,
                                      preserve_first=batch_mode != BatchMode.non_initial_batch)
            ]),
            True),
        "zoompanslidein": (
            ZoompanSlideInTransitionFilter(transition_duration=TRANSITION_T,
                                           total_duration=scene_duration,
                                           fps=FPS, width=w, height=h,
                                           maxzoom=MAX_ZOOM,
                                           preserve_first=batch_mode != BatchMode.non_initial_batch),
            True)
    }
    animation_key = (effect or "") + (transition or "")
    animation = animations.get(animation_key)

    # determine whether the transition preserves the first slide and how to
    # interpret the inputs list
    preserve_first_slide = animation[1] if animation else False
    if batch_mode != BatchMode.non_initial_batch:
        slides = images
        length_t = scene_duration * len(slides)
    elif preserve_first_slide:
        slides = images
        length_t = scene_duration * (len(slides) - 1)
    else:
        slides = images[1:]
        length_t = scene_duration * len(slides)

    inputs = OrderedDict([(i, "-loop 1") for i in slides])

    # create the video filter chain
    videoseq = FilterChain()
    if animation:
        videoseq.append(animation[0])
    else:
        videoseq.append(ImageSlideFilter(duration=scene_duration, width=w, height=h))
    videoseq.append(ConcatFilter(True, "video"))
    applied_filters = videoseq.generate(["%d:v" % i for i in range(len(inputs))])[0]

    # load an audio track if requested
    if audio:
        audio_track = _get_audio(length_t, out_dir)
        # build the audio filter chain: loop the track until it covers the
        # video, trim it to the exact length and fade it out at the end
        audioseq = FilterChain([
            ReplicateAudioFilter(repetitions=int(math.ceil(length_t / float(audio_track[1])))),
            ConcatFilter(is_video=False, outputtag="caf"),
            TrimAudioFilter(length=length_t),
            FadeOutAudioFilter(start=length_t - AUDIO_FADE_OUT_T,
                               length=AUDIO_FADE_OUT_T,
                               outstreamprefix="audio")
        ])
        # the audio input is appended after all image inputs, hence index len(inputs)
        applied_filters += audioseq.generate(["%d:a" % len(inputs)])[0]
        # add the audio track to the inputs collection
        inputs.update({audio_track[0]: None})

    # build the video
    output = os.path.join(out_dir, "video.mp4") if out_dir else "video.mp4"
    ff = FFmpeg(
        executable=ffmpeg,
        global_options=["-y"],
        inputs=inputs,
        outputs={output: '-filter_complex "' + ";".join(applied_filters)
                         + '" -map "[video]"'
                         + (' -map "[audio]"' if audio else '')
                         + " -c:v libx264 -pix_fmt yuvj420p -q:v 1"}
    )
    # print(ff.cmd)  # uncomment to inspect the generated command line
    ff.run()
    return output
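
A minimal usage sketch (not part of the source listing; paths and settings are placeholders). Note that _make only compares batch_mode against BatchMode.non_initial_batch, so any other value (None is used here purely for illustration) selects the full-batch path.

# Hypothetical call; every value below is a placeholder.
slides = ["img/001.jpg", "img/002.jpg", "img/003.jpg"]
video_path = _make(slides, scene_duration=3, out_dir="/tmp/render",
                   ffmpeg="ffmpeg", width=1280, height=None, audio=False,
                   effect="zoompan", transition=None, batch_mode=None)
# -> /tmp/render/video.mp4, each slide shown for 3 seconds with a zoom-pan effect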