Example No. 1
def create_image_slideshow_fade(images,
                                output_filename,
                                seconds,
                                verbose=True):
    """Creates a simple image slideshow with fade transition"""
    # Canvas size: the max (width, height) tuple across all inputs.
    all_dimensions = [get_dimensions(img) for img in images]
    width, height = max(all_dimensions)
    part1 = []             # -loop/-t/-i input flags, one set per image
    part2, part3 = '', ''  # per-input scale/pad/fade filters and the final concat
    if len(images) > 2:
        for (idx, i) in enumerate(images):
            part1 += (f'-loop 1 -t {seconds} -i'.split() + [i])
            if idx == 0:
                part2 += f'[0:v]scale={width}:{height}:force_original_aspect_ratio=decrease,pad={width}:{height}:(ow-iw)/2:(oh-ih)/2,setsar=1,fade=t=out:st={seconds - 0.75}:d=1[v0];'
            else:
                part2 += f'[{idx}:v]scale={width}:{height}:force_original_aspect_ratio=decrease,pad={width}:{height}:(ow-iw)/2:(oh-ih)/2,setsar=1,fade=t=in:st=0:d=1,fade=t=out:st={seconds - 0.75}:d=1[v{idx}];'
            part3 += f'[v{idx}]'
        part3 += f'concat=n={len(images)}:v=1:a=0,format=yuv420p[v]'
    elif len(images) == 2:
        # TODO: easiest to create two image videos and concat?
        raise NotImplementedError()
    elif len(images) == 1:
        create_image_video(images[0], output_filename, seconds, verbose=verbose)
        return  # single-image case is fully handled above
    else:
        raise ValueError("No images for slideshow")
    call_ffmpeg(
        f'ffmpeg {" ".join(part1)} -filter_complex {part2 + part3} -map [v] {output_filename}',
        verbose=verbose)
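Every example on this page leans on helpers (call_ffmpeg, get_dimensions, get_duration) that are not shown here. A minimal sketch of what they might look like, assuming ffmpeg/ffprobe are on the PATH and that call_ffmpeg simply shells out and appends -y, as the commented-out subprocess calls in the later examples suggest; the exact signatures are assumptions:

import shlex
import subprocess


def call_ffmpeg(cmd, verbose=True):
    # Split the command like a shell would and append -y to overwrite outputs.
    args = shlex.split(cmd) + ['-y']
    if not verbose:
        # Same quiet flags the commented-out subprocess calls use.
        args[1:1] = ['-hide_banner', '-loglevel', 'panic']
    subprocess.run(args, check=True)


def get_dimensions(media_file):
    # ffprobe prints "width,height" for the first video stream.
    out = subprocess.run(
        ['ffprobe', '-v', 'error', '-select_streams', 'v:0',
         '-show_entries', 'stream=width,height', '-of', 'csv=p=0', media_file],
        capture_output=True, text=True, check=True).stdout
    width, height = out.strip().split(',')[:2]
    return int(width), int(height)


def get_duration(media_file):
    # ffprobe prints the container duration in seconds.
    out = subprocess.run(
        ['ffprobe', '-v', 'error', '-show_entries', 'format=duration',
         '-of', 'default=noprint_wrappers=1:nokey=1', media_file],
        capture_output=True, text=True, check=True).stdout
    return float(out.strip())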
Example No. 2
def extract_audio(video_file, output_file, time1=None, time2=None):
    """Creates audio file from video and timestamps"""
    if time1:
        ss_str = f'-ss {time1} '
    if time2:
        to_str = f'-to {time2} '
    call_ffmpeg(
        f'ffmpeg -i {video_file} {ss_str}{to_str}-c:a libmp3lame {output_file}',
        verbose=True)
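A usage sketch, assuming the helpers above are in scope; the file names are hypothetical:

# Extract the audio between 1:00 and 2:30 of a talk.mp4 into an MP3.
extract_audio('talk.mp4', 'talk_clip.mp3', time1='00:01:00', time2='00:02:30')

# Omitting both timestamps converts the whole soundtrack.
extract_audio('talk.mp4', 'talk_full.mp3')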
Example No. 3
def blur_video(filePath, output_filename, resolution=None, debugging=False):
    """Creates a video with blurred sides (for clips that are not the same width as the final video's width.
    This option works well for now, but takes the resolution from the first video"""
    call_ffmpeg(
        f'ffmpeg -i {filePath} -filter_complex ' +
        r"[0:v]scale=ih*16/9:-1,boxblur=luma_radius=min(h\,w)/20:luma_power=1:chroma_radius=min(cw\,ch)/20:chroma_power=1[bg];[bg][0:v]overlay=(W-w)/2:(H-h)/2,crop='if(gte(dar,16/9),ih*16/9,iw)':'if(gte(dar,16/9),ih,iw*9/16)' "
        + output_filename,
        verbose=True)
    if not debugging:
        os.remove(filePath)
    return output_filename
Example No. 4
def combine_audio_video(audioPath,
                        videoPath,
                        output_filename,
                        debugging=False):
    """Combining audio and video into one video file. Shorter duration of video/audio determines the final video's duration"""
    call_ffmpeg(
        f'ffmpeg -i {audioPath} -i {videoPath} -codec copy -shortest {output_filename}',
        verbose=True)

    if not debugging:
        os.remove(audioPath)
        os.remove(videoPath)
    return output_filename
Example No. 5
def create_scrolling_image(image, output_filename, seconds, direction):
    """Creates horizontal video to scroll through a vertical image."""
    (width, height) = get_dimensions(image)
    adj_height = int(width * 9.0 / 16)
    dimensions = str(width) + 'x' + str(adj_height)  # horizontal dimensions
    speed = int((height - adj_height * 1.0) / seconds)

    if direction == 'zoomout':
        # subprocess.call(['ffmpeg', '-hide_banner', '-loglevel', 'panic', '-loop', '1', '-t', str(seconds), '-i', image, '-vf', '''"zoompan=z='if(lte(zoom,1.0),1.5,max(1.001,zoom-0.0015))':d=125"''', '-c:v', 'libx264', output_filename,  '-y'])
        call_ffmpeg(
            f'''ffmpeg -loop 1 -t {seconds} -i {image} -vf "zoompan=z='if(lte(zoom,1.0),1.5,max(1.001,zoom-0.0015))':d=125" -c:v libx264 {output_filename}''',
            verbose=True)
    elif direction == 'zoomin':
        # subprocess.call(['ffmpeg', '-hide_banner', '-loglevel', 'panic', '-r', '25', '-i', image, '-filter_complex', "scale=-2:10*ih,zoompan=z='min(zoom+0.0015,1.5)':d=125:x='iw/2-(iw/zoom/2)':y='ih/2-(ih/zoom/2)',scale=-2:720", '-shortest', '-c:v', 'libx264', output_filename,  '-y'])
        # Quarter-resolution canvas for the zoompan output (dimensions already fetched above).
        width = int(width * 0.25)
        height = int(height * 0.25)
        call_ffmpeg(
            f'''ffmpeg -loop 1 -t {seconds} -i {image} -vf zoompan=z='zoom+0.001':s={width}x{height} -c:v libx264 -preset fast {output_filename}''',
            verbose=True)
        #subprocess.call(['ffmpeg', '-loop', '1', '-i', image, '-vf', f"zoompan=z='zoom+0.001:s={width}x{height}", '-c:v', 'libx264', '-preset', 'fast', '-t', str(seconds), '-y', output_filename ])
    elif direction == "down":
        call_ffmpeg(
            f'''ffmpeg -loop 1 -t {seconds} -i {image} -filter_complex color=white:s={dimensions}[bg];[bg][0]overlay=y=-'t*{speed}':shortest=1[video] -r 25/1 -preset ultrafast -map [video] {output_filename}''',
            verbose=True)
        #subprocess.call(['ffmpeg', '-hide_banner', '-loglevel', 'panic', '-loop', '1', '-t', str(seconds), '-i', image, '-filter_complex', 'color=white:s=' + dimensions + "[bg];[bg][0]overlay=y=-'t*" + str(speed) + "':shortest=1[video]", '-r', '25/1', '-preset', 'ultrafast', '-map', '[video]', output_filename,  '-y'])
    else:
        # going up:
        call_ffmpeg(
            f'''ffmpeg -loop 1 -t {seconds} -i {image} -filter_complex color=white:s={dimensions}[bg];[bg][0]overlay=y=main_h-overlay_h+'t*{speed}':shortest=1[video] -r 25/1 -preset ultrafast -map [video] {output_filename}''',
            verbose=True)
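To make the scroll speed concrete, a small self-contained sketch of the same arithmetic for a hypothetical 1080x3000 portrait image scrolled over 10 seconds:

width, height, seconds = 1080, 3000, 10   # hypothetical portrait image

adj_height = int(width * 9.0 / 16)                   # 16:9 window height -> 607
speed = int((height - adj_height * 1.0) / seconds)   # scroll speed in px/s -> 239

print(f'{width}x{adj_height} window, scrolling at {speed}px/s')
# 1080x607 window, scrolling at 239px/s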
Example No. 6
def slowmo(input_vid, output_path, multiplier=2.0, duration=None):
    """Slows a video by a multiplier, or stretches it to fill a target duration."""
    if duration:
        video_dur = get_duration(input_vid)
        if duration > video_dur:
            call_ffmpeg(
                f'ffmpeg -i {input_vid} -filter:v setpts={duration / video_dur}*PTS {output_path}',
                verbose=True)
            #subprocess.call(['ffmpeg', '-hide_banner', '-loglevel', 'panic', '-i', input_vid, '-filter:v', 'setpts=' + str(duration*1.0/video_dur) + '*PTS', output_path, '-y'])
        else:
            shutil.copyfile(input_vid, output_path)
    else:
        call_ffmpeg(
            f'ffmpeg -i {input_vid} -filter:v setpts={multiplier}*PTS {output_path}',
            verbose=True)
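The setpts factor is simply the target duration over the source duration; a quick check of the numbers, assuming a hypothetical 4-second clip that should fill 10 seconds:

video_dur = 4.0   # source clip length in seconds (hypothetical)
duration = 10.0   # desired length in seconds

factor = duration / video_dur   # 2.5 -> setpts=2.5*PTS plays each frame 2.5x longer
print(f'setpts={factor}*PTS')   # setpts=2.5*PTS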
Example No. 7
def combine_videos(files, output_filename, debugging=False):
    """Concatting video files of all dimensions and durations together"""
    def intermediate_file(idx):
        return 'intermediate' + str(idx) + '.ts'

    if len(files) < 400:
        for (idx, f) in enumerate(files):
            call_ffmpeg(
                f'ffmpeg -i {f} -c copy -bsf:v h264_mp4toannexb -f mpegts {intermediate_file(idx)}',
                verbose=True)
        call_ffmpeg(
            f"ffmpeg -i concat:{'|'.join(map(lambda x: intermediate_file(x), range(0,len(files)) ))} -c copy {output_filename}",
            verbose=True)
        #subprocess.call(['ffmpeg', '-y', '-i', 'concat:' + '|'.join(map(lambda x: intermediate_file(x), range(0,len(files)) )), '-c', 'copy', output_filename])

        if not debugging:
            for (idx, _) in enumerate(files):
                os.remove(intermediate_file(idx))
    else:
        # there was an issue with concatenating over 418 files: "The command is too long."
        with open('templist.txt', 'w', encoding='utf-8') as f:
            f.write('\n'.join(map(lambda x: "file '" + x + "'", files)))
        # this seems to work as well, but I get a lot of warning-like messages:
        # os.system('ffmpeg -hide_banner -loglevel panic -f concat -safe 0 -i templist.txt -codec copy ' + output_filename)
        # The concat demuxer flags must come before -i, as in the commented os.system line above.
        call_ffmpeg(
            f'ffmpeg -f concat -safe 0 -i templist.txt -codec copy {output_filename}',
            verbose=True)
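For the long-list branch, templist.txt contains one file '...' line per clip, which the concat demuxer reads. A small sketch of what gets written for three hypothetical clips:

files = ['clip0.mp4', 'clip1.mp4', 'clip2.mp4']   # hypothetical inputs

with open('templist.txt', 'w', encoding='utf-8') as f:
    f.write('\n'.join(f"file '{path}'" for path in files))

# templist.txt now reads:
#   file 'clip0.mp4'
#   file 'clip1.mp4'
#   file 'clip2.mp4'
# and is consumed by: ffmpeg -f concat -safe 0 -i templist.txt -codec copy out.mp4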
Example No. 8
def combine_audio(audio_list,
                  output_path,
                  transition_time=13,
                  debugging=False):
    """Creates a single audio file from a list"""
    temp0 = os.path.join(os.path.dirname(output_path), 'temp0.wav')
    temp1 = os.path.join(os.path.dirname(output_path), 'temp1.wav')

    def temp_file(i):
        # Ping-pong between the two temp files: each pass reads one and writes the other.
        if i % 2 == 0:
            return temp0
        else:
            return temp1

    if len(audio_list) > 2:
        print(audio_list)
        call_ffmpeg(
            f'ffmpeg -i {audio_list[0]} -i {audio_list[1]} -vn -filter_complex acrossfade=d={transition_time}:c1=tri:c2=squ {temp1}',
            verbose=True)
        for i in range(2, len(audio_list) - 1):
            call_ffmpeg(
                f'ffmpeg -i {temp_file(i-1)} -i {audio_list[i]} -vn -filter_complex acrossfade=d={transition_time}:c1=tri:c2=squ {temp_file(i)}',
                verbose=True)
        # final call to convert to mp3
        call_ffmpeg(
            f'ffmpeg -i {temp_file(len(audio_list) - 2)} -i {audio_list[-1]} -vn -filter_complex acrossfade=d={transition_time}:c1=tri:c2=squ {output_path}',
            verbose=True)
    elif len(audio_list) == 2:
        call_ffmpeg(
            f'ffmpeg -i {audio_list[0]} -i {audio_list[1]} -vn -filter_complex acrossfade=d={transition_time}:c1=tri:c2=squ {output_path}',
            verbose=True)
    elif len(audio_list) == 1:
        shutil.copyfile(audio_list[0], output_path)
    else:
        raise ValueError("Empty audio list")

    if not debugging:
        try:
            os.remove(temp0)
            os.remove(temp1)
        except OSError:
            pass

    return output_path
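A usage sketch, assuming the function above is in scope and a few hypothetical WAV tracks exist; with three or more inputs the crossfades are chained pairwise through the two temp files:

tracks = ['intro.wav', 'verse.wav', 'outro.wav']   # hypothetical inputs

# Crossfades intro->verse into a temp file, then temp->outro into the final MP3,
# with each transition lasting 5 seconds.
combine_audio(tracks, 'mixtape.mp3', transition_time=5)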
Example No. 9
def create_image_video(image, output_filename, seconds, verbose=True):
    """creates video from single image. seconds determines length of video"""
    call_ffmpeg(
        f'ffmpeg -loop 1 -i {image} -c:v libx264 -t {seconds} -pix_fmt yuv420p -vf pad=ceil(iw/2)*2:ceil(ih/2)*2 {output_filename}',
        verbose=verbose)
Example No. 10
def trim_video(inp, outp, start_time, end_time, verbose=True):
    """Trims a single video"""
    call_ffmpeg(f'ffmpeg -i {inp} -ss {start_time} -to {end_time} {outp}',
                verbose)
Example No. 11
def merge_audio(audio_file1, audio_file2, output_file, vol1=1.0, vol2=1.0):
    """Merges two audios into one. option to adjust volumes for both audio"""
    call_ffmpeg(
        f'ffmpeg -i {audio_file1} -i {audio_file2} -filter_complex [0:0]volume={vol1}[a];[1:0]volume={vol2}[b];[a][b]amix=inputs=2:duration=longest -c:a libmp3lame {output_file}',
        verbose=True)
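A usage sketch, assuming the function above is in scope; the file names are hypothetical:

# Keep the narration at full volume and duck the background music to 20%.
merge_audio('narration.mp3', 'music.mp3', 'narrated_mix.mp3', vol1=1.0, vol2=0.2)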