Example #1
def rotate_stream(filename, rotation):
    """
    Rotate stream so that it looks like the rotation of the GUI
    The rotation value as seen on the GUI is defined as the following:
    0: No rotation
    1: 90 degrees clockwise rotation
    2: 180 degrees rotation
    3: 90 degrees counterclockwise rotation
    """

    if 'jpg' in filename:
        temp_filename = filename + '.jpg'
    elif 'mp4' in filename:
        temp_filename = filename + '.mp4'
    else:
        raise ValueError('Unsupported file type: %s' % filename)

    stream = ffmpeg.input(filename)
    # 90-degree turns use transpose; a true 180-degree rotation needs
    # both hflip and vflip (vflip alone only mirrors the image vertically)
    if rotation == 0:
        pass
    elif rotation == 1:
        stream = ffmpeg.filter_(stream, 'transpose', 1)
    elif rotation == 2:
        stream = ffmpeg.filter_(stream, 'hflip')
        stream = ffmpeg.filter_(stream, 'vflip')
    elif rotation == 3:
        stream = ffmpeg.filter_(stream, 'transpose', 2)
    else:
        raise ValueError('Rotation value is not valid')

    stream = ffmpeg.output(stream, temp_filename)
    ffmpeg.run(stream)
    os.remove(filename)
    os.rename(temp_filename, filename)
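
A minimal usage sketch, assuming the function above and its ffmpeg/os imports are in scope (the file names and rotation values here are purely illustrative):

# hypothetical files; rotation values follow the GUI mapping in the docstring
rotate_stream('photo.jpg', 2)  # rotate a JPEG 180 degrees, in place
rotate_stream('clip.mp4', 1)   # rotate an MP4 90 degrees clockwise, in place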
Example #2
def make_gif_with_subtitles(video_path, subtitle_path, start_ms, end_ms):
    start_s = str(start_ms / 1000)
    end_s = str(end_ms / 1000)
    duration = str((end_ms - start_ms) / 1000)
    vres = current_app.config.get('GIF_VRES')

    # Get color palette for the highest quality
    pstream = ffmpeg.input(video_path, ss=start_s, t=duration)
    pstream = ffmpeg.filter_(pstream, 'scale', -1, vres)
    pstream = ffmpeg_subtitles_filter(pstream, subtitle_path, start_ms)
    pstream = ffmpeg.filter_(pstream, 'palettegen', stats_mode='full')

    # Create the actual GIF
    gstream = ffmpeg.input(video_path, ss=start_s)
    gstream = ffmpeg.filter_(gstream, 'scale', -1, vres)
    gstream = ffmpeg_subtitles_filter(gstream, subtitle_path, start_ms)
    gstream = ffmpeg_paletteuse_filter(gstream,
                                       pstream,
                                       dither='bayer',
                                       bayer_scale=5,
                                       diff_mode='rectangle')
    gstream = ffmpeg.output(gstream,
                            'pipe:1',
                            format='gif',
                            t=duration,
                            threads=1)
    return ffmpeg_run_stdout(gstream)
Example #3
    def _establish_cmd(self, scenes: List[Scene]):
        inputfile = self.input_media_path.as_posix()
        outputfile = self.output_media_path.as_posix()
        stream = ffmpeg.input(inputfile)
        video_streams = []
        audio_streams = []
        for scene in scenes:
            start = scene.get_startat()
            duration = scene.get_interval()
            # cut each scene from the shared input and reset its timestamps
            v_clip_stream = ffmpeg.trim(
                stream, start=start, duration=duration)
            v_clip_stream = ffmpeg.setpts(v_clip_stream, 'PTS-STARTPTS')
            a_clip_stream = ffmpeg.filter_(
                stream, 'atrim', start=start, duration=duration)
            a_clip_stream = ffmpeg.filter_(
                a_clip_stream, 'asetpts', 'PTS-STARTPTS')
            video_streams.append(v_clip_stream)
            audio_streams.append(a_clip_stream)
        # concatenate the video and audio clips separately, then mux both
        v_stream = ffmpeg.concat(
            *video_streams, n=len(video_streams), v=1, a=0)
        a_stream = ffmpeg.concat(
            *audio_streams, n=len(audio_streams), v=0, a=1)
        stream = ffmpeg.output(
            v_stream, a_stream, outputfile, **self.CONFIG_720P)
        # ffmpeg.view(stream)  # Debug
        self.stream = stream
        return ' '.join(ffmpeg.compile(stream))
Example #4
def test_custom_filter():
    stream = ffmpeg.input('dummy.mp4')
    stream = ffmpeg.filter_(stream, 'custom_filter', 'a', 'b', kwarg1='c')
    stream = ffmpeg.output(stream, 'dummy2.mp4')
    assert stream.get_args() == [
        '-i', 'dummy.mp4', '-filter_complex',
        '[0]custom_filter=a:b:kwarg1=c[s0]', '-map', '[s0]', 'dummy2.mp4'
    ]
Example #5
def ffmpeg_subtitles_filter(stream, subtitle_path, start_ms):
    font_dir = current_app.config.get('FF_FONT_DIR', None)
    font_name = current_app.config.get('FF_FONT_NAME', None)
    font_size = current_app.config.get('FF_FONT_SIZE', 24)

    # shift timestamps forward by the seek offset so the subtitles filter
    # lines up with the original timeline; they are reset again below
    stream = ffmpeg.setpts(stream, 'PTS+%f/TB' % (start_ms / 1000))
    sargs = {'force_style': 'Fontsize=%d' % font_size}
    if font_dir is not None:
        sargs['fontsdir'] = str(font_dir)
    if font_name is not None:
        sargs['force_style'] += ',FontName=%s' % font_name
    stream = ffmpeg.filter_(stream, 'subtitles', str(subtitle_path), **sargs)
    stream = ffmpeg.setpts(stream, 'PTS-STARTPTS')
    return stream
Example #6
def diagonal(cam_path, pc_path, out_path, preset='veryfast', overlap=(0, 0)):
    cam_crop = detect_crop(cam_path)
    pc_crop = detect_crop(pc_path)

    cam_stream = ffmpeg.input(cam_path).filter('crop', *cam_crop)
    pc_stream = ffmpeg.input(pc_path).filter('crop', *pc_crop)

    # offset the PC capture so it sits diagonally after the cam, minus overlap
    xo, yo = cam_crop[0] - overlap[0], cam_crop[1] - overlap[1]
    layout = '0_0|{}_{}'.format(xo, yo)

    # Calculate the total width/height of the video
    tw = cam_crop[0] + pc_crop[0] - overlap[0]
    th = cam_crop[1] + pc_crop[1] - overlap[1]
    # Pad the stream in the top-left corner to add a black background
    cam_padded = cam_stream.filter('pad', tw, th, 0, 0)

    ffmpeg.filter_([cam_padded, pc_stream], 'xstack', layout=layout).output(
        out_path, **{
            'map': '1:a',
            'preset': preset,
            'crf': 23,
            'format': 'mp4'
        }).overwrite_output().global_args('-threads', '0').run()
Example #7
	def build(self, data):
		input_path = os.path.join(self.tempDir, data)

		name = os.path.splitext(data)[0]

		# Get output folder
		outputDir = os.path.join(self.tempDir, name)
		print(outputDir)

		if not os.path.isdir(outputDir):
			os.makedirs(outputDir)

		# grab one frame every four seconds and save them as numbered PNGs
		stream = ffmpeg.input(input_path)
		stream = ffmpeg.filter_(stream, 'fps', fps=0.25, round='up')
		stream = ffmpeg.output(stream, os.path.join(outputDir, '%04d.png'))
		ffmpeg.run(stream)
Example #8
def alterTempo(src, original_tempo, goal):
    factor = float(goal) / original_tempo
    print("\nGoal Tempo: " + str(goal))
    print("Altering tempo by factor of " + str(factor) + "...\n\n")

    filename, filetype = os.path.splitext(src)
    new_name = filename + "_" + str(goal) + filetype
    # windows shell command
    # command = 'ffmpeg -i %s -filter:a "atempo=%f" -vn %s' %(src, factor, new_name)
    # os.system('cmd /c "%s"'%command)

    # using ffmpeg-python
    stream = ffmpeg.input(src)
    stream = ffmpeg.filter_(stream, 'atempo', factor)
    output = ffmpeg.output(stream, new_name)
    ffmpeg.run(output)

    return new_name
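
Note that ffmpeg's atempo filter only accepts factors within a limited range (0.5 to 2.0 on older builds, 0.5 to 100.0 on recent ones), so a large tempo change is usually split into several chained stages. A minimal sketch of such a helper, not part of the original code, assuming ffmpeg-python:

def chained_atempo(stream, factor):
    # apply 'atempo' repeatedly so every stage stays within [0.5, 2.0]
    while factor > 2.0:
        stream = ffmpeg.filter_(stream, 'atempo', 2.0)
        factor /= 2.0
    while factor < 0.5:
        stream = ffmpeg.filter_(stream, 'atempo', 0.5)
        factor /= 0.5  # i.e. factor *= 2
    return ffmpeg.filter_(stream, 'atempo', factor)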
Example #9
def make_webm(video_path, start_ms, end_ms):
    start_s = str(start_ms / 1000)
    end_s = str(end_ms / 1000)
    duration = str((end_ms - start_ms) / 1000)
    vres = current_app.config.get('WEBM_VRES')

    stream = ffmpeg.input(video_path, ss=start_s)
    stream = ffmpeg.filter_(stream, 'scale', -1, vres)
    stream = ffmpeg.output(
        stream, 'pipe:1', **{
            'format': 'webm',
            't': duration,
            'an': None,
            'sn': None,
            'c:v': 'libvpx-vp9',
            'crf': 35,
            'b:v': '1000k',
            'cpu-used': 2,
            'threads': 1
        })
    return ffmpeg_run_stdout(stream)
Example #10
    def __enter__(self):
        global_args = []
        ffinput = ffmpeg.input('pipe:',
                               framerate=self.fps,
                               format='rawvideo',
                               pix_fmt='rgb32',
                               hwaccel='auto',
                               s='{}x{}'.format(self.width, self.height))

        if self.quiet:
            global_args = ['-hide_banner', '-nostats', '-loglevel', 'fatal']

        if self.multi_output:
            # split the input three ways: main output, alpha matte, key video
            split = ffinput.filter_multi_output('split', 3)
            raw_alpha = ffmpeg.filter_(
                split.stream(1), 'lutrgb', **{
                    'r': 'maxval',
                    'g': 'maxval',
                    'b': 'maxval'
                })
            key_video = ffmpeg.overlay(split.stream(2), raw_alpha)
            out_key = key_video.output(self.key_out_filename)
            out_main = split.stream(0).output(self.out_filename,
                                              **self.options)

            self.proc = (ffmpeg
                         .merge_outputs(out_main, out_key)
                         .global_args(*global_args)
                         .run_async(pipe_stdin=True))

        else:
            self.proc = (ffinput
                         .output(self.out_filename, **self.options)
                         .overwrite_output()
                         .global_args(*global_args)
                         .run_async(pipe_stdin=True))
        return self.proc
Example #11
def generateCRT(stream):
    # Reduce input to 25% PAL resolution
    stream = stream.filter_("scale", w=-2, h=144)
    # Crop to 4:3 aspect ratio at 25% PAL resolution
    stream = stream.filter_("crop", w=180, h=144)

    # Create RGB chromatic aberration
    streams = stream.filter_multi_output("split", 3)
    stream0 = (streams[0].filter_("lutrgb", g="0", b="0")
               .filter_("scale", w=188, h=144)
               .filter_("crop", w=180, h=144))
    stream1 = (streams[1].filter_("lutrgb", r="0", b="0")
               .filter_("scale", w=184, h=144)
               .filter_("crop", w=180, h=144))
    stream2 = (streams[2].filter_("lutrgb", r="0", g="0")
               .filter_("scale", w=180, h=144)
               .filter_("crop", w=180, h=144))

    stream = ffmpeg.filter_(
        [ffmpeg.filter_([stream0, stream2], "blend", all_mode="addition"),
         stream1],
        "blend", all_mode="addition").filter_("format", "gbrp")

    # Create YUV chromatic aberration
    streams = stream.filter_multi_output("split", 3)
    stream0 = (streams[0].filter_("lutyuv", u="0", v="0")
               .filter_("scale", w=192, h=144)
               .filter_("crop", w=180, h=144))
    stream1 = (streams[1].filter_("lutyuv", y="0", v="0")
               .filter_("scale", w=188, h=144)
               .filter_("crop", w=180, h=144))
    stream2 = (streams[2].filter_("lutyuv", y="0", u="0")
               .filter_("scale", w=180, h=144)
               .filter_("crop", w=180, h=144))

    stream = ffmpeg.filter_(
        [ffmpeg.filter_([stream0, stream2], "blend", all_mode="lighten"),
         stream1],
        "blend", all_mode="lighten")

    # Create edge contour effect
    #    stream = stream.filter_("edgedetect", mode="colormix", high=0)

    # Add noise to each frame of input
    stream = stream.filter_("noise", c0s=8, allf="t")

    # Add interlaced fields effect to input
    streams = stream.filter_multi_output("split")
    stream0 = streams[0].filter_("format", "yuv420p").filter_("curves",
                                                              preset="darker")
    stream = ffmpeg.filter_([stream0, streams[1]],
                            "blend",
                            all_expr="if(eq(0,mod(Y,2)),A,B)")

    # Re-scale input to full PAL resolution
    stream = stream.filter_("scale", w=720, h=576)

    fontfile = "/usr/share/fonts/truetype/freefont/FreeSerif.ttf"

    # Add magnetic damage effect to input [crt screen]
    magnetic_damage = ffmpeg.input("input/PAL.png", loop=1)
    #    magnetic_damage = ffmpeg.input("nullsrc=s=720x576", f="lavfi")
    magnetic_damage = magnetic_damage.filter_("drawtext",
                                              fontfile=fontfile,
                                              text="@",
                                              x=600,
                                              y=30,
                                              fontsize=170,
                                              fontcolor="white@0.5")  # color@alpha assumed; the original value was garbled by scraping
    magnetic_damage = magnetic_damage.filter_("boxblur", 80)
    stream = ffmpeg.filter_([magnetic_damage, stream],
                            "blend",
                            all_mode="screen",
                            shortest=1)

    tmppal = ffmpeg.input("input/PAL.png", loop=1)
    #    tmppal = ffmpeg.input("nullsrc=s=720x576", f="lavfi")
    tmppal = tmppal.filter_("format", "gbrp")
    tmppal = tmppal.filter_multi_output("split")

    # Add reflections to input [crt screen]
    reflections = tmppal[0]
    reflections = reflections.filter_("drawtext",
                                      fontfile=fontfile,
                                      text="€",
                                      x=50,
                                      y=50,
                                      fontsize=150,
                                      fontcolor="white")
    reflections = reflections.filter_("drawtext",
                                      fontfile=fontfile,
                                      text="J",
                                      x=600,
                                      y=460,
                                      fontsize=120,
                                      fontcolor="white")
    reflections = reflections.filter_("boxblur", 25)
    stream = ffmpeg.filter_([reflections, stream],
                            "blend",
                            all_mode="screen",
                            shortest=1)

    # Add more detailed highlight to input [crt screen]
    highlights = tmppal[1]
    highlights = highlights.filter_("drawtext",
                                    fontfile=fontfile,
                                    text="¡",
                                    x=80,
                                    y=60,
                                    fontsize=90,
                                    fontcolor="white")
    highlights = highlights.filter_("boxblur", 7)

    stream = ffmpeg.filter_([highlights, stream],
                            "blend",
                            all_mode="screen",
                            shortest=1)

    # Curve input to mimic curve of crt screen
    stream = stream.filter_("vignette").filter_("format", "gbrp").filter_(
        "lenscorrection", k1=0.2, k2=0.2)

    # Add bloom effect to input [crt screen]
    streams = stream.filter_multi_output("split")

    stream1 = streams[1].filter_("boxblur", 26)
    stream1 = stream1.filter_("format", "gbrp")

    stream = ffmpeg.filter_([stream1, streams[0]],
                            "blend",
                            all_mode="screen",
                            shortest=1)

    return stream
Example #12
# -*- coding: utf-8 -*-

import ffmpeg

stream = ffmpeg.input('temp/pipaek.avi')
# stream = ffmpeg.input('webcam')
stream = ffmpeg.filter_(stream,
                        'drawtext',
                        fontfile="fonts/hack/Hack-Regular.ttf",
                        text="%{pts}",
                        box='1',
                        boxcolor='0x00000000@1',
                        fontcolor='white')
stream = ffmpeg.output(stream, 'temp/output6.mp4')
ffmpeg.run(stream)

# import cv2
#
# cap = cv2.VideoCapture(0)
#
# fourcc = cv2.VideoWriter_fourcc(*'DIVX')
# out = cv2.VideoWriter('output.avi', fourcc, 25.0, (640,480))
#
# while (cap.isOpened()):
#     ret, frame = cap.read()
#
#     if ret:
#         # flip the image: 0 = up/down, 1 = left/right
#         frame = cv2.flip(frame, 1)
#
#         out.write(frame)
Example #13
def alterTempo(src, original_tempo, goal):
    factor = float(goal) / original_tempo
    print("\nGoal Tempo: " + str(goal))
    print("Altering tempo by factor of " + str(factor) + "...\n\n")

    # local files
    filename, filetype = os.path.splitext(src)
    new_name = filename + "_" + floatToString(goal)

    while os.path.isfile(new_name + filetype):
        print(new_name + " is taken")
        new_name += "_new"

    new_name += filetype

    try:
        stream = ffmpeg.input(src)
        stream = ffmpeg.filter_(stream, 'atempo', factor)
        output = ffmpeg.output(stream, new_name)
        ffmpeg.run(output)
    except Exception as e:
        print("Conversion Error: ", e)
    else:
        # remove the original file only if the conversion succeeded
        os.remove(src)

    #s3 bucket

    #src_filename is of the form http://bucket-name.../name.wav
    # src_filename, filetype = os.path.splitext(src)
    # #name
    # src_filename = ntpath.basename(src_filename)
    # #name_120
    # filename = src_filename + "_" + floatToString(goal)

    # #if filename is already in the bucket, append "_new" to the end
    # s3_files = [file["Key"] for file in s3.list_objects(Bucket=bucket_name)["Contents"]]
    # while (filename + filetype) in s3_files:
    # 	filename += "_new"

    # #name_120.wav
    # filename += filetype
    # #name.wav
    # src_filename += filetype

    # print("FILENAME:" , filename)

    # #run ffmpeg in subprocess, feed stdout into output
    # output, _ = (
    # 	ffmpeg
    # 	.input(src)
    # 	.filter_('atempo', factor)
    # 	.output('pipe:', format='wav')
    # 	.run(capture_stdout=True)
    # )

    # print("DONE PROCESSING")

    # #cast output as FileObj, send to s3 bucket, get new filename
    # new_name = upload_bytes_to_s3(io.BytesIO(output), filename, bucket_name)

    # #delete original file from s3 bucket
    # delete_from_s3(src_filename, application.config["S3_BUCKET"])

    return new_name
Example #14
    def scale(self, scale):
        self.stream = ffmpeg.filter_(self.stream, 'scale', size=scale)
Example #15
def blur_softly(matrix: List[Dict[str, Any]], video_in, video_out=""):
    """ Запускает обработку видео через ffmpeg

    Обработываемых областей может быть несколько, они могут пересекаться по координатам и времени.
    Для непересекающихся областей порядок их следования в массиве не имеет значения.
    Параметры разных областей друг на друга не влияют, массив и словари не модифицируются при работе.

    Если путь к результату не задан, то он помещается рядом с исходным файлом, с припиской к имени "_blurred".

    Обрабатываемая область описывается тремя диапазонами: ширина, высота и продолжительносью, и двумя параметрами обработки: радиусом и степенью.
    Ширина может быть задана любыми двумя параметрами из списка: 'left', 'right', 'width'
    Высота может быть задана любыми двумя параметрами из списка: 'top', 'bottom', 'height'
    Продолжительность тоже задаётся двумя параметрами, но их имена и значения зависят от желаемого спасоба измерения времени.

    Для задания продолжительности можно использовать разные единицы измерения: секунды или кадры.
    Секунды могут быть дробными, задаются в виде чисел с плавающей точкой.
    Продолжительность в секундах задаётся любыми двумя параметрами из списка: 'timestart', 'timeend', 'length'
    Кадры могут быть только целыми, начальный кадр имеет номер 0.
    Продолжительность в кадрах задаётся любыми двумя параметрами из списка: 'framestart', 'frameend', 'length'

    Радиус размытия по умолчанию ставится как четверть от меньшего из размеров области.
    Задать его явно можно через параметр 'radius'.
    При превышении допустимого значения ffmpeg откажется от работы и выведет соответствующее сообщение.

    Степень размытия задаётся через параметр 'power'.
    По умолчанию его значение 5.

    :param matrix: список областей обработки
    :param video_in: путь к исходному видео
    :param video_out: путь к результату
    """
    input_file = ffmpeg.input(video_in)

    source_stream = input_file.video
    st0 = source_stream

    for d in matrix:
        top, height = _calc_size(d, 'top', 'bottom', 'height')
        left, width = _calc_size(d, 'left', 'right', 'width')
        if 'timestart' in d or 'timeend' in d:
            start, length = _calc_size(d, 'timestart', 'timeend', 'length')
            time_unit = 't'
        else:
            start, length = _calc_size(d, 'framestart', 'frameend', 'length')
            time_unit = 'n'

        radius = d.get('radius')
        if radius is None:
            radius = min(width, height) / 4
        power = d.get('power')
        if power is None:
            power = 5

        enable = f'between({time_unit},{start},{start + length})'

        st1 = ffmpeg.crop(source_stream, left, top, width, height)
        st2 = ffmpeg.filter_(st1, 'boxblur', lr=radius, lp=power, enable=enable)
        st0 = ffmpeg.overlay(st0, st2, x=left, y=top, enable=enable)

    if video_out == "":
        video_in = PurePath(video_in)
        video_out = str(video_in.parent.joinpath(video_in.stem + "_blurred" + video_in.suffix))

    output = ffmpeg.output(st0, input_file.audio, video_out,
                           vcodec='libx264',
                           acodec='copy',
                           crf=17,
                           preset='fast',
                           tune='stillimage')

    ffmpeg.run(output, overwrite_output=True)
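
A minimal usage sketch of the region dictionaries described in the docstring (the file name and region values are hypothetical; each range is given by two of its three possible keys):

blur_softly(
    [
        # 200x100 region in the top-left corner, blurred from second 2 to 5
        {'left': 0, 'width': 200, 'top': 0, 'height': 100,
         'timestart': 2.0, 'timeend': 5.0},
        # region given by opposite edges and a frame range, with explicit strength
        {'left': 300, 'right': 500, 'top': 200, 'bottom': 360,
         'framestart': 0, 'length': 120, 'radius': 20, 'power': 10},
    ],
    'input.mp4',
)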