Example #1
0
    def trim(self):
        """Cut the marked regions out of the source video and join them.

        ``self.markers`` holds fractions of the video duration; after
        sorting, each consecutive pair (start, end) is cropped into a temp
        clip, the clips are concatenated into 'trimmed-<source name>', and
        the temp clips plus the ffmpeg concat list file are removed.
        Returns early (doing nothing further) when no video is loaded.
        """
        self.markers.sort()

        list_filename = 'list.txt'

        # `with` guarantees the list file is closed even if ffmpeg.crop (or
        # anything else) raises; the original leaked the handle on any
        # exception other than AttributeError.
        with open(list_filename, 'w') as list_file:
            try:
                source_name = self.source.split('/')[-1]

                for i in range(0, len(self.markers), 2):
                    # Markers are fractions of the total duration.
                    start_time = self.markers[i] * self.video.duration
                    end_time = self.markers[i + 1] * self.video.duration

                    start_time = time.strftime('%H:%M:%S', time.gmtime(start_time))
                    end_time = time.strftime('%H:%M:%S', time.gmtime(end_time))

                    output = 'temp-{}-{}'.format(i, source_name)

                    ffmpeg.crop(self.source, start_time, end_time, output)
                    list_file.write('file \'{}\'\n'.format(output))

            except AttributeError:
                # self.source / self.video is not set — nothing to trim.
                return

        ffmpeg.concat(list_filename, 'trimmed-{}'.format(source_name))

        # Each list line looks like: file 'temp-<i>-<name>'; pull the quoted
        # path back out and delete the temp clip, then the list itself.
        with open(list_filename, 'r') as list_file:
            for line in list_file:
                os.remove(line.split('\'')[1::2][0])
        os.remove(list_filename)
Example #2
0
def crop_video(infile, outfile, size, center=None):
    """
    Crop a video to a rectangle of the given size.

    If `center` is None:
        the crop rect is centered on each frame
    else:
        the crop rect is centered on `center`

    Requires:
    - infile: input video file name
    - outfile: output video file name
    - size [tuple|list]: tuple or list of length 2;
        size[0] is the width of the crop rect
        size[1] is the height of the crop rect
    - center: (x, y) center of the crop rect, or None for the frame center
    """
    w, h, _ = get_resolution(infile)
    if center is None:
        center = (w//2, h//2)
    else:
        center = tuple(center)
    size = tuple(size)
    # top left corner of the crop rect
    postl = (center[0] - size[0]//2, center[1] - size[1]//2)
    # Reject rects that fall outside the frame on ANY side; the original
    # only checked the top-left corner, letting right/bottom overflow through.
    if (postl[0] < 0 or postl[1] < 0
            or postl[0] + size[0] > w or postl[1] + size[1] > h):
        print("rect {} center at {} is out of image!\n".format(size, center))
        return

    print("converting video {} {} to video {} {}; center at {}".format(
        infile, (w, h), outfile, size, center))
    instream = ffmpeg.input(infile)
    video = ffmpeg.crop(instream.video, postl[0], postl[1], size[0], size[1])
    outstream = ffmpeg.output(video, instream.audio, outfile)
    ffmpeg.run(outstream, quiet=True, overwrite_output=True)
    print("conversion succeeded")
Example #3
0
def crop_video(
        filename,
        xmin,
        xmax,
        ymin,
        ymax,
        out_name=None,
        speed='superfast',
        bit_rate='20000k'):
    """Crop `filename` to the box (xmin, ymin)-(xmax, ymax) and re-encode.

    When `out_name` is omitted the result is written next to the input as
    '<name>_crop<ext>', lowering an uppercase '.MP4' extension to '.mp4'.
    The output is encoded with the given x264 preset and video bitrate, and
    any existing output file is overwritten.
    """
    if out_name is None:
        core, ext = os.path.splitext(filename)
        out_name = core + '_crop' + ('.mp4' if ext == '.MP4' else ext)

    # Build the filter graph as one chained expression:
    # input -> crop -> output -> overwrite, then run it quietly.
    pipeline = ffmpeg.overwrite_output(
        ffmpeg.output(
            ffmpeg.crop(
                ffmpeg.input(filename),
                xmin,
                ymin,
                xmax - xmin,
                ymax - ymin),
            out_name,
            preset=speed,
            video_bitrate=bit_rate))
    ffmpeg.run(pipeline, quiet=True)
Example #4
0
def ffmpeg_export(
    p_indir
):  # the real meat, this is where i struggle with ffmpeg-python and occasionally succeed
    """Crop and scale every image in `files` to the size from the GUI.

    Reads the desired size from the entry widgets, center-crops each file
    under `p_indir` to the desired aspect ratio, scales it to the desired
    size, and writes the result to the configured output location
    (a sibling '{dir}_{w}x{h}' directory, a custom directory, or in-place).

    NOTE(review): relies on module-level GUI state (entry_w, entry_h,
    sel_w, sel_h, crop_h, files, overwrite_og, use_custom_outdir,
    custom_outdir, progress_ffmpeg, frame_ffmpeg) — confirm against callers.
    """
    des_w = int(entry_w.get())
    des_h = int(entry_h.get())
    if (des_w > sel_w) or (des_h > sel_h):
        messagebox.showerror(
            title="Error", message="Desired size is larger than source size!")
        return
    sel_ratio = sel_w / sel_h
    des_ratio = des_w / des_h
    # safe placeholder values for when src and output have the same ratio
    x_offset = 0
    y_offset = 0
    adj_w = sel_w
    adj_h = sel_h
    if (crop_h.get() == 1) and (sel_ratio != des_ratio):
        adj_w = des_ratio * sel_h  # get the new width for the desired aspect ratio
        x_offset = (sel_w - adj_w) / 2  # centering math
    elif (crop_h.get() == 0) and (sel_ratio != des_ratio):
        # BUG FIX: height = width / ratio. The original multiplied by the
        # ratio (des_ratio * sel_w), which yields the wrong crop height
        # whenever des_ratio != 1.
        adj_h = sel_w / des_ratio
        y_offset = (sel_h - adj_h) / 2
    for x in files:
        x = p_indir + os.sep + x
        progress_ffmpeg.config(text='Rendering: ' + os.path.split(x)[1])
        frame_ffmpeg.update()
        # ffmpeg complains if we try to output to the same file as our input
        # so we output to a different file and replace the input afterwards
        outdir = x + '~.png'
        if overwrite_og.get() == 0 and use_custom_outdir.get() == 0:
            newdir = os.path.dirname(str(x)) + '_' + \
                str(des_w) + 'x' + str(
                    des_h)  # results in {old directory}_{width}x{height} in the old directory's parent dir
            outdir = newdir + os.sep + str(os.path.split(x)[1])
            if not os.path.isdir(newdir):
                os.mkdir(newdir)
        elif use_custom_outdir.get() == 1:
            outdir = custom_outdir + os.sep + str(os.path.split(x)[1])
        stream = ffmpeg.input(str(x), nostdin=None)
        stream = ffmpeg.crop(stream, x_offset, y_offset, adj_w, adj_h)
        stream = ffmpeg.filter(
            stream, "scale", des_w, des_h,
            flags="bilinear")  # TODO: allow user to choose algorithm
        stream = ffmpeg.output(
            stream, outdir, hide_banner=None
        )  # TODO: find a way to stop making a new shell for each op
        stream = ffmpeg.overwrite_output(stream)
        proc = ffmpeg.run_async(stream)
        if overwrite_og.get(
        ) == 1:  # check again because overwriting when we're not supposed to is bad mkay
            # BUG FIX: wait for ffmpeg to finish before touching the files;
            # the original removed/renamed while ffmpeg was still writing
            # the '~.png' temp file (race condition).
            proc.wait()
            os.remove(x)
            os.rename(x + '~.png', x)
    progress_ffmpeg.config(text='Rendering: Done!')
def _get_complex_filter_example():
    """Build (without running) a complex filter graph: two trimmed slices of
    a vertically-flipped input are concatenated, a cropped and mirrored
    overlay image is composited on top, a red box is drawn, and the result
    targets TEST_OUTPUT_FILE1 with overwrite enabled."""
    flipped = ffmpeg.input(TEST_INPUT_FILE1).vflip()
    branches = flipped.split()

    overlay_stream = ffmpeg.input(TEST_OVERLAY_FILE)
    overlay_stream = ffmpeg.crop(overlay_stream, 10, 10, 158, 112)

    joined = ffmpeg.concat(
        branches[0].trim(start_frame=10, end_frame=20),
        branches[1].trim(start_frame=30, end_frame=40),
    )
    decorated = joined.overlay(overlay_stream.hflip()).drawbox(
        50, 50, 120, 120, color='red', thickness=5)
    return decorated.output(TEST_OUTPUT_FILE1).overwrite_output()
Example #6
0
def crop(file_path, Xs, Ys, Xl, Yl):
  """Crop `file_path` with ffmpeg to an Xl x Yl rectangle whose top-left
  corner is (Xs, Ys), writing '<name>_converted.mp4' next to the input.

  Returns the output file path.
  """
  basename_without_ext = os.path.splitext(os.path.basename(file_path))[0]
  dir_name = os.path.dirname(file_path)
  out_name = dir_name + '/' + basename_without_ext + '_converted.mp4'

  # BUG FIX: pass the command as an argument list with shell=False so file
  # names containing spaces or shell metacharacters cannot break the command
  # line or inject shell code (the original interpolated paths into a
  # shell=True string).
  command = ['ffmpeg', '-i', file_path,
             '-vf', f'crop={Xl}:{Yl}:{Xs}:{Ys}', out_name]
  print(' '.join(command))
  subprocess.run(command)
  return out_name
  # NOTE: the original had a second, unreachable ffmpeg-python implementation
  # after this return; it has been removed as dead code.
Example #7
0
    def make_wide(self):
        """Render the wide-format video and return its file name.

        Scales the source to the target height, crops a horizontally
        centered strip of width `w_out * r`, squashes it to 768x480 with a
        square sample aspect ratio, and muxes in the song audio, stopping
        at the shortest stream.
        """
        video = ffmpeg.input(self.file_url).video
        video = ffmpeg.filter(video, "scale", -1, self.h_out)

        strip_width = f"{self.w_out * self.r}"
        strip_x = f"iw / 2 - {self.w_out * self.r / 2}"
        video = ffmpeg.crop(video, strip_x, "0", strip_width, "ih")

        video = ffmpeg.filter(video, "scale", 768, 480)
        video = ffmpeg.filter(video, "setsar", 1)

        song = ffmpeg.input(self.song_name)
        ffmpeg.output(video,
                      song,
                      self.full_wide_file_name,
                      shortest=None,
                      y=None).run()

        return self.full_wide_file_name
Example #8
0
try:
    probe = ffmpeg.probe(args.in_filename)
except ffmpeg.Error as e:
    print(e.stderr, file=sys.stderr)
    sys.exit(1)

# get the first video stream from the probe result
stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)

width = int(stream['width'])
height = int(stream['height'])

print('width, height are {}, {}'.format(width, height))

# calculate new width and height for 270 degree by 30 degree space
new_w = int(width * 3/4)

# the new height is derived from the ratio of the new to old height
# (phi is assumed to be defined earlier in the script — TODO confirm)
new_h = int(height * math.sin(phi))
margin_h = int((height-new_h)/2)

print('new dimensions are {}, {}'.format(new_w, new_h))

# crop top, bottom, and part of width
# ffmpeg.crop(stream, x, y, width, height, **kwargs)
stream = ffmpeg.input(str(args.in_filename))
stream = ffmpeg.crop(stream, 0, margin_h, new_w, new_h)
# BUG FIX: the original was missing the '+' between 'cropped_' and
# args.in_filename, which is a SyntaxError.
stream = ffmpeg.output(stream, 'cropped_' + args.in_filename + '.mp4')
ffmpeg.run(stream)
Example #9
0
def crop(video, audio, x, y, width, height):
    """Apply an ffmpeg crop filter to the video stream.

    The audio stream is passed through untouched; returns the
    (cropped_video, audio) pair.
    """
    return ffmpeg.crop(video, x, y, width, height), audio
Example #10
0
def blur_softly(matrix: List[Dict[str, Any]], video_in, video_out=""):
    """ Runs video processing through ffmpeg.

    There may be several regions to process; they may overlap in coordinates and in time.
    For non-overlapping regions, their order in the list does not matter.
    The parameters of different regions do not affect each other; the list and its dicts are not modified.

    If the output path is not given, the result is placed next to the source file, with "_blurred" appended to the name.

    A region to process is described by three ranges — width, height and duration — and two processing parameters: radius and power.
    Width may be given by any two parameters from: 'left', 'right', 'width'
    Height may be given by any two parameters from: 'top', 'bottom', 'height'
    Duration is likewise given by two parameters, whose names and values depend on the desired unit of time.

    Duration can be measured in different units: seconds or frames.
    Seconds may be fractional and are given as floating-point numbers.
    Duration in seconds is given by any two parameters from: 'timestart', 'timeend', 'length'
    Frames must be integers; the first frame has number 0.
    Duration in frames is given by any two parameters from: 'framestart', 'frameend', 'length'

    The blur radius defaults to a quarter of the smaller of the region's dimensions.
    It can be set explicitly via the 'radius' parameter.
    If the allowed value is exceeded, ffmpeg refuses to run and prints a corresponding message.

    The blur strength is set via the 'power' parameter.
    Its default value is 5.

    :param matrix: list of regions to process
    :param video_in: path to the source video
    :param video_out: path to the result
    """
    input_file = ffmpeg.input(video_in)

    source_stream = input_file.video
    # st0 accumulates the output: the source frame with each blurred patch
    # overlaid in turn.
    st0 = source_stream

    for d in matrix:
        top, height = _calc_size(d, 'top', 'bottom', 'height')
        left, width = _calc_size(d, 'left', 'right', 'width')
        if 'timestart' in d or 'timeend' in d:
            start, length = _calc_size(d, 'timestart', 'timeend', 'length')
            time_unit = 't'
        else:
            start, length = _calc_size(d, 'framestart', 'frameend', 'length')
            time_unit = 'n'

        radius = d.get('radius')
        if radius is None:
            radius = min(width, height) / 4
        power = d.get('power')
        if power is None:
            power = 5

        # ffmpeg timeline expression: 't' = timestamp in seconds,
        # 'n' = frame number.
        enable = f'between({time_unit},{start},{start + length})'

        # Crop the region out, box-blur it, and lay it back over the frame —
        # each step active only while `enable` holds.
        st1 = ffmpeg.crop(source_stream, left, top, width, height)
        st2 = ffmpeg.filter_(st1, 'boxblur', lr=radius, lp=power, enable=enable)
        st0 = ffmpeg.overlay(st0, st2, x=left, y=top, enable=enable)

    if video_out == "":
        video_in = PurePath(video_in)
        video_out = str(video_in.parent.joinpath(video_in.stem + "_blurred" + video_in.suffix))

    output = ffmpeg.output(st0, input_file.audio, video_out,
                           vcodec='libx264',
                           acodec='copy',
                           crf=17,
                           preset='fast',
                           tune='stillimage')

    ffmpeg.run(output, overwrite_output=True)
def read_clips_from_video(dirname, model_settings):
    """Decode a random, augmented clip from the video at `dirname`.

    Picks a random start position, decodes `frames_per_batch` frames at
    `video_fps` via ffmpeg, center-crops (with a random horizontal/vertical
    translation of up to `trans_max` pixels) to a square, scales to
    `crop_size`, randomly flips horizontally, subtracts `np_mean`, and
    returns a float array of shape (frames_per_batch, crop_size, crop_size, 3).

    NOTE(review): `model_settings` is assumed to carry the keys read below
    ('frames_per_batch', 'video_fps', 'crop_size', 'np_mean', 'trans_max')
    — confirm against callers.
    """
    print('Dirname: ', dirname)
    # Input size for the network
    frames_per_batch = model_settings['frames_per_batch']
    video_fps = model_settings['video_fps']
    crop_size = model_settings['crop_size']
    np_mean = model_settings['np_mean']
    trans_max = model_settings['trans_max']

    # Data augmentation randoms
    horizontal_flip = random.random()
    trans_factor = random.randint(-trans_max, trans_max)

    # Video information: take the first stream that reports a width
    # (i.e. the video stream).
    probe = ffmpeg.probe(dirname)
    for video_info in probe['streams']:
        if 'width' in video_info:
            break
    video_width = video_info["width"]
    video_height = video_info["height"]
    video_duration = float(video_info["duration"])
    num_frame = int(video_info["nb_frames"])

    # Select which portion of the video will be input: the latest start
    # frame that still leaves room for frames_per_batch output frames.
    rand_max = int(num_frame - ((num_frame / video_duration) *
                                (frames_per_batch / video_fps)))

    start_frame = random.randint(0, max(rand_max - 1, 0))
    # end_frame = ceil(start_frame + (num_frame / video_duration) * frames_per_batch / video_fps + 1)
    video_start = (video_duration / num_frame) * start_frame
    video_end = min(video_duration,
                    video_start + ((frames_per_batch + 1) / video_fps))

    # Cropping factor: center square crop, shifted by the random translation.
    x_pos = max(video_width - video_height + 2 * trans_factor, 0) // 2
    y_pos = max(video_height - video_width + 2 * trans_factor, 0) // 2
    crop_size1 = min(video_height, video_width)
    # Read specified times of the video
    ff = ffmpeg.input(dirname, ss=video_start, t=video_end - video_start)
    # Trim video -> did not work :(
    # ff = ff.trim(end_frame='50')
    # Divide into frames
    ff = ffmpeg.filter(ff, 'fps', video_fps)
    # Crop
    ff = ffmpeg.crop(ff, x_pos, y_pos, crop_size1, crop_size1)
    # Subsample
    ff = ffmpeg.filter(ff, 'scale', crop_size, crop_size)
    # Horizontal flip with some probability
    if horizontal_flip > 0.5:
        ff = ffmpeg.hflip(ff)
    # Output the video as raw RGB bytes on stdout
    ff = ffmpeg.output(ff, 'pipe:', format='rawvideo', pix_fmt='rgb24')
    # Run Process in quiet mode
    out, _ = ffmpeg.run(ff, capture_stdout=True, quiet=True)
    # Extract to numpy array: one crop_size x crop_size RGB frame per row
    video = np.frombuffer(out, np.uint8). \
        reshape([-1, crop_size, crop_size, 3])

    # Copies last frame if # of frames < 16
    # Subtracts the mean and converts type to float32
    num_frames = video.shape[0]
    if num_frames < frames_per_batch:
        last_frame = video[-1]
        num_frame_repeat = frames_per_batch - num_frames
        # print('Frames repeated: ', num_frame_repeat)
        last_repeat = np.repeat(last_frame[np.newaxis],
                                num_frame_repeat,
                                axis=0)
        video = np.concatenate((video, last_repeat), axis=0) - np_mean
    else:
        video = video[:frames_per_batch] - np_mean

    return video
Example #12
0
import ffmpeg

# Crop rectangle: top-left corner (px) and size (px).
crop_x = 200  # x coordinate of the region to cut out (px)
crop_y = 400  # y coordinate of the region to cut out (px)
crop_w = 500  # width of the region to cut out (px)
crop_h = 600  # height of the region to cut out (px)

# Source clip to crop.
clip = ffmpeg.input(
    'C:/Users/masho/Desktop/work/python/Python/lib/movie/videooriginal.mp4')

# Keep only the crop_w x crop_h region whose top-left corner is
# (crop_x, crop_y).
clip = ffmpeg.crop(clip, crop_x, crop_y, crop_w, crop_h)

# The cropped clip is written to this file.
clip = ffmpeg.output(
    clip,
    'C:/Users/masho/Desktop/work/python/Python/lib/movie/videooriginalout.mp4')

# overwrite_output=True overwrites an existing file with the same name.
ffmpeg.run(clip, overwrite_output=True)