Example #1
0
def process(item) -> Result:
    """Convert *item*'s source video into ./gifs/<uuid>.gif.

    The pipeline trims the clip, resamples fps, scales to item.size, and
    renders through a generated palette for GIF quality.  Returns a Result
    carrying ffmpeg's output, or the exception when anything fails.
    """
    if os.path.isfile(f"./gifs/{item.uuid}.gif"):
        # Already rendered; nothing to do.
        return Result(stdout=b"File exists")
    try:
        # Locate the source video by uuid prefix (extension may vary);
        # IndexError on a missing file is reported via the except below.
        fname = glob.glob(f"./videos/{item.uuid}*")[0]
        s = ffmpeg.input(fname)
        if item.end:
            s = ffmpeg.trim(s, start=item.start, end=item.end)
        else:
            s = ffmpeg.trim(s, start=item.start)

        s = ffmpeg.filter(s, 'fps', fps=item.fps)
        # Scale to the requested width; -1 preserves the aspect ratio.
        s = ffmpeg.filter(s, 'scale', item.size, -1)
        # palettegen/paletteuse each need the stream, hence the split.
        split = ffmpeg.filter_multi_output(s, "split")
        p = ffmpeg.filter(split[0], "palettegen")
        s = ffmpeg.filter([split[1], p], filter_name="paletteuse")

        o = ffmpeg.output(s, f"./gifs/{item.uuid}.gif")
        out, err = o.run(quiet=True)
        return Result(stderr=err, stdout=out)

    except Exception as e:
        # BUG FIX: the original checked only hasattr(e, "stderr") but then
        # read e.stdout as well, raising AttributeError for exception types
        # that carry stderr without stdout.  Guard both attributes.
        if hasattr(e, "stderr") and hasattr(e, "stdout"):
            return Result(exception=e, stderr=e.stderr, stdout=e.stdout)
        else:
            return Result(exception=e)
Example #2
0
def test_node_repr():
    """Node reprs show the operator args plus the node's short hash."""
    spans = ((10, 20), (30, 40), (50, 60))
    in_file = ffmpeg.input('dummy.mp4')
    trims = [
        ffmpeg.trim(in_file, start_frame=begin, end_frame=stop)
        for begin, stop in spans
    ]
    concatted = ffmpeg.concat(*trims)
    output = ffmpeg.output(concatted, 'dummy2.mp4')
    assert repr(in_file.node) == 'input(filename={!r}) <{}>'.format('dummy.mp4', in_file.node.short_hash)
    for stream, (begin, stop) in zip(trims, spans):
        expected = 'trim(end_frame={}, start_frame={}) <{}>'.format(
            stop, begin, stream.node.short_hash)
        assert repr(stream.node) == expected
    assert repr(concatted.node) == 'concat(n=3) <{}>'.format(concatted.node.short_hash)
    assert repr(output.node) == 'output(filename={!r}) <{}>'.format('dummy2.mp4', output.node.short_hash)
Example #3
0
def test_repr():
    """Stream reprs show the operator name and its normalized arguments."""
    spans = ((10, 20), (30, 40), (50, 60))
    in_file = ffmpeg.file_input('dummy.mp4')
    trims = [ffmpeg.trim(in_file, begin, stop) for begin, stop in spans]
    concatted = ffmpeg.concat(*trims)
    output = ffmpeg.file_output(concatted, 'dummy2.mp4')
    assert repr(in_file) == "file_input(filename='dummy.mp4')"
    for stream, (begin, stop) in zip(trims, spans):
        assert repr(stream) == (
            "trim(end_frame={},setpts='PTS-STARTPTS',start_frame={})".format(
                stop, begin))
    assert repr(concatted) == "concat()"
    assert repr(output) == "file_output(filename='dummy2.mp4')"
def main():
    """Assemble the mass video (last part + after-screen) into out.mp4."""
    # NOTE(review): `input` below appears to be a local/imported helper
    # (it accepts a `loop_frames` kwarg), not the builtin — confirm where
    # it is imported from.
    mass_parts = mass_part_inputs(os.path.join(MASS_VIDEOS_DIR, '2020-05-23'))

    # TODO Read from mass video
    FRAME_RATE = 25

    # Before/after
    before_logo = input('stalbans_logo_5.mp4')
    # Still image looped for 30 seconds' worth of frames at FRAME_RATE.
    after_screen = input('after-message.png', loop_frames=FRAME_RATE * 30)

    # Superimposed bits — NOTE(review): built but never used below.
    announcements = input('announcements-background.png')
    offertory = input('offertory-background.png')

    print(before_logo)
    # print(mass_parts)
    print(after_screen)

    # Split the final part so one branch could feed a fade/xfade
    # (the xfade usage is still commented out below).
    split_last_mass_part = ffmpeg.filter_multi_output(mass_parts[-1], 'split')
    mass_parts[-1] = split_last_mass_part.stream(0)
    last_mass_part_fade = split_last_mass_part.stream(1)
    # ffmpeg.concat(split0, split1).output('out.mp4').run()

    print(mass_parts[-1])
    # Trim the last part to end=10 (seconds, for ffmpeg's trim filter).
    mass_parts[-1] = ffmpeg.trim(mass_parts[-1], end=10)
    print(mass_parts[-1])

    # NOTE(review): only the LAST mass part is concatenated with the
    # after-screen — earlier parts and before_logo are dropped.  Looks
    # like work-in-progress; confirm intent.
    result = ffmpeg.concat(
        mass_parts[-1],
        # ffmpeg.filter([last_mass_part_fade, after_screen], 'xfade'),
        after_screen,
    ).output('out.mp4')
    # Show the full ffmpeg command line before running it.
    print(' '.join(ffmpeg.get_args(result)))
    result.run()
Example #5
0
def cut(video, audio, arg):
    """Trim both streams to the (start, end) pair in *arg*.

    The trimmed video and audio are re-timestamped with
    setpts/asetpts so each clip starts at t=0.  Returns the
    (video, audio) pair of trimmed streams.
    """
    begin, stop = arg
    clipped_video = ffmpeg.trim(video, start=begin, end=stop)
    clipped_video = ffmpeg.setpts(clipped_video, 'PTS-STARTPTS')
    clipped_audio = ffmpeg.filter(audio, 'atrim', start=begin, end=stop)
    clipped_audio = ffmpeg.filter(clipped_audio, 'asetpts', 'PTS-STARTPTS')
    return clipped_video, clipped_audio
Example #6
0
 def _estublish_cmd(self, scenes: List[Scene]):
     """Build the ffmpeg command that concatenates *scenes* into one file.

     Each scene is cut out of the input as a zero-based video and audio
     clip; all clips are concatenated and written to the output path with
     the 720p settings.  Stores the assembled stream on ``self.stream``
     and returns the command line joined into a single string.
     """
     source = ffmpeg.input(self.input_media_path.as_posix())
     clips_v = []
     clips_a = []
     for scene in scenes:
         begin = scene.get_startat()
         length = scene.get_interval()
         # Video clip: trim the window, then reset timestamps to zero.
         clip_v = ffmpeg.trim(source, start=begin, duration=length)
         clips_v.append(ffmpeg.setpts(clip_v, 'PTS-STARTPTS'))
         # Audio clip: same window via atrim/asetpts.
         clip_a = ffmpeg.filter_(source, 'atrim', start=begin, duration=length)
         clips_a.append(ffmpeg.filter_(clip_a, 'asetpts', 'PTS-STARTPTS'))
     merged_v = ffmpeg.concat(*clips_v, n=len(clips_v), v=1, a=0)
     merged_a = ffmpeg.concat(*clips_a, n=len(clips_a), v=0, a=1)
     assembled = ffmpeg.output(
         merged_v, merged_a, self.output_media_path.as_posix(),
         **self.CONFIG_720P)
     # ffmpeg.view(assembled)  # Debug
     self.stream = assembled
     return ' '.join(ffmpeg.compile(assembled))
Example #7
0
def frame_array_from_video(video_path,
                           ts_start=0.,
                           ts_end=None,
                           drop_frames_fps=None):
    """Decode a span of a video into an array of RGB frames.

    :param video_path: path to video
    :param ts_start: start timestamp in seconds
    :param ts_end: end timestamp in seconds (defaults to the full length)
    :param drop_frames_fps: if not None, resample to this frame rate by
        dropping or duplicating frame indices
    :return: array of shape (t, h, w, 3)
    """
    logger.debug('read {}'.format(video_path))
    if not ts_end:
        ts_end = get_video_length(video_path)

    # Probe for the first video stream to learn the frame dimensions.
    probe = ffmpeg.probe(video_path)
    video_info = None
    for candidate in probe['streams']:
        if candidate['codec_type'] == 'video':
            video_info = candidate
            break
    width = int(video_info['width'])
    height = int(video_info['height'])

    clip = ffmpeg.input(video_path)
    clip = ffmpeg.trim(stream=clip, start=ts_start, end=ts_end)
    # Decode to raw rgb24 on stdout and reshape into (t, h, w, 3).
    raw_bytes, _ = clip.output('pipe:', format='rawvideo',
                               pix_fmt='rgb24').run(capture_stdout=True)
    frames = np.frombuffer(raw_bytes, np.uint8).reshape([-1, height, width, 3])

    if drop_frames_fps is not None:
        source_fps = get_fps(video_path)
        fps_ratio = float(source_fps) / float(drop_frames_fps)

        # Evenly spaced indices resample the clip to the target rate.
        take_count = frames.shape[0] / fps_ratio
        indices = np.linspace(0,
                              frames.shape[0],
                              num=int(take_count),
                              endpoint=False).astype(int)

        frames = frames[indices, :]

    logger.debug('read end {}'.format(video_path))

    return frames
Example #8
0
def trim(root_dir, out_dir, annotation_file):
    """Cut annotated clips out of videos under *root_dir* into *out_dir*.

    The annotation CSV rows are (classname, video id, subset, start, end,
    label).  The header row, rows in the 'testing' subset, and videos
    missing on disk are skipped.  Output mirrors the per-class folder
    layout of the input; existing outputs are overwritten.
    """
    # newline='' is the documented way to open files for the csv module.
    with open(annotation_file, newline='') as f:
        reader = csv.reader(f, delimiter=',')
        next(reader)  # skip the header row
        for row in reader:
            classname, vid, subset, start, end, _label = row
            if subset == 'testing':
                continue
            classname = classname.replace(' ', '_')
            basename = 'v_{}.mp4'.format(vid)
            video_path = os.path.join(root_dir, classname, basename)
            if not os.path.exists(video_path):
                continue
            stream = ffmpeg.input(video_path)
            stream = ffmpeg.trim(stream, start=start, end=end)

            folder = os.path.join(out_dir, classname)
            # BUG FIX: the original isdir+mkdir pair was racy and failed
            # outright when out_dir itself did not exist; makedirs with
            # exist_ok=True handles both.
            os.makedirs(folder, exist_ok=True)
            stream = ffmpeg.output(stream, os.path.join(folder, basename))
            ffmpeg.run(stream, overwrite_output=True)
Example #9
0
def main():
    """Detect scenes in every video under material/, export each scene to
    sceneN.mp4, then re-render all scenes in random order as new.mp4.

    Relies on PySceneDetect (VideoManager/SceneManager/ContentDetector)
    and ffmpeg-python being available at module level.
    """

    for root, dirs, files in os.walk('material'):
        for file in files:
            file = os.path.join(root, file)

            # --- scene detection (PySceneDetect) ---
            video_manager = VideoManager([file])
            stats_manager = StatsManager()
            scene_manager = SceneManager(stats_manager)
            scene_manager.add_detector(ContentDetector())
            base_timecode = video_manager.get_base_timecode()
            end_timecode = video_manager.get_duration()

            start_time = base_timecode
            # NOTE(review): get_duration() returns a tuple; index 2 is
            # presumably the end timecode — confirm against PySceneDetect.
            end_time = end_timecode[2]

            video_manager.set_duration(start_time=start_time,
                                       end_time=end_time)
            video_manager.set_downscale_factor()
            video_manager.start()
            scene_manager.detect_scenes(frame_source=video_manager)

            scene_list = scene_manager.get_scene_list(base_timecode)

            # Persist detection stats alongside the run when needed.
            if stats_manager.is_save_required():
                with open(STATS_FILE_PATH, 'w') as stats_file:
                    stats_manager.save_to_csv(stats_file, base_timecode)

            # --- export each detected scene as sceneN.mp4 ---
            print('List of scenes obtained:')
            for i, scene in enumerate(scene_list):
                print('    Scene %2d: Start %s / Frame %d, End %s / Frame %d' %
                      (
                          i + 1,
                          scene[0].get_timecode(),
                          scene[0].get_frames(),
                          scene[1].get_timecode(),
                          scene[1].get_frames(),
                      ))

                raw = ffmpeg.input(file)

                start = scene[0].get_timecode()
                end = scene[1].get_timecode()

                # Audio window with timestamps reset to zero.
                audio = (raw.filter_('atrim', start=start,
                                     end=end).filter_('asetpts',
                                                      'PTS-STARTPTS'))

                # Video window, same treatment.
                raw = ffmpeg.trim(raw, start=start, end=end)
                raw = raw.setpts('PTS-STARTPTS')

                # concat(v=1, a=1) yields a node with video+audio outputs.
                joined = ffmpeg.concat(raw, audio, v=1, a=1).node
                stream = ffmpeg.output(joined[0], joined[1],
                                       'scene%d.mp4' % (i + 1))
                stream.run()

            # Shuffle-by-random-sort-key idiom; random.shuffle would be
            # the more conventional spelling.
            shuffled = sorted(scene_list, key=lambda k: random.random())

            stream = 0  # placeholder; reassigned on the last iteration
            video_list = []
            audio_list = []
            merge_list = []
            raw = ffmpeg.input(file)

            # --- collect all clips, then concat them once at the end ---
            for i, scene in enumerate(shuffled):
                start = scene[0].get_timecode()
                end = scene[1].get_timecode()

                audio = (raw.filter_('atrim', start=start,
                                     end=end).filter_('asetpts',
                                                      'PTS-STARTPTS'))

                video = ffmpeg.trim(raw, start=start, end=end)
                video = video.setpts('PTS-STARTPTS')

                video_list.append(video)
                audio_list.append(audio)

                if (i == len(shuffled) - 1):
                    # NOTE(review): this inner loop shadows the outer `i`;
                    # harmless here only because this is the last iteration.
                    # Interleave video/audio as concat(v=1, a=1) expects.
                    for i in range(len(video_list)):
                        merge_list.append(video_list[i])
                        merge_list.append(audio_list[i])

                    stream = ffmpeg.concat(*merge_list, v=1, a=1)
                    stream = ffmpeg.output(stream, 'new.mp4')
                    stream.run()
Example #10
0
def main():
    """Automatically cutting a video using ffmpeg.

    Reads a pickled cut list (pandas DataFrame with `start`/`end`
    columns), widens each cut by the frame tolerance, and writes one
    sceneN.mp4 per row.
    """

    parser = argparse.ArgumentParser(
        prog="cutter",
        description="Cutting videos using a decision list and FFmpeg")

    parser.add_argument("list", help="path to cut list")
    parser.add_argument("input", help="input video file")
    parser.add_argument("-t",
                        "--tolerance",
                        action="store",
                        default=0,
                        help="number of frames added")
    #parser.add_argument("basename", help="basename for the scenes")
    parser.add_argument("-v",
                        "--verbose",
                        action='store_true',
                        help="verbose mode")

    args = parser.parse_args()

    stream = 0  # placeholder; reassigned in the loop
    output = 'scene'  # basename for the per-scene output files

    video_manager = VideoManager([args.input])
    fps = video_manager.get_framerate()
    # tolerance = Timecode(fps, frames=int(args.tolerance)).frames_to_tc
    # Tolerance is applied (in frames) to both ends of every cut.
    tolerance = int(args.tolerance)

    if args.verbose:
        print("FPS: %d" % (fps))
        print(tolerance)

    raw = ffmpeg.input(args.input)
    # NOTE(review): assumes the pickle holds a DataFrame with string
    # `start` and `end` columns — confirm against the producer.
    cut_list = pandas.read_pickle(args.list)

    for i, scene in cut_list.iterrows():
        # Timestamps may use a decimal comma; normalize to a dot.
        start = re.sub(',', '.', cut_list.start[i])
        end = re.sub(',', '.', cut_list.end[i])

        print("%s %s" % (start, end))

        # Widen the cut window by `tolerance` frames on each side;
        # fractional display keeps sub-second precision for ffmpeg.
        start = Timecode(fps, start) - tolerance
        start.set_fractional(True)

        end = Timecode(fps, end) + tolerance
        end.set_fractional(True)

        print("%s %s" % (start, end))

        if args.verbose:
            print('   Start %s, End %s' % (start, end))

        # Audio: atrim + asetpts so the clip's timestamps start at zero.
        audio = (raw.filter_('atrim', start=start,
                             end=end).filter_('asetpts', 'PTS-STARTPTS'))

        # Video: same window via trim + setpts.
        video = ffmpeg.trim(raw, start=start, end=end)
        video = video.setpts('PTS-STARTPTS')

        # Mux the trimmed pair into sceneN.mp4.
        stream = ffmpeg.concat(video, audio, v=1, a=1)
        stream = ffmpeg.output(stream, output + str(i) + '.mp4')
        stream.run()
Example #11
0
def segment_video_mutliprocess(segment_video_infos):
    """Render one video segment by overlaying note clips on the background.

    *segment_video_infos* is a dict with 'segment' (index),
    'start_timestamp' (used for the tmp directory name) and 'data'
    (one entry per note to overlay).  Returns the path of the rendered
    segment file.
    """
    segment_video_start_time = time.time()

    segment = segment_video_infos['segment']
    start_timestamp = segment_video_infos['start_timestamp']
    segment_video_path = "tmp/" + start_timestamp + "/video_" + str(
        segment).zfill(3) + ".mp4"
    segment_video = ffmpeg.input(config.background_video)
    counter = 0
    for segment_video_info in segment_video_infos['data']:
        counter += 1

        segment_start = segment_video_info['segment_start']
        step_note = segment_video_info['step_note']
        note_instrument = segment_video_info['note_instrument']
        duration = segment_video_info['duration']
        octave = segment_video_info['octave']

        if config.one_video_only['enabled']:
            note_file = note_instrument + '/video.mp4'
        else:
            note_file = note_to_file(octave, step_note, note_instrument)

        # Tiny per-clip offset so no two overlays start at the exact same
        # timestamp (per the original name, prevents stream splitting).
        split_preventing_offset = 1 / float(counter + 1000)
        itsoffset = segment_start / float(1000) + split_preventing_offset
        x = random.randint(config.subvideo['x_min'], config.subvideo['x_max'])
        y = random.randint(config.subvideo['y_min'], config.subvideo['y_max'])
        end = config.segment_duration / float(1000)
        trim_end = itsoffset + duration

        temp_video = ffmpeg.input(note_file, itsoffset=itsoffset)
        temp_video = ffmpeg.trim(temp_video, start=0, end=trim_end)

        if config.green_screen['enabled']:
            # Key out the configured background colour before overlaying.
            gs_color = config.green_screen['color']
            gs_similarity = config.green_screen['similarity']
            gs_blend = config.green_screen['blend']
            temp_video = ffmpeg.filter(temp_video,
                                       'colorkey',
                                       color=gs_color,
                                       similarity=gs_similarity,
                                       blend=gs_blend)
        segment_video = ffmpeg.overlay(segment_video,
                                       temp_video,
                                       x=x,
                                       y=y,
                                       eof_action='pass')
        # Keep the running composite clamped to the segment duration.
        segment_video = ffmpeg.trim(segment_video, start=0, end=end)
        # BUG FIX: the warning used to sit after an unconditional
        # `continue` and could never execute; emit it when the batch
        # limit is exceeded.
        if counter > config.video_batch['limit']:
            print(
                'Warning: The number of video to be process is too much ( larger than '
                + str(config.video_batch['limit']) +
                ' ), maybe need to change speed_control?')
    output_segment_video(segment_video, segment_video_path)

    segment_video_end_time = time.time()
    segment_video_elapsed = segment_video_end_time - segment_video_start_time

    print('Video process segment: ' + str(segment) +
          ' done, total duration: ' +
          str(datetime.timedelta(seconds=segment_video_elapsed)))
    return segment_video_path
Example #12
0
import ffmpeg

# Interactive one-shot trimmer: ask for a duration, cut input.mp4 to that
# length, and write trim<seconds>sec.mp4.
print('How long in seconds?')
x = input()

clip = ffmpeg.input('input.mp4')
clip = ffmpeg.trim(clip, duration=x)
clip = ffmpeg.output(clip, 'trim' + x + 'sec.mp4')
ffmpeg.run(clip)