Example 1
def create_video_queue_vsffms2(args: Args, split_locations: List[int]) -> List[Chunk]:
    """
    Create a list of chunks using vspipe and ffms2 for frame accurate seeking

    :param args: the Args
    :param split_locations: a list of frames to split on
    :return: A list of chunks
    """
    # add first frame and last frame
    last_frame = frame_probe(args.input)
    split_locs_fl = [0] + split_locations + [last_frame]

    # pair up adjacent members of this list, e.g. [0, 10, 20, 30] -> [(0, 10), (10, 20), (20, 30)]
    chunk_boundaries = zip(split_locs_fl, split_locs_fl[1:])

    # create a vapoursynth script that will load the source with ffms2
    load_script = args.temp / 'split' / 'loadscript.vpy'
    source_file = args.input.absolute().as_posix()
    cache_file = (args.temp / 'split' / 'ffms2cache.ffindex').absolute().as_posix()
    with open(load_script, 'w') as file:
        file.writelines([
            'from vapoursynth import core\n',
            f'core.ffms2.Source("{source_file}", cachefile="{cache_file}").set_output()\n',
        ])

    chunk_queue = [create_vsffms2_chunk(args, index, load_script, *cb) for index, cb in enumerate(chunk_boundaries)]

    return chunk_queue
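
For reference, the loadscript.vpy written above ends up containing only the two lines below; the paths shown are placeholders for whatever args.input and args.temp resolve to:

from vapoursynth import core
core.ffms2.Source("/path/to/source.mkv", cachefile="/path/to/temp/split/ffms2cache.ffindex").set_output()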
Example 2
def frame_check_output(chunk: Chunk, expected_frames: int) -> int:
    """
    Probe the encoded chunk and warn if its frame count does not match the expected count

    :param chunk: the encoded Chunk to check
    :param expected_frames: the number of frames the chunk should contain
    :return: the actual number of frames in the encoded chunk
    """
    actual_frames = frame_probe(chunk.output_path)
    if actual_frames != expected_frames:
        print(
            f'Frame count differs for source {chunk.name}: {actual_frames}/{expected_frames}'
        )
    return actual_frames
Example 3
def startup(args: Args, chunk_queue: List[Chunk]):
    """
    If resuming, open done file and get file properties from there
    else get file properties and

    """
    # TODO: move this out and pass in total frames and initial frames
    done_path = args.temp / 'done.json'
    if args.resume and done_path.exists():
        log('Resuming...\n')
        with open(done_path) as done_file:
            data = json.load(done_file)
        total = data['total']
        done = len(data['done'])
        initial = sum(data['done'].values())
        log(f'Resumed with {done} encoded clips done\n\n')
    else:
        initial = 0
        total = frame_probe_cv2(args.input)
        if total < 1:
            total = frame_probe(args.input)
        d = {'total': total, 'done': {}}
        with open(done_path, 'w') as done_file:
            json.dump(d, done_file)
    clips = len(chunk_queue)
    args.workers = min(args.workers, clips)
    print(f'\rQueue: {clips} Workers: {args.workers} Passes: {args.passes}\n'
          f'Params: {" ".join(args.video_params)}')
    counter = Manager().Counter(total, initial)
    args.counter = counter
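
The shape of done.json is implicit in the reads above; here is a minimal sketch of a file consistent with them (the chunk-name keys and frame counts are made up for illustration):

# A hypothetical done.json, matching how startup() reads it:
# len(data['done']) is the number of finished clips and
# sum(data['done'].values()) is the number of frames already encoded.
data = {
    'total': 15000,          # total frames in the source
    'done': {
        '00000': 240,        # chunk name -> frames produced by that chunk (keys are assumed)
        '00001': 360,
    },
}
# resuming with this file would report 2 encoded clips done and start the counter at 600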
Example 4
def create_chunk_from_segment(args: Args, index: int, file: Path) -> Chunk:
    """
    Creates a Chunk object from a segment file generated by ffmpeg

    :param args: the Args
    :param index: the index of the chunk
    :param file: the segmented file
    :return: A Chunk
    """
    ffmpeg_gen_cmd = ['ffmpeg', '-y', '-hide_banner', '-loglevel', 'error', '-i', file.as_posix(), *args.pix_format,
                      '-bufsize', '50000K', '-f', 'yuv4mpegpipe', '-']
    file_size = file.stat().st_size
    frames = frame_probe(file)
    extension = ENCODERS[args.encoder].output_extension

    chunk = Chunk(args.temp, index, ffmpeg_gen_cmd, extension, file_size, frames)

    return chunk
Example 5
def create_video_queue_select(args: Args, split_locations: List[int]) -> List[Chunk]:
    """
    Create a list of chunks using the select filter

    :param args: the Args
    :param split_locations: a list of frames to split on
    :return: A list of chunks
    """
    # add first frame and last frame
    last_frame = frame_probe(args.input)
    split_locs_fl = [0] + split_locations + [last_frame]

    # pair up adjacent members of this list, e.g. [0, 10, 20, 30] -> [(0, 10), (10, 20), (20, 30)]
    chunk_boundaries = zip(split_locs_fl, split_locs_fl[1:])

    chunk_queue = [create_select_chunk(args, index, args.input, *cb) for index, cb in enumerate(chunk_boundaries)]

    return chunk_queue
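
create_select_chunk itself is not shown in these examples; as a rough sketch, a select-filter chunk feeds the encoder through an ffmpeg pipe that keeps only the chunk's frame range. The command below is an assumption for illustration, not the project's actual implementation:

def select_pipe_cmd(source: str, frame_start: int, frame_end: int) -> list:
    """Hypothetical ffmpeg pipe command that keeps only frames [frame_start, frame_end)."""
    return [
        'ffmpeg', '-y', '-hide_banner', '-loglevel', 'error',
        '-i', source,
        # select the chunk's frames, then regenerate timestamps for the y4m pipe
        '-vf', f'select=between(n\\,{frame_start}\\,{frame_end - 1}),setpts=PTS-STARTPTS',
        '-f', 'yuv4mpegpipe', '-',
    ]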
Example 6
def create_video_queue_hybrid(args: Args,
                              split_locations: List[int]) -> List[Chunk]:
    """
    Create list of chunks using hybrid segment-select approach

    :param args: the Args
    :param split_locations: a list of frames to split on
    :return: A list of chunks
    """
    keyframes = get_keyframes(args.input)

    cv2_total = frame_probe_cv2(args.input)
    end = [cv2_total] if cv2_total > 1 else [frame_probe(args.input)]

    splits = [0] + split_locations + end

    segments_list = list(zip(splits, splits[1:]))
    to_split = [x for x in keyframes if x in splits]

    if os.name == 'nt':
        to_split = reduce_segments(to_split)

    segments = []

    # Make segments
    segment(args.input, args.temp, to_split[1:])
    source_path = args.temp / 'split'
    queue_files = [x for x in source_path.iterdir() if x.suffix == '.mkv']
    queue_files.sort(key=lambda p: p.stem)

    # pair each segment file with its keyframe range, then collect the splits
    # that fall inside it, re-based to that segment's first frame
    kf_list = list(zip(to_split, to_split[1:] + end))
    for f, (x, y) in zip(queue_files, kf_list):
        to_add = [(f, [s[0] - x, s[1] - x]) for s in segments_list
                  if s[0] >= x and s[1] <= y and s[0] - x < s[1] - x]
        segments.extend(to_add)

    chunk_queue = [
        create_select_chunk(args, index, file, *cb)
        for index, (file, cb) in enumerate(segments)
    ]
    return chunk_queue
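
The re-basing in that inner list comprehension is the subtle part: split frame numbers are global, but each segment file starts again at frame 0. A toy run of the pairing and offset logic (frame numbers are made up):

splits = [0, 100, 250, 400]
segments_list = list(zip(splits, splits[1:]))      # [(0, 100), (100, 250), (250, 400)]
to_split = [0, 250]                                # split points that are also keyframes
end = [400]
kf_list = list(zip(to_split, to_split[1:] + end))  # [(0, 250), (250, 400)]

for x, y in kf_list:
    # splits that fall inside this segment, re-based to the segment's first frame
    print([[s[0] - x, s[1] - x] for s in segments_list if s[0] >= x and s[1] <= y])
# -> [[0, 100], [100, 250]] for the first segment file, then [[0, 150]] for the second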
Example 7
def aom_keyframes(video_path: Path, stat_file, min_scene_len, ffmpeg_pipe,
                  video_params):
    """[Get frame numbers for splits from aomenc 1 pass stat file]
    """

    log(f'Started aom_keyframes scenedetection\nParams: {video_params}\n')

    # Get CV2 fast framecount
    video = cv2.VideoCapture(video_path.as_posix())
    total = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    video.release()

    if total < 1:
        total = frame_probe(video_path)

    # f is the ffmpeg decode/pipe command, e is the aomenc first-pass command
    f, e = compose_aomsplit_first_pass_command(video_path, stat_file,
                                               ffmpeg_pipe, video_params)

    tqdm_bar = tqdm(total=total,
                    initial=0,
                    dynamic_ncols=True,
                    unit="fr",
                    leave=True,
                    smoothing=0.2)

    # start the ffmpeg decode process and feed its output into the aomenc first pass
    ffmpeg_proc = subprocess.Popen(f, stdout=PIPE, stderr=STDOUT)
    pipe = subprocess.Popen(e,
                            stdin=ffmpeg_proc.stdout,
                            stdout=PIPE,
                            stderr=STDOUT,
                            universal_newlines=True)

    encoder_history = deque(maxlen=20)
    frame = 0

    while True:
        line = pipe.stdout.readline()
        if len(line) == 0 and pipe.poll() is not None:
            break
        line = line.strip()

        if line:
            encoder_history.append(line)

        match = re.search(r"frame.*?/([^ ]+?) ", line)
        if match:
            new = int(match.group(1))
            if new > frame:
                tqdm_bar.update(new - frame)
            frame = new

    if pipe.returncode != 0 and pipe.returncode != -2:  # -2 is Ctrl+C for aom
        enc_hist = '\n'.join(encoder_history)
        er = f"\nAom first pass encountered an error: {pipe.returncode}\n{enc_hist}"
        log(er)
        print(er)
        if not stat_file.exists():
            terminate()
        else:
            # aom crashed, but created keyframes.log, so we will try to continue
            print(
                "WARNING: Aom first pass crashed, but created a first pass file. Keyframe splitting may not be accurate."
            )

    # aom kf-min-dist defaults to 0, but hardcoded to 3 in pass2_strategy.c test_candidate_kf. 0 matches default aom behavior
    # https://aomedia.googlesource.com/aom/+/8ac928be918de0d502b7b492708d57ad4d817676/av1/av1_cx_iface.c#2816
    # https://aomedia.googlesource.com/aom/+/ce97de2724d7ffdfdbe986a14d49366936187298/av1/encoder/pass2_strategy.c#1907
    min_scene_len = 0 if min_scene_len is None else min_scene_len

    keyframes = find_aom_keyframes(stat_file, min_scene_len)

    return keyframes
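
The progress parsing above expects the encoder to print a counter in a "frame <n>/<total>" shape; a minimal check of what the regex pulls out of such a line (the sample line is hypothetical, not captured aomenc output):

import re

line = "Pass 1/2 frame  240/241  102312B  5398 us ..."   # hypothetical status line
match = re.search(r"frame.*?/([^ ]+?) ", line)
if match:
    print(int(match.group(1)))   # -> 241, the value used to advance the tqdm bar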