Example 1
def compose_aomsplit_first_pass_command(video_path: Path, stat_file: Path, ffmpeg_pipe, video_params, is_vs) -> CommandPair:
    """
    Generates the command for the first pass of the entire video used for aom keyframe split

    :param video_path: the video path
    :param stat_file: the stat_file output
    :param ffmpeg_pipe: the av1an.ffmpeg_pipe with pix_fmt and -ff option
    :param video_params: the video params for aomenc first pass
    :param is_vs: is video_path a vapoursynth script
    :return: ffmpeg, encode
    """

    if is_vs:
        f = compose_vapoursynth_pipe(video_path)
    else:
        f = ['ffmpeg', '-y', '-hide_banner', '-loglevel', 'error', '-i', video_path.as_posix(), *ffmpeg_pipe]
    # removed -w -h from aomenc since ffmpeg filters can change it and it can be added into video_params
    # TODO(n9Mtq4): if an encoder other than aom is being used, video_params becomes the default so -w -h may be needed again

    # Adjust the number of first-pass threads. os.cpu_count() can return None
    # (documented behavior), so fall back to 1 before scaling; cap at 32.
    threads = min(32, (os.cpu_count() or 1) * 3)
    video_params = re.sub(r'(--threads=[0-9]+)', f'--threads={threads}', ' '.join(video_params))

    e = ['aomenc', '--passes=2', '--pass=1', *video_params.split(), f'--fpf={stat_file.as_posix()}', '-o', os.devnull, '-']
    return CommandPair(f, e)
Example 2
def pyscene(video, threshold, min_scene_len, is_vs, temp, quiet):
    """
    Running PySceneDetect detection on source video for segmenting.
    Optimal threshold settings 15-50

    :param video: path to the source video (or vapoursynth script when is_vs)
    :param threshold: ContentDetector threshold
    :param min_scene_len: minimum scene length, in frames
    :param is_vs: whether `video` is a vapoursynth script
    :param temp: temp directory (hosts the named pipe on linux)
    :param quiet: suppress the detection progress bar
    :return: list of scene-change frame numbers (leading 0 removed)
    """

    if ContentDetector is None:
        log(f"Unable to start PySceneDetect because it was not found. Please install scenedetect[opencv] to use"
            )
        return []

    log(f"Starting PySceneDetect:")
    log(f"Threshold: {threshold}")
    log(f"Min scene length: {min_scene_len}")
    log(f"Is Vapoursynth input: {is_vs}")

    vspipe_process = None
    frames = None
    if is_vs:
        # Handling vapoursynth, so we need to create a named pipe to feed to VideoManager.
        # TODO: Do we clean this up after pyscenedetect has run, or leave it as part of the temp dir, where it will be cleaned up later?
        if sys.platform == "linux":
            vspipe_fifo = temp / "vspipe.y4m"
            mkfifo(vspipe_fifo)
        else:
            vspipe_fifo = None

        vspipe_cmd = compose_vapoursynth_pipe(video, vspipe_fifo)
        vspipe_process = Popen(vspipe_cmd)

        # Get number of frames from Vapoursynth script to pass as duration to VideoManager.
        # We need to pass the number of frames to the manager, otherwise it won't close the
        # receiving end of the pipe, and will simply sit waiting after vspipe has finished sending
        # the last frame.
        frames = frame_probe(video)

    video_manager = VideoManager([str(vspipe_fifo if is_vs else video)])
    scene_manager = SceneManager()
    scene_manager.add_detector(
        ContentDetector(threshold=threshold, min_scene_len=min_scene_len))
    base_timecode = video_manager.get_base_timecode()

    video_manager.set_duration(duration=FrameTimecode(
        frames, video_manager.get_framerate()) if is_vs else None)

    # Set downscale factor to improve processing speed.
    video_manager.set_downscale_factor()

    # Start video_manager and make sure it is released even if detection raises
    # (previously the capture was never released — resource leak).
    video_manager.start()
    try:
        scene_manager.detect_scenes(frame_source=video_manager,
                                    show_progress=(not quiet))
    finally:
        video_manager.release()

    # If fed using a vspipe process, ensure that vspipe has finished.
    if vspipe_process is not None:
        vspipe_process.wait()

    # Obtain list of detected scenes.
    scene_list = scene_manager.get_scene_list(base_timecode)

    scenes = [int(scene[0].get_frames()) for scene in scene_list]

    # Remove 0 from list; guard against an empty list
    # (previously raised IndexError when no scenes were found).
    if scenes and scenes[0] == 0:
        scenes.remove(0)
    log(f"Found scenes: {len(scenes)}")

    return scenes
Example 3
def ffmpeg(video, threshold, min_scene_len, total_frames, is_vs, temp):
    """
    Running FFMPEG detection on source video for segmenting.
    Usually the optimal threshold is 0.1 - 0.3 but it can vary a lot
    based on your source content.

    Threshold value increased by x100 for matching with pyscene range

    :param video: path to the source video (or vapoursynth script when is_vs)
    :param threshold: scene score threshold (pyscene-scaled, i.e. score * 100)
    :param min_scene_len: minimum distance between splits, in frames
    :param total_frames: total frame count (unused here, kept for a uniform interface)
    :param is_vs: whether `video` is a vapoursynth script
    :param temp: temp directory (hosts the named pipe on linux)
    :return: list of split frame numbers
    """

    log(f'Starting FFMPEG detection:\nThreshold: {threshold}, \nIs Vapoursynth input: {is_vs}\n'
        )
    scenes = []
    frame: int = 0

    vspipe_process = None
    if is_vs:
        # On linux, feed vspipe output through a named pipe; elsewhere ffmpeg
        # reads the script path directly.
        if sys.platform == "linux":
            vspipe_fifo = temp / 'vspipe.y4m'
            mkfifo(vspipe_fifo)
        else:
            vspipe_fifo = None

        vspipe_cmd = compose_vapoursynth_pipe(video, vspipe_fifo)
        vspipe_process = Popen(vspipe_cmd)

    cmd = [
        'ffmpeg', '-hwaccel', 'auto', '-hide_banner', '-i',
        str(vspipe_fifo if is_vs else video.as_posix()), '-an', '-sn', '-vf',
        'scale=\'min(960,iw):-1:flags=neighbor\',select=\'gte(scene,0)\',metadata=print',
        '-f', 'null', '-'
    ]
    pipe = Popen(cmd,
                 stdout=subprocess.PIPE,
                 stderr=subprocess.STDOUT,
                 universal_newlines=True)

    while True:
        line = pipe.stdout.readline().strip()
        if len(line) == 0 and pipe.poll() is not None:
            break
        if len(line) == 0:
            continue

        # metadata=print emits "frame:<n> ..." lines followed by score lines.
        if 'frame' in line:
            match = re.findall(r':(\d+)', line)
            if match:
                frame = int(match[0])
                continue

        if 'score' in line:
            matches = re.findall(r"=\s*([\S\s]+)", line)
            if matches:
                # Scale by 100 so the threshold range matches pyscene's.
                score = float(matches[-1]) * 100
                if score > threshold and frame - max(
                        scenes, default=0) > min_scene_len:
                    scenes.append(frame)

    # -2 (SIGINT) is treated as a user abort, not an error.
    if pipe.returncode != 0 and pipe.returncode != -2:
        print(f"\n:: Error in ffmpeg scenedetection {pipe.returncode}")
        # scenes holds ints; they must be stringified before joining
        # (previously raised TypeError on this error path).
        print('\n'.join(map(str, scenes)))

    if vspipe_process is not None:
        vspipe_process.wait()

    log(f'Found split points: {len(scenes)}\n')
    log(f'Splits: {scenes}\n')

    return scenes
Example 4
def ffmpeg(video, threshold, min_scene_len, total_frames, is_vs, temp):
    """
    Running FFMPEG detection on source video for segmenting.
    Usually the optimal threshold is 0.1 - 0.3 but it can vary a lot
    based on your source content.

    :param video: path to the source video (or vapoursynth script when is_vs)
    :param threshold: scene-change threshold used by ffmpeg's select filter
    :param min_scene_len: minimum distance between splits, in frames
    :param total_frames: total number of frames in the video
    :param is_vs: whether `video` is a vapoursynth script
    :param temp: temp directory (hosts the named pipe on linux)
    :return: list of split frame numbers
    """

    log(f'Starting FFMPEG detection:\nThreshold: {threshold}, Is Vapoursynth input: {is_vs}\n'
        )

    vspipe_process = None
    if is_vs:
        # Handling vapoursynth. Outputs vs to a file so ffmpeg can handle it.
        if sys.platform == "linux":
            vspipe_fifo = temp / 'vspipe.y4m'
            mkfifo(vspipe_fifo)
        else:
            vspipe_fifo = None

        vspipe_cmd = compose_vapoursynth_pipe(video, vspipe_fifo)
        vspipe_process = Popen(vspipe_cmd)

    finfo = "showinfo,select=gt(scene\\," + str(threshold) + "),showinfo"
    ffmpeg_cmd = [
        "ffmpeg", "-i",
        str(vspipe_fifo if is_vs else video.as_posix()), "-hide_banner",
        "-loglevel", "32", "-filter_complex", finfo, "-an", "-f", "null", "-"
    ]
    # universal_newlines=True decodes stderr to str so the regexes below match
    # real text; previously the bytes line was passed through str(), so matching
    # only worked by accident inside the b'...' repr.
    pipe = subprocess.Popen(ffmpeg_cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=True)
    last_frame = -1
    scenes = []
    while True:
        line = pipe.stderr.readline().strip()
        if len(line) == 0:
            if pipe.poll() is not None:
                break
            continue
        # showinfo prints "n: <frame>"; after the select filter the counter of
        # the second showinfo resets, so a decreasing frame number marks a split.
        cur_frame = re.search(r"n:\ *[0-9]+", line)
        if cur_frame is not None:
            frame_num = re.search(r"[0-9]+", cur_frame.group(0))
            if frame_num is not None:
                frame_num = int(frame_num.group(0))
                if frame_num < last_frame:
                    scenes.append(last_frame)
                else:
                    last_frame = frame_num

    # If fed using a vspipe process, ensure that vspipe has finished.
    if vspipe_process is not None:
        vspipe_process.wait()

    # General purpose min_scene_len implementation that works if "scenes" are sorted from smallest
    # to largest.

    # First add the first and last frame so you can test if those are too close
    scenes = [0] + scenes + [total_frames]
    index = 1

    while index < len(scenes):
        # Check if this current split is too close to the previous split
        if scenes[index] < (scenes[index - 1] + min_scene_len):
            # if so remove the current split and then recheck if index < len(scenes)
            scenes.pop(index)
        else:
            index += 1

    # Remove the first and last splits. the first split will always be at frame 0 which is bad
    # and the last split will either be the last frame of video, or the actual last split.
    # if it's the last frame of video it should be removed
    # and if it's the last split it means that the last frame of video was too close to that
    # last split and thus the duration of the last split was too small and should have been removed
    if len(scenes) > 2:
        scenes.pop(0)
        scenes.pop()
    else:
        # Will only occur if literally all possible splits were removed for the min_scene_len
        return []

    # Remove 0 from list
    if scenes and scenes[0] == 0:
        scenes.remove(0)
    log(f'Found split points: {len(scenes)}\n')
    log(f'Splits: {scenes}\n')

    return scenes