def frame_check_output(chunk: Chunk, expected_frames: int) -> int:
    actual_frames = frame_probe(chunk.output_path)
    if actual_frames != expected_frames:
        print(f'Frame count differs for source {chunk.name}: {actual_frames}/{expected_frames}')
    return actual_frames
def create_video_queue_vs(args: Args, split_locations: List[int], script: str) -> List[Chunk]:
    """
    Create a list of chunks using vspipe and ffms2 for frame accurate seeking

    :param args: the Args
    :param split_locations: a list of frames to split on
    :param script: source filter script to use with vspipe (ignored with vs input)
    :return: A list of chunks
    """
    # add first frame and last frame
    last_frame = frame_probe(args.input)
    split_locs_fl = [0] + split_locations + [last_frame]

    # pair up adjacent members of this list ex: [0, 10, 20, 30] -> [(0, 10), (10, 20), (20, 30)]
    chunk_boundaries = zip(split_locs_fl, split_locs_fl[1:])

    source_file = args.input.absolute().as_posix()
    vs_script = args.input
    if not args.is_vs:
        # create a vapoursynth script that will load the source with ffms2
        load_script = args.temp / 'split' / 'loadscript.vpy'
        cache_file = (args.temp / 'split' / 'ffms2cache.ffindex').absolute().as_posix()
        with open(load_script, 'w+') as file:
            file.write(script.format(source_file, cache_file))
        vs_script = load_script

    chunk_queue = [create_vs_chunk(args, index, vs_script, *cb)
                   for index, cb in enumerate(chunk_boundaries)]

    return chunk_queue
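
# Illustrative sketch, not part of the repo: how the split-location pairing above behaves.
# Given split frames [100, 200] and a 300-frame source, the chunk boundaries become
# (0, 100), (100, 200), (200, 300). `pair_boundaries` is a hypothetical helper used only
# to demonstrate the [0] + splits + [last_frame] / zip pattern shared by the queue builders.
def pair_boundaries(split_locations, last_frame):
    # prepend the first frame and append the total frame count, then zip adjacent members
    locs = [0] + split_locations + [last_frame]
    return list(zip(locs, locs[1:]))

assert pair_boundaries([100, 200], 300) == [(0, 100), (100, 200), (200, 300)]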
def create_chunk_from_segment(args: Args, index: int, file: Path) -> Chunk:
    """
    Creates a Chunk object from a segment file generated by ffmpeg

    :param args: the Args
    :param index: the index of the chunk
    :param file: the segmented file
    :return: A Chunk
    """
    ffmpeg_gen_cmd = ['ffmpeg', '-y', '-hide_banner', '-loglevel', 'error',
                      '-i', file.as_posix(),
                      *args.pix_format,
                      '-bufsize', '50000K',
                      '-f', 'yuv4mpegpipe', '-']

    file_size = file.stat().st_size
    frames = frame_probe(file)
    extension = ENCODERS[args.encoder].output_extension

    chunk = Chunk(args.temp, index, ffmpeg_gen_cmd, extension, file_size, frames)

    return chunk
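
# Illustrative sketch, hypothetical helper not taken from the repo: one way pre-split
# segment files could be collected and mapped through create_chunk_from_segment to build
# a queue. The 'split' directory and '*.mkv' pattern are assumptions made for the example;
# the real segment layout may differ.
def example_queue_from_segments(args) -> list:
    segment_files = sorted((args.temp / 'split').glob('*.mkv'))
    return [create_chunk_from_segment(args, index, file)
            for index, file in enumerate(segment_files)]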
def create_video_queue_select(args: Args, split_locations: List[int]) -> List[Chunk]:
    """
    Create a list of chunks using the select filter

    :param args: the Args
    :param split_locations: a list of frames to split on
    :return: A list of chunks
    """
    # add first frame and last frame
    last_frame = frame_probe(args.input)
    split_locs_fl = [0] + split_locations + [last_frame]

    # pair up adjacent members of this list ex: [0, 10, 20, 30] -> [(0, 10), (10, 20), (20, 30)]
    chunk_boundaries = zip(split_locs_fl, split_locs_fl[1:])

    chunk_queue = [create_select_chunk(args, index, args.input, *cb)
                   for index, cb in enumerate(chunk_boundaries)]

    return chunk_queue
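
# Illustrative sketch, not the repo's create_select_chunk: one way a frame-accurate chunk
# command could be built with ffmpeg's select filter, which is what the "select filter"
# path above refers to. The exact command the real code generates may differ; this only
# demonstrates extracting the half-open frame range [first_frame, last_frame_excl) and
# piping y4m to stdout. example_select_cmd is a hypothetical name for this sketch.
def example_select_cmd(source: str, first_frame: int, last_frame_excl: int) -> list:
    return ['ffmpeg', '-y', '-hide_banner', '-loglevel', 'error',
            '-i', source,
            # commas inside the expression are escaped so the filtergraph parser
            # does not treat them as option separators
            '-vf', f'select=between(n\\,{first_frame}\\,{last_frame_excl - 1}),setpts=PTS-STARTPTS',
            '-vsync', '0',
            '-f', 'yuv4mpegpipe', '-']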
def frame_check_output(chunk: Chunk, expected_frames: int) -> int:
    actual_frames = frame_probe(chunk.output_path)
    if actual_frames != expected_frames:
        print(f'Chunk #{chunk.name}: {actual_frames}/{expected_frames} fr')
    return actual_frames
def pyscene(video, threshold, min_scene_len, is_vs, temp):
    """
    Running PySceneDetect detection on source video for segmenting.
    Optimal threshold settings 15-50
    """
    if not min_scene_len:
        min_scene_len = 15

    log(f'Starting PySceneDetect:\n'
        f'Threshold: {threshold}, Min scene length: {min_scene_len}\n'
        f'Is Vapoursynth input: {is_vs}\n')

    if is_vs:
        # Handling vapoursynth, so we need to create a named pipe to feed to VideoManager.
        # TODO: Do we clean this up after pyscenedetect has run, or leave it as part of the
        # temp dir, where it will be cleaned up later?
        if sys.platform == "linux":
            vspipe_fifo = temp / 'vspipe.y4m'
            mkfifo(vspipe_fifo)
        else:
            vspipe_fifo = None

        vspipe_cmd = compose_vapoursynth_pipe(video, vspipe_fifo)
        vspipe_process = Popen(vspipe_cmd)

        # Get number of frames from the Vapoursynth script to pass as duration to VideoManager.
        # We need to pass the number of frames to the manager, otherwise it won't close the
        # receiving end of the pipe, and will simply sit waiting after vspipe has finished
        # sending the last frame.
        frames = frame_probe(video)

    video_manager = VideoManager([str(vspipe_fifo if is_vs else video)])
    scene_manager = SceneManager()
    scene_manager.add_detector(ContentDetector(threshold=threshold, min_scene_len=min_scene_len))
    base_timecode = video_manager.get_base_timecode()

    video_manager.set_duration(duration=FrameTimecode(frames, video_manager.get_framerate()) if is_vs else None)

    # Set downscale factor to improve processing speed.
    video_manager.set_downscale_factor()

    # Start video_manager.
    video_manager.start()

    scene_manager.detect_scenes(frame_source=video_manager, show_progress=True)

    # If fed using a vspipe process, ensure that vspipe has finished.
    if is_vs:
        vspipe_process.wait()

    # Obtain list of detected scenes.
    scene_list = scene_manager.get_scene_list(base_timecode)

    scenes = [int(scene[0].get_frames()) for scene in scene_list]

    # Remove 0 from the list (guard against an empty scene list)
    if scenes and scenes[0] == 0:
        scenes.remove(0)

    log(f'Found scenes: {len(scenes)}\n')

    return scenes
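
# Illustrative sketch, hypothetical wiring not taken from the repo: the frame numbers
# returned by pyscene() are exactly the split_locations the queue builders above expect,
# so a caller could glue the two stages together roughly like this. threshold=30 is just
# an example value inside the 15-50 range mentioned in pyscene's docstring.
def example_split_and_queue(args):
    split_locations = pyscene(args.input, threshold=30, min_scene_len=15,
                              is_vs=args.is_vs, temp=args.temp)
    # frame-accurate queue via the select-filter path
    return create_video_queue_select(args, split_locations)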