コード例 #1
0
    def check_exes(self):
        """
        Verify that required external executables are available.

        Always requires ffmpeg. When a VapourSynth-based chunking
        method is selected, additionally requires vspipe and the
        matching source plugin. Exits the process on any failure.
        """

        if find_executable("ffmpeg"):
            log(get_ffmpeg_info())
        else:
            print("No ffmpeg")
            sys.exit(1)

        # Only the VapourSynth chunking methods need further checks.
        if self.chunk_method not in ("vs_ffms2", "vs_lsmash"):
            return

        if not find_executable("vspipe"):
            print("vspipe executable not found")
            sys.exit(1)

        try:
            import vapoursynth

            plugins = vapoursynth.get_core().get_plugins()
        except ModuleNotFoundError:
            print("Vapoursynth is not installed")
            sys.exit(1)
        else:
            # Each chunk method requires its corresponding plugin.
            required = {
                "vs_lsmash": ("systems.innocent.lsmas", "lsmas is not installed"),
                "vs_ffms2": ("com.vapoursynth.ffms2", "ffms2 is not installed"),
            }
            plugin_id, error_msg = required[self.chunk_method]
            if plugin_id not in plugins:
                print(error_msg)
                sys.exit(1)
コード例 #2
0
    def select_best_chunking_method(self):
        """
        Select the best available chunking method.

        Prefers VapourSynth-based chunking (L-SMASH first, then FFMS2)
        when vspipe and the corresponding plugin are available;
        otherwise falls back to the hybrid method. Sets
        ``self.chunk_method``.
        """
        if not find_executable("vspipe"):
            self.chunk_method = "hybrid"
            log("Set Chunking Method: Hybrid")
        else:
            try:
                import vapoursynth

                plugins = vapoursynth.get_core().get_plugins()

                if "systems.innocent.lsmas" in plugins:
                    log("Set Chunking Method: L-SMASH")
                    self.chunk_method = "vs_lsmash"

                elif "com.vapoursynth.ffms2" in plugins:
                    log("Set Chunking Method: FFMS2")
                    self.chunk_method = "vs_ffms2"

            except Exception as e:
                # Fixed: first message was an f-string with no
                # placeholders; second glued "Error:{e}" directly onto
                # "Fallback to Hybrid" with no separator.
                log("Vapoursynth not installed but vspipe reachable")
                log(f"Error: {e} Fallback to Hybrid")
                self.chunk_method = "hybrid"
コード例 #3
0
    def plot_probes(self, vmaf_cq, chunk: Chunk, frames):
        """
        Save a plot of the probe decisions for a chunk.

        :param vmaf_cq: list of (vmaf, q) probe measurements
        :param chunk: the probed chunk (used for the title and file name)
        :param frames: frame count of the chunk (shown in the title)
        :return: None
        """
        if plt is None:
            # Fixed: the original string used a backslash continuation,
            # embedding a run of indentation spaces before the period.
            log("Matplotlib is not installed or could not be loaded. "
                "Unable to plot probes.")
            return
        # Saving plot of vmaf calculation

        # Renamed the comprehension variables: the original reused `x`
        # as both the result list and the loop variable.
        x = [pt[1] for pt in sorted(vmaf_cq)]
        y = [float(pt[0]) for pt in sorted(vmaf_cq)]

        cq, tl, f, xnew = self.interpolate_data(vmaf_cq, self.target)
        matplotlib.use("agg")
        plt.ioff()
        plt.plot(xnew, f(xnew), color="tab:blue", alpha=1)
        plt.plot(x, y, "p", color="tab:green", alpha=1)
        plt.plot(cq[0], cq[1], "o", color="red", alpha=1)
        plt.grid(True)
        plt.xlim(self.min_q, self.max_q)
        # Only finite float measurements contribute to the y-range.
        vmafs = [
            int(pt[1]) for pt in tl
            if isinstance(pt[1], float) and not isnan(pt[1])
        ]
        plt.ylim(min(vmafs), max(vmafs) + 1)
        plt.ylabel("VMAF")
        plt.title(f"Chunk: {chunk.name}, Frames: {frames}")
        plt.xticks(np.arange(self.min_q, self.max_q + 1, 1.0))
        temp = self.temp / chunk.name
        plt.savefig(f"{temp}.png", dpi=200, format="png")
        plt.close()
コード例 #4
0
ファイル: vmaf.py プロジェクト: simon-huber/Av1an
    def plot_vmaf_score_file(self, scores: Path, plot_path: Path):
        """
        Read vmaf json and plot VMAF values for each frame

        :param scores: path to the VMAF json produced by libvmaf
        :param plot_path: where the plot image is saved
        :return: None
        """
        if plt is None:
            # Fixed: was an f-string with no placeholders.
            log("Matplotlib is not installed or could not be loaded, aborting plot_vmaf")
            return

        # Percentile / mean markers drawn as horizontal reference lines.
        perc_1 = self.read_weighted_vmaf(scores, 0.01)
        perc_25 = self.read_weighted_vmaf(scores, 0.25)
        perc_75 = self.read_weighted_vmaf(scores, 0.75)
        mean = self.read_weighted_vmaf(scores, 0.50)

        with open(scores) as f:
            file = json.load(f)
            vmafs = [x["metrics"]["vmaf"] for x in file["frames"]]
            plot_size = len(vmafs)

        # Widen the figure logarithmically with the frame count.
        figure_width = 3 + round((4 * log10(plot_size)))
        plt.figure(figsize=(figure_width, 5))

        plt.plot([1, plot_size], [perc_1, perc_1], "-", color="red")
        plt.annotate(f"1%: {perc_1}", xy=(0, perc_1), color="red")

        plt.plot([1, plot_size], [perc_25, perc_25], ":", color="orange")
        plt.annotate(f"25%: {perc_25}", xy=(0, perc_25), color="orange")

        plt.plot([1, plot_size], [perc_75, perc_75], ":", color="green")
        plt.annotate(f"75%: {perc_75}", xy=(0, perc_75), color="green")

        plt.plot([1, plot_size], [mean, mean], ":", color="black")
        plt.annotate(f"Mean: {mean}", xy=(0, mean), color="black")

        # Faint grid lines every VMAF point, darker every 5.
        for i in range(0, 100):
            plt.axhline(i, color="grey", linewidth=0.4)
            if i % 5 == 0:
                plt.axhline(i, color="black", linewidth=0.6)

        plt.plot(
            range(plot_size),
            vmafs,
            label=f"Frames: {plot_size}\nMean:{mean}\n"
            f"1%: {perc_1} \n25%: {perc_25} \n75%: {perc_75}",
            linewidth=0.7,
        )
        plt.ylabel("VMAF")
        plt.legend(
            loc="lower right",
            markerscale=0,
            handlelength=0,
            fancybox=True,
        )
        plt.ylim(int(perc_1), 100)
        plt.tight_layout()
        plt.margins(0)

        # Save
        plt.savefig(plot_path, dpi=250)
コード例 #5
0
    def validate_vmaf(self):
        """
        Run a tiny ffmpeg/libvmaf test encode to validate that
        ffmpeg, libvmaf and the configured model are set up properly.

        Exits the process with a non-zero status on failure.
        """

        # NOTE(review): when both model and n_threads are set they are
        # concatenated with no separator — confirm this matches
        # libvmaf's expected option syntax.
        if self.model or self.n_threads:
            add = f"={self.model}{self.n_threads}"
        else:
            add = ""

        cmd = f" ffmpeg -hide_banner -filter_complex testsrc=duration=1:size=1920x1080:rate=1[B];testsrc=duration=1:size=1920x1080:rate=1[A];[B][A]libvmaf{add} -t 1  -f null - ".split(
        )

        pipe = subprocess.Popen(cmd,
                                stdout=PIPE,
                                stderr=STDOUT,
                                universal_newlines=True)

        # Keep only the most recent output for error reporting.
        encoder_history = deque(maxlen=30)

        while True:
            line = pipe.stdout.readline().strip()
            if len(line) == 0 and pipe.poll() is not None:
                break
            # Collapsed the redundant `continue` + `if line` pair.
            if line:
                encoder_history.append(line)

        # -2 is the SIGINT (Ctrl+C) return code.
        if pipe.returncode != 0 and pipe.returncode != -2:
            msg1 = f"VMAF validation error: {pipe.returncode}"
            msg2 = "\n".join(encoder_history)
            log(msg1, msg2)
            print(f"::{msg1}\n::{msg2}")
            # Fixed: bare sys.exit() reported success (exit code 0)
            # even though validation failed.
            sys.exit(1)
コード例 #6
0
 def frame_check_output(self, chunk: Chunk, expected_frames: int, last_chunk=False) -> int:
     """Probe the encoded chunk and report any frame-count mismatch."""
     actual_frames = frame_probe(chunk.output_path)
     if actual_frames == expected_frames:
         return actual_frames
     # Mismatch: surface it both in the log and on the console.
     msg = f':: Chunk #{chunk.index}: {actual_frames}/{expected_frames} fr'
     log(msg)
     print(msg)
     return actual_frames
コード例 #7
0
def process_encoding_pipe(pipe, encoder, counter, chunk: Chunk):
    """
    Consume an encoder's stdout, updating the frame counter from the
    encoder's progress lines.

    :param pipe: the running encoder subprocess
    :param encoder: encoder name, key into ENCODERS
    :param counter: progress counter with an ``update(n)`` method
    :param chunk: the chunk being encoded (used in error reporting)
    :raises RuntimeError: if the encoder exited with an error code
    """
    encoder_history = deque(maxlen=20)
    frame = 0
    enc = ENCODERS[encoder]
    while True:
        line = pipe.stdout.readline().strip()

        # Empty read: the process may be done, otherwise keep polling.
        if len(line) == 0:
            if pipe.poll() is not None:
                break
            continue

        match = enc.match_line(line)
        if match:
            new = int(match.group(1))
            if new > frame:
                counter.update(new - frame)
                frame = new

        encoder_history.append(line)

    if pipe.returncode != 0 and pipe.returncode != -2:  # -2 is Ctrl+C for aom
        msg1 = f'Encoder encountered an error: {pipe.returncode}'
        msg2 = f'Chunk: {chunk.index}'
        msg3 = '\n'.join(encoder_history)
        log(msg1, msg2, msg3)
        print(f'::{msg1}\n::{msg2}\n::{msg3}')
        # Fixed: sys.exc_info() outside an except block always yields a
        # None traceback, so with_traceback(None) was a no-op — raise
        # plainly instead.
        raise RuntimeError("Error in processing encoding pipe")
コード例 #8
0
ファイル: target_quality.py プロジェクト: natis1/Pav1n
def plot_probes(project, vmaf_cq, chunk: Chunk, frames):
    """
    Save a plot of the probe decisions for a chunk.

    :param project: the Project (target quality, q range, temp dir)
    :param vmaf_cq: list of (vmaf, q) probe measurements
    :param chunk: the probed chunk (title and output file name)
    :param frames: frame count of the chunk (shown in the title)
    :return: None
    """
    if plt is None:
        # Fixed: was an f-string with no placeholders.
        log('Matplotlib is not installed or could not be loaded. Unable to plot probes.'
            )
        return
    # Saving plot of vmaf calculation

    # Renamed the comprehension variables: the original reused `x`
    # as both the result list and the loop variable.
    x = [pt[1] for pt in sorted(vmaf_cq)]
    y = [float(pt[0]) for pt in sorted(vmaf_cq)]

    cq, tl, f, xnew = interpolate_data(vmaf_cq, project.target_quality)
    matplotlib.use('agg')
    plt.ioff()
    plt.plot(xnew, f(xnew), color='tab:blue', alpha=1)
    plt.plot(x, y, 'p', color='tab:green', alpha=1)
    plt.plot(cq[0], cq[1], 'o', color='red', alpha=1)
    plt.grid(True)
    plt.xlim(project.min_q, project.max_q)
    # Only finite float measurements contribute to the y-range.
    vmafs = [
        int(pt[1]) for pt in tl if isinstance(pt[1], float) and not isnan(pt[1])
    ]
    plt.ylim(min(vmafs), max(vmafs) + 1)
    plt.ylabel('VMAF')
    plt.title(f'Chunk: {chunk.name}, Frames: {frames}')
    plt.xticks(np.arange(project.min_q, project.max_q + 1, 1.0))
    temp = project.temp / chunk.name
    plt.savefig(f'{temp}.png', dpi=200, format='png')
    plt.close()
コード例 #9
0
def _concatenate_mkvmerge(files, output, file_limit, cmd_limit, flip=False):
    """
    Recursively concatenate encoded files with mkvmerge, batching so
    that neither the open-file limit nor the command-line length limit
    is exceeded.

    :param files: ordered list of (already quoted) file names
    :param output: final output path, used to derive temp file names
    :param file_limit: max files per mkvmerge invocation (-1 = no limit)
    :param cmd_limit: max total command-line length
    :param flip: alternates the temp file name between recursion levels
    :return: the name of the merged temporary file
    :raises RuntimeError: if mkvmerge exits with a non-zero status
    """
    tmp_out = "{}.tmp{}.mkv".format(output, int(flip))
    cmd = ["mkvmerge", "-o", tmp_out, files[0]]

    # Greedily append inputs until either limit would be exceeded.
    remaining = []
    for i, file in enumerate(files[1:]):
        new_cmd = cmd + ['+{}'.format(file)]
        if sum(len(s) for s in new_cmd) < cmd_limit \
            and (file_limit == -1 or i < max(1, file_limit - 10)):
            cmd = new_cmd
        else:
            remaining = files[i + 1:]
            break

    concat = subprocess.Popen(cmd, stdout=PIPE, universal_newlines=True)
    message, _ = concat.communicate()
    concat.wait()

    if concat.returncode != 0:
        log(message)
        print(message)
        # Fixed: the original `raise RuntimeError.with_traceback(tb)`
        # called with_traceback on the class itself, which is a
        # TypeError at runtime and masked the real mkvmerge failure
        # (and tb from sys.exc_info() outside except is always None).
        raise RuntimeError(message)

    # Recurse on whatever did not fit into this invocation.
    if len(remaining) > 0:
        return _concatenate_mkvmerge([tmp_out] + remaining, output, file_limit,
                                     cmd_limit, not flip)
    return tmp_out
コード例 #10
0
    def concat_routine(self):
        """
        Concatenate the encoded chunks into the final output file.

        Dispatches to the IVF, mkvmerge or ffmpeg concatenation helper
        depending on the settings; exits the process on failure.

        :return: None
        """
        try:
            log("Concatenating")
            if self.output_ivf:
                encode_dir = str((self.temp / "encode").resolve())
                ivf_out = str(self.output_file.with_suffix(".ivf").resolve())
                concatenate_ivf(encode_dir, ivf_out)
            elif self.mkvmerge:
                concatenate_mkvmerge(self.temp, self.output_file)
            else:
                temp_dir = str(self.temp.resolve())
                out_file = str(self.output_file.resolve())
                concatenate_ffmpeg(temp_dir, out_file, self.encoder)
        except Exception as e:
            _, _, exc_tb = sys.exc_info()
            print(
                f"Concatenation failed, error At line: {exc_tb.tb_lineno}\nError:{str(e)}"
            )
            log(f"Concatenation failed, aborting, error: {e}")
            sys.exit(1)
コード例 #11
0
ファイル: Pipes.py プロジェクト: parallelencode/Av1an
def process_encoding_pipe(pipe, encoder, counter, chunk: Chunk):
    """
    Read encoder progress from its stdout, updating the frame counter,
    and raise if the encoder exits with an error code.
    """
    encoder_history = deque(maxlen=20)
    frame = 0
    enc = ENCODERS[encoder]

    while True:
        line = pipe.stdout.readline().strip()
        if not line:
            if pipe.poll() is not None:
                break
            continue

        match = enc.match_line(line)
        if match:
            new = int(match.group(1))
            if new > frame:
                counter.update(new - frame)
                frame = new

        encoder_history.append(line)

    # -2 is Ctrl+C for aom
    if pipe.returncode not in (0, -2):
        msg = f':: Encoder encountered an error: {pipe.returncode}\n:: Chunk: {chunk.index}\n' + \
             '\n'.join(encoder_history)
        log(msg + '\n\n')
        print(msg)
        raise Exception("Error in processing encoding pipe")
コード例 #12
0
ファイル: chunk_queue.py プロジェクト: zhangjinrong/Av1an
def create_video_queue_segment(project: Project,
                               split_locations: List[int]) -> List[Chunk]:
    """
    Create a list of chunks using segmented files

    :param project: Project
    :param split_locations: a list of frames to split on
    :return: A list of chunks
    """

    # Split the source into separate files on the requested frames.
    segment(project.input, project.temp, split_locations)

    # Collect the resulting .mkv segments, ordered by file stem.
    source_path = project.temp / 'split'
    queue_files = sorted(
        (x for x in source_path.iterdir() if x.suffix == '.mkv'),
        key=lambda p: p.stem,
    )

    if not queue_files:
        er = 'Error: No files found in temp/split, probably splitting not working'
        print(er)
        log(er)
        terminate()

    return [
        create_chunk_from_segment(project, index, file)
        for index, file in enumerate(queue_files)
    ]
コード例 #13
0
def aom_keyframes(video_path: Path, stat_file, min_scene_len, ffmpeg_pipe, video_params, is_vs, quiet):
    """[Get frame numbers for splits from aomenc 1 pass stat file]

    Runs an aomenc first pass (fed by an ffmpeg decode pipe) and then
    derives keyframe/split positions from the produced stat file.

    :param video_path: input video (or VapourSynth script when is_vs)
    :param stat_file: path where aomenc writes its first-pass stats
    :param min_scene_len: minimum split distance; None is treated as 0
    :param ffmpeg_pipe: ffmpeg arguments used to pipe video into aomenc
    :param video_params: encoder parameters, logged and passed through
    :param is_vs: whether the input is a VapourSynth script
    :param quiet: suppresses the progress bar when True
    :return: list of keyframe frame numbers
    """

    log(f'Started aom_keyframes scenedetection\nParams: {video_params}\n')

    total = frame_probe_fast(video_path, is_vs)

    # f = ffmpeg decode command, e = aomenc first-pass command.
    f, e = compose_aomsplit_first_pass_command(video_path, stat_file, ffmpeg_pipe, video_params, is_vs)

    tqdm_bar = None
    if (not quiet) and (not (tqdm is None)):
        tqdm_bar = tqdm(total=total, initial=0, dynamic_ncols=True, unit="fr", leave=True, smoothing=0.2)

    # ffmpeg decodes and pipes raw frames into the aomenc first pass.
    ffmpeg_pipe = subprocess.Popen(f, stdout=PIPE, stderr=STDOUT)
    pipe = subprocess.Popen(e, stdin=ffmpeg_pipe.stdout, stdout=PIPE,
                            stderr=STDOUT, universal_newlines=True)

    # Keep only the most recent encoder output for error reporting.
    encoder_history = deque(maxlen=20)
    frame = 0

    while True:
        line = pipe.stdout.readline()
        if len(line) == 0 and pipe.poll() is not None:
            break
        line = line.strip()

        if line:
            encoder_history.append(line)

        # Progress parsing is skipped entirely when there is no bar.
        if quiet or (tqdm is None):
            continue

        match = re.search(r"frame.*?/([^ ]+?) ", line)
        if match:
            new = int(match.group(1))
            if new > frame:
                tqdm_bar.update(new - frame)
            # NOTE(review): `frame` is updated even when `new` goes
            # backwards; sibling progress loops in this project only
            # update it inside the `new > frame` branch — confirm this
            # is intended.
            frame = new

    if pipe.returncode != 0 and pipe.returncode != -2:  # -2 is Ctrl+C for aom
        enc_hist = '\n'.join(encoder_history)
        er = f"\nAom first pass encountered an error: {pipe.returncode}\n{enc_hist}"
        log(er)
        print(er)
        if not stat_file.exists():
            terminate()
        else:
            # aom crashed, but created keyframes.log, so we will try to continue
            print("WARNING: Aom first pass crashed, but created a first pass file. Keyframe splitting may not be accurate.")

    # aom kf-min-dist defaults to 0, but hardcoded to 3 in pass2_strategy.c test_candidate_kf. 0 matches default aom behavior
    # https://aomedia.googlesource.com/aom/+/8ac928be918de0d502b7b492708d57ad4d817676/av1/av1_cx_iface.c#2816
    # https://aomedia.googlesource.com/aom/+/ce97de2724d7ffdfdbe986a14d49366936187298/av1/encoder/pass2_strategy.c#1907
    min_scene_len = 0 if min_scene_len is None else min_scene_len

    keyframes = find_aom_keyframes(stat_file, min_scene_len)

    return keyframes
コード例 #14
0
ファイル: Queue.py プロジェクト: jrodzar/Av1an
    def encode_chunk(self, chunk: Chunk):
        """
        Encodes a chunk. If chunk fails, restarts it limited amount of times.
        Return if executed just fine, sets status fatal for queue if failed

        :param chunk: The chunk to encode
        :return: None
        """
        restart_count = 0

        # Retry the whole chunk up to 3 times before giving up.
        while restart_count < 3:
            try:
                st_time = time.time()

                chunk_frames = chunk.frames

                log(f'Enc: {chunk.index}, {chunk_frames} fr')

                # Target Quality Mode
                if self.project.target_quality:
                    if self.project.target_quality_method == 'per_shot':
                        self.tq.per_shot_target_quality_routine(chunk)
                    if self.project.target_quality_method == 'per_frame':
                        self.tq.per_frame_target_quality_routine(chunk)

                # skip first pass if reusing
                start = 2 if self.project.reuse_first_pass and self.project.passes >= 2 else 1

                # Run all passes for this chunk
                for current_pass in range(start, self.project.passes + 1):
                    tqdm_bar(self.project, chunk, self.project.encoder,
                             self.project.counter, chunk_frames,
                             self.project.passes, current_pass)

                # get the number of encoded frames, if no check assume it worked and encoded same number of frames
                encoded_frames = chunk_frames if self.project.no_check else self.frame_check_output(
                    chunk, chunk_frames)

                # write this chunk as done if it encoded correctly
                if encoded_frames == chunk_frames:
                    write_progress_file(Path(self.project.temp / 'done.json'),
                                        chunk, encoded_frames)

                enc_time = round(time.time() - st_time, 2)
                log(f'Done: {chunk.index} Fr: {encoded_frames}/{chunk_frames}')
                log(f'Fps: {round(encoded_frames / enc_time, 4)} Time: {enc_time} sec.'
                    )
                return

            except Exception as e:
                # Any failure in any pass restarts the entire chunk.
                msg1, msg2, msg3 = f'Chunk #{chunk.index} crashed', f'Exception: {type(e)} {e}', 'Restarting chunk'
                log(msg1, msg2, msg3)
                print(f'{msg1}\n::{msg2}\n::{msg3}')
                restart_count += 1

        # Three consecutive failures: mark the whole queue as fatal so
        # the encoding loop can shut down.
        msg1, msg2 = 'FATAL', f'Chunk #{chunk.index} failed more than 3 times, shutting down thread'
        log(msg1, msg2)
        print(f'::{msg1}\n::{msg2}')
        self.status = 'FATAL'
コード例 #15
0
    def plot_vmaf_score_file(self, scores: Path, plot_path: Path):
        """
        Read vmaf json and plot VMAF values for each frame

        :param scores: path to the VMAF json produced by libvmaf
        :param plot_path: where the plot image is saved
        :return: None
        """
        if plt is None:
            # Fixed: was an f-string with no placeholders.
            log('Matplotlib is not installed or could not be loaded, aborting plot_vmaf'
                )
            return

        # Percentile / mean markers drawn as horizontal reference lines.
        perc_1 = self.read_weighted_vmaf(scores, 0.01)
        perc_25 = self.read_weighted_vmaf(scores, 0.25)
        perc_75 = self.read_weighted_vmaf(scores, 0.75)
        mean = self.read_weighted_vmaf(scores, 0.50)

        with open(scores) as f:
            file = json.load(f)
            vmafs = [x['metrics']['vmaf'] for x in file['frames']]
            plot_size = len(vmafs)

        # Widen the figure logarithmically with the frame count.
        figure_width = 3 + round((4 * log10(plot_size)))
        plt.figure(figsize=(figure_width, 5))

        plt.plot([1, plot_size], [perc_1, perc_1], '-', color='red')
        plt.annotate(f'1%: {perc_1}', xy=(0, perc_1), color='red')

        plt.plot([1, plot_size], [perc_25, perc_25], ':', color='orange')
        plt.annotate(f'25%: {perc_25}', xy=(0, perc_25), color='orange')

        plt.plot([1, plot_size], [perc_75, perc_75], ':', color='green')
        plt.annotate(f'75%: {perc_75}', xy=(0, perc_75), color='green')

        plt.plot([1, plot_size], [mean, mean], ':', color='black')
        plt.annotate(f'Mean: {mean}', xy=(0, mean), color='black')

        # Faint grid lines every VMAF point, darker every 5.
        for i in range(0, 100):
            plt.axhline(i, color='grey', linewidth=0.4)
            if i % 5 == 0:
                plt.axhline(i, color='black', linewidth=0.6)

        plt.plot(range(plot_size),
                 vmafs,
                 label=f'Frames: {plot_size}\nMean:{mean}\n'
                 f'1%: {perc_1} \n25%: {perc_25} \n75%: {perc_75}',
                 linewidth=0.7)
        plt.ylabel('VMAF')
        plt.legend(
            loc="lower right",
            markerscale=0,
            handlelength=0,
            fancybox=True,
        )
        plt.ylim(int(perc_1), 100)
        plt.tight_layout()
        plt.margins(0)

        # Save
        plt.savefig(plot_path, dpi=250)
コード例 #16
0
def hash_path(s: str) -> str:
    """
    Return a short hash of the full path to a file.

    :param s: the path as a string
    :return: last 8 hex digits of the SHA3-512 digest, as a string
    """
    assert isinstance(s, str)
    # Fixed: the annotation previously claimed ``-> int`` but the
    # function has always returned a string slice of the hex digest.
    file_hash = str(hashlib.sha3_512(s.encode()).hexdigest())[-8:]
    log(f"File hash: {file_hash}")

    return file_hash
コード例 #17
0
ファイル: concat.py プロジェクト: traverseda/Av1an
def concatenate_mkvmerge(temp: Path, output):
    """
    Uses mkvmerge to concatenate encoded segments into the final file

    :param temp: the temp directory
    :param output: the final output file
    :return: None
    :raises RuntimeError: if mkvmerge exits with a non-zero status
    """

    log("Concatenating")

    # NOTE(review): arguments are later passed to Popen as a list, so
    # shlex.quote can embed literal quote characters into file names —
    # confirm this is intended.
    output = shlex.quote(output.as_posix())

    # Numeric stems sort numerically so chunk order is preserved.
    encode_files = sorted(
        (temp / "encode").iterdir(),
        key=lambda x: int(x.stem) if x.stem.isdigit() else x.stem,
    )
    encode_files = [shlex.quote(f.as_posix()) for f in encode_files]

    # OS limits that the recursive concat uses for batching.
    if platform.system() == "Linux":
        file_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
        cmd_limit = os.sysconf(os.sysconf_names["SC_ARG_MAX"])
    else:
        file_limit = -1
        cmd_limit = 32767

    audio_file = temp / "audio.mkv"
    audio = audio_file.as_posix() if audio_file.exists() else ""

    # Pre-merge into a single temp file when there are multiple chunks.
    if len(encode_files) > 1:
        encode_files = [
            _concatenate_mkvmerge(encode_files, output, file_limit, cmd_limit)
        ]

    cmd = ["mkvmerge", "-o", output, encode_files[0]]

    if audio:
        cmd.append(audio)

    concat = subprocess.Popen(cmd, stdout=PIPE, universal_newlines=True)
    message, _ = concat.communicate()
    concat.wait()

    if concat.returncode != 0:
        log(message)
        print(message)
        # Fixed: `raise RuntimeError.with_traceback(tb)` called
        # with_traceback on the class itself — a TypeError at runtime
        # that masked the real mkvmerge failure (tb was None anyway).
        raise RuntimeError(message)

    # remove temporary files used by recursive concat
    if os.path.exists("{}.tmp0.mkv".format(output)):
        os.remove("{}.tmp0.mkv".format(output))

    if os.path.exists("{}.tmp1.mkv".format(output)):
        os.remove("{}.tmp1.mkv".format(output))
コード例 #18
0
def concatenate_ffmpeg(temp: Path, output: Path, encoder: str):
    """
    Uses ffmpeg to concatenate encoded segments into the final file

    :param temp: the temp directory
    :param output: the final output file
    :param encoder: the encoder
    :return: None
    :raises RuntimeError: if ffmpeg produced any error output
    """

    log('Concatenating')

    # Write the list file for ffmpeg's concat demuxer.
    with open(temp / "concat", 'w') as f:

        encode_files = sorted((temp / 'encode').iterdir())
        f.writelines(f'file {shlex.quote("file:"+str(file.absolute()))}\n'
                     for file in encode_files)

    # Add the audio/subtitles/else file if one was extracted from the input
    audio_file = temp / "audio.mkv"
    if audio_file.exists():
        audio = ('-i', audio_file.as_posix(), '-c', 'copy', '-map', '1')
    else:
        audio = ()

    if encoder == 'x265':
        # x265 output needs +genpts and fragmented-mp4 flags to remux.
        cmd = [
            'ffmpeg', '-y', '-fflags', '+genpts', '-hide_banner',
            '-loglevel', 'error', '-f', 'concat', '-safe', '0', '-i',
            (temp / "concat").as_posix(), *audio, '-c', 'copy', '-movflags',
            'frag_keyframe+empty_moov', '-map', '0', '-f', 'mp4',
            output.as_posix()
        ]
    else:
        cmd = [
            'ffmpeg', '-y', '-hide_banner',
            '-loglevel', 'error', '-f', 'concat', '-safe', '0', '-i',
            (temp / "concat").as_posix(), *audio, '-c', 'copy', '-map', '0',
            output.as_posix()
        ]

    # NOTE(review): check=True already raises CalledProcessError on a
    # non-zero exit before the output check below is reached — confirm
    # which failure mode callers expect.
    concat = subprocess.run(cmd, stdout=PIPE, stderr=STDOUT,
                            check=True).stdout

    # At -loglevel error, any output indicates a problem.
    if len(concat) > 0:
        output_text = concat.decode()
        log(output_text)
        print(output_text)
        # Fixed: `raise RuntimeError.with_traceback(tb)` called
        # with_traceback on the class — a TypeError — and tb was
        # always None here.
        raise RuntimeError(output_text)
コード例 #19
0
    def encode_file(self, project: Project):
        """
        Encodes a single video file on the local machine.

        :param project: The project for this encode
        :return: None
        """

        project.setup()
        set_log(project.logging, project.temp)

        # find split locations
        split_locations = split_routine(project, project.resume)

        # create a chunk queue
        chunk_queue = load_or_gen_chunk_queue(project, project.resume,
                                              split_locations)

        # restore or initialize done.json progress tracking
        self.done_file(project, chunk_queue)
        if not project.resume:
            # audio is extracted once here and concatenated back in at the end
            extract_audio(
                str(project.input.resolve()),
                str(project.temp.resolve()),
                project.audio_params,
            )

        # do encoding loop
        project.determine_workers()
        self.startup(project, chunk_queue)
        queue = Queue(project, chunk_queue)
        queue.encoding_loop()

        # a fatal queue status means a chunk failed repeatedly
        if queue.status.lower() == "fatal":
            msg = "FATAL Encoding process encountered fatal error, shutting down"
            print("\n::", msg)
            log(msg)
            sys.exit(1)

        # concat
        project.concat_routine()

        # optional quality measurement / plotting of the final encode
        if project.vmaf or project.vmaf_plots:
            self.vmaf = VMAF(
                n_threads=project.n_threads,
                model=project.vmaf_path,
                res=project.vmaf_res,
                vmaf_filter=project.vmaf_filter,
            )
            self.vmaf.plot_vmaf(project.input, project.output_file, project)

        # Delete temp folders
        if not project.keep:
            shutil.rmtree(project.temp)
コード例 #20
0
def extract_audio(input_vid: Path, temp, audio_params):
    """Extracting audio from source, transcoding if needed."""
    log(f'Audio processing\nParams: {" ".join(audio_params)}\n')
    audio_file = temp / 'audio.mkv'

    # Probe for an audio track: copy zero seconds of audio to the null
    # muxer; with -loglevel error, any output means no usable audio.
    probe_cmd = ['ffmpeg', '-y', '-hide_banner', '-loglevel', 'error', '-ss', '0', '-i', input_vid.as_posix(), '-t', '0',
             '-vn', '-c:a', 'copy', '-f', 'null', '-']
    probe_output = subprocess.run(probe_cmd, stdout=PIPE, stderr=STDOUT).stdout

    # An empty probe output means the source has an audio track.
    if len(probe_output) == 0:
        cmd = ('ffmpeg', '-y', '-hide_banner', '-loglevel', 'error', '-i', input_vid.as_posix(), '-map_metadata', '0',
               '-map', '0', '-c', 'copy', '-vn', *audio_params, audio_file.as_posix())
        subprocess.run(cmd)
コード例 #21
0
ファイル: ffmpeg.py プロジェクト: Nova-Aurora/Av1an
def extract_audio(input_vid: Path, temp, audio_params):
    """Extracting audio from source, transcoding if needed.

    :param input_vid: path to the source video
    :param temp: temp directory (audio is written to temp/audio.mkv)
    :param audio_params: extra ffmpeg arguments for the audio encode
    """
    # Fixed: was an f-string with no placeholders.
    log("Audio processing")
    log(f'Params: {" ".join(audio_params)}')
    audio_file = temp / "audio.mkv"

    # Probe for an audio track: copy zero seconds of audio to the null
    # muxer; with -loglevel error, any output means no usable audio.
    check = [
        "ffmpeg",
        "-y",
        "-hide_banner",
        "-loglevel",
        "error",
        "-ss",
        "0",
        "-i",
        input_vid.as_posix(),
        "-t",
        "0",
        "-vn",
        "-c:a",
        "copy",
        "-f",
        "null",
        "-",
    ]
    is_audio_here = len(
        subprocess.run(check, stdout=PIPE, stderr=STDOUT).stdout) == 0

    # If source have audio track - process it
    if is_audio_here:
        cmd = (
            "ffmpeg",
            "-y",
            "-hide_banner",
            "-loglevel",
            "error",
            "-i",
            input_vid.as_posix(),
            "-map_metadata",
            "-1",
            "-dn",
            "-vn",
            *audio_params,
            audio_file.as_posix(),
        )
        subprocess.run(cmd)
コード例 #22
0
ファイル: Manager.py プロジェクト: natis1/Pav1n
    def done_file(self, project: Project, chunk_queue: List[Chunk]):
        """
        Initialize or resume the done.json progress file.

        On resume, restores total/initial frame counts from done.json;
        otherwise writes a fresh progress file.
        """
        done_path = project.temp / 'done.json'

        if not (project.resume and done_path.exists()):
            # Fresh run: start counting from zero.
            self.initial_frames = 0
            total = project.get_frames()
            with open(done_path, 'w') as done_file:
                json.dump({'frames': total, 'done': {}}, done_file)
            return

        log('Resuming...\n')
        with open(done_path) as done_file:
            data = json.load(done_file)

        project.set_frames(data['frames'])
        done = len(data['done'])
        self.initial_frames = sum(data['done'].values())
        log(f'Resumed with {done} encoded clips done\n\n')
コード例 #23
0
ファイル: Pipes.py プロジェクト: parallelencode/Av1an
def process_pipe(pipe, chunk: Chunk):
    """Drain the encoder pipe and raise if it exits with an error."""
    encoder_history = deque(maxlen=20)
    while True:
        line = pipe.stdout.readline().strip()
        if not line:
            if pipe.poll() is not None:
                break
            continue
        encoder_history.append(line)

    # -2 is the Ctrl+C return code.
    if pipe.returncode not in (0, -2):
        msg = f':: Encoder encountered an error: {pipe.returncode}\n:: Chunk: {chunk.index}' + \
             '\n'.join(encoder_history)
        log(msg + '\n\n')
        print(msg)
        raise Exception("Error in processing pipe")
コード例 #24
0
ファイル: Manager.py プロジェクト: Nova-Aurora/Av1an
    def done_file(self, project: Project, chunk_queue: List[Chunk]):
        """
        Create or restore the done.json progress file for this run.

        When resuming, reads frame totals and per-chunk progress back
        into the project/manager state; otherwise writes a fresh file.
        """
        done_path = project.temp / "done.json"

        if project.resume and done_path.exists():
            log("Resuming...")
            with open(done_path) as done_file:
                data = json.load(done_file)

            project.set_frames(data["frames"])
            completed = data["done"]
            self.initial_frames = sum(completed.values())
            log(f"Resumed with {len(completed)} encoded clips done")
        else:
            self.initial_frames = 0
            with open(done_path, "w") as done_file:
                json.dump({"frames": project.get_frames(), "done": {}},
                          done_file)
コード例 #25
0
def process_pipe(pipe, chunk: Chunk):
    """
    Drain an encoder's stdout, keeping recent lines for error reporting.

    :param pipe: the running encoder subprocess
    :param chunk: the chunk being processed (used in error reporting)
    :raises RuntimeError: if the encoder exited with an error code
    """
    encoder_history = deque(maxlen=20)
    while True:
        line = pipe.stdout.readline().strip()
        if len(line) == 0:
            if pipe.poll() is not None:
                break
            continue
        encoder_history.append(line)

    if pipe.returncode != 0 and pipe.returncode != -2:
        msg1 = f'Encoder encountered an error: {pipe.returncode}'
        # Fixed: the original glued the history straight onto
        # 'Chunk: N' with no separator.
        msg2 = f'Chunk: {chunk.index}\n' + '\n'.join(encoder_history)
        log(msg1, msg2)
        # Fixed: sys.exc_info() outside an except block is
        # (None, None, None), so with_traceback(None) was a no-op.
        raise RuntimeError("Error in processing encoding pipe")
コード例 #26
0
    def concat_routine(self):
        """
        Concatenate encoded chunks into the final output file.

        Dispatches to vvc_concat, mkvmerge or ffmpeg depending on the
        encoder and settings; terminates the run on failure.

        :return: None
        """
        try:
            if self.encoder == "vvc":
                vvc_concat(self.temp, self.output_file.with_suffix(".h266"))
                return
            if self.mkvmerge:
                concatenate_mkvmerge(self.temp, self.output_file)
                return
            concatenate_ffmpeg(self.temp, self.output_file, self.encoder)
        except Exception as e:
            _, _, exc_tb = sys.exc_info()
            print(
                f"Concatenation failed, error At line: {exc_tb.tb_lineno}\nError:{str(e)}"
            )
            log(f"Concatenation failed, aborting, error: {e}")
            terminate()
コード例 #27
0
    def log_probes(self,
                   vmaf_cq,
                   frames,
                   name,
                   target_q,
                   target_vmaf,
                   skip=None):
        """
        Write a chunk's probe results to the log.

        :type vmaf_cq: list probe measurements (q_vmaf, q)
        :type frames: int frame count of chunk
        :type name: str chunk name
        :type skip: str None if normal results, else "high" or "low"
        :type target_q: int Calculated q to be used
        :type target_vmaf: float Calculated VMAF that would be achieved by using the q
        :return: None
        """
        # Map the early-skip flag to its log suffix; empty when normal.
        skip_notes = {
            "high": " Early Skip High CQ",
            "low": " Early Skip Low CQ",
        }
        sk = skip_notes.get(skip, "")

        log(f"Chunk: {name}, Rate: {self.probing_rate}, Fr: {frames}")
        log(f"Probes: {str(sorted(vmaf_cq))[1:-1]}{sk}")
        log(f"Target Q: {target_q} VMAF: {round(target_vmaf, 2)}")
コード例 #28
0
def frame_probe_fast(source: Path, is_vs: bool = False):
    """
    Consolidated function to retrieve the number of frames from the input quickly,
    falls back on a slower (but accurate) frame count if a quick count cannot be found.

    Handles vapoursynth input as well.

    :param source: path to the input video
    :param is_vs: whether the input is a VapourSynth script
    :return: the frame count as an int
    """
    total = 0
    if not is_vs:
        try:
            import vapoursynth
            from vapoursynth import core

            # Prefer L-SMASH indexing when the plugin is available.
            plugins = vapoursynth.get_core().get_plugins()
            if "systems.innocent.lsmas" in plugins:
                total = core.lsmas.LWLibavSource(source.as_posix(),
                                                 cache=False).num_frames
                log("Get frame count with lsmash")
                log(f"Frame count: {total}")
                return total
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed. Fall back to OpenCV's
            # (possibly inaccurate) container frame count.
            video = cv2.VideoCapture(source.as_posix())
            total = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
            video.release()
            log("Can't open input with Pyscenedetect OpenCV")
    # VapourSynth input, or the quick count failed: do the slow probe.
    if is_vs or total < 1:
        total = frame_probe(source)

    return total
コード例 #29
0
ファイル: Pipes.py プロジェクト: wonlee2019/Av1an
def process_pipe(pipe, chunk: Chunk, utility: Iterable[Popen]):
    """
    Drain the encoder pipe, then shut down any helper processes.

    :param pipe: the running encoder subprocess
    :param chunk: the chunk being processed (used in error reporting)
    :param utility: auxiliary processes (e.g. ffmpeg feeders) to kill
    :raises RuntimeError: if the encoder exited with an error code
    """
    encoder_history = deque(maxlen=20)
    while True:
        line = pipe.stdout.readline().strip()
        if len(line) == 0:
            if pipe.poll() is not None:
                break
            continue
        encoder_history.append(line)

    # Kill any helper processes that are still running.
    for u_pipe in utility:
        if u_pipe.poll() is None:
            u_pipe.kill()

    if pipe.returncode != 0 and pipe.returncode != -2:
        msg1 = f"Encoder encountered an error: {pipe.returncode}"
        # Fixed: the original glued the history straight onto
        # "Chunk: N" with no separator.
        msg2 = f"Chunk: {chunk.index}\n" + "\n".join(encoder_history)
        log(msg1, msg2)
        # Fixed: sys.exc_info() outside an except block is
        # (None, None, None), so with_traceback(None) was a no-op.
        raise RuntimeError("Error in processing encoding pipe")
コード例 #30
0
ファイル: chunk_queue.py プロジェクト: master-of-zen/Av1an
def create_video_queue_hybrid(project: Project,
                              split_locations: List[int]) -> List[Chunk]:
    """
    Create list of chunks using hybrid segment-select approach

    :param project: the Project
    :param split_locations: a list of frames to split on
    :return: A list of chunks
    """
    keyframes = get_keyframes(str(project.input.resolve()))

    end = [project.get_frames()]

    splits = [0] + split_locations + end

    # Desired chunk boundaries as (start, end) frame pairs.
    segments_list = list(zip(splits, splits[1:]))
    # Only split points that land on real keyframes can be cut without
    # re-encoding; the rest are selected out of the segments below.
    to_split = [x for x in keyframes if x in splits]
    segments = []

    # Make segments
    log("Segmenting Video")
    segment(str(project.input.resolve()), str(project.temp.resolve()),
            to_split[1:])
    log("Segment Done")
    source_path = project.temp / "split"
    queue_files = [x for x in source_path.iterdir() if x.suffix == ".mkv"]
    queue_files.sort(key=lambda p: p.stem)

    # Pair each segment file with its keyframe span, then map every
    # desired split that falls inside that span to the file, re-basing
    # frame numbers to be relative to the segment start.
    kf_list = list(zip(to_split, to_split[1:] + end))
    for f, (x, y) in zip(queue_files, kf_list):
        to_add = [(f, [s[0] - x, s[1] - x]) for s in segments_list
                  if s[0] >= x and s[1] <= y and s[0] - x < s[1] - x]
        segments.extend(to_add)

    chunk_queue = [
        create_select_chunk(project, index, file, *cb)
        for index, (file, cb) in enumerate(segments)
    ]
    return chunk_queue