Example #1
def encode(clip: vs.VideoNode, binary: str, output_file: str, **args) -> None:
	"""Stolen from lyfunc
	Args:
		clip (vs.VideoNode): Source filtered clip
		binary (str): Path to x264 binary.
		output_file (str): Path to the output file.
	"""
	cmd = [binary,
		   "--demuxer", "y4m",
		   "--frames", f"{clip.num_frames}",
		   "--sar", "1:1",
		   "--output-depth", "10",
		   "--output-csp", "i420",
		   "--colormatrix", "bt709",
		   "--colorprim", "bt709",
		   "--transfer", "bt709",
		   "--no-fast-pskip",
		   "--no-dct-decimate",
		   "--partitions", "all",
		   "-o", output_file,
		   "-"]
	for i, v in args.items():
		i = "--" + i if i[:2] != "--" else i
		i = i.replace("_", "-")
		if i in cmd:
			cmd[cmd.index(i) + 1] = str(v)
		else:
			cmd.extend([i, str(v)])

	print("Encoder command: ", " ".join(cmd), "\n")
	process = subprocess.Popen(cmd, stdin=subprocess.PIPE)
	clip.output(process.stdin, y4m=True, progress_update=lambda value, endvalue:
				print(f"\rVapourSynth: {value}/{endvalue} ~ {100 * value // endvalue}% || Encoder: ", end=""))
	process.communicate()
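A hypothetical call, assuming `filtered` is the finished clip and x264 sits on PATH: keyword arguments are turned into CLI flags (underscores become dashes), and flags already present in the template are overridden in place rather than duplicated.

# crf/preset are appended as --crf/--preset; output_depth replaces the
# template's existing "--output-depth 10" entry.
encode(filtered, "x264", "ep01.264", crf="16.5", preset="veryslow", output_depth="8")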
Example #2
def generate_keyframes(clip: vs.VideoNode,
                       out_path=None,
                       no_header=False) -> None:
    """
    The exact same version as found in kagefunc.
    """
    clip = core.resize.Bilinear(clip, 640, 360, format=vs.YUV420P8)  # speed up the analysis by resizing first

    if args.scxvid:
        clip = core.scxvid.Scxvid(clip)
    else:
        clip = core.wwxd.WWXD(clip)

    out_txt = '' if no_header else "# WWXD log file, using qpfile format\n# Please do not modify this file\n\n"

    for i in range(clip.num_frames):
        props = clip.get_frame(i).props
        if args.scxvid:
            if props["_SceneChangePrev"] == 1:
                out_txt += "%d I -1\n" % i
        else:
            if props["Scenechange"] == 1:
                out_txt += "%d I -1\n" % i
        print(f"Progress: {i}/{clip.num_frames} frames", end="\r")

    with open(out_path, "w") as text_file:
        text_file.write(out_txt)
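The snippet reads a module-level `args` namespace that isn't shown; a minimal argparse setup that would satisfy it might look like this (`src` is an assumed, already-loaded clip, and names other than `scxvid` are assumptions):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--scxvid", action="store_true", help="use Scxvid instead of WWXD")
args = parser.parse_args()

generate_keyframes(src, out_path="keyframes.txt")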
Example #3
def gencomp(num: int = 10,
            path: str = "comp",
            matrix: str = "709",
            firstnum: int = 1,
            **clips: vs.VideoNode) -> None:
    lens = set(c.num_frames for c in clips.values())
    if len(lens) != 1:
        raise ValueError("gencomp: 'Clips must be equal length!'")

    frames = sorted(random.sample(range(lens.pop()), num))
    print("Sample frames: " + str(frames))

    if os.path.exists(path):
        shutil.rmtree(path)

    os.makedirs(path)

    for name, clip in clips.items():
        log.status(f"Rendering clip {name}")
        splice = clip[frames[0]]
        for f in frames[1:]:
            splice += clip[f]
        splice = splice.resize.Bicubic(format=vs.RGB24, matrix_in_s=matrix) \
            .imwri.Write("PNG", os.path.join(path, f"{name}%0{len(str(num))}d.png"), firstnum=firstnum)
        for f in range(splice.num_frames):
            splice.get_frame(f)  # request each frame to trigger the PNG writes
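A sketch of how this might be called, assuming two equal-length clips and that the external `log` helper is in scope; the keyword names become the PNG filename prefixes:

gencomp(num=15, path="comp", src=src, filtered=flt)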
Example #4
def do_encode(clip: vs.VideoNode) -> None:
    """Compression with x26X"""

    if not os.path.isfile(JPBD.output):
        print('\n\n\nVideo encoding')
        bits = clip.format.bits_per_sample
        x265_cmd = f'x265 -o {JPBD.output} - --y4m' + ' '
        x265_cmd += f'--csv {JPBD.name}_log_x265.csv --csv-log-level 2' + ' '
        x265_cmd += '--preset slower' + ' '
        x265_cmd += f'--frames {clip.num_frames} --fps {clip.fps_num}/{clip.fps_den} --output-depth {bits}' + ' '
        x265_cmd += '--rd 3 --no-rect --no-amp --rskip 1 --tu-intra-depth 2 --tu-inter-depth 2 --tskip' + ' '
        x265_cmd += '--merange 48 --weightb' + ' '
        x265_cmd += '--no-strong-intra-smoothing' + ' '
        x265_cmd += '--psy-rd 2.0 --psy-rdoq 1.5 --no-open-gop --keyint 240 --min-keyint 23 --scenecut 40 --rc-lookahead 60 --bframes 16' + ' '
        x265_cmd += '--crf 15 --aq-mode 3 --aq-strength 0.85 --cbqpoffs -2 --crqpoffs -2 --qcomp 0.70' + ' '
        x265_cmd += '--deblock=-1:-1 --no-sao --no-sao-non-deblock' + ' '
        x265_cmd += f'--sar 1 --range limited --colorprim 1 --transfer 1 --colormatrix 1 --min-luma {str(16 << (bits - 8))} --max-luma {str(235 << (bits - 8))}'

        print("Encoder command: ", " ".join(shlex.split(x265_cmd)), "\n")
        process = subprocess.Popen(shlex.split(x265_cmd), stdin=subprocess.PIPE)
        clip.output(process.stdin, y4m=True, progress_update=lambda value, endvalue:
                    print(f"\rVapourSynth: {value}/{endvalue} ~ {100 * value // endvalue}% || Encoder: ", end=""))
        process.communicate()


    print('\n\n\nAudio extraction')
    eac3to_args = ['eac3to', JPBD.src, '2:', JPBD.a_src.format(1), '3:', JPBD.a_src.format(2), '-log=NUL']
    subprocess.run(eac3to_args, text=True, check=True, encoding='utf-8')

    print('\n\n\nAudio cutting')
    eztrim(JPBD.src_clip, (JPBD.frame_start, JPBD.frame_end), JPBD.a_src.format(1), JPBD.a_src_cut.format(1))
    eztrim(JPBD.src_clip, (JPBD.frame_start, JPBD.frame_end), JPBD.a_src.format(2), JPBD.a_src_cut.format(2))
Example #5
def do_encode(clip: vs.VideoNode) -> None:
    """Compression with x265"""
    print('\n\n\nVideo encoding')
    if not os.path.exists(JPBD.output):
        x265_args = [
            "x265", "--y4m", "--frames", f"{clip.num_frames}", "--sar", "1", "--output-depth", "10",
            "--colormatrix", "bt709", "--colorprim", "bt709", "--transfer", "bt709", "--range", "limited",
            "--min-luma", str(16<<2), "--max-luma", str(235<<2),
            "--fps", f"{clip.fps_num}/{clip.fps_den}",
            "-o", JPBD.output, "-",
            "--frame-threads", "4",
            "--no-sao", "--fades",
            "--preset", "slower",
            "--crf", "15", "--qcomp", "0.70",
            "--bframes", "16",
            "--psy-rd", "2.0", "--psy-rdoq", "1.0",
            "--deblock", "-1:-1",
            "--rc-lookahead", "96",
            "--min-keyint", "23", "--keyint", "360",
            "--aq-mode", "3", "--aq-strength", "1.0"
            ]
        print("Encoder command: ", " ".join(x265_args), "\n")
        process = subprocess.Popen(x265_args, stdin=subprocess.PIPE)
        clip.output(process.stdin, y4m=True, progress_update=lambda value, endvalue:
                    print(f"\rVapourSynth: {value}/{endvalue} ~ {100 * value // endvalue}% || Encoder: ", end=""))
        process.communicate()

    print('\n\n\nAudio extraction')
    eac3to_args = ['eac3to', JPBD.src, '2:', JPBD.a_src, '-log=NUL']
    subprocess.run(eac3to_args, text=True, check=True, encoding='utf-8')

    print('\n\n\nAudio cutting')
    eztrim(JPBD.src_clip, (JPBD.frame_start, JPBD.frame_end), JPBD.a_src, JPBD.a_src_cut.format(1))

    print('\n\n\nAudio encoding')
    qaac_args = ['qaac', JPBD.a_src_cut.format(1), '-V', '127', '--no-delay', '-o', JPBD.a_enc_cut.format(1)]
    subprocess.run(qaac_args, text=True, check=True, encoding='utf-8')

    ffprobe_args = ['ffprobe', '-loglevel', 'quiet', '-show_entries', 'format_tags=encoder', '-print_format', 'default=nokey=1:noprint_wrappers=1', JPBD.a_enc_cut.format(1)]
    encoder_name = subprocess.check_output(ffprobe_args, encoding='utf-8').strip()
    with open("tags_aac.xml", 'w') as f:
        f.writelines(['<?xml version="1.0"?>', '<Tags>', '<Tag>', '<Targets>', '</Targets>',
                      '<Simple>', '<Name>ENCODER</Name>', f'<String>{encoder_name}</String>', '</Simple>',
                      '</Tag>', '</Tags>'])

    print('\nFinal muxing')
    mkv_args = ['mkvmerge', '-o', JPBD.output_final,
                '--timestamps', '0:symphogearg_12_timecode.txt',
                '--track-name', '0:HEVC BDRip by Vardë@Kodoku-no-Kawarini', '--language', '0:jpn', JPBD.output,
                '--tags', '0:tags_aac.xml', '--track-name', '0:AAC 2.0', '--language', '0:jpn', JPBD.a_enc_cut.format(1),
                '--chapter-language', 'fra', '--chapters', JPBD.chapter]
    subprocess.run(mkv_args, text=True, check=True, encoding='utf-8')

    # Clean up
    files = [JPBD.a_src, JPBD.a_src_cut.format(1),
             JPBD.a_enc_cut.format(1), 'tags_aac.xml']
    for file in files:
        if os.path.exists(file):
            os.remove(file)
Example #6
def do_encode(clip: vs.VideoNode) -> None:
    """Compression with x26X"""
    print('\n\n\nVideo encoding')
    x265_cmd = f'x265 -o {JPBD.output} - --y4m' + ' '
    x265_cmd += f'--csv {JPBD.name}_log_x265.csv --csv-log-level 2' + ' '
    x265_cmd += '--frame-threads 8 --pmode --pme --preset slower' + ' '
    x265_cmd += f'--frames {clip.num_frames} --fps {clip.fps_num}/{clip.fps_den} --output-depth 10' + ' '
    x265_cmd += '--rd 3 --no-rect --no-amp --rskip 1 --tu-intra-depth 2 --tu-inter-depth 2 --tskip' + ' '
    x265_cmd += '--merange 48 --weightb' + ' '
    x265_cmd += '--no-strong-intra-smoothing' + ' '
    x265_cmd += '--psy-rd 2.0 --psy-rdoq 1.0 --no-open-gop --keyint 360 --min-keyint 12 --scenecut 45 --rc-lookahead 120 --bframes 16' + ' '
    x265_cmd += '--crf 15 --aq-mode 3 --aq-strength 0.85 --qcomp 0.70' + ' '
    x265_cmd += '--deblock=-1:-1 --no-sao --no-sao-non-deblock' + ' '
    x265_cmd += f'--sar 1 --range limited --colorprim 1 --transfer 1 --colormatrix 1 --min-luma {str(16<<2)} --max-luma {str(235<<2)}'

    print("Encoder command: ", " ".join(shlex.split(x265_cmd)), "\n")
    process = subprocess.Popen(shlex.split(x265_cmd), stdin=subprocess.PIPE)
    clip.output(
        process.stdin,
        y4m=True,
        progress_update=lambda value, endvalue: print(
            f"\rVapourSynth: {value}/{endvalue} ~ {100 * value // endvalue}% || Encoder: ",
            end=""))
    process.communicate()

    print('\n\n\nAudio extraction')
    eac3to_args = ['eac3to', JPBD.src, '2:', JPBD.a_src, '-log=NUL']
    subprocess.run(eac3to_args, text=True, check=True, encoding='utf-8')

    print('\n\n\nAudio cutting')
    eztrim(JPBD.src_clip, (JPBD.frame_start, JPBD.frame_end), JPBD.a_src,
           JPBD.a_src_cut.format(1))

    print('\n\n\nAudio encoding')
    ffmpeg_args = [
        'ffmpeg', '-i',
        JPBD.a_src_cut.format(1), '-compression_level', '12', '-lpc_type',
        'cholesky', '-lpc_passes', '3', '-exact_rice_parameters', '1',
        JPBD.a_enc_cut.format(1)
    ]
    subprocess.run(ffmpeg_args, text=True, check=True, encoding='utf-8')

    print('\nFinal muxing')
    mkv_args = [
        'mkvmerge', '-o', JPBD.output_final, '--track-name',
        '0:HEVC BDRip by Vardë@Raws-Maji', '--language', '0:jpn', JPBD.output,
        '--track-name', '0:FLAC 2.0', '--language', '0:jpn',
        JPBD.a_enc_cut.format(1)
    ]
    subprocess.run(mkv_args, text=True, check=True, encoding='utf-8')

    # Clean up
    files = [JPBD.a_src, JPBD.a_src_cut.format(1), JPBD.a_enc_cut.format(1)]
    for file in files:
        if os.path.exists(file):
            os.remove(file)
Example #7
def do_encode(clip: vs.VideoNode) -> None:
    """Compression with x26X"""
    print('\n\n\nVideo encoding')
    bits = clip.format.bits_per_sample
    x265_cmd = f'x265 -o {JPBD.output} - --y4m' + ' '
    x265_cmd += f'--csv {JPBD.name}_log_x265.csv --csv-log-level 2' + ' '
    x265_cmd += '--frame-threads 4 --pmode --pme --preset slower' + ' '
    x265_cmd += f'--frames {clip.num_frames} --fps {clip.fps_num}/{clip.fps_den} --output-depth {bits}' + ' '
    x265_cmd += '--rd 3 --no-rect --no-amp --rskip 1 --tu-intra-depth 2 --tu-inter-depth 2 --tskip' + ' '
    x265_cmd += '--merange 36 --weightb' + ' '
    x265_cmd += '--no-strong-intra-smoothing' + ' '
    x265_cmd += '--psy-rd 1.85 --psy-rdoq 0.8 --no-open-gop --keyint 240 --min-keyint 23 --scenecut 45 --rc-lookahead 60 --bframes 10' + ' '
    x265_cmd += '--crf 16 --aq-mode 3 --aq-strength 0.85 --qcomp 0.75' + ' '
    x265_cmd += '--deblock=-1:-1 --no-sao --no-sao-non-deblock' + ' '
    x265_cmd += f'--sar 1 --range limited --colorprim 1 --transfer 1 --colormatrix 1 --min-luma {str(16<<(bits - 8))} --max-luma {str(235<<(bits - 8))}'

    print("Encoder command: ", " ".join(shlex.split(x265_cmd)), "\n")
    process = subprocess.Popen(shlex.split(x265_cmd), stdin=subprocess.PIPE)
    clip.output(process.stdin, y4m=True, progress_update=lambda value, endvalue:
                print(f"\rVapourSynth: {value}/{endvalue} ~ {100 * value // endvalue}% || Encoder: ", end=""))
    process.communicate()

    print('\n\n\nAudio extraction')
    eac3to_args = ['eac3to', JPBD.src, '2:', JPBD.a_src, '-log=NUL']
    subprocess.run(eac3to_args, text=True, check=True, encoding='utf-8')

    print('\n\n\nAudio cutting')
    eztrim(JPBD.src_clip, (JPBD.frame_start, JPBD.frame_end), JPBD.a_src, JPBD.a_src_cut.format(1))

    print('\n\n\nAudio encoding')
    qaac_args = ['qaac', JPBD.a_src_cut.format(1), '-V', '127', '--no-delay', '-o', JPBD.a_enc_cut.format(1)]
    subprocess.run(qaac_args, text=True, check=True, encoding='utf-8')

    ffprobe_args = ['ffprobe', '-loglevel', 'quiet', '-show_entries', 'format_tags=encoder', '-print_format', 'default=nokey=1:noprint_wrappers=1', JPBD.a_enc_cut.format(1)]
    encoder_name = subprocess.check_output(ffprobe_args, encoding='utf-8').strip()
    with open("tags_aac.xml", 'w') as f:
        f.writelines(['<?xml version="1.0"?>', '<Tags>', '<Tag>', '<Targets>', '</Targets>',
                      '<Simple>', '<Name>ENCODER</Name>', f'<String>{encoder_name}</String>', '</Simple>',
                      '</Tag>', '</Tags>'])

    print('\nFinal muxing')
    mkv_args = ['mkvmerge', '-o', JPBD.output_final,
                '--track-name', '0:HEVC BDRip by Vardë@Natsumi-no-Sekai', '--language', '0:jpn', JPBD.output,
                '--tags', '0:tags_aac.xml', '--track-name', '0:AAC 2.0', '--language', '0:jpn', JPBD.a_enc_cut.format(1),
                '--chapter-language', 'fr', '--chapters', JPBD.chapter]
    subprocess.run(mkv_args, text=True, check=True, encoding='utf-8')

    # Clean up
    files = [JPBD.a_src, JPBD.a_src_cut.format(1),
             JPBD.a_enc_cut.format(1), 'tags_aac.xml']
    for file in files:
        if os.path.exists(file):
            os.remove(file)
Example #8
def do_encode(clip: vs.VideoNode) -> None:
    """Lossless compression with FFV1"""
    print('\n\n\nVideo encoding')
    if not os.path.exists(JPBD_NCOP.output):
        ffv1_args = [
            'ffmpeg', '-i', '-', '-vcodec', 'ffv1', '-coder', '1', '-context', '0',
            '-g', '1', '-level', '3', '-threads', '8',
            '-slices', '24', '-slicecrc', '1', "_assets/" + JPBD_NCOP.name + "_lossless.mkv"
        ]
        print("Encoder command: ", " ".join(ffv1_args), "\n")
        process = subprocess.Popen(ffv1_args, stdin=subprocess.PIPE)
        clip.output(process.stdin, y4m=True, progress_update=lambda value, endvalue:
                    print(f"\rVapourSynth: {value}/{endvalue} ~ {100 * value // endvalue}% || Encoder: ", end=""))
        process.communicate()
Example #9
def is_topleft(clip: vs.VideoNode) -> bool:
    """
    Simple function that checks if chroma is top-left aligned or not.

    In any other case, it's fairly safe to assume the chroma is aligned to the
    center-left, as was the default before 4K UHD BDs and BT.2020 were a thing.
    This is basically a more complex check for BT.2020 material.
    """
    if not clip.format:
        raise VariableFormatError("is_topleft")

    if clip.format.subsampling_h != 1 or clip.format.subsampling_w != 1:
        return False

    props = clip.get_frame(0).props

    # If the chroma location is set, use it; 2 means top-left.
    cloc = props.get("_ChromaLocation")
    if cloc is not None:
        return cloc == 2

    # If the primaries are set (they should be), check whether they're BT.2020
    prims = props.get("_Primaries")
    if prims not in [None, 2]:
        return prims == 9

    # These should be the minimum 4:3 and 21:9 dimensions after cropping 4K
    # with letter- and/or pillarboxing.
    return clip.width >= 2880 and clip.height >= 1645
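A possible way to feed the result into a resizer, using the check to pick the chroma location for an RGB conversion (`src` is an assumed 4:2:0 clip):

chromaloc = "top_left" if is_topleft(src) else "left"
rgb = src.resize.Bicubic(format=vs.RGB48, chromaloc_in_s=chromaloc)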
Example #10
def do_encode(clip: vs.VideoNode) -> None:
    """Compression with x265"""
    print('\n\n\nVideo encoding')
    x265_args = [
        X265, "--y4m", "--frames", f"{clip.num_frames}", "--sar", "1", "--output-depth", "10",
        "--colormatrix", "bt709", "--colorprim", "bt709", "--transfer", "bt709", "--range", "limited",
        "--min-luma", str(16<<2), "--max-luma", str(235<<2),
        "--fps", f"{clip.fps_num}/{clip.fps_den}",
        "-o", JPBD.output, "-",
        "--frame-threads", "16",
        "--no-sao", "--fades",
        "--preset", "slower",
        "--crf", "14.5", "--qcomp", "0.72",
        "--bframes", "16",
        "--psy-rd", "2.0", "--psy-rdoq", "1.0",
        "--deblock", "-1:-1",
        "--rc-lookahead", "96",
        "--min-keyint", "23", "--keyint", "360",
        "--aq-mode", "3", "--aq-strength", "1.0"
        ]
    print("Encoder command: ", " ".join(x265_args), "\n")
    process = subprocess.Popen(x265_args, stdin=subprocess.PIPE)
    clip.output(process.stdin, y4m=True, progress_update=lambda value, endvalue:
                print(f"\rVapourSynth: {value}/{endvalue} ~ {100 * value // endvalue}% || Encoder: ", end=""))
    process.communicate()

    print('\n\n\nAudio extraction')
    mka = MKVFile()
    mka.add_track(MKVTrack(JPBD.src, 1))
    mka.mux(JPBD.a_src)

    print('\n\n\nAudio cutting')
    eztrim(JPBD.src_clip, (JPBD.frame_start, JPBD.frame_end), JPBD.a_src, mkvextract_path='mkvextract')

    print('\n\n\nAudio encoding')
    qaac_args = ['qaac64', JPBD.a_src_cut.format(1), '-V', '127', '--no-delay', '-o', JPBD.a_enc_cut.format(1)]
    subprocess.run(qaac_args, text=True, check=True, encoding='utf-8')

    print('\nFinal muxing')
    mkv = MKVFile()
    mkv.add_track(MKVTrack(JPBD.output, language='jpn', default_track=True))
    mkv.add_track(MKVTrack(JPBD.a_enc_cut.format(1), language='jpn', default_track=True))
    mkv.chapters(JPBD.chapter, 'jpn')
    mkv.mux(JPBD.output_final)
Example #11
def _sraa_frameeval(n: int, clip: vs.VideoNode, w: int,
                    h: int) -> vs.VideoNode:
    frame = clip.get_frame(n)
    if frame.height < 1080:
        rfactor = 2.5
    else:
        rfactor = 1.5
    return upscaled_sraa(clip.resize.Bicubic(frame.width, frame.height),
                         rfactor=rfactor,
                         h=h,
                         ar=w / h)
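One plausible way to wire this up, since FrameEval callbacks only receive the frame number: bind the remaining arguments with functools.partial and evaluate against a blank clip of the target size (`src`, `w`, and `h` are assumptions for a variable-resolution source and its target dimensions):

from functools import partial

aa = core.std.FrameEval(
    core.std.BlankClip(src, width=w, height=h),
    partial(_sraa_frameeval, clip=src, w=w, h=h))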
Example #12
def generate_keyframes(clip: vs.VideoNode, out_path: str) -> None:
    """Generate qp filename for keyframes to pass the file into the encoder
       to force I frames. Use both scxvid and wwxd. Original function stolen from kagefunc.

    Args:
        clip (vs.VideoNode): Source clip
        out_path (str): output path
    """
    clip = core.resize.Bilinear(clip, 640, 360)
    clip = core.scxvid.Scxvid(clip)
    clip = core.wwxd.WWXD(clip)
    out_txt = ""
    for i in range(clip.num_frames):
        props = clip.get_frame(i).props  # fetch each frame once instead of twice
        if props["_SceneChangePrev"] == 1 or props["Scenechange"] == 1:
            out_txt += "%d I -1\n" % i
        if i % 2000 == 0:
            print(i)
    with open(out_path, "w") as text_file:
        text_file.write(out_txt)
Example #13
def print_vs_output_colorspace_info(vs_output: vs.VideoNode) -> None:
    from vspreview.core import Output

    props = vs_output.get_frame(0).props
    logging.debug('Matrix: {}, Transfer: {}, Primaries: {}, Range: {}'.format(
        Output.Matrix.values[props['_Matrix']] if '_Matrix' in props else None,
        Output.Transfer.values[props['_Transfer']]
        if '_Transfer' in props else None,
        Output.Primaries.values[props['_Primaries']]
        if '_Primaries' in props else None,
        Output.Range.values[props['_ColorRange']]
        if '_ColorRange' in props else None,
    ))
Example #14
    def _GetMatrix(clip: vs.VideoNode) -> int:
        frame = clip.get_frame(0)
        w, h = frame.width, frame.height

        if frame.format.color_family == vs.RGB:
            return 0
        if frame.format.color_family == vs.YCOCG:
            return 8
        if w <= 1024 and h <= 576:
            return 5
        if w <= 2048 and h <= 1536:
            return 1
        return 9
Example #15
def ccd(clip: vs.VideoNode, threshold: float) -> vs.VideoNode:
    """taken from a currently-private gist, but should become available in `vs-denoise` soon enough"""
    from vsutil import join, split

    assert clip.format
    bits = clip.format.bits_per_sample
    is_float = clip.format.sample_type == vs.FLOAT
    peak = 1.0 if is_float else (1 << bits) - 1
    threshold /= peak
    # threshold = threshold ** 2 / 195075.0

    rgb = clip.resize.Bicubic(format=vs.RGBS)

    pre1 = rgb.resize.Point(
        clip.width+24, clip.height+24,
        src_left=-12, src_top=-12,
        src_width=clip.width+24, src_height=clip.height+24
    )
    pre2 = rgb.resize.Point(
        rgb.width+24, rgb.height+24,
        src_width=rgb.width+24, src_height=rgb.height+24
    )
    pre_planes = split(pre1)

    shift_planes_clips = [
        split(pre2.resize.Point(src_left=-x, src_top=-y))
        for x in range(0, 25, 8) for y in range(0, 25, 8)
    ]
    denoise_clips = [
        core.std.Expr(pre_planes + shift_planes, f'x a - dup * y b - dup * + z c - dup * + sqrt {threshold} <')
        for shift_planes in shift_planes_clips
    ]

    cond_planes_clips = [
        join([core.std.Expr([splane, dclip], 'y 0 > x 0 ?') for splane in splanes])
        for dclip, splanes in zip(denoise_clips, shift_planes_clips)
    ]

    # add_expr and EXPR_VARS are external helpers (presumably from the same gist) for building N-ary Expr strings
    denoise = core.std.Expr(denoise_clips, add_expr(len(denoise_clips)) + ' 1 +')
    denoise = join([denoise] * 3)

    n_op = len(cond_planes_clips) + 1
    avg = core.std.Expr([pre1] + cond_planes_clips + [denoise], add_expr(n_op) + f' {EXPR_VARS[n_op]} /')
    avg = avg.resize.Bicubic(
        format=clip.format.id, dither_type='error_diffusion', matrix=cast(int, clip.get_frame(0).props['_Matrix'])
    )
    avg = avg.std.Crop(12, 12, 12, 12)

    assert avg.format
    return core.std.ShufflePlanes([clip, avg], [0, 1, 2], avg.format.color_family)
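A minimal call sketch; the threshold is divided by the input clip's peak value, so for an 8-bit-range source something like 4 is a sane starting point (an assumption, not a documented default):

denoised = ccd(src, threshold=4)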
Example #16
def do_encode(clip: vs.VideoNode) -> None:
    """Compression with x265"""
    print('\n\n\nVideo encoding')
    x265_args = [
        X265, "--y4m", "--frames", f"{clip.num_frames}", "--sar", "1",
        "--output-depth", "10", "--colormatrix", "bt709", "--colorprim",
        "bt709", "--transfer", "bt709", "--range", "limited", "--min-luma",
        str(16 << 2), "--max-luma",
        str(235 << 2), "--fps", f"{clip.fps_num}/{clip.fps_den}", "-o",
        JPBD.output, "-", "--frame-threads", "16", "--no-sao", "--fades",
        "--preset", "slower", "--crf", "15", "--qcomp", "0.70", "--bframes",
        "16", "--psy-rd", "2.0", "--psy-rdoq", "1.0", "--deblock", "-1:-1",
        "--rc-lookahead", "72", "--min-keyint", "23", "--keyint", "240",
        "--aq-mode", "3", "--aq-strength", "1.0"
    ]
    print("Encoder command: ", " ".join(x265_args), "\n")
    process = subprocess.Popen(x265_args, stdin=subprocess.PIPE)
    clip.output(
        process.stdin,
        y4m=True,
        progress_update=lambda value, endvalue: print(
            f"\rVapourSynth: {value}/{endvalue} ~ {100 * value // endvalue}% || Encoder: ",
            end=""))
    process.communicate()
Example #17
def bob(clip: vs.VideoNode, tff: bool | None = None) -> vs.VideoNode:
    """
    Very simple bobbing function. Shouldn't be used for regular filtering,
    but as a very cheap bobber for other functions.

    :param clip:    Input clip
    :param tff:     Top-field-first. `False` sets it to Bottom-Field-First.
                    If None, get the field order from the _FieldBased prop.

    :return:        Bobbed clip
    """
    if get_prop(clip.get_frame(0), '_FieldBased', int) == 0 and tff is None:
        raise vs.Error("bob: 'You must set `tff` for this clip!'")
    elif isinstance(tff, (bool, int)):
        clip = clip.std.SetFieldBased(int(tff) + 1)

    return Catrom().scale(clip.std.SeparateFields(), clip.width, clip.height)
Example #18
def generate_keyframes(clip: vs.VideoNode, out_path=None) -> None:
    """
    probably only useful for fansubbing
    generates qp-filename for keyframes to simplify timing
    """
    import os
    # Speed up the analysis by resizing first. Converting to 8 bit also seems to improve the accuracy of wwxd.
    clip = core.resize.Bilinear(clip, 640, 360, format=vs.YUV420P8)
    clip = core.wwxd.WWXD(clip)
    out_txt = "# WWXD log file, using qpfile format\n\n"
    for i in range(clip.num_frames):
        if clip.get_frame(i).props.Scenechange == 1:
            out_txt += "%d I -1\n" % i
        if i % 1000 == 0:
            print(i)
    out_path = fallback(out_path,
                        os.path.expanduser("~") + "/Desktop/keyframes.txt")
    with open(out_path, "w") as text_file:
        text_file.write(out_txt)
Example #19
def get_matrix(clip: vs.VideoNode) -> int:
    """
    Helper function to get the matrix for a clip.

    :param clip:    src clip

    :return:        Value representing a matrix
    """
    frame = clip.get_frame(0)
    w, h = frame.width, frame.height

    if frame.format.color_family == vs.RGB:
        return 0
    if frame.format.color_family == vs.YCOCG:
        return 8
    if w <= 1024 and h <= 576:
        return 5
    if w <= 2048 and h <= 1536:
        return 1
    return 9
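The returned integers are _Matrix enum values (0 RGB, 1 BT.709, 5 BT.470bg, 8 YCoCg, 9 BT.2020 non-constant), so the result can be passed straight to a resizer:

rgb = clip.resize.Bicubic(format=vs.RGB24, matrix_in=get_matrix(clip))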
Example #20
File: snippet.py Project: szabo92/gistable
def generate_keyframes(clip: vs.VideoNode, out_path=None) -> None:
    """
    probably only useful for fansubbing
    generates qp-filename for keyframes to simplify timing
    disclaimer: I don't actually know why -1 is forced. I just ported the avisynth script
    """
    import os
    clip = core.resize.Bilinear(clip, 640, 360)  # speed up the analysis by resizing first
    clip = core.wwxd.WWXD(clip)
    out_txt = ""
    for i in range(clip.num_frames):
        if clip.get_frame(i).props.Scenechange == 1:
            out_txt += "%d I -1\n" % i
        if i % 1000 == 0:
            print(i)
    if out_path is None:
        out_path = os.path.expanduser("~") + "/Desktop/keyframes.txt"
    text_file = open(out_path, "w")
    text_file.write(out_txt)
    text_file.close()
Example #21
    def execute(self, n: int, clip: vs.VideoNode) -> vs.VideoNode:
        """
        Copies the xinntao ESRGAN repo's main execution code. The only real difference is it doesn't use cv2, and
        instead uses vapoursynth ports of cv2's functionality for read and writing "images".

        Code adapted from:
        https://github.com/xinntao/ESRGAN/blob/master/test.py#L26
        """
        if not self.rrdb_net_model:
            raise ValueError("VSGAN: No ESRGAN model has been loaded, use VSGAN.load_model().")
        # 255 being the max value for an RGB color space, could this be key to YUV support in the future?
        max_n = 255.0
        img = self.frame_to_np(clip.get_frame(n))
        img = img * 1.0 / max_n
        img = np.transpose(img[:, :, (0, 1, 2)], (2, 0, 1))  # HWC to CHW
        img = torch.from_numpy(img).float()
        img_lr = img.unsqueeze(0).to(self.torch_device)
        with torch.no_grad():
            output = self.rrdb_net_model(img_lr).data.squeeze().float().cpu().clamp_(0, 1).numpy()
        output = np.transpose(output[(2, 1, 0), :, :], (1, 2, 0))  # reverse channel order, CHW to HWC
        output = (output * max_n).round()
        return self.np_to_clip(clip, output)
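Since FrameEval callbacks only receive the frame number, a plausible wiring binds the clip with functools.partial and evaluates against a blank clip sized for the model's output (`vsgan` and the 4x scale are assumptions, mirroring how ESRGAN models typically upscale):

from functools import partial

sr = core.std.FrameEval(
    core.std.BlankClip(clip, width=clip.width * 4, height=clip.height * 4),
    partial(vsgan.execute, clip=clip))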
Example #22
def generate_keyframes(clip: vs.VideoNode,
                       out_path=None,
                       no_header=False) -> None:
    """
    probably only useful for fansubbing
    generates qp-filename for keyframes to simplify timing
    """
    clip = core.resize.Bilinear(
        clip, 640, 360,
        format=vs.YUV420P8)  # speed up the analysis by resizing first
    clip = core.wwxd.WWXD(clip)
    if no_header:
        out_txt = ''
    else:
        out_txt = "# WWXD log file, using qpfile format\n\n"
    for i in range(clip.num_frames):
        if clip.get_frame(i).props.Scenechange == 1:
            out_txt += "%d I -1\n" % i
        if i % 1000 == 0:
            print(f"Progress: {i}/{clip.num_frames} frames")
    text_file = open(out_path, "w")
    text_file.write(out_txt)
    text_file.close()
Example #23
def clip_to_timecodes(src_clip: vs.VideoNode) -> Deque[float]:
    """
    Cached function to return a list of timecodes for vfr clips.

    The first call to this function can be `very` expensive depending on the `src_clip`
    length and the source filter used.

    Subsequent calls on the same clip will return the previously generated list of timecodes.
    The timecodes are `floats` representing seconds from the start of the `src_clip`.

    If you have ``rich`` installed, will output a pretty progress bar as this process can take a long time.
    """
    # fmt: off
    try:
        from rich.progress import track
        rich = True
    except ImportError:
        track = lambda x, description, total: x  # type: ignore
        rich = False
    # fmt: on
    timecodes = collections.deque([0.0], maxlen=src_clip.num_frames + 1)
    curr_time = fractions.Fraction()
    init_percentage = 0
    for frame in track(src_clip.frames(), description="Finding timestamps...", total=src_clip.num_frames):
        num = cast(int, frame.props["_DurationNum"])
        den = cast(int, frame.props["_DurationDen"])
        curr_time += fractions.Fraction(num, den)
        timecodes.append(float(curr_time))
        if rich:
            pass  # if ran in a normal console/terminal, should render a pretty progress bar
        else:
            percentage_done = round(100 * len(timecodes) / src_clip.num_frames)
            if percentage_done % 10 == 0 and percentage_done != init_percentage:
                print(rf"Finding timecodes for variable-framerate clip: {percentage_done}% done")
                init_percentage = percentage_done
    return timecodes
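The resulting timecodes can be written out as a "timecode format v2" file (one timestamp in milliseconds per line), which muxers such as mkvmerge accept:

timecodes = clip_to_timecodes(src)
with open("timecodes_v2.txt", "w") as f:
    f.write("# timecode format v2\n")
    f.writelines(f"{t * 1000:.6f}\n" for t in timecodes)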
Example #24
File: clip.py Project: stuxcrystal/yuuno2
from asyncio import wrap_future
from typing import Awaitable

from vapoursynth import VideoFrame, VideoNode


def get_frame_async(node: VideoNode, frame: int) -> Awaitable[VideoFrame]:
    sfut = node.get_frame_async(frame)
    return wrap_future(sfut)
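A small usage sketch inside a coroutine; wrap_future ties VapourSynth's concurrent future to the running asyncio loop:

import asyncio

async def print_first_frame_props(node: VideoNode) -> None:
    frame = await get_frame_async(node, 0)
    print(dict(frame.props))

# asyncio.run(print_first_frame_props(clip))  # `clip` is an assumed loaded clip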
Example #25
def is_limited_range(clip: vs.VideoNode) -> bool:
    """Returns true if the input clip is limited range."""
    return clip.get_frame(0).props.get("_ColorRange") == 1
Example #26
def comp(*frames: int,
         rand: int = 0,
         slicing: bool = False,
         slices: Optional[List[str]] = None,
         full: bool = False,
         label: bool = True,
         label_size: int = 30,
         label_alignment: int = 7,
         stack_type: str = 'clip',
         **in_clips: vs.VideoNode) -> vs.VideoNode:
    """
    All-encompassing comparison tool for VapourSynth preview.

    Allows an infinite number of clips to be compared.
    Can compare entire clips, frames, or slices.
    Visually arranges clips in five ways:
        continuous clip (A0 B0 A1 B1)
        vertical stacking
        horizontal stacking
        mosaic
        split (A | B [| C])

    :param frames: frame number(s) to be compared
        Can be left blank.

    :param rand: number of random frames to compare from all clips (Default value = 0)
        Can be left blank.

    :param slicing: changes output to slicing mode (Default value = False)
        Overrides 'frames' and 'rand'.

    :param slices: Python slices of all clips to be compared (Default value = None)
        Does not accept advanced / combined slicing.
        Example: '[":16","200:400","570:"]' for frames 0-15,200-399,570+
        Can be left blank if slicing is False.

    :param full: whether or not to compare full length of clips (Default value = False)
        Overrides 'frames', 'rand', and 'slicing'/'slices'

    :param label: labels clips with their name (Default value = True)

    :param label_size: <int> fontsize for 'label' (Default value = 30)

    :param label_alignment: numpad alignment of 'label' (Default value = 7)

    :param stack_type: type of comparison to output (Default value = 'clip')
        Accepts 'clip', 'vertical', 'horizontal', 'mosaic', 'split'.
        'split' allows only 2 or 3 clips and overrides 'label_alignment'

    :param in_clips: comma separated pairs of name=clip
        :bit depth: ANY
        :color family: ANY
        :float precision: ANY
        :sample type: ANY
        :subsampling: ANY

    :returns: processed clip
    """
    def _markclips(clips, names, label_size,
                   label_alignment) -> List[vs.VideoNode]:
        style = f'sans-serif,{label_size},&H00FFFFFF,&H000000FF,&H00000000,&H00000000,0,0,0,0,100,100,0,0,1,3,1,' \
                f'{label_alignment},10,10,10,1'
        margins = [10, 0, 10, 0]
        markedclips = []

        if isinstance(clips, vs.VideoNode):
            return core.sub.Subtitle(clips,
                                     str(names),
                                     style=style,
                                     margins=margins)
        else:
            for name, clip in zip(names, clips):
                markedclip = core.sub.Subtitle(clip,
                                               str(name),
                                               style=style,
                                               margins=margins)
                markedclips.append(markedclip)

        return markedclips

    def _cutclips(clips, frames, rand) -> List[vs.VideoNode]:
        if slicing:
            cut_clips = []
            for i, clip in enumerate(clips):
                cut_clips.append(core.std.BlankClip(clip, length=1))
                for s in slices:
                    a, b = s.split(':')
                    if a == '': cut_clips[i] += clip[:int(b)]
                    elif b == '': cut_clips[i] += clip[int(a):]
                    else: cut_clips[i] += clip[int(a):int(b)]

        else:
            if len(frames) == 0 and rand < 1: rand = 1
            if rand > 0:
                max_frame = min(clip.num_frames for clip in clips) - 1
                if rand == 1: frames.append(randint(0, max_frame))
                else: frames = frames + sample(range(max_frame), rand)

            cut_clips = []
            for i, clip in enumerate(clips):
                cut_clips.append(core.std.BlankClip(clip, length=1))
                for f in frames:
                    cut_clips[i] += clip[f]

        for i in range(len(cut_clips)):
            cut_clips[i] = cut_clips[i][1:]

        return cut_clips

    def _assemble(markedclips: List[vs.VideoNode],
                  stack_type: str) -> vs.VideoNode:
        def _stack2d(clips, size):
            rows = []
            for i in range(0, size):
                min_s = (i * size)
                max_s = ((i + 1) * size)
                if i == 0:
                    row_clips = clips[:max_s]
                    rows.append(core.std.StackHorizontal(row_clips))
                else:
                    row_clips = clips[min_s:max_s]
                    rows.append(core.std.StackHorizontal(row_clips))

            return core.std.StackVertical(rows)

        def _root_check(clips):
            def _blank_create(clips, size):
                blank_clips = []
                blank_number = (size**2) - len(clips)
                for _ in range(0, blank_number):
                    blank_clips.append(core.std.BlankClip(clips[0], length=1))
                added_clips = clips + blank_clips

                return _stack2d(added_clips, size)

            root = sqrt(len(clips))
            size = floor(root) + 1

            if int(root + 0.5)**2 != len(clips):
                return _blank_create(clips, size)
            else:
                return _stack2d(clips, int(root))

        def _split(clips):
            width = clips[0].width
            if len(clips) == 2:
                clip_left = _markclips(clips[0], names[0], label_size, 7)
                clip_right = _markclips(clips[1], names[1], label_size, 9)

                clip_left = core.std.Crop(clip_left, 0, width // 2, 0, 0)
                clip_right = core.std.Crop(clip_right, width // 2, 0, 0, 0)

                clips_list = clip_left, clip_right

                return core.std.StackHorizontal(clips_list)

            if len(clips) == 3:
                width = floor(width / 3)
                dwidth = 2 * width

                clip_left = _markclips(clips[0], names[0], label_size, 7)
                clip_middle = _markclips(clips[1], names[1], label_size, 8)
                clip_right = _markclips(clips[2], names[2], label_size, 9)

                clip_left = core.std.Crop(clip_left, 0, dwidth, 0, 0)
                clip_middle = core.std.Crop(clip_middle, width, width, 0, 0)
                clip_right = core.std.Crop(clip_right, dwidth, 0, 0, 0)

                clips_list = clip_left, clip_middle, clip_right

                return core.std.StackHorizontal(clips_list)

        if stack_type == 'vertical':
            return core.std.StackVertical(markedclips)
        elif stack_type == 'horizontal' or (stack_type == 'mosaic'
                                            and len(markedclips) < 3):
            return core.std.StackHorizontal(markedclips)
        elif stack_type == 'mosaic':
            return _root_check(markedclips)
        elif stack_type == 'split' and (len(clips) < 2 or len(clips) > 3):
            raise ValueError(
                'comp: \'split\' stack_type only allows 2 or 3 clips')
        elif stack_type == 'split':
            return _split(clips)
        else:
            return core.std.Interleave(markedclips)

    names = list(in_clips.keys())
    clips = list(in_clips.values())

    for i in range(1, len(clips)):
        if clips[i - 1].width != clips[i].width:
            raise ValueError("comp: the width of all clips must be the same")
        if clips[i - 1].height != clips[i].height:
            raise ValueError("comp: the height of all clips must be the same")
        if clips[i - 1].format != clips[i].format:
            raise ValueError("comp: the format of all clips must be the same")

    if not full:
        frames = list(frames)
        clips = _cutclips(clips, frames, rand)

    if label:
        markedclips = _markclips(clips, names, label_size, label_alignment)
    else:
        markedclips = clips

    return _assemble(markedclips, stack_type)
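A hypothetical preview call, comparing two named clips on two fixed frames plus three random ones as an interleaved clip:

out = comp(1000, 4500, rand=3, src=src, filtered=flt)
out.set_output()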
Example #27
def save(*frames: int,
         rand: int = 0,
         folder: bool = False,
         zoom: int = 1,
         **clips: vs.VideoNode):
    """
    Writes frames as named RGB24 PNG files for easy upload to slowpics.org.

    Running "save(17, 24, rand=2, folder=True, zoom=3, BD=bd, TV=tv)"
    will save four 3x-point-upscaled frames (17, 24, and 2 randoms) in folders named 'BD' and 'TV'.

    :param frames: frame number(s) to save

    :param rand: number of random frames to extract (Default value = 0)

    :param folder: saves images into named sub-folders (Default value = False)
        If True, saving will not prefix image files with clip name.

    :param zoom: zoom factor (Default value = 1)

    :param clips: comma separated pairs of name=clip to save frames from
        :bit depth: ANY
        :color family: ANY
        :float precision: ANY
        :sample type: ANY
        :subsampling: ANY
    """
    frames = list(frames)
    if len(frames) == 0 and rand < 1: rand = 1

    if rand > 0:
        max_frame = min(clip.num_frames for name, clip in clips.items()) - 1
        if rand == 1: frames.append(randint(0, max_frame))
        else: frames = frames + sample(range(max_frame), rand)

    if folder:
        for name, clip in clips.items():
            os.makedirs(str(name), exist_ok=True)
            with _cd(str(name)):
                for f in frames:
                    out = core.imwri.Write(clip[f].resize.Point(
                        width=(zoom * clip.width),
                        height=(zoom * clip.height),
                        format=vs.RGB24,
                        matrix_in_s='709',
                        range=0,
                        range_in=0,
                        dither_type='error_diffusion'),
                                           'PNG',
                                           '%06d.png',
                                           firstnum=f)
                    out.get_frame(0)
    else:
        for name, clip in clips.items():
            for f in frames:
                out = core.imwri.Write(clip[f].resize.Point(
                    width=(zoom * clip.width),
                    height=(zoom * clip.height),
                    format=vs.RGB24,
                    matrix_in_s='709',
                    range=0,
                    range_in=0,
                    dither_type='error_diffusion'),
                                       'PNG',
                                       f"{name}%06d.png",
                                       firstnum=f)
                out.get_frame(0)
Example #28
def ivtc_credits(clip: vs.VideoNode,
                 frame_ref: int,
                 tff: bool | None = None,
                 interlaced: bool = True,
                 dec: bool | None = None,
                 bob_clip: vs.VideoNode | None = None,
                 qtgmc_args: Dict[str, Any] = {}) -> vs.VideoNode:
    """
    Deinterlacing function for interlaced credits (60i/30p) on top of telecined video (24p).
    This is a combination of havsfunc's dec_txt60mc, ivtc_txt30mc, and ivtc_txt60mc functions.
    The credits are interpolated and decimated to match the output clip.

    The function assumes you're passing a telecined clip (that's native 24p).
    If your clip is already fieldmatched, decimation will automatically be enabled unless you set it to False.
    Likewise, if your credits are 30p (as opposed to 60i), you should set `interlaced` to False.

    The recommended way to use this filter is to trim out the area with interlaced credits,
    apply this function, and `vsutil.insert_clip` the clip back into a properly IVTC'd clip.
    Alternatively, use `muvsfunc.VFRSplice` to splice the clip back in if you're dealing with a VFR clip.

    :param clip:            Input clip. Framerate must be 30000/1001.
    :param frame_ref:       First frame in the pattern. Expected pattern is ABBCD,
                            except for when ``dec`` is enabled, in which case it's AABCD.
    :param tff:             Top-field-first. `False` sets it to Bottom-Field-First.
    :param interlaced:      60i credits. Set to false for 30p credits.
    :param dec:             Decimate input clip as opposed to IVTC.
                            Automatically enabled if certain fieldmatching props are found.
                            Can be forcibly disabled by setting it to `False`.
    :param bob_clip:        Custom bobbed clip. If `None`, uses a QTGMC clip.
                            Framerate must be 60000/1001.
    :param qtgmc_args:      Arguments to pass on to QTGMC.
                            Accepts any parameter except for FPSDivisor and TFF.

    :return:                IVTC'd/decimated clip with deinterlaced credits
    """
    try:
        from havsfunc import QTGMC, DitherLumaRebuild
    except ModuleNotFoundError:
        raise ModuleNotFoundError(
            "ivtc_credits: missing dependency 'havsfunc'")

    check_variable(clip, "ivtc_credits")

    if clip.fps != Fraction(30000, 1001):
        raise ValueError(
            "ivtc_credits: 'Your clip must have a framerate of 30000/1001!'")

    if get_prop(clip.get_frame(0), '_FieldBased', int) == 0 and tff is None:
        raise vs.Error("ivtc_credits: 'You must set `tff` for this clip!'")
    elif isinstance(tff, (bool, int)):
        clip = clip.std.SetFieldBased(int(tff) + 1)

    qtgmc_kwargs: Dict[str, Any] = dict(SourceMatch=3,
                                        Lossless=2,
                                        TR0=2,
                                        TR1=2,
                                        TR2=3,
                                        Preset="Placebo")
    qtgmc_kwargs |= qtgmc_args
    qtgmc_kwargs |= dict(
        FPSDivisor=1,
        TFF=tff or bool(get_prop(clip.get_frame(0), '_FieldBased', int) - 1))

    if dec is not False:  # Automatically enable dec unless set to False
        dec = any(x in clip.get_frame(0).props
                  for x in {"VFMMatch", "TFMMatch"})

        if dec:
            warnings.warn(
                "ivtc_credits: 'Fieldmatched clip passed to function! "
                "dec is set to True. If you want to disable this, set dec=False!'"
            )

    # motion vector and other values
    field_ref = frame_ref * 2
    frame_ref %= 5
    invpos = (5 - field_ref) % 5

    offset = [0, 0, -1, 1, 1][frame_ref]
    pattern = [0, 1, 0, 0, 1][frame_ref]
    direction = [-1, -1, 1, 1, 1][frame_ref]

    blksize = 16 if clip.width > 1024 or clip.height > 576 else 8
    overlap = blksize // 2

    ivtc_fps = dict(fpsnum=24000, fpsden=1001)
    ivtc_fps_div = dict(fpsnum=12000, fpsden=1001)

    # Bobbed clip
    bobbed = bob_clip or QTGMC(clip, **qtgmc_kwargs)

    if bobbed.fps != Fraction(60000, 1001):
        raise ValueError(
            "ivtc_credits: 'Your bobbed clip must have a framerate of 60000/1001!'"
        )

    if interlaced:  # 60i credits. Start of ABBCD
        if dec:  # Decimate the clip instead of properly IVTC
            clean = bobbed.std.SelectEvery(5, [4 - invpos])

            if invpos > 2:
                jitter = core.std.AssumeFPS(
                    bobbed[0] * 2 +
                    bobbed.std.SelectEvery(5, [6 - invpos, 7 - invpos]),
                    **ivtc_fps)  # type:ignore[arg-type]
            elif invpos > 1:
                jitter = core.std.AssumeFPS(
                    bobbed[0] +
                    bobbed.std.SelectEvery(5, [2 - invpos, 6 - invpos]),
                    **ivtc_fps)  # type:ignore[arg-type]
            else:
                jitter = bobbed.std.SelectEvery(5, [1 - invpos, 2 - invpos])
        else:  # Properly IVTC
            if invpos > 1:
                clean = core.std.AssumeFPS(
                    bobbed[0] + bobbed.std.SelectEvery(5, [6 - invpos]),
                    **ivtc_fps_div)  # type:ignore[arg-type]
            else:
                clean = bobbed.std.SelectEvery(5, [1 - invpos])

            if invpos > 3:
                jitter = core.std.AssumeFPS(
                    bobbed[0] +
                    bobbed.std.SelectEvery(5, [4 - invpos, 8 - invpos]),
                    **ivtc_fps)  # type:ignore[arg-type]
            else:
                jitter = bobbed.std.SelectEvery(5, [3 - invpos, 4 - invpos])

        jsup_pre = DitherLumaRebuild(jitter, s0=1).mv.Super(pel=2)
        jsup = jitter.mv.Super(pel=2, levels=1)
        vect_f = jsup_pre.mv.Analyse(blksize=blksize,
                                     isb=False,
                                     delta=1,
                                     overlap=overlap)
        vect_b = jsup_pre.mv.Analyse(blksize=blksize,
                                     isb=True,
                                     delta=1,
                                     overlap=overlap)
        comp = core.mv.FlowInter(jitter, jsup, vect_b, vect_f)
        out = core.std.Interleave(
            [comp[::2], clean] if dec else [clean, comp[::2]])
        offs = 3 if dec else 2
        return out[invpos // offs:]
    else:  # 30p credits
        if pattern == 0:
            if offset == -1:
                c1 = core.std.AssumeFPS(bobbed[0] + bobbed.std.SelectEvery(
                    10, [2 + offset, 7 + offset, 5 + offset, 10 + offset]),
                                        **ivtc_fps)  # type:ignore[arg-type]
            else:
                c1 = bobbed.std.SelectEvery(
                    10, [offset, 2 + offset, 7 + offset, 5 + offset])

            if offset == 1:
                c2 = core.std.Interleave([
                    bobbed.std.SelectEvery(10, [4]),
                    bobbed.std.SelectEvery(10, [5]),
                    bobbed[10:].std.SelectEvery(10, [0]),
                    bobbed.std.SelectEvery(10, [9])
                ])
            else:
                c2 = bobbed.std.SelectEvery(
                    10, [3 + offset, 4 + offset, 9 + offset, 8 + offset])
        else:
            if offset == 1:
                c1 = core.std.Interleave([
                    bobbed.std.SelectEvery(10, [3]),
                    bobbed.std.SelectEvery(10, [5]),
                    bobbed[10:].std.SelectEvery(10, [0]),
                    bobbed.std.SelectEvery(10, [8])
                ])
            else:
                c1 = bobbed.std.SelectEvery(
                    10, [2 + offset, 4 + offset, 9 + offset, 7 + offset])

            if offset == -1:
                c2 = core.std.AssumeFPS(bobbed[0] + bobbed.std.SelectEvery(
                    10, [1 + offset, 6 + offset, 5 + offset, 10 + offset]),
                                        **ivtc_fps)  # type:ignore[arg-type]
            else:
                c2 = bobbed.std.SelectEvery(
                    10, [offset, 1 + offset, 6 + offset, 5 + offset])

        super1_pre = DitherLumaRebuild(c1, s0=1).mv.Super(pel=2)
        super1 = c1.mv.Super(pel=2, levels=1)
        vect_f1 = super1_pre.mv.Analyse(blksize=blksize,
                                        isb=False,
                                        delta=1,
                                        overlap=overlap)
        vect_b1 = super1_pre.mv.Analyse(blksize=blksize,
                                        isb=True,
                                        delta=1,
                                        overlap=overlap)
        fix1 = c1.mv.FlowInter(super1,
                               vect_b1,
                               vect_f1,
                               time=50 + direction * 25).std.SelectEvery(
                                   4, [0, 2])

        super2_pre = DitherLumaRebuild(c2, s0=1).mv.Super(pel=2)
        super2 = c2.mv.Super(pel=2, levels=1)
        vect_f2 = super2_pre.mv.Analyse(blksize=blksize,
                                        isb=False,
                                        delta=1,
                                        overlap=overlap)
        vect_b2 = super2_pre.mv.Analyse(blksize=blksize,
                                        isb=True,
                                        delta=1,
                                        overlap=overlap)
        fix2 = c2.mv.FlowInter(super2, vect_b2,
                               vect_f2).std.SelectEvery(4, [0, 2])

        return core.std.Interleave([fix1, fix2] if pattern ==
                                   0 else [fix2, fix1])
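A sketch of the workflow the docstring recommends, with assumed frame numbers: trim out the 60i credits from the telecined source, fix them, then splice the result back into the IVTC'd 24p clip (frame_ref=0 assumes the trim starts on a cycle boundary):

from vsutil import insert_clip

creds = ivtc_credits(src[30000:32400], frame_ref=0, tff=True)
out = insert_clip(ivtcd, creds, 24000)  # 30000 * 4 // 5 in the 24p domain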
Example #29
def vsdpir(clip: vs.VideoNode,
           strength: SupportsFloat | vs.VideoNode | None = 25,
           mode: str = 'deblock',
           matrix: Matrix | int | None = None,
           tiles: int | Tuple[int] | None = None,
           cuda: bool = True,
           i444: bool = False,
           kernel: Kernel | str = Bicubic(b=0, c=0.5),
           **dpir_args: Any) -> vs.VideoNode:
    """
    A simple vs-mlrt DPIR wrapper for convenience.

    You must install vs-mlrt. For more information, see the following links:

    * https://github.com/AmusementClub/vs-mlrt
    * https://github.com/AmusementClub/vs-mlrt/wiki/DPIR
    * https://github.com/AmusementClub/vs-mlrt/releases/latest

    Converts to RGB -> runs DPIR -> converts back to original format, and with no subsampling if ``i444=True``.
    For more information, see https://github.com/cszn/DPIR.

    Dependencies:

    * vs-mlrt

    :param clip:            Input clip
    :param strength:        DPIR strength. Sane values lie between 1–20 for ``mode='deblock'``,
                            and 1–3 for ``mode='denoise'``
    :param mode:            DPIR mode. Valid modes are 'deblock' and 'denoise'.
    :param matrix:          Enum for the matrix of the input clip. See ``types.Matrix`` for more info.
                            If not specified, gets matrix from the "_Matrix" prop of the clip unless it's an RGB clip,
                            in which case it stays as `None`.
    :param cuda:            Use CUDA backend if True, else CPU backend
    :param i444:            Forces the returned clip to be YUV444PS instead of the input clip's format
    :param dpir_args:       Additional args to pass to vs-mlrt.
                            Note: strength, tiles, and model cannot be overridden!

    :return:                Deblocked or denoised clip in either the given clip's format or YUV444PS
    """
    try:
        from vsmlrt import DPIR, Backend, DPIRModel
    except ModuleNotFoundError:
        raise ModuleNotFoundError(
            "vsdpir: 'vsmlrt is required to use vsdpir functions.'")

    check_variable(clip, "vsdpir")
    assert clip.format

    if isinstance(kernel, str):
        kernel = get_kernel(kernel)()

    bit_depth = get_depth(clip)
    is_rgb, is_gray = (clip.format.color_family is f
                       for f in (vs.RGB, vs.GRAY))

    clip_32 = depth(clip, 32, dither_type=Dither.ERROR_DIFFUSION)

    # TODO: Replace with a match-case?
    if mode.lower() == 'deblock':
        model = DPIRModel.drunet_deblocking_color if not is_gray else DPIRModel.drunet_deblocking_grayscale
    elif mode.lower() == 'denoise':
        model = DPIRModel.drunet_color if not is_gray else DPIRModel.drunet_gray
    else:
        raise ValueError(f"""vsdpir: '"{mode}" is not a valid mode!'""")

    dpir_args |= dict(strength=strength, tiles=tiles, model=model)

    if "backend" not in dpir_args:
        dpir_args |= dict(backend=Backend.ORT_CUDA if cuda else Backend.OV_CPU)

    if is_rgb or is_gray:
        return depth(DPIR(clip_32.std.Limiter(), **dpir_args), bit_depth)

    if matrix is None:
        matrix = get_prop(clip.get_frame(0), "_Matrix", int)

    targ_matrix = Matrix(matrix)
    targ_format = clip.format.replace(subsampling_w=0,
                                      subsampling_h=0) if i444 else clip.format

    clip_rgb = kernel.resample(
        clip_32, vs.RGBS,
        matrix_in=targ_matrix).std.Limiter()  # type:ignore[arg-type]

    mod_w, mod_h = clip_rgb.width % 8, clip_rgb.height % 8

    if to_pad := any([mod_w, mod_h]):
        d_width, d_height = clip.width + mod_w, clip.height + mod_h

        clip_rgb = Point(src_width=d_width,
                         src_height=d_height).scale(clip_rgb, d_width,
                                                    d_height, (-mod_h, -mod_w))
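The snippet is cut off above, before DPIR is actually invoked. An assumed continuation, based on the surrounding code, would run the model on the padded RGB clip, crop the padding back off, and resample to the target format; treat this as a sketch, not the source's actual tail:

    run_dpir = DPIR(clip_rgb, **dpir_args)

    if to_pad:
        run_dpir = run_dpir.std.Crop(left=mod_w, top=mod_h)  # assumed: undo the padding added above

    return kernel.resample(run_dpir, targ_format, matrix=targ_matrix)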
Example #30
def autodb_dpir(clip: vs.VideoNode,
                edgevalue: int = 24,
                strs: Sequence[float] = [30, 50, 75],
                thrs: Sequence[Tuple[float, float, float]] = [(1.5, 2.0, 2.0),
                                                              (3.0, 4.5, 4.5),
                                                              (5.5, 7.0, 7.0)],
                matrix: Optional[Matrix | int] = None,
                cuda: bool = True,
                write_props: bool = False,
                **vsdpir_args: Any) -> vs.VideoNode:
    """
    A rewrite of fvsfunc.AutoDeblock that uses vsdpir instead of dfttest to deblock.

    This function checks for differences between a frame and an edgemask with some processing done on it,
    and for differences between the current frame and the next frame.
    For frames where both thresholds are exceeded, it will perform deblocking at a specified strength.
    This will ideally be frames that show big temporal *and* spatial inconsistencies.

    Thresholds and calculations are added to the frameprops to use as reference when setting the thresholds.

    Keep in mind that vsdpir is not perfect; it may cause weird, black dots to appear sometimes.
    If that happens, you can perform a denoise on the original clip (maybe even using vsdpir's denoising mode)
    and grab the brightest pixels from your two clips. That should return a perfectly fine clip.

    Thanks Vardë, louis, setsugen_no_ao!

    Dependencies:

    * vs-dpir

    :param clip:            Input clip
    :param edgevalue:       Remove edges from the edgemask that exceed this threshold (higher means more edges removed)
    :param strs:            A list of DPIR strength values (higher means stronger deblocking).
                            You can pass any arbitrary number of values here.
                            Sane deblocking strengths lie between 1–20 for most regular deblocking.
                            Going higher than 50 is not recommended outside of very extreme cases.
                            The amount of values in strs and thrs need to be equal.
    :param thrs:            A list of thresholds, written as [(EdgeValRef, NextFrameDiff, PrevFrameDiff)].
                            You can pass any arbitrary number of values here.
                            The amount of values in strs and thrs need to be equal.
    :param matrix:          Enum for the matrix of the input clip. See ``types.Matrix`` for more info.
                            If `None`, gets matrix from the "_Matrix" prop of the clip unless it's an RGB clip,
                            in which case it stays as `None`.
    :param cuda:            Use CUDA backend if True, else CPU backend
    :param write_props:     Will write verbose props
    :param vsdpir_args:     Additional args to pass to ``vsdpir``

    :return:                Deblocked clip
    """
    check_variable(clip, "autodb_dpir")
    assert clip.format

    def _eval_db(n: int, f: Sequence[vs.VideoFrame], clip: vs.VideoNode,
                 db_clips: Sequence[vs.VideoNode],
                 nthrs: Sequence[Tuple[float, float, float]]) -> vs.VideoNode:

        evref_diff, y_next_diff, y_prev_diff = [
            get_prop(f[i], prop, float) for i, prop in zip(
                range(3), ['EdgeValRefDiff', 'YNextDiff', 'YPrevDiff'])
        ]
        f_type = get_prop(f[0], '_PictType', bytes).decode('utf-8')

        if f_type == 'I':
            y_next_diff = (y_next_diff + evref_diff) / 2

        out = clip
        nthr_used = (-1., ) * 3
        for dblk, nthr in zip(db_clips, nthrs):
            if all(p > t for p, t in zip(
                [evref_diff, y_next_diff, y_prev_diff], nthr)):
                out = dblk
                nthr_used = nthr

        if write_props:
            for prop_name, prop_val in zip([
                    'Adb_EdgeValRefDiff', 'Adb_YNextDiff', 'Adb_YPrevDiff',
                    'Adb_EdgeValRefDiffThreshold', 'Adb_YNextDiffThreshold',
                    'Adb_YPrevDiffThreshold'
            ], [evref_diff, y_next_diff, y_prev_diff] + list(nthr_used)):
                out = out.std.SetFrameProp(prop_name,
                                           floatval=max(prop_val * 255, -1))

        return out

    if len(strs) != len(thrs):
        raise ValueError(
            'autodb_dpir: You must pass an equal number of values to '
            f'strs ({len(strs)}) and thrs ({len(thrs)})!')

    nthrs = [tuple(x / 255 for x in thr) for thr in thrs]

    is_rgb = clip.format.color_family is vs.RGB

    if not matrix and not is_rgb:
        matrix = get_prop(clip.get_frame(0), "_Matrix", int)

    rgb = core.resize.Bicubic(clip, format=vs.RGBS,
                              matrix_in=matrix) if not is_rgb else clip

    assert rgb.format

    maxvalue = (1 << rgb.format.bits_per_sample) - 1
    evref = core.std.Prewitt(rgb)
    evref = core.std.Expr(evref, f"x {edgevalue} >= {maxvalue} x ?")
    evref_rm = evref.std.Median().std.Convolution(
        matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1])

    diffevref = core.std.PlaneStats(evref, evref_rm, prop='EdgeValRef')
    diffnext = core.std.PlaneStats(rgb,
                                   rgb.std.DeleteFrames([0]),
                                   prop='YNext')
    diffprev = core.std.PlaneStats(rgb, rgb[0] + rgb, prop='YPrev')

    db_clips = [
        vsdpir(rgb, strength=st, mode='deblock', cuda=cuda,
               **vsdpir_args).std.SetFrameProp('Adb_DeblockStrength',
                                               intval=int(st)) for st in strs
    ]

    debl = core.std.FrameEval(rgb,
                              partial(_eval_db,
                                      clip=rgb,
                                      db_clips=db_clips,
                                      nthrs=nthrs),
                              prop_src=[diffevref, diffnext, diffprev])

    return core.resize.Bicubic(debl,
                               format=clip.format.id,
                               matrix=matrix if not is_rgb else None)
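A hypothetical call restating the defaults, with verbose props enabled so the Adb_* frameprops can be inspected while tuning thresholds:

deblocked = autodb_dpir(src, strs=[30, 50, 75],
                        thrs=[(1.5, 2.0, 2.0), (3.0, 4.5, 4.5), (5.5, 7.0, 7.0)],
                        write_props=True)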