Example #1
def generate_keyframes(clip: vs.VideoNode,
                       out_path=None,
                       no_header=False) -> None:
    """
    The exact same version as found in kagefunc.
    """
    # Speed up the analysis by resizing first.
    clip = core.resize.Bilinear(clip, 640, 360, format=vs.YUV420P8)

    if args.scxvid:  # `args` is the surrounding script's argparse namespace
        clip = core.scxvid.Scxvid(clip)
    else:
        clip = core.wwxd.WWXD(clip)

    out_txt = '' if no_header else "# WWXD log file, using qpfile format\n# Please do not modify this file\n\n"

    for i in range(clip.num_frames):
        # Scxvid and WWXD report scene changes through different frame props.
        props = clip.get_frame(i).props
        scenechange = props["_SceneChangePrev"] if args.scxvid else props["Scenechange"]
        if scenechange == 1:
            out_txt += "%d I -1\n" % i
        print(f"Progress: {i}/{clip.num_frames} frames", end="\r")
    text_file = open(out_path, "w")
    text_file.write(out_txt)
    text_file.close()
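A minimal usage sketch (the file name is hypothetical; `src` is assumed to be an already-loaded clip and `args` the script's parsed arguments). The resulting qpfile is typically handed to the encoder, e.g. via x264's --qpfile option:

generate_keyframes(src, out_path="keyframes.txt")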
Example #2
def is_topleft(clip: vs.VideoNode) -> bool:
    """
    Simple function that checks whether the chroma is top-left aligned.

    In any other case it's fairly safe to assume the chroma is aligned to the
    center-left, as was the default before 4K UHD BDs and BT.2020 were a thing.
    This is basically a more complex check for BT.2020 material.
    """
    if not clip.format:
        raise VariableFormatError("is_topleft")

    # Top-left chroma placement only applies to 4:2:0 subsampling.
    if clip.format.subsampling_h != 1 or clip.format.subsampling_w != 1:
        return False

    props = clip.get_frame(0).props

    # If the chroma location prop is set, just check whether it's top-left (2).
    cloc = props.get("_ChromaLocation")
    if cloc is not None:
        return cloc == 2

    # If the primaries are set (they should be) and not 'unspecified' (2),
    # check whether they're BT.2020 (9).
    prims = props.get("_Primaries")
    if prims not in [None, 2]:
        return prims == 9

    # These should be the minimum 4:3 and 21:9 dimensions after cropping 4K
    # with letter- and/or pillarboxing.
    return clip.width >= 2880 and clip.height >= 1645
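A hedged usage sketch (`src` is an assumed source clip): the result maps straight onto the resizer's `chromaloc_in` parameter (2 = top-left, 0 = left).

chromaloc = 2 if is_topleft(src) else 0
rgb = core.resize.Bicubic(src, format=vs.RGBS, chromaloc_in=chromaloc)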
Example #3
def _sraa_frameeval(n: int, clip: vs.VideoNode, w: int,
                    h: int) -> vs.VideoNode:
    # Meant as a std.FrameEval callback: probe the current frame's size so
    # variable-resolution clips can pick a rescale factor per frame.
    frame = clip.get_frame(n)
    if frame.height < 1080:
        rfactor = 2.5
    else:
        rfactor = 1.5
    return upscaled_sraa(clip.resize.Bicubic(frame.width, frame.height),
                         rfactor=rfactor,
                         h=h,
                         ar=w / h)
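A hedged sketch of how a callback like this is wired up with `std.FrameEval` (the base clip must already have the target output dimensions; the `w`/`h` values here are made up):

from functools import partial

base = clip.std.BlankClip(width=1920, height=1080)
aa = core.std.FrameEval(base, partial(_sraa_frameeval, clip=clip, w=1920, h=1080))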
Example #4
def generate_keyframes(clip: vs.VideoNode, out_path: str) -> None:
    """Generate qp filename for keyframes to pass the file into the encoder
       to force I frames. Use both scxvid and wwxd. Original function stolen from kagefunc.

    Args:
        clip (vs.VideoNode): Source clip
        out_path (str): output path
    """
    clip = core.resize.Bilinear(clip, 640, 360)
    clip = core.scxvid.Scxvid(clip)
    clip = core.wwxd.WWXD(clip)
    out_txt = ""
    for i in range(clip.num_frames):
        # Fetch the frame once; both props live on the same frame.
        props = clip.get_frame(i).props
        if props["_SceneChangePrev"] == 1 or props["Scenechange"] == 1:
            out_txt += "%d I -1\n" % i
        if i % 2000 == 0:
            print(i)
    with open(out_path, "w") as text_file:
        text_file.write(out_txt)
Example #5
def print_vs_output_colorspace_info(vs_output: vs.VideoNode) -> None:
    from vspreview.core import Output

    props = vs_output.get_frame(0).props
    logging.debug('Matrix: {}, Transfer: {}, Primaries: {}, Range: {}'.format(
        Output.Matrix.values[props['_Matrix']] if '_Matrix' in props else None,
        Output.Transfer.values[props['_Transfer']]
        if '_Transfer' in props else None,
        Output.Primaries.values[props['_Primaries']]
        if '_Primaries' in props else None,
        Output.Range.values[props['_ColorRange']]
        if '_ColorRange' in props else None,
    ))
Example #6
    def _GetMatrix(clip: vs.VideoNode) -> int:
        frame = clip.get_frame(0)
        w, h = frame.width, frame.height

        if frame.format.color_family == vs.RGB:
            return 0  # RGB
        if frame.format.color_family == vs.YCOCG:
            return 8  # YCoCg
        if w <= 1024 and h <= 576:
            return 5  # BT.470bg (SD)
        if w <= 2048 and h <= 1536:
            return 1  # BT.709 (HD)
        return 9  # BT.2020 (UHD)
Example #7
def ccd(clip: vs.VideoNode, threshold: float) -> vs.VideoNode:
    """taken from a currently-private gist, but should become available in `vs-denoise` soon enough"""
    from typing import cast

    from vsutil import join, split

    # `add_expr` and `EXPR_VARS` are helpers from the original gist (presumably
    # from what became vs-denoise/vsexprtools); they build chained
    # "x y + z + ..." sum expressions and the std.Expr variable alphabet.

    assert clip.format
    bits = clip.format.bits_per_sample
    is_float = clip.format.sample_type == vs.FLOAT
    peak = 1.0 if is_float else (1 << bits) - 1
    threshold /= peak
    # threshold = threshold ** 2 / 195075.0

    rgb = clip.resize.Bicubic(format=vs.RGBS)

    pre1 = rgb.resize.Point(
        clip.width+24, clip.height+24,
        src_left=-12, src_top=-12,
        src_width=clip.width+24, src_height=clip.height+24
    )
    pre2 = rgb.resize.Point(
        rgb.width+24, rgb.height+24,
        src_width=rgb.width+24, src_height=rgb.height+24
    )
    pre_planes = split(pre1)

    shift_planes_clips = [
        split(pre2.resize.Point(src_left=-x, src_top=-y))
        for x in range(0, 25, 8) for y in range(0, 25, 8)
    ]
    denoise_clips = [
        core.std.Expr(pre_planes + shift_planes, f'x a - dup * y b - dup * + z c - dup * + sqrt {threshold} <')
        for shift_planes in shift_planes_clips
    ]

    cond_planes_clips = [
        join([core.std.Expr([splane, dclip], 'y 0 > x 0 ?') for splane in splanes])
        for dclip, splanes in zip(denoise_clips, shift_planes_clips)
    ]

    denoise = core.std.Expr(denoise_clips, add_expr(len(denoise_clips)) + ' 1 +')
    denoise = join([denoise] * 3)

    n_op = len(cond_planes_clips) + 1
    avg = core.std.Expr([pre1] + cond_planes_clips + [denoise], add_expr(n_op) + f' {EXPR_VARS[n_op]} /')
    avg = avg.resize.Bicubic(
        format=clip.format.id, dither_type='error_diffusion', matrix=cast(int, clip.get_frame(0).props['_Matrix'])
    )
    avg = avg.std.Crop(12, 12, 12, 12)

    assert avg.format
    return core.std.ShufflePlanes([clip, avg], [0, 1, 2], avg.format.color_family)
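A hedged usage sketch (the threshold value is illustrative; the classic CCD algorithm commonly defaults to around 4):

denoised = ccd(src, threshold=4)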
Example #8
def bob(clip: vs.VideoNode, tff: bool | None = None) -> vs.VideoNode:
    """
    Very simple bobbing function. Shouldn't be used for regular filtering,
    but as a very cheap bobber for other functions.

    :param clip:    Input clip
    :param tff:     Top-field-first. `False` sets it to Bottom-Field-First.
                    If None, get the field order from the _FieldBased prop.

    :return:        Bobbed clip
    """
    if get_prop(clip.get_frame(0), '_FieldBased', int) == 0 and tff is None:
        raise vs.Error("bob: 'You must set `tff` for this clip!'")
    elif isinstance(tff, (bool, int)):
        clip = clip.std.SetFieldBased(int(tff) + 1)

    return Catrom().scale(clip.std.SeparateFields(), clip.width, clip.height)
Example #9
def generate_keyframes(clip: vs.VideoNode, out_path=None) -> None:
    """
    probably only useful for fansubbing
    generates qp-filename for keyframes to simplify timing
    """
    import os
    # Speed up the analysis by resizing first. Converting to 8 bit also seems to improve the accuracy of wwxd.
    clip = core.resize.Bilinear(clip, 640, 360, format=vs.YUV420P8)
    clip = core.wwxd.WWXD(clip)
    out_txt = "# WWXD log file, using qpfile format\n\n"
    for i in range(clip.num_frames):
        if clip.get_frame(i).props.Scenechange == 1:
            out_txt += "%d I -1\n" % i
        if i % 1000 == 0:
            print(i)
    out_path = fallback(out_path,
                        os.path.expanduser("~") + "/Desktop/keyframes.txt")
    with open(out_path, "w") as text_file:
        text_file.write(out_txt)
Example #10
def get_matrix(clip: vs.VideoNode) -> int:
    """
    Helper function to get the matrix for a clip.

    :param clip:    src clip

    :return:        Value representing a matrix
    """
    frame = clip.get_frame(0)
    w, h = frame.width, frame.height

    if frame.format.color_family == vs.RGB:
        return 0  # RGB
    if frame.format.color_family == vs.YCOCG:
        return 8  # YCoCg
    if w <= 1024 and h <= 576:
        return 5  # BT.470bg (SD)
    if w <= 2048 and h <= 1536:
        return 1  # BT.709 (HD)
    return 9  # BT.2020 (UHD)
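A hedged usage sketch (`src` is an assumed source clip): the returned integer plugs directly into the resizer's `matrix_in` parameter, e.g. when converting to RGB.

rgb = core.resize.Bicubic(src, format=vs.RGBS, matrix_in=get_matrix(src))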
Example #11
def generate_keyframes(clip: vs.VideoNode, out_path=None) -> None:
    """
    probably only useful for fansubbing
    generates qp-filename for keyframes to simplify timing
    disclaimer: I don't actually know why -1 is forced. I just ported the avisynth script
    """
    import os
    clip = core.resize.Bilinear(clip, 640, 360)  # speed up the analysis by resizing first
    clip = core.wwxd.WWXD(clip)
    out_txt = ""
    for i in range(clip.num_frames):
        if clip.get_frame(i).props.Scenechange == 1:
            out_txt += "%d I -1\n" % i
        if i % 1000 == 0:
            print(i)
    if out_path is None:
        out_path = os.path.expanduser("~") + "/Desktop/keyframes.txt"
    with open(out_path, "w") as text_file:
        text_file.write(out_txt)
Example #12
    def execute(self, n: int, clip: vs.VideoNode) -> vs.VideoNode:
        """
        Copies the xinntao ESRGAN repo's main execution code. The only real difference is it doesn't use cv2, and
        instead uses vapoursynth ports of cv2's functionality for read and writing "images".

        Code adapted from:
        https://github.com/xinntao/ESRGAN/blob/master/test.py#L26
        """
        if not self.rrdb_net_model:
            raise ValueError("VSGAN: No ESRGAN model has been loaded, use VSGAN.load_model().")
        # 255 being the max value for an RGB color space, could this be key to YUV support in the future?
        max_n = 255.0
        img = self.frame_to_np(clip.get_frame(n))
        img = img * 1.0 / max_n
        img = np.transpose(img[:, :, (0, 1, 2)], (2, 0, 1))  # HWC to CHW for PyTorch
        img = torch.from_numpy(img).float()
        img_lr = img.unsqueeze(0).to(self.torch_device)
        with torch.no_grad():
            output = self.rrdb_net_model(img_lr).data.squeeze().float().cpu().clamp_(0, 1).numpy()
        output = np.transpose(output[(2, 1, 0), :, :], (1, 2, 0))  # reverse channel order, CHW to HWC
        output = (output * max_n).round()
        return self.np_to_clip(clip, output)
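A hedged sketch of how a per-frame method like this is typically wired up (the `VSGAN` constructor and `load_model` arguments, the model path, and the 4x scale are all assumptions; the real wiring lives in the library's own run method):

from functools import partial

vsgan = VSGAN("cuda")                      # hypothetical device argument
vsgan.load_model("models/4x_ESRGAN.pth")   # hypothetical model path
# FrameEval needs a base clip with the output dimensions (4x here).
base = clip.std.BlankClip(width=clip.width * 4, height=clip.height * 4)
upscaled = core.std.FrameEval(base, partial(vsgan.execute, clip=clip))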
Example #13
def generate_keyframes(clip: vs.VideoNode,
                       out_path=None,
                       no_header=False) -> None:
    """
    probably only useful for fansubbing
    generates qp-filename for keyframes to simplify timing
    """
    clip = core.resize.Bilinear(
        clip, 640, 360,
        format=vs.YUV420P8)  # speed up the analysis by resizing first
    clip = core.wwxd.WWXD(clip)
    if no_header:
        out_txt = ''
    else:
        out_txt = "# WWXD log file, using qpfile format\n\n"
    for i in range(clip.num_frames):
        if clip.get_frame(i).props.Scenechange == 1:
            out_txt += "%d I -1\n" % i
        if i % 1000 == 0:
            print(f"Progress: {i}/{clip.num_frames} frames")
    if out_path is None:
        raise ValueError("generate_keyframes: `out_path` must be set!")
    with open(out_path, "w") as text_file:
        text_file.write(out_txt)
Example #14
def is_limited_range(clip: vs.VideoNode) -> bool:
    """Returns true if the input clip is limited range."""
    return clip.get_frame(0).props.get("_ColorRange") == 1
Example #15
def autodb_dpir(clip: vs.VideoNode,
                edgevalue: int = 24,
                strs: Sequence[float] = [30, 50, 75],
                thrs: Sequence[Tuple[float, float, float]] = [(1.5, 2.0, 2.0),
                                                              (3.0, 4.5, 4.5),
                                                              (5.5, 7.0, 7.0)],
                matrix: Optional[Matrix | int] = None,
                cuda: bool = True,
                write_props: bool = False,
                **vsdpir_args: Any) -> vs.VideoNode:
    """
    A rewrite of fvsfunc.AutoDeblock that uses vsdpir instead of dfttest to deblock.

    This function checks for differences between a frame and an edgemask with some processing done on it,
    and for differences between the current frame and the next frame.
    For frames where both thresholds are exceeded, it will perform deblocking at a specified strength.
    This will ideally be frames that show big temporal *and* spatial inconsistencies.

    Thresholds and calculations are added to the frameprops to use as reference when setting the thresholds.

    Keep in mind that vsdpir is not perfect; it may sometimes cause weird black dots to appear.
    If that happens, you can perform a denoise on the original clip (maybe even using vsdpir's denoising mode)
    and grab the brightest pixels from your two clips. That should return a perfectly fine clip.

    Thanks Vardë, louis, setsugen_no_ao!

    Dependencies:

    * vs-dpir

    :param clip:            Input clip
    :param edgevalue:       Remove edges from the edgemask that exceed this threshold (higher means more edges removed)
    :param strs:            A list of DPIR strength values (higher means stronger deblocking).
                            You can pass any arbitrary number of values here.
                            Sane deblocking strengths lie between 1 and 20 for most regular deblocking.
                            Going higher than 50 is not recommended outside of very extreme cases.
                            The number of values in strs and thrs must be equal.
    :param thrs:            A list of thresholds, written as [(EdgeValRef, NextFrameDiff, PrevFrameDiff)].
                            You can pass any arbitrary number of values here.
                            The number of values in strs and thrs must be equal.
    :param matrix:          Enum for the matrix of the input clip. See ``types.Matrix`` for more info.
                            If `None`, gets matrix from the "_Matrix" prop of the clip unless it's an RGB clip,
                            in which case it stays as `None`.
    :param cuda:            Use CUDA backend if True, else CPU backend
    :param write_props:     Will write verbose props
    :param vsdpir_args:     Additional args to pass to ``vsdpir``

    :return:                Deblocked clip
    """
    check_variable(clip, "autodb_dpir")
    assert clip.format

    def _eval_db(n: int, f: Sequence[vs.VideoFrame], clip: vs.VideoNode,
                 db_clips: Sequence[vs.VideoNode],
                 nthrs: Sequence[Tuple[float, float, float]]) -> vs.VideoNode:

        evref_diff, y_next_diff, y_prev_diff = [
            get_prop(f[i], prop, float) for i, prop in zip(
                range(3), ['EdgeValRefDiff', 'YNextDiff', 'YPrevDiff'])
        ]
        f_type = get_prop(f[0], '_PictType', bytes).decode('utf-8')

        if f_type == 'I':
            y_next_diff = (y_next_diff + evref_diff) / 2

        out = clip
        nthr_used = (-1., ) * 3
        for dblk, nthr in zip(db_clips, nthrs):
            if all(p > t for p, t in zip(
                [evref_diff, y_next_diff, y_prev_diff], nthr)):
                out = dblk
                nthr_used = nthr

        if write_props:
            for prop_name, prop_val in zip([
                    'Adb_EdgeValRefDiff', 'Adb_YNextDiff', 'Adb_YPrevDiff',
                    'Adb_EdgeValRefDiffThreshold', 'Adb_YNextDiffThreshold',
                    'Adb_YPrevDiffThreshold'
            ], [evref_diff, y_next_diff, y_prev_diff] + list(nthr_used)):
                out = out.std.SetFrameProp(prop_name,
                                           floatval=max(prop_val * 255, -1))

        return out

    if len(strs) != len(thrs):
        raise ValueError(
            'autodb_dpir: You must pass an equal number of values to '
            f'strs ({len(strs)}) and thrs ({len(thrs)})!')

    nthrs = [tuple(x / 255 for x in thr) for thr in thrs]

    is_rgb = clip.format.color_family is vs.RGB

    if not matrix and not is_rgb:
        matrix = get_prop(clip.get_frame(0), "_Matrix", int)

    rgb = core.resize.Bicubic(clip, format=vs.RGBS,
                              matrix_in=matrix) if not is_rgb else clip

    assert rgb.format

    maxvalue = (1 << rgb.format.bits_per_sample) - 1
    evref = core.std.Prewitt(rgb)
    evref = core.std.Expr(evref, f"x {edgevalue} >= {maxvalue} x ?")
    evref_rm = evref.std.Median().std.Convolution(
        matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1])

    diffevref = core.std.PlaneStats(evref, evref_rm, prop='EdgeValRef')
    diffnext = core.std.PlaneStats(rgb,
                                   rgb.std.DeleteFrames([0]),
                                   prop='YNext')
    diffprev = core.std.PlaneStats(rgb, rgb[0] + rgb, prop='YPrev')

    db_clips = [
        vsdpir(rgb, strength=st, mode='deblock', cuda=cuda,
               **vsdpir_args).std.SetFrameProp('Adb_DeblockStrength',
                                               intval=int(st)) for st in strs
    ]

    debl = core.std.FrameEval(rgb,
                              partial(_eval_db,
                                      clip=rgb,
                                      db_clips=db_clips,
                                      nthrs=nthrs),
                              prop_src=[diffevref, diffnext, diffprev])

    return core.resize.Bicubic(debl,
                               format=clip.format.id,
                               matrix=matrix if not is_rgb else None)
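A hedged usage sketch (every value here is illustrative, not a recommendation):

deblocked = autodb_dpir(src, edgevalue=24,
                        strs=[10, 25, 45],
                        thrs=[(1.0, 1.5, 1.5), (2.5, 3.5, 3.5), (4.5, 6.0, 6.0)],
                        cuda=True, write_props=True)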
Example #16
def vsdpir(clip: vs.VideoNode,
           strength: SupportsFloat | vs.VideoNode | None = 25,
           mode: str = 'deblock',
           matrix: Matrix | int | None = None,
           tiles: int | Tuple[int] | None = None,
           cuda: bool = True,
           i444: bool = False,
           kernel: Kernel | str = Bicubic(b=0, c=0.5),
           **dpir_args: Any) -> vs.VideoNode:
    """
    A simple vs-mlrt DPIR wrapper for convenience.

    You must install vs-mlrt. For more information, see the following links:

    * https://github.com/AmusementClub/vs-mlrt
    * https://github.com/AmusementClub/vs-mlrt/wiki/DPIR
    * https://github.com/AmusementClub/vs-mlrt/releases/latest

    Converts to RGB -> runs DPIR -> converts back to original format, and with no subsampling if ``i444=True``.
    For more information, see https://github.com/cszn/DPIR.

    Dependencies:

    * vs-mlrt

    :param clip:            Input clip
    :param strength:        DPIR strength. Sane values lie between 1 and 20 for ``mode='deblock'``,
                            and 1 and 3 for ``mode='denoise'``
    :param mode:            DPIR mode. Valid modes are 'deblock' and 'denoise'.
    :param matrix:          Enum for the matrix of the input clip. See ``types.Matrix`` for more info.
                            If not specified, gets matrix from the "_Matrix" prop of the clip unless it's an RGB clip,
                            in which case it stays as `None`.
    :param tiles:           Split the image into tiles to reduce VRAM usage (passed through to vs-mlrt)
    :param cuda:            Use CUDA backend if True, else CPU backend
    :param i444:            Forces the returned clip to be YUV444PS instead of the input clip's format
    :param kernel:          Kernel used for format conversions (see ``vskernels``)
    :param dpir_args:       Additional args to pass to vs-mlrt.
                            Note: strength, tiles, and model cannot be overridden!

    :return:                Deblocked or denoised clip in either the given clip's format or YUV444PS
    """
    try:
        from vsmlrt import DPIR, Backend, DPIRModel
    except ModuleNotFoundError:
        raise ModuleNotFoundError(
            "vsdpir: 'vsmlrt is required to use vsdpir functions.'")

    check_variable(clip, "vsdpir")
    assert clip.format

    if isinstance(kernel, str):
        kernel = get_kernel(kernel)()

    bit_depth = get_depth(clip)
    is_rgb, is_gray = (clip.format.color_family is f
                       for f in (vs.RGB, vs.GRAY))

    clip_32 = depth(clip, 32, dither_type=Dither.ERROR_DIFFUSION)

    # TODO: Replace with a match-case?
    if mode.lower() == 'deblock':
        model = DPIRModel.drunet_deblocking_color if not is_gray else DPIRModel.drunet_deblocking_grayscale
    elif mode.lower() == 'denoise':
        model = DPIRModel.drunet_color if not is_gray else DPIRModel.drunet_gray
    else:
        raise ValueError(f"""vsdpir: '"{mode}" is not a valid mode!'""")

    dpir_args |= dict(strength=strength, tiles=tiles, model=model)

    if "backend" not in dpir_args:
        dpir_args |= dict(backend=Backend.ORT_CUDA if cuda else Backend.OV_CPU)

    if is_rgb or is_gray:
        return depth(DPIR(clip_32.std.Limiter(), **dpir_args), bit_depth)

    if matrix is None:
        matrix = get_prop(clip.get_frame(0), "_Matrix", int)

    targ_matrix = Matrix(matrix)
    targ_format = clip.format.replace(subsampling_w=0,
                                      subsampling_h=0) if i444 else clip.format

    clip_rgb = kernel.resample(
        clip_32, vs.RGBS,
        matrix_in=targ_matrix).std.Limiter()  # type:ignore[arg-type]

    # Pad the clip to mod-8, which the DPIR models expect.
    # (The original snippet used `width % 8` as the pad amount, which does not
    # generally produce a mod-8 size.)
    mod_w = (8 - clip_rgb.width % 8) % 8
    mod_h = (8 - clip_rgb.height % 8) % 8

    if to_pad := any([mod_w, mod_h]):
        d_width, d_height = clip_rgb.width + mod_w, clip_rgb.height + mod_h

        clip_rgb = Point(src_width=d_width,
                         src_height=d_height).scale(clip_rgb, d_width,
                                                    d_height, (-mod_h, -mod_w))
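    # NOTE: the snippet is truncated here. A hedged sketch of the likely
    # remainder, following the names above (run DPIR on the padded RGB clip,
    # crop the padding back off, and resample to the target format):
    run_dpir = DPIR(clip_rgb, **dpir_args)
    if to_pad:
        run_dpir = run_dpir.std.Crop(left=mod_w, top=mod_h)  # sides depend on the shift above
    return kernel.resample(run_dpir, targ_format, matrix=targ_matrix)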
Example #17
def ivtc_credits(clip: vs.VideoNode,
                 frame_ref: int,
                 tff: bool | None = None,
                 interlaced: bool = True,
                 dec: bool | None = None,
                 bob_clip: vs.VideoNode | None = None,
                 qtgmc_args: Dict[str, Any] = {}) -> vs.VideoNode:
    """
    Deinterlacing function for interlaced credits (60i/30p) on top of telecined video (24p).
    This is a combination of havsfunc's dec_txt60mc, ivtc_txt30mc, and ivtc_txt60mc functions.
    The credits are interpolated and decimated to match the output clip.

    The function assumes you're passing a telecined clip (that's native 24p).
    If your clip is already fieldmatched, decimation will automatically be enabled unless you set it to False.
    Likewise, if your credits are 30p (as opposed to 60i), you should set `interlaced` to False.

    The recommended way to use this filter is to trim out the area with interlaced credits,
    apply this function, and `vsutil.insert_clip` the clip back into a properly IVTC'd clip.
    Alternatively, use `muvsfunc.VFRSplice` to splice the clip back in if you're dealing with a VFR clip.

    :param clip:            Input clip. Framerate must be 30000/1001.
    :param frame_ref:       First frame in the pattern. Expected pattern is ABBCD,
                            except for when ``dec`` is enabled, in which case it's AABCD.
    :param tff:             Top-field-first. `False` sets it to Bottom-Field-First.
    :param interlaced:      60i credits. Set to false for 30p credits.
    :param dec:             Decimate input clip as opposed to IVTC.
                            Automatically enabled if certain fieldmatching props are found.
                            Can be forcibly disabled by setting it to `False`.
    :param bob_clip:        Custom bobbed clip. If `None`, uses a QTGMC clip.
                            Framerate must be 60000/1001.
    :param qtgmc_args:      Arguments to pass on to QTGMC.
                            Accepts any parameter except for FPSDivisor and TFF.

    :return:                IVTC'd/decimated clip with deinterlaced credits
    """
    try:
        from havsfunc import QTGMC, DitherLumaRebuild
    except ModuleNotFoundError:
        raise ModuleNotFoundError(
            "ivtc_credits: missing dependency 'havsfunc'")

    check_variable(clip, "ivtc_credits")

    if clip.fps != Fraction(30000, 1001):
        raise ValueError(
            "ivtc_credits: 'Your clip must have a framerate of 30000/1001!'")

    if get_prop(clip.get_frame(0), '_FieldBased', int) == 0 and tff is None:
        raise vs.Error("ivtc_credits: 'You must set `tff` for this clip!'")
    elif isinstance(tff, (bool, int)):
        clip = clip.std.SetFieldBased(int(tff) + 1)

    qtgmc_kwargs: Dict[str, Any] = dict(SourceMatch=3,
                                        Lossless=2,
                                        TR0=2,
                                        TR1=2,
                                        TR2=3,
                                        Preset="Placebo")
    qtgmc_kwargs |= qtgmc_args
    qtgmc_kwargs |= dict(
        FPSDivisor=1,
        TFF=tff or bool(get_prop(clip.get_frame(0), '_FieldBased', int) - 1))

    if dec is not False:  # Automatically enable dec unless set to False
        dec = any(x in clip.get_frame(0).props
                  for x in {"VFMMatch", "TFMMatch"})

        if dec:
            warnings.warn(
                "ivtc_credits: 'Fieldmatched clip passed to function! "
                "dec is set to True. If you want to disable this, set dec=False!'"
            )

    # motion vector and other values
    field_ref = frame_ref * 2
    frame_ref %= 5
    invpos = (5 - field_ref) % 5

    offset = [0, 0, -1, 1, 1][frame_ref]
    pattern = [0, 1, 0, 0, 1][frame_ref]
    direction = [-1, -1, 1, 1, 1][frame_ref]

    blksize = 16 if clip.width > 1024 or clip.height > 576 else 8
    overlap = blksize // 2

    ivtc_fps = dict(fpsnum=24000, fpsden=1001)
    ivtc_fps_div = dict(fpsnum=12000, fpsden=1001)

    # Bobbed clip
    bobbed = bob_clip or QTGMC(clip, **qtgmc_kwargs)

    if bobbed.fps != Fraction(60000, 1001):
        raise ValueError(
            "ivtc_credits: 'Your bobbed clip must have a framerate of 60000/1001!'"
        )

    if interlaced:  # 60i credits. Start of ABBCD
        if dec:  # Decimate the clip instead of properly IVTC
            clean = bobbed.std.SelectEvery(5, [4 - invpos])

            if invpos > 2:
                jitter = core.std.AssumeFPS(
                    bobbed[0] * 2 +
                    bobbed.std.SelectEvery(5, [6 - invpos, 7 - invpos]),
                    **ivtc_fps)  # type:ignore[arg-type]
            elif invpos > 1:
                jitter = core.std.AssumeFPS(
                    bobbed[0] +
                    bobbed.std.SelectEvery(5, [2 - invpos, 6 - invpos]),
                    **ivtc_fps)  # type:ignore[arg-type]
            else:
                jitter = bobbed.std.SelectEvery(5, [1 - invpos, 2 - invpos])
        else:  # Properly IVTC
            if invpos > 1:
                clean = core.std.AssumeFPS(
                    bobbed[0] + bobbed.std.SelectEvery(5, [6 - invpos]),
                    **ivtc_fps_div)  # type:ignore[arg-type]
            else:
                clean = bobbed.std.SelectEvery(5, [1 - invpos])

            if invpos > 3:
                jitter = core.std.AssumeFPS(
                    bobbed[0] +
                    bobbed.std.SelectEvery(5, [4 - invpos, 8 - invpos]),
                    **ivtc_fps)  # type:ignore[arg-type]
            else:
                jitter = bobbed.std.SelectEvery(5, [3 - invpos, 4 - invpos])

        jsup_pre = DitherLumaRebuild(jitter, s0=1).mv.Super(pel=2)
        jsup = jitter.mv.Super(pel=2, levels=1)
        vect_f = jsup_pre.mv.Analyse(blksize=blksize,
                                     isb=False,
                                     delta=1,
                                     overlap=overlap)
        vect_b = jsup_pre.mv.Analyse(blksize=blksize,
                                     isb=True,
                                     delta=1,
                                     overlap=overlap)
        comp = core.mv.FlowInter(jitter, jsup, vect_b, vect_f)
        out = core.std.Interleave(
            [comp[::2], clean] if dec else [clean, comp[::2]])
        offs = 3 if dec else 2
        return out[invpos // offs:]
    else:  # 30p credits
        if pattern == 0:
            if offset == -1:
                c1 = core.std.AssumeFPS(bobbed[0] + bobbed.std.SelectEvery(
                    10, [2 + offset, 7 + offset, 5 + offset, 10 + offset]),
                                        **ivtc_fps)  # type:ignore[arg-type]
            else:
                c1 = bobbed.std.SelectEvery(
                    10, [offset, 2 + offset, 7 + offset, 5 + offset])

            if offset == 1:
                c2 = core.std.Interleave([
                    bobbed.std.SelectEvery(10, [4]),
                    bobbed.std.SelectEvery(10, [5]),
                    bobbed[10:].std.SelectEvery(10, [0]),
                    bobbed.std.SelectEvery(10, [9])
                ])
            else:
                c2 = bobbed.std.SelectEvery(
                    10, [3 + offset, 4 + offset, 9 + offset, 8 + offset])
        else:
            if offset == 1:
                c1 = core.std.Interleave([
                    bobbed.std.SelectEvery(10, [3]),
                    bobbed.std.SelectEvery(10, [5]),
                    bobbed[10:].std.SelectEvery(10, [0]),
                    bobbed.std.SelectEvery(10, [8])
                ])
            else:
                c1 = bobbed.std.SelectEvery(
                    10, [2 + offset, 4 + offset, 9 + offset, 7 + offset])

            if offset == -1:
                c2 = core.std.AssumeFPS(bobbed[0] + bobbed.std.SelectEvery(
                    10, [1 + offset, 6 + offset, 5 + offset, 10 + offset]),
                                        **ivtc_fps)  # type:ignore[arg-type]
            else:
                c2 = bobbed.std.SelectEvery(
                    10, [offset, 1 + offset, 6 + offset, 5 + offset])

        super1_pre = DitherLumaRebuild(c1, s0=1).mv.Super(pel=2)
        super1 = c1.mv.Super(pel=2, levels=1)
        vect_f1 = super1_pre.mv.Analyse(blksize=blksize,
                                        isb=False,
                                        delta=1,
                                        overlap=overlap)
        vect_b1 = super1_pre.mv.Analyse(blksize=blksize,
                                        isb=True,
                                        delta=1,
                                        overlap=overlap)
        fix1 = c1.mv.FlowInter(super1,
                               vect_b1,
                               vect_f1,
                               time=50 + direction * 25).std.SelectEvery(
                                   4, [0, 2])

        super2_pre = DitherLumaRebuild(c2, s0=1).mv.Super(pel=2)
        super2 = c2.mv.Super(pel=2, levels=1)
        vect_f2 = super2_pre.mv.Analyse(blksize=blksize,
                                        isb=False,
                                        delta=1,
                                        overlap=overlap)
        vect_b2 = super2_pre.mv.Analyse(blksize=blksize,
                                        isb=True,
                                        delta=1,
                                        overlap=overlap)
        fix2 = c2.mv.FlowInter(super2, vect_b2,
                               vect_f2).std.SelectEvery(4, [0, 2])

        return core.std.Interleave([fix1, fix2] if pattern ==
                                   0 else [fix2, fix1])
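A hedged sketch of the workflow the docstring recommends (every frame number here is made up): trim out the credits section, run the function on it, then splice the result back into the properly IVTC'd clip.

from vsutil import insert_clip

# `src` is the telecined 30000/1001 source, `ivtcd` the already-IVTC'd 24p clip.
creds = ivtc_credits(src[33530:35930], frame_ref=0, tff=True)  # frame_ref: pattern phase of the trim
final = insert_clip(ivtcd, creds, 26824)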
Example #18
def fix_telecined_fades(clip: vs.VideoNode,
                        tff: bool | int | None = None,
                        thr: float = 2.2) -> vs.VideoNode:
    """
    A filter that gives a mathematically perfect solution to fades made *after* telecining
    (which made perfect IVTC impossible). This is an improved version of the Fix-Telecined-Fades plugin
    that deals with overshoot/undershoot by adding a check.

    Make sure to run this *after* IVTC/deinterlacing!

    If the value surpasses thr * original value, it will not affect any pixels in that frame
    to avoid it damaging frames it shouldn't need to. This helps a lot with orphan fields as well,
    which would otherwise create massive swings in values, sometimes messing up the fade fixing.

    If you pass your own float clip, you'll want to make sure to properly dither it down after.
    If you don't do this, you'll run into some serious issues!

    Taken from this gist and modified by LightArrowsEXE.
    <https://gist.github.com/blackpilling/bf22846bfaa870a57ad77925c3524eb1>

    :param clip:        Input clip
    :param tff:         Top-field-first. `False` sets it to Bottom-Field-First.
                        If None, get the field order from the _FieldBased prop.
    :param thr:         Threshold for when a field should be adjusted.
                        Default is 2.2, which appears to be a safe value that doesn't
                        cause it to do weird stuff with orphan fields.

    :return:            Clip with only fades fixed

    """
    def _ftf(n: int, f: List[vs.VideoFrame]) -> vs.VideoNode:
        avg = (get_prop(f[0], 'PlaneStatsAverage',
                        float), get_prop(f[1], 'PlaneStatsAverage', float))

        if avg[0] != avg[1]:
            mean = sum(avg) / 2
            fixed = (sep[0].std.Expr(
                f"x {mean} {avg[0]} / dup {thr} <= swap 1 ? *"),
                     sep[1].std.Expr(f"x {mean} {avg[1]} / *"))
        else:
            fixed = sep  # type: ignore

        return core.std.Interleave(fixed).std.DoubleWeave()[::2]

    # I want to catch this before it reaches SeparateFields and give newer users a more useful error
    if get_prop(clip.get_frame(0), '_FieldBased', int) == 0 and tff is None:
        raise vs.Error(
            "fix_telecined_fades: 'You must set `tff` for this clip!'")
    elif isinstance(tff, (bool, int)):
        clip = clip.std.SetFieldBased(int(tff) + 1)

    clip32 = depth(clip, 32).std.Limiter()
    bits = get_depth(clip)

    sep = clip32.std.SeparateFields().std.PlaneStats()
    # Not pretty, but FrameEval breaks otherwise (and I don't know how or why).
    sep = sep[::2], sep[1::2]  # type: ignore
    ftf = core.std.FrameEval(clip32, _ftf, sep)

    if bits == 32:
        warnings.warn(
            "fix_telecined_fades: 'Make sure to dither down BEFORE setting the FieldBased prop to 0! "
            "Not doing this MAY return some of the combing!'")
    else:
        ftf = depth(ftf, bits, dither_type=Dither.ERROR_DIFFUSION)
        ftf = ftf.std.SetFieldBased(0)

    return ftf
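A hedged usage sketch: run this after IVTC/deinterlacing, and if you pass a 32-bit float clip, dither it down yourself afterwards as the warning above says (`ivtcd` is an assumed IVTC'd clip).

fixed = fix_telecined_fades(ivtcd, tff=True, thr=2.2)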
Example #19
def assert_runs(clip: vs.VideoNode) -> None:
    """Request the first frame to ensure the filter graph actually runs."""
    clip.get_frame(0)