Example no. 1
def ccd(clip: vs.VideoNode, threshold: float) -> vs.VideoNode:
    """taken from a currently-private gist, but should become available in `vs-denoise` soon enough"""
    from vsutil import join, split

    assert clip.format
    bits = clip.format.bits_per_sample
    is_float = clip.format.sample_type == vs.FLOAT
    peak = 1.0 if is_float else (1 << bits) - 1
    threshold /= peak
    # threshold = threshold ** 2 / 195075.0

    rgb = clip.resize.Bicubic(format=vs.RGBS)

    pre1 = rgb.resize.Point(
        clip.width+24, clip.height+24,
        src_left=-12, src_top=-12,
        src_width=clip.width+24, src_height=clip.height+24
    )
    pre2 = rgb.resize.Point(
        rgb.width+24, rgb.height+24,
        src_width=rgb.width+24, src_height=rgb.height+24
    )
    pre_planes = split(pre1)

    shift_planes_clips = [
        split(pre2.resize.Point(src_left=-x, src_top=-y))
        for x in range(0, 25, 8) for y in range(0, 25, 8)
    ]
    denoise_clips = [
        core.std.Expr(pre_planes + shift_planes, f'x a - dup * y b - dup * + z c - dup * + sqrt {threshold} <')
        for shift_planes in shift_planes_clips
    ]

    cond_planes_clips = [
        join([core.std.Expr([splane, dclip], 'y 0 > x 0 ?') for splane in splanes])
        for dclip, splanes in zip(denoise_clips, shift_planes_clips)
    ]

    denoise = core.std.Expr(denoise_clips, add_expr(len(denoise_clips)) + ' 1 +')
    denoise = join([denoise] * 3)

    n_op = len(cond_planes_clips) + 1
    avg = core.std.Expr([pre1] + cond_planes_clips + [denoise], add_expr(n_op) + f' {EXPR_VARS[n_op]} /')
    avg = avg.resize.Bicubic(
        format=clip.format.id, dither_type='error_diffusion', matrix=cast(int, clip.get_frame(0).props['_Matrix'])
    )
    avg = avg.std.Crop(12, 12, 12, 12)

    assert avg.format
    return core.std.ShufflePlanes([clip, avg], [0, 1, 2], avg.format.color_family)
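
# A minimal, self-contained sketch of the thresholding step above (not part of the
# original gist): the same RPN expression compares the Euclidean RGB distance between
# a reference clip and a shifted copy against the normalised threshold. The clip
# names and colour values below are illustrative placeholders only.
import vapoursynth as vs
from vsutil import split

core = vs.core

ref = core.std.BlankClip(format=vs.RGBS, width=640, height=360, color=[0.50, 0.50, 0.50])
shifted = core.std.BlankClip(format=vs.RGBS, width=640, height=360, color=[0.52, 0.50, 0.50])
threshold = 4 / 255  # same idea as `threshold /= peak` above

# x/y/z are the reference R/G/B planes, a/b/c the shifted ones;
# the output is 1.0 wherever the colour distance stays below the threshold.
condition = core.std.Expr(
    split(ref) + split(shifted),
    f'x a - dup * y b - dup * + z c - dup * + sqrt {threshold} <'
)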
Example no. 2
def Deband(clip: vs.VideoNode,
           radius: int = 17,
           threshold: float = 4,
           iterations: int = 1,
           grain: float = 4,
           chroma: bool = True) -> vs.VideoNode:
    """Wrapper for placebo.Deband because at the moment, processing one plane is faster.

    Args:
        clip (vs.VideoNode):
        radius (int, optional): Defaults to 17.
        threshold (float, optional): Defaults to 4.
        iterations (int, optional): Defaults to 1.
        grain (float, optional): Defaults to 4.
        chroma (bool, optional): Defaults to True.

    Returns:
        vs.VideoNode
    """
    if get_depth(clip) != 16:
        clip = depth(clip, 16)
    if chroma is True:
        clip = join([
            core.placebo.Deband(x, 1, iterations, threshold, radius, grain)
            for x in split(clip)
        ])
    else:
        clip = core.placebo.Deband(clip, 1, iterations, threshold, radius,
                                   grain)
    return clip
Example no. 3
def debander(clip: vs.VideoNode,
             luma_grain: float = 4.0,
             **kwargs: Any) -> vs.VideoNode:
    """
        A quick 'n dirty generic debanding function.
        To be more specific, it would appear that it's faster to
        deband every plane separately (don't ask me why).

        To abuse this, we split up the clip into planes beforehand,
        and then join them back together again at the end.

        Although the vast, vast majority of video will be YUV,
        a sanity check for plane amount is done as well, just to be safe.

        :param clip:        Input clip
        :param luma_grain:  Grain added to the luma plane
        :param kwargs:      Additional parameters passed to placebo.Deband

        :return:            Debanded clip
    """
    if clip.format.num_planes == 1:
        return core.placebo.Deband(clip, grain=luma_grain, **kwargs)
    return join([
        core.placebo.Deband(plane(clip, 0), grain=luma_grain, **kwargs),
        core.placebo.Deband(plane(clip, 1), grain=0, **kwargs),
        core.placebo.Deband(plane(clip, 2), grain=0, **kwargs)
    ])
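
# Hypothetical usage of the wrapper above (assumes the vs-placebo plugin is installed;
# the blank source and the settings are placeholders, not values from the original
# script). Extra keyword arguments are forwarded to every placebo.Deband call, while
# grain is only added to the luma plane.
import vapoursynth as vs

core = vs.core

src = core.std.BlankClip(format=vs.YUV420P16, width=1920, height=1080, length=24)
flt = debander(src, luma_grain=6.0, iterations=2, threshold=5.0, radius=24.0)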
Example no. 4
def _fsrcnnx(clip: vs.VideoNode) -> vs.VideoNode:
    blank = core.std.BlankClip(clip, format=vs.GRAY16, color=128 << 8)
    clip = join([clip, blank, blank])
    # The chroma is upscaled with box AKA nearest but we don't care since we only need the luma.
    # It's especially faster and speed is the key :^)
    clip = core.placebo.Shader(clip, 'Shaders/FSRCNNX_x2_56-16-4-1.glsl', clip.width*2, clip.height*2, filter='box')
    return get_y(clip)
Example no. 5
def sbr(clip, radius=None, planes=None, mode='s', blur='gauss', r=1):
    if not isinstance(clip, vs.VideoNode):
        raise TypeError('sbr: This is not a clip')

    numplanes = clip.format.num_planes

    planes = getplanes(planes, numplanes, 'sbr')

    radius = fallback(radius, r)

    radius = append_params(radius, numplanes)[:numplanes]
    mode = append_params(mode, numplanes)[:numplanes]
    blur = append_params(blur, numplanes)[:numplanes]

    radius = [radius[x] if x in planes else -1 for x in range(numplanes)]
    aplanes = [3 if radius[x] >= 0 else 1 for x in range(numplanes)]

    pr = [radius[x] for x in planes]
    pm = [mode[x] for x in planes]
    pb = [blur[x] for x in planes]

    if max(radius) < 1:
        return clip

    mixproc = numplanes - len(planes) > 1
    mixproc = mixproc or any(len(set(p)) > 1 for p in (pr, pm, pb))

    if mixproc:
        clips = split(clip)
        return join([
            sbr_internal(clips[x], radius[x], [3], mode[x], blur[x])
            for x in range(numplanes)
        ])

    return sbr_internal(clip, pr[0], aplanes, pm[0], pb[0])
Example no. 6
def ED(ed_in: vs.VideoNode) -> vs.VideoNode:
    src = ed_in
    # Rescale using a modified version of Zastin's dogahomo()
    rescale = rvs.questionable_rescale(vsutil.depth(src, 16), 810, b=1/3, c=1/3, mask_thresh=0.05)

    # Detail- and linemasking for denoising
    det_mask = lvf.mask.detail_mask(rescale, brz_a=0.25, brz_b=0.15)
    denoise_ya = core.knlm.KNLMeansCL(rescale, d=2, a=3, s=3, h=1.2, channels="Y")
    denoise_ca = core.knlm.KNLMeansCL(rescale, d=2, a=2, s=3, h=1.0, channels="UV")
    denoise_a = core.std.ShufflePlanes([denoise_ya,denoise_ca,denoise_ca], [0,1,2], colorfamily=vs.YUV)
    denoise_b = mvf.BM3D(rescale, sigma=[1.9], ref=denoise_a, profile1="fast", radius1=3)
    # BM3D left some gunk in chroma, most noticeably around hard contrast edges
    denoise = core.std.ShufflePlanes([denoise_b,denoise_a,denoise_a], [0,1,2], colorfamily=vs.YUV)
    denoise = core.std.MaskedMerge(denoise, rescale, det_mask)
    # Thanks for handling the effort of AA for me, Light
    aa = lvf.aa.nneedi3_clamp(denoise, strength=1.25, mthr=0.25)
    # Dehaloing it
    dehalom = rvs.dehalo_mask(aa, iter_out=3)
    dehalo_a = haf.DeHalo_alpha(aa, darkstr=0.9, brightstr=1.1)
    dehalo_a = vsutil.depth(dehalo_a, 16)
    dehalo = core.std.MaskedMerge(aa, dehalo_a, dehalom)
    # Generate a new detail mask and deband it, putting back fine detail the way it was
    det_mask = lvf.mask.detail_mask(dehalo, rad=2, radc=1, brz_a=0.05, brz_b=0.09)
    y,u,v = vsutil.split(dehalo)
    deband_a = vsutil.join([pdb(y, threshold=3.0, grain=6.5),
                            pdb(u, threshold=3.0, grain=2.0),
                            pdb(v, threshold=3.0, grain=2.0)])
    deband = core.std.MaskedMerge(deband_a, dehalo, det_mask)

    # Finish up and output
    grain = kgf.adaptive_grain(deband, strength=0.65, luma_scaling=5)
    out = vsutil.depth(grain, 10)
    return out
Example no. 7
def rescaler(clip: vs.VideoNode,
             height: int) -> Tuple[vs.VideoNode, vs.VideoNode]:
    """
    Basic rescaling and mask generating function using nnedi3.
    """
    from lvsfunc.kernels import Bicubic
    from lvsfunc.scale import descale_detail_mask
    from vardefunc.mask import FDOG
    from vardefunc.scale import nnedi3_upscale
    from vsutil import Range, depth, get_w, get_y, join, plane

    bits, clip = _get_bits(clip, expected_depth=32)

    clip_y = get_y(clip)
    descale = Bicubic().descale(clip_y, get_w(height,
                                              clip.width / clip.height),
                                height)
    rescale = Bicubic().scale(nnedi3_upscale(descale, pscrn=1), clip.width,
                              clip.height)

    l_mask = FDOG().get_mask(clip_y, lthr=0.065,
                             hthr=0.065).std.Maximum().std.Minimum()
    l_mask = l_mask.std.Median().std.Convolution([1] *
                                                 9)  # stolen from varde xd
    masked_rescale = core.std.MaskedMerge(clip_y, rescale, l_mask)

    scaled = join([masked_rescale, plane(clip, 1), plane(clip, 2)])

    upscale = Bicubic().scale(descale, 1920, 1080)
    detail_mask = descale_detail_mask(clip_y, upscale, threshold=0.055)

    scaled_down = scaled if bits == 32 else depth(scaled, bits)
    mask_down = detail_mask if bits == 32 else depth(
        detail_mask, 16, range_in=Range.FULL, range=Range.LIMITED)
    return scaled_down, mask_down
Example no. 8
def rescaler(clip: vs.VideoNode, height: int) -> vs.VideoNode:
    """
    Basic rescaling function using nnedi3.
    """
    from lvsfunc.kernels import Bicubic, BicubicSharp
    from vardefunc.mask import FDOG
    from vardefunc.scale import nnedi3_upscale
    from vsutil import depth, get_w, get_y, join, plane

    clip = depth(clip, 32)

    clip_y = get_y(clip)
    descale = BicubicSharp().descale(clip_y,
                                     get_w(height, clip.width / clip.height),
                                     height)
    rescale = Bicubic(b=-1 / 2,
                      c=1 / 4).scale(nnedi3_upscale(descale, pscrn=1),
                                     clip.width, clip.height)

    l_mask = FDOG().get_mask(clip_y, lthr=0.065,
                             hthr=0.065).std.Maximum().std.Minimum()
    l_mask = l_mask.std.Median().std.Convolution([1] *
                                                 9)  # stolen from varde xd
    masked_rescale = core.std.MaskedMerge(clip_y, rescale, l_mask)

    scaled = join([masked_rescale, plane(clip, 1), plane(clip, 2)])

    return depth(scaled, 16)
Example no. 9
def do_filter() -> vs.VideoNode:
    """Vapoursynth filtering"""
    src = JPBD.src_cut
    out = src

    luma = get_y(out)
    rows = [
        core.std.CropAbs(luma, out.width, 1, top=out.height - 1),
        core.std.CropAbs(luma, out.width, 1, top=out.height - 2)
    ]
    diff = core.std.Expr(rows, 'x y - abs').std.PlaneStats()

    row_fix = vdf.merge_chroma(
        luma.fb.FillBorders(bottom=1, mode="fillmargins"),
        out.fb.FillBorders(bottom=2, mode="fillmargins"))

    fixrow = core.std.FrameEval(out,
                                partial(_select_row, clip=out,
                                        row_fix=row_fix),
                                prop_src=diff)
    out = fixrow

    fixedge_a = awf.bbmod(out, 1, 1, 1, 1, 20, blur=700, u=False, v=False)

    fixedge = out
    fixedge = lvf.rfs(fixedge, fixedge_a, [(EDSTART + 309, EDEND)])
    out = fixedge

    out = depth(out, 16)

    dehalo = gf.MaskedDHA(out, rx=1.4, ry=1.4, darkstr=0.02, brightstr=1)
    dehalo = lvf.rfs(out, dehalo, [(EDEND + 1, src.num_frames - 1)])
    out = dehalo

    resize = core.std.Crop(out, right=12, bottom=8).resize.Bicubic(1920, 1080)
    resize = lvf.rfs(out, resize, [(27005, 27076)])
    out = resize

    # Denoising only the chroma
    pre = hvf.SMDegrain(out, tr=2, thSADC=300, plane=3)
    planes = split(out)
    planes[1], planes[2] = [
        mvf.BM3D(planes[i], 1.25, radius2=2, pre=plane(pre, i))
        for i in range(1, 3)
    ]
    out = join(planes)

    preden = core.dfttest.DFTTest(out, sbsize=16, sosize=12, tbsize=1)
    detail_mask = lvf.mask.detail_mask(preden, brz_a=2500, brz_b=1500)

    deband = vdf.dumb3kdb(preden, 16, threshold=[17, 17], grain=[24, 0])
    deband = core.std.MergeDiff(deband, out.std.MakeDiff(preden))
    deband = core.std.MaskedMerge(deband, out, detail_mask)
    out = deband

    decz = vdf.decsiz(out, min_in=128 << 8, max_in=192 << 8)
    out = decz

    return depth(out, 10).std.Limiter(16 << 2, [235 << 2, 240 << 2], [0, 1, 2])
Example no. 10
def do_filter():
    """Vapoursynth filtering"""
    src = JPBD.src_cut
    src = depth(src, 32)
    out = src


    full_range = core.resize.Bicubic(out, range_in=0, range=1, dither_type='error_diffusion')
    out = full_range


    radius = 3
    y, u, v = split(out)

    y_m = core.resize.Point(y, 960, 1080, src_left=-1)
    y_m = core.resize.Bicubic(y_m, 960, 540)

    def demangle(clip):
        return vdf.nnedi3_upscale(clip, pscrn=0, correct_shift=False).resize.Spline16(src_left=0.5+0.25, src_top=.5)

    y_m, u, v = map(demangle, (y_m, u, v))


    y_fixup = core.std.MakeDiff(y, y_m)
    yu, yv = Regress(y_m, u, v, radius=radius)

    u_fixup = ReconstructMulti(y_fixup, yu, radius=radius)
    u_r = core.std.MergeDiff(u, u_fixup)

    v_fixup = ReconstructMulti(y_fixup, yv, radius=radius)
    v_r = core.std.MergeDiff(v, v_fixup)

    out = join([y, u_r, v_r])
    out = depth(out, 16)



    dehalo = gf.MaskedDHA(out, rx=1.25, ry=1.25, darkstr=0.10, brightstr=1.0, maskpull=46, maskpush=148)
    out = dehalo


    upscale = atf.eedi3Scale(out, 2160, pscrn=0)
    out = upscale

    dehalo = gf.MaskedDHA(out, rx=1.15, ry=1.15, darkstr=0.10, brightstr=1.0, maskpull=46, maskpush=148)
    out = dehalo


    deband_mask = lvf.denoise.detail_mask(out, brz_a=2000, brz_b=1000)
    deband = dbs.f3kpf(out, 28, 48, 48)
    deband = core.std.MaskedMerge(deband, out, deband_mask)
    out = deband


    grain = core.grain.Add(out, 1)
    out = grain

    return out.std.AssumeFPS(fpsnum=1, fpsden=1)
Example no. 11
def placebo_debander(clip: vs.VideoNode,
                     grain: int = 4,
                     **deband_args: Any) -> vs.VideoNode:
    return join(
        [  # Still not sure why splitting it up into planes is faster, but hey!
            core.placebo.Deband(plane(clip, 0), grain=grain, **deband_args),
            core.placebo.Deband(plane(clip, 1), grain=0, **deband_args),
            core.placebo.Deband(plane(clip, 2), grain=0, **deband_args)
        ])
Example no. 12
def shader(clip: vs.VideoNode,
           width: int,
           height: int,
           shader_file: str,
           luma_only: bool = True,
           **kwargs) -> vs.VideoNode:
    """Wrapper for placebo.Resample
       https://github.com/Lypheo/vs-placebo#vs-placebo

    Args:
        clip (vs.VideoNode): Source clip.

        width (int): Destination width.

        height (int): Destination height.

        shader_file (str):
            Path to the shader file used by placebo.Shader.

        luma_only (bool, optional):
            Whether to process only the luma plane. Defaults to True.

    Returns:
        vs.VideoNode: Shader'd clip.
    """
    if clip.format is None:
        raise FormatError('shader: Variable format not allowed!')

    clip = depth(clip, 16)

    if luma_only:
        filter_shader = 'box'
        if clip.format.num_planes == 1:
            if width > clip.width or height > clip.height:
                clip = clip.resize.Point(format=vs.YUV444P16)
            else:
                if width % 4 == 0 and height % 4 == 0:
                    blank = core.std.BlankClip(clip, int(clip.width / 4),
                                               int(clip.height / 4), vs.GRAY16)
                elif width % 2 == 0 and height % 2 == 0:
                    blank = core.std.BlankClip(clip, int(clip.width / 2),
                                               int(clip.height / 2), vs.GRAY16)
                else:
                    blank = core.std.BlankClip(clip, format=vs.GRAY16)
                clip = join([clip, blank, blank])
    else:
        filter_shader = 'ewa_lanczos'

    clip = core.placebo.Shader(clip,
                               shader_file,
                               width,
                               height,
                               filter=filter_shader,
                               **kwargs)

    return get_y(clip) if luma_only else clip
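
# Hypothetical call of the wrapper above, doubling the luma with the FSRCNNX shader
# path used elsewhere in these examples. The blank clip is a stand-in for a real
# source and the vs-placebo plugin is assumed to be installed.
import vapoursynth as vs

core = vs.core

src = core.std.BlankClip(format=vs.YUV420P16, width=1920, height=1080, length=24)
luma_x2 = shader(src, src.width * 2, src.height * 2, 'Shaders/FSRCNNX_x2_56-16-4-1.glsl')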
Example no. 13
def placebo_debander(clip: vs.VideoNode,
                     grain: float = 4.0,
                     placebo_args: Dict[str, Any] = {}) -> vs.VideoNode:
    from vsutil import join, plane

    return join([
        core.placebo.Deband(plane(clip, 0), grain=grain, **placebo_args),
        core.placebo.Deband(plane(clip, 1), grain=0, **placebo_args),
        core.placebo.Deband(plane(clip, 2), grain=0, **placebo_args)
    ])
Example no. 14
def finedehalo_mask(clip: vs.VideoNode,
                    thresh: int = 24320,
                    *,
                    chroma: bool = False) -> vs.VideoNode:
    """
    Dehalo mask based on :py:meth:`fineline_mask` for protecting small things.

    A masking function designed to protect very thin linework and fine detail,
    such as textures, when performing more aggressive forms of filtering.
    Fairly large threshold values are required because all internal
    processing is done in 16 bit.
    The returned mask is the same depth as the input ``clip``.

    :param clip:        The clip to generate the mask for.
    :param thresh:      The threshold value used for :py:meth:`fineline_mask`.
                        Don't forget to scale the value for 16-bit video.
    :param chroma:      Whether or not to mask chroma.
    """

    if not clip.format:
        raise VariableFormatError("fine_dehalo_mask")

    def _gen_mask(plane: vs.VideoNode, thr: int) -> vs.VideoNode:
        flm = depth(fineline_mask(plane, thr), 16)
        dhm = depth(dehalo_mask(plane, outer=True), 16)

        sob = partial(core.std.Sobel, planes=[0])
        dhinner = depth(dehalo_mask(plane, sob, inner=True), 16)

        yeet = core.std.Expr([flm, dhm], "y x -")
        dhm2 = core.std.Expr([dhm, yeet], "x y -").std.Binarize(threshold=thr)

        return core.std.Expr([dhm2, dhinner],
                             "x y -").std.Binarize(threshold=thr)

    dither = False

    depth_in = clip.format.bits_per_sample
    if not depth_in == 16:
        dither = True
        clip = depth(clip, 16, sample_type=vs.INTEGER)

    if chroma:
        planes = split(clip)
        maskplanes = []

        for p in planes:
            maskplanes.append(_gen_mask(p, thresh))

        mask = join(maskplanes)
    else:
        y = get_y(clip)
        mask = _gen_mask(y, thresh)

    return depth(mask, depth_in) if dither else mask
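
# Worked example for the `thresh` note above (illustration only): values are on a
# 16-bit scale, so an 8-bit threshold is simply promoted by eight bits. The default
# of 24320 is just 95 << 8.
thr_8 = 95
thr_16 = thr_8 << 8   # 95 * 256 = 24320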
Example no. 15
def placebo_debander(clip: vs.VideoNode,
                     grain: float = 4.0,
                     deband_args: Dict[str, Any] = {}) -> vs.VideoNode:
    placebo_args: Dict[str, Any] = dict()
    placebo_args |= deband_args

    return join([
        core.placebo.Deband(plane(clip, 0), grain=grain, **placebo_args),
        core.placebo.Deband(plane(clip, 1), grain=0, **placebo_args),
        core.placebo.Deband(plane(clip, 2), grain=0, **placebo_args)
    ])
Example no. 16
def chroma_reconstruct(clip: vs.VideoNode,
                       radius: int = 2,
                       i444: bool = False) -> vs.VideoNode:
    """
    A function to demangle messed-up chroma, like for example chroma
    that was downscaled using Nearest Neighbour, or the chroma found on DVDs.
    This function should be used with care, and not blindly applied to anything.

    This function can also return a 4:4:4 clip. This is not recommended
    except for very specific cases, like for example where you're
    dealing with a razor-sharp 1080p source with a lot of bright colours.
    Otherwise, have it return the 4:2:0 clip instead.

    Original function by shane, modified by Ichunjo and LightArrowsEXE.

    Aliases for this function are `lvsfunc.demangle` and `lvsfunc.crecon`.

    :param clip:    Input clip
    :param radius:  Boxblur radius
    :param i444:    Return a 4:4:4 clip

    :return:        Clip with demangled chroma in either 4:2:0 or 4:4:4
    """
    if clip.format is None:
        raise ValueError("recon: 'Variable-format clips not supported'")

    def dmgl(clip: vs.VideoNode) -> vs.VideoNode:
        return core.resize.Bicubic(clip, w, h, src_left=0.25)

    w, h = clip.width, clip.height

    clipb = depth(clip, 32)
    planes = split(clipb)
    clip_y = planes[0]
    planes[0] = planes[0].resize.Bicubic(planes[1].width,
                                         planes[1].height,
                                         src_left=-.5,
                                         filter_param_a=1 / 3,
                                         filter_param_b=1 / 3)
    planes[0], planes[1], planes[2] = map(dmgl,
                                          (planes[0], planes[1], planes[2]))
    y_fix = core.std.MakeDiff(clip_y, planes[0])
    yu, yv = _Regress(planes[0], planes[1], planes[2], radius=radius)

    u_fix = _ReconstructMulti(y_fix, yu, radius=radius)
    planes[1] = core.std.MergeDiff(planes[1], u_fix)
    v_fix = _ReconstructMulti(y_fix, yv, radius=radius)
    planes[2] = core.std.MergeDiff(planes[2], v_fix)

    merged = join([clip_y, planes[1], planes[2]])
    return core.resize.Bicubic(merged, format=clip.format.id) if not i444 \
        else depth(merged, get_depth(clip))
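
# Hypothetical calls of `chroma_reconstruct` above (the blank clip stands in for a
# source with mangled chroma, and the surrounding module's helpers such as _Regress
# and _ReconstructMulti are assumed to be available).
import vapoursynth as vs

core = vs.core

src = core.std.BlankClip(format=vs.YUV420P16, width=1920, height=1080, length=24)
recon_420 = chroma_reconstruct(src, radius=2)              # back to 4:2:0
recon_444 = chroma_reconstruct(src, radius=2, i444=True)   # keep the 4:4:4 intermediate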
Example no. 17
def do_filter() -> vs.VideoNode:
    """Vapoursynth filtering"""
    src = JPBD.src_cut
    out = src

    luma = get_y(out)
    rows = [
        core.std.CropAbs(luma, out.width, 1, top=out.height - 1),
        core.std.CropAbs(luma, out.width, 1, top=out.height - 2)
    ]
    diff = core.std.Expr(rows, 'x y - abs').std.PlaneStats()

    row_fix = vdf.merge_chroma(
        luma.fb.FillBorders(bottom=1, mode="fillmargins"),
        out.fb.FillBorders(bottom=2, mode="fillmargins"))

    fixrow = core.std.FrameEval(out,
                                partial(_select_row, clip=out,
                                        row_fix=row_fix),
                                prop_src=diff)
    out = fixrow

    out = depth(out, 16)

    # Denoising only the chroma
    pre = hvf.SMDegrain(out, tr=2, thSADC=300, plane=3)
    planes = split(out)
    planes[1], planes[2] = [
        mvf.BM3D(planes[i], 1.25, radius2=2, pre=plane(pre, i))
        for i in range(1, 3)
    ]
    out = join(planes)

    preden = core.dfttest.DFTTest(out, sbsize=16, sosize=12, tbsize=1)
    detail_mask = lvf.mask.detail_mask(preden, brz_a=2500, brz_b=1500)

    deband = vdf.dumb3kdb(preden, 16, threshold=[17, 17], grain=[24, 0])
    deband = core.std.MergeDiff(deband, out.std.MakeDiff(preden))
    deband = core.std.MaskedMerge(deband, out, detail_mask)
    out = deband

    decz = vdf.decsiz(out, min_in=128 << 8, max_in=192 << 8)
    out = decz

    ref = depth(src, 16)
    credit = out
    credit = lvf.rfs(out, ref, CREDITS)
    out = credit

    return depth(out, 10).std.Limiter(16 << 2, [235 << 2, 240 << 2], [0, 1, 2])
Example no. 18
def Median(clip,
           radius=None,
           planes=None,
           mode='s',
           vcmode=1,
           range_in=None,
           memsize=1048576,
           opt=0,
           r=1):
    if not isinstance(clip, vs.VideoNode):
        raise TypeError('Median: This is not a clip')

    f = clip.format
    bits = f.bits_per_sample
    numplanes = f.num_planes
    range_in = fallback(range_in, f.color_family is vs.RGB)

    planes = getplanes(planes, numplanes, 'Median')

    radius = fallback(radius, r)

    radius = append_params(radius, numplanes)[:numplanes]
    mode = append_params(mode, numplanes)[:numplanes]
    vcmode = append_params(vcmode, numplanes)[:numplanes]

    radius = [radius[x] if x in planes else 0 for x in range(numplanes)]
    vcmode = [vcmode[x] if x in planes else 0 for x in range(numplanes)]
    aplanes = [3 if radius[x] > 0 else 1 for x in range(numplanes)]

    pr = [radius[x] for x in planes]
    pm = [mode[x] for x in planes]

    if max(radius) < 1:
        return clip

    mixproc = ('h' in pm or 'v' in pm) and max(pr) > 1 and numplanes - len(
        planes) != 0  # no planes parameter in average.Median
    mixproc = mixproc or any(len(set(p)) > 1 for p in (pr, pm))

    if mixproc:
        clips = split(clip)
        return join([
            Median_internal(clips[x], radius[x], [3], mode[x], vcmode[x],
                            range_in, [0, 1, 1][x], memsize, opt)
            for x in range(numplanes)
        ])

    return Median_internal(clip, pr[0], aplanes, pm[0], vcmode, range_in, 0,
                           memsize, opt)
Example no. 19
def rescaler(clip: vs.VideoNode,
             height: int,
             shader_file: Optional[str] = None
             ) -> Tuple[vs.VideoNode, vs.VideoNode]:
    """
    Basic rescaling function using nnedi3.
    """
    from lvsfunc.kernels import Bicubic, Lanczos
    from lvsfunc.scale import descale_detail_mask
    from vardefunc.mask import FDOG
    from vardefunc.scale import fsrcnnx_upscale, nnedi3_upscale
    from vsutil import Range, depth, get_w, get_y, iterate, join, plane

    clip = depth(clip, 32)

    clip_y = get_y(clip)
    descale = Lanczos(taps=5).descale(clip_y,
                                      get_w(height, clip.width / clip.height),
                                      height)

    if shader_file:
        rescale = fsrcnnx_upscale(descale,
                                  shader_file=shader_file,
                                  downscaler=Bicubic(b=-1 / 2, c=1 / 4).scale)
    else:
        rescale = Bicubic(b=-1 / 2,
                          c=1 / 4).scale(nnedi3_upscale(descale, pscrn=1),
                                         clip.width, clip.height)

    l_mask = FDOG().get_mask(clip_y, lthr=0.065,
                             hthr=0.065).std.Maximum().std.Minimum()
    l_mask = l_mask.std.Median().std.Convolution([1] *
                                                 9)  # stolen from varde xd
    masked_rescale = core.std.MaskedMerge(clip_y, rescale, l_mask)

    scaled = join([masked_rescale, plane(clip, 1), plane(clip, 2)])

    upscale = Lanczos(taps=5).scale(descale, 1920, 1080)
    detail_mask = descale_detail_mask(clip_y, upscale, threshold=0.035)
    detail_mask = iterate(detail_mask, core.std.Inflate, 2)
    detail_mask = iterate(detail_mask, core.std.Maximum, 2)

    return depth(scaled, 16), depth(detail_mask,
                                    16,
                                    range_in=Range.FULL,
                                    range=Range.LIMITED)
Example no. 20
def rescaler(clip: vs.VideoNode, height: int,
             shader_file: Optional[str] = None, **kwargs: Any
             ) -> Tuple[vs.VideoNode, vs.VideoNode]:
    """
    Multi-descaling + reupscaling function.
    Compares multiple descales and takes darkest/brightest pixels from clips as necessary
    """
    import lvsfunc as lvf
    import muvsfunc as muf
    from vardefunc.mask import FDOG
    from vardefunc.scale import fsrcnnx_upscale, nnedi3_upscale

    bits = get_depth(clip)
    clip = depth(clip, 32)

    clip_y = get_y(clip)
    scalers: List[Callable[[vs.VideoNode, int, int], vs.VideoNode]] = [
        lvf.kernels.Spline36().descale,
        lvf.kernels.Catrom().descale,
        lvf.kernels.BicubicSharp().descale,
        lvf.kernels.Catrom().scale
    ]

    descale_clips = [scaler(clip_y, get_w(height), height) for scaler in scalers]

    descale_clip = core.std.Expr(descale_clips, 'x y z a min max min y z a max min max z a min max')
    if shader_file:
        rescale = fsrcnnx_upscale(descale_clip, shader_file=shader_file, downscaler=None)
    else:
        rescale = nnedi3_upscale(descale_clip)

    rescale = muf.SSIM_downsample(rescale, clip.width, clip.height, smooth=((3 ** 2 - 1) / 12) ** 0.5,
                                  sigmoid=True, filter_param_a=0, filter_param_b=0)

    l_mask = FDOG().get_mask(clip_y, lthr=0.065, hthr=0.065).std.Maximum().std.Minimum()
    l_mask = l_mask.std.Median().std.Convolution([1] * 9)  # stolen from varde xd
    masked_rescale = core.std.MaskedMerge(clip_y, rescale, l_mask)

    scaled = join([masked_rescale, plane(clip, 1), plane(clip, 2)])

    upscale = lvf.kernels.Spline36().scale(descale_clips[0], clip.width, clip.height)
    detail_mask = lvf.scale.descale_detail_mask(clip_y, upscale, threshold=0.04)

    scaled_down = scaled if bits == 32 else depth(scaled, bits)
    mask_down = detail_mask if bits == 32 else depth(detail_mask, 16, range_in=Range.FULL, range=Range.LIMITED)
    return scaled_down, mask_down
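
# Hypothetical use of the rescaler above (blank source and height are placeholders,
# and the surrounding module's imports such as lvsfunc, muvsfunc, vardefunc and
# vsutil are assumed to be available): the returned detail mask is typically used
# to restore native-resolution credits over the rescaled clip.
import vapoursynth as vs

core = vs.core

src = core.std.BlankClip(format=vs.YUV420P16, width=1920, height=1080, length=24)
scaled, credit_mask = rescaler(src, height=720, shader_file=None)
restored = core.std.MaskedMerge(scaled, src, credit_mask)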
Example no. 21
def do_filter():
    """Vapoursynth filtering"""
    src = JPBD.src_cut
    src = depth(src, 16)

    denoise = CoolDegrain(src, tr=1, thsad=24, blksize=8, overlap=4, plane=4)
    out = denoise

    radius = 2
    clip_in = depth(out, 32)
    y, u, v = split(clip_in)
    y_m = core.resize.Bicubic(y,
                              960,
                              540,
                              src_left=-.5,
                              filter_param_a=1 / 3,
                              filter_param_b=1 / 3)

    def demangle(clip):
        return core.resize.Bicubic(clip, 1920, 1080, src_left=.5)

    y_m, u, v = map(demangle, (y_m, u, v))

    y_fixup = core.std.MakeDiff(y, y_m)
    yu, yv = Regress(y_m, u, v, radius=radius)

    u_fixup = ReconstructMulti(y_fixup, yu, radius=radius)
    u_r = core.std.MergeDiff(u, u_fixup)

    v_fixup = ReconstructMulti(y_fixup, yv, radius=radius)
    v_r = core.std.MergeDiff(v, v_fixup)

    scaled = depth(join([y, u_r, v_r]), 16)
    out = scaled

    deband_mask = lvf.denoise.detail_mask(out, brz_a=2000, brz_b=1000)
    deband = dbs.f3kpf(out, 17, 24, 24)
    deband = core.std.MaskedMerge(deband, out, deband_mask)
    out = deband

    grain = kgf.adaptive_grain(out, 0.2, luma_scaling=14)
    out = grain

    return depth(out, 10)
Example no. 22
def range_mask(clip: vs.VideoNode,
               rad: int = 2,
               radc: int = 0) -> vs.VideoNode:
    """
    Min/max mask with separate luma/chroma radii.

    rad/radc are the luma/chroma equivalent of gradfun3's "mask" parameter.
    The way gradfun3's mask works is on an 8 bit scale, with rounded dithering of high depth input.
    As such, when following this filter with a Binarize, use the following conversion steps based on input:

    -  8 bit = Binarize(2) or Binarize(thr_det)
    - 16 bit = Binarize(384) or Binarize((thr_det - 0.5) * 256)
    - floats = Binarize(0.005859375) or Binarize((thr_det - 0.5) / 256)

    When radii are equal to 1, this filter becomes identical to mt_edge("min/max", 0, 255, 0, 255).

    :param clip:    Input clip
    :param rad:     Depth in pixels of the detail/edge masking
    :param radc:    Chroma equivalent to ``rad``

    :return:        Range mask
    """
    check_variable(clip, "range_mask")

    if radc == 0:
        clip = get_y(clip)

    if clip.format is None:
        raise ValueError("range_mask: 'Variable-format clips not supported'")

    if clip.format.color_family == vs.GRAY:
        ma = _minmax(clip, rad, True)
        mi = _minmax(clip, rad, False)
        mask = core.std.Expr([ma, mi], 'x y -')
    else:
        planes = split(clip)
        for i, rad_ in enumerate([rad, radc, radc]):
            ma = _minmax(planes[i], rad_, True)
            mi = _minmax(planes[i], rad_, False)
            planes[i] = core.std.Expr([ma, mi], 'x y -')
        mask = join(planes)

    return mask
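
# Worked example of the Binarize conversions listed in the docstring above, using
# thr_det = 2 (the value that reproduces the literals in that list):
thr_det = 2
thr_8 = thr_det                    # Binarize(2) on an 8-bit clip
thr_16 = (thr_det - 0.5) * 256     # 384.0 on a 16-bit clip
thr_f = (thr_det - 0.5) / 256      # 0.005859375 on a float clip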
Example no. 23
def prot_dpir(clip: vs.VideoNode, strength: int = 25,
              matrix: Optional[Union[Matrix, int]] = None,
              cuda: bool = True, device_index: int = 0,
              **dpir_args: Any) -> vs.VideoNode:
    """
    Protective DPIR function for the deblocking mode.
    Sometimes vs-dpir's deblocking mode will litter a random frame with a lot of red dots.
    This is obviously undesirable, so this function was written to combat that.
    Original code by Zewia, modified by LightArrowsEXE.
    Dependencies:
    * vs-dpir
    :param clip:            Input clip
    :param strength:        DPIR's deblocking strength
    :param matrix:          Enum for the matrix of the input clip. See ``types.Matrix`` for more info.
                            If `None`, gets matrix from the "_Matrix" prop of the clip
    :param cuda:            Device type used for deblocking. Uses CUDA if True, else CPU
    :param device_index:    The 'device_index' + 1th device of the given device type in the system
    :param dpir_args:       Additional args to pass on to DPIR
    :return:                Deblocked clip
    """
    from vsdpir import DPIR

    if clip.format is None:
        raise ValueError("prot_dpir: 'Variable-format clips not supported'")

    dpir_args |= dict(strength=strength, task='deblock',
                      device_type='cuda' if cuda else 'cpu',
                      device_index=device_index)

    clip_rgb = depth(clip, 32).std.SetFrameProp('_Matrix', intval=matrix)
    clip_rgb = core.resize.Bicubic(clip_rgb, format=vs.RGBS)

    debl = DPIR(clip_rgb, **dpir_args)
    rgb_planes = split(debl)

    # Grab the brighter parts of the R plane to avoid model fuckery
    # Everything below 5 (8 bit value) gets replaced with the ref's R plane
    rgb_planes[0] = core.std.Expr([rgb_planes[0], rgb_planes[1], plane(clip_rgb, 0)],
                                  'z x > y 5 255 / <= and z x ?')
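    # In infix form the expression reads: (z > x) and (y <= 5/255) ? z : x,
    # i.e. fall back to the reference's R plane (z) only where DPIR darkened R (x)
    # below it while DPIR's G plane (y) is near black; otherwise keep DPIR's R.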
    rgb_merge = join(rgb_planes, family=vs.RGB)

    return core.resize.Bicubic(rgb_merge, format=clip.format.id, matrix=matrix)
Example no. 24
def filterchain() -> Union[vs.VideoNode, Tuple[vs.VideoNode, ...]]:
    """Main filterchain"""
    import lvsfunc as lvf
    import vardefunc as vdf
    from vsutil import depth, split, join
    from finedehalo import fine_dehalo

    src = JP_BD.clip_cut
    src = depth(src, 16)
    src = core.resize.Bicubic(src, chromaloc_in=1, chromaloc=0)

    debl = lvf.deblock.vsdpir(src, strength=1, i444=True)
    debl = depth(debl, 16)
    decs = vdf.noise.decsiz(debl, sigmaS=8, min_in=200 << 8, max_in=235 << 8)

    planes = split(decs)
    planes[2] = fine_dehalo(planes[2], rx=2, ry=2, brightstr=0.9, darkstr=0)
    cdehalo = join(planes)

    dehalo = lvf.dehalo.bidehalo(cdehalo, sigmaS=1.5, sigmaS_final=1)

    baa = lvf.aa.based_aa(dehalo, str(shader_file))
    sraa = lvf.sraa(dehalo, rfactor=1.65)
    clmp = lvf.aa.clamp_aa(dehalo, baa, sraa, strength=1.3)

    deband = flt.masked_f3kdb(clmp, thr=[32, 24])

    grain = vdf.noise.Graigasm(  # Mostly stolen from Varde tbh
        thrs=[x << 8 for x in (32, 80, 128, 176)],
        strengths=[(0.25, 0.0), (0.2, 0.0), (0.15, 0.0), (0.0, 0.0)],
        sizes=(1.15, 1.1, 1.05, 1),
        sharps=(65, 50, 40, 40),
        grainers=[
            vdf.noise.AddGrain(seed=69420, constant=True),
            vdf.noise.AddGrain(seed=69420, constant=False),
            vdf.noise.AddGrain(seed=69420, constant=False)
        ]).graining(deband)

    return grain
Example no. 25
def descale(clip: vs.VideoNode,
            upscaler: Optional[Callable[[vs.VideoNode, int, int],
                                        vs.VideoNode]] = reupscale,
            width: Union[int, List[int], None] = None,
            height: Union[int, List[int]] = 720,
            kernel: kernels.Kernel = kernels.Bicubic(b=0, c=1 / 2),
            threshold: float = 0.0,
            mask: Optional[Callable[[vs.VideoNode, vs.VideoNode],
                                    vs.VideoNode]] = descale_detail_mask,
            src_left: float = 0.0,
            src_top: float = 0.0,
            show_mask: bool = False) -> vs.VideoNode:
    """
    A unified descaling function.
    Includes support for handling fractional resolutions (experimental),
    multiple resolutions, detail masking, and conditional scaling.

    If you want to descale to a fractional resolution,
    set src_left and src_top and round up the target height.

    If the source has multiple native resolutions, specify ``height``
    as a list.

    If you want to conditionally descale, specify a non-zero threshold.

    Dependencies: vapoursynth-descale, znedi3

    :param clip:                    Clip to descale
    :param upscaler:                Callable function with signature upscaler(clip, width, height)
                                    -> vs.VideoNode to be used for reupscaling.
                                    Must be capable of handling variable res clips
                                    for multiple heights and conditional scaling.
                                    If a single height is given and upscaler is None,
                                    a constant resolution GRAY clip will be returned instead.
                                    Note that if upscaler is None, no upscaling will be performed
                                    and neither detail masking nor proper fractional descaling can be performed.
                                    (Default: :py:func:`lvsfunc.scale.reupscale`)
    :param width:                   Width to descale to (if None, auto-calculated)
    :param height:                  Height(s) to descale to. List indicates multiple resolutions,
                                    the function will determine the best. (Default: 720)
    :param kernel:                  Kernel used to descale (see :py:class:`lvsfunc.kernels.Kernel`,
                                    (Default: kernels.Bicubic(b=0, c=1/2))
    :param threshold:               Error threshold for conditional descaling (Default: 0.0, always descale)
    :param mask:                    Function used to mask detail. If ``None``, no masking.
                                    Function must accept a clip and a reupscaled clip and return a mask.
                                    (Default: :py:func:`lvsfunc.scale.descale_detail_mask`)
    :param src_left:                Horizontal shifting for fractional resolutions (Default: 0.0)
    :param src_top:                 Vertical shifting for fractional resolutions (Default: 0.0)
    :param show_mask:               Return detail mask

    :return:                       Descaled and re-upscaled clip with float bitdepth
    """
    if clip.format is None:
        raise ValueError("descale: 'Variable-format clips not supported'")

    if type(height) is int:
        height = [cast(int, height)]

    height = cast(List[int], height)

    if type(width) is int:
        width = [cast(int, width)]
    elif width is None:
        width = [
            get_w(h, aspect_ratio=clip.width / clip.height) for h in height
        ]

    width = cast(List[int], width)

    if len(width) != len(height):
        raise ValueError(
            "descale: Asymmetric number of heights and widths specified")

    resolutions = [Resolution(*r) for r in zip(width, height)]

    clip = depth(clip, 32)
    assert clip.format is not None  # clip was modified by depth, but that won't make it variable
    clip_y = get_y(clip) \
        .std.SetFrameProp('descaleResolution', intval=clip.height)

    variable_res_clip = core.std.Splice([
        core.std.BlankClip(clip_y, length=len(clip) - 1),
        core.std.BlankClip(clip_y, length=1, width=clip.width + 1)
    ],
                                        mismatch=True)

    descale_partial = partial(_perform_descale, clip=clip_y, kernel=kernel)
    clips_by_resolution = {
        c.resolution.height: c
        for c in map(descale_partial, resolutions)
    }

    props = [c.diff for c in clips_by_resolution.values()]
    select_partial = partial(_select_descale,
                             threshold=threshold,
                             clip=clip_y,
                             clips_by_resolution=clips_by_resolution)

    descaled = core.std.FrameEval(variable_res_clip,
                                  select_partial,
                                  prop_src=props)

    if src_left != 0 or src_top != 0:
        descaled = core.resize.Bicubic(descaled,
                                       src_left=src_left,
                                       src_top=src_top)

    if upscaler is None:
        upscaled = descaled
        if len(height) == 1:
            upscaled = core.resize.Point(upscaled, width[0], height[0])
        else:
            return upscaled
    else:
        upscaled = upscaler(descaled, clip.width, clip.height)

    if src_left != 0 or src_top != 0:
        upscaled = core.resize.Bicubic(upscaled,
                                       src_left=-src_left,
                                       src_top=-src_top)

    if upscaled.format is None:
        raise RuntimeError(
            "descale: 'Upscaler cannot return variable-format clips'")

    if mask:
        clip_y = clip_y.resize.Point(format=upscaled.format.id)
        rescaled = kernel.scale(descaled, clip.width, clip.height,
                                (src_left, src_top))
        rescaled = rescaled.resize.Point(format=clip.format.id)
        dmask = mask(clip_y, rescaled)

        if upscaler is None:
            dmask = core.resize.Spline36(dmask, upscaled.width,
                                         upscaled.height)
            clip_y = core.resize.Spline36(clip_y, upscaled.width,
                                          upscaled.height)

        if show_mask:
            return dmask

        upscaled = core.std.MaskedMerge(upscaled, clip_y, dmask)

    upscaled = depth(upscaled, get_depth(clip))

    if clip.format.num_planes == 1 or upscaler is None:
        return upscaled
    return join([upscaled, plane(clip, 1), plane(clip, 2)])
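
# Hypothetical call of `descale` above (per its own docstring this is lvsfunc's
# unified descaler, so lvsfunc's kernels are used; the blank source, heights and
# threshold are placeholders): try two candidate native resolutions and only
# descale frames whose descale error stays below the threshold.
import vapoursynth as vs
from lvsfunc import kernels

core = vs.core

src = core.std.BlankClip(format=vs.YUV420P16, width=1920, height=1080, length=24)
rescaled = descale(src, height=[720, 810], kernel=kernels.Bicubic(b=1/3, c=1/3), threshold=0.003)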
Example no. 26
    def main(self: Filtering) -> vs.VideoNode:
        """Vapoursynth filtering"""
        src = JPBD.clip_cut
        src = depth(src, 16)
        out = src


        h = 800  # noqa
        w = get_w(h)  # noqa
        opstart, opend = 2830, 4986
        edstart, edend = 31504, 33661




        inp = get_y(out)
        out = inp



        # Remove the grain
        ref = hvf.SMDegrain(out, tr=1, thSAD=300, plane=0)
        preden = mvf.BM3D(out, sigma=2, radius1=1, ref=ref)
        out = preden




        # Rescale / Antialiasing / Limiting
        out = depth(out, 32)
        lineart = vdf.mask.FDOG().get_mask(out, lthr=0.065, hthr=0.065).std.Maximum().std.Minimum()
        lineart = lineart.std.Median().std.Convolution([1] * 9)


        descale_clips = [core.resize.Bicubic(out, w, h, filter_param_a=1/3, filter_param_b=1/3),
                         core.descale.Debicubic(out, w, h, 0, 1/2),
                         core.descale.Debilinear(out, w, h)]
        descale = core.std.Expr(descale_clips, 'x y z min max y z max min z min')

        upscale = vdf.scale.fsrcnnx_upscale(descale, height=h * 2, shader_file=r'_shaders\FSRCNNX_x2_56-16-4-1.glsl',
                                            upscaled_smooth=vdf.scale.eedi3_upscale(descale), profile='zastin',
                                            sharpener=partial(gf.DetailSharpen, sstr=1.65, power=4, mode=0, med=True))


        antialias = self.sraa_eedi3(upscale, 3, alpha=0.2, beta=0.4, gamma=100, mdis=20, nrad=3)

        downscale = muvf.SSIM_downsample(antialias, src.width, src.height, kernel='Bicubic', filter_param_a=0, filter_param_b=0)

        adaptmask = core.adg.Mask(downscale.std.PlaneStats(), 25).std.Minimum().std.Minimum().std.Convolution([1] * 9)
        contra = gf.ContraSharpening(downscale, depth(preden, 32), radius=2).rgsf.Repair(downscale, 1)
        contra = core.std.MaskedMerge(downscale, contra, adaptmask)


        scaled = core.std.MaskedMerge(out, contra, lineart)
        merged = vdf.misc.merge_chroma(depth(scaled, 16), src)
        out = merged


        detail_light_mask = lvf.mask.detail_mask(out, brz_a=1500, brz_b=600)

        deband = vdf.deband.dumb3kdb(out, 16, [33, 1], sample_mode=4, use_neo=True)
        deband = core.std.MaskedMerge(deband, out, detail_light_mask)
        out = deband


        # Restore the grain
        neutral = inp.std.BlankClip(960, 540, color=128 << 8)
        diff = join([inp.std.MakeDiff(preden), neutral, neutral])
        grain = core.std.MergeDiff(out, diff)
        out = grain



        crop_a = self.crop_and_fix(out, src, top=128, bottom=136)
        crop_b = self.crop_and_fix(out, src, top=132, bottom=140)
        crop = out
        crop = lvf.rfs(crop, crop_a, [(25696, 25750), (25768, 25963), (26916, 27095),
                                      (27213, 27319), (27368, 27395), (27615, 27810)])
        crop = lvf.rfs(crop, crop_b, [(25751, 25767), (25964, 26723), (26786, 26915),
                                      (27096, 27212), (27320, 27367), (27396, 27614)])
        out = crop




        ref = src
        creditless_mask = vdf.mask.diff_creditless_mask(
            ref, src[opstart:opend+1], JPBD_NCOP.clip_cut[:opend-opstart+1], opstart, thr=25 << 8, sw=3, sh=3, prefilter=True
        ).std.Deflate()
        ringing_mask = hvf.HQDeringmod(ref, mrad=1, msmooth=2, mthr=40, show=True)

        credit = out
        credit = lvf.rfs(credit, ref, [(edstart, edend)])
        credit = lvf.rfs(credit, core.std.MaskedMerge(credit, ref, creditless_mask, 0), [(opstart, opend)])
        credit = lvf.rfs(credit, core.std.MaskedMerge(credit, ref, ringing_mask, 0),
                         [(opstart + 169, opstart + 411)])
        out = credit



        endcard = out + out[31757] * 119
        out = endcard


        decs = vdf.noise.decsiz(out, sigmaS=10, min_in=110 << 8, max_in=192 << 8, gamma=1.1)
        out = decs


        return depth(out, 10).std.Limiter(16 << 2, [235 << 2, 240 << 2])
Example no. 27
    def main(self: Filtering) -> vs.VideoNode:
        """Vapoursynth filtering"""
        src = JPBD.clip_cut
        src = depth(src, 16)
        out = src


        h = 800  # noqa
        w = get_w(h)  # noqa
        edstart, edend = 31384, 33540



        inp = get_y(out)
        out = inp



        # Remove the grain
        ref = hvf.SMDegrain(out, tr=1, thSAD=300, plane=0)
        preden = mvf.BM3D(out, sigma=2, radius1=1, ref=ref)
        out = preden




        # Rescale / Antialiasing / Limiting
        out = depth(out, 32)
        lineart = vdf.mask.FDOG().get_mask(out, lthr=0.065, hthr=0.065).std.Maximum().std.Minimum()
        lineart = lineart.std.Median().std.Convolution([1] * 9)


        descale_clips = [core.resize.Bicubic(out, w, h, filter_param_a=1/3, filter_param_b=1/3),
                         core.descale.Debicubic(out, w, h, 0, 1 / 2),
                         core.descale.Debilinear(out, w, h)]
        descale = core.std.Expr(descale_clips, 'x y z min max y z max min z min')

        upscale = vdf.scale.fsrcnnx_upscale(descale, height=h * 2, shader_file=r'_shaders\FSRCNNX_x2_56-16-4-1.glsl',
                                            upscaled_smooth=vdf.scale.eedi3_upscale(descale), profile='zastin',
                                            sharpener=partial(gf.DetailSharpen, sstr=1.65, power=4, mode=0, med=True))


        antialias = self.sraa_eedi3(upscale, 3, alpha=0.2, beta=0.4, gamma=100, mdis=20, nrad=3)

        downscale = muvf.SSIM_downsample(antialias, src.width, src.height, kernel='Bicubic', filter_param_a=0, filter_param_b=0)

        adaptmask = core.adg.Mask(downscale.std.PlaneStats(), 25).std.Minimum().std.Minimum().std.Convolution([1] * 9)
        contra = gf.ContraSharpening(downscale, depth(preden, 32), radius=2).rgsf.Repair(downscale, 1)
        contra = core.std.MaskedMerge(downscale, contra, adaptmask)


        scaled = core.std.MaskedMerge(out, contra, lineart)
        merged = vdf.misc.merge_chroma(depth(scaled, 16), src)
        out = merged


        detail_light_mask = lvf.mask.detail_mask(out, brz_a=1500, brz_b=600)

        deband_a = vdf.deband.dumb3kdb(out, 16, [33, 1], sample_mode=4, use_neo=True)
        deband_b = vdf.deband.dumb3kdb(out, 17, 40)
        deband = lvf.rfs(deband_a, deband_b, [(11812, 11931), (33541, 33600)])

        deband = core.std.MaskedMerge(deband, out, detail_light_mask)
        out = deband



        # Restore the grain
        neutral = inp.std.BlankClip(960, 540, color=128 << 8)
        diff = join([inp.std.MakeDiff(preden), neutral, neutral])
        grain = core.std.MergeDiff(out, diff)
        out = grain



        ref = src
        credit = out
        credit = lvf.rfs(credit, ref, [(edstart, edend)])
        out = credit



        endcard = out + out[31717] * 121
        out = endcard


        decs = vdf.noise.decsiz(out, sigmaS=10, min_in=110 << 8, max_in=192 << 8, gamma=1.1)
        out = decs


        return depth(out, 10).std.Limiter(16 << 2, [235 << 2, 240 << 2])
Example no. 28
def pre_filterchain() -> Union[vs.VideoNode, Tuple[vs.VideoNode, ...]]:
    """
    Regular filtering to get the output images to stitch.

    This is preferable over handling it unfiltered, since it'll be faster than encoding it
    and reduces the amount of jitter caused by the upscale after.
    """
    import lvsfunc as lvf
    import rekt
    import vardefunc as vdf
    from awsmfunc import bbmod
    from muvsfunc import SSIM_downsample
    from vsutil import depth, get_y, join, plane

    src = JP_BD.clip_cut

    # Fixing animation f**k-ups
    if freeze_ranges:
        src = core.std.FreezeFrames(
            src,
            [s[0] for s in freeze_ranges],
            [e[1] for e in freeze_ranges],
            [f[2] for f in freeze_ranges]
        )

    # Edgefixing
    ef = rekt.rektlvls(
        src, prot_val=[16, 235], min=16, max=235,
        rownum=[0, src.height-1], rowval=[16, 16],
        colnum=[0, src.width-1], colval=[16, 16],
    )

    bb_y = bbmod(ef, left=1, top=1, right=1, bottom=1, thresh=32, y=True, u=False, v=False)
    bb_uv = bbmod(bb_y, left=2, top=2, right=2, bottom=2, y=False, u=True, v=True)

    cshift = flt.shift_chroma(bb_uv, left=0.6)
    cshift = lvf.rfs(bb_uv, cshift, cshift_left_ranges)

    bb32 = depth(cshift, 32)
    bb32_y = get_y(bb32)

    # Descaling + DPIR while it's at a lower res (so I can actually run it because >memory issues xd)
    descale = lvf.kernels.Catrom().descale(bb32_y, 1280, 720)
    downscale = lvf.kernels.Catrom(format=vs.YUV444PS).scale(bb32, 1280, 720)
    descale_444 = join([descale, plane(downscale, 1), plane(downscale, 2)])
    denoise_y = lvf.deblock.vsdpir(descale_444, strength=2.75, mode='deblock', matrix=1, i444=True, cuda=True)

    supersample = vdf.scale.fsrcnnx_upscale(get_y(denoise_y), shader_file=shader_file, downscaler=None)
    downscaled = SSIM_downsample(supersample, src.width, src.height, smooth=((3 ** 2 - 1) / 12) ** 0.5,
                                 sigmoid=True, filter_param_a=0, filter_param_b=0)

    # Create credit mask
    upscale = lvf.kernels.Catrom().scale(descale, src.width, src.height)
    credit_mask = lvf.scale.descale_detail_mask(bb32_y, upscale, threshold=0.055) \
        .std.Deflate().std.Deflate().std.Minimum()

    # Merge early for additional accuracy with DPIR
    merged = core.std.MaskedMerge(downscaled, bb32_y, credit_mask)

    down_y = lvf.kernels.Catrom().scale(merged, src.width // 2, src.height // 2)
    down_i444 = join([down_y, plane(bb32, 1), plane(bb32, 2)])
    deblock_down = lvf.deblock.vsdpir(down_i444, strength=3, mode='denoise', matrix=1, i444=True, cuda=True)

    scaled = depth(join([merged, plane(deblock_down, 1), plane(deblock_down, 2)]), 16)

    # Final bit of "denoising"
    dft = core.dfttest.DFTTest(scaled, sigma=2.0, tbsize=5, tosize=3, planes=[0])
    decs = vdf.noise.decsiz(dft, sigmaS=4, min_in=208 << 8, max_in=232 << 8)

    # AA
    baa = lvf.aa.based_aa(decs, str(shader_file))
    sraa = lvf.sraa(decs, rfactor=1.65)
    clmp = lvf.aa.clamp_aa(decs, baa, sraa, strength=1.3)

    dehalo = lvf.dehalo.masked_dha(clmp, rx=1.4, ry=1.4, brightstr=0.4)
    cwarp = core.warp.AWarpSharp2(dehalo, thresh=72, blur=3, type=1, depth=4, planes=[1, 2])

    # Merge credits (if applicable)
    merged = core.std.MaskedMerge(cwarp, depth(bb32, 16), depth(credit_mask, 16))

    deband = core.average.Mean([
        flt.masked_f3kdb(merged, rad=16, thr=[20, 24], grain=[24, 12]),
        flt.masked_f3kdb(merged, rad=20, thr=[28, 24], grain=[24, 12]),
        flt.masked_placebo(merged, rad=6, thr=2.5, itr=2, grain=4)
    ])

    no_flt = lvf.rfs(deband, depth(bb32, 16), no_filter)

    return no_flt
Example no. 29
File: aa.py Project: petzku/lvsfunc
def upscaled_sraa(clip: vs.VideoNode,
                  rfactor: float = 1.5,
                  rep: Optional[int] = None,
                  width: Optional[int] = None,
                  height: Optional[int] = None,
                  downscaler: Optional[
                      Callable[[vs.VideoNode, int, int],
                               vs.VideoNode]] = kernels.Spline36().scale,
                  opencl: bool = False,
                  nnedi3cl: Optional[bool] = None,
                  eedi3cl: Optional[bool] = None,
                  **eedi3_args: Any) -> vs.VideoNode:
    """
    A function that performs a supersampled single-rate AA to deal with heavy aliasing and broken-up lineart.
    Useful for Web rips, where the source quality is not good enough to descale,
    but you still want to deal with some bad aliasing and lineart.

    It works by supersampling the clip, performing AA, and then downscaling again.
    Downscaling can be disabled by setting `downscaler` to `None`, returning the supersampled luma clip.
    The dimensions of the downscaled clip can also be adjusted by setting `height` or `width`.
    Setting either `height` or `width` will also scale the chroma accordingly.

    Original function written by Zastin, heavily modified by LightArrowsEXE.

    Alias for this function is `lvsfunc.sraa`.

    Dependencies:
    * fmtconv
    * rgsf (optional: 32 bit clip),
    * vapoursynth-eedi3
    * vapoursynth-nnedi3
    * vapoursynth-nnedi3cl (optional: opencl)

    :param clip:            Input clip
    :param rfactor:         Image enlargement factor. 1.3..2 makes it comparable in strength to vsTAAmbk
                            It is not recommended to go below 1.3 (Default: 1.5)
    :param rep:             Repair mode (Default: None)
    :param width:           Target resolution width. If None, determined from `height`
    :param height:          Target resolution height (Default: ``clip.height``)
    :param downscaler:      Resizer used to downscale the AA'd clip
    :param opencl:          OpenCL acceleration (Default: False)
    :param nnedi3cl:        OpenCL acceleration for nnedi3 (Default: False)
    :param eedi3cl:         OpenCL acceleration for eedi3 (Default: False)
    :param eedi3_args:      Arguments passed to eedi3 (Default: alpha=0.2, beta=0.6, gamma=40, nrad=2, mdis=20)

    :return:                Antialiased and optionally rescaled clip
    """
    if clip.format is None:
        raise ValueError(
            "upscaled_sraa: 'Variable-format clips not supported'")

    luma = get_y(clip)

    nnargs: Dict[str, Any] = dict(nsize=0, nns=4, qual=2)
    # TAAmbk defaults are 0.5, 0.2, 20, 3, 30
    eeargs: Dict[str, Any] = dict(alpha=0.2,
                                  beta=0.6,
                                  gamma=40,
                                  nrad=2,
                                  mdis=20)
    eeargs.update(eedi3_args)

    if rfactor < 1:
        raise ValueError("upscaled_sraa: '\"rfactor\" must be above 1'")

    ssw = round(clip.width * rfactor)
    ssh = round(clip.height * rfactor)

    while ssw % 2:
        ssw += 1
    while ssh % 2:
        ssh += 1

    if height is None:
        height = clip.height
    if width is None:
        if height != clip.height:
            width = get_w(height, aspect_ratio=clip.width / clip.height)
        else:
            width = clip.width

    nnedi3cl = fallback(nnedi3cl, opencl)
    eedi3cl = fallback(eedi3cl, opencl)

    # there doesn't seem to be a cleaner way to do this that makes mypy happy
    def nnedi3(*args: Any, **kwargs: Any) -> vs.VideoNode:
        return core.nnedi3cl.NNEDI3CL(*args, **kwargs) if nnedi3cl \
            else core.nnedi3.nnedi3(*args, **kwargs)

    def eedi3(*args: Any, **kwargs: Any) -> vs.VideoNode:
        return core.eedi3m.EEDI3CL(*args, **kwargs) if eedi3cl \
            else core.eedi3m.EEDI3(*args, **kwargs)

    # Nnedi3 upscale from source height to source height * rfactor (Default: 1.5)
    up_y = nnedi3(luma, 0, 1, 0, **nnargs)
    up_y = core.resize.Spline36(up_y, height=ssh, src_top=.5)
    up_y = core.std.Transpose(up_y)
    up_y = nnedi3(up_y, 0, 1, 0, **nnargs)
    up_y = core.resize.Spline36(up_y, height=ssw, src_top=.5)

    # Single-rate AA
    aa_y = eedi3(up_y, 0, 0, 0, sclip=nnedi3(up_y, 0, 0, 0, **nnargs), **eeargs)
    aa_y = core.std.Transpose(aa_y)
    aa_y = eedi3(aa_y, 0, 0, 0, sclip=nnedi3(aa_y, 0, 0, 0, **nnargs), **eeargs)

    scaled: vs.VideoNode

    # Back to source clip height or given height
    if downscaler is None:
        scaled = aa_y
    else:
        scaled = downscaler(aa_y, width, height)

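    # With a repair mode set, the antialiased result is limited against a plain
    # Bicubic resize of the source luma to rein in eedi3 overshoot.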
    if rep:
        scaled = util.pick_repair(scaled)(scaled,
                                          luma.resize.Bicubic(width, height),
                                          mode=rep)

    if clip.format.num_planes == 1 or downscaler is None:
        return scaled
    if height != clip.height or width != clip.width:
        if height % 2:
            raise ValueError(
                "upscaled_sraa: '\"height\" must be an even number when not passing a GRAY clip'"
            )
        if width % 2:
            raise ValueError(
                "upscaled_sraa: '\"width\" must be an even number when not passing a GRAY clip'"
            )

        chroma = kernels.Bicubic().scale(clip, width, height)
        return join([scaled, plane(chroma, 1), plane(chroma, 2)])
    return join([scaled, plane(clip, 1), plane(clip, 2)])
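

# Usage sketch (not part of the original function): a minimal, hedged example of
# calling the function above. It reuses the same `core`, `vs` and `kernels` names
# the function already relies on; the BlankClip stands in for a real source and the
# repair mode is an illustrative choice, not a recommendation.
example_src = core.std.BlankClip(format=vs.YUV420P16, width=1920, height=1080, length=24)

# Supersampled AA, brought back to the source resolution
example_aa = upscaled_sraa(example_src, rfactor=1.5, rep=13,
                           downscaler=kernels.Bicubic().scale)

# The same AA combined with a rescale down to 720p
example_aa_720 = upscaled_sraa(example_src, rfactor=1.5, height=720,
                               downscaler=kernels.Bicubic().scale)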
Example n. 30
0
def do_filter():
    """Vapoursynth filtering"""
    src = JPBD.src_cut

    # Variables
    opstart, opend = 2111, 4268
    edstart, edend = 31650, 33809
    # eyecatch, episode name and next-episode preview
    full_zone = [(18727, 18774), (31590, 31649), (33990, src.num_frames - 1)]
    shabc_zone = [(edstart + 15, edstart + 1215),
                  (edstart + 1882, edstart + 2126)]
    h = 720
    w = get_w(h)
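    # Assumed native resolution is 720p; get_w(720) gives 1280 for the default
    # 16:9 aspect ratio.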

    # Bicubic sharp parts don't have bad edges
    edges_a = core.edgefixer.ContinuityFixer(src, *[[2, 1, 1]] * 4)
    edges_b = awf.bbmod(src, left=6, thresh=32, blur=200)
    edges = lvf.rfs(edges_a, edges_b, [(edstart + 1275, edstart + 1757)])
    edges = lvf.rfs(edges, src, [(opstart, opend)] + full_zone)
    out = depth(edges, 32)

    # Denoise
    ref = hvf.SMDegrain(depth(get_y(out), 16), thSAD=450)
    denoise = hybrid_denoise(out, 0.35, 1.4, dict(a=2, d=1),
                             dict(ref=depth(ref, 32)))
    out = denoise
    # denoise = out

    # Descale
    luma = get_y(out)
    lineart = vdf.edge_detect(luma, 'FDOG', 0.055, (1, 1))
    lineart = lineart.std.Median().std.BoxBlur(0, 1, 1, 1, 1)

    descale_a = core.descale.Despline36(luma, w, h)
    descale_a = descale_a.std.SetFrameProp('descaleKernel', data='spline36')
    descale_b = core.descale.Debicubic(luma, w, h, 0, 1)
    descale_b = descale_b.std.SetFrameProp('descaleKernel', data='sharp_bicubic')
    descale = lvf.rfs(descale_a, descale_b, shabc_zone)

    # Chroma reconstruction
    # y_m is the assumed mangled luma.
    # Descale 1080p -> Bad conversion in 422 720p -> Regular 1080p 420
    radius = 2
    y, u, v = descale, plane(out, 1), plane(out, 2)
    y_m = core.resize.Point(y, 640, 720, src_left=-1)
    y_m = y_m.resize.Bicubic(960, 540, filter_param_a=1 / 3, filter_param_b=1 / 3)

    # 0.25 for 444 and 0.25 for right shifting
    y_m, u, v = [
        c.resize.Bicubic(w,
                         h,
                         src_left=0.25 + 0.25,
                         filter_param_a=0,
                         filter_param_b=.5) for c in [y_m, u, v]
    ]

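    # Assumption (based on how the results are used below): Regress fits a local
    # linear relationship between the re-mangled luma and each chroma plane, and
    # ReconstructMulti applies those coefficients to the luma diff to recover
    # chroma detail lost in the studio's 720p 4:2:2 intermediate.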
    y_fixup = core.std.MakeDiff(y, y_m)
    yu, yv = Regress(y_m, u, v, radius=radius, eps=1e-7)

    u_fixup = ReconstructMulti(y_fixup, yu, radius=radius)
    u_r = core.std.MergeDiff(u, u_fixup)

    v_fixup = ReconstructMulti(y_fixup, yv, radius=radius)
    v_r = core.std.MergeDiff(v, v_fixup)

    # -0.5 * 720/1080 = -1/3
    # -1/3 for the right shift
    # https://forum.doom9.org/showthread.php?p=1802716#post1802716
    u_r, v_r = [
        c.resize.Bicubic(960,
                         540,
                         src_left=-1 / 3,
                         filter_param_a=-.5,
                         filter_param_b=.25) for c in [u_r, v_r]
    ]

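    # Rescale chain: double the descaled luma with FSRCNNX, antialias it with the
    # eedi3-based SRAA, bring it back to source resolution with SSIM_downsample,
    # and only apply the result on line art via the FDOG mask.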
    upscale = vdf.fsrcnnx_upscale(
        descale,
        height=h * 2,
        shader_file=r'shaders\FSRCNNX_x2_56-16-4-1.glsl',
        upscaler_smooth=eedi3_upscale,
        profile='zastin')

    antialias = sraa_eedi3(upscale,
                           3,
                           alpha=0.2,
                           beta=0.4,
                           gamma=40,
                           nrad=3,
                           mdis=20)

    downscale = muvf.SSIM_downsample(antialias,
                                     src.width,
                                     src.height,
                                     filter_param_a=0,
                                     filter_param_b=0)
    downscale = core.std.MaskedMerge(luma, downscale, lineart)

    merged_a = join([downscale, u_r, v_r])
    merged_b = vdf.merge_chroma(downscale, denoise)
    merged = lvf.rfs(merged_a, merged_b, shabc_zone)
    out = depth(merged, 16)

    warp = xvs.WarpFixChromaBlend(out, 80, 2, depth=8)
    out = warp

    dering = gf.MaskedDHA(out,
                          rx=1.25,
                          ry=1.25,
                          darkstr=0.05,
                          brightstr=1.0,
                          maskpull=48,
                          maskpush=140)
    out = dering

    qtgmc = hvf.QTGMC(out, Preset="Slower", InputType=1, ProgSADMask=2.0)
    qtgmc = vdf.fade_filter(out, out, qtgmc, edstart + 1522,
                            edstart + 1522 + 24)
    qtgmc = lvf.rfs(out, qtgmc, [(edstart + 1522 + 25, edstart + 1757)])
    out = qtgmc

    out = lvf.rfs(out, depth(denoise, 16), [(opstart, opend)])

    detail_dark_mask = detail_dark_mask_func(get_y(out),
                                             brz_a=8000,
                                             brz_b=6000)
    detail_light_mask = lvf.denoise.detail_mask(out, brz_a=2500, brz_b=1200)
    detail_mask = core.std.Expr([detail_dark_mask, detail_light_mask],
                                'x y +').std.Median()
    detail_mask_grow = iterate(detail_mask, core.std.Maximum, 2)
    detail_mask_grow = iterate(detail_mask_grow, core.std.Inflate,
                               2).std.BoxBlur(0, 1, 1, 1, 1)

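    # Where the luma is below 32 << 8 (= 8192, i.e. 8-bit 32 at 16-bit depth),
    # use the grown mask; elsewhere, keep the plain detail mask.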
    detail_mask = core.std.Expr([get_y(out), detail_mask_grow, detail_mask],
                                f'x {32<<8} < y z ?')

    deband = dumb3kdb(out, 22, 30)
    deband = core.std.MaskedMerge(deband, out, detail_mask)
    out = deband

    ref = get_y(out).std.PlaneStats()
    adgmask_a = core.adg.Mask(ref, 30)
    adgmask_b = core.adg.Mask(ref, 12)

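    # Roughly: static grain goes on moderately dark areas (inside the looser mask
    # but outside the tighter one), dynamic grain on the darkest areas; the
    # static-grain diff is then added back on top of the dynamically grained clip.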
    stgrain = sizedgrn(out, 0.1, 0.05, 1.05, sharp=80)
    stgrain = core.std.MaskedMerge(out, stgrain, adgmask_b)
    stgrain = core.std.MaskedMerge(out, stgrain, adgmask_a.std.Invert())

    dygrain = sizedgrn(out, 0.2, 0.05, 1.15, sharp=80, static=False)
    dygrain = core.std.MaskedMerge(out, dygrain, adgmask_a)
    grain = core.std.MergeDiff(dygrain, out.std.MakeDiff(stgrain))
    out = grain

    ref = depth(edges, 16)
    credit = out
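    # The diff-based rescale mask flags native-1080p elements (ED credits and the
    # like) that the descale would damage, so they get restored from the
    # pre-descale clip.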
    rescale_mask = vdf.diff_rescale_mask(ref, h, b=0, c=1, mthr=40, sw=0, sh=0)
    rescale_mask = vdf.region_mask(rescale_mask, *[10] * 4)
    rescale_mask = hvf.mt_expand_multi(rescale_mask,
                                       mode='ellipse',
                                       sw=4,
                                       sh=4).std.BoxBlur(0, 1, 1, 1, 1)

    credit = lvf.rfs(credit, ref, full_zone)
    credit = lvf.rfs(credit, core.std.MaskedMerge(credit, ref, rescale_mask),
                     [(edstart, edend)])
    out = credit

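    # Final 10-bit output clamped to TV range: 16 << 2 = 64 (floor),
    # 235 << 2 = 940 (luma ceiling) and 240 << 2 = 960 (chroma ceiling).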
    return depth(out, 10).std.Limiter(16 << 2, [235 << 2, 240 << 2])