Example #1
def scradit_mask(src_luma: vs.VideoNode,
                 rescaled_luma: vs.VideoNode,
                 absthresh: float = 0.060,
                 iters: int = 4) -> vs.VideoNode:
    """
    Basic detail and credit masking function borrowed from Scrad.

    Changed it to be used in a more generic manner, but the core stuff and
    logic come from him, or wherever he got it. Geared towards catching very
    light detail in a different native resolution than the rest of the video.
    Returns a 32 bit (GrayS) mask.

    :param src_luma:        Luma plane of the source. If it has more planes,
                            the luma plane will be extracted.
    :param rescaled_luma:   Luma plane of the rescaled video. If it has more
                            planes, the luma plane will be extracted.
    :param absthresh:       The threshold to binarize the mask with.
    :param iters:           How often to iterate Maximum and Inflate calls.
    """

    luma = depth(get_y(src_luma), 32)

    rescaled = depth(get_y(rescaled_luma), 32)

    mask = core.std.Expr([luma, rescaled], f"x y - abs {absthresh} < 0 1 ?")

    mask = iterate(mask, core.std.Maximum, iters)

    return iterate(mask, core.std.Inflate, iters)
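A minimal usage sketch (not part of the original function): src and rescaled are placeholder clips for the full-resolution source and its descaled-and-reupscaled counterpart, and core is assumed to be the same VapourSynth core object used above.

from vsutil import depth, get_y

# Placeholder clips: `src` is the source, `rescaled` its rescale with the kernel under test.
credit_mask = scradit_mask(src, rescaled, absthresh=0.060, iters=4)

# The mask comes back as 32-bit GrayS, so merge in float and restore the
# native-resolution detail (credits, typesetting) from the source.
restored = core.std.MaskedMerge(depth(get_y(rescaled), 32),
                                depth(get_y(src), 32),
                                credit_mask)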
Example #2
def detail_mask(source: vs.VideoNode,
                rescaled: vs.VideoNode,
                thresh: float = 0.05) -> vs.VideoNode:
    """
    Generates a fairly basic detail mask, mostly for descaling purposes.

    This is mostly used to pick up on detail *lost* in
    :py:func:`.edgecase.questionable_rescale` as per Zastin's original script.
    Catches most, if not all, elements in a different native resolution.

    :param source:      The clip to generate the mask for.
    :param rescaled:    The descaled and re-upscaled clip where detail was lost.
    :param thresh:      The threshold for binarizing the detail mask
    """

    sy, ry = get_y(source), get_y(rescaled)

    if not (sy.format and ry.format):
        raise VariableFormatError("detail_mask")

    if sy.format.id != ry.format.id:
        sy = core.resize.Bicubic(sy, format=ry.format.id)

    mask = core.std.Expr([sy, ry], "x y - abs").std.Binarize(thresh)

    mask = iterate(mask, core.std.Maximum, 4)

    return iterate(mask, core.std.Inflate, 4)
Example #3
 def _ret_mask(clip: vs.VideoNode, thr: int) -> vs.VideoNode:
     mask = kgf.retinex_edgemask(clip)
     mask = core.std.Median(mask).std.Binarize(thr)
     mask = iterate(mask, core.std.Median, 2)
     mask = iterate(mask, core.std.Maximum, 3)
     mask = iterate(mask, core.std.Minimum, 2)
     return mask
Example #4
def hardsub_mask(hrdsb: vs.VideoNode, ref: vs.VideoNode, thresh: float = 0.06,
                 minimum: int = 1, expand: int = 8, inflate: int = 7) -> vs.VideoNode:
    """
    Zastin's spatially-aware hardsub mask.

    :param hrdsb:   Hardsubbed source
    :param ref:     Reference clip
    :param thresh:  Binarization threshold, [0, 1] (Default: 0.06)
    :param minimum: Times to apply Minimum to the mask (Default: 1)
    :param expand:  Times to maximize the mask (Default: 8)
    :param inflate: Times to inflate the mask (Default: 7)

    :return:        Hardsub mask
    """
    check_variable(hrdsb, "hardsub_mask")
    check_variable(ref, "hardsub_mask")
    assert hrdsb.format

    hsmf = core.std.Expr([hrdsb, ref], 'x y - abs') \
        .resize.Point(format=hrdsb.format.replace(subsampling_w=0, subsampling_h=0).id)
    hsmf = core.std.Expr(split(hsmf), "x y z max max")
    hsmf = hsmf.std.Binarize(scale_thresh(thresh, hsmf))
    hsmf = iterate(hsmf, core.std.Minimum, minimum)
    hsmf = iterate(hsmf, core.std.Maximum, expand)
    hsmf = iterate(hsmf, core.std.Inflate, inflate)

    return hsmf
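A hedged usage sketch with placeholder clips (not from the original source): hardsubbed is a subtitled source and clean_ref a clean reference of the same cut and format.

sub_mask = hardsub_mask(hardsubbed, clean_ref, thresh=0.06)
# Pull the clean pixels from the reference wherever subtitles were detected.
dehardsubbed = core.std.MaskedMerge(hardsubbed, clean_ref, sub_mask)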
Example #5
def luma_credit_mask(clip: vs.VideoNode, thr: int = 230,
                     edgemask: EdgeDetect = FDOG(), draft: bool = False) -> vs.VideoNode:
    """Makes a mask based on luma value and edges.

    Args:
        clip (vs.VideoNode):
            Source clip.

        thr (int, optional):
            Luma value assuming 8 bit input. Defaults to 230.

        edgemask (EdgeDetect, optional):
            Edge mask used with thr. Defaults to FDOG().

        draft (bool, optional):
            If True, output the mask without growing it. Defaults to False.

    Returns:
        vs.VideoNode: Credit mask.
    """
    clip = get_y(clip)

    edge_mask = edgemask.get_mask(clip)

    credit_mask = core.std.Expr([edge_mask, clip], f'y {thr} > y 0 ? x min')

    if not draft:
        credit_mask = iterate(credit_mask, core.std.Maximum, 4)
        credit_mask = iterate(credit_mask, core.std.Inflate, 2)

    return credit_mask
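A usage sketch with placeholder clips: src is the original source and rescaled a rescale of it; per the docstring, thr assumes 8-bit values, so scale it for deeper clips.

# `src` and `rescaled` are placeholder clips.
credits = luma_credit_mask(src)  # for a 16-bit clip, scale thr, e.g. thr=230 << 8
# Bring the bright credits back from the source on top of the rescale.
restored = core.std.MaskedMerge(rescaled, src, credits)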
Example #6
 def _mask(self, clip: vs.VideoNode, ref: vs.VideoNode) -> vs.VideoNode:
     ref = ref[0] * self.shift + ref if self.shift else ref
     mask = ref.sub.TextFile(self.filename, fontdir=self.fontdir, blend=False)[1]  # horrific abuse of typechecker
     mask = mask[self.shift:] if self.shift else mask
     mask = mask.std.Binarize(1)
     mask = iterate(mask, core.std.Maximum, 3)
     mask = iterate(mask, core.std.Inflate, 3)
     return mask
Example #7
def do_filter():
    """Vapoursynth filtering"""
    src = JPBD.src_cut
    out = depth(src, 16)

    denoise = hvf.SMDegrain(out, thSAD=150, thSADC=75)
    out = denoise

    y = get_y(out)
    lineart = core.std.Sobel(y).std.Binarize(
        75 << 8).std.Maximum().std.Inflate()

    antialias = lvf.sraa(y,
                         1.5,
                         9,
                         downscaler=core.resize.Spline36,
                         gamma=200,
                         mdis=18)

    sharp = hvf.LSFmod(antialias,
                       strength=95,
                       Smode=3,
                       Lmode=1,
                       edgemode=1,
                       edgemaskHQ=True)

    minmax = core.std.Expr([y, sharp, antialias], 'x y z min max y z max min')
    merge = core.std.MaskedMerge(y, minmax, lineart)
    out = vdf.merge_chroma(merge, out)

    y = get_y(out)
    detail_dark_mask = detail_dark_mask_func(y, brz_a=10000, brz_b=9000)
    detail_light_mask = lvf.denoise.detail_mask(y, brz_a=2500, brz_b=1200)
    detail_mask = core.std.Expr([detail_dark_mask, detail_light_mask],
                                'x y +').std.Median()
    detail_mask_grow = iterate(detail_mask, core.std.Maximum, 2)
    detail_mask_grow = iterate(detail_mask_grow, core.std.Inflate,
                               2).std.Convolution([1, 1, 1, 1, 1, 1, 1, 1, 1])

    detail_mask = core.std.Expr([y, detail_mask_grow, detail_mask],
                                f'x {28<<8} < y z ?')

    deband = dbs.f3kpf(out, 17, 24, 24)
    deband = core.std.MaskedMerge(deband, out, detail_mask)
    out = deband

    grain = adptvgrnMod(out,
                        0.2,
                        0.1,
                        1.25,
                        luma_scaling=14,
                        sharp=80,
                        static=False,
                        lo=19,
                        hi=[192, 240])
    out = grain

    return depth(out, 10).std.Limiter(16 << 2, [235 << 2, 240 << 2], [0, 1, 2])
Example #8
def filterchain() -> Union[vs.VideoNode, Tuple[vs.VideoNode, ...]]:
    """Regular VapourSynth filterchain"""
    import EoEfunc as eoe
    import havsfunc as haf
    import lvsfunc as lvf
    import vardefunc as vdf
    from adptvgrnMod import adptvgrnMod
    from ccd import ccd
    from muvsfunc import SSIM_downsample
    from vsutil import depth, get_y, iterate

    src = pre_freeze().std.AssumeFPS(fpsnum=24000, fpsden=1001)
    src = depth(src, 16)

    # TO-DO: Figure out how they post-sharpened it. Probably some form of unsharpening?
    src_y = depth(get_y(src), 32)
    descale = lvf.kernels.Bicubic(b=0, c=3 / 4).descale(src_y, 1440, 810)
    double = vdf.scale.nnedi3cl_double(descale, pscrn=1)
    rescale = depth(SSIM_downsample(double, 1920, 1080), 16)
    scaled = vdf.misc.merge_chroma(rescale, src)

    denoise = core.knlm.KNLMeansCL(scaled, d=1, a=3, s=4, h=0.4, channels='Y')
    stab = haf.GSMC(denoise, radius=2, planes=[0])
    cdenoise = ccd(stab, threshold=5, matrix='709')
    decs = vdf.noise.decsiz(cdenoise,
                            sigmaS=4,
                            min_in=208 << 8,
                            max_in=232 << 8)

    dehalo = haf.YAHR(decs, blur=2, depth=32)
    halo_mask = lvf.mask.halo_mask(decs, rad=3, brz=0.3, thma=0.42)
    dehalo_masked = core.std.MaskedMerge(decs, dehalo, halo_mask)
    dehalo_min = core.std.Expr([dehalo_masked, decs], "x y min")

    aa = lvf.aa.nneedi3_clamp(dehalo_min, strength=1.5)
    # Some scenes have super strong aliasing that I really don't wanna scenefilter until BDs. Thanks, Silver Link!
    aa_strong = lvf.sraa(dehalo_min, rfactor=1.35)
    aa_spliced = lvf.rfs(aa, aa_strong, [])

    upscale = lvf.kernels.Bicubic(b=0, c=3 / 4).scale(descale, 1920, 1080)
    credit_mask = lvf.scale.descale_detail_mask(src_y, upscale, threshold=0.08)
    credit_mask = iterate(credit_mask, core.std.Deflate, 3)
    credit_mask = iterate(credit_mask, core.std.Inflate, 3)
    credit_mask = iterate(credit_mask, core.std.Maximum, 2)
    merge_credits = core.std.MaskedMerge(aa_spliced, src,
                                         depth(credit_mask, 16))

    deband = flt.masked_f3kdb(merge_credits, rad=18, thr=32, grain=[24, 0])
    grain: vs.VideoNode = adptvgrnMod(deband,
                                      seed=42069,
                                      strength=0.15,
                                      luma_scaling=10,
                                      size=1.25,
                                      sharp=80,
                                      static=True,
                                      grain_chroma=False)

    return grain
Example #9
def filterchain() -> Union[vs.VideoNode, Tuple[vs.VideoNode, ...]]:
    """Main filterchain"""
    import lvsfunc as lvf
    import muvsfunc as muf
    import vardefunc as vdf
    from adptvgrnMod import adptvgrnMod
    from ccd import ccd
    from vsutil import depth, get_w, get_y, iterate
    from xvs import WarpFixChromaBlend

    # Can't mean this one out this time because of credit changes
    src = JP_CR.clip_cut
    src = depth(src, 32)

    src_y = get_y(src)
    descale = lvf.kernels.Lanczos(taps=5).descale(src_y, get_w(945), 945)
    rescale = vdf.scale.nnedi3cl_double(descale, pscrn=1)
    rescale = muf.SSIM_downsample(rescale, src_y.width, src_y.height)
    scaled = vdf.misc.merge_chroma(rescale, src)
    scaled = depth(scaled, 16)

    # Having a hard time reliably catching the EDs. Oh well.
    upscale = lvf.kernels.Lanczos(taps=5).scale(descale, src_y.width,
                                                src_y.height)
    credit_mask = depth(
        lvf.scale.descale_detail_mask(src_y, upscale, threshold=0.08), 16)
    credit_mask = iterate(credit_mask, core.std.Minimum, 5)
    credit_mask = iterate(credit_mask, core.std.Maximum, 9)
    credit_mask = core.morpho.Close(credit_mask, 9)

    credits_merged = core.std.MaskedMerge(scaled, depth(src, 16), credit_mask)

    denoise_y = core.knlm.KNLMeansCL(credits_merged,
                                     d=1,
                                     a=3,
                                     s=4,
                                     h=0.55,
                                     channels='Y')
    denoise_uv = ccd(denoise_y, threshold=6, matrix='709')
    decs = vdf.noise.decsiz(denoise_uv,
                            sigmaS=8,
                            min_in=208 << 8,
                            max_in=232 << 8)

    darken = flt.line_darkening(decs, strength=0.175)

    deband = flt.masked_f3kdb(darken, thr=24, grain=[24, 12])
    grain: vs.VideoNode = adptvgrnMod(deband,
                                      seed=42069,
                                      strength=0.45,
                                      luma_scaling=10,
                                      size=1.25,
                                      sharp=100,
                                      static=True,
                                      grain_chroma=False)

    return grain
Example #10
    def test_iterate(self):
        def double_number(x: int) -> int:
            return x * 2

        self.assertEqual(vsutil.iterate(2, double_number, 0), 2)
        self.assertEqual(vsutil.iterate(2, double_number, 1), double_number(2))
        self.assertEqual(vsutil.iterate(2, double_number, 3), double_number(double_number(double_number(2))))

        with self.assertRaisesRegex(ValueError, 'Count cannot be negative.'):
            vsutil.iterate(2, double_number, -1)
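The test above pins down what vsutil.iterate does: apply a function to a value count times, rejecting negative counts. A minimal equivalent (an illustrative sketch, not the library's actual code):

def iterate_sketch(base, function, count):
    # Same semantics the test checks, including the negative-count error.
    if count < 0:
        raise ValueError('Count cannot be negative.')
    result = base
    for _ in range(count):
        result = function(result)
    return result

# With VapourSynth clips this gives the usual mask-growing pattern:
# iterate_sketch(mask, core.std.Maximum, 4) behaves like iterate(mask, core.std.Maximum, 4)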
Example #11
    def ringing_mask(clip: vs.VideoNode) -> vs.VideoNode:
        linemask = vdf.mask.FreyChenG41().get_mask(clip, lthr=5000, multi=1.5)
        linemask = iterate(linemask, lambda c: core.rgvs.RemoveGrain(c, 2), 3)
        linemask = iterate(linemask, lambda c: core.rgvs.RemoveGrain(c, 3), 2)
        linemask = linemask.std.Maximum().std.Minimum()
        linemask = core.std.Merge(linemask, linemask.std.Minimum(), 0.25)

        ringing_mask = iterate(linemask, core.std.Maximum, 3).std.Inflate()
        ringing_mask = core.std.Expr([ringing_mask, linemask], 'x y -')
        return ringing_mask
Example #12
def do_filter():
    """Vapoursynth filtering"""
    src = JPBD.src_cut
    h = 720
    w = get_w(h)


    edgesfix = awf.bbmod(src, 1, 1, 1, 1, 48, 500)
    out = edgesfix


    clip = depth(out, 32)
    denoise = hybrid_denoise(clip, 0.45, 1.5)
    out = denoise



    luma = get_y(out)
    line_mask = line_mask_func(luma)

    descale = core.descale.Debilinear(luma, w, h)
    upscale = vdf.nnedi3_upscale(descale, pscrn=1)
    antialias = single_rate_antialiasing(upscale, 13, alpha=0.2, beta=0.5, gamma=600, mdis=15)


    scaled = core.resize.Bicubic(antialias, src.width, src.height)
    rescale = core.std.MaskedMerge(luma, scaled, depth(line_mask, 32))
    merged = vdf.merge_chroma(rescale, out)
    out = depth(merged, 16)



    preden = core.knlm.KNLMeansCL(get_y(out), h=0.75, a=2, d=3, device_type='gpu', device_id=0)
    detail_dark_mask = detail_dark_mask_func(preden, brz_a=8000, brz_b=6000)
    detail_light_mask = lvf.denoise.detail_mask(preden, brz_a=2500, brz_b=1200)
    detail_mask = core.std.Expr([detail_dark_mask, detail_light_mask], 'x y +').std.Median()
    detail_mask_grow = iterate(detail_mask, core.std.Maximum, 2)
    detail_mask_grow = iterate(detail_mask_grow, core.std.Inflate, 2).std.Convolution([1, 1, 1, 1, 1, 1, 1, 1, 1])

    detail_mask = core.std.Expr([preden, detail_mask_grow, detail_mask], f'x {32<<8} < y z ?')


    deband_a = dbs.f3kpf(out, 16, 30, 42, thr=0.5, elast=2, thrc=0.2)
    deband_b = placebo.deband(out, 18, 5.5, 2, 4)
    deband = core.std.MaskedMerge(deband_a, deband_b, preden)
    deband = core.std.MaskedMerge(deband_a, out, detail_mask)
    deband = core.neo_f3kdb.Deband(deband, preset='depth', grainy=24, grainc=24)
    out = deband


    grain = adptvgrnMod(out, 0.4, 0.3, 1.25, luma_scaling=8, sharp=80, static=False, lo=19)
    out = grain


    return depth(out, 10)
Example #13
def halo_mask(clip: vs.VideoNode, rad: int = 2,
              brz: float = 0.35,
              thmi: float = 0.315, thma: float = 0.5,
              thlimi: float = 0.195, thlima: float = 0.392,
              edgemask: Optional[vs.VideoNode] = None) -> vs.VideoNode:
    """
    A halo mask to catch basic haloing, inspired by the mask from FineDehalo.
    Most was copied from there, but some key adjustments were made to center it specifically around masking.

    rx and ry are now combined into rad, which expects an integer.
    Float made sense for FineDehalo since it uses DeHalo_alpha for dehaloing,
    but the masks themselves use rounded rx/ry values, so there's no reason to bother with floats here.

    All thresholds are float and will be scaled to ``clip``\\'s format.
    If thresholds are greater than 1, they will be assumed to be in 8-bit and scaled accordingly.

    :param clip:            Input clip
    :param rad:             Radius for the mask
    :param brz:             Binarizing for shrinking mask (Default: 0.35)
    :param thmi:            Minimum threshold for sharp edges; keep only the sharpest edges
    :param thma:            Maximum threshold for sharp edges; keep only the sharpest edges
    :param thlimi:          Minimum limiting threshold; includes more edges than previously, but ignores simple details
    :param thlima:          Maximum limiting threshold; includes more edges than previously, but ignores simple details
    :param edgemask:        Edgemask to use. If None, uses ``clip.std.Prewitt()`` (Default: None).

    :return:                Halo mask
    """
    smax = scale_thresh(1.0, clip)

    thmi, thma, thlimi, thlima = (scale_thresh(t, clip, assume=8) for t in [thmi, thma, thlimi, thlima])

    matrix = [1, 2, 1, 2, 4, 2, 1, 2, 1]

    edgemask = edgemask or get_y(clip).std.Prewitt()

    # Preserve just the strongest edges
    strong = core.std.Expr(edgemask, expr=f"x {thmi} - {thlima-thlimi} / {smax} *")
    # Expand to pick up additional halos
    expand = iterate(strong, core.std.Maximum, rad)

    # Having too many intersecting lines will oversmooth the mask. We get rid of those here.
    light = core.std.Expr(edgemask, expr=f"x {thlimi} - {thma-thmi} / {smax} *")
    shrink = iterate(light, core.std.Maximum, rad)
    shrink = core.std.Binarize(shrink, scale_thresh(brz, clip))
    shrink = iterate(shrink, core.std.Minimum, rad)
    shrink = iterate(shrink, partial(core.std.Convolution, matrix=matrix), 2)

    # Making sure the lines are actually excluded
    excl = core.std.Expr([strong, shrink], expr="x y max")
    # Subtract and boosting to make sure we get the max pixel values for dehaloing
    mask = core.std.Expr([expand, excl], expr="x y - 2 *")
    # Additional blurring to amplify the mask
    mask = core.std.Convolution(mask, matrix)
    return core.std.Expr(mask, expr="x 2 *")
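A usage sketch mirroring how the filterchains elsewhere in this list consume such a mask; clip and dehaloed are placeholders, the latter being any dehalo pass over clip (e.g. YAHR).

mask = halo_mask(clip, rad=2, brz=0.35)
# Only take dehaloed pixels inside the ring area the mask found,
# leaving line art and flat areas untouched.
masked_dehalo = core.std.MaskedMerge(clip, dehaloed, mask)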
Example #14
 def _mask(self, clip: vs.VideoNode, ref: vs.VideoNode) -> vs.VideoNode:
     assert clip.format is not None
     hsmf = core.std.Expr([clip, ref], 'x y - abs') \
         .resize.Point(format=clip.format.replace(subsampling_w=0, subsampling_h=0).id)
     if clip.format.num_planes > 1:
         hsmf = core.std.Expr(vsutil.split(hsmf), "x y z max max")
     hsmf = vsutil.iterate(
         vsutil.iterate(
             hsmf.std.Binarize(scale_thresh(self.thresh,
                                            clip)).std.Minimum(),
             core.std.Maximum, self.expand), core.std.Inflate, self.inflate)
     return hsmf
Example #15
def filterchain() -> Union[vs.VideoNode, Tuple[vs.VideoNode, ...]]:
    """Regular VapourSynth filterchain"""
    import havsfunc as haf
    import lvsfunc as lvf
    import vardefunc as vdf
    from adptvgrnMod import adptvgrnMod
    from muvsfunc import SSIM_downsample
    from vsutil import depth, get_y, iterate

    src = pre_freeze()
    src = depth(src, 16)

    src_y = depth(get_y(src), 32)
    descale = lvf.kernels.Bicubic(b=0, c=3 / 4).descale(src_y, 1440, 810)
    double = vdf.scale.nnedi3cl_double(descale, pscrn=1)
    rescale = depth(SSIM_downsample(double, 1920, 1080), 16)
    scaled = vdf.misc.merge_chroma(rescale, src)

    denoise = core.knlm.KNLMeansCL(scaled, d=1, a=3, s=4, h=0.3, channels='Y')
    decs = vdf.noise.decsiz(denoise,
                            sigmaS=4,
                            min_in=208 << 8,
                            max_in=232 << 8)

    dehalo = haf.YAHR(decs, blur=2, depth=28)
    halo_mask = lvf.mask.halo_mask(decs, rad=3, brz=0.3, thma=0.42)
    dehalo_masked = core.std.MaskedMerge(decs, dehalo, halo_mask)

    aa = lvf.aa.nneedi3_clamp(dehalo_masked, strength=1.5)
    # Strong aliasing on the transformation scene (and probably elsewhere that I missed). Thanks, Silver Link!
    aa_strong = lvf.sraa(dehalo_masked, rfactor=1.35)
    aa_spliced = lvf.rfs(aa, aa_strong, [(7056, 7322)])

    upscale = lvf.kernels.Bicubic(b=0, c=3 / 4).scale(descale, 1920, 1080)
    credit_mask = lvf.scale.descale_detail_mask(src_y, upscale, threshold=0.08)
    credit_mask = iterate(credit_mask, core.std.Deflate, 3)
    credit_mask = iterate(credit_mask, core.std.Inflate, 3)
    credit_mask = iterate(credit_mask, core.std.Maximum, 2)
    merge_credits = core.std.MaskedMerge(aa_spliced, src,
                                         depth(credit_mask, 16))

    deband = flt.masked_f3kdb(merge_credits, rad=18, thr=32, grain=[24, 0])
    grain: vs.VideoNode = adptvgrnMod(deband,
                                      seed=42069,
                                      strength=0.15,
                                      luma_scaling=10,
                                      size=1.25,
                                      sharp=80,
                                      static=True,
                                      grain_chroma=False)

    return grain
Example #16
    def aa_stonks(clip: vs.VideoNode) -> vs.VideoNode:
        mask = core.std.Prewitt(get_y(clip)).std.Binarize(4000)
        mask = iterate(mask, core.std.Maximum, 4)
        mask = iterate(mask, core.std.Minimum, 2)
        mask = iterate(mask, core.std.Deflate, 4)
        mask = vdf.region_mask(mask, 400, 400, 0, 0)

        descale = core.descale.Debicubic(depth(get_y(clip), 32), 1440, 810)
        upscale = core.caffe.Waifu2x(descale, 3, 2, model=6)
        downscale = core.descale.Debilinear(upscale, 1920, 1080)
        merged = vdf.merge_chroma(depth(downscale, 16), clip)
        merged = core.std.MaskedMerge(clip, merged, mask)

        return core.warp.AWarpSharp2(merged, 128, 2, depth=16)
Example #17
def do_filter():
    """Vapoursynth filtering"""
    src = JPBD.src_cut

    edges = core.edgefixer.ContinuityFixer(src, *[[2, 1, 1]] * 4)
    out = depth(edges, 32)

    ref = hvf.SMDegrain(depth(get_y(out), 16), thSAD=450)
    denoise = hybrid_denoise(out, 0.35, 1.75, dict(a=2, d=1),
                             dict(ref=depth(ref, 32)))
    out = depth(denoise, 16)

    detail_dark_mask = detail_dark_mask_func(get_y(out),
                                             brz_a=8000,
                                             brz_b=6000)
    detail_light_mask = lvf.denoise.detail_mask(out, brz_a=2500, brz_b=1200)
    detail_mask = core.std.Expr([detail_dark_mask, detail_light_mask],
                                'x y +').std.Median()
    detail_mask_grow = iterate(detail_mask, core.std.Maximum, 2)
    detail_mask_grow = iterate(detail_mask_grow, core.std.Inflate,
                               2).std.Convolution([*[1] * 9])

    detail_mask = core.std.Expr([get_y(out), detail_mask_grow, detail_mask],
                                f'x {32<<8} < y z ?')

    deband = dumb3kdbv2(out, 22, 24)
    deband = core.std.MaskedMerge(deband, out, detail_mask)

    deband_b = dumb3kdbv2(out, 24, 64)
    deband_b = core.std.MaskedMerge(deband_b, out, detail_light_mask)
    deband = lvf.rfs(deband, deband_b, [(414, 496)])

    out = deband

    ref = get_y(out).std.PlaneStats()
    adgmask_a = core.adg.Mask(ref, 30)
    adgmask_b = core.adg.Mask(ref, 12)

    stgrain = sizedgrn(out, 0.1, 0.05, 1.05, sharp=80)
    stgrain = core.std.MaskedMerge(out, stgrain, adgmask_b)
    stgrain = core.std.MaskedMerge(out, stgrain, adgmask_a.std.Invert())

    dygrain = sizedgrn(out, 0.2, 0.05, 1.15, sharp=80, static=False)
    dygrain = core.std.MaskedMerge(out, dygrain, adgmask_a)
    grain = core.std.MergeDiff(dygrain, out.std.MakeDiff(stgrain))
    out = grain

    return depth(out, 10).std.Limiter(16 << 2, [235 << 2, 240 << 2])
Example #18
def mask_logo(clip: vs.VideoNode, src: vs.VideoNode, src_logo: vs.VideoNode,
              range: Range) -> vs.VideoNode:
    mask = vsutil.get_y(src_logo).std.Binarize(19).fmtc.bitdepth(bits=16)
    mask = vsutil.iterate(mask, core.std.Inflate, 5)
    merge = core.std.MaskedMerge(clip, src, mask)
    merge = replace_ranges(clip, merge, [range])
    return merge
Example #19
    def get_progressive_dehardsub(self, hrdsb: vs.VideoNode, ref: vs.VideoNode,
                                  partials: List[vs.VideoNode]) -> Tuple[List[vs.VideoNode], List[vs.VideoNode]]:
        """
        Dehardsub using multiple superior hardsubbed sources and one inferior non-subbed source.

        :param hrdsb:    Hardsub master source (eg Wakanim RU dub)
        :param ref:      Non-subbed reference source (eg CR, Funi, Amazon)
        :param partials: Sources to use for partial dehardsubbing (eg Waka DE, FR, SC)

        :return:         Dehardsub stages and masks used for progressive dehardsub
        """
        masks = [self.get_mask(hrdsb, ref)]
        pdhs = [hrdsb]
        dmasks = []
        partials = partials + [ref]
        assert masks[-1].format is not None
        thresh = scale_thresh(0.75, masks[-1])
        for p in partials:
            masks.append(core.std.Expr([masks[-1], self.get_mask(p, ref)], expr="x y -"))
            dmasks.append(iterate(core.std.Expr([masks[-1]], f"x {thresh} < 0 x ?"),
                                  core.std.Maximum,
                                  4).std.Inflate())
            pdhs.append(core.std.MaskedMerge(pdhs[-1], p, dmasks[-1]))
            masks[-1] = core.std.MaskedMerge(masks[-1], masks[-1].std.Invert(), masks[-2])
        return pdhs, dmasks
Example #20
    def _perform_endcard(path: str, ref: vs.VideoNode) -> vs.VideoNode:
        endcard = lvf.src(path).std.AssumeFPS(ref)
        endcard = core.std.CropRel(endcard,
                                   left=64,
                                   top=14,
                                   right=54,
                                   bottom=23)
        endcard = core.resize.Bicubic(endcard,
                                      ref.width,
                                      ref.height,
                                      vs.RGBS,
                                      dither_type='error_diffusion')

        endcard = iterate(
            endcard, partial(core.w2xc.Waifu2x, noise=3, scale=1, photo=True),
            2)

        endcard = core.resize.Bicubic(endcard,
                                      format=vs.YUV444PS,
                                      matrix_s='709',
                                      dither_type='error_diffusion')
        endcard = lvf.util.quick_resample(
            endcard,
            lambda c: core.neo_f3kdb.Deband(c, 15, 36, 36, 36, 24, 24, 4))

        return Tweak(endcard, sat=1.2, bright=-0.05, cont=1.2)
Example #21
def antiedgemask(src: vs.VideoNode, iteration: int = 1) -> vs.VideoNode:
    """
    Create an anti-edge mask from inverted sobel edge clip.

    Parameters
    ----------
    src: :class:`VideoNode`
        The video to be anti-edge masked.
    iteration: :class:`int`
        How many times we will need to iterate the anti-edge mask.
        Set to zero if you don't want to iterate.

    Returns
    -------
    :class:`VideoNode`
        The anti-edge masked video.
    """
    if not isinstance(src, vs.VideoNode):
        raise TypeError("antiedgemask: src must be a clip")
    if not isinstance(iteration, int):
        raise TypeError("antiedgemask: iteration must be an integer")

    edge_mask = core.std.Sobel(get_y(src), planes=0)
    if iteration > 0:
        edge_mask = iterate(edge_mask, core.std.Maximum, iteration)

    return edge_mask.std.Invert(0)
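A usage sketch with placeholder clips: since the mask is white away from edges, it can gate operations you only want on flat areas, for example a debanding pass (debanded is assumed to share clip's format).

flat_mask = antiedgemask(clip, iteration=2)
# Merge the debanded clip in only where there are no edges.
merged = core.std.MaskedMerge(clip, debanded, flat_mask)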
Example #22
def detail_mask(clip: vs.VideoNode,
                sigma: float = 1.0, rxsigma: List[int] = [50, 200, 350],
                pf_sigma: Optional[float] = 1.0,
                rad: int = 3, brz: Tuple[int, int] = (2500, 4500),
                rg_mode: int = 17,
                ) -> vs.VideoNode:
    """
    A detail mask aimed at preserving as much detail as possible
    within darker areas, even if it contains mostly noise.
    """
    from kagefunc import kirsch
    from vsutil import iterate

    bits, clip = _get_bits(clip)

    clip_y = get_y(clip)
    pf = core.bilateral.Gaussian(clip_y, sigma=pf_sigma) if pf_sigma else clip_y
    ret = core.retinex.MSRCP(pf, sigma=rxsigma, upper_thr=0.005)

    blur_ret = core.bilateral.Gaussian(ret, sigma=sigma)
    blur_ret_diff = core.std.Expr([blur_ret, ret], "x y -")
    blur_ret_dfl = core.std.Deflate(blur_ret_diff)
    blur_ret_ifl = iterate(blur_ret_dfl, core.std.Inflate, 4)
    blur_ret_brz = core.std.Binarize(blur_ret_ifl, brz[0])
    # blur_ret_brz = core.morpho.Close(blur_ret_brz, size=8)  # No longer in R55

    kirsch_mask = kirsch(clip_y).std.Binarize(brz[1])
    kirsch_ifl = kirsch_mask.std.Deflate().std.Inflate()
    kirsch_brz = core.std.Binarize(kirsch_ifl, brz[1])
    # kirsch_brz = core.morpho.Close(kirsch_brz, size=4)    # No longer in R55

    merged = core.std.Expr([blur_ret_brz, kirsch_brz], "x y +")
    rm_grain = core.rgvs.RemoveGrain(merged, rg_mode)
    return rm_grain if bits == 16 else depth(rm_grain, bits)
Example #23
def final_filterchain() -> vs.VideoNode:
    """Final post-filtering chain"""
    from fractions import Fraction

    import vardefunc as vdf
    from vsutil import get_y, iterate

    if not extract_frames():
        raise vs.Error("Stitched image does not exist!")

    clip = pre_filterchain()
    img = f"assets/{name}/{name}_stitched.png"

    pan = flt.panner_x(clip, img, fps=Fraction(30000, 1001))  # exact 30000/1001 rational, not a float

    denoise = vdf.noise.decsiz(pan, min_in=164 << 8, max_in=204 << 8)
    grain = vdf.noise.Graigasm(
        thrs=[x << 8 for x in (32, 80, 128, 176)],
        strengths=[(0.25, 0.0), (0.20, 0.0), (0.15, 0.0), (0.0, 0.0)],
        sizes=(1.20, 1.15, 1.10, 1),
        sharps=(80, 70, 60, 50),
        grainers=[
            vdf.noise.AddGrain(seed=69420, constant=True),
            vdf.noise.AddGrain(seed=69420, constant=True),
            vdf.noise.AddGrain(seed=69420, constant=True)
        ]).graining(denoise)

    mask = core.std.Expr(get_y(pan), f"x {233 << 8} > {255 << 8} 0 ?")
    mask = mask.std.Maximum().std.Minimum()
    mask = iterate(mask, partial(core.std.Convolution, matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1]), 4)

    wh = core.std.BlankClip(grain).std.Invert()
    masked = core.std.MaskedMerge(grain, wh, mask)

    return masked
Example #24
def rescaler(clip: vs.VideoNode,
             height: int,
             shader_file: Optional[str] = None
             ) -> Tuple[vs.VideoNode, vs.VideoNode]:
    """
    Basic rescaling function using nnedi3.
    """
    from lvsfunc.kernels import Bicubic, Lanczos
    from lvsfunc.scale import descale_detail_mask
    from vardefunc.mask import FDOG
    from vardefunc.scale import fsrcnnx_upscale, nnedi3_upscale
    from vsutil import Range, depth, get_w, get_y, iterate, join, plane

    clip = depth(clip, 32)

    clip_y = get_y(clip)
    descale = Lanczos(taps=5).descale(clip_y,
                                      get_w(height, clip.width / clip.height),
                                      height)

    if shader_file:
        rescale = fsrcnnx_upscale(descale,
                                  shader_file=shader_file,
                                  downscaler=Bicubic(b=-1 / 2, c=1 / 4).scale)
    else:
        rescale = Bicubic(b=-1 / 2,
                          c=1 / 4).scale(nnedi3_upscale(descale, pscrn=1),
                                         clip.width, clip.height)

    l_mask = FDOG().get_mask(clip_y, lthr=0.065,
                             hthr=0.065).std.Maximum().std.Minimum()
    l_mask = l_mask.std.Median().std.Convolution([1] *
                                                 9)  # stolen from varde xd
    masked_rescale = core.std.MaskedMerge(clip_y, rescale, l_mask)

    scaled = join([masked_rescale, plane(clip, 1), plane(clip, 2)])

    upscale = Lanczos(taps=5).scale(descale, 1920, 1080)
    detail_mask = descale_detail_mask(clip_y, upscale, threshold=0.035)
    detail_mask = iterate(detail_mask, core.std.Inflate, 2)
    detail_mask = iterate(detail_mask, core.std.Maximum, 2)

    return depth(scaled, 16), depth(detail_mask,
                                    16,
                                    range_in=Range.FULL,
                                    range=Range.LIMITED)
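A hypothetical call site: the function returns the 16-bit rescale plus a 16-bit detail mask, so the tuple is unpacked and native-resolution detail is restored from the source (src is a placeholder clip).

from vsutil import depth

scaled16, detail_mask16 = rescaler(src, height=810)
# Restore elements that were not produced at the descale resolution (credits, etc.).
restored = core.std.MaskedMerge(scaled16, depth(src, 16), detail_mask16)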
Example #25
def dehalo_mask(clip: vs.VideoNode,
                maskgen: Optional[Callable[[vs.VideoNode],
                                           vs.VideoNode]] = None,
                iter_out: int = 2,
                iter_in: int = -1,
                inner: bool = False,
                outer: bool = False) -> vs.VideoNode:
    """
    Lazy wrapper for making a very basic dehalo mask.

    Expects a YUV clip. No idea what happens when anything else is passed,
    and it's not my issue to figure that out either. Make sure to handle
    any conversions properly before calling this function with a clip.

    :param clip:        The clip to generate the mask for
    :param maskgen:     The masking function to call. Defaults to Prewitt.
    :param iter_out:    Amount of times to iterate expansion (Maximum) for the outer mask.
                        Defaults to 2, the standard size.
    :param iter_in:     Amount of times to iterate contraction (Minimum) for the inner mask.
                        Defaults to ``iter_out+1``.
    :param inner:       Returns the inner mask for checking.
    :param outer:       Returns the outer mask for checking.
    """

    if not clip.format:
        raise VariableFormatError("dehalo_mask")

    maskgen = maskgen if maskgen else lambda c: core.std.Prewitt(c, [0])

    if clip.format.num_planes > 1:
        clip = get_y(clip)

    mask = maskgen(clip)

    luma = core.std.ShufflePlanes(mask, 0, colorfamily=vs.GRAY)

    mout = iterate(luma, core.std.Maximum, iter_out)

    if outer:
        return mout

    iter_in = (iter_out + 1) if iter_in < 0 else iter_in

    minn = iterate(mout, core.std.Minimum, iter_in)

    return minn if inner else core.std.Expr([mout, minn], "x y -")
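A short usage sketch with placeholder clips; dehaloed stands for any dehalo pass over clip.

ring = dehalo_mask(clip, iter_out=2)
masked = core.std.MaskedMerge(clip, dehaloed, ring)
# Set inner=True or outer=True instead to inspect the intermediate masks.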
Example #26
def detail_mask_neo(clip: vs.VideoNode,
                    sigma: float = 1.0,
                    detail_brz: float = 0.05,
                    lines_brz: float = 0.08,
                    blur_func: Callable[[vs.VideoNode, vs.VideoNode, float],
                                        vs.VideoNode] | None = None,
                    edgemask_func: Callable[[vs.VideoNode],
                                            vs.VideoNode] = core.std.Prewitt,
                    rg_mode: int = 17) -> vs.VideoNode:
    """
    A detail mask aimed at preserving as much detail as possible within darker areas,
    even if it winds up being mostly noise.

    :param clip:            Input clip
    :param sigma:           Sigma for the detail mask.
                            Higher means more detail and noise will be caught.
    :param detail_brz:      Binarizing for the detail mask.
                            Default values assume a 16bit clip, so you may need to adjust it yourself.
                            Will not binarize if set to 0.
    :param lines_brz:       Binarizing for the prewitt mask.
                            Default values assume a 16bit clip, so you may need to adjust it yourself.
                            Will not binarize if set to 0.
    :param blur_func:       Blurring function used for the detail detection.
                            Must accept the following parameters: ``clip``, ``ref_clip``, ``sigma``.
                            Uses `bilateral.Bilateral` by default.
    :param edgemask_func:   Edgemasking function used for the edge detection
    :param rg_mode:         Removegrain mode performed on the final output

    :return:                Detail mask
    """
    check_variable(clip, "detail_mask_neo")
    assert clip.format

    if not blur_func:
        blur_func = core.bilateral.Bilateral

    detail_brz = scale_thresh(detail_brz, clip)
    lines_brz = scale_thresh(lines_brz, clip)

    clip_y = get_y(clip)
    blur_pf = core.bilateral.Gaussian(clip_y, sigma=sigma / 4 * 3)

    blur_pref = blur_func(clip_y, blur_pf, sigma)
    blur_pref_diff = core.std.Expr([blur_pref, clip_y], "x y -").std.Deflate()
    blur_pref = iterate(blur_pref_diff, core.std.Inflate, 4)

    prew_mask = edgemask_func(clip_y).std.Deflate().std.Inflate()

    if detail_brz > 0:
        blur_pref = blur_pref.std.Binarize(detail_brz)
    if lines_brz > 0:
        prew_mask = prew_mask.std.Binarize(lines_brz)

    merged = core.std.Expr([blur_pref, prew_mask], "x y +")
    rm_grain = pick_removegrain(merged)(merged, rg_mode)

    return depth(rm_grain, clip.format.bits_per_sample)
Example #27
def descale_detail_mask(clip: vs.VideoNode,
                        rescaled_clip: vs.VideoNode,
                        threshold: float = 0.05) -> vs.VideoNode:
    """
    Generate a detail mask given a clip and a clip rescaled with the same
    kernel.

    Function is curried to allow parameter tuning when passing to :py:func:`lvsfunc.scale.descale`

    :param clip:           Original clip
    :param rescaled_clip:  Clip downscaled and reupscaled using the same kernel
    :param threshold:      Binarization threshold for mask (Default: 0.05)

    :return:               Mask of lost detail
    """
    mask = core.std.Expr([get_y(clip), get_y(rescaled_clip)], 'x y - abs') \
        .std.Binarize(threshold)
    mask = iterate(mask, core.std.Maximum, 4)
    return iterate(mask, core.std.Inflate, 2)
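The docstring's note about currying boils down to partial application; a sketch (with placeholder clips src and rescaled) of fixing the threshold up front:

from functools import partial

# Pre-set the threshold; the resulting callable then only needs the two clips.
tuned_mask = partial(descale_detail_mask, threshold=0.08)
credit_mask = tuned_mask(src, rescaled)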
Example #28
def simple_native_mask(
    clip: vs.VideoNode,
    descale_w: IntegerFloat,
    descale_h: IntegerFloat,
    blurh: IntegerFloat = 1.5,
    blurv: IntegerFloat = 1.5,
    iter_max: int = 3,
    no_resize: bool = False,
) -> vs.VideoNode:
    """
    Create a native mask to make sure native content does not get descaled.

    Parameters
    ----------
    clip: :class:`VideoNode`
        The video source.
    descale_w: :class:`Union[int, float]`
        Target descale width resolution for checking.
    descale_h: :class:`Union[int, float]`
        Target descale height resolution for checking.
    blurh: :class:`Union[int, float]`
        Horizontal blur strength.
    blurv: :class:`Union[int, float]`
        Vertical blur strength.
    iter_max: :class:`int`
        Iteration count that will expand the mask size.
    no_resize: :class:`bool`
        Don't resize to the descaled resolution (keep it at the original resolution).

    Returns
    -------
    :class:`VideoNode`
        The native mask.
    """
    has_plugin_or_raise(["fmtc", "descale"])
    clip_32 = fvf.Depth(clip, 32)
    y_32 = get_y(clip_32)
    clip_bits = clip.format.bits_per_sample

    target_w = clip.width
    target_h = clip.height
    descale_w = int(round(descale_w))
    descale_h = int(round(descale_h))

    down = core.descale.Debicubic(y_32, descale_w, descale_h)
    up = core.resize.Bicubic(down, target_w, target_h)
    dmask = core.std.Expr([y_32, up], "x y - abs 0.025 > 1 0 ?")
    dmask = iterate(dmask, core.std.Maximum, iter_max)
    if blurh > 0 and blurv > 0:
        dmask = core.std.BoxBlur(dmask,
                                 hradius=cast(int, blurh),
                                 vradius=cast(int, blurv))
    if not no_resize:
        dmask = core.resize.Bicubic(dmask, descale_w, descale_h)
    return fvf.Depth(dmask, clip_bits)
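A usage sketch with placeholder clips: by default the mask is returned at the descale resolution, while no_resize=True keeps it at the source resolution so it can gate a rescaled chain directly.

# `clip` is a placeholder 1080p source, `rescaled` a placeholder rescale of it.
native_mask = simple_native_mask(clip, 1280, 720, no_resize=True)
protected = core.std.MaskedMerge(rescaled, clip, native_mask)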
Example #29
def detail_mask(
        clip: vs.VideoNode,
        sigma: float = 1.0,
        detail_brz: int = 2500,
        lines_brz: int = 4500,
        blur_func: Callable[
            [vs.VideoNode, vs.VideoNode, float],
            vs.VideoNode] = core.bilateral.Bilateral,  # type: ignore
        edgemask_func: Callable[[vs.VideoNode],
                                vs.VideoNode] = core.std.Prewitt,
        rg_mode: int = 17) -> vs.VideoNode:
    """
    A detail mask aimed at preserving as much detail as possible within darker areas,
    even if it winds up being mostly noise.

    Currently still in the beta stage.
    Please report any problems or feedback in the IEW Discord (link in the README).

    :param clip:            Input clip
    :param sigma:           Sigma for the detail mask.
                            Higher means more detail and noise will be caught.
    :param detail_brz:      Binarizing for the detail mask.
                            Default values assume a 16bit clip, so you may need to adjust it yourself.
                            Will not binarize if set to 0.
    :param lines_brz:       Binarizing for the prewitt mask.
                            Default values assume a 16bit clip, so you may need to adjust it yourself.
                            Will not binarize if set to 0.
    :param blur_func:       Blurring function used for the detail detection.
                            Must accept the following parameters: ``clip``, ``ref_clip``, ``sigma``.
    :param edgemask_func:   Edgemasking function used for the edge detection
    :param rg_mode:         Removegrain mode performed on the final output
    """
    import lvsfunc as lvf

    if clip.format is None:
        raise ValueError("detail_mask: 'Variable-format clips not supported'")

    clip_y = get_y(clip)
    blur_pf = core.bilateral.Gaussian(clip_y, sigma=0.5)

    blur_pref = blur_func(clip_y, blur_pf, sigma)
    blur_pref_diff = core.std.Expr([blur_pref, clip_y], "x y -").std.Deflate()
    blur_pref = iterate(blur_pref_diff, core.std.Inflate, 4)

    prew_mask = edgemask_func(clip_y).std.Deflate().std.Inflate()

    if detail_brz > 0:
        blur_pref = blur_pref.std.Binarize(detail_brz)
    if lines_brz > 0:
        prew_mask = prew_mask.std.Binarize(lines_brz)

    merged = core.std.Expr([blur_pref, prew_mask], "x y +")
    rm_grain = lvf.util.pick_removegrain(merged)(merged, rg_mode)

    return depth(rm_grain, clip.format.bits_per_sample)
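A hedged sketch of the typical consumer, protecting the caught detail from a debanding pass (clip is a placeholder 16-bit clip; the neo_f3kdb settings are only illustrative).

det_mask = detail_mask(clip, sigma=1.0, detail_brz=2500, lines_brz=4500)
deband = core.neo_f3kdb.Deband(clip, 16, 30, 30, 30, 0, 0, output_depth=16)
# Merge the unprocessed clip back in wherever detail or line art was detected.
protected = core.std.MaskedMerge(deband, clip, det_mask)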
Example #30
    def rescale(self, clip: vs.VideoNode, height: int = 720,
                kernel: lvsfunc.kernels.Kernel = lvsfunc.kernels.Catrom(),
                thr: Union[int, float] = 55, expand: int = 2) -> vs.VideoNode:
        """Makes a mask based on rescaled difference.
           Modified version of Atomchtools.

        Args:
            clip (vs.VideoNode):
                Source clip. Can be Gray, YUV or RGB.
                Keep in mind that the descale plugin will descale all planes
                after conversion to GRAYS, YUV444PS and RGBS respectively.

            height (int, optional):
                Height to descale to. Defaults to 720.

            kernel (lvsfunc.kernels.Kernel, optional):
                Kernel used to descale. Defaults to lvsfunc.kernels.Bicubic(b=0, c=0.5).

            thr (Union[int, float], optional):
                Binarization threshold. Defaults to 55.

            expand (int, optional):
                Growing/shrinking shape. 0 is allowed. Defaults to 2.

        Returns:
            vs.VideoNode: Rescaled mask.
        """
        if clip.format is None:
            raise FormatError('diff_rescale_mask: Variable format not allowed!')

        bits = get_depth(clip)
        gray_only = clip.format.num_planes == 1
        thr = scale_value(thr, bits, 32, scale_offsets=True)

        pre = core.resize.Bicubic(
            clip, format=clip.format.replace(
                bits_per_sample=32, sample_type=vs.FLOAT, subsampling_w=0, subsampling_h=0
            ).id
        )
        descale = kernel.descale(pre, get_w(height), height)
        rescale = kernel.scale(descale, clip.width, clip.height)

        diff = core.std.Expr(split(pre) + split(rescale), mae_expr(gray_only))

        mask = iterate(diff, lambda x: core.rgsf.RemoveGrain(x, 2), 2)
        mask = core.std.Expr(mask, f'x 2 4 pow * {thr} < 0 1 ?')

        mask = self._minmax(mask, 2 + expand, True)
        mask = mask.std.Deflate()

        return mask.resize.Point(
            format=clip.format.replace(color_family=vs.GRAY, subsampling_w=0, subsampling_h=0).id,
            dither_type='none'
        )