def filterchain() -> Union[vs.VideoNode, Tuple[vs.VideoNode, ...]]:
    """Main VapourSynth filterchain.

    Rebuilds the TV broadcast: IVTC/decimation, a range fix, a one-frame
    splice from the YouTube teaser PV, "delogoing"/edge-fixing against the
    PV, deblocking + contra-sharpening, line darkening, averaged debanding,
    and layered graining.
    """
    import EoEfunc as eoe
    import havsfunc as haf
    import lvsfunc as lvf
    import vardefunc as vdf
    from vsutil import depth

    src = JP_BD.clip_cut
    # YouTube teaser PV, loaded with `src` as reference and conformed to the
    # same 24000/1001 frame rate; used below as a patch source.
    yt = lvf.src(r"src/『魔法使いの夜』ティザーPV-NHIgc-seeSo.mkv", ref=src) \
        .std.AssumeFPS(fpsnum=24000, fpsden=1001)
    # Mark the clip progressive before decimating.
    src = core.std.SetFrameProp(src, '_FieldBased', intval=0)
    se = core.tivtc.TDecimate(src)

    # Fix range compression and missing frame in TV release
    csp = core.resize.Bicubic(se, range_in=0, range=1, dither_type="error_diffusion")
    # NOTE(review): resize `range=1` converts to full range, yet
    # `_ColorRange=1` tags the clip as limited — confirm this tag is intended.
    csp = csp.std.SetFrameProp(prop="_ColorRange", intval=1)
    # Splice in frame 400 from the PV to restore the frame missing from TV.
    merge = csp[:400] + yt[400] + csp[400:]

    # "Delogoing" and "Edgefixing"
    # Grown + blurred rectangle over the top-right area; that region is
    # replaced with the PV footage.
    sqmask = lvf.mask.BoundingBox(
        (1753, 27), (118, 50)).get_mask(merge).std.Inflate().std.Inflate().std.Maximum()
    sqmask = sqmask.std.Maximum().std.Maximum().std.Median().std.Convolution(
        [1] * 9).std.Convolution([1] * 9)
    mask_merge = core.std.MaskedMerge(merge, yt[:merge.num_frames], sqmask)

    # Keep the patched TV image inside a 3px-inset box; the border pixels
    # come from the PV instead.
    sqmask_ef = lvf.mask.BoundingBox(
        (3, 3), (src.width - 3, src.height - 3)).get_mask(mask_merge)
    ef = core.std.MaskedMerge(yt, mask_merge, sqmask_ef)
    ef = depth(ef, 16)

    # Deblock, then contra-sharpen against the pre-deblock clip to win back
    # detail lost to DPIR.
    debl = lvf.deblock.vsdpir(ef, strength=35, cuda=use_cuda)
    csharp = eoe.misc.ContraSharpening(debl, ef, radius=2, rep=13)
    darken = haf.FastLineDarkenMOD(csharp, strength=24)

    # Average two debanders (masked f3kdb + masked placebo) for a smoother
    # combined result.
    deband = core.average.Mean([
        flt.masked_f3kdb(darken, rad=16, thr=[28, 20], grain=[16, 6]),
        flt.masked_placebo(darken, rad=14, thr=4.5, itr=2, grain=2)
    ])

    # Layered graining: per-brightness thresholds with per-layer
    # strength/size/sharpness; fixed seed keeps the output reproducible.
    grain = vdf.noise.Graigasm(
        thrs=[x << 8 for x in (32, 80, 128, 176)],
        strengths=[(0.15, 0.0), (0.10, 0.0), (0.05, 0.0), (0.0, 0.0)],
        sizes=(1.15, 1.10, 1.05, 1),
        sharps=(100, 90, 80, 50),
        grainers=[
            vdf.noise.AddGrain(seed=69420, constant=True),
            vdf.noise.AddGrain(seed=69420, constant=False),
            vdf.noise.AddGrain(seed=69420, constant=False)
        ]).graining(deband)

    return grain
def filterchain() -> Union[vs.VideoNode, Tuple[vs.VideoNode, ...]]:
    """Main VapourSynth filterchain.

    Chroma-location fix, masked dehaloing (with ranges exempted), luma and
    chroma denoising, clamped anti-aliasing, line darkening, debanding,
    and layered graining.
    """
    import havsfunc as haf
    import lvsfunc as lvf
    import vardefunc as vdf
    from ccd import ccd
    from vsutil import depth

    # Retag/convert the chroma location (2 -> 0), then work in 16 bit.
    clip = JP_BD.clip_cut
    clip = depth(core.resize.Bicubic(clip, chromaloc_in=2, chromaloc=0), 16)

    # Dehalo chain, applied only where the grown halo mask is active.
    mask_halo = lvf.mask.halo_mask(clip, rad=1, brz=0.85, thmi=0.35, thma=0.95)
    mask_halo = mask_halo.std.Maximum().std.Inflate()

    dh = lvf.dehalo.bidehalo(clip, sigmaR=8 / 255, sigmaS=2.0, sigmaS_final=1.5)
    dh = core.dfttest.DFTTest(dh, sigma=8.0)
    dh = haf.EdgeCleaner(dh, strength=8, smode=1, hot=True)
    dehalo = core.std.MaskedMerge(clip, dh, mask_halo)
    # Certain cuts have a strong camera effect that amplifies haloing, and is likely intentional
    dehalo = lvf.rfs(dehalo, clip, [(773, 786), (867, 886)])

    # Light luma denoise, chroma denoise, then decsiz on the bright end.
    den = core.dfttest.DFTTest(dehalo, sigma=1.75)
    den = ccd(den, threshold=4, matrix='709')
    decs = vdf.noise.decsiz(den, sigmaS=8.0, min_in=208 << 8, max_in=232 << 8)

    # AA: clamp the stronger sraa against the shader-based AA.
    aa_shader = lvf.aa.based_aa(decs, str(shader_file))
    aa_strong = lvf.sraa(decs, rfactor=1.45)
    aa_clamped = lvf.aa.clamp_aa(decs, aa_shader, aa_strong, strength=1.45)
    lines = haf.FastLineDarkenMOD(aa_clamped, strength=12)

    # Debanding and graining
    deband = flt.masked_f3kdb(lines, rad=18, thr=[24, 20])
    grain = vdf.noise.Graigasm(
        thrs=[thr << 8 for thr in (32, 80, 128, 176)],
        strengths=[(0.20, 0.0), (0.15, 0.0), (0.10, 0.0), (0.0, 0.0)],
        sizes=(1.20, 1.15, 1.10, 1),
        sharps=(70, 60, 50, 50),
        grainers=[
            vdf.noise.AddGrain(seed=69420, constant=True),
            vdf.noise.AddGrain(seed=69420, constant=True),
            vdf.noise.AddGrain(seed=69420, constant=True),
        ]).graining(deband)

    return grain
def warpsharp(clip: vs.VideoNode, thresh: int = 128, blur: int = 3, type: int = 1,
              depth: int = 8, darken_strength: int = 24) -> vs.VideoNode:
    """
    Experimental script for sharpening poorly blurred/starved video.

    This is done through awarpsharp2, which you typically AVOID LIKE THE PLAGUE.
    Blame the ones who requested I add bleeding-sharp filters.
    I at least try to limit it to keep myself slightly sane.

    If you have any resemblance of sanity, you should not use this.

    Requires VapourSynth <http://www.vapoursynth.com/doc/about.html>

    Additional dependencies:
    * awarpsharp2 <https://github.com/dubhater/vapoursynth-awarpsharp2>
    * havsfunc <https://github.com/HomeOfVapourSynthEvolution/havsfunc>

    :param clip:             Input clip
    :param thresh:           No pixel in the edge mask will have a value greater than thresh.
                             Decrease for weaker sharpening.
    :param blur:             Controls the number of times to blur the edge mask.
                             Increase for weaker sharpening.
    :param type:             Controls the type of blur to use. 0 means some kind of 13x13 average.
                             1 means some kind of 5x5 average.
    :param depth:            Controls how far to warp. Negative values warp in the other direction,
                             i.e. will blur the image instead of sharpening.
    :param darken_strength:  Line darkening amount, 0-256

    :return:                 Sharpened clip
    """
    # Edge mask used to limit the warp-sharpening to actual edges.
    mask = core.warp.ASobel(clip, thresh=thresh) \
        .warp.ABlur(blur=blur, type=type)
    warp = core.warp.AWarpSharp2(clip, thresh=thresh, blur=blur, type=type, depth=depth)
    merged = core.std.MaskedMerge(clip, warp, mask)

    # FIX: the original passed `darken_strength >> clip.format.bits_per_sample`,
    # which evaluates to 0 for any real-world bit depth (e.g. 24 >> 8 == 0) and
    # silently disabled the darkening entirely. FastLineDarkenMOD takes the
    # 0-256 strength directly, independent of bit depth.
    return haf.FastLineDarkenMOD(merged, strength=darken_strength)
def filterchain() -> Union[vs.VideoNode, Tuple[vs.VideoNode, ...]]:
    """Main VapourSynth filterchain.

    Rescale (with a per-field descale for one range), masked denoising,
    clamped anti-aliasing, line darkening, averaged debanding, and layered
    graining.
    """
    import havsfunc as haf
    import lvsfunc as lvf
    import vardefunc as vdf
    from ccd import ccd
    from muvsfunc import SSIM_downsample
    from vsutil import depth, get_y

    src = JP_BD.clip_cut
    src = depth(src, 32)
    src_y = get_y(src)

    # Lineart mask used both to limit the rescale and to protect lines
    # from the denoisers below.
    l_mask = vdf.mask.FDOG().get_mask(src_y, lthr=0.065, hthr=0.065).std.Maximum().std.Minimum()
    l_mask = l_mask.std.Median().std.Convolution([1] * 9)

    # Rescaling
    descale = flt.auto_descale(src_y)
    # Frames 1555-1685 get a separate-fields BicubicSharp descale instead.
    desc_i = flt.descale_fields(src_y, kernel=lvf.kernels.BicubicSharp)
    descale = lvf.rfs(descale, desc_i, [(1555, 1685)])
    # nnedi3 double -> SSIM downsample back to source resolution.
    supersample = vdf.scale.nnedi3cl_double(descale, use_znedi=True, pscrn=1)
    downscaled = SSIM_downsample(supersample, src.width, src.height,
                                 smooth=((3 ** 2 - 1) / 12) ** 0.5,
                                 sigmoid=True, filter_param_a=0, filter_param_b=0)
    # Apply the rescale on lineart only, then restore source chroma.
    scaled_mask = core.std.MaskedMerge(src_y, downscaled, l_mask)
    scaled = depth(vdf.misc.merge_chroma(scaled_mask, src), 16)

    # Denoising
    # Eroded 16-bit copy of the lineart mask protects lines from denoising.
    l_mask_16 = depth(l_mask, 16).std.Minimum()
    dft = core.dfttest.DFTTest(scaled, sigma=1.25, tbsize=3, tosize=1)
    dft_masked = core.std.MaskedMerge(dft, scaled, l_mask_16)
    # ccd on chroma only (planes 1-2), with lineart kept from the DFTTest pass.
    ccd_uv = ccd(dft, threshold=4, matrix='709')
    ccd_uv = core.std.MaskedMerge(ccd_uv, dft_masked, l_mask_16, planes=[1, 2])
    decs = vdf.noise.decsiz(ccd_uv, sigmaS=4, min_in=212 << 8, max_in=240 << 8)

    # AA and slight lineart enhancement
    baa = lvf.aa.based_aa(decs, shader_file)
    sraa = lvf.sraa(decs, rfactor=1.5, downscaler=lvf.kernels.Bicubic(b=-1/2, c=1/4).scale)
    clmp = lvf.aa.clamp_aa(decs, baa, sraa, strength=1.5)
    # Stronger AA variant kept on standby; the empty range list makes this a
    # no-op until specific frames are added.
    sraa_strong = flt.transpose_sraa(decs, rfactor=1.2,
                                     downscaler=lvf.kernels.Bicubic(b=-1/2, c=1/4).scale)
    clmp = lvf.rfs(clmp, sraa_strong, [])
    darken = haf.FastLineDarkenMOD(clmp, strength=36)

    # Debanding and graining
    # Average two debanders (masked f3kdb + masked placebo).
    deband = core.average.Mean([
        flt.masked_f3kdb(darken, rad=18, thr=[28, 24]),
        flt.masked_placebo(darken, rad=15, thr=4)
    ])
    # Layered graining with per-brightness thresholds; fixed seed for
    # reproducible output.
    grain = vdf.noise.Graigasm(
        thrs=[x << 8 for x in (32, 80, 128, 176)],
        strengths=[(0.15, 0.0), (0.10, 0.0), (0.10, 0.0), (0.0, 0.0)],
        sizes=(1.15, 1.10, 1.05, 1),
        sharps=(60, 50, 50, 50),
        grainers=[
            vdf.noise.AddGrain(seed=69420, constant=True),
            vdf.noise.AddGrain(seed=69420, constant=True),
            vdf.noise.AddGrain(seed=69420, constant=True)
        ]).graining(deband)

    return grain
def sharpaamcmod(orig, dark=20, thin=10, sharp=0, smooth=0, stabilize=False,
                 tradius=2, aapel=2, aaov=None, aablk=None, aatype='sangnom'):
    """Sharpen + anti-alias with optional motion-compensated stabilization.

    Ported from: http://forum.doom9.org/showthread.php?p=1673928

    Args:
        dark: Line darkening amount, 0-256.
        thin: Optional line thinning amount, 0-256.
        sharp: Postsharpening.
        smooth: Postsmoothing.
        stabilize: Use post stabilization with motion compensation.
        tradius: Temporal radius for MDegrain (1, 2 or 3).
        aapel: Accuracy of the motion estimation (1, 2 or 4; 1 = precision
            to the pixel, 2 = half a pixel, 4 = quarter of a pixel, produced
            by spatial interpolation — better but slower).
        aaov: Block overlap value (horizontal). Must be even and less than
            the block size. Defaults to 8 for widths above 1100, else 4.
        aablk: Size of a block (horizontal): 4, 8 or 16. Larger blocks are
            less sensitive to noise and faster, but also less accurate.
            Defaults to 16 for widths above 1100, else 8.
        aatype: Use 'sangnom' or 'eedi2' for anti-aliasing.

    Raises:
        ValueError: On an unknown ``aatype``, or (when stabilizing) a
            ``tradius`` outside 1-3.
    """
    core = vs.get_core()

    # Vars and stuff
    w = orig.width

    # FIX: the original wrote `if w > 1100 and aaov is None: aaov = 8 else:
    # aaov = 4`, which clobbered a caller-supplied aaov/aablk whenever the
    # combined condition failed. Only fill in defaults when None was passed.
    if aaov is None:
        aaov = 8 if w > 1100 else 4
    if aablk is None:
        aablk = 16 if w > 1100 else 8

    _max = (1 << orig.format.bits_per_sample) - 1
    _mid = (1 << orig.format.bits_per_sample) / 2

    aatype = aatype.lower()

    # Edge mask: max of two directional convolutions on luma, gamma-curved
    # up to the clip's max value.
    m = core.std.ShufflePlanes(orig, planes=0, colorfamily=vs.GRAY)
    m = core.std.Expr(
        core.std.Expr([
            core.std.Convolution(m, [5, 10, 5, 0, 0, 0, -5, -10, -5],
                                 divisor=4, saturate=False),
            core.std.Convolution(m, [5, 0, -5, 10, 0, -10, 5, 0, -5],
                                 divisor=4, saturate=False)
        ], ['x y max']),
        ['x {_mid} / 0.86 pow {_max} *'.format(_max=_max, _mid=_mid)])

    # Darkening and thinning work differently than in the original script.
    if dark != 0 or thin != 0:
        preaa = haf.FastLineDarkenMOD(orig, strength=dark, thinning=thin)
    else:
        preaa = orig

    # Antialiasing
    if aatype == 'sangnom':
        aa = sangnomaa(preaa)
    elif aatype == 'eedi2':
        aa = ediaa(preaa)
    else:
        raise ValueError('Wrong aatype, it should be "sangnom" or "eedi2".')

    # Post sharpen
    if sharp == 0 and smooth == 0:
        postsh = aa
    else:
        postsh = haf.LSFmod(aa, edgemode=1, strength=sharp, overshoot=1,
                            soft=smooth)

    # Merge the AA result back along the edge mask (luma only).
    merged = core.std.MaskedMerge(orig, postsh, m, planes=0)

    # FIX: only build the motion-compensation graph when it is requested.
    # Previously it was built unconditionally, and a tradius < 1 left `sdd`
    # unbound, raising NameError even when stabilize was False.
    if stabilize is not True:
        return merged
    if tradius not in (1, 2, 3):
        raise ValueError('tradius must be 1, 2 or 3.')

    # Motion-compensate the AA difference, limit it, and subtract it back.
    sdiff = core.std.MakeDiff(orig, merged)
    origsuper = core.mv.Super(orig, pel=aapel)
    sdiffsuper = core.mv.Super(sdiff, pel=aapel, levels=1)

    # Only analyse the vector pairs the chosen radius actually needs.
    fvec1 = core.mv.Analyse(origsuper, delta=1, isb=False, blksize=aablk, overlap=aaov)
    bvec1 = core.mv.Analyse(origsuper, delta=1, isb=True, blksize=aablk, overlap=aaov)
    if tradius == 1:
        sdd = core.mv.Degrain1(clip=sdiff, super=sdiffsuper,
                               mvbw=bvec1, mvfw=fvec1)
    else:
        fvec2 = core.mv.Analyse(origsuper, delta=2, isb=False, blksize=aablk, overlap=aaov)
        bvec2 = core.mv.Analyse(origsuper, delta=2, isb=True, blksize=aablk, overlap=aaov)
        if tradius == 2:
            sdd = core.mv.Degrain2(clip=sdiff, super=sdiffsuper,
                                   mvbw=bvec1, mvfw=fvec1,
                                   mvbw2=bvec2, mvfw2=fvec2)
        else:
            fvec3 = core.mv.Analyse(origsuper, delta=3, isb=False, blksize=aablk, overlap=aaov)
            bvec3 = core.mv.Analyse(origsuper, delta=3, isb=True, blksize=aablk, overlap=aaov)
            sdd = core.mv.Degrain3(clip=sdiff, super=sdiffsuper,
                                   mvbw=bvec1, mvfw=fvec1,
                                   mvbw2=bvec2, mvfw2=fvec2,
                                   mvbw3=bvec3, mvfw3=fvec3)

    # Per pixel, keep whichever diff is closer to neutral, then blend the
    # first plane back toward the degrained diff by (1 - reduct); the weight
    # of 0 leaves the remaining planes from `tmp`.
    reduct = 0.4
    tmp = core.std.Expr(
        [sdiff, sdd],
        'x {_mid} - abs y {_mid} - abs < x y ?'.format(_mid=_mid))
    sdd = core.std.Merge(tmp, sdd, [1.0 - reduct, 0])

    return core.std.MakeDiff(orig, sdd)
def filterchain() -> Union[vs.VideoNode, Tuple[vs.VideoNode, ...]]:
    """Regular VapourSynth filterchain.

    Descale/rescale, denoising, dehaloing, line brightening, clamped
    anti-aliasing, chroma warp fix, credit restoration via a detail mask,
    debanding, and adaptive graining.
    """
    import EoEfunc.denoise as eoe
    # NOTE(review): `eoe` is imported but never used in this function.
    import havsfunc as haf
    import lvsfunc as lvf
    import vardefunc as vdf
    from adptvgrnMod import adptvgrnMod
    from ccd import ccd
    from muvsfunc import SSIM_downsample
    from vsutil import depth, get_y, iterate
    from xvs import WarpFixChromaBlend

    src = JP_clip.clip_cut.std.AssumeFPS(fpsnum=24000, fpsden=1001)
    src = depth(src, 16)

    # TO-DO: Figure out how they post-sharpened it. Probably some form of unsharpening?
    # Descale luma 1080p -> 810p with sharp bicubic, nnedi3 double, then
    # SSIM-downsample back to 1080p and restore source chroma.
    src_y = depth(get_y(src), 32)
    descale = lvf.kernels.Bicubic(b=0, c=3 / 4).descale(src_y, 1440, 810)
    double = vdf.scale.nnedi3cl_double(descale, pscrn=1)
    rescale = depth(SSIM_downsample(double, 1920, 1080), 16)
    scaled = vdf.misc.merge_chroma(rescale, src)

    # BM3D denoise using a DFTTest pass as its reference, chroma denoise,
    # then decsiz on the bright end.
    denoise_ref = core.dfttest.DFTTest(scaled, sigma=1.8)
    denoise = lvf.denoise.bm3d(scaled, sigma=[0.75, 0.65], ref=denoise_ref)
    cdenoise = ccd(denoise, threshold=3, matrix='709')
    decs = vdf.noise.decsiz(cdenoise, sigmaS=4, min_in=208 << 8, max_in=232 << 8)

    # Dehalo fuckery. F**k the sharpening, dude
    dehalo = haf.YAHR(decs, blur=2, depth=32)
    dehalo_2 = lvf.dehalo.masked_dha(dehalo, ry=2.5, rx=2.5)
    halo_mask = lvf.mask.halo_mask(decs, rad=3, brz=0.3, thma=0.42)
    dehalo_masked = core.std.MaskedMerge(decs, dehalo_2, halo_mask)
    # Per-pixel min with the pre-dehalo clip so the dehalo can only darken.
    dehalo_min = core.std.Expr([dehalo_masked, decs], "x y min")

    # Brightening the lines to undo the unsharpening's line darkening
    # (negative strength brightens instead of darkens).
    bright = haf.FastLineDarkenMOD(dehalo_min, strength=-24)

    # AA
    baa = lvf.aa.based_aa(bright, str(shader_file))
    sraa = lvf.sraa(bright, rfactor=1.45)
    clmp = lvf.aa.clamp_aa(bright, baa, sraa, strength=1.45)

    # Chroma warp fix, kept away from lineart via a Prewitt edge mask.
    line_mask = core.std.Prewitt(clmp)
    cwarp = WarpFixChromaBlend(clmp, thresh=96, depth=6)
    cwarp = core.std.MaskedMerge(cwarp, clmp, line_mask)

    # Restore native-resolution credits: diff the re-upscaled descale against
    # the source, then grow/clean the resulting detail mask.
    upscale = lvf.kernels.Bicubic(b=0, c=3 / 4).scale(descale, 1920, 1080)
    credit_mask = lvf.scale.descale_detail_mask(src_y, upscale, threshold=0.08)
    credit_mask = iterate(credit_mask, core.std.Deflate, 3)
    credit_mask = iterate(credit_mask, core.std.Inflate, 3)
    credit_mask = iterate(credit_mask, core.std.Maximum, 2)
    merge_credits = core.std.MaskedMerge(cwarp, src, depth(credit_mask, 16))

    # Debanding, then luma-adaptive static grain (fixed seed, luma only).
    deband = flt.masked_f3kdb(merge_credits, rad=15, thr=20, grain=[12, 0])
    grain: vs.VideoNode = adptvgrnMod(deband, seed=42069, strength=0.15,
                                      luma_scaling=10, size=1.25, sharp=70,
                                      static=True, grain_chroma=False)

    return grain