# NOTE(review): this chunk begins mid-function — the indented lines below are
# the tail of what is presumably `filterchain()`; its header lies outside this
# view. Indentation reconstructed from the collapsed source — TODO confirm.

    # Splice the stronger deband in over the listed frame ranges only.
    deband = lvf.rfs(deband_reg, deband_str, [(1162, 1193), (1210, 1216)])

    # Final luma-only adaptive grain pass; fixed seed for reproducible encodes.
    grain: vs.VideoNode = adptvgrnMod(deband, seed=42069, strength=0.3, luma_scaling=8, size=1.35, sharp=100, grain_chroma=False)

    return grain


if __name__ == '__main__':
    # Direct invocation: run the filterchain and encode the result.
    FILTERED = filterchain()
    enc.Encoder(JP_CR, FILTERED).run(clean_up=True)  # type: ignore[arg-type]
    #enc.Patcher(JP_CR, FILTERED).patch(ranges=[(1162, 1216), (2059, 2157)])  # type: ignore[arg-type]
elif __name__ == '__vapoursynth__':
    # Loaded through vspipe: exactly one output node is allowed.
    FILTERED = filterchain()
    if not isinstance(FILTERED, vs.VideoNode):
        raise ImportError(
            f"Input clip has multiple output nodes ({len(FILTERED)})! Please output a single clip"
        )
    else:
        enc.dither_down(FILTERED).set_output(0)
else:
    # Previewer path: source on node 0, each filtered clip on its own
    # numbered node (the 'node' frame prop tags which output is which).
    JP_CR.clip_cut.std.SetFrameProp('node', intval=0).set_output(0)

    FILTERED = filterchain()
    if not isinstance(FILTERED, vs.VideoNode):
        for i, clip_filtered in enumerate(FILTERED, start=1):
            clip_filtered.std.SetFrameProp('node', intval=i).set_output(i)
# NOTE(review): chunk begins mid-expression — the list below closes a
# `grainers=[...]` argument (the call and its assignment target, presumably
# `grain = ...`, start outside this view). Indentation reconstructed.

        vdf.noise.AddGrain(seed=69420, constant=True),
        vdf.noise.AddGrain(seed=69420, constant=False),
        vdf.noise.AddGrain(seed=69420, constant=False)
    ]).graining(deband)

    # Re-apply the credits diff (extracted before this chunk) on top of the
    # grained clip; `depth(grain, 32)` matches the diff's 32-bit precision.
    merge_creds = core.std.MergeDiff(depth(grain, 32), diff)

    return merge_creds


if __name__ == '__main__':
    # Direct invocation: encode the filtered clip.
    FILTERED = filterchain()
    enc.Encoder(JP_BD, FILTERED).run(clean_up=True)
elif __name__ == '__vapoursynth__':
    # Loaded through vspipe: exactly one output node is allowed.
    FILTERED = filterchain()
    if not isinstance(FILTERED, vs.VideoNode):
        raise ImportError(
            f"Input clip has multiple output nodes ({len(FILTERED)})! Please output a single clip"
        )
    else:
        enc.dither_down(FILTERED).set_output(0)
else:
    # Previewer path: source on node 0, filtered node(s) numbered from 1.
    JP_BD.clip_cut.std.SetFrameProp('node', intval=0).set_output(0)

    FILTERED = filterchain()
    if not isinstance(FILTERED, vs.VideoNode):
        for i, clip_filtered in enumerate(FILTERED, start=1):
            clip_filtered.std.SetFrameProp('node', intval=i).set_output(i)
    else:
        # NOTE(review): chunk is truncated here mid-branch — the single-node
        # output statement continues outside this view.
# NOTE(review): chunk begins mid-expression — the list below closes a
# `grainers=[...]` argument whose call (and `grain = ...` target, presumably)
# starts outside this view. Indentation reconstructed.

        vdf.noise.AddGrain(seed=69420, constant=True),
        vdf.noise.AddGrain(seed=69420, constant=True)
    ]).graining(denoise)

    # Hard binary mask of near-white luma: values above 233<<8 (16-bit scale
    # of 8-bit 233) become 255<<8, everything else 0.
    mask = core.std.Expr(get_y(pan), f"x {233 << 8} > {255 << 8} 0 ?")
    # Close small holes, then soften/grow the mask with 4 box-blur passes.
    mask = mask.std.Maximum().std.Minimum()
    mask = iterate(mask, partial(core.std.Convolution, matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1]), 4)

    # Replace the masked near-white regions with a flat white clip
    # (inverted black BlankClip), presumably to keep them grain-free.
    wh = core.std.BlankClip(grain).std.Invert()
    masked = core.std.MaskedMerge(grain, wh, mask)

    return masked


if __name__ == '__main__':
    # Direct invocation: encode with per-range encoder zones.
    enc.Encoder(JP_BD, final_filterchain()).run(clean_up=True, zones=zones)  # type: ignore
elif __name__ == '__vapoursynth__':
    # Loaded through vspipe: exactly one output node is allowed.
    FILTERED = final_filterchain()
    if not isinstance(FILTERED, vs.VideoNode):
        raise ImportError(f"Input clip has multiple output nodes ({len(FILTERED)})! Please output a single clip")
    else:
        enc.dither_down(FILTERED).set_output(0)
else:
    # Previewer path: source on node 0, filtered node(s) from 1.
    JP_BD.clip_cut.std.SetFrameProp('node', intval=0).set_output(0)

    # FILTERED = pre_filterchain()
    FILTERED = final_filterchain()
    if not isinstance(FILTERED, vs.VideoNode):
        for i, clip_filtered in enumerate(FILTERED, start=1):
            clip_filtered.std.SetFrameProp('node', intval=i).set_output(i)
    else:
        FILTERED.std.SetFrameProp('node', intval=1).set_output(1)
# NOTE(review): this chunk begins mid-function — the indented lines below are
# the tail of what is presumably `filterchain()`; its header lies outside this
# view. Indentation reconstructed from the collapsed source — TODO confirm.

    # Dehalo the AA'd clip (no dark-halo processing, boosted bright halos),
    # then re-darken and subtly warp-sharpen the lineart.
    dehalo = haf.FineDehalo(aa_masked, rx=1.6, ry=1.6, darkstr=0, brightstr=1.25)
    darken = flt.line_darkening(dehalo, 0.275).warp.AWarpSharp2(depth=2)

    # Restore the untouched credits from the descaled clip via the mask.
    merged_credits = core.std.MaskedMerge(darken, decs, credit_mask)

    deband = flt.masked_f3kdb(merged_credits, rad=18, thr=32, grain=[32, 12])
    # Final luma-only adaptive grain pass; fixed seed for reproducible encodes.
    grain: vs.VideoNode = adptvgrnMod(deband, seed=42069, strength=0.35, luma_scaling=8, size=1.05, sharp=80, grain_chroma=False)

    return grain


if __name__ == '__main__':
    # Direct invocation: encode and mux with chapters.
    FILTERED = filterchain()
    enc.Encoder(JP_BD, FILTERED, CHAPTERS, CHAP_NAMES).run(clean_up=True)  # type: ignore
elif __name__ == '__vapoursynth__':
    # Loaded through vspipe: exactly one output node is allowed.
    FILTERED = filterchain()
    if not isinstance(FILTERED, vs.VideoNode):
        raise ImportError(
            f"Input clip has multiple output nodes ({len(FILTERED)})! Please output just 1 clip"
        )
    else:
        enc.dither_down(FILTERED).set_output(0)
else:
    # Previewer path: source on node 0, filtered node(s) from 1.
    JP_BD.clip_cut.std.SetFrameProp('node', intval=0).set_output(0)

    #FILTERED = pre_corrections()
    FILTERED = filterchain()
    if not isinstance(FILTERED, vs.VideoNode):
        for i, clip_filtered in enumerate(FILTERED, start=1):
            clip_filtered.std.SetFrameProp('node', intval=i).set_output(i)
# NOTE(review): chunk begins mid-expression — the keys below close a dict
# argument of a call that starts outside this view (result presumably bound
# to `dft`). Indentation reconstructed from the collapsed source.

        'detail_brz': 100, 'lines_brz': 450
    })

    # Extract the detail removed by `dft`, then add it back on top of the
    # placebo-debanded clip so strong debanding doesn't destroy detail.
    dft_diff = core.std.MakeDiff(decs, dft)
    plac_diff = core.std.MergeDiff(plac, dft_diff)

    # Use the strong deband only on the `strong_debanding` frame ranges.
    deband = lvf.rfs(deband_wk, plac_diff, strong_debanding)

    return deband


if __name__ == '__main__':
    # Direct invocation: encode with named x265 settings and zones.
    FILTERED = filterchain()
    enc.Encoder(JP_BD, FILTERED).run(clean_up=True, settings_name='x265_settings', zones=zones)
elif __name__ == '__vapoursynth__':
    # Loaded through vspipe: exactly one output node is allowed.
    FILTERED = filterchain()
    if not isinstance(FILTERED, vs.VideoNode):
        raise ImportError(
            f"Input clip has multiple output nodes ({len(FILTERED)})! Please output a single clip"
        )
    else:
        enc.dither_down(FILTERED).set_output(0)
else:
    # Previewer path: source on node 0, filtered node(s) from 1.
    JP_BD.clip_cut.std.SetFrameProp('node', intval=0).set_output(0)

    FILTERED = filterchain()
    if not isinstance(FILTERED, vs.VideoNode):
        for i, clip_filtered in enumerate(FILTERED, start=1):
            clip_filtered.std.SetFrameProp('node', intval=i).set_output(i)
# NOTE(review): this chunk begins mid-function — the indented lines below are
# the tail of what is presumably `filterchain()`; its header lies outside this
# view. Indentation reconstructed from the collapsed source — TODO confirm.

    # Masked f3kdb deband over the darkened clip, with its own grain dither.
    deband = flt.masked_f3kdb(darken, rad=18, thr=32, grain=[32, 12])
    # Final luma-only adaptive grain pass; fixed seed for reproducible encodes.
    grain: vs.VideoNode = adptvgrnMod(deband, seed=42069, strength=0.35, luma_scaling=8, size=1.05, sharp=80, grain_chroma=False)

    return grain


if __name__ == '__main__':
    # Direct invocation: encode the filtered clip.
    FILTERED = filterchain()
    enc.Encoder(JP_BD, FILTERED).run(clean_up=True)  # type: ignore
elif __name__ == '__vapoursynth__':
    # Loaded through vspipe: exactly one output node is allowed.
    FILTERED = filterchain()
    if not isinstance(FILTERED, vs.VideoNode):
        raise ImportError(
            f"Input clip has multiple output nodes ({len(FILTERED)})! Please output just 1 clip"
        )
    else:
        enc.dither_down(FILTERED).set_output(0)
else:
    # Previewer path: source on node 0, filtered node(s) from 1.
    JP_BD.clip_cut.std.SetFrameProp('node', intval=0).set_output(0)

    FILTERED = filterchain()
    if not isinstance(FILTERED, vs.VideoNode):
        for i, clip_filtered in enumerate(FILTERED, start=1):
            clip_filtered.std.SetFrameProp('node', intval=i).set_output(i)
    else:
        # NOTE(review): chunk is truncated here mid-branch — the single-node
        # output statement continues outside this view.
# NOTE(review): chunk begins mid-call — the arguments below belong to a
# graining call (presumably vdf Graigasm-style) whose opening, and the
# `grain = ...` target, start outside this view. Indentation reconstructed.

        sharps=(100, 90, 80, 50),
        grainers=[
            vdf.noise.AddGrain(seed=69420, constant=True),
            vdf.noise.AddGrain(seed=69420, constant=False),
            vdf.noise.AddGrain(seed=69420, constant=False)
        ]).graining(deband)

    return grain


if __name__ == '__main__':
    # Direct invocation: encode with named x265 settings.
    FILTERED = filterchain()
    enc.Encoder(JP_BD, FILTERED).run(clean_up=True, settings='x265_settings')
elif __name__ == '__vapoursynth__':
    # Loaded through vspipe: exactly one output node is allowed.
    FILTERED = filterchain()
    if not isinstance(FILTERED, vs.VideoNode):
        raise ImportError(
            f"Input clip has multiple output nodes ({len(FILTERED)})! Please output a single clip"
        )
    else:
        enc.dither_down(FILTERED).set_output(0)
else:
    # Previewer path: source on node 0, filtered node(s) from 1.
    JP_BD.clip_cut.std.SetFrameProp('node', intval=0).set_output(0)

    FILTERED = filterchain()
    if not isinstance(FILTERED, vs.VideoNode):
        for i, clip_filtered in enumerate(FILTERED, start=1):
            clip_filtered.std.SetFrameProp('node', intval=i).set_output(i)
    else:
        # NOTE(review): chunk is truncated here mid-branch — the single-node
        # output statement continues outside this view.