Example #1
    def rescale(self, clip: vs.VideoNode, height: int = 720,
                kernel: lvsfunc.kernels.Kernel = lvsfunc.kernels.Catrom(),
                thr: Union[int, float] = 55, expand: int = 2) -> vs.VideoNode:
        """Makes a mask based on rescaled difference.
           Modified version of Atomchtools.

        Args:
            clip (vs.VideoNode):
                Source clip. Can be Gray, YUV or RGB.
                Keep in mind that descale plugin will descale all planes
                after conversion to GRAYS, YUV444PS and RGBS respectively.

            height (int, optional):
                Height to descale to. Defaults to 720.

            kernel (lvsfunc.kernels.Kernel, optional):
                Kernel used to descale. Defaults to lvsfunc.kernels.Catrom() (i.e. Bicubic(b=0, c=0.5)).

            thr (Union[int, float], optional):
                Binarization threshold. Defaults to 55.

            expand (int, optional):
                Growing/shrinking shape. 0 is allowed. Defaults to 2.

        Returns:
            vs.VideoNode: Rescaled mask.
        """
        if clip.format is None:
            raise FormatError('diff_rescale_mask: Variable format not allowed!')

        bits = get_depth(clip)
        gray_only = clip.format.num_planes == 1
        thr = scale_value(thr, bits, 32, scale_offsets=True)

        pre = core.resize.Bicubic(
            clip, format=clip.format.replace(
                bits_per_sample=32, sample_type=vs.FLOAT, subsampling_w=0, subsampling_h=0
            ).id
        )
        descale = kernel.descale(pre, get_w(height), height)
        rescale = kernel.scale(descale, clip.width, clip.height)

        diff = core.std.Expr(split(pre) + split(rescale), mae_expr(gray_only))

        mask = iterate(diff, lambda x: core.rgsf.RemoveGrain(x, 2), 2)
        mask = core.std.Expr(mask, f'x 2 4 pow * {thr} < 0 1 ?')

        mask = self._minmax(mask, 2 + expand, True)
        mask = mask.std.Deflate()

        return mask.resize.Point(
            format=clip.format.replace(color_family=vs.GRAY, subsampling_w=0, subsampling_h=0).id,
            dither_type='none'
        )
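
A minimal usage sketch; the class this method belongs to is not shown above, so `MaskTools` is a purely hypothetical name, and `src` / `rescaled` are placeholder clips of the same format:

# Hypothetical usage sketch: MaskTools, src and rescaled are placeholders.
credit_mask = MaskTools().rescale(src, height=720, thr=55)
# Typical use: restore native-resolution credits from the source over a rescaled clip.
restored = core.std.MaskedMerge(rescaled, src, credit_mask, 0)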
Example #2
def generate_detail_mask(clip: vs.VideoNode,
                         brz_a: float = 0.045,
                         brz_b: float = 0.060,
                         **kwargs: Any) -> vs.VideoNode:
    """
        Generates a detail mask.
        If a float value is passed, it'll be scaled to the clip's bitdepth.

        :param clip:        Input clip
        :param brz_a:       Binarizing for the detail mask
        :param brz_b:       Binarizing for the edge mask
        :param kwargs:      Additional parameters passed to lvf.denoise.detail_mask

        :return:            Detail mask
    """
    return lvf.denoise.detail_mask(
        clip,
        brz_a=scale_value(brz_a, 32, clip.format.bits_per_sample)
        if isinstance(brz_a, float) else brz_a,
        brz_b=scale_value(brz_b, 32, clip.format.bits_per_sample)
        if isinstance(brz_b, float) else brz_b,
        **kwargs)
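
A short usage sketch; `src16` and `debanded` are placeholder 16-bit clips, with the mask protecting detail when the debanded clip is merged back:

# Sketch only: src16 and debanded are placeholders.
det_mask = generate_detail_mask(src16, brz_a=0.045, brz_b=0.060)
deband_masked = core.std.MaskedMerge(debanded, src16, det_mask)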
Example #3
def letterbox_edgefix(clip: vs.VideoNode,
                      crops: Optional[List[Range]] = None,
                      fades: Optional[List[Range]] = None) -> vs.VideoNode:
    assert clip.format is not None
    fixed = clip
    if fades:
        fy = _fixplane(clip.std.ShufflePlanes(planes=0, colorfamily=vs.GRAY),
                       top=132,
                       bottom=131,
                       bbt=2,
                       bbb=2)
        fu = _fixplane(clip.std.ShufflePlanes(planes=1, colorfamily=vs.GRAY),
                       top=66,
                       bottom=65,
                       bbt=1,
                       bbb=2,
                       chroma=True)
        fv = _fixplane(clip.std.ShufflePlanes(planes=2, colorfamily=vs.GRAY),
                       top=66,
                       bottom=66,
                       bbt=1,
                       bbb=2,
                       chroma=True)
        f = core.std.ShufflePlanes([fy, fu, fv],
                                   planes=[0, 0, 0],
                                   colorfamily=vs.YUV)
        fixed = replace_ranges(fixed, f, fades)
    if crops:
        black = [
            vsutil.scale_value(0,
                               8,
                               clip.format.bits_per_sample,
                               range_in=vsutil.Range.FULL,
                               range=vsutil.Range.LIMITED,
                               scale_offsets=True),
            scale_thresh(0.5, clip),
            scale_thresh(0.5, clip),
        ]
        crop = clip.std.Crop(top=132, bottom=132)
        bb = bbmod(crop, top=2, bottom=2, blur=500)
        f = bb.std.AddBorders(top=132, bottom=132, color=black)
        fixed = replace_ranges(fixed, f, crops)
    return fixed
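
A call sketch; `src` is a placeholder clip and the frame ranges are invented for illustration (each `Range` is an inclusive (start, end) tuple as used by replace_ranges):

# Hypothetical ranges: fix letterbox borders on 3000-3200 and fades over them on 3201-3260.
fixed = letterbox_edgefix(src, crops=[(3000, 3200)], fades=[(3201, 3260)])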
Example #4
def retinex(clip: vs.VideoNode,
            tsigma: float = 1.5,
            rsigma: list[float] = [50, 200, 350],
            opencl: bool = False,
            msrcp_dict: Optional[Dict[str, Any]] = None,
            tcanny_dict: Optional[Dict[str, Any]] = None) -> vs.VideoNode:
    from lvsfunc.util import quick_resample
    from vsutil import depth

    tcanny = core.tcanny.TCannyCL if opencl else core.tcanny.TCanny

    msrcp_args: Dict[str, Any] = dict(upper_thr=0.005, fulls=True)
    if msrcp_dict is not None:
        msrcp_args |= msrcp_dict

    tcanny_args: Dict[str, Any] = dict(mode=1)
    if tcanny_dict is not None:
        tcanny_args |= tcanny_dict

    if clip.format.bits_per_sample == 32:
        max_value = 1
    else:
        max_value = vsutil.scale_value(1,
                                       32,
                                       clip.format.bits_per_sample,
                                       scale_offsets=True,
                                       range=1)

    if clip.format.num_planes > 1:
        clip = vsutil.get_y(clip)

    ret = quick_resample(
        clip, lambda x: core.retinex.MSRCP(x, sigma=rsigma, **msrcp_args))
    tcanny = tcanny(
        ret, sigma=tsigma,
        **tcanny_args).std.Minimum(coordinates=[1, 0, 1, 0, 0, 1, 0, 1])

    return depth(core.std.Expr([clip, tcanny], f'x y + {max_value} min'),
                 clip.format.bits_per_sample,
                 dither_type='none')
Example #5
def shift_tint(clip: vs.VideoNode, values: Union[int, Sequence[int]] = 16) -> vs.VideoNode:
    """
    A function for forcibly adding pixel values to a clip.
    Can be used to fix green tints in Crunchyroll sources, for example.
    Only use this if you know what you're doing!

    This function accepts a single integer or a list of integers.
    Values passed should mimic those of an 8bit clip.
    If your clip is not 8bit, they will be scaled accordingly.

    If you only pass 1 value, it will be copied to every plane.
    If you pass 2, the 2nd one will be copied over to the 3rd.
    Don't pass more than three.

    :param clip:    Input clip
    :param values:  Value added to every pixel, scaled according to your clip's depth (Default: 16)

    :return:        Clip with pixel values added
    """
    val: Tuple[float, float, float]

    if isinstance(values, int):
        val = (values, values, values)
    elif len(values) == 2:
        val = (values[0], values[1], values[1])
    elif len(values) == 3:
        val = (values[0], values[1], values[2])
    else:
        raise ValueError("shift_tint: 'Too many values supplied'")

    if any(v > 255 or v < -255 for v in val):
        raise ValueError("shift_tint: 'Every value in \"values\" must be below 255'")

    cdepth = get_depth(clip)
    cv: List[float] = [scale_value(v, 8, cdepth) for v in val] if cdepth != 8 else list(val)

    return core.std.Expr(clip, expr=[f'x {cv[0]} +', f'x {cv[1]} +', f'x {cv[2]} +'])
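
A quick sketch; `src` is a placeholder clip and the offsets are purely illustrative 8-bit-style values (luma untouched, U nudged up, V nudged down):

# Sketch: src is a placeholder clip; offsets are illustrative only.
untinted = shift_tint(src, values=[0, 2, -2])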
Example #6
def shift_tint(clip: vs.VideoNode,
               values: Union[int, List[int]] = 16) -> vs.VideoNode:
    """
    A function for forcibly adding pixel values to a clip.
    Can be used to fix green tints in CrunchyRoll sources, for example.
    Only use this if you know what you're doing!

    Values passed should mimic those of an 8bit clip.
    If your clip is not 8bit, they will be scaled accordingly.

    If you only pass 1 value, it will be copied to every plane.
    If you pass 2, the 2nd one will be copied over to the 3rd.

    Alias for this function is `lvsfunc.misc.fix_cr_tint`.

    :param clip:   Input clip
    :param values: Value added to every pixel, scaled according to your clip's depth (Default: 16)

    :return:       Clip with pixel values added
    """
    if isinstance(values, int):
        values = [values, values, values]
    elif len(values) == 2:
        values = [values[0], values[1], values[1]]

    if any(v > 255 or v < -255 for v in values):
        raise ValueError(
            "shift_tint: 'Every value in \"values\" must be below 255'")

    cdepth = get_depth(clip)
    if cdepth != 8:
        values = [scale_value(v, 8, cdepth) for v in values]

    return core.std.Expr(
        clip,
        expr=[f'x {values[0]} +', f'x {values[1]} +', f'x {values[2]} +'])
Example #7
def do_filter():
    """Vapoursynth filtering"""
    src = JPBD.src_cut
    src += src[-1]

    # Variables
    opstart, opend = 8296, 10452
    eptitle_s, eptitle_e = 10453, 10572
    edstart, edend = 31528, 33685
    preview_s, preview_e = 33686, src.num_frames - 1
    h = 720
    w = get_w(h)

    edges = core.edgefixer.ContinuityFixer(src, *[[2, 1, 1]] * 4)
    out = depth(edges, 32)

    ref = hvf.SMDegrain(depth(get_y(out), 16), thSAD=450)
    denoise = hybrid_denoise(out, 0.35, 1.75, dict(a=2, d=1),
                             dict(ref=depth(ref, 32)))
    out = denoise

    y = get_y(out)
    lineart = vdf.edge_detect(y, 'FDOG', 0.055,
                              (1, 1)).std.Median().std.Convolution([*[1] * 9])

    descale = core.descale.Debilinear(y, w, h)

    upscale = vdf.fsrcnnx_upscale(
        descale,
        height=h * 2,
        shader_file=r'shaders\FSRCNNX_x2_56-16-4-1.glsl',
        upscaler_smooth=eedi3_upscale,
        profile='slow',
        strength=85)

    antialias = sraa_eedi3(upscale, 9)

    downscale = muvf.SSIM_downsample(antialias,
                                     src.width,
                                     src.height,
                                     filter_param_a=0,
                                     filter_param_b=0)
    downscale = core.std.MaskedMerge(y, downscale, lineart)

    merged = vdf.merge_chroma(downscale, out)
    merged = lvf.rfs(merged, out, [(opstart, opend), (eptitle_s, eptitle_e),
                                   (preview_s, preview_e)])
    out = depth(merged, 16)

    detail_dark_mask = detail_dark_mask_func(get_y(out),
                                             brz_a=8000,
                                             brz_b=6000)
    detail_light_mask = lvf.denoise.detail_mask(out, brz_a=2500, brz_b=1200)
    detail_mask = core.std.Expr([detail_dark_mask, detail_light_mask],
                                'x y +').std.Median()
    detail_mask_grow = iterate(detail_mask, core.std.Maximum, 2)
    detail_mask_grow = iterate(detail_mask_grow, core.std.Inflate,
                               2).std.Convolution([*[1] * 9])

    detail_mask = core.std.Expr([get_y(out), detail_mask_grow, detail_mask],
                                f'x {32<<8} < y z ?')

    deband = dumb3kdbv2(out, 22, 24)
    deband = core.std.MaskedMerge(deband, out, detail_mask)

    deband_b = dumb3kdbv2(out, 24, 64)
    deband_b = core.std.MaskedMerge(deband_b, out, detail_light_mask)
    deband = lvf.rfs(deband, deband_b, [(opstart + 414, opstart + 496)])

    out = deband

    ref = get_y(out).std.PlaneStats()
    adgmask_a = core.adg.Mask(ref, 30)
    adgmask_b = core.adg.Mask(ref, 12)

    stgrain = sizedgrn(out, 0.1, 0.05, 1.05, sharp=80)
    stgrain = core.std.MaskedMerge(out, stgrain, adgmask_b)
    stgrain = core.std.MaskedMerge(out, stgrain, adgmask_a.std.Invert())

    dygrain = sizedgrn(out, 0.2, 0.05, 1.15, sharp=80, static=False)
    dygrain = core.std.MaskedMerge(out, dygrain, adgmask_a)
    grain = core.std.MergeDiff(dygrain, out.std.MakeDiff(stgrain))
    out = grain

    ref = depth(src, 16)
    rescale_mask = vdf.drm(ref, 720, 'bilinear', mthr=30, sw=0, sh=0)
    rescale_mask = vdf.region_mask(rescale_mask, *[10] * 4)
    rescale_mask = hvf.mt_expand_multi(rescale_mask,
                                       mode='ellipse',
                                       sw=4,
                                       sh=4)
    rescale_mask = rescale_mask.std.Binarize(scale_value(
        100, 8, 16)).std.Inflate().std.Convolution([*[1] * 9])

    dehalo_ref = gf.MaskedDHA(ref,
                              rx=1.65,
                              ry=1.65,
                              darkstr=0.15,
                              brightstr=1.0,
                              maskpull=48,
                              maskpush=140)
    dehalo_mask_a = vdf.region_mask(rescale_mask, top=650, right=200)

    credit = out
    dehalo_range_a = [(18, 130), (1291, 1355), (14691, 14846), (15126, 15189)]
    credit = lvf.rfs(credit, core.std.MaskedMerge(credit, ref, rescale_mask,
                                                  0),
                     [(131, 220), (400, 505), (817, 899), (1766, 1809),
                      (1925, 2041), (4033, 4116), (4342, 4453), (4964, 5031),
                      (6422, 6515), (7848, 7978),
                      (10606, 10728), (11619, 11701), (15339, 15457),
                      (edstart, edend)] + dehalo_range_a)
    credit = lvf.rfs(
        credit, core.std.MaskedMerge(credit, dehalo_ref, dehalo_mask_a, 0),
        dehalo_range_a)

    out = credit

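    # 16 << 2 == 64 and [235 << 2, 240 << 2] == [940, 960]: the standard 10-bit
    # limited-range floor and luma/chroma ceilings, enforced after dithering down to 10 bits.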
    return depth(out, 10).std.Limiter(16 << 2, [235 << 2, 240 << 2])
Example #8
def do_filter():
    """Vapoursynth filtering"""
    src = JPBD.src_cut
    src += src[-1]

    # Variables
    opstart, opend = 2206, 4362
    eptitle_s, eptitle_e = 4363, 4482
    edstart, edend = 31528, 33685
    preview_s, preview_e = 33686, src.num_frames - 1
    h = 720
    w = get_w(h)

    edges = core.edgefixer.ContinuityFixer(src, *[[2, 1, 1]] * 4)
    out = depth(edges, 32)

    ref = hvf.SMDegrain(depth(get_y(out), 16), thSAD=450)
    denoise = hybrid_denoise(out, 0.35, 1.75, dict(a=2, d=1),
                             dict(ref=depth(ref, 32)))
    out = denoise

    y = get_y(out)
    lineart = vdf.edge_detect(y, 'FDOG', 0.055,
                              (1, 1)).std.Median().std.Convolution([*[1] * 9])

    descale = core.descale.Debilinear(y, w, h)

    upscale = vdf.fsrcnnx_upscale(
        descale,
        height=h * 2,
        shader_file=r'shaders\FSRCNNX_x2_56-16-4-1.glsl',
        upscaler_smooth=eedi3_upscale,
        profile='slow',
        strength=85)

    antialias = sraa_eedi3(upscale, 9)

    downscale = muvf.SSIM_downsample(antialias,
                                     src.width,
                                     src.height,
                                     filter_param_a=0,
                                     filter_param_b=0)
    downscale = core.std.MaskedMerge(y, downscale, lineart)

    merged = vdf.merge_chroma(downscale, out)
    merged = lvf.rfs(merged, out, [(opstart, opend), (eptitle_s, eptitle_e),
                                   (preview_s, preview_e)])
    out = depth(merged, 16)

    detail_dark_mask = detail_dark_mask_func(get_y(out),
                                             brz_a=8000,
                                             brz_b=6000)
    detail_light_mask = lvf.denoise.detail_mask(out, brz_a=2500, brz_b=1200)
    detail_mask = core.std.Expr([detail_dark_mask, detail_light_mask],
                                'x y +').std.Median()
    detail_mask_grow = iterate(detail_mask, core.std.Maximum, 2)
    detail_mask_grow = iterate(detail_mask_grow, core.std.Inflate,
                               2).std.Convolution([*[1] * 9])

    detail_mask = core.std.Expr([get_y(out), detail_mask_grow, detail_mask],
                                f'x {32<<8} < y z ?')

    deband = dumb3kdbv2(out, 22, 24)
    deband = core.std.MaskedMerge(deband, out, detail_mask)
    out = deband

    ref = get_y(out).std.PlaneStats()
    adgmask_a = core.adg.Mask(ref, 30)
    adgmask_b = core.adg.Mask(ref, 12)

    stgrain = sizedgrn(out, 0.1, 0.05, 1.05, sharp=80)
    stgrain = core.std.MaskedMerge(out, stgrain, adgmask_b)
    stgrain = core.std.MaskedMerge(out, stgrain, adgmask_a.std.Invert())

    dygrain = sizedgrn(out, 0.2, 0.05, 1.15, sharp=80, static=False)
    dygrain = core.std.MaskedMerge(out, dygrain, adgmask_a)
    grain = core.std.MergeDiff(dygrain, out.std.MakeDiff(stgrain))
    out = grain

    ref = depth(src, 16)
    rescale_mask = vdf.drm(ref, 720, 'bilinear', mthr=30, sw=0, sh=0)
    rescale_mask = vdf.region_mask(rescale_mask, *[10] * 4)
    rescale_mask = hvf.mt_expand_multi(rescale_mask,
                                       mode='ellipse',
                                       sw=4,
                                       sh=4)
    rescale_mask = rescale_mask.std.Binarize(scale_value(
        100, 8, 16)).std.Inflate().std.Convolution([*[1] * 9])

    dehalo_ref = gf.MaskedDHA(ref,
                              rx=1.65,
                              ry=1.65,
                              darkstr=0.15,
                              brightstr=1.0,
                              maskpull=48,
                              maskpush=140)
    dehalo_mask_b = vdf.region_mask(rescale_mask, right=400)

    credit = out
    dehalo_range_b = [(18, 101), (16853, 16917)]
    credit = lvf.rfs(credit, core.std.MaskedMerge(credit, ref, rescale_mask,
                                                  0),
                     [(126, 160), (1722, 1761), (7749, 7795), (8189, 8232),
                      (8445, 8504), (9020, 9067), (9727, 9768), (12430, 12520),
                      (12876, 12971), (13608, 13646), (15833, 15879),
                      (20526, 20586), (20783, 20823), (21193, 21239),
                      (23812, 23854), (24018, 24059),
                      (edstart, edend)] + dehalo_range_b)

    credit = lvf.rfs(
        credit, core.std.MaskedMerge(credit, dehalo_ref, dehalo_mask_b, 0),
        dehalo_range_b)
    out = credit

    return depth(out, 10).std.Limiter(16 << 2, [235 << 2, 240 << 2])
Example #9
def do_filter():
    """Vapoursynth filtering"""
    src = JPBD.src_cut

    # Variables
    opstart, opend = 6161, 8319
    eptitle_s, eptitle_e = 8320, 8439
    edstart, edend = 31170, 33326
    preview_s, preview_e = 33687, src.num_frames - 1
    h = 720
    w = get_w(h)

    edges = core.edgefixer.ContinuityFixer(src, *[[2, 1, 1]] * 4)
    out = depth(edges, 32)

    ref = hvf.SMDegrain(depth(get_y(out), 16), thSAD=450)
    denoise = hybrid_denoise(out, 0.35, 1.75, dict(a=2, d=1),
                             dict(ref=depth(ref, 32)))
    out = denoise

    y = get_y(out)
    lineart = vdf.edge_detect(y, 'FDOG', 0.055,
                              (1, 1)).std.Median().std.Convolution([*[1] * 9])

    descale = core.descale.Debilinear(y, w, h)

    upscale = vdf.fsrcnnx_upscale(
        descale,
        height=h * 2,
        shader_file=r'shaders\FSRCNNX_x2_56-16-4-1.glsl',
        upscaler_smooth=eedi3_upscale,
        profile='slow',
        strength=85)

    antialias = sraa_eedi3(upscale, 9)

    downscale = muvf.SSIM_downsample(antialias,
                                     src.width,
                                     src.height,
                                     filter_param_a=0,
                                     filter_param_b=0)
    downscale = core.std.MaskedMerge(y, downscale, lineart)

    merged = vdf.merge_chroma(downscale, out)
    merged = lvf.rfs(merged, out, [(opstart, opend), (eptitle_s, eptitle_e),
                                   (preview_s, preview_e)])
    out = depth(merged, 16)

    detail_dark_mask = detail_dark_mask_func(get_y(out),
                                             brz_a=8000,
                                             brz_b=6000)
    detail_light_mask = lvf.denoise.detail_mask(out, brz_a=2500, brz_b=1200)
    detail_mask = core.std.Expr([detail_dark_mask, detail_light_mask],
                                'x y +').std.Median()
    detail_mask_grow = iterate(detail_mask, core.std.Maximum, 2)
    detail_mask_grow = iterate(detail_mask_grow, core.std.Inflate,
                               2).std.Convolution([*[1] * 9])

    detail_mask = core.std.Expr([get_y(out), detail_mask_grow, detail_mask],
                                f'x {32<<8} < y z ?')

    deband = dumb3kdbv2(out, 22, 24)
    deband = core.std.MaskedMerge(deband, out, detail_mask)
    out = deband

    ref = get_y(out).std.PlaneStats()
    adgmask_a = core.adg.Mask(ref, 30)
    adgmask_b = core.adg.Mask(ref, 12)

    stgrain = sizedgrn(out, 0.1, 0.05, 1.05, sharp=80)
    stgrain = core.std.MaskedMerge(out, stgrain, adgmask_b)
    stgrain = core.std.MaskedMerge(out, stgrain, adgmask_a.std.Invert())

    dygrain = sizedgrn(out, 0.2, 0.05, 1.15, sharp=80, static=False)
    dygrain = core.std.MaskedMerge(out, dygrain, adgmask_a)
    grain = core.std.MergeDiff(dygrain, out.std.MakeDiff(stgrain))
    out = grain

    ref = depth(src, 16)
    rescale_mask = vdf.drm(ref, 720, 'bilinear', mthr=30, sw=0, sh=0)
    rescale_mask = vdf.region_mask(rescale_mask, *[10] * 4)
    rescale_mask = hvf.mt_expand_multi(rescale_mask,
                                       mode='ellipse',
                                       sw=4,
                                       sh=4)
    rescale_mask = rescale_mask.std.Binarize(scale_value(
        100, 8, 16)).std.Inflate().std.Convolution([*[1] * 9])

    dehalo_ref = gf.MaskedDHA(ref,
                              rx=1.65,
                              ry=1.65,
                              darkstr=0.15,
                              brightstr=1.0,
                              maskpull=48,
                              maskpush=140)
    dehalo_mask_a = vdf.region_mask(rescale_mask, top=650)
    dehalo_mask_b = vdf.region_mask(rescale_mask, right=400)

    credit = out
    dehalo_range_a = [(25, 205), (518, 612), (2090, 2172), (14449, 14537)]
    dehalo_range_b = [(3893, 3981)]
    credit = lvf.rfs(credit, core.std.MaskedMerge(credit, ref, rescale_mask,
                                                  0),
                     [(2197, 2252), (4072, 4129), (4409, 4527), (5390, 5484),
                      (8473, 8603), (8610, 8669), (9826, 9909), (10666, 10713),
                      (12088, 12507), (14943, 15052), (17988, 18136),
                      (18897, 18976),
                      (edstart, edend)] + dehalo_range_a + dehalo_range_b)
    credit = lvf.rfs(
        credit, core.std.MaskedMerge(credit, dehalo_ref, dehalo_mask_a, 0),
        dehalo_range_a)
    credit = lvf.rfs(
        credit, core.std.MaskedMerge(credit, dehalo_ref, dehalo_mask_b, 0),
        dehalo_range_b)
    out = credit

    return depth(out, 10).std.Limiter(16 << 2, [235 << 2, 240 << 2])
Example #10
def do_filter():
    """Vapoursynth filtering"""
    src = JPBD.src_cut

    # Variables
    opstart, opend = 2038, 4196
    eptitle_s, eptitle_e = 4197, 4316
    edstart, edend = 31530, 33687
    preview_s, preview_e = 33688, src.num_frames - 1
    h = 720
    w = get_w(h)

    edges = core.edgefixer.ContinuityFixer(src, *[[2, 1, 1]] * 4)
    out = depth(edges, 32)

    ref = hvf.SMDegrain(depth(get_y(out), 16), thSAD=450)
    denoise = hybrid_denoise(out, 0.35, 1.75, dict(a=2, d=1),
                             dict(ref=depth(ref, 32)))
    out = denoise

    y = get_y(out)
    lineart = vdf.edge_detect(y, 'FDOG', 0.055,
                              (1, 1)).std.Median().std.Convolution([*[1] * 9])

    descale = core.descale.Debilinear(y, w, h)

    upscale = vdf.fsrcnnx_upscale(
        descale,
        height=h * 2,
        shader_file=r'shaders\FSRCNNX_x2_56-16-4-1.glsl',
        upscaler_smooth=eedi3_upscale,
        profile='slow',
        strength=85)

    antialias = sraa_eedi3(upscale, 9)

    downscale = muvf.SSIM_downsample(antialias,
                                     src.width,
                                     src.height,
                                     filter_param_a=0,
                                     filter_param_b=0)
    downscale = core.std.MaskedMerge(y, downscale, lineart)

    merged = vdf.merge_chroma(downscale, out)
    merged = lvf.rfs(merged, out, [(opstart, opend), (eptitle_s, eptitle_e),
                                   (preview_s, preview_e)])
    out = depth(merged, 16)

    detail_dark_mask = detail_dark_mask_func(get_y(out),
                                             brz_a=8000,
                                             brz_b=6000)
    detail_light_mask = lvf.denoise.detail_mask(out, brz_a=2500, brz_b=1200)
    detail_mask = core.std.Expr([detail_dark_mask, detail_light_mask],
                                'x y +').std.Median()
    detail_mask_grow = iterate(detail_mask, core.std.Maximum, 2)
    detail_mask_grow = iterate(detail_mask_grow, core.std.Inflate,
                               2).std.Convolution([*[1] * 9])

    detail_mask = core.std.Expr([get_y(out), detail_mask_grow, detail_mask],
                                f'x {32<<8} < y z ?')

    deband = dumb3kdbv2(out, 22, 24)
    deband = core.std.MaskedMerge(deband, out, detail_mask)

    deband_b = dumb3kdbv2(out, 24, 64)
    deband_b = core.std.MaskedMerge(deband_b, out, detail_light_mask)
    deband = lvf.rfs(deband, deband_b, [(opstart + 414, opstart + 496)])

    out = deband

    ref = get_y(out).std.PlaneStats()
    adgmask_a = core.adg.Mask(ref, 30)
    adgmask_b = core.adg.Mask(ref, 12)

    stgrain = sizedgrn(out, 0.1, 0.05, 1.05, sharp=80)
    stgrain = core.std.MaskedMerge(out, stgrain, adgmask_b)
    stgrain = core.std.MaskedMerge(out, stgrain, adgmask_a.std.Invert())

    dygrain = sizedgrn(out, 0.2, 0.05, 1.15, sharp=80, static=False)
    dygrain = core.std.MaskedMerge(out, dygrain, adgmask_a)
    grain = core.std.MergeDiff(dygrain, out.std.MakeDiff(stgrain))
    out = grain

    ref = depth(src, 16)
    rescale_mask = vdf.drm(ref, 720, 'bilinear', mthr=30, sw=0, sh=0)
    rescale_mask = vdf.region_mask(rescale_mask, *[10] * 4)
    rescale_mask = hvf.mt_expand_multi(rescale_mask,
                                       mode='ellipse',
                                       sw=4,
                                       sh=4)
    rescale_mask = rescale_mask.std.Binarize(scale_value(
        100, 8, 16)).std.Inflate().std.Convolution([*[1] * 9])

    credit = out
    credit = lvf.rfs(credit, core.std.MaskedMerge(credit, ref, rescale_mask,
                                                  0), [(8993, 9066),
                                                       (20104, 20223),
                                                       (edstart, edend)])

    out = credit

    return depth(out, 10).std.Limiter(16 << 2, [235 << 2, 240 << 2])
Example #11
def fsrcnnx_upscale(
    clip: vs.VideoNode,
    width: Optional[int] = None,
    height: int = 1080,
    shader_file: Optional[str] = None,  # noqa: PLR0912
    downscaler: Callable[[vs.VideoNode, int, int],
                         vs.VideoNode] = core.resize.Bicubic,
    upscaled_smooth: Optional[vs.VideoNode] = None,
    strength: float = 100.0,
    profile: str = 'slow',
    lmode: int = 1,
    overshoot: Optional[float] = None,
    undershoot: Optional[float] = None,
    sharpener: Callable[[vs.VideoNode], vs.VideoNode] = partial(z4usm,
                                                                radius=2,
                                                                strength=65)
) -> vs.VideoNode:
    """
    Upscale the given luma source clip with FSRCNNX to a given width / height
    while preventing FSRCNNX artifacts by limiting them.

    Args:
        clip (vs.VideoNode):
            Source clip, assuming this one is perfectly descaled.

        width (int):
            Target resolution width (if None, auto-calculated). Defaults to None.

        height (int):
            Target resolution height. Defaults to 1080.

        shader_file (str):
            Path to the FSRCNNX shader file. Defaults to None.

        downscaler (Callable[[vs.VideoNode, int, int], vs.VideoNode], optional):
            Resizer used to downscale the upscaled clip. Defaults to core.resize.Bicubic.

        upscaled_smooth (Optional[vs.VideoNode]):
            Smooth doubled clip. If not provided, will use nnedi3_upscale(clip).

        strength (float):
            Only for profile='slow'.
            Strength between the smooth upscale and the fsrcnnx upscale where 0.0 means the full smooth clip
            and 100.0 means the full fsrcnnx clip. Negative and positive values are possible, but not recommended.

        profile (str): Profile settings. Possible strings: "fast", "old", "slow" or "zastin".
                       – "fast" is the old draft mode (the plain fsrcnnx clip returned).
                       – "old" is the old mode to deal with the bright pixels.
                       – "slow" is the new mode, more efficient, using clamping.
                       – "zastin" is a combination between a sharpened nnedi3 upscale and a fsrcnnx upscale.
                         The sharpener prevents the interior of lines from being brightened and fsrcnnx
                         (as a clamping clip without nnedi3) prevents artifacting (halos) from the sharpening.

        lmode (int): Only for profile='slow':
                     – (< 0): Limit with rgvs.Repair (ex: lmode=-1 --> rgvs.Repair(1), lmode=-5 --> rgvs.Repair(5) ...)
                     – (= 0): No limit.
                     – (= 1): Limit to over/undershoot.

        overshoot (float):
            Only for profile='slow'.
            Limit for pixels that get brighter during upscaling.

        undershoot (float):
            Only for profile='slow'.
            Limit for pixels that get darker during upscaling.

        sharpener (Callable[[vs.VideoNode], vs.VideoNode], optional):
            Only for profile='zastin'.
            Sharpening function applied to the smooth nnedi3 upscale.
            Defaults to partial(z4usm, radius=2, strength=65).

    Returns:
        vs.VideoNode: Upscaled luma clip.
    """
    bits = get_depth(clip)

    clip = get_y(clip)
    clip = depth(clip, 16)

    if width is None:
        width = get_w(height, clip.width / clip.height)
    if overshoot is None:
        overshoot = strength / 100
    if undershoot is None:
        undershoot = overshoot

    profiles = ['fast', 'old', 'slow', 'zastin']
    if profile not in profiles:
        raise ValueError(
            'fsrcnnx_upscale: "profile" must be "fast", "old", "slow" or "zastin"'
        )
    num = profiles.index(profile.lower())

    if not shader_file:
        raise ValueError(
            'fsrcnnx_upscale: You must set a string path for "shader_file"')

    fsrcnnx = shader(clip, clip.width * 2, clip.height * 2, shader_file)

    if num >= 1:
        # old or slow profile
        smooth = depth(get_y(upscaled_smooth),
                       bits) if upscaled_smooth else nnedi3_upscale(clip)
        if num == 1:
            # old profile
            limit = core.std.Expr([fsrcnnx, smooth], 'x y min')
        elif num == 2:
            # slow profile
            upscaled = core.std.Expr(
                [fsrcnnx, smooth],
                'x {strength} * y 1 {strength} - * +'.format(
                    strength=strength / 100))
            if lmode < 0:
                limit = core.rgvs.Repair(upscaled, smooth, abs(lmode))
            elif lmode == 0:
                limit = upscaled
            elif lmode == 1:
                dark_limit = core.std.Minimum(smooth)
                bright_limit = core.std.Maximum(smooth)

                overshoot = scale_value(overshoot,
                                        8,
                                        16,
                                        range_in=Range.FULL,
                                        range=Range.FULL)
                undershoot = scale_value(undershoot,
                                         8,
                                         16,
                                         range_in=Range.FULL,
                                         range=Range.FULL)
                limit = core.std.Expr([
                    upscaled, bright_limit, dark_limit
                ], f'x y {overshoot} + > y {overshoot} + x ? z {undershoot} - < z {undershoot} - x y {overshoot} + > y {overshoot} + x ? ?'
                                      )
            else:
                raise ValueError(
                    'fsrcnnx_upscale: "lmode" must be < 0, 0 or 1')
        else:
            # zastin profile
            smooth_sharp = sharpener(smooth)
            limit = core.std.Expr([smooth, fsrcnnx, smooth_sharp],
                                  'x y z min max y z max min')
    else:
        limit = fsrcnnx

    if downscaler:
        scaled = downscaler(limit, width, height)
    else:
        scaled = limit

    return depth(scaled, bits)
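
A call sketch in the spirit of the do_filter scripts above; `descaled` is a placeholder descaled luma clip and the shader path must point to an existing file:

# Sketch: descaled is a placeholder luma clip.
upscaled = fsrcnnx_upscale(descaled, width=1920, height=1080,
                           shader_file=r'shaders\FSRCNNX_x2_56-16-4-1.glsl',
                           profile='slow', strength=85, lmode=1)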
Example #12
def do_filter():
    """Vapoursynth filtering"""
    src = JPBD.src_cut
    src = depth(src, 16)
    out = src

    edstart, edend = 31888, src.num_frames-1


    shift = core.resize.Point(out, src_left=1).edgefixer.ContinuityFixer([2, 1, 1], 0, 0, 0)
    out = lvf.rfs(out, shift, [(235, 318)])



    ref = hvf.SMDegrain(out, tr=1, thSAD=300, plane=4)
    denoise = mvf.BM3D(out, [1.5, 1], 1, profile1='lc', ref=ref)
    out = denoise


    y = get_y(out)
    lineart = vdf.edge_detect(y, 'FDOG', scale_value(0.065, 32, 16), (1, 1)).std.Median().std.BoxBlur(0, 1, 1, 1, 1)

    rescale = fake_rescale(y, 837.5, 1/3, 1/3, coef_dering=0.1, coef_dark=1.25,
                           coef_warp=1.15, coef_sharp=0.75, coef_finalsharp=0.85)
    rescale = core.std.MaskedMerge(y, rescale, lineart)

    out = vdf.merge_chroma(rescale, out)





    preden = core.dfttest.DFTTest(get_y(out), ftype=0, sigma=0.5, sbsize=16, sosize=12, tbsize=1)
    detail_dark_mask = detail_dark_mask_func(preden, brz_a=6000, brz_b=5000)
    detail_light_mask = lvf.denoise.detail_mask(preden, brz_a=2500, brz_b=1200)
    detail_mask = core.std.Expr([detail_dark_mask, detail_light_mask], 'x y +').std.Median()
    detail_mask_grow = iterate(detail_mask, core.std.Maximum, 2)
    detail_mask_grow = iterate(detail_mask_grow, core.std.Inflate, 2).std.BoxBlur(0, 1, 1, 1, 1)

    detail_mask = core.std.Expr([preden, detail_mask_grow, detail_mask], f'x {40<<8} < y z ?')

    pf = vdf.merge_chroma(preden, out)
    deband = dumb3kdb(pf, 17, 40)
    deband_b = dumb3kdb(pf, 20, 80)

    deband = lvf.rfs(deband, deband_b, [(2194, 2216), (2283, 2306)])
    deband = core.std.MergeDiff(deband, out.std.MakeDiff(pf))
    deband = core.std.MaskedMerge(deband, out, detail_light_mask)
    out = deband


    grain = kgf.adaptive_grain(out, 0.35)
    out = grain



    ref = depth(src, 16)
    src_c, src_nced = [depth(x, 16) for x in [src, JPBD_NCED.src_cut[13:]]]

    ending_mask = vdf.dcm(out, src_c[edstart:edend+1], src_nced[:edend-edstart+1], edstart, edend, 2, 2).std.BoxBlur(0, 2, 2, 2, 2)

    credit = out
    credit = lvf.rfs(credit, core.std.MaskedMerge(credit, ref, ending_mask), [(31900, src.num_frames-1)])
    out = credit



    return depth(out, 10).std.Limiter(16<<2, [235<<2, 240<<2], [0, 1, 2])
Example #13
def detail_mask(clip: vs.VideoNode,
                sigma: Optional[float] = None,
                rad: int = 3,
                radc: int = 2,
                brz_a: float = 0.005,
                brz_b: float = 0.005) -> vs.VideoNode:
    """
    A wrapper for creating a detail mask to be used during denoising and/or debanding.
    The detail mask is created using debandshit's rangemask,
    and is then merged with Prewitt to catch lines it may have missed.

    Function is curried to allow parameter tuning when passing to denoisers
    that allow you to pass your own mask.

    Dependencies: VapourSynth-Bilateral (optional: sigma), debandshit

    :param clip:        Input clip
    :param sigma:       Sigma for Bilateral for pre-blurring (Default: None)
    :param rad:         The luma equivalent of gradfun3's "mask" parameter
    :param radc:        The chroma equivalent of gradfun3's "mask" parameter
    :param brz_a:       Binarizing for the detail mask (Default: 0.005)
    :param brz_b:       Binarizing for the edge mask (Default: 0.005)

    :return:            Detail mask
    """
    try:
        from debandshit import rangemask
    except ModuleNotFoundError:
        raise ModuleNotFoundError(
            "detail_mask: missing dependency 'debandshit'")

    if clip.format is None:
        raise ValueError("detail_mask: 'Variable-format clips not supported'")

    # Handling correct value scaling if there's an assumed depth mismatch
    # To the me in the future, long after civilisation has fallen, make sure to check 3.10's pattern matching.
    if get_depth(clip) != 32:
        if isinstance(brz_a, float):
            brz_a = scale_value(brz_a, 32, get_depth(clip))
        if isinstance(brz_b, float):
            brz_b = scale_value(brz_b, 32, get_depth(clip))
    else:
        if isinstance(brz_a, int):
            brz_a = scale_value(brz_a, get_depth(clip), 32)
        if isinstance(brz_b, int):
            brz_b = scale_value(brz_b, get_depth(clip), 32)

    blur = (util.quick_resample(clip,
                                partial(core.bilateral.Gaussian, sigma=sigma))
            if sigma else clip)

    mask_a = rangemask(get_y(blur), rad=rad, radc=radc)
    mask_a = depth(mask_a, clip.format.bits_per_sample)
    mask_a = core.std.Binarize(mask_a, brz_a)

    mask_b = core.std.Prewitt(get_y(blur))
    mask_b = core.std.Binarize(mask_b, brz_b)

    mask = core.std.Expr([mask_a, mask_b], 'x y max')
    mask = util.pick_removegrain(mask)(mask, 22)
    return util.pick_removegrain(mask)(mask, 11)
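
A usage sketch mirroring the scripts above; `src16` and `debanded` are placeholder 16-bit clips, and the float thresholds get scaled to the clip's depth internally:

# Sketch: src16 and debanded are placeholders; thresholds are illustrative.
mask = detail_mask(src16, sigma=1.0, brz_a=0.025, brz_b=0.045)
deband_masked = core.std.MaskedMerge(debanded, src16, mask)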
Example #14
    def test_scale_value(self):
        # no change
        self.assertEqual(vsutil.scale_value(1, 8, 8, range_in=0, range=0), 1)
        self.assertEqual(vsutil.scale_value(1, 8, 8, range_in=1, range=1), 1)
        self.assertEqual(vsutil.scale_value(1, 32, 32, range_in=1, range=0), 1)
        self.assertEqual(vsutil.scale_value(1, 32, 32, range_in=0, range=1), 1)

        # range conversion
        self.assertEqual(vsutil.scale_value(219, 8, 8, range_in=0, range=1, scale_offsets=False, chroma=False), 255)
        self.assertEqual(vsutil.scale_value(255, 8, 8, range_in=1, range=0, scale_offsets=False, chroma=False), 219)

        self.assertEqual(vsutil.scale_value(224, 8, 8, range_in=0, range=1, scale_offsets=False, chroma=True), 255)
        self.assertEqual(vsutil.scale_value(255, 8, 8, range_in=1, range=0, scale_offsets=False, chroma=True), 224)

        self.assertEqual(vsutil.scale_value(235, 8, 8, range_in=0, range=1, scale_offsets=True, chroma=False), 255)
        self.assertEqual(vsutil.scale_value(255, 8, 8, range_in=1, range=0, scale_offsets=True, chroma=False), 235)

        self.assertEqual(vsutil.scale_value(240, 8, 8, range_in=0, range=1, scale_offsets=True, chroma=True), 255)
        self.assertEqual(vsutil.scale_value(255, 8, 8, range_in=1, range=0, scale_offsets=True, chroma=True), 240)

        # int to int (upsample)
        self.assertEqual(vsutil.scale_value(1, 8, 16, range_in=0, range=0, scale_offsets=False, chroma=False), 256)
        self.assertEqual(vsutil.scale_value(1, 8, 16, range_in=1, range=1, scale_offsets=False, chroma=False), 257)
        self.assertEqual(vsutil.scale_value(219, 8, 16, range_in=0, range=1, scale_offsets=False, chroma=False), 65535)
        self.assertEqual(vsutil.scale_value(255, 8, 16, range_in=1, range=0, scale_offsets=False, chroma=False), 219 << 8)

        self.assertEqual(vsutil.scale_value(1, 8, 16, range_in=0, range=0, scale_offsets=False, chroma=True), 256)
        self.assertEqual(vsutil.scale_value(1, 8, 16, range_in=1, range=1, scale_offsets=False, chroma=True), 257)
        self.assertEqual(vsutil.scale_value(224, 8, 16, range_in=0, range=1, scale_offsets=False, chroma=True), 65535)
        self.assertEqual(vsutil.scale_value(255, 8, 16, range_in=1, range=0, scale_offsets=False, chroma=True), 224 << 8)

        self.assertEqual(vsutil.scale_value(1, 8, 16, range_in=0, range=0, scale_offsets=True, chroma=False), 256)
        self.assertEqual(vsutil.scale_value(1, 8, 16, range_in=1, range=1, scale_offsets=True, chroma=False), 257)
        self.assertEqual(vsutil.scale_value(235, 8, 16, range_in=0, range=1, scale_offsets=True, chroma=False), 65535)
        self.assertEqual(vsutil.scale_value(255, 8, 16, range_in=1, range=0, scale_offsets=True, chroma=False), 235 << 8)

        self.assertEqual(vsutil.scale_value(1, 8, 16, range_in=0, range=0, scale_offsets=True, chroma=True), 256)
        self.assertEqual(vsutil.scale_value(1, 8, 16, range_in=1, range=1, scale_offsets=True, chroma=True), 257)
        self.assertEqual(vsutil.scale_value(240, 8, 16, range_in=0, range=1, scale_offsets=True, chroma=True), 65535)
        self.assertEqual(vsutil.scale_value(255, 8, 16, range_in=1, range=0, scale_offsets=True, chroma=True), 240 << 8)

        # int to flt
        self.assertEqual(vsutil.scale_value(1, 8, 32, range_in=0, range=1, scale_offsets=False, chroma=False), 1 / 219)
        self.assertEqual(vsutil.scale_value(1, 8, 32, range_in=1, range=1, scale_offsets=False, chroma=False), 1 / 255)
        self.assertEqual(vsutil.scale_value(219, 8, 32, range_in=0, range=1, scale_offsets=False, chroma=False), 1)
        self.assertEqual(vsutil.scale_value(255, 8, 32, range_in=1, range=1, scale_offsets=False, chroma=False), 1)

        self.assertEqual(vsutil.scale_value(1, 8, 32, range_in=0, range=1, scale_offsets=False, chroma=True), 1 / 224)
        self.assertEqual(vsutil.scale_value(1, 8, 32, range_in=1, range=1, scale_offsets=False, chroma=True), 1 / 255)
        self.assertEqual(vsutil.scale_value(224, 8, 32, range_in=0, range=1, scale_offsets=False, chroma=True), 1)
        self.assertEqual(vsutil.scale_value(255, 8, 32, range_in=1, range=1, scale_offsets=False, chroma=True), 1)

        self.assertEqual(vsutil.scale_value(1, 8, 32, range_in=0, range=1, scale_offsets=True, chroma=False), (1 - 16) / 219)
        self.assertEqual(vsutil.scale_value(1, 8, 32, range_in=1, range=1, scale_offsets=True, chroma=False), 1 / 255)
        self.assertEqual(vsutil.scale_value(235, 8, 32, range_in=0, range=1, scale_offsets=True, chroma=False), 1)
        self.assertEqual(vsutil.scale_value(255, 8, 32, range_in=1, range=1, scale_offsets=True, chroma=False), 1)

        self.assertEqual(vsutil.scale_value(1, 8, 32, range_in=0, range=1, scale_offsets=True, chroma=True), (1 - 128) / 224)
        self.assertEqual(vsutil.scale_value(1, 8, 32, range_in=1, range=1, scale_offsets=True, chroma=True), (1 - 128) / 255)
        self.assertEqual(vsutil.scale_value(240, 8, 32, range_in=0, range=1, scale_offsets=True, chroma=True), 0.5)
        self.assertEqual(vsutil.scale_value(255, 8, 32, range_in=1, range=1, scale_offsets=True, chroma=True), (255 - 128) / 255)

        # int to int (downsample)
        self.assertEqual(vsutil.scale_value(256, 16, 8, range_in=0, range=0, scale_offsets=False, chroma=False), 1)
        self.assertEqual(vsutil.scale_value(257, 16, 8, range_in=1, range=1, scale_offsets=False, chroma=False), 1)
        self.assertEqual(vsutil.scale_value(65535, 16, 8, range_in=1, range=0, scale_offsets=False, chroma=False), 219)
        self.assertEqual(vsutil.scale_value(219 << 8, 16, 8, range_in=0, range=1, scale_offsets=False, chroma=False), 255)

        self.assertEqual(vsutil.scale_value(256, 16, 8, range_in=0, range=0, scale_offsets=False, chroma=True), 1)
        self.assertEqual(vsutil.scale_value(257, 16, 8, range_in=1, range=1, scale_offsets=False, chroma=True), 1)
        self.assertEqual(vsutil.scale_value(65535, 16, 8, range_in=1, range=0, scale_offsets=False, chroma=True), 224)
        self.assertEqual(vsutil.scale_value(224 << 8, 16, 8, range_in=0, range=1, scale_offsets=False, chroma=True), 255)

        self.assertEqual(vsutil.scale_value(256, 16, 8, range_in=0, range=0, scale_offsets=True, chroma=False), 1)
        self.assertEqual(vsutil.scale_value(257, 16, 8, range_in=1, range=1, scale_offsets=True, chroma=False), 1)
        self.assertEqual(vsutil.scale_value(65535, 16, 8, range_in=1, range=0, scale_offsets=True, chroma=False), 235)
        self.assertEqual(vsutil.scale_value(235 << 8, 16, 8, range_in=0, range=1, scale_offsets=True, chroma=False), 255)

        self.assertEqual(vsutil.scale_value(256, 16, 8, range_in=0, range=0, scale_offsets=True, chroma=True), 1)
        self.assertEqual(vsutil.scale_value(257, 16, 8, range_in=1, range=1, scale_offsets=True, chroma=True), 1)
        self.assertEqual(vsutil.scale_value(65535, 16, 8, range_in=1, range=0, scale_offsets=True, chroma=True), 240)
        self.assertEqual(vsutil.scale_value(240 << 8, 16, 8, range_in=0, range=1, scale_offsets=True, chroma=True), 255)

        # flt to int
        self.assertEqual(vsutil.scale_value(1 / 219, 32, 8, range_in=1, range=0, scale_offsets=False, chroma=False), 1)
        self.assertEqual(vsutil.scale_value(1 / 255, 32, 8, range_in=1, range=1, scale_offsets=False, chroma=False), 1)
        self.assertEqual(vsutil.scale_value(1, 32, 8, range_in=1, range=0, scale_offsets=False, chroma=False), 219)
        self.assertEqual(vsutil.scale_value(1, 32, 8, range_in=1, range=1, scale_offsets=False, chroma=False), 255)

        self.assertEqual(vsutil.scale_value(1 / 224, 32, 8, range_in=1, range=0, scale_offsets=False, chroma=True), 1)
        self.assertEqual(vsutil.scale_value(1 / 255, 32, 8, range_in=1, range=1, scale_offsets=False, chroma=True), 1)
        self.assertEqual(vsutil.scale_value(1, 32, 8, range_in=1, range=0, scale_offsets=False, chroma=True), 224)
        self.assertEqual(vsutil.scale_value(1, 32, 8, range_in=1, range=1, scale_offsets=False, chroma=True), 255)

        self.assertEqual(vsutil.scale_value((1 - 16) / 219, 32, 8, range_in=1, range=0, scale_offsets=True, chroma=False), 1)
        self.assertEqual(vsutil.scale_value(1 / 255, 32, 8, range_in=1, range=1, scale_offsets=True, chroma=False), 1)
        self.assertEqual(vsutil.scale_value(1, 32, 8, range_in=1, range=0, scale_offsets=True, chroma=False), 235)
        self.assertEqual(vsutil.scale_value(1, 32, 8, range_in=1, range=1, scale_offsets=True, chroma=False), 255)

        self.assertEqual(vsutil.scale_value((1 - 128) / 224, 32, 8, range_in=1, range=0, scale_offsets=True, chroma=True), 1)
        self.assertEqual(vsutil.scale_value((1 - 128) / 255, 32, 8, range_in=1, range=1, scale_offsets=True, chroma=True), 1)
        self.assertEqual(vsutil.scale_value(0.5, 32, 8, range_in=1, range=0, scale_offsets=True, chroma=True), 240)
        self.assertEqual(vsutil.scale_value((255 - 128) / 255, 32, 8, range_in=1, range=1, scale_offsets=True, chroma=True), 255)
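
A small worked takeaway from these tests, assuming the default limited ranges and scale_offsets=False: integer-to-integer upsampling is a plain bit shift, which is why the scripts above write thresholds as scale_value(100, 8, 16) instead of hard-coding 25600:

# With default (limited) ranges and no offset scaling, 8 -> 16 bit is a << 8.
assert vsutil.scale_value(100, 8, 16) == 100 << 8 == 25600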
Example #15
def based_aa(clip: vs.VideoNode,
             shader_file: str = "FSRCNNX_x2_56-16-4-1.glsl",
             rfactor: float = 2.0,
             tff: bool = True,
             mask_thr: float = 60,
             show_mask: bool = False,
             lmask: vs.VideoNode | None = None,
             **eedi3_args: Any) -> vs.VideoNode:
    """
    As the name implies, this is a based anti-aliaser. Thank you, based Zastin.
    This relies on FSRCNNX being very sharp, and as such it very much acts like the main "AA" here.

    Original function by Zastin, modified by LightArrowsEXE.

    Dependencies:

    * vapoursynth-eedi3
    * vs-placebo

    :param clip:            Input clip
    :param shader_file:     Path to FSRCNNX shader file
    :param rfactor:         Image enlargement factor
    :param tff:             Top-Field-First if true, Bottom-Field-First if false
    :param mask_thr:        Threshold for the edge mask binarisation.
                            Scaled internally to match bitdepth of clip.
    :param show_mask:       Output mask
    :param eedi3_args:      Additional args to pass to eedi3
    :param lmask:           Line mask clip to use for eedi3

    :return:                AA'd clip or mask clip
    """
    def _eedi3s(clip: vs.VideoNode,
                mclip: vs.VideoNode | None = None,
                **eedi3_kwargs: Any) -> vs.VideoNode:
        edi_args: Dict[str, Any] = {  # Eedi3 args for `eedi3s`
            'field': int(tff),
            'alpha': 0.125,
            'beta': 0.25,
            'gamma': 40,
            'nrad': 2,
            'mdis': 20,
            'vcheck': 2,
            'vthresh0': 12,
            'vthresh1': 24,
            'vthresh2': 4
        }
        edi_args |= eedi3_kwargs

        out = core.eedi3m.EEDI3(clip,
                                dh=False,
                                sclip=clip,
                                planes=0,
                                **edi_args)

        if mclip:
            return core.std.Expr([clip, out, mclip], 'z y x ?')
        return out

    def _resize_mclip(mclip: vs.VideoNode,
                      width: int | None = None,
                      height: int | None = None) -> vs.VideoNode:
        iw, ih = mclip.width, mclip.height
        ow, oh = fallback(width, iw), fallback(height, ih)

        if (ow > iw and ow / iw != ow // iw) or (oh > ih
                                                 and oh / ih != oh // ih):
            mclip = Point().scale(mclip, iw * ceil(ow / iw),
                                  ih * ceil(oh / ih))
        return core.fmtc.resample(mclip,
                                  ow,
                                  oh,
                                  kernel='box',
                                  fulls=1,
                                  fulld=1)

    check_variable(clip, "based_aa")
    assert clip.format

    aaw = (round(clip.width * rfactor) + 1) & ~1
    aah = (round(clip.height * rfactor) + 1) & ~1

    clip_y = get_y(clip)

    if not lmask:
        if mask_thr > 255:
            raise ValueError(
                f"based_aa: 'mask_thr must be equal to or lower than 255 (current: {mask_thr})'"
            )

        mask_thr = scale_value(mask_thr, 8, get_depth(clip))

        lmask = clip_y.std.Prewitt().std.Binarize(
            mask_thr).std.Maximum().std.BoxBlur(0, 1, 1, 1, 1)

    mclip_up = _resize_mclip(lmask, aaw, aah)

    if show_mask:
        return lmask

    aa = depth(clip_y, 16).std.Transpose()
    aa = join([aa] * 3).placebo.Shader(shader=shader_file,
                                       filter='box',
                                       width=aa.width * 2,
                                       height=aa.height * 2)
    aa = depth(aa, get_depth(clip_y))
    aa = ssim_downsample(get_y(aa), aah, aaw)
    aa = _eedi3s(aa, mclip=mclip_up.std.Transpose(),
                 **eedi3_args).std.Transpose()
    aa = ssim_downsample(_eedi3s(aa, mclip=mclip_up, **eedi3_args), clip.width,
                         clip.height)
    aa = depth(aa, get_depth(clip_y))

    aa_merge = core.std.MaskedMerge(clip_y, aa, lmask)

    if clip.format.num_planes == 1:
        return aa_merge
    return join([aa_merge, plane(clip, 1), plane(clip, 2)])
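
Finally, a call sketch for the function above; `src` is a placeholder clip and the shader filename assumes the file is resolvable by vs-placebo:

# Sketch: src is a placeholder clip.
aa = based_aa(src, shader_file="FSRCNNX_x2_56-16-4-1.glsl", rfactor=2.0, mask_thr=60)
# show_mask=True returns the generated line mask instead, handy for tuning mask_thr.
line_mask = based_aa(src, shader_file="FSRCNNX_x2_56-16-4-1.glsl", show_mask=True)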