Example #1
0
def stack_planes(clip: vs.VideoNode,
                 stack_vertical: bool = False) -> vs.VideoNode:
    """
    Stacks the planes of a clip into a single preview clip.

    :param clip:              Input clip
    :param stack_vertical:    Stack the planes vertically (Default: False)

    :return:                  Clip with stacked planes
    """

    planes = split(clip)
    subsampling = get_subsampling(clip)

    if subsampling == '444':
        # Full-size chroma: all three planes line up along a single axis.
        stacker = core.std.StackVertical if stack_vertical else core.std.StackHorizontal
        return stacker(planes)

    if subsampling == '420':
        # Half-size chroma: join the two chroma planes along the opposite
        # axis so their combined extent matches the luma plane.
        if stack_vertical:
            chroma = core.std.StackHorizontal(planes[1:])
            return core.std.StackVertical([planes[0], chroma])
        chroma = core.std.StackVertical(planes[1:])
        return core.std.StackHorizontal([planes[0], chroma])

    raise ValueError(
        "stack_planes: 'Input clip must be in YUV format with 444 or 420 chroma subsampling'"
    )
Example #2
0
 def test_subsampling(self):
     """get_subsampling returns the ratio string, None for RGB, and raises on unknown ratios."""
     cases = [
         ('444', self.YUV444P8_CLIP),
         ('440', self.YUV440P8_CLIP),
         ('420', self.YUV420P8_CLIP),
         ('422', self.YUV422P8_CLIP),
         ('411', self.YUV411P8_CLIP),
         ('410', self.YUV410P8_CLIP),
         (None, self.RGB24_CLIP),
     ]
     for expected, clip in cases:
         self.assertEqual(expected, vsutil.get_subsampling(clip))
     # A custom format with higher subsampling than any legal one exercises the error branch:
     with self.assertRaisesRegex(ValueError, 'Unknown subsampling.'):
         vsutil.get_subsampling(
             vs.core.std.BlankClip(_format=self.YUV444P8_CLIP.format.replace(subsampling_w=4))
         )
Example #3
0
    def _smart_crop(
        self
    ) -> None:  # has to alter self.clips to send clips to _marked_clips() in Stack's _compare()
        """Crops self.clips in place, accounting for odd resolutions."""
        if not self.width or not self.height:
            raise ValueError(
                "Split: all clips must have same width and height")

        # An odd per-clip size, or an odd remainder, would produce odd crop
        # offsets, which subsampled chroma cannot represent.
        if self.direction == Direction.HORIZONTAL:
            piece, leftover = divmod(self.width, self.num_clips)
            breaks_subsampling = bool(piece % 2 or leftover % 2)
        elif self.direction == Direction.VERTICAL:
            piece, leftover = divmod(self.height, self.num_clips)
            breaks_subsampling = bool(piece % 2 or leftover % 2)
        else:
            breaks_subsampling = False

        is_subsampled = any(
            vsutil.get_subsampling(clip) not in ('444', None)
            for clip in self.clips)

        if breaks_subsampling and is_subsampled:
            raise ValueError(
                "Split: resulting cropped width or height violates subsampling rules; "
                "consider resampling to YUV444 or RGB before attempting to crop"
            )

        last = self.num_clips - 1

        if self.direction == Direction.HORIZONTAL:
            size, overflow = divmod(self.width, self.num_clips)

            for index, clip in enumerate(self.clips):
                leading = size * index
                trailing = size * (last - index)
                # Every clip except the last also crops away the remainder.
                if index != last:
                    trailing += overflow
                self.clips[index] = clip.std.Crop(left=leading,
                                                  right=trailing)

        elif self.direction == Direction.VERTICAL:
            size, overflow = divmod(self.height, self.num_clips)

            for index, clip in enumerate(self.clips):
                leading = size * index
                trailing = size * (last - index)
                # Every clip except the last also crops away the remainder.
                if index != last:
                    trailing += overflow
                self.clips[index] = clip.std.Crop(top=leading,
                                                  bottom=trailing)
Example #4
0
def padder(clip: vs.VideoNode,
           left: int = 32,
           right: int = 32,
           top: int = 32,
           bottom: int = 32) -> vs.VideoNode:
    """
    Pads out the pixels on the sides by the given amount of pixels.
    For a 4:2:0 clip, the output must be an even resolution.

    :param clip:        Input clip
    :param left:        Padding added to the left side of the clip
    :param right:       Padding added to the right side of the clip
    :param top:         Padding added to the top side of the clip
    :param bottom:      Padding added to the bottom side of the clip

    :return:            Padded clip
    """
    check_variable(clip, "padder")

    out_width = clip.width + left + right
    out_height = clip.height + top + bottom

    # YUV420 cannot represent odd dimensions.
    if get_subsampling(clip) == '420' and (out_width % 2 or out_height % 2):
        raise ValueError(
            "padder: 'Values must result in an even resolution when passing a YUV420 clip!'"
        )

    # Point-resize onto the enlarged canvas (negative src offsets shift the
    # image into place), then fill the uncovered border area.
    shifted = core.resize.Point(clip,
                                out_width,
                                out_height,
                                src_top=-top,
                                src_left=-left,
                                src_width=out_width,
                                src_height=out_height)
    return core.fb.FillBorders(shifted,
                               left=left,
                               right=right,
                               top=top,
                               bottom=bottom)
def _prep_scale(clip: vs.VideoNode, w: int, h: int) -> vs.VideoNode:
    """Scale *clip* to w x h without changing its subsampling.

    Hermite (b=c=0) for downscales, Mitchell (b=c=1/3) for upscales.
    NOTE(review): a clip whose height already matches ``h`` is returned
    untouched even if its width differs — preserved from the original.
    """
    if clip.height > h:
        return core.resize.Bicubic(clip, w, h,
                                   filter_param_a=0,
                                   filter_param_b=0)
    if clip.height < h:
        return core.resize.Bicubic(clip, w, h,
                                   filter_param_a=0.33,
                                   filter_param_b=0.33)
    return clip


def _prep_scale_420_to_444(clip: vs.VideoNode, w: int, h: int) -> vs.VideoNode:
    """Scale a YUV420 *clip* to w x h while converting it to 4:4:4."""
    fmt = clip.format.replace(subsampling_w=0, subsampling_h=0)

    if clip.height > h:
        # Luma always downscales with Hermite.  Chroma only gets Hermite when
        # it is itself shrinking (target at most half the source height);
        # otherwise the half-size chroma is being upscaled, so use Mitchell.
        chroma_param = 0 if clip.height >= (2 * h) else 0.33
        return core.resize.Bicubic(clip, w, h,
                                   filter_param_a=0,
                                   filter_param_b=0,
                                   filter_param_a_uv=chroma_param,
                                   filter_param_b_uv=chroma_param,
                                   format=fmt)
    if clip.height < h:
        return core.resize.Bicubic(clip, w, h,
                                   filter_param_a=0.33,
                                   filter_param_b=0.33,
                                   format=fmt)
    # Same height: only the chroma planes need upscaling to full resolution.
    return core.resize.Bicubic(clip,
                               filter_param_a=0.33,
                               filter_param_b=0.33,
                               format=fmt)


def _prep_dither(clip: vs.VideoNode, dith: bool, static: bool) -> vs.VideoNode:
    """Reduce *clip* to 8 bits, optionally with error diffusion."""
    if get_depth(clip) <= 8:
        return clip
    if not dith:
        # No dither, round to the closest value.
        return core.fmtc.bitdepth(clip, bits=8, dmode=1)
    # dmode 6 = Floyd-Steinberg (good for static screenshots),
    # dmode 3 = Sierra-2-4A "Filter Lite" (faster).
    return core.fmtc.bitdepth(clip, bits=8, dmode=6 if static else 3)


def prep(*clips: vs.VideoNode, w: int = 1280, h: int = 720, dith: bool = True, yuv444: bool = True, static: bool = True) \
        -> Union[vs.VideoNode, List[vs.VideoNode]]:
    """Prepares multiple clips of diff sizes/bit-depths to be compared.

    Can optionally be used as a simplified resize/ftmc wrapper for one
    clip. Clips MUST be either YUV420 or YUV444.

    Transforms all planes to w x h using Bicubic:
        Hermite 0,0 for downscale / Mitchell 1/3,1/3 for upscale.

    :param clips: clip(s) to process
        :bit depth: ANY
        :color family: YUV
        :float precision: ANY
        :sample type: ANY
        :subsampling: 420, 444

    :param w: target width in px

    :param h: target height in px

    :param dith: whether or not to dither clips down to 8-bit (Default value = True)

    :param yuv444: whether or not to convert all clips to 444 chroma subsampling (Default value = True)

    :param static: changes dither mode based on clip usage (Default value = True)
        True will use Floyd-Steinberg error diffusion (good for static screenshots)
        False will use Sierra's Filter Lite error diffusion (faster)

    :returns: processed clip(s)
    """
    outclips = []
    for clip in clips:
        if get_subsampling(clip) == '420' and yuv444:
            scaled = _prep_scale_420_to_444(clip, w, h)
        else:
            # 444 input, or 420 kept as-is: scale without touching subsampling.
            scaled = _prep_scale(clip, w, h)
        outclips.append(_prep_dither(scaled, dith, static))

    # Unwrap single-clip calls for convenience.  (The original returned the
    # loop variable `clip_dith` here, relying on it leaking out of the for
    # loop — fragile, and a NameError if `clips` were ever empty here.)
    if len(outclips) == 1:
        return outclips[0]

    return outclips
Example #6
0
 def test_decorators(self):
     """get_subsampling must reject variable-format clips."""
     self.assertRaisesRegex(ValueError, 'Variable-format',
                            vsutil.get_subsampling, self.VARIABLE_FORMAT_CLIP)
Example #7
0
            'V': split_planes[2]
        }
    elif clip.format.color_family == vs.ColorFamily.RGB:
        planes = {
            'R': split_planes[0],
            'G': split_planes[1],
            'B': split_planes[2]
        }
    else:
        raise ValueError(
            f"stack_planes: unexpected color family {clip.format.color_family.name}"
        )

    direction: Direction = Direction.HORIZONTAL if not stack_vertical else Direction.VERTICAL

    if vsutil.get_subsampling(clip) in ('444', None):
        return Stack(planes, direction=direction).clip

    elif vsutil.get_subsampling(clip) == '420':
        subsample_direction: Direction = Direction.HORIZONTAL if stack_vertical else Direction.VERTICAL
        y_plane = planes.pop('Y').text.Text(text='Y')
        subsampled_planes = Stack(planes, direction=subsample_direction).clip

        return Stack([y_plane, subsampled_planes], direction=direction).clip

    else:
        raise ValueError(
            f"stack_planes: unexpected subsampling {vsutil.get_subsampling(clip)}"
        )