def _wrap_frame(frame: VideoFrame) -> VideoNode:
        """Wrap a single frame into a one-frame clip.

        A blank clip matching the frame's dimensions and format is
        created, and its single frame is replaced with a copy of
        *frame* via ModifyFrame.
        """
        core = get_proxy_or_core()

        blank = core.std.BlankClip(
                format=frame.format.id,
                width=frame.width,
                height=frame.height,
                length=1,
                fpsnum=1,
                fpsden=1)

        # Deliver a copy so the wrapped frame cannot alias the original.
        def _inject(n, f):
                return frame.copy()

        return blank.std.ModifyFrame([blank], _inject)
    def __getitem__(self, item) -> VapourSynthFrameWrapper:
        """Fetch frame *item* together with its RGB conversions.

        Written as a generator-based coroutine: it yields VapourSynth
        futures and is resumed with the awaited results — presumably by
        an async driver that iterates this generator (confirm against
        the runner).
        """
        # Probe the core with a cheap BlankClip call so a dead core
        # surfaces as a clear RuntimeError instead of a raw vs.Error.
        if not is_single():
            try:
                get_proxy_or_core().std.BlankClip()
            except vs.Error:
                raise RuntimeError(
                    "Tried to access clip of a dead core.") from None

        # Yield the frame future to the driver; it sends the frame back.
        frame = yield self.clip.get_frame_async(item)
        # Re-wrap the frame as a one-frame clip so conversions can run on it.
        wrapped = self._wrap_frame(frame)
        # NOTE(review): to_rgb32() appears to return a clip (it is used
        # with get_frame_async below), so the Future annotation here
        # looks inaccurate — confirm and fix the annotation upstream.
        _rgb24: Future = self.to_rgb32(wrapped)
        rgb24 = _rgb24.get_frame_async(0)
        compat: Future = self.to_compat_rgb32(_rgb24).get_frame_async(0)

        # Wait for both conversions to complete before reading results.
        (yield gather([rgb24, compat]))
        rgb24_frame, compat_frame = rgb24.result(), compat.result()

        return VapourSynthFrameWrapper(frame=frame,
                                       compat_frame=compat_frame,
                                       rgb_frame=rgb24_frame)
        def _func(self, change=None):
            """Push configuration values onto the (proxied) core.

            NOTE(review): ``name`` is a closure variable from the
            enclosing scope, which is not visible in this chunk, and
            ``change`` looks like a traitlets change event (it exposes
            ``change.new``) — confirm against the caller.
            """
            core = get_proxy_or_core()

            # name is None means: (re-)apply every stored core setting.
            if name is None:
                core.num_threads = self.core_num_threads
                core.add_cache = self.core_add_cache

                # Only set accept_lowercase when this core exposes it.
                if hasattr(core, 'accept_lowercase'):
                    core.accept_lowercase = self.core_accept_lowercase

                # There is no obvious default for max_cache_size
                if self.core_max_cache_size is not None:
                    core.max_cache_size = self.core_max_cache_size

            # Otherwise forward just the single changed attribute.
            elif hasattr(core, name):
                setattr(core, name, change.new)
def _y4m_format_string(clip: vs.VideoNode) -> str:
    """Return the Y4M colorspace token (e.g. ``C420p10``) for *clip*.

    Only vs.GRAY and vs.YUV clips can be represented in a Y4M stream.

    :param clip: The clip whose format is mapped to a Y4M ``C`` token.
    :return: The colorspace token, including the leading ``C``.
    :raises ValueError: If the color family or subsampling combination
        cannot be expressed in Y4M.
    """
    fmt = clip.format
    if fmt.color_family == vs.GRAY:
        token = 'mono'
        if fmt.bits_per_sample > 8:
            token += str(fmt.bits_per_sample)
    elif fmt.color_family == vs.YUV:
        # Map (subsampling_w, subsampling_h) to the Y4M chroma tag.
        token = {
            (1, 1): '420',
            (1, 0): '422',
            (0, 0): '444',
            (2, 2): '410',
            (2, 0): '411',
            (0, 1): '440',
        }.get((fmt.subsampling_w, fmt.subsampling_h))
        if token is None:
            raise ValueError("This is a very strange subsampling config.")
        if fmt.bits_per_sample > 8:
            token += 'p' + str(fmt.bits_per_sample)
    else:
        # Fixed typo: the stream format is Y4M, not "V4M".
        raise ValueError("Can only use vs.GRAY and vs.YUV for Y4M-Streams")
    return 'C' + token


def encode(
        clip: vs.VideoNode,
        stream: t.IO[t.ByteString],
        *,
        y4m: bool = False,
        prefetch: t.Optional[int] = None,
        backlog: t.Optional[int] = None,
        progress: t.Optional[t.Callable[[int, int], None]] = None
) -> None:
    """Write the raw planes of *clip* to *stream*, optionally Y4M-framed.

    :param clip: The clip to encode. If something other than a bare
        VideoNode is passed (e.g. an output tuple), its first element
        is used.
    :param stream: A binary stream; it is closed when encoding finishes.
    :param y4m: Emit a YUV4MPEG2 header and per-frame ``FRAME`` markers.
    :param prefetch: Number of frames requested in parallel; defaults
        to the core's thread count.
    :param backlog: Maximum number of decoded frames buffered ahead.
    :param progress: Optional callback invoked as ``progress(done, total)``.
    :raises ValueError: If *y4m* is set and the clip's format cannot be
        represented in a Y4M stream.
    """
    if prefetch is None:
        prefetch = get_proxy_or_core().num_threads

    # Accept e.g. an output tuple and take the main clip from it.
    if not isinstance(clip, vs.VideoNode):
        clip = clip[0]

    if y4m:
        header = (
            f'YUV4MPEG2 {_y4m_format_string(clip)} '
            f'W{clip.width} H{clip.height} '
            f'F{clip.fps_num}:{clip.fps_den} Ip A0:0 XLENGTH={len(clip)}\n'
        )
        stream.write(header.encode("ascii"))
        if hasattr(stream, "flush"):
            stream.flush()

    frame: vs.VideoFrame
    for idx, frame in enumerate(frames(clip, prefetch, backlog)):
        if y4m:
            stream.write(b"FRAME\n")

        # API4 frames iterate their planes directly; API3 needs planes().
        if Features.API4:
            iterator = frame
        else:
            iterator = frame.planes()

        for planeno, plane in enumerate(iterator):
            # This is a quick fix.
            # Calling bytes(VideoPlane) should make the buffer continuous by
            # copying the frame to a continous buffer
            # if the stride does not match the width*bytes_per_sample.
            # NOTE(review): the comparison uses frame.width, i.e. the
            # width of plane 0 — for subsampled chroma planes this sends
            # them down the (safe) copying path; confirm this is intended.
            try:
                if frame.get_stride(planeno) != frame.width*clip.format.bytes_per_sample:
                    stream.write(bytes(plane))
                else:
                    stream.write(plane)

            except BrokenPipeError:
                # The consumer went away; stop writing quietly.
                return

            if hasattr(stream, "flush"):
                stream.flush()

        if progress is not None:
            progress(idx+1, len(clip))

    stream.close()
 def initialize_namespace(self, vapoursynth):
     """Publish the vapoursynth module and the active core into the
     parent's namespace under the names 'vs' and 'core'."""
     active_core = get_proxy_or_core()
     namespace = self.parent.namespace
     namespace['vs'] = vapoursynth
     namespace['core'] = active_core