Example #1
import numpy as np
import vapoursynth as vs

vs_api_below4 = None  # module-level cache for the API major-version check


def frame_to_array(f: vs.VideoFrame) -> np.ndarray:
    """
    Simple wrapper to turn a video frame into a numpy array.
    """
    global vs_api_below4
    if vs_api_below4 is None:
        vs_api_below4 = vs.__api_version__.api_major < 4  # type: ignore
    return np.dstack([
        f.get_read_array(p) for p in range(f.format.num_planes)  # type: ignore
    ] if vs_api_below4 else f)
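A quick usage sketch for the wrapper above. The BlankClip source is only a placeholder, and a full-plane format such as RGB24 is assumed so that np.dstack can stack the planes into a (height, width, planes) array:

import vapoursynth as vs

core = vs.core
clip = core.std.BlankClip(format=vs.RGB24, width=640, height=360, length=1)

frame = clip.get_frame(0)     # request a single VideoFrame
arr = frame_to_array(frame)   # stack the planes into one ndarray
print(arr.shape, arr.dtype)   # expected: (360, 640, 3) uint8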
Example #2
File: clip.py Project: stuxcrystal/yuuno2
def extract_plane(buffer: Buffer, offset: int, frame: VideoFrame,
                  planeno: int):
    """
    Extracts the plane with the VapourSynth R37+ array-API.

    :param buffer:  Target buffer
    :param offset:  Where to write it
    :param frame:   The frame
    :param planeno: The plane number
    :return: The length of the plane data that was copied.
    """
    arr = frame.get_read_array(planeno)
    length = len(arr)

    if length + offset > len(buffer):
        raise BufferError("Buffer too short.")

    buffer[offset:offset + length] = arr

    return len(arr)
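A rough usage sketch. A plain bytearray stands in for yuuno2's Buffer type, and the clip and buffer size are illustrative assumptions:

import vapoursynth as vs

core = vs.core
clip = core.std.BlankClip(format=vs.GRAY8, width=640, height=360, length=1)
frame = clip.get_frame(0)

buffer = bytearray(640 * 360)   # room for the single 8-bit plane
offset = 0
for plane in range(frame.format.num_planes):
    # advance the write position by whatever extract_plane reports back
    offset += extract_plane(buffer, offset, frame, plane)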
Example #3
import numpy as np
import vapoursynth as vs


def frame_to_np(frame: vs.VideoFrame) -> np.ndarray:
    """
    Alternative to cv2.imread() that reads a VapourSynth frame directly into a numpy array.

    :param frame: VapourSynth frame from a clip
    """
    return np.dstack([np.asarray(frame.get_read_array(i)) for i in range(frame.format.num_planes)])
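A short usage sketch, again with a placeholder BlankClip source. On the pre-API4 (R37+) interface, where get_read_array is available, an RGB frame comes out as a height x width x planes array that can be handed straight to PIL or OpenCV:

import vapoursynth as vs

core = vs.core
clip = core.std.BlankClip(format=vs.RGB24, width=320, height=240, length=1)

img = frame_to_np(clip.get_frame(0))
print(img.shape, img.dtype)   # expected: (240, 320, 3) uint8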
Example #4
File: YoloCR.py Project: Ryu1845/YoloCR
    def write_subs(self, n: int, f: vs.VideoFrame, clip: vs.VideoNode,
                   sub: str) -> vs.VideoNode:
        """
        OCR scene-change frames and write the text to a subtitle file.

        Notes
        -----
        A frame is only processed when it is the first or the last frame
        of a scene, as flagged by the _SceneChangePrev and _SceneChangeNext
        frame props; the subtitle file is written as SubRip-style blocks.

        Parameters
        ----------
        n:
            The frame number
        f:
            The current frame (its props carry the scene-change flags)
        clip:
            The input clip
        sub:
            The subtitle file name

        Returns
        -------
        vs.VideoNode
            The same clip as the input
        """
        if f.props["_SceneChangeNext"] == 1 or f.props["_SceneChangePrev"] == 1:
            # Wrap plane 0 in a PIL image; skip frames that are completely black.
            img = Image.fromarray(np.array(f.get_read_array(0), copy=False))
            if not img.getextrema() == (0, 0):
                if f.props["_SceneChangePrev"] == 1:
                    # First frame of a scene: OCR it and open a new subtitle entry.
                    frame_time = convert((n * clip.fps_den / clip.fps_num))
                    img = ImageOps.invert(img)
                    self.sub_count += 1
                    self.frame_num = n
                    ocr_out = tesserocr.image_to_text(
                        img,
                        lang=self.language,
                        psm=tesserocr.PSM.SINGLE_BLOCK,
                        path=self.tessdata,
                    )
                    with open(sub, "a") as sub_io:
                        sub_io.write(f"""
{self.sub_count}
{frame_time} -->
{ocr_out}
""")
                elif f.props["_SceneChangeNext"] == 1:
                    # Last frame of the scene: fill in the end time of the open entry.
                    frame_time = convert(
                        ((n + 1) * clip.fps_den / clip.fps_num))
                    with open(sub, "r") as sub_io:
                        lines = sub_io.readlines()
                    with open(sub, "w") as sub_io:
                        for idx, line in enumerate(lines):
                            if (line.strip() == str(self.sub_count)
                                    and self.frame_num < n):
                                times = lines[idx + 1].strip()
                                lines[idx + 1] = f"{times} {frame_time}\n"
                            elif (line.strip() == str(self.sub_count - 1)
                                  and self.frame_num > n):
                                times = lines[idx + 1].strip()
                                lines[idx + 1] = f"{times} {frame_time}\n"

                        sub_io.writelines(lines)

        return clip
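A hedged sketch of how a callback with this signature is typically wired up: bound to its instance, evaluated per frame through std.FrameEval with the clip as prop_src, and driven by requesting every frame. The ocr instance, the SCDetect step, the BlankClip placeholder and the output name are assumptions for illustration, not taken from the project:

from functools import partial

import vapoursynth as vs

core = vs.core

# Placeholder source; in practice this is the binarized subtitle region.
clip = core.std.BlankClip(format=vs.GRAY8, width=1280, height=144,
                          length=100, fpsnum=24000, fpsden=1001)
# SCDetect attaches the _SceneChangePrev/_SceneChangeNext props the callback reads.
clip = core.misc.SCDetect(clip, threshold=0.1)

# ocr stands for the instance of the class that defines write_subs.
subs = core.std.FrameEval(
    clip,
    partial(ocr.write_subs, clip=clip, sub="subs.srt"),
    prop_src=clip,
)

# The callback only fires when frames are actually requested.
for n in range(subs.num_frames):
    subs.get_frame(n)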