Example #1
def get_optimal_line_heights(
    api: Api, renderer: AssRenderer
) -> T.Dict[str, float]:
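    # Estimate the average rendered line height of each style: render one
    # event made of TEST_LINE_COUNT stacked copies of a short sample with
    # both ascenders and descenders ("gjMW") on a tall fake frame, then
    # divide the measured frame height by the line count.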
    TEST_LINE_COUNT = 20
    VIDEO_RES_X = 100
    VIDEO_RES_Y = TEST_LINE_COUNT * 300

    fake_meta = AssMeta()
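    # WrapStyle 2 disables automatic word wrapping, so the line count is
    # controlled solely by the explicit \N breaks inserted below.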
    fake_meta.set("WrapStyle", 2)
    renderer.set_source(
        style_list=api.subs.styles,
        event_list=api.subs.events,
        meta=fake_meta,
        video_resolution=(VIDEO_RES_X, VIDEO_RES_Y),
    )

    ret = {}
    for style in api.subs.styles:
        event = AssEvent(
            start=0,
            end=1000,
            text="\\N".join(["gjMW"] * TEST_LINE_COUNT),
            style=style.name,
        )

        _frame_width, frame_height = measure_frame_size(api, renderer, event)
        line_height = frame_height / TEST_LINE_COUNT
        ret[event.style] = line_height
        api.log.debug(f"average height for {event.style}: {line_height}")
    return ret
Example #2
def measure_frame_size(
    api: Api, renderer: AssRenderer, event: AssEvent
) -> T.Tuple[int, int]:
    fake_event_list = AssEventList()
    fake_event_list.append(copy(event))

    renderer.set_source(
        style_list=renderer.style_list,
        event_list=fake_event_list,
        meta=renderer.meta,
        video_resolution=renderer.video_resolution,
    )

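    # Render the event at its start time and keep only type-0 layers, which
    # presumably correspond to glyph fills (libass IMAGE_TYPE_CHARACTER)
    # rather than outlines or shadows.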
    layers = [
        layer
        for layer in renderer.render_raw(time=event.start)
        if layer.type == 0
    ]
    if not layers:
        return (0, 0)
    min_x = min(layer.dst_x for layer in layers)
    min_y = min(layer.dst_y for layer in layers)
    max_x = max(layer.dst_x + layer.w for layer in layers)
    max_y = max(layer.dst_y + layer.h for layer in layers)
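    # The renderer works in storage-space pixels; multiplying the width by
    # the video aspect ratio (presumably the sample aspect ratio, as set up
    # in the VideoStream example) converts the bounding box to display
    # proportions for anamorphic video.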
    return (int((max_x - min_x) * api.video.aspect_ratio), max_y - min_y)
Example #3
def get_optimal_line_heights(
    api: Api, renderer: AssRenderer
) -> T.Dict[str, float]:
    TEST_LINE_COUNT = 20
    VIDEO_RES_X = 100
    VIDEO_RES_Y = TEST_LINE_COUNT * 300

    fake_meta = AssMeta()
    fake_meta.set("WrapStyle", 2)
    renderer.set_source(
        style_list=api.subs.styles,
        event_list=api.subs.events,
        meta=fake_meta,
        video_resolution=(VIDEO_RES_X, VIDEO_RES_Y),
    )

    ret = {}
    for style in api.subs.styles:
        event = AssEvent(
            start=0,
            end=1000,
            text="\\N".join(["gjMW"] * TEST_LINE_COUNT),
            style=style.name,
        )

        _frame_width, frame_height = measure_frame_size(api, renderer, event)
        line_height = frame_height / TEST_LINE_COUNT
        ret[event.style] = line_height
        api.log.debug(f"average height for {event.style}: {line_height}")
    return ret
Example #4
def measure_frame_size(
    api: Api, renderer: AssRenderer, event: AssEvent
) -> T.Tuple[int, int]:
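    # Events that reference a style unknown to the renderer cannot be
    # measured meaningfully, so report a zero-sized frame for them.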
    if not any(style.name == event.style for style in renderer.style_list):
        return (0, 0)

    fake_event_list = AssEventList()
    fake_event_list.append(copy(event))

    renderer.set_source(
        style_list=renderer.style_list,
        event_list=fake_event_list,
        meta=renderer.meta,
        video_resolution=renderer.video_resolution,
    )

    layers = [
        layer
        for layer in renderer.render_raw(time=event.start)
        if layer.type == 0
    ]
    if not layers:
        return (0, 0)
    min_x = min(layer.dst_x for layer in layers)
    min_y = min(layer.dst_y for layer in layers)
    max_x = max(layer.dst_x + layer.w for layer in layers)
    max_y = max(layer.dst_y + layer.h for layer in layers)
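    # Fall back to square pixels when no video stream is loaded.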
    aspect_ratio = (
        api.video.current_stream.aspect_ratio
        if api.video.current_stream
        else 1
    )
    return (int((max_x - min_x) * aspect_ratio), max_y - min_y)
Example #5
def list_violations(
    spell_check_lang: T.Optional[str], api: Api
) -> T.Iterable[BaseResult]:
    renderer = AssRenderer()
    optimal_line_heights = get_optimal_line_heights(api, renderer)
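    # get_optimal_line_heights pointed the renderer at a fake source, so
    # re-source it with the real styles, events, metadata and resolution
    # before running the per-event checks.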
    renderer.set_source(
        style_list=api.subs.styles,
        event_list=api.subs.events,
        meta=api.subs.meta,
        video_resolution=(get_width(api), get_height(api)),
    )

    for event in api.subs.events:
        yield from check_style_validity(event, api.subs.styles)
        yield from check_durations(event)
        yield from check_punctuation(spell_check_lang, event)
        yield from check_quotes(event)
        yield from check_line_continuation(event)
        yield from check_ass_tags(event)
        yield from check_double_words(event)
        yield from check_unnecessary_breaks(event, api, renderer)
        yield from check_long_line(event, api, renderer, optimal_line_heights)
Example #6
def list_violations(api: Api) -> T.Iterable[BaseResult]:
    renderer = AssRenderer()
    optimal_line_heights = get_optimal_line_heights(api, renderer)
    renderer.set_source(
        style_list=api.subs.styles,
        event_list=api.subs.events,
        meta=api.subs.meta,
        video_resolution=(get_width(api), get_height(api)),
    )

    for event in api.subs.events:
        yield from check_style_validity(event, api.subs.styles)
        yield from check_durations(event)
        yield from check_punctuation(event)
        yield from check_quotes(event)
        yield from check_line_continuation(event)
        yield from check_ass_tags(event)
        yield from check_double_words(event)
        yield from check_unnecessary_breaks(
            event, api, renderer, optimal_line_heights
        )
        yield from check_long_line(event, api, renderer, optimal_line_heights)
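A minimal sketch of how this generator might be consumed, using the signature from Example #6; the wrapper function name and the use of the logging API are assumptions, not taken from the examples above.

def log_quality_check_violations(api: Api) -> None:
    # Each check lazily yields zero or more BaseResult objects per event.
    for violation in list_violations(api):
        api.log.info(str(violation))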
Example #7
class _StylePreview(QtWidgets.QGroupBox):
    preview_text_changed = QtCore.pyqtSignal([])

    def __init__(
        self,
        api: Api,
        model: AssStylesModel,
        selection_model: QtCore.QItemSelectionModel,
        parent: QtWidgets.QWidget,
    ) -> None:
        super().__init__("Preview", parent)
        self._api = api
        self._selection_model = selection_model

        self._renderer = AssRenderer()

        self._editor = QtWidgets.QPlainTextEdit()
        self._editor.setPlainText(api.cfg.opt["styles"]["preview_test_text"])
        self._editor.setFixedWidth(400)
        self._editor.setTabChangesFocus(True)
        self._editor.setFixedHeight(get_text_edit_row_height(self._editor, 2))

        self._background_combobox = QtWidgets.QComboBox()
        for i, path in enumerate(get_assets("style_preview_bk")):
            self._background_combobox.addItem(path.name, path.resolve())
            if path.name == api.cfg.opt["styles"]["preview_background"]:
                self._background_combobox.setCurrentIndex(i)

        self._preview_box = QtWidgets.QLabel(self)
        self._preview_box.setLineWidth(1)
        self._preview_box.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self._preview_box.setFrameShadow(QtWidgets.QFrame.Sunken)
        self._preview_box.setSizePolicy(
            QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Ignored
        )

        layout = QtWidgets.QVBoxLayout(self)
        layout.addWidget(self._editor)
        layout.addWidget(self._background_combobox)
        layout.addWidget(self._preview_box)

        self.update_preview()
        self._editor.textChanged.connect(self._on_text_change)
        self._background_combobox.currentIndexChanged.connect(
            self._on_background_change
        )

        model.dataChanged.connect(self.update_preview)
        model.rowsInserted.connect(self.update_preview)
        model.rowsRemoved.connect(self.update_preview)
        selection_model.selectionChanged.connect(self.update_preview)

    def _on_background_change(self) -> None:
        self.update_preview()
        self._api.cfg.opt["styles"][
            "preview_background"
        ] = self._background_combobox.currentData().name

    def _on_text_change(self) -> None:
        self.preview_text_changed.emit()
        self.update_preview()
        self._api.cfg.opt["styles"]["preview_test_text"] = self.preview_text

    @property
    def preview_text(self) -> str:
        return self._editor.toPlainText()

    @property
    def _selected_style(self) -> T.Optional[AssStyle]:
        try:
            idx = self._selection_model.selectedIndexes()[0].row()
        except IndexError:
            return None
        else:
            return self._api.subs.styles[idx]

    def update_preview(self) -> None:
        selected_style = self._selected_style
        if not selected_style:
            self._preview_box.clear()
            return

        resolution = (self._preview_box.width(), self._preview_box.height())
        if resolution[0] <= 0 or resolution[1] <= 0:
            self._preview_box.clear()
            return

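        # Work on a copy of the selected style, renamed so the fake event
        # below can reference it, and scaled so the preview roughly matches
        # the on-video proportions when a video stream is loaded.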
        fake_style = copy(selected_style)
        fake_style.name = "Default"
        if (
            self._api.video.current_stream
            and self._api.video.current_stream.is_ready
        ):
            fake_style.scale(
                resolution[1] / self._api.video.current_stream.height
            )
        fake_style_list = AssStyleList()
        fake_style_list.append(fake_style)

        fake_event = AssEvent(
            start=0,
            end=1000,
            text=self.preview_text.replace("\n", "\\N"),
            style=fake_style.name,
        )
        fake_event_list = AssEventList()
        fake_event_list.append(fake_event)

        fake_meta = AssMeta()

        image = PIL.Image.new(mode="RGBA", size=resolution)

        background_path = self._background_combobox.currentData()
        if background_path and background_path.exists():
            background = PIL.Image.open(background_path)
            for y in range(0, resolution[1], background.height):
                for x in range(0, resolution[0], background.width):
                    image.paste(background, (x, y))

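        # Render the subtitles at the preview resolution and composite them
        # over the (optionally tiled) background, using the subtitle image's
        # own alpha channel as the mask.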
        self._renderer.set_source(
            fake_style_list, fake_event_list, fake_meta, resolution
        )
        subs_image = self._renderer.render(
            time=0,
            aspect_ratio=(
                self._api.video.current_stream.aspect_ratio
                if self._api.video.current_stream
                else 1
            ),
        )
        image = PIL.Image.composite(subs_image, image, subs_image)

        image = PIL.ImageQt.ImageQt(image)
        image = QtGui.QImage(image)
        self._preview_box.setPixmap(QtGui.QPixmap.fromImage(image))
Example #8
class VideoStream(QtCore.QObject):
    """The video API."""

    errored = QtCore.pyqtSignal()
    changed = QtCore.pyqtSignal()
    loaded = QtCore.pyqtSignal()

    def __init__(
        self,
        threading_api: ThreadingApi,
        log_api: LogApi,
        subs_api: SubtitlesApi,
        path: Path,
    ) -> None:
        """Initialize self.

        :param threading_api: threading API
        :param log_api: logging API
        :param subs_api: subtitles API
        :param path: path to the video file to load
        """
        super().__init__()
        self._threading_api = threading_api
        self._log_api = log_api
        self._subs_api = subs_api

        self.uid = uuid.uuid4()

        self._path = path
        self._timecodes: T.List[int] = []
        self._keyframes: T.List[int] = []
        self._frame_rate = fractions.Fraction(0, 1)
        self._aspect_ratio = fractions.Fraction(1, 1)
        self._width = 0
        self._height = 0

        self._ass_renderer = AssRenderer()
        self._source: T.Union[None, ffms2.VideoSource] = None

        self._last_output_fmt: T.Any = None

        self._log_api.info(f"video: loading {path}")
        self._threading_api.schedule_task(
            lambda: _load_video_source(self._log_api, self.uid, self._path),
            self._got_source,
        )

    @property
    def path(self) -> Path:
        """Return video source path.

        :return: path
        """
        return self._path

    @property
    def is_ready(self) -> bool:
        """Return whether the video is loaded.

        :return: whether the video is loaded
        """
        return self._source is not None

    def screenshot(
        self,
        pts: int,
        path: Path,
        include_subtitles: bool,
        width: T.Optional[int],
        height: T.Optional[int],
    ) -> None:
        """Save a screenshot into specified destination.

        :param pts: pts to make screenshot of
        :param path: path to save the screenshot to
        :param include_subtitles: whether to 'burn in' the subtitles
        :param width: optional width to render to
        :param height: optional height to render to
        """

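        # When only one dimension is given, derive the other one from the
        # source aspect ratio; e.g. a 1280x720 video with height=360 yields
        # grab_width = int(1280 * 360 / 720) = 640.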
        if width and height:
            grab_width = width
            grab_height = height
        elif height:
            grab_width = int(self.width * height / self.height)
            grab_height = height
        elif width:
            grab_height = int(self.height * width / self.width)
            grab_width = width
        else:
            grab_width = self.width
            grab_height = self.height

        if grab_width <= 0 or grab_height <= 0:
            raise ValueError(
                "cannot take a screenshot at a zero or negative resolution"
            )

        pts = self.align_pts_to_prev_frame(pts)
        idx = self.timecodes.index(pts)
        frame = self.get_frame(idx, grab_width, grab_height)
        image = PIL.Image.frombytes("RGB", (grab_width, grab_height), frame)

        if include_subtitles:
            self._ass_renderer.set_source(
                self._subs_api.styles,
                self._subs_api.events,
                self._subs_api.meta,
                (grab_width, grab_height),
            )
            subs_image = self._ass_renderer.render(
                time=pts, aspect_ratio=self._aspect_ratio
            )
            image = PIL.Image.composite(subs_image, image, subs_image)

        image.save(str(path))

    def align_pts_to_near_frame(self, pts: int) -> int:
        """Align PTS to a frame closest to given PTS.

        :param pts: PTS to align
        :return: aligned PTS
        """
        if self.timecodes:
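            # Pick the nearer of the last frame at or before the given PTS
            # and the first frame at or after it.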
            max_idx = len(self.timecodes) - 1
            idx1 = max(
                0, min(max_idx, bisect.bisect_right(self.timecodes, pts) - 1)
            )
            idx2 = max(
                0, min(max_idx, bisect.bisect_left(self.timecodes, pts))
            )
            return min(
                [self.timecodes[idx1], self.timecodes[idx2]],
                key=lambda val: abs(val - pts),
            )
        return pts

    def align_pts_to_prev_frame(self, pts: int) -> int:
        """Align PTS to a frame immediately before given PTS.

        :param pts: PTS to align
        :return: aligned PTS
        """
        if self.timecodes:
            idx = bisect.bisect_right(self.timecodes, pts) - 1
            if idx >= len(self.timecodes):
                return self.timecodes[-1]
            if pts < self.timecodes[idx]:
                return pts
            return self.timecodes[idx]
        return pts

    def align_pts_to_next_frame(self, pts: int) -> int:
        """Align PTS to a frame immediately after given PTS.

        :param pts: PTS to align
        :return: aligned PTS
        """
        if self.timecodes:
            idx = bisect.bisect_left(self.timecodes, pts)
            if idx >= len(self.timecodes):
                return pts
            if pts < 0:
                return self.timecodes[0]
            return self.timecodes[idx]
        return pts

    def frame_idx_from_pts(
        self, pts: T.Union[float, int, np.ndarray]
    ) -> T.Union[int, np.ndarray]:
        """Get index of a frame that contains given PTS.

        :param pts: PTS to search for
        :return: frame index, -1 if not found
        """
        ret = np.searchsorted(self.timecodes, pts, "right").astype(int)
        ret = np.clip(ret - 1, a_min=0 if self.timecodes else -1, a_max=None)
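        # ret now holds, for each pts, the index of the frame that contains
        # it (clamped to frame 0 for PTS before the first frame), or -1 when
        # there are no timecodes at all.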
        return ret

    @property
    def frame_rate(self) -> fractions.Fraction:
        """Return the frame rate.

        :return: video frame rate
        """
        return self._frame_rate

    @property
    def width(self) -> int:
        """Return horizontal video resolution.

        :return: video width in pixels
        """
        return self._width

    @property
    def height(self) -> int:
        """Return vertical video resolution.

        :return: video height in pixels
        """
        return self._height

    @property
    def aspect_ratio(self) -> fractions.Fraction:
        """Return the frame aspect ratio.

        :return: video frame aspect ratio
        """
        return self._aspect_ratio

    @property
    def timecodes(self) -> T.List[int]:
        """Return video frames' PTS.

        :return: video frames' PTS
        """
        if not self._wait_for_source():
            return []
        return self._timecodes

    @property
    def keyframes(self) -> T.List[int]:
        """Return video keyframes' indexes.

        :return: video keyframes' indexes
        """
        if not self._wait_for_source():
            return []
        return self._keyframes

    @property
    def min_pts(self) -> int:
        """Return minimum video time in milliseconds.

        :return: minimum PTS
        """
        if not self.timecodes:
            return 0
        return self.timecodes[0]

    @property
    def max_pts(self) -> int:
        """Return maximum video time in milliseconds.

        :return: maximum PTS
        """
        if not self.timecodes:
            return 0
        return self.timecodes[-1]

    def get_frame(
        self, frame_idx: int, width: int, height: int
    ) -> T.Optional[np.ndarray]:
        """Get raw video data from the currently loaded video source.

        :param frame_idx: frame number
        :param width: output image width
        :param height: output image height
        :return: numpy image
        """
        with _SAMPLER_LOCK:
            if (
                not self._wait_for_source()
                or frame_idx < 0
                or frame_idx >= len(self.timecodes)
            ):
                return None
            assert self._source

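            # Reconfigure the ffms2 output format only when the requested
            # pixel format, size or resizer changed since the last call.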
            new_output_fmt = (_PIX_FMT, width, height, ffms2.FFMS_RESIZER_AREA)
            if self._last_output_fmt != new_output_fmt:
                self._source.set_output_format(*new_output_fmt)
                self._last_output_fmt = new_output_fmt

            frame = self._source.get_frame(frame_idx)
            return (
                frame.planes[0]
                .reshape((height, frame.Linesize[0]))[:, 0 : width * 3]
                .reshape(height, width, 3)
            )

    def _got_source(self, source: ffms2.VideoSource) -> None:
        with _SAMPLER_LOCK:
            self._source = source

            if source is None:
                self.errored.emit()
                return

            self._timecodes = [
                int(round(pts)) for pts in source.track.timecodes
            ]
            self._keyframes = source.track.keyframes[:]
            self._timecodes.sort()
            self._keyframes.sort()

            self._frame_rate = fractions.Fraction(
                self._source.properties.FPSNumerator,
                self._source.properties.FPSDenominator,
            )

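            # Use the stream's sample aspect ratio when it is present;
            # otherwise assume square pixels.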
            self._aspect_ratio = (
                fractions.Fraction(
                    self._source.properties.SARNum,
                    self._source.properties.SARDen,
                )
                if (
                    self._source.properties.SARNum
                    and self._source.properties.SARDen
                )
                else fractions.Fraction(1, 1)
            )

            frame = source.get_frame(0)
            self._width = frame.EncodedWidth
            self._height = int(frame.EncodedHeight / self._aspect_ratio)
            self.loaded.emit()

    def _wait_for_source(self) -> bool:
        if self._source is None:
            return False
        while self._source is _LOADING:
            time.sleep(0.01)
        if self._source is None:
            return False
        return True
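A minimal usage sketch of the screenshot API, assuming `stream` is a fully loaded VideoStream and that pathlib.Path is in scope as in the class above; the helper name and the argument choices are illustrative only.

def save_subtitled_screenshot(
    stream: VideoStream, pts: int, path: Path
) -> None:
    # Snap to the closest real frame first; screenshot() itself only aligns
    # to the previous frame.
    aligned_pts = stream.align_pts_to_near_frame(pts)
    stream.screenshot(
        pts=aligned_pts,
        path=path,
        include_subtitles=True,
        width=None,
        height=None,
    )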