Example #1
    def get_contents(
        self,
        subdirs: Optional[Sequence[str]] = None,
        recurse: bool = True,
        base: Any = None,
        filters: Optional[Dict[str, Any]] = None,
        excludes: Optional[Dict[str, Any]] = None,
    ) -> Sequence[ContentProxy]:
        subdirs_path = None
        if base is None:
            base = self.content
        if subdirs:
            # Resolve each subdir string against the base content's directory.
            subdirs_path = [
                parse_dir(path, base.src.contentpath[0]) for path in subdirs
            ]

        ret = self.context.site.files.get_contents(
            self.context.site,
            filters=filters,
            subdirs=subdirs_path,
            recurse=recurse,
            excludes=excludes,
        )
        return [ContentProxy(self.context, content) for content in ret]
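
This example, like the others on this page, resolves user-supplied directory strings with parse_dir against the current content's contentpath before handing them to the site-level lookup. A minimal call-site sketch for get_contents, assuming the method is exposed on a template helper object (the names helper and "blog" are illustrative, not part of the library):

    # Hypothetical call site: fetch non-draft articles under blog/,
    # without descending into nested directories.
    articles = helper.get_contents(
        subdirs=["blog"],
        recurse=False,
        filters={"type": {"article"}, "draft": {False}},
    )
    # Each returned item is a ContentProxy wrapping a raw content object.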
Example #2
    def group_items(
        self,
        group: str,
        subdirs: Optional[Sequence[str]] = None,
        recurse: bool = True,
        filters: Optional[Dict[str, Any]] = None,
        excludes: Optional[Dict[str, Any]] = None,
    ) -> List[Tuple[Tuple[str, ...], List[ContentProxy]]]:
        subdirs_path = None
        if subdirs:
            subdirs_path = [
                parse_dir(path, self.content.src.contentpath[0])
                for path in subdirs
            ]

        groups = self.context.site.files.group_items(
            self.context.site,
            group,
            filters=filters,
            excludes=excludes,
            subdirs=subdirs_path,
            recurse=recurse,
        )

        ret: List[Tuple[Tuple[str, ...], List[ContentProxy]]] = []

        for groupvalues, contents in groups:
            ret.append((
                groupvalues,
                [ContentProxy(self.context, content) for content in contents],
            ))
        return ret
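
group_items returns one (groupvalues, contents) pair per group, where groupvalues is a tuple of the grouping key's values. A sketch of consuming that return value, again with an illustrative helper object:

    # Group articles by their "tags" metadata and report each bucket's size.
    for tags, items in helper.group_items("tags", subdirs=["blog"]):
        label = tags[0] if tags else "(untagged)"
        print(label, len(items))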
Example #3
    def get(self,
            dir: Union[None, str, PathTuple],
            name: str,
            default: Any = _omit) -> Any:
        if isinstance(dir, tuple):
            dirtuple = dir
        elif isinstance(dir, str):
            dirtuple = parse_dir(dir, self.content.src.contentpath[0])
        elif dir is None:
            dirtuple = self.content.src.contentpath[0]
        else:
            raise ValueError(f"Invalid dir: {dir}")

        try:
            return self.context.site.config.get(dirtuple, name)
        except exceptions.ConfigNotFound:
            if default is self._omit:
                raise
            return default
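
get accepts the directory in three forms: a ready-made path tuple, a string resolved through parse_dir, or None, meaning the content's own directory. The _omit class attribute is a sentinel, so an explicit default=None can be told apart from "no default given". Illustrative call forms (cfg and the key names are placeholders):

    cfg.get(("blog",), "site_title")         # pre-parsed path tuple
    cfg.get("blog", "site_title")            # string, resolved via parse_dir
    cfg.get(None, "site_title")              # None: the content's own directory
    cfg.get(None, "missing", default=None)   # suppress ConfigNotFound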
Example #4
    def create_builders(cls, site: Site, content: Content) -> List[Builder]:
        filters = content.get_metadata(site, "filters", {}).copy()

        if "type" not in filters:
            filters["type"] = {"article"}

        if "draft" not in filters:
            filters["draft"] = {False}

        excludes = content.get_metadata(site, "excludes", {}).copy()

        dirnames = content.get_metadata(site, "directories", [])
        if dirnames:
            dirs: Optional[List[PathTuple]] = [
                parse_dir(d, content.src.contentpath[0]) for d in dirnames
            ]
        else:
            dirs = None

        groupby = content.get_metadata(site, "groupby", None)
        groups = site.files.group_items(
            site,
            groupby,
            filters=filters,
            excludes=excludes,
            subdirs=dirs,
            recurse=True,
        )

        n_per_page = int(content.get_metadata(site, "indexpage_max_articles"))
        page_orphan = int(content.get_metadata(site, "indexpage_orphan"))
        indexpage_max_num_pages = int(
            content.get_metadata(site, "indexpage_max_num_pages", 0))

        ret: List[Builder] = []

        for values, group in groups:
            num = len(group)
            num_pages = ((num - 1) // n_per_page) + 1
            rest = num - (num_pages - 1) * n_per_page

            # Fold the last page into the previous one when it would carry
            # no more than page_orphan articles.
            if rest <= page_orphan and num_pages > 1:
                num_pages -= 1

            if indexpage_max_num_pages:
                num_pages = min(num_pages, indexpage_max_num_pages)

            for page in range(num_pages):
                is_last = page == (num_pages - 1)

                f = page * n_per_page
                t = num if is_last else f + n_per_page
                articles = group[f:t]

                value = values[0] if values else ""

                ret.append(cls(content, value, articles, page + 1, num_pages))

        # With no grouping requested and nothing matched, still emit one
        # empty index page.
        if not groupby and not ret:
            ret = [cls(content, "", [], 1, 1)]

        return ret
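
The orphan handling is easiest to follow with concrete numbers; here is a worked run of the arithmetic with illustrative values:

    num, n_per_page, page_orphan = 21, 10, 2
    num_pages = ((num - 1) // n_per_page) + 1   # 3 pages: 10 + 10 + 1
    rest = num - (num_pages - 1) * n_per_page   # 1 article on the last page
    if rest <= page_orphan and num_pages > 1:
        num_pages -= 1                          # folded: 2 pages, the last has 11

The is_last branch in the slicing then extends the final page to num, so the folded orphan articles land on the last remaining page.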
Example #5
    def build(self) -> List[OutputInfo]:
        oi = self.build_outputinfo()

        num_articles = int(
            self.content.get_metadata(self.site, "feed_num_articles"))

        filters = self.content.get_metadata(self.site, "filters", {}).copy()
        if "type" not in filters:
            filters["type"] = {"article"}

        if "draft" not in filters:
            filters["draft"] = {False}

        excludes = self.content.get_metadata(self.site, "excludes", {}).copy()

        dirnames = self.content.get_metadata(self.site, "directories", [])
        if dirnames:
            dirs: Optional[List[PathTuple]] = [
                parse_dir(d, self.content.src.contentpath[0]) for d in dirnames
            ]
        else:
            dirs = None

        contents = list(self.site.files.get_contents(
            self.site,
            filters=filters,
            excludes=excludes,
            subdirs=dirs,
        ))[:num_articles]

        feedtype = self.content.get_metadata(self.site, "feedtype")
        if feedtype == "atom":
            cls = Atom1Feed
        elif feedtype == "rss":
            cls = Rss201rev2Feed
        else:
            raise ValueError(f"Invarid feed type: {feedtype}")

        feed = cls(
            title=self.content.get_metadata(self.site, "site_title"),
            link=self.content.get_metadata(self.site, "site_url"),
            feed_url=self.content.build_url(self, {}),
            description="",
        )

        for c in contents:
            link = c.build_url(self, {})
            description = c.build_abstract(self)
            date = c.get_metadata(self.site, "date")
            updated = c.get_metadata(self.site, "updated")

            if date:
                feed.add_item(
                    title=c.build_title(self),
                    link=link,
                    unique_id=get_tag_uri(link, date),
                    description=str(description),
                    pubdate=date,
                    updateddate=updated,
                )

        body = feed.writeString("utf-8")

        # Write with the encoding the feed itself declares.
        oi.filename.write_text(body, encoding="utf-8")

        return [oi]
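
Atom1Feed, Rss201rev2Feed, and get_tag_uri come from the feedgenerator package (the syndication code extracted from Django). A self-contained sketch of the same feed-building steps, with placeholder URLs and titles:

    from datetime import datetime, timezone

    from feedgenerator import Atom1Feed, get_tag_uri

    feed = Atom1Feed(
        title="Example site",
        link="https://example.com/",
        feed_url="https://example.com/feed.xml",
        description="",
    )
    date = datetime(2024, 1, 1, tzinfo=timezone.utc)
    feed.add_item(
        title="Hello",
        link="https://example.com/hello/",
        unique_id=get_tag_uri("https://example.com/hello/", date),
        description="First post",
        pubdate=date,
    )
    xml = feed.writeString("utf-8")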