Example #1
    def load_dir(self, sitedir: ContentDir) -> List[Page]:
        """
        Handle data files: parse their contents according to their extension
        and dispatch them to a page class chosen by their data_type
        """
        taken = []
        pages = []
        for fname, src in sitedir.files.items():
            mo = re_ext.search(fname)
            if not mo:
                continue
            taken.append(fname)

            meta = sitedir.meta_file(fname)

            fmt = mo.group(1)

            with sitedir.open(fname, src, "rt") as fd:
                try:
                    fm_meta = parse_data(fd, fmt)
                except Exception:
                    log.exception("%s: failed to parse %s content",
                                  src.relpath, fmt)
                    continue

            try:
                # parse_data can return any type; only a dict has .get()
                data_type = fm_meta.get("data_type")
            except AttributeError:
                log.error("%s: data did not parse into a dict", src.relpath)
                continue

            if data_type is None:
                log.error("%s: data_type not found: ignoring page",
                          src.relpath)
                continue

            page_name = fname[:-len(mo.group(0))]
            if page_name != "index":
                meta["site_path"] = os.path.join(sitedir.meta["site_path"],
                                                 page_name)
            else:
                # Index pages are published at the directory's own site path
                meta["site_path"] = sitedir.meta["site_path"]

            meta.update(fm_meta)

            cls = self.page_class_by_type.get(data_type, DataPage)
            page = cls(self.site, src, meta=meta, dir=sitedir)
            pages.append(page)

        for fname in taken:
            del sitedir.files[fname]

        return pages
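
Note: parse_data and re_ext are referenced above but not shown. A minimal
sketch of what they might look like, assuming the loader accepts YAML and
JSON data files (the names and the format set are assumptions, not the
project's actual definitions):

import json
import re
from typing import Any, TextIO

import yaml  # PyYAML, assumed available

# Hypothetical: capture the data format from the file extension
re_ext = re.compile(r"\.(yaml|json)$")


def parse_data(fd: TextIO, fmt: str) -> Any:
    # Hypothetical dispatch on the extension captured by re_ext
    if fmt == "yaml":
        return yaml.safe_load(fd)
    elif fmt == "json":
        return json.load(fd)
    raise ValueError(f"unsupported data format: {fmt}")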
Example #2
    def load_file_meta(self, sitedir: ContentDir, src: File,
                       fname: str) -> Tuple[Meta, DoctreeScan]:
        # Parse document into a doctree and extract docinfo metadata
        with sitedir.open(fname, src, "rt") as fd:
            meta, doctree_scan = self.parse_rest(fd)

        return meta, doctree_scan
Example #3
    def load_dir(self, sitedir: ContentDir) -> List[Page]:
        """
        Handle .links pages that generate the browseable archive of annotated
        external links
        """
        taken: List[str] = []
        pages: List[Page] = []
        for fname, src in sitedir.files.items():
            if not fname.endswith(".links"):
                continue
            taken.append(fname)

            # Strip the ".links" extension
            name = fname[:-6]

            meta = sitedir.meta_file(fname)
            meta["site_path"] = os.path.join(sitedir.meta["site_path"], name)

            try:
                fm_meta = self.load_file_meta(sitedir, src, fname)
            except Exception:
                log.exception("%s: cannot parse links information", src.relpath)
                continue
            meta.update(fm_meta)

            page = LinkIndexPage(self.site, src, meta=meta, name=name,
                                 dir=sitedir, links=self)
            pages.append(page)

            self.indices.append(page)

        for fname in taken:
            del sitedir.files[fname]

        return pages
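
Note: every loader in these examples collects claimed filenames in taken and
deletes them after the loop rather than during it, because deleting from a
dict while iterating over it raises RuntimeError in Python. A standalone
demonstration:

files = {"a.links": 1, "b.md": 2}

# for fname in files:
#     del files[fname]  # RuntimeError: dictionary changed size during iteration

# Collect first, delete afterwards
taken = [fname for fname in files if fname.endswith(".links")]
for fname in taken:
    del files[fname]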
Example #4
    def load_dir(self, sitedir: ContentDir) -> List[Page]:
        """
        Handle .taxonomy pages that define the taxonomies used by the site
        """
        taken: List[str] = []
        pages: List[Page] = []
        for fname, src in sitedir.files.items():
            if not fname.endswith(".taxonomy"):
                continue
            taken.append(fname)

            # Strip the ".taxonomy" extension
            name = fname[:-9]

            meta = sitedir.meta_file(fname)
            meta["site_path"] = os.path.join(sitedir.meta["site_path"], name)

            try:
                fm_meta = self.load_file_meta(sitedir, src, fname)
            except Exception:
                log.exception("%s: cannot parse taxonomy information", src.relpath)
                continue
            meta.update(fm_meta)

            page = TaxonomyPage(self.site, src, meta=meta, name=name, dir=sitedir)
            self.taxonomies[page.name] = page
            pages.append(page)

        for fname in taken:
            del sitedir.files[fname]

        return pages
Example #5
    def load_dir(self, sitedir: ContentDir) -> List[Page]:
        taken: List[str] = []
        pages: List[Page] = []
        for fname, src in sitedir.files.items():
            base, ext = os.path.splitext(fname)
            mimetype = mimetypes.types_map.get(ext)
            if mimetype is None:
                continue

            if not mimetype.startswith("image/"):
                continue

            taken.append(fname)

            meta = sitedir.meta_file(fname)
            # Site path without the extension, used below to index the image
            # in by_related_site_path
            related_site_path = os.path.join(sitedir.meta["site_path"], base)
            meta["site_path"] = os.path.join(sitedir.meta["site_path"], fname)

            img_meta = self.scanner.scan(sitedir, src, mimetype)
            meta.update(img_meta)

            page = Image(self.site,
                         src,
                         meta=meta,
                         dir=sitedir,
                         mimetype=mimetype)
            pages.append(page)

            # Look at the theme's image_sizes and generate ScaledImage pages
            image_sizes = self.site.theme.meta.get("image_sizes")
            # SVG images, for example, have no pixel width and cannot be scaled
            width = meta.get("width")
            if image_sizes and width is not None:
                for name, info in image_sizes.items():
                    # Never scale up: skip sizes at least as wide as the source
                    if info["width"] >= width:
                        continue
                    rel_meta = dict(meta)
                    rel_meta["related"] = {}
                    rel_meta.pop("width", None)
                    rel_meta.pop("height", None)
                    rel_meta.update(info)
                    scaled = ScaledImage.create_from(page,
                                                     rel_meta,
                                                     mimetype=mimetype,
                                                     name=name,
                                                     info=info)
                    pages.append(scaled)

            self.by_related_site_path[related_site_path] = page

        for fname in taken:
            del sitedir.files[fname]

        return pages
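
Note: mimetypes.types_map keys are lowercase extensions with a leading dot,
so a file with an uppercase extension falls through the get() above and is
never claimed as an image; mimetypes.guess_type() lowercases the extension
before looking it up:

import mimetypes

mimetypes.types_map[".jpg"]        # 'image/jpeg'
mimetypes.types_map.get(".JPG")    # None: keys are lowercase only
mimetypes.guess_type("photo.JPG")  # ('image/jpeg', None): lowercased first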
Example #6
    def load_dir(self, sitedir: ContentDir) -> List[Page]:
        # Update the list of yaml tags with information from site.metadata
        if not self.yaml_tags_filled:
            for meta in self.site.metadata.values():
                if meta.structure:
                    self.yaml_tags.add(meta.name)
            self.yaml_tags_filled = True

        taken: List[str] = []
        pages: List[Page] = []
        for fname, src in sitedir.files.items():
            if not fname.endswith(".rst"):
                continue
            taken.append(fname)

            meta = sitedir.meta_file(fname)
            if fname not in ("index.rst", "README.rst"):
                meta["site_path"] = os.path.join(sitedir.meta["site_path"],
                                                 fname[:-4])
            else:
                meta["site_path"] = sitedir.meta["site_path"]

            try:
                fm_meta, doctree_scan = self.load_file_meta(
                    sitedir, src, fname)
            except Exception as e:
                log.debug("%s: failed to parse reStructuredText page: skipped",
                          src,
                          exc_info=True)
                log.warning(
                    "%s: failed to parse reStructuredText page: skipped (%s)",
                    src, e)
                continue

            meta.update(fm_meta)

            page = RstPage(self.site,
                           src,
                           meta=meta,
                           dir=sitedir,
                           feature=self,
                           doctree_scan=doctree_scan)
            pages.append(page)

        for fname in taken:
            del sitedir.files[fname]

        return pages
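
Note: the paired log.debug/log.warning calls above are a common idiom: one
readable line in normal operation, with the full traceback available when
debug logging is enabled. In isolation (names hypothetical):

import logging

log = logging.getLogger(__name__)


def load(src: str) -> None:
    try:
        raise ValueError("bad front matter")  # stand-in for the real parse
    except Exception as e:
        # Full traceback only at debug verbosity
        log.debug("%s: failed to parse page: skipped", src, exc_info=True)
        # Single readable line in normal operation
        log.warning("%s: failed to parse page: skipped (%s)", src, e)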
Example #7
    def load_dir(self, sitedir: ContentDir) -> List[Page]:
        taken: List[str] = []
        pages: List[Page] = []
        for fname, src in sitedir.files.items():
            if not fname.endswith(".md"):
                continue
            taken.append(fname)

            meta = sitedir.meta_file(fname)
            if fname not in ("index.md", "README.md"):
                meta["site_path"] = os.path.join(sitedir.meta["site_path"],
                                                 fname[:-3])
            else:
                meta["site_path"] = sitedir.meta["site_path"]

            try:
                fm_meta, body = self.load_file_meta(sitedir, src, fname)
            except Exception as e:
                log.warning(
                    "%s: failed to parse markdown page front matter (%s): skipped",
                    src, e)
                log.debug(
                    "%s: failed to parse markdown page front matter: skipped",
                    src,
                    exc_info=e)
                continue

            meta.update(fm_meta)

            page = MarkdownPage(self.site,
                                src,
                                meta=meta,
                                dir=sitedir,
                                feature=self,
                                body=body)
            pages.append(page)

        for fname in taken:
            del sitedir.files[fname]

        return pages
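
Note: load_file_meta for markdown returns both the front matter and the body,
but its implementation is not shown. A minimal sketch of the usual approach,
assuming a leading YAML block delimited by "---" lines (the function name and
the YAML-only format are assumptions; the real loader may accept more):

from typing import Any, Dict, Tuple

import yaml  # PyYAML, assumed available


def split_front_matter(text: str) -> Tuple[Dict[str, Any], str]:
    # Hypothetical sketch: a leading YAML block delimited by "---" lines,
    # followed by the markdown body; raises ValueError if the closing
    # delimiter is missing
    if text.startswith("---\n"):
        end = text.index("\n---\n", 4)
        meta = yaml.safe_load(text[4:end]) or {}
        return meta, text[end + 5:]
    return {}, text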
Example #8
    def load_dir(self, sitedir: ContentDir) -> List[Page]:
        # Precompile JINJA2_PAGES patterns
        want_patterns = [
            compile_page_match(p) for p in self.site.settings.JINJA2_PAGES
        ]

        taken: List[str] = []
        pages: List[Page] = []
        for fname, src in sitedir.files.items():
            # Skip files that do not match JINJA2_PAGES: the else branch of
            # the for loop runs only when no pattern matched (no break)
            for pattern in want_patterns:
                if pattern.match(fname):
                    break
            else:
                continue

            meta = sitedir.meta_file(fname)
            if fname != "index.html":
                meta["site_path"] = os.path.join(sitedir.meta["site_path"],
                                                 fname)
            else:
                meta["site_path"] = sitedir.meta["site_path"]

            try:
                page = J2Page(self.site,
                              src,
                              meta=meta,
                              dir=sitedir,
                              feature=self)
            except IgnorePage:
                continue

            # Claim the file only if the page was actually created
            taken.append(fname)
            pages.append(page)

        for fname in taken:
            del sitedir.files[fname]

        return pages
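
Note: compile_page_match is not shown; one plausible implementation, assuming
JINJA2_PAGES holds shell-style globs such as "*.html" (the glob format is an
assumption, not confirmed by the source):

import fnmatch
import re


def compile_page_match(pattern: str) -> "re.Pattern[str]":
    # Hypothetical: translate a shell glob into an anchored regex
    return re.compile(fnmatch.translate(pattern))


# compile_page_match("*.html").match("about.html") -> matches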