def inline_all_toctrees(builder: "Builder", docnameset: Set[str], docname: str,
                        tree: nodes.document, colorfunc: Callable,
                        traversed: List[str]) -> nodes.document:
    """Inline all toctrees in the *tree*.

    Record all docnames in *docnameset*, and output docnames with *colorfunc*.

    :param builder: Active builder; its env is used to load referenced doctrees.
    :param docnameset: Out-parameter collecting every docname successfully inlined.
    :param docname: Name of the document owning *tree* (used for warning locations).
    :param tree: Doctree whose toctree nodes are expanded in place (on a copy).
    :param colorfunc: Callable applied to each docname before logging it.
    :param traversed: Docnames already visited; guards against toctree cycles.
    :returns: A deep copy of *tree* with every toctree node replaced by the
        inlined content of its included files.
    """
    # Work on a deep copy so the cached/original doctree is left untouched.
    tree = cast(nodes.document, tree.deepcopy())
    for toctreenode in tree.traverse(addnodes.toctree):
        newnodes = []
        includefiles = map(str, toctreenode['includefiles'])
        for includefile in includefiles:
            # Skip files already seen to avoid infinite recursion on cycles.
            if includefile not in traversed:
                try:
                    traversed.append(includefile)
                    logger.info(colorfunc(includefile) + " ", nonl=True)
                    # Recurse so nested toctrees are inlined as well.
                    subtree = inline_all_toctrees(
                        builder, docnameset, includefile,
                        builder.env.get_doctree(includefile),
                        colorfunc, traversed)
                    docnameset.add(includefile)
                except Exception:
                    # get_doctree raises if the file does not exist / failed to read.
                    logger.warning(
                        __('toctree contains ref to nonexisting file %r'),
                        includefile, location=docname)
                else:
                    # Wrap the inlined content so downstream code still knows
                    # which source file each section came from.
                    sof = addnodes.start_of_file(docname=includefile)
                    sof.children = subtree.children
                    for sectionnode in sof.traverse(nodes.section):
                        if 'docname' not in sectionnode:
                            sectionnode['docname'] = includefile
                    newnodes.append(sof)
        # Replace the toctree placeholder with the inlined documents.
        toctreenode.parent.replace(toctreenode, newnodes)
    return tree
def make_glossary_term(env: "BuildEnvironment", textnodes: Iterable[Node],
                       index_key: str, source: str, lineno: int, node_id: str,
                       document: nodes.document) -> nodes.term:
    """Build a glossary ``term`` node and register it as a cross-reference target.

    The term is recorded in the ``std`` domain and an index entry is appended
    as a child of the returned node.

    :param node_id: Pre-assigned node id (mainly from the i18n module); if
        falsy, a fresh id is generated from the term text.
    """
    # get a text-only representation of the term and register it
    # as a cross-reference target
    term = nodes.term('', '', *textnodes)
    term.source = source
    term.line = lineno
    termtext = term.astext()

    if node_id:
        # node_id is given from outside (mainly i18n module), use it forcedly
        term['ids'].append(node_id)
    else:
        node_id = make_id(env, document, 'term', termtext)
        term['ids'].append(node_id)
        document.note_explicit_target(term)

    std = cast(StandardDomain, env.get_domain('std'))
    std.note_object('term', termtext, node_id, location=term)

    # add an index entry too
    indexnode = addnodes.index()
    indexnode['entries'] = [('single', termtext, node_id, 'main', index_key)]
    indexnode.source, indexnode.line = term.source, term.line
    term.append(indexnode)

    return term
def feed(self, docname: str, filename: str, title: str,
         doctree: nodes.document) -> None:
    """Feed a doctree to the index.

    Collects words from titles and body text, stems them, and records which
    documents contain each (stemmed) word in the title/body mappings.
    """
    self._titles[docname] = title
    self._filenames[docname] = filename

    visitor = WordCollector(doctree, self.lang)
    doctree.walk(visitor)

    # memoize self.lang.stem
    def stem(word: str) -> str:
        try:
            return self._stem_cache[word]
        except KeyError:
            self._stem_cache[word] = self.lang.stem(word).lower()
            return self._stem_cache[word]
    _filter = self.lang.word_filter

    for word in visitor.found_title_words:
        stemmed_word = stem(word)
        if _filter(stemmed_word):
            self._title_mapping.setdefault(stemmed_word, set()).add(docname)
        elif _filter(word):  # stemmer must not remove words from search index
            self._title_mapping.setdefault(word, set()).add(docname)

    for word in visitor.found_words:
        stemmed_word = stem(word)
        # again, stemmer must not remove words from search index
        if not _filter(stemmed_word) and _filter(word):
            stemmed_word = word
        # A word already indexed via the title mapping need not be added to
        # the body mapping too.
        already_indexed = docname in self._title_mapping.get(stemmed_word, set())
        if _filter(stemmed_word) and not already_indexed:
            self._mapping.setdefault(stemmed_word, set()).add(docname)
def make_footnote_ref(doc: nodes.document, label: str) -> nodes.footnote_reference:
    """Build an auto-numbered footnote_reference node carrying *label* as text."""
    ref = nodes.footnote_reference('[#]_')
    ref += nodes.Text(label)
    # Register the reference so docutils can number it automatically.
    doc.note_autofootnote_ref(ref)
    return ref
def fix_ids(self, tree: nodes.document) -> None:
    """Replace colons with hyphens in href and id attributes.

    Some readers crash because they interpret the part as a
    transport protocol specification.
    """
    def update_node_id(node: Element) -> None:
        """Update IDs of given *node*, dropping duplicates produced by the rewrite."""
        new_ids = []
        for node_id in node['ids']:
            new_id = self.fix_fragment('', node_id)
            if new_id not in new_ids:
                new_ids.append(new_id)
        node['ids'] = new_ids

    for reference in tree.traverse(nodes.reference):
        if 'refuri' in reference:
            m = self.refuri_re.match(reference['refuri'])
            if m:
                reference['refuri'] = self.fix_fragment(m.group(1), m.group(2))
        if 'refid' in reference:
            reference['refid'] = self.fix_fragment('', reference['refid'])

    for target in tree.traverse(nodes.target):
        update_node_id(target)

        # Also fix the element the target points at (the next element node).
        next_node = target.next_node(ascend=True)  # type: Node
        if isinstance(next_node, nodes.Element):
            update_node_id(next_node)

    for desc_signature in tree.traverse(addnodes.desc_signature):
        update_node_id(desc_signature)
def process_doc(self, app: Sphinx, doctree: nodes.document) -> None:
    """Process the docinfo part of the doctree as metadata.

    Keep processing minimal -- just return what docutils says.
    The docinfo node is removed from the doctree once harvested.
    """
    if len(doctree) > 0 and isinstance(doctree[0], nodes.docinfo):
        md = app.env.metadata[app.env.docname]
        for node in doctree[0]:
            # nodes are multiply inherited...
            if isinstance(node, nodes.authors):
                authors = cast(List[nodes.author], node)
                md['authors'] = [author.astext() for author in authors]
            elif isinstance(node, nodes.field):
                # a field is exactly (field_name, field_body)
                assert len(node) == 2
                field_name = cast(nodes.field_name, node[0])
                field_body = cast(nodes.field_body, node[1])
                md[field_name.astext()] = field_body.astext()
            elif isinstance(node, nodes.TextElement):
                # other children must be TextElement
                # see: http://docutils.sourceforge.net/docs/ref/doctree.html#bibliographic-elements  # NOQA
                md[node.__class__.__name__] = node.astext()

        # Coerce known integer-valued metadata; fall back to 0 on bad input.
        for name, value in md.items():
            if name in ('tocdepth', ):
                try:
                    value = int(value)
                except ValueError:
                    value = 0
                md[name] = value

        doctree.pop(0)
def process(self, doctree: nodes.document, docname: str) -> None:
    """Resolve ref/numref nodes that point at exercise directives in *doctree*."""
    # Update titles of linked nodes first.
    for node in doctree.traverse(linked_node):
        self._update_linked_node_title(node)

    # Traverse ref and numref nodes
    for node in doctree.traverse():
        # Plain :ref: cross-reference
        if isinstance(node, nodes.reference):
            labelid = self._get_refuri(node)
            # Only rewrite references that target one of our directives.
            if labelid in self.env.exercise_list:
                # Update displayed href text
                self._update_ref(node, labelid)
        # Numbered :numref: cross-reference
        if isinstance(node, number_reference):
            labelid = self._get_refuri(node)
            # Only rewrite references that target one of our directives.
            if labelid in self.env.exercise_list:
                # Update displayed href text
                self._update_numref(node, labelid)
def fix_ids(self, tree: nodes.document) -> None:
    """Replace colons with hyphens in href and id attributes.

    Some readers crash because they interpret the part as a
    transport protocol specification.
    """
    for reference in tree.traverse(nodes.reference):
        if 'refuri' in reference:
            m = self.refuri_re.match(reference['refuri'])
            if m:
                reference['refuri'] = self.fix_fragment(m.group(1), m.group(2))
        if 'refid' in reference:
            reference['refid'] = self.fix_fragment('', reference['refid'])

    for target in tree.traverse(nodes.target):
        for i, node_id in enumerate(target['ids']):
            if ':' in node_id:
                target['ids'][i] = self.fix_fragment('', node_id)

        # Also fix the element the target points at (the next element node).
        next_node = target.next_node(ascend=True)  # type: Node
        if isinstance(next_node, nodes.Element):
            for i, node_id in enumerate(next_node['ids']):
                if ':' in node_id:
                    next_node['ids'][i] = self.fix_fragment('', node_id)

    for desc_signature in tree.traverse(addnodes.desc_signature):
        # Rebuild the id list in one pass.  (The previous version accumulated
        # into a temporary list and shadowed the builtin ``id``.)
        desc_signature.attributes['ids'] = [
            self.fix_fragment('', node_id)
            for node_id in desc_signature.attributes['ids']
        ]
def get_tags(context: Dict[str, Any], doctree: nodes.document,
             config: Dict[str, Any]) -> str:
    """Render the Open Graph ``<meta>`` tags for a page as one string.

    :param context: HTML page context (title, pagename, file_suffix, ...).
    :param doctree: Doctree walked to extract the page description.
    :param config: Sphinx config values (``ogp_*`` options).
    """
    # Set length of description
    try:
        desc_len = int(config["ogp_description_length"])
    except ValueError:
        desc_len = DEFAULT_DESCRIPTION_LENGTH

    # Get the title and parse any html in it
    htp = HTMLTextParser()
    htp.feed(context["title"])
    htp.close()

    # Parse/walk doctree for metadata (tag/description)
    mcv = OGMetadataCreatorVisitor(desc_len, [htp.text, htp.text_outside_tags])
    doctree.walkabout(mcv)

    tags = "\n "

    # title tag
    tags += make_tag("og:title", htp.text)

    # type tag
    tags += make_tag("og:type", config["ogp_type"])

    # url tag
    # Get the URL of the specific page
    page_url = urljoin(config["ogp_site_url"],
                       context["pagename"] + context["file_suffix"])
    tags += make_tag("og:url", page_url)

    # site name tag
    site_name = config["ogp_site_name"]
    if site_name:
        tags += make_tag("og:site_name", site_name)

    # description tag
    tags += make_tag("og:description", mcv.description)

    # image tag
    # Get the image from the config
    image_url = config["ogp_image"]
    if image_url:
        tags += make_tag("og:image", image_url)

    # Add image alt text (either provided by config or from site_name)
    ogp_image_alt = config["ogp_image_alt"]
    if isinstance(ogp_image_alt, str):
        tags += make_tag("og:image:alt", ogp_image_alt)
    elif ogp_image_alt and site_name:
        tags += make_tag("og:image:alt", site_name)
    elif ogp_image_alt and htp.text:
        tags += make_tag("og:image:alt", htp.text)

    # custom tags
    tags += "\n".join(config["ogp_custom_meta_tags"])

    return tags
def html_meta_to_nodes(
    data: Dict[str, Any], document: nodes.document, line: int, reporter: Reporter
) -> List[Union[nodes.pending, nodes.system_message]]:
    """Replicate the `meta` directive, by converting a dictionary to a list of
    pending meta nodes

    See:
    https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html#html-metadata
    """
    if not data:
        return []

    try:
        # Prefer Sphinx's meta node when Sphinx is available.
        from sphinx.addnodes import meta as meta_cls
    except ImportError:
        from docutils.parsers.rst.directives.html import MetaBody

        meta_cls = MetaBody.meta  # type: ignore

    output = []

    for key, value in data.items():
        content = str(value or "")
        meta_node = meta_cls(content)
        meta_node.source = document["source"]
        meta_node.line = line
        meta_node["content"] = content
        try:
            if not content:
                raise ValueError("No content")
            # The key may contain extra attributes, e.g. "name lang=en".
            for i, key_part in enumerate(key.split()):
                # The first part with no "=" is the meta name itself.
                if "=" not in key_part and i == 0:
                    meta_node["name"] = key_part
                    continue
                if "=" not in key_part:
                    raise ValueError(f"no '=' in {key_part}")
                attr_name, attr_val = key_part.split("=", 1)
                if not (attr_name and attr_val):
                    raise ValueError(f"malformed {key_part}")
                meta_node[attr_name.lower()] = attr_val
        except ValueError as error:
            # Report a parse problem for this key but keep processing the rest.
            msg = reporter.error(
                f'Error parsing meta tag attribute "{key}": {error}.')
            output.append(msg)
            continue

        # Defer insertion: the node is only meaningful for the HTML writer.
        pending = nodes.pending(
            Filter,
            {
                "component": "writer",
                "format": "html",
                "nodes": [meta_node]
            },
        )
        document.note_pending(pending)
        output.append(pending)

    return output
def make_footnote(doc: nodes.document, label: str, uri: str) -> nodes.footnote:
    """Build a footnote node whose body is *uri* and whose label is *label*."""
    note = nodes.footnote(uri)
    body = nodes.paragraph()
    body += nodes.Text(uri)
    note += body
    # The label node must come first within the footnote.
    note.insert(0, nodes.label('', label))
    doc.note_autofootnote(note)
    return note
def get_description(
    doctree: nodes.document,
    description_length: int,
    known_titles: Iterable[str] = None,
    document: nodes.document = None,
):
    """Walk *doctree* and return its extracted description text.

    :param description_length: Maximum length of the produced description.
    :param known_titles: Titles to exclude from the description, if any.
    :param document: Optional document passed through to the parser.
    """
    # DescriptionParser accumulates text while walking the tree.
    mcv = DescriptionParser(description_length, known_titles, document)
    doctree.walkabout(mcv)
    return mcv.description
def get_description(
    doctree: nodes.document,
    description_length: int,
    known_titles: Iterable[str] = None,
    document: nodes.document = None,
):
    """Walk *doctree* and return its extracted description text, HTML-safe.

    :param description_length: Maximum length of the produced description.
    :param known_titles: Titles to exclude from the description, if any.
    :param document: Optional document passed through to the parser.
    """
    mcv = DescriptionParser(description_length, known_titles, document)
    doctree.walkabout(mcv)
    # Parse quotation so they won't break html tags if smart quotes are disabled
    return mcv.description.replace('"', "&quot;")
def parse(self, inputstring: str, document: nodes.document) -> None:
    """Parse source text.

    :param inputstring: The source string to parse
    :param document: The root docutils node to add AST elements to
    """
    self.setup_parse(inputstring, document)

    # check for exorbitantly long lines
    if hasattr(document.settings, "line_length_limit"):
        for i, line in enumerate(inputstring.split("\n")):
            if len(line) > document.settings.line_length_limit:
                error = document.reporter.error(
                    f"Line {i+1} exceeds the line-length-limit:"
                    f" {document.settings.line_length_limit}."
                )
                document.append(error)
                return

    # create parsing configuration from the global config
    try:
        config = create_myst_config(document.settings, DOCUTILS_EXCLUDED_ARGS)
    except Exception as exc:
        # Report but continue with a default configuration.
        error = document.reporter.error(f"Global myst configuration invalid: {exc}")
        document.append(error)
        config = MdParserConfig()

    # update the global config with the file-level config
    try:
        topmatter = read_topmatter(inputstring)
    except TopmatterReadError:
        pass  # this will be reported during the render
    else:
        if topmatter:
            warning = lambda wtype, msg: create_warning(  # noqa: E731
                document, msg, line=1, append_to=document, subtype=wtype
            )
            config = merge_file_level(config, topmatter, warning)

    # parse content
    parser = create_md_parser(config, DocutilsRenderer)
    parser.options["document"] = document
    parser.render(inputstring)

    # post-processing

    # replace raw nodes if raw is not allowed
    if not getattr(document.settings, "raw_enabled", True):
        for node in document.traverse(nodes.raw):
            warning = document.reporter.warning("Raw content disabled.")
            node.parent.replace(node, warning)

    self.finish_parse()
def write_doctree(self, docname: str, doctree: nodes.document) -> None:
    """Write the doctree to a pickle file under ``self.doctreedir``.

    Unpicklable references (reporter, transformer, env, ...) are stripped
    before dumping.
    """
    # make it picklable
    doctree.reporter = None
    doctree.transformer = None

    # Create a copy of the settings object before modification because it
    # is shared with other documents (mutating it in place would corrupt
    # their settings too).
    doctree.settings = doctree.settings.copy()
    doctree.settings.warning_stream = None
    doctree.settings.env = None
    doctree.settings.record_dependencies = None

    doctree_filename = path.join(self.doctreedir, docname + '.doctree')
    ensuredir(path.dirname(doctree_filename))
    with open(doctree_filename, 'wb') as f:
        pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
def result_nodes(self, document: nodes.document, env: "BuildEnvironment",
                 node: Element, is_ref: bool) -> Tuple[List[Node], List[system_message]]:
    """Attach index and target nodes to an environment-variable reference.

    For non-reference roles the node is returned unchanged; otherwise an
    index entry pair and an anchor target are emitted before the node.
    """
    if not is_ref:
        return [node], []

    varname = node['reftarget']
    # Fresh unique anchor id for this occurrence.
    tgtid = 'index-%s' % env.new_serialno('index')
    indexnode = addnodes.index()
    indexnode['entries'] = [
        ('single', varname, tgtid, '', None),
        ('single', _('environment variable; %s') % varname, tgtid, '', None)
    ]
    targetnode = nodes.target('', '', ids=[tgtid])
    document.note_explicit_target(targetnode)
    return [indexnode, targetnode, node], []
def make_glossary_term(env: "BuildEnvironment", textnodes: Iterable[Node],
                       index_key: str, source: str, lineno: int,
                       node_id: str = None,
                       document: nodes.document = None) -> nodes.term:
    """Build a glossary ``term`` node and register it as a cross-reference target.

    When *document* is omitted a deprecated fallback path generates the node
    id from the environment's glossary serial numbers.
    """
    # get a text-only representation of the term and register it
    # as a cross-reference target
    term = nodes.term('', '', *textnodes)
    term.source = source
    term.line = lineno
    termtext = term.astext()

    if node_id:
        # node_id is given from outside (mainly i18n module), use it forcedly
        term['ids'].append(node_id)
    elif document:
        node_id = make_id(env, document, 'term', termtext)
        term['ids'].append(node_id)
        document.note_explicit_target(term)
    else:
        warnings.warn('make_glossary_term() expects document is passed as an argument.',
                      RemovedInSphinx40Warning)
        # Deprecated path: derive an id from the term text, falling back to a
        # serial number when the text yields no usable id or a collision.
        gloss_entries = env.temp_data.setdefault('gloss_entries', set())
        node_id = nodes.make_id('term-' + termtext)
        if node_id == 'term':
            # "term" is not good for node_id.  Generate it by sequence number instead.
            node_id = 'term-%d' % env.new_serialno('glossary')

        while node_id in gloss_entries:
            node_id = 'term-%d' % env.new_serialno('glossary')
        gloss_entries.add(node_id)
        term['ids'].append(node_id)

    std = cast(StandardDomain, env.get_domain('std'))
    std.note_object('term', termtext.lower(), node_id, location=(env.docname, lineno))

    # add an index entry too
    indexnode = addnodes.index()
    indexnode['entries'] = [('single', termtext, node_id, 'main', index_key)]
    indexnode.source, indexnode.line = term.source, term.line
    term.append(indexnode)

    return term
def resolve_doctitle(
    doctree: nodes.document
) -> Tuple[Optional[nodes.title], Optional[nodes.title]]:
    """Return the document's ``(title, subtitle)`` nodes; either may be None."""
    sectnode = doctree.next_node(nodes.section)
    if not sectnode:
        return (None, None)

    title = _safe_descend(sectnode, 0)

    # NOTE: nodes.subtitle does not make sense because Sphinx doesn't support
    # subtitle:
    #
    # > Sphinx does not support a "subtitle".
    # > Sphinx recognizes it as a mere second level section
    #
    # ref:
    # - https://github.com/sphinx-doc/sphinx/issues/3574#issuecomment-288722585
    # - https://github.com/sphinx-doc/sphinx/issues/3567#issuecomment-288093991
    #
    # HACK: For our convenience, we regard second level section title
    # (under document) as subtitle::
    #    <section>
    #        <title>
    #        <section>
    #            <(sub)title>
    if len(sectnode) == 2:
        candidate = sectnode[1][0]
        if isinstance(candidate, nodes.title):
            return (title, candidate)

    return (title, None)
def get_and_resolve_doctree(self, docname: str, builder: "Builder",
                            doctree: nodes.document = None,
                            prune_toctrees: bool = True,
                            includehidden: bool = False) -> nodes.document:
    """Read the doctree from the pickle, resolve cross-references and
    toctrees and return it.

    :param doctree: Pre-loaded doctree; when None it is read via get_doctree().
    :param prune_toctrees: Passed through to TocTree.resolve() as ``prune``.
    :param includehidden: Whether hidden toctree entries are resolved too.
    """
    if doctree is None:
        doctree = self.get_doctree(docname)

    # resolve all pending cross-references
    self.apply_post_transforms(doctree, docname)

    # now, resolve all toctree nodes
    for toctreenode in doctree.traverse(addnodes.toctree):
        result = TocTree(self).resolve(docname, builder, toctreenode,
                                       prune=prune_toctrees,
                                       includehidden=includehidden)
        if result is None:
            # Nothing to show for this toctree; drop the placeholder node.
            toctreenode.replace_self([])
        else:
            toctreenode.replace_self(result)

    return doctree
def write_doc(self, docname: str, doctree: document) -> None:
    """Tag external links to open in a new tab, then delegate the write."""
    for refnode in doctree.traverse(nodes.reference):
        # External references carry no 'internal' flag but do have a refuri.
        if 'refuri' in refnode and refnode.get('internal') is None:
            # add ``target=_blank`` attributes to external links
            refnode['target'] = '_blank'

    super().write_doc(docname, doctree)
def process(self, doctree: nodes.document, docname: str) -> None: todos = sum(self.domain.todos.values(), []) # type: List[todo_node] document = new_document('') for node in doctree.traverse(todolist): if not self.config.todo_include_todos: node.parent.remove(node) continue if node.get('ids'): content = [nodes.target()] # type: List[Element] else: content = [] for todo in todos: # Create a copy of the todo node new_todo = todo.deepcopy() new_todo['ids'].clear() # (Recursively) resolve references in the todo content # # Note: To resolve references, it is needed to wrap it with document node document += new_todo self.env.resolve_references(document, todo['docname'], self.builder) document.remove(new_todo) content.append(new_todo) todo_ref = self.create_todo_reference(todo, docname) content.append(todo_ref) node.replace_self(content)
def process_todos(app: Sphinx, doctree: nodes.document) -> None: warnings.warn('process_todos() is deprecated.', RemovedInSphinx40Warning, stacklevel=2) # collect all todos in the environment # this is not done in the directive itself because it some transformations # must have already been run, e.g. substitutions env = app.builder.env if not hasattr(env, 'todo_all_todos'): env.todo_all_todos = [] # type: ignore for node in doctree.traverse(todo_node): app.events.emit('todo-defined', node) newnode = node.deepcopy() newnode['ids'] = [] env.todo_all_todos.append({ # type: ignore 'docname': env.docname, 'source': node.source or env.doc2path(env.docname), 'lineno': node.line, 'todo': newnode, 'target': node['ids'][0], }) if env.config.todo_emit_warnings: label = cast(nodes.Element, node[1]) logger.warning(__("TODO entry found: %s"), label.astext(), location=node)
def write_doc(self, docname: str, doctree: nodes.document) -> None:
    """Render *doctree* to an HTML fragment and hand it to the page writer.

    The index page is skipped; other pages get a context (with an optional
    external source URL) and a card definition.
    """
    destination = StringOutput(encoding="utf-8")
    doctree.settings = self.docsettings

    self.secnumbers = self.env.toc_secnumbers.get(docname, {})
    self.fignumbers = self.env.toc_fignumbers.get(docname, {})
    self.imgpath = self.resources_path
    self.dlpath = self.resources_path
    self.current_docname = docname

    if (
        doctree.children
        and doctree.children[0].children
        and doctree.children[0].children[0].tagname == "title"
    ):
        # Removing the h1 header to avoid having it duplicated.
        doctree.children[0].children.pop(0)

    self.docwriter.write(doctree, destination)
    self.docwriter.assemble_parts()
    body = self.docwriter.parts["fragment"]
    metatags = self.docwriter.clean_meta

    base_docname = os.path.basename(docname)
    if base_docname != "index":
        ctx = self.get_doc_context(docname, body, metatags)
        if self.config.html_published_location and doctree.children:
            ctx["source_url"] = self.build_external_url(docname)
        self.handle_page(docname, ctx, event_arg=doctree)
        self.write_card_definition(docname)
def from_doctree(cls, doctree: nodes.document, builder: Builder) -> "AnswerKey": """Create answer key from doctree.""" # questions will be used to instantiate an AnswerKey questions = [] for mcq in doctree.traverse(mcqnodes.mcq): question = McqQuestion.from_node(mcq, builder) # Populate question.choices from the list of mcq_choice nodes in mcq. # Each choice contains text of the answer choice as well as any feedback. try: choices_list_items = mcq.children[1].children except IndexError: choices_list_items = [] for choice in choices_list_items: choice_data = McqChoice.from_node(choice, builder) # Find feedback node and attach its data to choice_data fb_index = choice.first_child_matching_class( mcqnodes.mcq_choice_feedback) fb_node = choice[fb_index] choice_data.feedback = McqFeedback.from_node(fb_node, builder) question.choices.append(choice_data) # Finished building question, so append to questions list questions.append(question) return cls(questions)
def process_doc(self, app: Sphinx, doctree: nodes.document) -> None:
    """Register image files referenced by Revealjs nodes as build dependencies.

    Remote URIs and files under a static path are left untouched; readable
    local images are added to the env image collection and to the element's
    image map.
    """
    docname = app.env.docname
    static_paths = app.builder.config['html_static_path']
    for node in doctree.traverse(RevealjsNode):
        elm = getattr(node, 'revealit_el', None)
        if isinstance(elm, RjsElement):
            for img_uri in elm.get_image_uris():
                uri = directives.uri(img_uri)
                # Skip remote URLs and anything served from a static path.
                if uri.find('://') != -1 or any(
                        uri.startswith(p) for p in static_paths):
                    continue
                # Update imgpath to a relative path from srcdir
                # from a relative path from current document.
                imgpath, _ = app.env.relfn2path(uri, docname)
                app.env.dependencies[docname].add(imgpath)
                if not os.access(os.path.join(app.srcdir, imgpath), os.R_OK):
                    logger.warning(__('image file not readable: %s') % imgpath,
                                   type='image', location=node,
                                   subtype='not_readable')
                    continue
                app.env.images.add_file(docname, imgpath)
                elm.images[img_uri] = imgpath
def on_html_page_context(app: Sphinx, pagename: str, templatename: str, context, doctree: nodes.document) -> None: """Called when the HTML builder has created a context dictionary to render a template with. Conditionally adding isso client script to <head /> if the directive is used in a page. :param sphinx.application.Sphinx app: Sphinx application object. :param str pagename: Name of the page being rendered (without .html or any file extension). :param str templatename: Page name with .html. :param dict context: Jinja2 HTML context. :param docutils.nodes.document doctree: Tree of docutils nodes. """ # Only embed comments for documents if not doctree: return # We supports embed mulitple comments box in same document for node in doctree.traverse(IssoNode): kwargs = { 'data-isso': app.config.isso_url, } for cfg in CONFIG_ITEMS: val = getattr(app.config, cfg) if val is not None: # Maybe 0, False, '' or anything issocfg, issoval = ext_config_to_isso_config(cfg, val) kwargs[issocfg] = issoval js_path = posixpath.join(app.config.isso_url, 'js/embed.min.js') app.add_js_file(js_path, **kwargs)
def process_images(self, doctree: nodes.document, src: str) -> None:
    """Handle any images contained in the document.

    This ensures that the actual image files referenced by the document are
    copied to the ``resources`` folder. It also ensures that the reference to
    the image within the document is rewritten to work with the resources
    folder.

    Parameters
    ----------
    doctree:
       The doctree to process
    src:
       The path to the file containing the document being processed.
    """
    docpath = pathlib.Path(src)
    for image in list(doctree.traverse(condition=nodes.image)):
        source = pathlib.Path(self.app.srcdir, image["uri"])
        # Destination mirrors the document's path under ``resources``.
        destination = pathlib.Path(
            "resources", docpath.with_suffix(""), source.name
        )
        refuri = relative_uri(src, str(destination))

        logger.debug("[tutorial]: image src:  %s", source)
        logger.debug("[tutorial]: image dest: %s", destination)
        logger.debug("[tutorial]: image ref:  %s", refuri)

        # Queue the file copy and point the node at the copied location.
        self.resources[str(destination)] = ("copy", source)
        image["uri"] = refuri
def process_doc(
    self: asset.DownloadFileCollector,
    app: application.Sphinx,
    doctree: nodes.document,
) -> None:
    """
    This function is different from the original method only in doing some
    surgery on the paths it finds when a separate root directory is configured.
    """
    for node in doctree.traverse(addnodes.download_reference):
        targetname = node["reftarget"]
        if "://" in targetname:
            # Remote target: link directly, nothing to collect.
            node["refuri"] = targetname
        else:
            rel_filename, filename = app.env.relfn2path(targetname, app.env.docname)
            if app.config.dai_downloads_root:
                # Re-anchor the path under the configured downloads root.
                filename = os.path.abspath(
                    os.path.join(app.config.dai_downloads_root, rel_filename))
                rel_filename = os.path.relpath(filename, app.env.srcdir)
            app.env.dependencies[app.env.docname].add(rel_filename)
            if not os.access(filename, os.R_OK):
                logger.warning(__("download file not readable: %s") % filename)
                continue
            node["filename"] = app.env.dlfiles.add_file(
                app.env.docname, rel_filename)
def write_doctree(self, docname: str, doctree: nodes.document) -> None: """Write the doctree to a file.""" # make it picklable doctree.reporter = None doctree.transformer = None # Create a copy of settings object before modification because it is # shared with other documents. doctree.settings = doctree.settings.copy() doctree.settings.warning_stream = None doctree.settings.env = None doctree.settings.record_dependencies = None doctree_filename = path.join(self.doctreedir, docname + '.doctree') ensuredir(path.dirname(doctree_filename)) with open(doctree_filename, 'wb') as f: pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
def process(self, doctree: nodes.document, docname: str) -> None:
    """Replace every asyncapi_overview placeholder with a rendered channel table."""
    # Flatten the per-document channel lists into a single list.
    # (The previous version also built an unused ``new_document('')`` and
    # carried a copy-pasted ``List[todo_node]`` type comment.)
    channels = sum(self.domain.channels.values(), [])
    for node in doctree.traverse(asyncapi_overview):
        table = self.create_full_table(node, channels, docname)
        node.replace_self(table)