def doctree_resolved(app, doctree, docname):
    i = 1
    figids = {}
    for figure_info in doctree.traverse(figure):
        if app.builder.name != 'latex' and app.config.number_figures:
            for cap in figure_info.traverse(caption):
                cap[0] = Text("%s %d: %s" % (app.config.figure_caption_prefix, i, cap[0]))
        for id in figure_info['ids']:
            figids[id] = i
        i += 1

    # replace numfig nodes with links
    if app.builder.name != 'latex':
        for ref_info in doctree.traverse(num_ref):
            if '#' in ref_info['reftarget']:
                label, target = ref_info['reftarget'].split('#')
                labelfmt = label + " %d"
            else:
                labelfmt = '%d'
                target = ref_info['reftarget']
            if target not in figids:
                continue
            if app.builder.name == 'html':
                target_doc = app.builder.env.figid_docname_map[target]
                link = "%s#%s" % (app.builder.get_relative_uri(docname, target_doc), target)
                html = '<a class="pageref" href="%s">Fig. %s</a>' % (link, labelfmt % (figids[target]))
                ref_info.replace_self(raw(html, html, format='html'))
            else:
                ref_info.replace_self(Text(labelfmt % (figids[target])))

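# A minimal, hypothetical setup() sketch showing how a numfig-style handler like the
# one above could be wired into Sphinx. The figure/caption/num_ref node classes and
# the config value names are taken from the handler itself; the defaults and return
# metadata below are assumptions for illustration, not the original project's code.
def setup(app):
    app.add_config_value('number_figures', True, 'env')
    app.add_config_value('figure_caption_prefix', 'Figure', 'env')
    app.add_node(num_ref)  # custom cross-reference node consumed by the handler above
    app.connect('doctree-resolved', doctree_resolved)
    return {'parallel_read_safe': True}
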
def _mk_sub_command(self, aliases: list[str], help_msg: str, parser: ArgumentParser) -> section:
    sub_title_prefix: str = self.options["group_sub_title_prefix"]
    title_prefix: str = self.options["group_title_prefix"]
    title_text = self._build_sub_cmd_title(parser, sub_title_prefix, title_prefix)
    title_ref: str = parser.prog
    if aliases:
        aliases_text: str = f" ({', '.join(aliases)})"
        title_text += aliases_text
        title_ref += aliases_text
    title_text = title_text.strip()
    ref_id = make_id(title_ref)
    group_section = section("", title("", Text(title_text)), ids=[ref_id], names=[title_ref])
    self._register_ref(ref_id, title_ref, group_section)

    command_desc = (parser.description or help_msg or "").strip()
    if command_desc:
        desc_paragraph = paragraph("", Text(command_desc))
        group_section += desc_paragraph

    group_section += self._mk_usage(parser)
    for group in parser._action_groups:
        if not group._group_actions:  # do not show empty groups
            continue
        group_section += self._mk_option_group(group, prefix=parser.prog)
    return group_section

def _mk_option_line(self, action: Action, prefix: str) -> list_item:
    line = paragraph()
    as_key = action.dest
    if action.metavar:
        as_key = action.metavar if isinstance(action.metavar, str) else action.metavar[0]
    if action.option_strings:
        first = True
        is_flag = action.nargs == 0
        for opt in action.option_strings:
            if first:
                first = False
            else:
                line += Text(", ")
            self._mk_option_name(line, prefix, opt)
            if not is_flag:
                line += Text(" ")
                line += literal(text=as_key.upper())
    else:
        self._mk_option_name(line, prefix, as_key)
    point = list_item("", line, ids=[])
    if action.help:
        help_text = load_help_text(action.help)
        temp = paragraph()
        self.state.nested_parse(StringList(help_text.split("\n")), 0, temp)
        line += Text(" - ")
        for content in cast(paragraph, temp.children[0]).children:
            line += content
    if action.default != SUPPRESS and not re.match(r".*[ (]default[s]? .*", (action.help or "")):
        line += Text(" (default: ")
        line += literal(text=str(action.default).replace(os.getcwd(), "{cwd}"))
        line += Text(")")
    return point

def relabel_references(app, doc):
    # Change 'hash-ref' to 'ref' in label text
    for citation_node in doc.traverse(citation):
        if _ascend(citation_node, desc_content) is None:
            # no desc node in ancestry -> not in a docstring
            # XXX: should we also somehow check it's in a References section?
            continue
        label_node = citation_node[0]
        prefix, _, new_label = label_node[0].astext().partition('-')
        assert len(prefix) == HASH_LEN + 1
        new_text = Text(new_label)
        label_node.replace(label_node[0], new_text)

        for id in citation_node['backrefs']:
            ref = doc.ids[id]
            ref_text = ref[0]

            # Sphinx has created pending_xref nodes with [reftext] text.
            def matching_pending_xref(node):
                return (isinstance(node, pending_xref) and
                        node[0].astext() == '[%s]' % ref_text)

            for xref_node in ref.parent.traverse(matching_pending_xref):
                xref_node.replace(xref_node[0], Text('[%s]' % new_text))
            ref.replace(ref_text, new_text.copy())

def visit_Text(self, node: nodes.Text) -> None:
    if isinstance(node.parent, nodes.comment):
        return
    if self.current_cell.cell_type == "markdown":
        self.append(node.astext())
        return
    source = node.astext()
    if ">>>" not in source:
        self.append(source)
        return

    pattern = re.compile("^(>>>|\\.\\.\\.) ?")

    def clean_line(line):
        return pattern.sub("", line)

    cleaned_source = "\n".join([
        clean_line(line)
        for line in source.split("\n")
        if pattern.match(line)
    ])
    self.append(cleaned_source)

def brackets(parameters_node):
    return [
        emphasis('', 'self'),
        SphinxNodes.desc_name('', '', Text('[')),
        emphasis('', parameters_node.children[0].astext()),
        SphinxNodes.desc_name('', '', Text(']')),
    ]

def build_toc(descinfo, env):
    """Return a desc table of contents node tree"""
    separator = "—"
    child_ids = descinfo["children"]
    if not child_ids:
        return None
    max_fullname_len = 0
    max_summary_len = 0
    rows = []
    for fullname, refid, summary in ichild_ids(child_ids, env):
        max_fullname_len = max(max_fullname_len, len(fullname))
        max_summary_len = max(max_summary_len, len(summary))
        reference_node = toc_ref(fullname, refid)
        ref_entry_node = entry("", line("", "", reference_node))
        sep_entry_node = entry("", Text(separator, ""))
        sum_entry_node = entry("", Text(summary, ""))
        row_node = row("", ref_entry_node, sep_entry_node, sum_entry_node)
        rows.append(row_node)
    col0_len = max_fullname_len + 2   # add error margin
    col1_len = len(separator)         # no padding
    col2_len = max_summary_len + 10   # add error margin
    tbody_node = tbody("", *rows)
    col0_colspec_node = colspec(colwidth=col0_len)
    col1_colspec_node = colspec(colwidth=col1_len)
    col2_colspec_node = colspec(colwidth=col2_len)
    tgroup_node = tgroup(
        "",
        col0_colspec_node,
        col1_colspec_node,
        col2_colspec_node,
        tbody_node,
        cols=3,
    )
    return TocTable("", tgroup_node, classes=["toc"])

def visit_Text(self, node):
    parent = node.parent
    while parent:
        if isinstance(parent, node_blacklist):
            if DEBUG and any(i == 'math' for i, _ in split_dollars(node.rawsource)):
                print("sphinx-math-dollar: Skipping", node,
                      "(node_blacklist = %s)" % (node_blacklist, ), file=sys.stderr)
            return
        parent = parent.parent
    data = split_dollars(node.rawsource)
    nodes = []
    has_math = False
    for typ, text in data:
        if typ == "math":
            has_math = True
            nodes.append(math(text, Text(text)))
        elif typ == "text":
            nodes.append(Text(text))
        else:
            raise ValueError("Unrecognized type from split_dollars %r" % typ)
    if has_math:
        node.parent.replace(node, nodes)

def visit_Text(self, node):
    parent = node.parent
    while parent:
        if isinstance(parent, node_blacklist):
            if DEBUG and any(i == 'math' for i, _ in split_dollars(str(node).replace('\x00', '\\'))):
                print("sphinx-math-dollar: Skipping", node,
                      "(node_blacklist = %s)" % (node_blacklist,), file=sys.stderr)
            return
        parent = parent.parent
    # See https://github.com/sympy/sphinx-math-dollar/issues/22
    data = split_dollars(str(node).replace('\x00', '\\'))
    nodes = []
    has_math = False
    for typ, text in data:
        if typ == "math":
            has_math = True
            nodes.append(math(text, Text(text)))
        elif typ == "text":
            nodes.append(Text(text))
        elif typ == "display math":
            has_math = True
            new_node = math_block(text, Text(text))
            new_node.attributes.setdefault('nowrap', False)
            new_node.attributes.setdefault('number', None)
            nodes.append(new_node)
        else:
            raise ValueError("Unrecognized type from split_dollars %r" % typ)
    if has_math:
        node.parent.replace(node, nodes)

def brackets(parameters_node): return [ emphasis("", "self"), SphinxNodes.desc_name("", "", Text("[")), emphasis("", get_children(parameters_node)[0].astext()), SphinxNodes.desc_name("", "", Text("]")), ]
def visit_field_list(self, node):
    fields = {}
    for field_node in node.children:
        if not field_node.children:
            continue
        field_name_node = field_node.children[0]
        field_name_raw = field_name_node.rawsource
        if field_name_raw.startswith("param "):
            if not field_name_node.children:
                continue
            param_name = field_name_raw[len("param "):]
            param_type = None
            parts = param_name.rsplit(None, 1)
            if len(parts) == 2:
                param_type, param_name = parts
            # Strip leading escaped asterisks for vararg parameters in Google code style docstrings
            param_name = re.sub(r'\\\*', '*', param_name)
            field_name_node.children[0] = Text(param_name)
            fields[param_name] = field_node
            if param_type:
                field_node.type = param_type
        if field_name_raw in ("return", "returns"):
            fields[field_name_raw] = field_node

    for field_node in list(node.children):
        if len(field_node.children) < 2:
            continue
        field_name_node, field_body_node = field_node.children[:2]
        param_type = self._strip_markup(field_body_node.astext())[1]
        field_name_raw = field_name_node.rawsource
        if field_name_raw.startswith("type "):
            param_name = re.sub(r'\\\*', '*', field_name_raw[len("type "):])
            if param_name in fields:
                node.children.remove(field_node)
            else:
                fields[param_name] = field_node
                field_name_node.children[0] = Text(param_name)
                field_body_node.children[:] = []
            fields[param_name].type = param_type
        elif field_name_raw == "rtype":
            existing_return_tag = None
            if "returns" in fields:
                existing_return_tag = "returns"
            elif "return" in fields:
                existing_return_tag = "return"
            if existing_return_tag:
                node.children.remove(field_node)
            else:
                existing_return_tag = "return"
                fields[existing_return_tag] = field_node
                field_name_node.children[0] = Text(existing_return_tag)
                field_body_node.children[:] = []
            fields[existing_return_tag].type = param_type

    HTMLTranslator.visit_field_list(self, node)

def xf(name_node, parameters_node):
    return inline(
        '', '',
        emphasis('', 'self'),
        Text(' '),
        patch_node(name_node, op, ()),
        Text(' '),
        emphasis('', parameters_node.children[0].astext()),
    )

def process_changelog_links(app, doctree, docname):
    for rex in app.changelog_links_rexes:
        if rex.match(docname):
            break
    else:
        # if the doc doesn't match any of the changelog regexes, don't process
        return

    if SPHINX_LT_16:
        info = app.info
    else:
        from sphinx.util import logging
        info = logging.getLogger(__name__).info

    info('[changelog_links] Adding changelog links to "{0}"'.format(docname))

    for item in doctree.traverse():

        if not isinstance(item, Text):
            continue

        # We build a new list of items to replace the current item. If
        # a link is found, we need to use a 'reference' item.
        children = []

        # First cycle through blocks of issues (delimited by []) then
        # iterate inside each one to find the individual issues.
        prev_block_end = 0
        for block in BLOCK_PATTERN.finditer(item):
            block_start, block_end = block.start(), block.end()
            children.append(Text(item[prev_block_end:block_start]))
            block = item[block_start:block_end]
            prev_end = 0
            for m in ISSUE_PATTERN.finditer(block):
                start, end = m.start(), m.end()
                children.append(Text(block[prev_end:start]))
                issue_number = block[start:end]
                refuri = app.config.github_issues_url + issue_number[1:]
                children.append(reference(text=issue_number,
                                          name=issue_number,
                                          refuri=refuri))
                prev_end = end
            prev_block_end = block_end

            # If no issues were found, this adds the whole item,
            # otherwise it adds the remaining text.
            children.append(Text(block[prev_end:block_end]))

        # If no blocks were found, this adds the whole item, otherwise
        # it adds the remaining text.
        children.append(Text(item[prev_block_end:]))

        # Replace item by the new list of items we have generated,
        # which may contain links.
        item.parent.replace(item, children)

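# Note: BLOCK_PATTERN and ISSUE_PATTERN are module-level constants that are not shown
# above. Plausible stand-in definitions (assumptions for illustration only, not the
# project's exact regexes) that match "[#1234, #1250]"-style issue blocks and the
# individual "#NNNN" issue numbers inside them:
import re

BLOCK_PATTERN = re.compile(r'\[#[^\]]*\]', flags=re.DOTALL)  # a bracketed block of issue references
ISSUE_PATTERN = re.compile(r'#[0-9]+')                       # a single "#NNNN" issue number
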
def doctree_resolved(app, doctree, docname):
    index = {}
    refTable = {}
    if app.config.autonumber_by_chapter:
        # Record the number of each chapter
        env = app.builder.env
        sectionNumbers = {}
        for doc in env.toc_secnumbers:
            sections = env.toc_secnumbers[doc]
            for sectionId in sections:
                sectionNumbers[sectionId[1:]] = sections[sectionId]
        lastChapter = -1

    # Assign numbers to all the autonumbered objects.
    for node in doctree.traverse(autonumber):
        category = node.astext().split(',')[0]
        if category in index:
            nextNumber = index[category] + 1
        else:
            nextNumber = 1
        if app.config.autonumber_by_chapter:
            parent = node.parent
            chapter = None
            while chapter is None:
                if isinstance(parent, section):
                    chapter = parent
                parent = parent.parent
            chapter = sectionNumbers[chapter.attributes['ids'][0]][0]
            if chapter != lastChapter:
                index = {}
                nextNumber = 1
            newNode = Text('%s %d-%d' % (category, chapter, nextNumber))
            lastChapter = chapter
        else:
            newNode = Text('%s %d' % (category, nextNumber))
        index[category] = nextNumber
        refTable[node.astext()] = newNode
        node.parent.replace(node, newNode)

    # Replace references with the name of the referenced object
    for ref_info in doctree.traverse(autonumber_ref):
        target = ref_info['reftarget']
        if target not in refTable:
            raise ValueError('Unknown target for autonumber reference: ' + target)
        ref_info.replace_self(Text(refTable[target].astext()))

def doctree_read(app, doctree):
    # first generate figure numbers for each figure
    env = app.builder.env
    json_data = loadTable()
    i = getattr(env, 'i', 1)
    figids = getattr(env, 'figids', {})
    figid_docname_map = getattr(env, 'figid_docname_map', {})
    module = ''
    num_module = 0
    for avdgm_info in doctree.traverse(Element):  # av_dgm
        if env.docname != module:
            i = 1
        if env.docname in json_data:
            module = env.docname
            num_module = json_data[env.docname]
        if isinstance(avdgm_info, av_dgm) or (isinstance(avdgm_info, av_ss) and len(avdgm_info['ids']) > 0):
            for cap in avdgm_info.traverse(caption):
                cap[0] = Text(" %s %s.%d: %s" % (app.config.figure_caption_prefix, num_module, i, cap[0]))
            for id in avdgm_info['ids']:
                figids[id] = i
                figid_docname_map[id] = env.docname
            i += 1
        if isinstance(avdgm_info, figure):
            i += 1
    env.figid_docname_map = figid_docname_map
    env.i = i
    env.figids = figids

def run(self):
    nodes = super(CustomAutoprogramDirective, self).run()

    # By default, the document generated by sphinxcontrib.autoprogram
    # just has a page title which is the program name ("awx")
    # The code here changes this slightly so the reference guide starts
    # with a human-friendly title and preamble

    # configure a custom page heading (not `awx`)
    heading = Text('Reference Guide')
    heading.parent = nodes[0][0]
    nodes[0][0].children = [heading]

    # add a descriptive top synopsis of the reference guide
    nodes[0].children.insert(1, paragraph(text=(
        'This is an exhaustive guide of every available command in '
        'the awx CLI tool.')))
    disclaimer = (
        'The commands and parameters documented here can (and will) '
        'vary based on a variety of factors, such as the AWX API '
        'version, AWX settings, and access level of the authenticated '
        'user. For the most accurate view of available commands, '
        'invoke the awx CLI using the --help flag.')
    nodes[0].children.insert(2, paragraph(text=disclaimer))
    return nodes

def __call__(self, *args, **kwargs):
    nodes, messages = XRefRole.__call__(self, *args, **kwargs)
    for node in nodes:
        attrs = node.attributes
        target = attrs['reftarget']
        parens = ''
        if target.endswith('()'):
            # Function call, :symbol:`mongoc_init()`
            target = target[:-2]
            parens = '()'
        if ':' in target:
            # E.g., 'bson:bson_t' has domain 'bson', target 'bson_t'
            attrs['domain'], name = target.split(':', 1)
            attrs['reftarget'] = name

            old = node.children[0].children[0]
            assert isinstance(old, Text)
            new = Text(name + parens, name + parens)
            # Ensure setup_child is called.
            node.children[0].replace(old, new)
        else:
            attrs['reftarget'] = target
        attrs['reftype'] = 'doc'
        attrs['classes'].append('symbol')
        if sphinx_version_info >= (1, 6):
            # https://github.com/sphinx-doc/sphinx/issues/3698
            attrs['refdomain'] = 'std'
    return nodes, messages

def apply(self):
    for node in self.document.traverse(GenerateTagLinks.baseref):
        # find the entry for the link reference we want to substitute
        link_key = None
        for k in self.linkref_lut.keys():
            if k in node["refuri"]:
                link_key = k
        if not link_key:
            continue
        link_value = self.linkref_lut[link_key]

        git_tag = subprocess.check_output(["git", "describe", "--always"]).decode("utf-8").strip()
        if len(re.findall(self.accepted_tag_format, git_tag)) != 1:
            git_tag = "main"
        link_value = link_value.format(MONAILABEL_GIT_TAG=git_tag)

        # replace the link reference with the link value
        target = node["refuri"].replace(link_key, link_value, 1)
        node.replace_attr("refuri", target)

        # replace the text as well where it occurs
        for txt in node.traverse(GenerateTagLinks.basetext):
            new_txt = Text(txt.replace(self.linkref_prefix, self.github_link, 1), txt.rawsource)
            txt.parent.replace(txt, new_txt)

def __call__(self, *args, **kwargs):
    nodes, messages = XRefRole.__call__(self, *args, **kwargs)
    for node in nodes:
        attrs = node.attributes
        target = attrs['reftarget']
        parens = ''
        if target.endswith('()'):
            # Function call, :symbol:`mongoc_init()`
            target = target[:-2]
            parens = '()'
        if ':' in target:
            # E.g., 'bson:bson_t' has domain 'bson', target 'bson_t'
            attrs['domain'], name = target.split(':', 1)
            attrs['reftarget'] = name

            assert isinstance(node.children[0].children[0], Text)
            node.children[0].children[0] = Text(name + parens, name + parens)
        else:
            attrs['reftarget'] = target
        attrs['reftype'] = 'doc'
        attrs['classes'].append('symbol')
    return nodes, messages

def __recordDepart(self, elem):
    (pfx, nm) = self.uri2Prefixes.elem2PrefixName(elem)
    self.depth -= 1
    self.document += Text("%s} %s:%s\n" % (self.depth * self.indent, self.currentPrefix, self.currentTag))
    return None

def __recordVisit(self, elem):
    (pfx, nm) = self.uri2Prefixes.elem2PrefixName(elem)
    attrs = ""
    for attr in sorted(elem.keys()):
        attrs += " %s=%r" % (attr, elem.get(attr))
    self.document += Text("%s{ %s:%s%s\n" % (self.depth * self.indent, self.currentPrefix, self.currentTag, attrs))
    self.depth += 1
    control = elem.get('control', None)
    if control in ('SkipNode', 'SkipDeparture', 'SkipSiblings', 'SkipChildren', 'StopTraversal'):
        e = eval("docutils.nodes.%s()" % (control, ))
        try:
            raise e
        except (docutils.nodes.SkipNode, docutils.nodes.SkipSiblings):
            self.depth -= 1
            raise
    elif control == 'SomeChildren':
        tags = [
            child.split(':', 1)
            for child in elem.get('controlSomeChildren', '').split()
        ]
        raise SomeChildren(tags)
    return None

def dispatch_visit(self, node):
    if isinstance(node, TextElement) and not isinstance(node, literal_block):
        for i in range(len(node.children)):
            if type(node[i]) == Text:
                node[i] = Text(node[i].astext().replace('\r', '').replace('\n', ''))

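# Hedged, self-contained sketch of the same technique driven directly by docutils:
# strip hard line breaks from Text children of ordinary text elements while leaving
# literal blocks untouched. The class and variable names below are illustrative
# assumptions, not part of the original project.
from docutils import nodes
from docutils.core import publish_doctree


class NewlineStripper(nodes.SparseNodeVisitor):
    def unknown_visit(self, node):
        # Mirrors dispatch_visit above: drop CR/LF inside non-literal text elements.
        if isinstance(node, nodes.TextElement) and not isinstance(node, nodes.literal_block):
            for i, child in enumerate(node.children):
                if isinstance(child, nodes.Text):
                    node[i] = nodes.Text(child.astext().replace('\r', '').replace('\n', ''))


doctree = publish_doctree("A paragraph\nbroken across\nseveral lines.")
doctree.walk(NewlineStripper(doctree))
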
def explicit_releases_error_on_unfound_issues(self):
    # Just a release - result will have 1.0.0, 1.0.1, and unreleased
    changelog = release_list('1.0.1')
    # No issues listed -> this clearly doesn't exist in any buckets
    changelog[1][0].append(Text("25"))
    # This should asplode
    construct_releases(changelog, make_app())

def resolve_reference_in_node(node):
    if node.tagname == 'reference':
        ref_string = MarkdownTranslator._resolve_reference(node)
        if node.parent is not None:
            for i, n in enumerate(node.parent):
                if n is node:
                    # Replace the reference node.
                    node.parent.children[i] = Text(ref_string)
                    break
        else:
            # If the reference node has no parent, replace its content.
            node.clear()
            node.children.append(Text(ref_string))
    else:
        for child in node:
            if isinstance(child, Node):
                MarkdownTranslator.resolve_reference_in_node(child)

def trunc_whitespace(app, doctree, docname):
    if not app.config.japanesesupport_trunc_whitespace:
        return
    for node in doctree.traverse(Text):
        if isinstance(node.parent, paragraph):
            newtext = _PAT.sub(r"\1\2", node.astext())
            node.parent.replace(node, Text(newtext))

def parse_parameter_list(self, node):
    parameters = []
    special = []
    argnames = list(node.argnames)
    if node.kwargs:
        special.append(make_parameter(argnames[-1], excess_keyword=1))
        argnames.pop()
    if node.varargs:
        special.append(make_parameter(argnames[-1], excess_positional=1))
        argnames.pop()
    defaults = list(node.defaults)
    defaults = [None] * (len(argnames) - len(defaults)) + defaults
    function_parameters = self.token_parser.function_parameters(node.lineno)
    # print >>sys.stderr, function_parameters
    for argname, default in zip(argnames, defaults):
        if type(argname) is tuple:
            parameter = pynodes.parameter_tuple()
            for tuplearg in argname:
                parameter.append(make_parameter(tuplearg))
            argname = normalize_parameter_name(argname)
        else:
            parameter = make_parameter(argname)
        if default:
            n_default = pynodes.parameter_default()
            n_default.append(Text(function_parameters[argname]))
            parameter.append(n_default)
        parameters.append(parameter)
    if parameters or special:
        special.reverse()
        parameters.extend(special)
        parameter_list = pynodes.parameter_list()
        parameter_list.extend(parameters)
        self.function.append(parameter_list)

def insert_callback(parameters_node):
    # We need to know what params are here already
    parameter_names = get_parameter_names(parameters_node)

    if 'callback' not in parameter_names:
        if '*args' in parameter_names:
            args_pos = parameter_names.index('*args')
        else:
            args_pos = len(parameter_names)

        if '**kwargs' in parameter_names:
            kwargs_pos = parameter_names.index('**kwargs')
        else:
            kwargs_pos = len(parameter_names)

        doc = (
            " (optional): function taking (result, error), executed when"
            " operation completes")

        new_item = paragraph(
            '', '',
            literal('', 'callback'),
            Text(doc))

        if parameters_node.children and isinstance(parameters_node.children[0], list_item):
            # Insert "callback" before *args and **kwargs
            parameters_node.insert(min(args_pos, kwargs_pos), list_item('', new_item))
        else:
            parameters_node.append(new_item)
            # Insert "callback" before *args and **kwargs
            parameters_node.insert(min(args_pos, kwargs_pos), new_item)

def visit_field_list(self, node):
    fields = {}
    for n in node.children:
        if not n.children:
            continue
        child = n.children[0]
        rawsource = child.rawsource
        if rawsource.startswith("param "):
            index = rawsource.index("param ")
            if not child.children:
                continue
            # Strip leading escaped asterisks for vararg parameters in Google code style docstrings
            trimmed_name = re.sub(r'\\\*', '*', rawsource[index + 6:])
            child.children[0] = Text(trimmed_name)
            fields[trimmed_name] = n
        if rawsource == "return":
            fields["return"] = n

    for n in node.children:
        if not n.children:
            continue
        child = n.children[0]
        rawsource = child.rawsource
        if rawsource.startswith("type "):
            index = rawsource.index("type ")
            name = re.sub(r'\\\*', '*', rawsource[index + 5:])
            if name in fields:
                fields[name].type = n.children[1][0][0]
                node.children.remove(n)
        if rawsource == "rtype":
            if "return" in fields:
                fields["return"].type = n.children[1][0][0]
                node.children.remove(n)

    HTMLTranslator.visit_field_list(self, node)

def make_import_group(names, lineno, from_name=None):
    n = pynodes.import_group()
    n['lineno'] = lineno
    if from_name:
        n_from = pynodes.import_from()
        n_from.append(Text(from_name))
        n.append(n_from)
    for name, alias in names:
        n_name = pynodes.import_name()
        n_name.append(Text(name))
        if alias:
            n_alias = pynodes.import_alias()
            n_alias.append(Text(alias))
            n_name.append(n_alias)
        n.append(n_name)
    return n

def visit_field_list(self, node):
    fields = {}
    for n in node.children:
        if len(n.children) == 0:
            continue
        child = n.children[0]
        rawsource = child.rawsource
        if rawsource.startswith("param "):
            index = rawsource.index("param ")
            if len(child.children) == 0:
                continue
            child.children[0] = Text(rawsource[index + 6:])
            fields[rawsource[index + 6:]] = n
        if rawsource == "return":
            fields["return"] = n

    for n in node.children:
        if len(n.children) == 0:
            continue
        child = n.children[0]
        rawsource = child.rawsource
        if rawsource.startswith("type "):
            index = rawsource.index("type ")
            name = rawsource[index + 5:]
            if name in fields:
                fields[name].type = n.children[1][0][0]
                node.children.remove(n)
        if rawsource == "rtype":
            if "return" in fields:
                fields["return"].type = n.children[1][0][0]
                node.children.remove(n)

    HTMLTranslator.visit_field_list(self, node)