def build_toc_file(self) -> None:
    """Write the table-of-contents file (.hhc) into the output directory."""
    outname = path.join(self.outdir, self.config.htmlhelp_basename + '.hhc')
    with open(outname, 'w', encoding=self.encoding, errors='xmlcharrefreplace') as stream:
        # resolve the full master toctree (unpruned) and walk it with the
        # ToC visitor to build the body of the .hhc file
        doctree = self.env.get_and_resolve_doctree(self.config.master_doc, self,
                                                   prune_toctrees=False)
        visitor = ToCTreeVisitor(doctree)
        matcher = NodeMatcher(addnodes.compact_paragraph, toctree=True)
        for paragraph in doctree.traverse(matcher):  # type: addnodes.compact_paragraph
            paragraph.walkabout(visitor)
        stream.write(self.render('project.hhc', {
            'body': visitor.astext(),
            'suffix': self.out_suffix,
            'short_title': self.config.html_short_title,
            'master_doc': self.config.master_doc,
            'domain_indices': self.domain_indices,
        }))
def run(self):
    """Replace 'details'-typed containers with details/summary node pairs."""
    for container in self.document.traverse(NodeMatcher(nodes.container, type='details')):
        # first child becomes the summary; the rest stays as the body
        replacement = details(**container.attributes)
        replacement += summary('', '', *container[0])
        replacement.extend(container[1:])
        container.replace_self(replacement)
def run(self, **kwargs):
    # type: (Any) -> None
    """Wrap literal-block containers into captioned_literal_block nodes."""
    matcher = NodeMatcher(nodes.container, literal_block=True)
    for container in self.document.traverse(matcher):  # type: nodes.container
        container.replace_self(
            captioned_literal_block('', *container.children, **container.attributes))
def run(self, **kwargs: Any) -> None:
    """Split kbd-classed literals into compound keystroke nodes."""
    matcher = NodeMatcher(nodes.literal, classes=["kbd"])
    # Materialize the matches up front: the loop body appends new literal
    # nodes that would themselves match while iterating.
    for kbd_node in list(self.document.findall(matcher)):  # type: nodes.literal
        keys = self.pattern.split(kbd_node[-1].astext())
        if len(keys) == 1 or self.is_multiwords_key(keys):
            continue

        kbd_node['classes'].append('compound')
        kbd_node.pop()
        while keys:
            if self.is_multiwords_key(keys):
                # a multi-word key occupies three split parts
                keystroke = ''.join(keys[:3])
                keys[:3] = []
            else:
                keystroke = keys.pop(0)
            kbd_node += nodes.literal('', keystroke, classes=["kbd"])

            try:
                # key separator (ex. -, +, ^)
                kbd_node += nodes.Text(keys.pop(0))
            except IndexError:
                pass
def build_qhp(self, outdir, outname):
    # type: (str, str) -> None
    """Write the Qt Help project file (<outname>.qhp) and the collection
    project file (<outname>.qhcp) into *outdir*.
    """
    logger.info(__('writing project file...'))
    # sections: serialize the resolved (unpruned) master toctree, then
    # append one section per domain index
    tocdoc = self.env.get_and_resolve_doctree(self.config.master_doc, self,
                                              prune_toctrees=False)
    sections = []
    matcher = NodeMatcher(addnodes.compact_paragraph, toctree=True)
    for node in tocdoc.traverse(matcher):  # type: addnodes.compact_paragraph
        sections.extend(self.write_toc(node))

    for indexname, indexcls, content, collapse in self.domain_indices:
        item = section_template % {'title': indexcls.localname,
                                   'ref': '%s.html' % indexname}
        sections.append(' ' * 4 * 4 + item)  # 16-space indent inside the XML
    sections = '\n'.join(sections)  # type: ignore

    # keywords: one entry per index entry (including sub-entries)
    keywords = []
    index = IndexEntries(self.env).create_index(self, group_entries=False)
    for (key, group) in index:
        for title, (refs, subitems, key_) in group:
            keywords.extend(self.build_keywords(title, refs, subitems))
    keywords = '\n'.join(keywords)  # type: ignore

    # it seems that the "namespace" may not contain non-alphanumeric
    # characters, and more than one successive dot, or leading/trailing
    # dots, are also forbidden
    if self.config.qthelp_namespace:
        nspace = self.config.qthelp_namespace
    else:
        nspace = 'org.sphinx.%s.%s' % (outname, self.config.version)

    nspace = re.sub(r'[^a-zA-Z0-9.\-]', '', nspace)
    nspace = re.sub(r'\.+', '.', nspace).strip('.')
    nspace = nspace.lower()

    # write the project file
    with open(path.join(outdir, outname + '.qhp'), 'w', encoding='utf-8') as f:
        body = render_file('project.qhp', outname=outname,
                           title=self.config.html_title,
                           version=self.config.version,
                           project=self.config.project,
                           namespace=nspace,
                           master_doc=self.config.master_doc,
                           sections=sections,
                           keywords=keywords,
                           files=self.get_project_files(outdir))
        f.write(body)

    # qthelp:// URLs for the collection file below
    homepage = 'qthelp://' + posixpath.join(
        nspace, 'doc', self.get_target_uri(self.config.master_doc))
    startpage = 'qthelp://' + posixpath.join(nspace, 'doc', 'index.html')

    logger.info(__('writing collection project file...'))
    with open(path.join(outdir, outname + '.qhcp'), 'w',
              encoding='utf-8') as f:
        body = render_file('project.qhcp', outname=outname,
                           title=self.config.html_short_title,
                           homepage=homepage, startpage=startpage)
        f.write(body)
def test_NodeMatcher():
    """NodeMatcher should match by node class, by attribute, or by both."""
    doctree = nodes.document(None, None)
    doctree += nodes.paragraph('', 'Hello')
    doctree += nodes.paragraph('', 'Sphinx', block=1)
    doctree += nodes.paragraph('', 'World', block=2)
    doctree += nodes.literal_block('', 'blah blah blah', block=3)

    def count(matcher):
        return len(list(doctree.traverse(matcher)))

    # search by node class
    assert count(NodeMatcher(nodes.paragraph)) == 3
    # search by multiple node classes
    assert count(NodeMatcher(nodes.paragraph, nodes.literal_block)) == 4
    # search by node attribute
    assert count(NodeMatcher(block=1)) == 1
    # search by node attribute (Any)
    assert count(NodeMatcher(block=Any)) == 3
    # search by both class and attribute
    assert count(NodeMatcher(nodes.paragraph, block=Any)) == 2
    # mismatched
    assert count(NodeMatcher(nodes.title)) == 0
    # search with Any does not match to Text node
    assert count(NodeMatcher(blah=Any)) == 0
def apply(self):
    # type: () -> None
    """For LaTeX output only: wrap literal-block containers into captioned blocks."""
    if self.app.builder.name != 'latex':
        return

    matcher = NodeMatcher(nodes.container, literal_block=True)
    for container in self.document.traverse(matcher):
        container.replace_self(
            captioned_literal_block('', *container.children, **container.attributes))
def run(self, **kwargs) -> None:
    """Resolve citation pending_xrefs into citation_reference nodes."""
    domain = cast(CitationDomain, self.env.get_domain('citation'))
    matcher = NodeMatcher(addnodes.pending_xref, refdomain='citation', reftype='ref')
    for xref in self.document.traverse(matcher):  # type: addnodes.pending_xref
        docname, labelid, _ = domain.citations.get(xref['reftarget'], ('', '', 0))
        if not docname:
            # unknown citation target; leave the pending_xref untouched
            continue
        xref.replace_self(nodes.citation_reference('', '', *xref.children,
                                                   docname=docname, refname=labelid))
def apply(self, **kwargs: Any) -> None:
    """Strip translatable inline wrappers, splicing their children in place."""
    from sphinx.builders.gettext import MessageCatalogBuilder
    if isinstance(self.app.builder, MessageCatalogBuilder):
        # the gettext builder needs the wrappers; keep them
        return

    matcher = NodeMatcher(nodes.inline, translatable=Any)
    for wrapper in self.document.traverse(matcher):  # type: nodes.inline
        parent = wrapper.parent
        parent.remove(wrapper)
        parent += wrapper.children
def apply(self, **kwargs: Any) -> None:
    """Hoist nested inline markup (literal/emphasis/strong inside the same
    kinds of nodes) out to siblings of the enclosing node.
    """
    matcher = NodeMatcher(nodes.literal, nodes.emphasis, nodes.strong)
    # Fix: materialize the matches before mutating.  The loop body inserts
    # new nodes (which themselves match) into the tree; iterating a live
    # result while doing so is unsafe.  The fixed variant of this transform
    # elsewhere in this file does the same.
    for node in list(self.document.traverse(matcher)):  # type: TextElement
        if any(matcher(subnode) for subnode in node):
            pos = node.parent.index(node)
            # walk children after the first, back to front, so repeated
            # insert(pos + 1, ...) preserves their document order
            for subnode in reversed(node[1:]):
                node.remove(subnode)
                if matcher(subnode):
                    # already inline markup: move it out unchanged
                    node.parent.insert(pos + 1, subnode)
                else:
                    # re-wrap plain content in a copy of the outer markup
                    newnode = node.__class__('', '', subnode, **node.attributes)
                    node.parent.insert(pos + 1, newnode)
def apply(self):
    # type: () -> None
    """For LaTeX only: resolve std-domain citation xrefs to citation_reference nodes."""
    if self.app.builder.name != 'latex':
        return

    citations = self.env.get_domain('std').data['citations']
    matcher = NodeMatcher(addnodes.pending_xref, refdomain='std', reftype='citation')
    for xref in self.document.traverse(matcher):
        docname, labelid, _ = citations.get(xref['reftarget'], ('', '', 0))
        if docname:
            xref.replace_self(nodes.citation_reference('', *xref.children,
                                                       docname=docname,
                                                       refname=labelid))
def run(self):
    """Group consecutive 'tabbed' containers into tab sets and render each set."""
    matcher = NodeMatcher(nodes.container, type="tabbed")
    tab_set = None
    for node in self.document.traverse(matcher):  # type: nodes.container
        if tab_set is None:
            # first tab seen: open the initial set
            tab_set = TabSet(node)
        elif node["new_group"]:
            # explicit group break: flush and restart
            self.render_tab_set(tab_set)
            tab_set = TabSet(node)
        elif tab_set.is_next(node):
            tab_set.append(node)
        else:
            # non-adjacent tab: flush and restart
            self.render_tab_set(tab_set)
            tab_set = TabSet(node)
    # Fix: only flush the trailing set when at least one tabbed container
    # was found; the original called render_tab_set(None) unconditionally
    # on documents without tabs.
    if tab_set is not None:
        self.render_tab_set(tab_set)
def run(self):
    """Locate and replace `TabContainer`s."""
    counter = 0
    pending = []  # type: List[TabContainer]
    for container in self.document.traverse(NodeMatcher(TabContainer)):  # type: TabContainer
        if _should_start_new_set(container, pending):
            # close out the current set before opening a new one
            self.finalize_set(pending, counter)
            counter += 1
            pending = []
        pending.append(container)
    # flush the trailing set, if any
    if pending:
        self.finalize_set(pending, counter)
def run(self, **kwargs):
    # type: (Any) -> None
    """Resolve std-domain citation xrefs into citation_reference nodes."""
    citations = self.env.get_domain('std').data['citations']
    matcher = NodeMatcher(addnodes.pending_xref, refdomain='std', reftype='citation')
    for xref in self.document.traverse(matcher):  # type: addnodes.pending_xref
        docname, labelid, _ = citations.get(xref['reftarget'], ('', '', 0))
        if not docname:
            continue
        xref.replace_self(nodes.citation_reference('', '', *xref.children,
                                                   docname=docname,
                                                   refname=labelid))
def apply(self, **kwargs: Any) -> None:
    """Move nested inline markup out of literal/emphasis/strong nodes."""
    matcher = NodeMatcher(nodes.literal, nodes.emphasis, nodes.strong)
    # snapshot the matches: the loop inserts new matching nodes into the tree
    for node in list(self.document.traverse(matcher)):  # type: TextElement
        if not any(matcher(child) for child in node):
            continue
        pos = node.parent.index(node)
        # back-to-front so repeated insert(pos + 1, ...) keeps document order
        for child in reversed(list(node)):
            node.remove(child)
            if matcher(child):
                sibling = child
            else:
                sibling = node.__class__('', '', child, **node.attributes)
            node.parent.insert(pos + 1, sibling)
        # drop the node entirely if every child became a sibling
        if not len(node):
            node.parent.remove(node)
def run(self, **kwargs: Any) -> None:
    """Split kbd-classed literals on separators into individual key literals."""
    matcher = NodeMatcher(nodes.literal, classes=["kbd"])
    for kbd_node in self.document.traverse(matcher):  # type: nodes.literal
        keys = self.pattern.split(kbd_node[-1].astext())
        if len(keys) == 1:
            # no separator present; nothing to split
            continue

        kbd_node.pop()
        while keys:
            kbd_node += nodes.literal('', keys.pop(0), classes=["kbd"])
            try:
                # key separator (ex. -, +, ^)
                kbd_node += nodes.Text(keys.pop(0))
            except IndexError:
                pass
def build_eclipsehelp(self, outdir: str) -> None:
    """Dump an Eclipse-help ``toc.xml`` built from the resolved master toctree.

    :param outdir: directory the ``toc.xml`` file is written into

    Fix: the parameter was annotated ``String``, which is not a defined
    name in Python — evaluating the annotation raises ``NameError``; the
    builtin ``str`` is intended.
    """
    logger.info(__('dumping eclipse help toc.xml...'))

    base_dir = self.config.eclipsehelp_basedir
    if base_dir != '':
        base_dir += '/'

    root = etree.Element('toc', label=self.config.html_title, topic="index.html")
    tree = etree.ElementTree(root)
    tocdoc = self.env.get_and_resolve_doctree(self.config.master_doc, self,
                                              prune_toctrees=False)

    def write_toc(node: nodes.Node, parent: etree.Element) -> None:
        # compact paragraphs / bullet lists only group their children;
        # list items become <topic> elements; references fill in the
        # enclosing topic's label and href
        if isinstance(node, (addnodes.compact_paragraph, nodes.bullet_list)):
            for subnode in node:
                write_toc(subnode, parent)
        elif isinstance(node, nodes.list_item):
            item = etree.SubElement(parent, 'topic')
            for subnode in node:
                write_toc(subnode, item)
        elif isinstance(node, nodes.reference):
            parent.attrib['label'] = node.astext()
            parent.attrib['href'] = base_dir + node['refuri']

    matcher = NodeMatcher(addnodes.compact_paragraph, toctree=Any)
    for node in tocdoc.traverse(matcher):  # type: addnodes.compact_paragraph
        write_toc(node, root)

    # Dump the XML file
    tocfile = path.join(outdir, 'toc.xml')
    with open(tocfile, 'wb') as f:
        tree.write(f, 'utf-8')
def run(self, **kwargs: Any) -> None:
    """Split kbd-classed literals into compound keystroke nodes.

    Fix: ``findall`` returns a generator, and the loop body appends new
    ``literal`` nodes carrying the ``kbd`` class — nodes that match the
    same ``NodeMatcher`` and would be re-visited while iterating.  The
    match list must be pre-created (see the fixed variant of this
    transform elsewhere in this file).
    """
    matcher = NodeMatcher(nodes.literal, classes=["kbd"])
    for node in list(self.document.findall(matcher)):  # type: nodes.literal
        parts = self.pattern.split(node[-1].astext())
        if len(parts) == 1 or self.is_multiwords_key(parts):
            continue

        node['classes'].append('compound')
        node.pop()
        while parts:
            if self.is_multiwords_key(parts):
                # a multi-word key occupies three split parts
                key = ''.join(parts[:3])
                parts[:3] = []
            else:
                key = parts.pop(0)
            node += nodes.literal('', key, classes=["kbd"])

            try:
                # key separator (ex. -, +, ^)
                sep = parts.pop(0)
                node += nodes.Text(sep)
            except IndexError:
                pass
def run(self, **kwargs: Any) -> None:
    """Wrap amsmath math_blocks for MathJax-based HTML builders."""
    builder_name = self.app.builder.name
    if "html" not in builder_name and builder_name not in ("readthedocs",):
        return
    if self.app.builder.math_renderer_name != "mathjax":
        return

    matcher = NodeMatcher(nodes.math_block, classes=["amsmath"])
    for block in self.document.traverse(matcher):  # type: nodes.math_block
        prefix, suffix = self.config.mathjax_display
        block.children[0] = nodes.Text(prefix + str(block.children[0]) + suffix)
        replacement = []
        if block["number"]:
            # prepend the rendered equation number
            number = get_node_equation_number(self, block)
            eqno_html = f'<span class="eqno">({number})</span>'
            replacement.append(nodes.raw("", eqno_html, format="html"))
            # TODO add permalink (see sphinx/ext/mathjax.py)
            # self.add_permalink_ref(node, _('Permalink to this equation'))
        replacement.append(block)
        block.replace_self(replacement)
def apply(self, **kwargs): # type: (Any) -> None settings, source = self.document.settings, self.document['source'] msgstr = u'' # XXX check if this is reliable assert source.startswith(self.env.srcdir) docname = path.splitext( relative_path(path.join(self.env.srcdir, 'dummy'), source))[0] textdomain = find_catalog(docname, self.config.gettext_compact) # fetch translations dirs = [ path.join(self.env.srcdir, directory) for directory in self.config.locale_dirs ] catalog, has_catalog = init_locale(dirs, self.config.language, textdomain) if not has_catalog: return # phase1: replace reference ids with translated names for node, msg in extract_messages(self.document): msgstr = catalog.gettext(msg) # type: ignore # XXX add marker to untranslated parts if not msgstr or msgstr == msg or not msgstr.strip(): # as-of-yet untranslated continue # Avoid "Literal block expected; none found." warnings. # If msgstr ends with '::' then it cause warning message at # parser.parse() processing. # literal-block-warning is only appear in avobe case. if msgstr.strip().endswith('::'): msgstr += '\n\n dummy literal' # dummy literal node will discard by 'patch = patch[0]' # literalblock need literal block notation to avoid it become # paragraph. if isinstance(node, LITERAL_TYPE_NODES): msgstr = '::\n\n' + indent(msgstr, ' ' * 3) patch = publish_msgstr(self.app, msgstr, source, node.line, self.config, settings) # XXX doctest and other block markup if not isinstance(patch, nodes.paragraph): continue # skip for now processed = False # skip flag # update title(section) target name-id mapping if isinstance(node, nodes.title): section_node = node.parent new_name = nodes.fully_normalize_name(patch.astext()) old_name = nodes.fully_normalize_name(node.astext()) if old_name != new_name: # if name would be changed, replace node names and # document nameids mapping with new name. 
names = section_node.setdefault('names', []) names.append(new_name) # Original section name (reference target name) should be kept to refer # from other nodes which is still not translated or uses explicit target # name like "`text to display <explicit target name_>`_".. # So, `old_name` is still exist in `names`. _id = self.document.nameids.get(old_name, None) explicit = self.document.nametypes.get(old_name, None) # * if explicit: _id is label. title node need another id. # * if not explicit: # # * if _id is None: # # _id is None means: # # 1. _id was not provided yet. # # 2. _id was duplicated. # # old_name entry still exists in nameids and # nametypes for another duplicated entry. # # * if _id is provided: bellow process if _id: if not explicit: # _id was not duplicated. # remove old_name entry from document ids database # to reuse original _id. self.document.nameids.pop(old_name, None) self.document.nametypes.pop(old_name, None) self.document.ids.pop(_id, None) # re-entry with new named section node. # # Note: msgnode that is a second parameter of the # `note_implicit_target` is not necessary here because # section_node has been noted previously on rst parsing by # `docutils.parsers.rst.states.RSTState.new_subsection()` # and already has `system_message` if needed. 
self.document.note_implicit_target(section_node) # replace target's refname to new target name matcher = NodeMatcher(nodes.target, refname=old_name) for old_target in self.document.traverse( matcher): # type: nodes.target old_target['refname'] = new_name processed = True # glossary terms update refid if isinstance(node, nodes.term): gloss_entries = self.env.temp_data.setdefault( 'gloss_entries', set()) for _id in node['names']: if _id in gloss_entries: gloss_entries.remove(_id) parts = split_term_classifiers(msgstr) patch = publish_msgstr(self.app, parts[0], source, node.line, self.config, settings) patch = make_glossary_term(self.env, patch, parts[1], source, node.line, _id) node['ids'] = patch['ids'] node['names'] = patch['names'] processed = True # update leaves with processed nodes if processed: for child in patch.children: child.parent = node node.children = patch.children node['translated'] = True # to avoid double translation # phase2: translation for node, msg in extract_messages(self.document): if node.get('translated', False): # to avoid double translation continue # skip if the node is already translated by phase1 msgstr = catalog.gettext(msg) # type: ignore # XXX add marker to untranslated parts if not msgstr or msgstr == msg: # as-of-yet untranslated continue # update translatable nodes if isinstance(node, addnodes.translatable): node.apply_translated_message(msg, msgstr) continue # update meta nodes if isinstance(node, nodes.pending) and is_pending_meta(node): node.details['nodes'][0]['content'] = msgstr continue # Avoid "Literal block expected; none found." warnings. # If msgstr ends with '::' then it cause warning message at # parser.parse() processing. # literal-block-warning is only appear in avobe case. if msgstr.strip().endswith('::'): msgstr += '\n\n dummy literal' # dummy literal node will discard by 'patch = patch[0]' # literalblock need literal block notation to avoid it become # paragraph. 
if isinstance(node, LITERAL_TYPE_NODES): msgstr = '::\n\n' + indent(msgstr, ' ' * 3) # Structural Subelements phase1 # There is a possibility that only the title node is created. # see: http://docutils.sourceforge.net/docs/ref/doctree.html#structural-subelements if isinstance(node, nodes.title): # This generates: <section ...><title>msgstr</title></section> msgstr = msgstr + '\n' + '-' * len(msgstr) * 2 patch = publish_msgstr(self.app, msgstr, source, node.line, self.config, settings) # Structural Subelements phase2 if isinstance(node, nodes.title): # get <title> node that placed as a first child patch = patch.next_node() # ignore unexpected markups in translation message if not isinstance( patch, (( nodes.paragraph, # expected form of translation nodes.title, # generated by above "Subelements phase2" ) + # following types are expected if # config.gettext_additional_targets is configured LITERAL_TYPE_NODES + IMAGE_TYPE_NODES)): continue # skip # auto-numbered foot note reference should use original 'ids'. def list_replace_or_append(lst, old, new): # type: (List[N], N, N) -> None if old in lst: lst[lst.index(old)] = new else: lst.append(new) is_autofootnote_ref = NodeMatcher(nodes.footnote_reference, auto=Any) old_foot_refs = node.traverse( is_autofootnote_ref ) # type: List[nodes.footnote_reference] # NOQA new_foot_refs = patch.traverse( is_autofootnote_ref ) # type: List[nodes.footnote_reference] # NOQA if len(old_foot_refs) != len(new_foot_refs): old_foot_ref_rawsources = [ ref.rawsource for ref in old_foot_refs ] new_foot_ref_rawsources = [ ref.rawsource for ref in new_foot_refs ] logger.warning(__( 'inconsistent footnote references in translated message.' 
+ ' original: {0}, translated: {1}').format( old_foot_ref_rawsources, new_foot_ref_rawsources), location=node) old_foot_namerefs = { } # type: Dict[unicode, List[nodes.footnote_reference]] for r in old_foot_refs: old_foot_namerefs.setdefault(r.get('refname'), []).append(r) for newf in new_foot_refs: refname = newf.get('refname') refs = old_foot_namerefs.get(refname, []) if not refs: continue oldf = refs.pop(0) newf['ids'] = oldf['ids'] for id in newf['ids']: self.document.ids[id] = newf if newf['auto'] == 1: # autofootnote_refs list_replace_or_append(self.document.autofootnote_refs, oldf, newf) else: # symbol_footnote_refs list_replace_or_append(self.document.symbol_footnote_refs, oldf, newf) if refname: footnote_refs = self.document.footnote_refs.setdefault( refname, []) list_replace_or_append(footnote_refs, oldf, newf) refnames = self.document.refnames.setdefault(refname, []) list_replace_or_append(refnames, oldf, newf) # reference should use new (translated) 'refname'. # * reference target ".. _Python: ..." is not translatable. # * use translated refname for section refname. # * inline reference "`Python <...>`_" has no 'refname'. is_refnamed_ref = NodeMatcher(nodes.reference, refname=Any) old_refs = node.traverse( is_refnamed_ref) # type: List[nodes.reference] new_refs = patch.traverse( is_refnamed_ref) # type: List[nodes.reference] if len(old_refs) != len(new_refs): old_ref_rawsources = [ref.rawsource for ref in old_refs] new_ref_rawsources = [ref.rawsource for ref in new_refs] logger.warning( __('inconsistent references in translated message.' + ' original: {0}, translated: {1}').format( old_ref_rawsources, new_ref_rawsources), location=node) old_ref_names = [r['refname'] for r in old_refs] new_ref_names = [r['refname'] for r in new_refs] orphans = list(set(old_ref_names) - set(new_ref_names)) for newr in new_refs: if not self.document.has_name(newr['refname']): # Maybe refname is translated but target is not translated. 
# Note: multiple translated refnames break link ordering. if orphans: newr['refname'] = orphans.pop(0) else: # orphan refnames is already empty! # reference number is same in new_refs and old_refs. pass self.document.note_refname(newr) # refnamed footnote should use original 'ids'. is_refnamed_footnote_ref = NodeMatcher(nodes.footnote_reference, refname=Any) old_foot_refs = node.traverse(is_refnamed_footnote_ref) new_foot_refs = patch.traverse(is_refnamed_footnote_ref) refname_ids_map = {} # type: Dict[unicode, List[unicode]] if len(old_foot_refs) != len(new_foot_refs): old_foot_ref_rawsources = [ ref.rawsource for ref in old_foot_refs ] new_foot_ref_rawsources = [ ref.rawsource for ref in new_foot_refs ] logger.warning(__( 'inconsistent footnote references in translated message.' + ' original: {0}, translated: {1}').format( old_foot_ref_rawsources, new_foot_ref_rawsources), location=node) for oldf in old_foot_refs: refname_ids_map.setdefault(oldf["refname"], []).append(oldf["ids"]) for newf in new_foot_refs: refname = newf["refname"] if refname_ids_map.get(refname): newf["ids"] = refname_ids_map[refname].pop(0) # citation should use original 'ids'. is_citation_ref = NodeMatcher(nodes.citation_reference, refname=Any) old_cite_refs = node.traverse( is_citation_ref ) # type: List[nodes.citation_reference] # NOQA new_cite_refs = patch.traverse( is_citation_ref ) # type: List[nodes.citation_reference] # NOQA refname_ids_map = {} if len(old_cite_refs) != len(new_cite_refs): old_cite_ref_rawsources = [ ref.rawsource for ref in old_cite_refs ] new_cite_ref_rawsources = [ ref.rawsource for ref in new_cite_refs ] logger.warning(__( 'inconsistent citation references in translated message.' 
+ ' original: {0}, translated: {1}').format( old_cite_ref_rawsources, new_cite_ref_rawsources), location=node) for oldc in old_cite_refs: refname_ids_map.setdefault(oldc["refname"], []).append(oldc["ids"]) for newc in new_cite_refs: refname = newc["refname"] if refname_ids_map.get(refname): newc["ids"] = refname_ids_map[refname].pop() # Original pending_xref['reftarget'] contain not-translated # target name, new pending_xref must use original one. # This code restricts to change ref-targets in the translation. old_xrefs = node.traverse(addnodes.pending_xref) new_xrefs = patch.traverse(addnodes.pending_xref) xref_reftarget_map = {} if len(old_xrefs) != len(new_xrefs): old_xref_rawsources = [xref.rawsource for xref in old_xrefs] new_xref_rawsources = [xref.rawsource for xref in new_xrefs] logger.warning( __('inconsistent term references in translated message.' + ' original: {0}, translated: {1}').format( old_xref_rawsources, new_xref_rawsources), location=node) def get_ref_key(node): # type: (addnodes.pending_xref) -> Tuple[unicode, unicode, unicode] case = node["refdomain"], node["reftype"] if case == ('std', 'term'): return None else: return ( node["refdomain"], node["reftype"], node['reftarget'], )
msgstr_parts = [] for part in msg_parts: msgstr = catalog.gettext(part) # type: ignore if not msgstr: msgstr = part msgstr_parts.append(msgstr) new_entries.append( (type, ';'.join(msgstr_parts), tid, main, None)) node['raw_entries'] = entries node['entries'] = new_entries # remove translated attribute that is used for avoiding double translation. for translated in self.document.traverse( NodeMatcher(translated=Any)): # type: nodes.Element # NOQA translated.delattr('translated') class RemoveTranslatableInline(SphinxTransform): """ Remove inline nodes used for translation as placeholders. """ default_priority = 999 def apply(self, **kwargs): # type: (Any) -> None from sphinx.builders.gettext import MessageCatalogBuilder if isinstance(self.app.builder, MessageCatalogBuilder): return
def apply(self, **kwargs: Any) -> None:
    """Stamp every target node with the name of the document it came from."""
    for element in self.document.findall(NodeMatcher(*self.TARGET_NODES)):  # type: Element
        element['docname'] = self.env.docname
def apply(self, **kwargs) -> None:
    """Record the current docname on each matched target node."""
    matcher = NodeMatcher(*self.TARGET_NODES)
    for element in self.document.traverse(matcher):  # type: nodes.Element
        element['docname'] = self.env.docname
for type, msg, tid, main, key_ in entries: msg_parts = split_index_msg(type, msg) msgstr_parts = [] for part in msg_parts: msgstr = catalog.gettext(part) if not msgstr: msgstr = part msgstr_parts.append(msgstr) new_entries.append((type, ';'.join(msgstr_parts), tid, main, None)) node['raw_entries'] = entries node['entries'] = new_entries # remove translated attribute that is used for avoiding double translation. for translated in self.document.traverse(NodeMatcher(translated=Any)): # type: Element # NOQA translated.delattr('translated') class RemoveTranslatableInline(SphinxTransform): """ Remove inline nodes used for translation as placeholders. """ default_priority = 999 def apply(self, **kwargs: Any) -> None: from sphinx.builders.gettext import MessageCatalogBuilder if isinstance(self.app.builder, MessageCatalogBuilder): return matcher = NodeMatcher(nodes.inline, translatable=Any)
def run(self):
    """Replace 'dropdown'-typed containers with dropdown_main/dropdown_title
    card nodes, with open/closed chevron markers.
    """
    matcher = NodeMatcher(nodes.container, type="dropdown")
    for node in self.document.traverse(matcher):
        open_marker = nodes.container(
            "",
            nodes.raw("", nodes.Text(get_opticon("chevron-up", size=24)),
                      format="html"),
            is_div=True,
            classes=["summary-up"],
        )
        closed_marker = nodes.container(
            "",
            nodes.raw("", nodes.Text(get_opticon("chevron-down", size=24)),
                      format="html"),
            is_div=True,
            classes=["summary-down"],
        )
        newnode = dropdown_main(
            opened=node["opened"],
            classes=["sphinx-bs", "dropdown", "card"] + node["container_classes"],
        )
        if node["has_title"]:
            # first child is the title; the rest is the body
            title_children = node[0]
            body_children = node[1:]
        else:
            title_children = [
                nodes.raw(
                    "...",
                    nodes.Text(
                        KEBAB
                        # Note the custom opticon here has thicker dots
                        # get_opticon("kebab-horizontal", classes="no-title",
                        #             size=24)
                    ),
                    format="html",
                )
            ]
            body_children = node
        newnode += dropdown_title(
            "",
            "",
            *title_children,
            closed_marker,
            open_marker,
            classes=["summary-title", "card-header"] + node["title_classes"])
        body_node = nodes.container(
            "",
            *body_children,
            is_div=True,
            classes=["summary-content", "card-body"] + node["body_classes"])
        for para in body_node.traverse(nodes.paragraph):
            # Fix: the original ternary was inverted
            # ("[] if 'classes' in para else para['classes']") and so
            # discarded any pre-existing paragraph classes.
            para["classes"] = (para["classes"] if "classes" in para else []) + ["card-text"]
        newnode += body_node
        # newnode += open_marker
        node.replace_self(newnode)
def build_devhelp(self, outdir, outname):
    # type: (unicode, unicode) -> None
    """Write a gzipped GNOME Devhelp index (<outname>.devhelp.gz) to *outdir*."""
    logger.info(__('dumping devhelp index...'))

    # Basic info
    root = etree.Element('book',
                         title=self.config.html_title,
                         name=self.config.project,
                         link="index.html",
                         version=self.config.version)
    tree = etree.ElementTree(root)

    # TOC
    chapters = etree.SubElement(root, 'chapters')

    tocdoc = self.env.get_and_resolve_doctree(self.config.master_doc, self,
                                              prune_toctrees=False)

    def write_toc(node, parent):
        # type: (nodes.Node, nodes.Node) -> None
        # compact paragraphs / bullet lists only group their children;
        # list items become <sub> elements; references fill in the
        # enclosing element's link and name attributes
        if isinstance(node, addnodes.compact_paragraph) or \
           isinstance(node, nodes.bullet_list):
            for subnode in node:
                write_toc(subnode, parent)
        elif isinstance(node, nodes.list_item):
            item = etree.SubElement(parent, 'sub')
            for subnode in node:
                write_toc(subnode, item)
        elif isinstance(node, nodes.reference):
            parent.attrib['link'] = node['refuri']
            parent.attrib['name'] = node.astext()

    matcher = NodeMatcher(addnodes.compact_paragraph, toctree=Any)
    for node in tocdoc.traverse(matcher):
        write_toc(node, chapters)

    # Index
    functions = etree.SubElement(root, 'functions')
    index = IndexEntries(self.env).create_index(self)

    def write_index(title, refs, subitems):
        # type: (unicode, List[Any], Any) -> None
        # single-target entries emit one <function>; multi-target entries
        # are disambiguated with a "[n]" prefix
        if len(refs) == 0:
            pass
        elif len(refs) == 1:
            etree.SubElement(functions, 'function',
                             name=title, link=refs[0][1])
        else:
            for i, ref in enumerate(refs):
                etree.SubElement(functions, 'function',
                                 name="[%d] %s" % (i, title),
                                 link=ref[1])

        if subitems:
            # strip a trailing "(...)" qualifier before composing sub-entries
            parent_title = re.sub(r'\s*\(.*\)\s*$', '', title)
            for subitem in subitems:
                write_index("%s %s" % (parent_title, subitem[0]),
                            subitem[1], [])

    for (key, group) in index:
        for title, (refs, subitems, key) in group:
            write_index(title, refs, subitems)

    # Dump the XML file
    xmlfile = path.join(outdir, outname + '.devhelp.gz')
    with gzip.open(xmlfile, 'w') as f:  # type: ignore
        tree.write(f, 'utf-8')
def apply(self):
    """Annotate every target node with the originating document name."""
    matcher = NodeMatcher(*self.TARGET_NODES)
    for matched in self.document.traverse(matcher):
        matched['docname'] = self.env.docname
def apply(self, **kwargs: Any) -> None:
    """Give tables and figures a default 'align' attribute when unset."""
    for element in self.document.traverse(NodeMatcher(nodes.table, nodes.figure)):  # type: Element
        element.setdefault('align', 'default')
def build_hhx(self, outdir, outname):
    # type: (str, str) -> None
    """Write the HTML Help Workshop files: the stopword list (.stp), the
    project file (.hhp) with its file listing, the TOC (.hhc) and the
    keyword index (.hhk).
    """
    logger.info(__('dumping stopword list...'))
    with self.open_file(outdir, outname + '.stp') as f:
        for word in sorted(stopwords):
            print(word, file=f)

    logger.info(__('writing project file...'))
    with self.open_file(outdir, outname + '.hhp') as f:
        f.write(project_template % {
            'outname': outname,
            'title': self.config.html_title,
            'version': self.config.version,
            'project': self.config.project,
            'lcid': self.lcid,
            'master_doc': self.config.master_doc + self.out_suffix
        })
        if not outdir.endswith(os.sep):
            outdir += os.sep
        olen = len(outdir)
        # list every generated HTML page (plus JS files under _static),
        # with backslash-separated paths as HTML Help expects
        for root, dirs, files in os.walk(outdir):
            dirs.sort()
            files.sort()
            staticdir = root.startswith(path.join(outdir, '_static'))
            for fn in sorted(files):
                if (staticdir and not fn.endswith('.js')) or \
                   fn.endswith('.html'):
                    print(path.join(root, fn)[olen:].replace(os.sep, '\\'),
                          file=f)

    logger.info(__('writing TOC file...'))
    with self.open_file(outdir, outname + '.hhc') as f:
        f.write(contents_header)
        # special books
        f.write('<LI> ' + object_sitemap % (self.config.html_short_title,
                                            self.config.master_doc + self.out_suffix))
        for indexname, indexcls, content, collapse in self.domain_indices:
            f.write('<LI> ' + object_sitemap % (indexcls.localname,
                                                '%s.html' % indexname))
        # the TOC
        tocdoc = self.env.get_and_resolve_doctree(self.config.master_doc,
                                                  self, prune_toctrees=False)

        def write_toc(node, ullevel=0):
            # type: (nodes.Node, int) -> None
            # list items become <LI> entries, references sitemap objects,
            # bullet lists nested <UL> levels (the outermost is implicit)
            if isinstance(node, nodes.list_item):
                f.write('<LI> ')
                for subnode in node:
                    write_toc(subnode, ullevel)
            elif isinstance(node, nodes.reference):
                link = node['refuri']
                title = chm_htmlescape(node.astext(), True)
                f.write(object_sitemap % (title, link))
            elif isinstance(node, nodes.bullet_list):
                if ullevel != 0:
                    f.write('<UL>\n')
                for subnode in node:
                    write_toc(subnode, ullevel + 1)
                if ullevel != 0:
                    f.write('</UL>\n')
            elif isinstance(node, addnodes.compact_paragraph):
                for subnode in node:
                    write_toc(subnode, ullevel)

        matcher = NodeMatcher(addnodes.compact_paragraph, toctree=True)
        for node in tocdoc.traverse(matcher):  # type: addnodes.compact_paragraph
            write_toc(node)
        f.write(contents_footer)

    logger.info(__('writing index file...'))
    index = IndexEntries(self.env).create_index(self)
    with self.open_file(outdir, outname + '.hhk') as f:
        f.write('<UL>\n')

        def write_index(title, refs, subitems):
            # type: (str, List[Tuple[str, str]], List[Tuple[str, List[Tuple[str, str]]]]) -> None  # NOQA
            def write_param(name, value):
                # type: (str, str) -> None
                item = ' <param name="%s" value="%s">\n' % (name, value)
                f.write(item)
            title = chm_htmlescape(title, True)
            f.write('<LI> <OBJECT type="text/sitemap">\n')
            write_param('Keyword', title)
            if len(refs) == 0:
                # no target at all: emit a "See Also" self-reference
                write_param('See Also', title)
            elif len(refs) == 1:
                write_param('Local', refs[0][1])
            else:
                for i, ref in enumerate(refs):
                    # XXX: better title?
                    write_param('Name', '[%d] %s' % (i, ref[1]))
                    write_param('Local', ref[1])
            f.write('</OBJECT>\n')
            if subitems:
                f.write('<UL> ')
                for subitem in subitems:
                    write_index(subitem[0], subitem[1], [])
                f.write('</UL>')

        for (key, group) in index:
            for title, (refs, subitems, key_) in group:
                write_index(title, refs, subitems)
        f.write('</UL>\n')
def run(self):
    """Replace 'dropdown'-typed containers with dropdown_main/dropdown_title
    card nodes, with open/closed chevron markers.
    """
    matcher = NodeMatcher(nodes.container, type="dropdown")
    for node in self.document.traverse(matcher):
        open_marker = nodes.container(
            "",
            nodes.raw(
                "",
                nodes.Text(
                    CHEVRON.format(color=node["marker_color"],
                                   points="18 15 12 9 6 15")),
                format="html",
            ),
            is_div=True,
            classes=["summary-chevron-down"],
        )
        closed_marker = nodes.container(
            "",
            nodes.raw(
                "",
                nodes.Text(
                    CHEVRON.format(color=node["marker_color"],
                                   points="6 9 12 15 18 9")),
                format="html",
            ),
            is_div=True,
            classes=["summary-chevron-up"],
        )
        newnode = dropdown_main(
            opened=node["opened"],
            classes=["sphinx-bs", "dropdown", "card"] + node["container_classes"],
        )
        if node["has_title"]:
            # first child is the title; the rest is the body
            title_children = node[0]
            body_children = node[1:]
        else:
            title_children = [
                nodes.raw("...", nodes.Text(ELLIPSIS), format="html")
            ]
            body_children = node
        newnode += dropdown_title(
            "",
            "",
            *title_children,
            closed_marker,
            open_marker,
            classes=["summary-title", "card-header"] + node["title_classes"])
        body_node = nodes.container(
            "",
            *body_children,
            is_div=True,
            classes=["summary-content", "card-body"] + node["body_classes"])
        for para in body_node.traverse(nodes.paragraph):
            # Fix: the original ternary was inverted
            # ("[] if 'classes' in para else para['classes']") and so
            # discarded any pre-existing paragraph classes.
            para["classes"] = (para["classes"] if "classes" in para else []) + ["card-text"]
        newnode += body_node
        # newnode += open_marker
        node.replace_self(newnode)