def nav_tree(context, request, root_depth, klass): """render navtree structure, root_depth from root""" # get the root accoding to root_depth if isinstance(context, Folder): parents = [context] else: parents = [] current = context.__parent__ while current.__parent__: parents.insert(0, current) current = current.__parent__ # 超界 if len(parents) < root_depth + 1: return '' root = parents[root_depth] nodes = [] parent_paths = [obj.vpath for obj in parents] for obj in root.values(True, True): is_active = obj.vpath in parent_paths or obj.vpath == context.vpath nodes.append( nav_item_template.substitute( class_str = 'active' if is_active else '', node_url = resource_url(obj, request), node_title = obj.title, )) nav_items = ''.join(nodes) return nav_root_template.substitute(ul_class=klass, nav_items=nav_items)
def nav_tree(context, request, root_depth, klass): """render navtree structure, root_depth from root""" # get the root accoding to root_depth if isinstance(context, Folder): parents = [context] else: parents = [] current = context.__parent__ while current.__parent__: parents.insert(0, current) current = current.__parent__ # 超界 if len(parents) < root_depth + 1: return '' root = parents[root_depth] nodes = [] parent_paths = [obj.vpath for obj in parents] for obj in root.values(True, True): is_active = obj.vpath in parent_paths or obj.vpath == context.vpath nodes.append( nav_item_template.substitute( class_str='active' if is_active else '', node_url=obj.url(request), node_title=obj.title, )) nav_items = ''.join(nodes) return nav_root_template.substitute(ul_class=klass, nav_items=nav_items)
def run(self):
    # type: () -> List[nodes.Node]
    """Build the autosummary table; with :toctree:, append a hidden toctree.

    Returns the table nodes plus, when the ``toctree`` option is present,
    an ``autosummary_toc`` node listing the stub documents.
    """
    self.bridge = DocumenterBridge(self.env, self.state.document.reporter, Options(), self.lineno)
    # One entry per non-empty content line; a leading '~' requests the
    # short display name.
    names = [x.strip().split()[0] for x in self.content
             if x.strip() and re.search(r'^[~a-zA-Z_]', x.strip()[0])]
    items = self.get_items(names)
    nodes = self.get_table(items)
    if 'toctree' in self.options:
        dirname = posixpath.dirname(self.env.docname)
        tree_prefix = self.options['toctree'].strip()
        docnames = []
        excluded = Matcher(self.config.exclude_patterns)
        for name, sig, summary, real_name in items:
            # Stub pages live under <toctree>/<real_name>, relative to here.
            docname = posixpath.join(tree_prefix, real_name)
            docname = posixpath.normpath(posixpath.join(dirname, docname))
            if docname not in self.env.found_docs:
                if excluded(self.env.doc2path(docname, None)):
                    logger.warning(__('toctree references excluded document %r'), docname)
                else:
                    logger.warning(__('toctree references unknown document %r'), docname)
            # NOTE: missing documents are still appended here (no `continue`).
            docnames.append(docname)
        tocnode = addnodes.toctree()
        tocnode['includefiles'] = docnames
        tocnode['entries'] = [(None, docn) for docn in docnames]
        tocnode['maxdepth'] = -1  # unlimited depth
        tocnode['glob'] = None
        nodes.append(autosummary_toc('', '', tocnode))
    return nodes
def run(self):
    # type: () -> List[nodes.Node]
    """Build the autosummary table and optional hidden toctree node.

    Older-style variant: warnings are accumulated on ``self.warnings`` and
    prepended to the returned node list.
    """
    self.genopt = Options()
    self.warnings = []  # type: List[nodes.Node]
    self.result = ViewList()
    # One entry per non-empty content line; '~' requests the short name.
    names = [x.strip().split()[0] for x in self.content
             if x.strip() and re.search(r'^[~a-zA-Z_]', x.strip()[0])]
    items = self.get_items(names)
    nodes = self.get_table(items)
    if 'toctree' in self.options:
        dirname = posixpath.dirname(self.env.docname)
        tree_prefix = self.options['toctree'].strip()
        docnames = []
        for name, sig, summary, real_name in items:
            docname = posixpath.join(tree_prefix, real_name)
            docname = posixpath.normpath(posixpath.join(dirname, docname))
            if docname not in self.env.found_docs:
                self.warn('toctree references unknown document %r' % docname)
            docnames.append(docname)
        tocnode = addnodes.toctree()
        tocnode['includefiles'] = docnames
        tocnode['entries'] = [(None, docn) for docn in docnames]
        tocnode['maxdepth'] = -1  # unlimited depth
        tocnode['glob'] = None
        tocnode = autosummary_toc('', '', tocnode)
        nodes.append(tocnode)
    return self.warnings + nodes
def run(self):
    """Build an article-overview listing for directory *d* (first argument).

    Crawls the directory, sorts articles newest-first, writes the RSS feed
    as a side effect, and returns one docutils section per article
    containing a linked title and the article description (minus any
    nested title node).
    """
    # get current dir
    d = self.arguments[0]
    art = crawl(d)
    # sort by date, newest first
    art = sorted(art, key=lambda k: k["date"], reverse=True)
    # create feed (side effect)
    rss(d, art)
    nodes = []
    for a in art:
        entry = docutils.nodes.section()
        title = docutils.nodes.title()
        link = docutils.nodes.reference()
        link += docutils.nodes.Text(a["title"])
        link["refuri"] = a["link"]
        title += link
        entry += title
        if a["desc"]:
            # Fix: the original did `del a["desc"].children[i]` while
            # enumerating the same list, which shifts indices and skips the
            # element following each removal. Rebuild the child list
            # without any 'title' nodes instead.
            a["desc"].children = [
                child for child in a["desc"].children
                if child.tagname != "title"
            ]
            entry += a["desc"]
        nodes.append(entry)
    return nodes
def run(self):
    # type: () -> List[nodes.Node]
    """Build the autosummary table and optional hidden toctree node.

    Caches the build environment on ``self.env``; warnings are accumulated
    on ``self.warnings`` and prepended to the result.
    """
    self.env = env = self.state.document.settings.env
    self.genopt = Options()
    self.warnings = []  # type: List[nodes.Node]
    self.result = ViewList()
    # One entry per non-empty content line; '~' requests the short name.
    names = [x.strip().split()[0] for x in self.content
             if x.strip() and re.search(r'^[~a-zA-Z_]', x.strip()[0])]
    items = self.get_items(names)
    nodes = self.get_table(items)
    if 'toctree' in self.options:
        dirname = posixpath.dirname(env.docname)
        tree_prefix = self.options['toctree'].strip()
        docnames = []
        for name, sig, summary, real_name in items:
            docname = posixpath.join(tree_prefix, real_name)
            docname = posixpath.normpath(posixpath.join(dirname, docname))
            if docname not in env.found_docs:
                self.warn('toctree references unknown document %r' % docname)
            docnames.append(docname)
        tocnode = addnodes.toctree()
        tocnode['includefiles'] = docnames
        tocnode['entries'] = [(None, docn) for docn in docnames]
        tocnode['maxdepth'] = -1  # unlimited depth
        tocnode['glob'] = None
        tocnode = autosummary_toc('', '', tocnode)
        nodes.append(tocnode)
    return self.warnings + nodes
def run(self):
    """Build the autosummary table and optional hidden toctree node.

    Fix: removed the unused local ``suffix`` (it was assigned from
    ``env.config.source_suffix`` but never read in this variant).
    """
    self.env = env = self.state.document.settings.env
    self.genopt = {}
    self.warnings = []
    # One entry per non-empty content line; '~' requests the short name.
    names = [x.strip().split()[0] for x in self.content
             if x.strip() and re.search(r'^[~a-zA-Z_]', x.strip()[0])]
    items = self.get_items(names)
    nodes = self.get_table(items)
    if 'toctree' in self.options:
        dirname = posixpath.dirname(env.docname)
        tree_prefix = self.options['toctree'].strip()
        docnames = []
        for name, sig, summary, real_name in items:
            docname = posixpath.join(tree_prefix, real_name)
            docname = posixpath.normpath(posixpath.join(dirname, docname))
            if docname not in env.found_docs:
                self.warn('toctree references unknown document %r' % docname)
            docnames.append(docname)
        tocnode = addnodes.toctree()
        tocnode['includefiles'] = docnames
        tocnode['entries'] = [(None, docname) for docname in docnames]
        tocnode['maxdepth'] = -1  # unlimited depth
        tocnode['glob'] = None
        tocnode = autosummary_toc('', '', tocnode)
        nodes.append(tocnode)
    return self.warnings + nodes
def run(self):
    """Split a literate-Python script into alternating reST and code blocks.

    Doc-comment lines are parsed as reStructuredText (with ``sh`` roles
    gaining "(view log)" links); everything else becomes a line-numbered
    ``literal_block``. Returns the resulting docutils nodes.

    Fixes: ``logger.warn`` (deprecated alias) replaced with
    ``logger.warning``; removed the unused local ``prevtype``.
    """
    logger = sphinx.util.logging.getLogger(__name__)
    nodes = []
    src = project_root/self.arguments[0]
    with src.open('r', encoding='utf-8') as f:
        lines = LineIter(f)
        # Skip a shebang line, if present.
        if lines and lines.peek.startswith('#!'):
            next(lines)
        while lines:
            if lines.peek.rstrip('\n') == '':
                next(lines)
            elif self._isdocline(lines.peek):
                # Collect all doc lines.
                contents = docutils.statemachine.ViewList()
                while lines and self._isdocline(lines.peek):
                    i, line = next(lines)
                    contents.append(line.lstrip()[2:], self.arguments[0], i)
                # Parse as rst into `node`.
                with sphinx.util.docutils.switch_source_input(self.state, contents):
                    node = docutils.nodes.container()
                    self.state.nested_parse(contents, 0, node)
                # Process sh roles. Add links to logs.
                for sh_node in node.traverse(docutils.nodes.literal):
                    if 'nutils_sh' not in sh_node:
                        continue
                    cmdline = sh_node.get('nutils_sh')
                    cmdline_parts = tuple(shlex.split(cmdline))
                    # Only commands that run this very script get a log link.
                    if cmdline_parts[:2] != ('python3', src.name):
                        logger.warning('Not creating a log for {}.'.format(cmdline))
                        continue
                    log_link = sphinx.addnodes.only(expr='html')
                    log_link.append(docutils.nodes.inline('', ' '))
                    xref = sphinx.addnodes.pending_xref(
                        '', reftype='nutils-log', refdomain='std',
                        reftarget=cmdline_parts[2:], script=src)
                    xref += docutils.nodes.inline('', '(view log)', classes=['nutils-log-link'])
                    log_link += xref
                    sh_node.parent.insert(sh_node.parent.index(sh_node)+1, log_link)
                nodes.extend(node.children)
            else:
                # Collect all source lines.
                istart, line = next(lines)
                contents = [line]
                while lines and not self._isdocline(lines.peek):
                    i, line = next(lines)
                    contents.append(line)
                # Remove trailing empty lines.
                while contents and contents[-1].rstrip('\n') == '':
                    del contents[-1]
                contents = ''.join(contents)
                # Create literal block with line numbers anchored at the
                # block's position in the source file.
                literal = docutils.nodes.literal_block(contents, contents)
                literal['language'] = 'python3'
                literal['linenos'] = True
                literal['highlight_args'] = dict(linenostart=istart+1)
                sphinx.util.nodes.set_source_info(self, literal)
                nodes.append(literal)
    return nodes
def run(self):
    """Handle the command-block / download directives.

    Builds the display node(s), optionally executes the commands in a
    per-tutorial working directory, and appends stream output and
    artifact/visualization links for command blocks.

    Fixes: ``True if … else False`` replaced by the bare comparison;
    the working directory is computed once instead of twice.
    """
    global CURRENT_TUTORIAL
    global CURRENT_WORKING_DIR
    command_mode = self.name == 'command-block'
    opts = self.options
    download_opts = [k in opts for k in ['url', 'saveas']]
    if command_mode:
        self.assert_has_content()
        if any(download_opts):
            raise sphinx.errors.ExtensionError('command-block does not '
                                               'support the following '
                                               'options: `url`, `saveas`.')
        commands = functools.reduce(self._parse_multiline_commands,
                                    self.content, [])
        nodes = [self._get_literal_block_node(self.content)]
    else:
        if self.content:
            raise sphinx.errors.ExtensionError('Content block not '
                                               'supported for the '
                                               'download directive.')
        if not all(download_opts):
            raise sphinx.errors.ExtensionError('Missing options for the '
                                               'download directive. '
                                               'Please specify `url` and '
                                               '`saveas`.')
        # Downloads are modelled as a wget command plus a download node.
        commands = ['wget -O "%s" "%s"' % (opts['saveas'], opts['url'])]
        id_ = self.state.document.settings.env.new_serialno('download')
        nodes = [download_node(id_, opts['url'], opts['saveas'])]
    env = self._get_env()
    # Execution is skipped when globally disabled (except on the debug
    # page) or when the block opts out with `no-exec`.
    if not ((env.config.command_block_no_exec and
             env.config.debug_page != env.docname) or 'no-exec' in opts):
        working_dir = os.path.join(env.app.command_block_working_dir.name,
                                   env.docname)
        if env.docname != CURRENT_TUTORIAL:
            # First block of a new tutorial: reset the shared working dir.
            CURRENT_TUTORIAL = env.docname
            CURRENT_WORKING_DIR = working_dir
        os.makedirs(working_dir, exist_ok=True)
        completed_processes = self._execute_commands(commands, working_dir)
        if command_mode:
            for stream_type in ['stdout', 'stderr']:
                if stream_type in opts:
                    node = self._get_stream_node(completed_processes, stream_type)
                    if node is not None:
                        nodes.extend(node)
            artifacts, visualizations = self._get_output_paths(working_dir)
            if artifacts or visualizations:
                nodes.append(
                    self._get_output_links_node(artifacts, visualizations))
    return nodes
def fa(role, rawtext, text, lineno, inliner, options=None, content=None):
    """docutils role: expand a comma-separated icon list into faicon nodes.

    Fix: mutable default arguments (``{}`` / ``[]``) replaced with ``None``
    sentinels — a classic shared-mutable-default pitfall (the arguments are
    unused here, but the signature is part of the docutils role contract).

    NOTE(review): ``key`` is not defined in this view — presumably bound in
    an enclosing factory scope; verify before reuse.
    """
    options = {} if options is None else options
    content = [] if content is None else content
    nodes = []
    if key:
        nodes.append(faicon(key, fa_name=key))
    else:
        for x in text.split(","):
            nodes.append(faicon(x, fa_name=x))
    return nodes, []
def run(self) -> List[Node]:
    """Build the autosummary table; with :toctree:, append a hidden toctree
    of the generated stub pages (honouring ``autosummary_filename_map``).

    Missing or excluded stub documents are warned about and skipped.
    """
    self.bridge = DocumenterBridge(self.env, self.state.document.reporter,
                                   Options(), self.lineno, self.state)
    # One entry per non-empty content line; '~' requests the short name.
    names = [
        x.strip().split()[0] for x in self.content
        if x.strip() and re.search(r"^[~a-zA-Z_]", x.strip()[0])
    ]
    items = self.get_items(names)
    nodes = self.get_table(items)
    if "toctree" in self.options:
        dirname = posixpath.dirname(self.env.docname)
        tree_prefix = self.options["toctree"].strip()
        docnames = []
        excluded = Matcher(self.config.exclude_patterns)
        filename_map = self.config.autosummary_filename_map
        for name, sig, summary, real_name, _ in items:
            real_name = filename_map.get(real_name, real_name)
            docname = posixpath.join(tree_prefix, real_name)
            docname = posixpath.normpath(posixpath.join(dirname, docname))
            if docname not in self.env.found_docs:
                if excluded(self.env.doc2path(docname, None)):
                    msg = __(
                        "autosummary references excluded document %r. Ignored."
                    )
                else:
                    msg = __("autosummary: stub file not found %r. "
                             "Check your autosummary_generate setting.")
                logger.warning(msg, real_name, location=self.get_source_info())
                continue  # missing stubs are skipped, not linked
            docnames.append(docname)
        if docnames:
            tocnode = addnodes.toctree()
            tocnode["includefiles"] = docnames
            tocnode["entries"] = [(None, docn) for docn in docnames]
            tocnode["maxdepth"] = -1  # unlimited depth
            tocnode["glob"] = None
            tocnode["caption"] = self.options.get("caption")
            nodes.append(autosummary_toc("", "", tocnode))
    if "toctree" not in self.options and "caption" in self.options:
        logger.warning(
            __("A captioned autosummary requires :toctree: option. ignored."),
            location=nodes[-1],
        )
    return nodes
def run(self) -> List[Node]:
    """Build the autosummary table; with :toctree:, append a hidden toctree
    of the generated stub pages (honouring ``autosummary_filename_map``).

    Newer variant: ``doc2path(..., False)`` and ``self.get_location()``.
    Missing or excluded stub documents are warned about and skipped.
    """
    self.bridge = DocumenterBridge(self.env, self.state.document.reporter,
                                   Options(), self.lineno, self.state)
    # One entry per non-empty content line; '~' requests the short name.
    names = [
        x.strip().split()[0] for x in self.content
        if x.strip() and re.search(r'^[~a-zA-Z_]', x.strip()[0])
    ]
    items = self.get_items(names)
    nodes = self.get_table(items)
    if 'toctree' in self.options:
        dirname = posixpath.dirname(self.env.docname)
        tree_prefix = self.options['toctree'].strip()
        docnames = []
        excluded = Matcher(self.config.exclude_patterns)
        filename_map = self.config.autosummary_filename_map
        for _name, _sig, _summary, real_name in items:
            real_name = filename_map.get(real_name, real_name)
            docname = posixpath.join(tree_prefix, real_name)
            docname = posixpath.normpath(posixpath.join(dirname, docname))
            if docname not in self.env.found_docs:
                if excluded(self.env.doc2path(docname, False)):
                    msg = __(
                        'autosummary references excluded document %r. Ignored.'
                    )
                else:
                    msg = __('autosummary: stub file not found %r. '
                             'Check your autosummary_generate setting.')
                logger.warning(msg, real_name, location=self.get_location())
                continue  # missing stubs are skipped, not linked
            docnames.append(docname)
        if docnames:
            tocnode = addnodes.toctree()
            tocnode['includefiles'] = docnames
            tocnode['entries'] = [(None, docn) for docn in docnames]
            tocnode['maxdepth'] = -1  # unlimited depth
            tocnode['glob'] = None
            tocnode['caption'] = self.options.get('caption')
            nodes.append(autosummary_toc('', '', tocnode))
    if 'toctree' not in self.options and 'caption' in self.options:
        logger.warning(__(
            'A captioned autosummary requires :toctree: option. ignored.'),
            location=nodes[-1])
    return nodes
def create_template_node(self, decl):
    """Build a ``desc_signature`` node for the ``template <...>`` part of
    *decl*, or return ``None`` when it has no template parameter list."""
    if not decl.templateparamlist:
        return None
    factory = self.node_factory
    keyword = 'template '
    parts = [factory.desc_annotation(keyword, keyword), factory.Text('<')]
    parts += self.render(decl.templateparamlist)
    parts.append(factory.Text(">"))
    signature = factory.desc_signature()
    signature.extend(parts)
    return signature
def process_tree(self, tree):
    """Partition the (type, item) pairs produced for *tree*.

    Returns a ``(nodes, names, placeholders)`` tuple; an unknown type tag
    is a programming error and trips an assertion.
    """
    self._prev_token = None
    buckets = {"node": [], "name": [], "placeholder": []}
    for typ, item in self._process_nodes(tree):
        assert typ in buckets
        buckets[typ].append(item)
    return (buckets["node"], buckets["name"], buckets["placeholder"])
def fa(role, rawtext, text, lineno, inliner, options=None, content=None):
    """docutils role: expand a comma-separated icon list into faicon nodes,
    attaching inline SVG data for icons found in ``custom_icons``.

    Fix: mutable default arguments (``{}`` / ``[]``) replaced with ``None``
    sentinels — a classic shared-mutable-default pitfall (the arguments are
    unused here, but the signature is part of the docutils role contract).

    NOTE(review): ``key`` is not defined in this view — presumably bound in
    an enclosing factory scope; verify before reuse.
    """
    options = {} if options is None else options
    content = [] if content is None else content
    nodes = []
    if key:
        nodes.append(faicon(key, fa_name=key))
    else:
        for x in text.split(","):
            args = {'fa_name': x}
            if x in custom_icons:
                args['svg_data'] = custom_icons[x]
            nodes.append(faicon(x, **args))
    return nodes, []
def title(self, node):
    """Render a declaration title: the optional type (variable type or
    function return type), a separating space, then the name."""
    parts = list(self.render_optional(node.type_))
    if parts:
        parts.append(self.node_factory.Text(" "))
    parts.append(self.node_factory.desc_name(text=node.name))
    return parts
def run(self) -> List[Node]:
    """Emit a hidden toctree entry for the fibre type named by the first
    directive argument (dots in the name become underscores)."""
    # TODO: generate table
    stub = 'fibre_types/' + self.arguments[0].replace('.', '_')
    docnames = [stub]
    tocnode = addnodes.toctree()
    for key, value in (('includefiles', docnames),
                       ('entries', [(None, name) for name in docnames]),
                       ('maxdepth', -1),
                       ('glob', None),
                       ('caption', self.options.get('caption'))):
        tocnode[key] = value
    return [fibresummary_toc('', '', tocnode)]
def run(self):
    # type: () -> List[nodes.Node]
    """Build the autosummary table; with :toctree:, append a hidden toctree
    of the generated stub pages.

    Missing or excluded stub documents are warned about (with source
    location) and skipped.
    """
    self.bridge = DocumenterBridge(self.env, self.state.document.reporter,
                                   Options(), self.lineno, self.state)
    # One entry per non-empty content line; '~' requests the short name.
    names = [
        x.strip().split()[0] for x in self.content
        if x.strip() and re.search(r'^[~a-zA-Z_]', x.strip()[0])
    ]
    items = self.get_items(names)
    nodes = self.get_table(items)
    if 'toctree' in self.options:
        dirname = posixpath.dirname(self.env.docname)
        tree_prefix = self.options['toctree'].strip()
        docnames = []
        excluded = Matcher(self.config.exclude_patterns)
        for name, sig, summary, real_name in items:
            docname = posixpath.join(tree_prefix, real_name)
            docname = posixpath.normpath(posixpath.join(dirname, docname))
            if docname not in self.env.found_docs:
                location = self.state_machine.get_source_and_line(self.lineno)
                if excluded(self.env.doc2path(docname, None)):
                    msg = __(
                        'autosummary references excluded document %r. Ignored.'
                    )
                else:
                    msg = __('autosummary: stub file not found %r. '
                             'Check your autosummary_generate setting.')
                logger.warning(msg, real_name, location=location)
                continue  # missing stubs are skipped, not linked
            docnames.append(docname)
        if docnames:
            tocnode = addnodes.toctree()
            tocnode['includefiles'] = docnames
            tocnode['entries'] = [(None, docn) for docn in docnames]
            tocnode['maxdepth'] = -1  # unlimited depth
            tocnode['glob'] = None
            nodes.append(autosummary_toc('', '', tocnode))
    return nodes
def run(self):
    """Run the console block as a doctest and emit the echoed source plus
    any matplotlib figures produced (inlined as data-URI SVG images).

    Doctest globals persist across console blocks of one document via
    ``document._console_globs``.
    """
    document = self.state.document
    env = document.settings.env
    nodes = []
    # Dedent the content by the smallest common indent.
    indent = min(len(line) - len(line.lstrip()) for line in self.content)
    code = ''.join(line[indent:] + '\n' for line in self.content)
    # Strip the '±spread' annotations from the displayed source.
    code_wo_spread = nutils.testing.FloatNeighborhoodOutputChecker.re_spread.sub(
        lambda m: m.group(0).split('±', 1)[0], code)
    literal = docutils.nodes.literal_block(code_wo_spread, code_wo_spread,
                                           classes=['console'])
    literal['language'] = 'python3'
    literal['linenos'] = False
    sphinx.util.nodes.set_source_info(self, literal)
    nodes.append(literal)
    import matplotlib.testing
    matplotlib.testing.setup()
    import matplotlib.pyplot
    parser = doctest.DocTestParser()
    runner = doctest.DocTestRunner(
        checker=nutils.testing.FloatNeighborhoodOutputChecker(),
        optionflags=doctest.ELLIPSIS)
    # Reuse globals from earlier console blocks in this document.
    globs = getattr(document, '_console_globs', {})
    test = parser.get_doctest(code, globs, 'test', env.docname, self.lineno)
    with treelog.set(self._console_log):
        failures, tries = runner.run(test, clear_globs=False)
    for fignum in matplotlib.pyplot.get_fignums():
        fig = matplotlib.pyplot.figure(fignum)
        with io.BytesIO() as f:
            fig.savefig(f, format='svg')
            # NOTE(review): `name` is computed but never used here.
            name = hashlib.sha1(f.getvalue()).hexdigest() + '.svg'
            uri = 'data:image/svg+xml;base64,{}'.format(
                base64.b64encode(f.getvalue()).decode())
            nodes.append(
                docutils.nodes.image('', uri=uri,
                                     alt='image generated by matplotlib'))
    matplotlib.pyplot.close('all')
    if failures:
        document.reporter.warning('doctest failed', line=self.lineno)
    document._console_globs = test.globs
    return nodes
def run(self):
    """Build the autosummary table and optional toctree node, stripping a
    source suffix from stub names for Sphinx-version compatibility.

    Fix: on Python 3 ``map()`` returns an iterator and comparing it to a
    list raises ``TypeError``; the version check now materialises the list
    before comparing.
    """
    self.env = env = self.state.document.settings.env
    self.genopt = {}
    self.warnings = []
    # One entry per non-empty content line; '~' requests the short name.
    names = [
        x.strip().split()[0] for x in self.content
        if x.strip() and re.search(r'^[~a-zA-Z_]', x.strip()[0])
    ]
    items = self.get_items(names)
    nodes = self.get_table(items)
    if 'toctree' in self.options:
        suffixes = env.config.source_suffix
        # adapt to a change with sphinx 1.3:
        # for sphinx >= 1.3 env.config.source_suffix is a list
        # see sphinx-doc/sphinx@bf3bdcc7f505a2761c0e83c9b1550e7206929f74
        if list(map(int, sphinx.__version__.split(".")[:2])) < [1, 3]:
            suffixes = [suffixes]
        dirname = posixpath.dirname(env.docname)
        tree_prefix = self.options['toctree'].strip()
        docnames = []
        for name, sig, summary, real_name in items:
            docname = posixpath.join(tree_prefix, real_name)
            # Strip the first matching source suffix, if any.
            for suffix in suffixes:
                if docname.endswith(suffix):
                    docname = docname[:-len(suffix)]
                    break
            docname = posixpath.normpath(posixpath.join(dirname, docname))
            if docname not in env.found_docs:
                self.warn('toctree references unknown document %r' % docname)
            docnames.append(docname)
        tocnode = addnodes.toctree()
        tocnode['includefiles'] = docnames
        tocnode['entries'] = [(None, docname_) for docname_ in docnames]
        tocnode['maxdepth'] = -1  # unlimited depth
        tocnode['glob'] = None
        tocnode = autosummary_toc('', '', tocnode)
        nodes.append(tocnode)
    return self.warnings + nodes
def run(self):
    """Render the directive body with plantuml and collect result nodes.

    NOTE(review): Python 2 code (``except Exception, exc`` syntax); this
    chunk ends inside the function — the success path and ``return`` are
    not visible here.
    """
    source = self.state_machine.input_lines.source(
        self.lineno - self.state_machine.input_offset - 1)
    source_dir = os.path.dirname(os.path.abspath(source))
    source_dir = utils.relative_path(None, source_dir)
    # Images are emitted into output/images.
    path = os.path.abspath(os.path.join('output', 'images'))
    if not os.path.exists(path):
        os.makedirs(path)
    nodes = []
    body = '\n'.join(self.content)
    # Write the UML source to a temp file for the plantuml CLI.
    tf = tempfile.NamedTemporaryFile(delete=True)
    tf.write('@startuml\n')
    tf.write(body.encode('utf8'))
    tf.write('\n@enduml')
    tf.flush()
    imgformat = self.options.get('format', 'png')
    if imgformat == 'png':
        imgext = ".png"
        outopt = "-tpng"
    elif imgformat == 'svg':
        imgext = ".svg"
        outopt = "-tsvg"
    else:
        # NOTE(review): execution continues with imgext/outopt unset,
        # which raises NameError below — confirm intended handling.
        logger.error("Bad uml image format: " + imgformat)
    # make a name
    name = tf.name + imgext
    alt = self.options.get('alt', 'uml diagram')
    classes = self.options.pop('class', ['uml'])
    cmdline = ['plantuml', '-o', path, outopt, tf.name]
    try:
        p = Popen(cmdline, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
    except Exception, exc:
        error = self.state_machine.reporter.error(
            'Failed to run plantuml: %s' % (exc, ),
            literal_block(self.block_text, self.block_text),
            line=self.lineno)
        nodes.append(error)
def run(self):
    """Render the directive body with plantuml and collect result nodes.

    Near-duplicate of the sibling plantuml ``run()``; Python 2 code, and
    this chunk also ends inside the function (success path not visible).
    """
    source = self.state_machine.input_lines.source(
        self.lineno - self.state_machine.input_offset - 1)
    source_dir = os.path.dirname(os.path.abspath(source))
    source_dir = utils.relative_path(None, source_dir)
    # Images are emitted into output/images.
    path = os.path.abspath(os.path.join('output', 'images'))
    if not os.path.exists(path):
        os.makedirs(path)
    nodes = []
    body = '\n'.join(self.content)
    # Write the UML source to a temp file for the plantuml CLI.
    tf = tempfile.NamedTemporaryFile(delete=True)
    tf.write('@startuml\n')
    tf.write(body.encode('utf8'))
    tf.write('\n@enduml')
    tf.flush()
    imgformat = self.options.get('format', 'png')
    if imgformat == 'png':
        imgext = ".png"
        outopt = "-tpng"
    elif imgformat == 'svg':
        imgext = ".svg"
        outopt = "-tsvg"
    else:
        # NOTE(review): falls through with imgext/outopt unset -> NameError below.
        logger.error("Bad uml image format: "+imgformat)
    # make a name
    name = tf.name+imgext
    alt = self.options.get('alt', 'uml diagram')
    classes = self.options.pop('class', ['uml'])
    cmdline = ['plantuml', '-o', path, outopt, tf.name ]
    try:
        p = Popen(cmdline, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
    except Exception, exc:
        error = self.state_machine.reporter.error(
            'Failed to run plantuml: %s' % (exc, ),
            literal_block(self.block_text, self.block_text),
            line=self.lineno)
        nodes.append(error)
def extract_nodes(text, keys):
    """Split *text* on ``+marker+`` spans and substitute known keys.

    ``re.split`` keeps the captured group, so the token stream alternates
    between literal text and marker contents; ``++`` escapes a literal
    ``+``. Tokens present in the *keys* mapping are replaced by their
    mapped value, everything else passes through unchanged.

    Fixes: regex written as a raw string (the plain ``'\\+'`` escapes
    trigger DeprecationWarning on modern Python); the redundant second
    loop that merely copied ``result`` into ``nodes`` is gone.
    """
    tokens = filter(None, re.split(r'\+(\+|[^\+]*\+$|[^\+]+)', text))
    nodes = []
    for value in tokens:
        try:
            value = keys[value]
        except KeyError:
            pass  # not a known key: keep the token as literal text
        nodes.append(value)
    return nodes
def jupyter_result_list(self, results, stdout, **options):
    """Create a list of result nodes for a batch of Jupyter outputs.

    A single matplotlib object is delegated per-result to
    ``jupyter_results`` (which may update *stdout*); anything else is
    pretty-printed as one literal block. Returns ``(nodes, stdout)``.
    """
    nodes = []
    if results:
        if len(results) != 1 or 'matplotlib' not in results[0].__class__.__module__:
            # Generic case: pprint the whole result list into one block.
            stream = StringIO()
            pprint.pprint(results, stream=stream, indent=1, depth=4)
            literal = stream.getvalue()
            stream.close()
            node = docutils.nodes.literal_block(literal, literal)
            nodes.append(node)
        else:
            # Single matplotlib result: render via the per-result handler.
            for result in results:
                _nodes, stdout = self.jupyter_results(result, stdout, **options)
                nodes.extend(_nodes)
    return nodes, stdout
def run(self):
    """Autosummary run() with a ``hidden`` option that suppresses both the
    table and the toctree node (stub documents are still resolved and
    warned about so the toctree stays consistent).

    Fix: ``not 'hidden' in self.options`` rewritten as the idiomatic
    ``'hidden' not in self.options`` (PEP 8 / E713).
    """
    self.env = env = self.state.document.settings.env
    self.genopt = {}
    self.warnings = []
    # One entry per non-empty content line; '~' requests the short name.
    names = [
        x.strip().split()[0] for x in self.content
        if x.strip() and re.search(r'^[~a-zA-Z_]', x.strip()[0])
    ]
    items = self.get_items(names)
    if 'hidden' in self.options:
        nodes = []
    else:
        nodes = self.get_table(items)
    if 'toctree' in self.options:
        # NOTE(review): on Sphinx >= 1.3 source_suffix is a list and
        # str.endswith(list) raises TypeError — confirm the Sphinx
        # versions this variant targets.
        suffix = env.config.source_suffix
        dirname = posixpath.dirname(env.docname)
        tree_prefix = self.options['toctree'].strip()
        docnames = []
        for name, sig, summary, real_name in items:
            docname = posixpath.join(tree_prefix, real_name)
            if docname.endswith(suffix):
                docname = docname[:-len(suffix)]
            docname = posixpath.normpath(posixpath.join(dirname, docname))
            if docname not in env.found_docs:
                self.warn('toctree references unknown document %r' % docname)
            docnames.append(docname)
        tocnode = addnodes.toctree()
        tocnode['includefiles'] = docnames
        tocnode['entries'] = [(None, docname) for docname in docnames]
        tocnode['maxdepth'] = -1  # unlimited depth
        tocnode['glob'] = None
        tocnode = autosummary_toc('', '', tocnode)
        if 'hidden' not in self.options:
            nodes.append(tocnode)
    return self.warnings + nodes
def run(self):
    """Handle the command-block / download directives (simple variant:
    no per-tutorial global state, no stream-output nodes).

    Fixes: ``True if … else False`` replaced by the bare comparison;
    ``self.options`` is read through the local alias ``opts`` everywhere
    for consistency.
    """
    command_mode = self.name == 'command-block'
    opts = self.options
    download_opts = [k in opts for k in ['url', 'saveas']]
    if command_mode:
        self.assert_has_content()
        if any(download_opts):
            raise sphinx.errors.ExtensionError('command-block does not '
                                               'support the following '
                                               'options: `url`, `saveas`.')
        commands = functools.reduce(self._parse_multiline_commands,
                                    self.content, [])
        nodes = [self._get_literal_block_node(self.content)]
    else:
        if self.content:
            raise sphinx.errors.ExtensionError('Content block not '
                                               'supported for the '
                                               'download directive.')
        if not all(download_opts):
            raise sphinx.errors.ExtensionError('Missing options for the '
                                               'download directive. '
                                               'Please specify `url` and '
                                               '`saveas`.')
        # Downloads are modelled as a wget command plus a download node.
        commands = ['wget -O "%s" "%s"' % (opts['saveas'], opts['url'])]
        id_ = self.state.document.settings.env.new_serialno('download')
        nodes = [download_node(id_, opts['url'], opts['saveas'])]
    env = self._get_env()
    # Execution is skipped when globally disabled or with `no-exec`.
    if not (env.config.command_block_no_exec or 'no-exec' in opts):
        working_dir = os.path.join(env.app.command_block_working_dir.name,
                                   env.docname)
        os.makedirs(working_dir, exist_ok=True)
        self._execute_commands(commands, working_dir)
        if command_mode:
            artifacts, visualizations = self._get_output_paths(working_dir)
            if artifacts or visualizations:
                nodes.append(
                    self._get_output_links_node(artifacts, visualizations))
    return nodes
def run(self):
    """Build the autosummary table and optional toctree node, stripping a
    source suffix from stub names for Sphinx-version compatibility.

    Fix: on Python 3 ``map()`` returns an iterator and comparing it to a
    list raises ``TypeError``; the version check now materialises the list
    before comparing (same defect as the sibling variant above).
    """
    self.env = env = self.state.document.settings.env
    self.genopt = {}
    self.warnings = []
    # One entry per non-empty content line; '~' requests the short name.
    names = [x.strip().split()[0] for x in self.content
             if x.strip() and re.search(r'^[~a-zA-Z_]', x.strip()[0])]
    items = self.get_items(names)
    nodes = self.get_table(items)
    if 'toctree' in self.options:
        suffixes = env.config.source_suffix
        # adapt to a change with sphinx 1.3:
        # for sphinx >= 1.3 env.config.source_suffix is a list
        # see sphinx-doc/sphinx@bf3bdcc7f505a2761c0e83c9b1550e7206929f74
        if list(map(int, sphinx.__version__.split(".")[:2])) < [1, 3]:
            suffixes = [suffixes]
        dirname = posixpath.dirname(env.docname)
        tree_prefix = self.options['toctree'].strip()
        docnames = []
        for name, sig, summary, real_name in items:
            docname = posixpath.join(tree_prefix, real_name)
            # Strip the first matching source suffix, if any.
            for suffix in suffixes:
                if docname.endswith(suffix):
                    docname = docname[:-len(suffix)]
                    break
            docname = posixpath.normpath(posixpath.join(dirname, docname))
            if docname not in env.found_docs:
                self.warn('toctree references unknown document %r' % docname)
            docnames.append(docname)
        tocnode = addnodes.toctree()
        tocnode['includefiles'] = docnames
        tocnode['entries'] = [(None, docname_) for docname_ in docnames]
        tocnode['maxdepth'] = -1  # unlimited depth
        tocnode['glob'] = None
        tocnode = autosummary_toc('', '', tocnode)
        nodes.append(tocnode)
    return self.warnings + nodes
def visit_Text(self, node):
    """Replace ``@@formula@@`` spans in a text node with rendered math.

    Text nodes nested inside blacklisted node types are left untouched.
    """
    # Walk up the ancestry; skip text inside blacklisted containers.
    parent = node.parent
    while parent:
        if isinstance(parent, node_blacklist):
            return
        parent = parent.parent
    rawtext = node.rawsource
    data = rawtext.split("@@")
    if len(data) == 1:
        return  # no @@...@@ markers present
    nodes = []
    for i in range(len(data)):
        text = data[i]
        if i % 2 == 0:
            # Even chunks are the plain text between markers.
            nodes.append(Text(text))
        else:
            # SECURITY(review): evaluates document text as Python code via
            # eval(); only acceptable for fully trusted sources.
            formula = eval(text, pygrim.__dict__)
            latex = formula.latex()
            #nodes.append(literal(text, text))
            nodes.append(math(latex, Text(latex)))
            #nodes.append(math_block(latex, Text(latex)))
    node.parent.replace(node, nodes)
def apply(self):
    """Expand the pending bibliography node into formatted entry
    paragraphs (or nothing, for a hidden bibliography)."""
    # Get the document biblio database
    biblio = self.startnode.details['biblio']
    sort = self.startnode.details['sort']
    hidden = self.startnode.details['hidden']
    # Done with the bibliography
    self.document.settings.biblio = None
    # Hidden bibliography directives show no entries
    if hidden:
        self.startnode.replace_self([])
        return
    # List cited references
    bib_entries = list(zip(biblio.items, biblio.bibliography()))
    if sort == 'alpha':
        bib_entries.sort(key=lambda x: x[0].key)

    def link_format(x):
        # Anchor ids take the form 'bib-<key>'.
        return 'bib-' + x.lower()

    nodes = []
    for (itm, bibitem) in bib_entries:
        # Collapse newlines so the fragment parses as one paragraph.
        text = re.sub('\n', ' ', ''.join(bibitem))
        doc_item = parse_fragment(self.document.settings, text)
        entry_node = docutils.nodes.paragraph('', classes=['bibentry'],
                                              ids=[link_format(itm.key)])
        # Re-parent the parsed children under the entry paragraph.
        for child in doc_item.children[0].children:
            entry_node.setup_child(child)
            entry_node += child
        self.startnode.setup_child(entry_node)
        nodes.append(entry_node)
    self.startnode.replace_self(nodes)
def run(self):
    # type: () -> List[nodes.Node]
    """Build the autosummary table; with :toctree:, append a hidden
    toctree node.

    Distinguishes excluded from merely unknown stub documents in its
    warnings; missing documents are still appended (no ``continue``).
    """
    self.bridge = DocumenterBridge(self.env, self.state.document.reporter,
                                   Options(), self.lineno)
    # One entry per non-empty content line; '~' requests the short name.
    names = [x.strip().split()[0] for x in self.content
             if x.strip() and re.search(r'^[~a-zA-Z_]', x.strip()[0])]
    items = self.get_items(names)
    nodes = self.get_table(items)
    if 'toctree' in self.options:
        dirname = posixpath.dirname(self.env.docname)
        tree_prefix = self.options['toctree'].strip()
        docnames = []
        excluded = Matcher(self.config.exclude_patterns)
        for name, sig, summary, real_name in items:
            docname = posixpath.join(tree_prefix, real_name)
            docname = posixpath.normpath(posixpath.join(dirname, docname))
            if docname not in self.env.found_docs:
                if excluded(self.env.doc2path(docname, None)):
                    self.warn('toctree references excluded document %r' % docname)
                else:
                    self.warn('toctree references unknown document %r' % docname)
            docnames.append(docname)
        tocnode = addnodes.toctree()
        tocnode['includefiles'] = docnames
        tocnode['entries'] = [(None, docn) for docn in docnames]
        tocnode['maxdepth'] = -1  # unlimited depth
        tocnode['glob'] = None
        nodes.append(autosummary_toc('', '', tocnode))
    return nodes
def run(self):
    """Run the console block as a doctest and emit the echoed source plus
    any matplotlib figures produced (inlined as data-URI SVG images).

    Near-duplicate of the sibling console ``run()``; doctest globals
    persist across blocks via ``document._console_globs``.
    """
    document = self.state.document
    env = document.settings.env
    nodes = []
    # Dedent the content by the smallest common indent.
    indent = min(len(line)-len(line.lstrip()) for line in self.content)
    code = ''.join(line[indent:]+'\n' for line in self.content)
    # Strip the '±spread' annotations from the displayed source.
    code_wo_spread = nutils.testing.FloatNeighborhoodOutputChecker.re_spread.sub(
        lambda m: m.group(0).split('±', 1)[0], code)
    literal = docutils.nodes.literal_block(code_wo_spread, code_wo_spread,
                                           classes=['console'])
    literal['language'] = 'python3'
    literal['linenos'] = False
    sphinx.util.nodes.set_source_info(self, literal)
    nodes.append(literal)
    import matplotlib.testing
    matplotlib.testing.setup()
    import matplotlib.pyplot
    parser = doctest.DocTestParser()
    runner = doctest.DocTestRunner(
        checker=nutils.testing.FloatNeighborhoodOutputChecker(),
        optionflags=doctest.ELLIPSIS)
    # Reuse globals from earlier console blocks in this document.
    globs = getattr(document, '_console_globs', {})
    test = parser.get_doctest(code, globs, 'test', env.docname, self.lineno)
    with treelog.set(self._console_log):
        failures, tries = runner.run(test, clear_globs=False)
    for fignum in matplotlib.pyplot.get_fignums():
        fig = matplotlib.pyplot.figure(fignum)
        with io.BytesIO() as f:
            fig.savefig(f, format='svg')
            # NOTE(review): `name` is computed but never used here.
            name = hashlib.sha1(f.getvalue()).hexdigest()+'.svg'
            uri = 'data:image/svg+xml;base64,{}'.format(
                base64.b64encode(f.getvalue()).decode())
            nodes.append(docutils.nodes.image(
                '', uri=uri, alt='image generated by matplotlib'))
    matplotlib.pyplot.close('all')
    if failures:
        document.reporter.warning('doctest failed', line=self.lineno)
    document._console_globs = test.globs
    return nodes
def apply(self):
    """Replace the pending citation node with the parsed citation text.

    Fix: removed a second, redundant ``nodes = []`` assignment.
    """
    # NOTE(review): raw_cit is unused, but the lookup doubles as a check
    # that the detail key exists; kept to preserve behavior.
    raw_cit = self.startnode.details['raw_citation']
    cit = self.startnode.details['citation']
    biblio = self.startnode.details['biblio']

    def warn(cit_item):
        # Report on stderr so a missing reference doesn't abort the build.
        print('warning: citation reference not found for',
              cit_item.key, file=sys.stderr)

    cit_txt = biblio.cite(cit, warn)
    # Python 2/3 compatible text coercion.
    if sys.version_info >= (3, 0):
        txt = str(cit_txt)
    else:
        txt = unicode(cit_txt)
    nodes = []
    doc_item = parse_fragment(self.document.settings, txt)
    for child in doc_item.children[0].children:
        nodes.append(child)
    self.startnode.replace_self(nodes)
def run(self):
    """Render each blank-line-separated block of the directive content:
    echo the input as a literal, then the evaluated formula as display
    math (``$$…$$`` with wrapping disabled)."""
    set_classes(self.options)
    self.assert_has_content()
    # join lines, separate blocks
    content = '\n'.join(self.content).split('\n\n')
    nodes = []
    for block in content:
        nodes.append(Text("Input: "))
        nodes.append(literal(block, Text(block)))
        # SECURITY(review): evaluates directive content as Python code via
        # eval(); only acceptable for fully trusted documents.
        formula = eval(block, pygrim.__dict__)
        latex = formula.latex()
        latex = "$$" + latex + "$$"
        node = math_block(latex, Text(latex), **self.options)
        node.attributes['nowrap'] = True
        nodes.append(node)
    return nodes
# NOTE(review): stray triple-quote from the original chunk — it likely
# opens a commented-out region continuing past this view; kept verbatim.
'''
# renaming output image using an hash code, just to not pullate # output directory with a growing number of images name = os.path.join(path, os.path.basename(name)) newname = os.path.join(path, "%08x" % (adler32(body) & 0xffffffff)) + imgext try: # for Windows os.remove(newname) except Exception, exc: logger.debug('File ' + newname + ' does not exist, not deleted') os.rename(name, newname) url = global_siteurl + '/images/' + os.path.basename(newname) imgnode = image(uri=url, classes=classes, alt=alt) nodes.append(imgnode) else: error = self.state_machine.reporter.error( 'Error in "%s" directive: %s' % (self.name, err), literal_block(self.block_text, self.block_text), line=self.lineno) nodes.append(error) return nodes def custom_url(generator, metadata): global global_siteurl global_siteurl = generator.settings['SITEURL']
# NOTE(review): this chunk begins mid-function — it is the tail of a
# plantuml ``run()`` (Python 2) whose opening lines are outside this view;
# preserved verbatim below with reconstructed nesting.
    else:
        if p.returncode == 0:
            # renaming output image using an hash code, just to not pollute
            # output directory with a growing number of images
            name = os.path.join(path, os.path.basename(name))
            newname = os.path.join(path, "%08x" % (adler32(body) & 0xffffffff))+imgext
            try:
                # for Windows
                os.remove(newname)
            except Exception, exc:
                logger.debug('File '+newname+' does not exist, not deleted')
            os.rename(name, newname)
            # Build the public URL from the globally captured site URL.
            url = global_siteurl + '/images/' + os.path.basename(newname)
            imgnode = image(uri=url, classes=classes, alt=alt)
            nodes.append(imgnode)
        else:
            # plantuml exited non-zero: surface its stderr as a reST error.
            error = self.state_machine.reporter.error(
                'Error in "%s" directive: %s' % (self.name, err),
                literal_block(self.block_text, self.block_text),
                line=self.lineno)
            nodes.append(error)
    return nodes


def custom_url(generator, metadata):
    """Capture the site URL from Pelican's generator settings so image
    links can be made absolute."""
    global global_siteurl
    global_siteurl = generator.settings['SITEURL']


def register():
    """Plugin registration."""
    # NOTE(review): the function body continues past this chunk.
def run(self):
    """Render the directive body with the ``plantuml`` CLI and return an
    ``image`` node pointing at the generated file.

    The body is wrapped in ``@startuml``/``@enduml`` (with the optional
    module-level ``preamble`` prepended), written to a temporary file and
    fed to plantuml.  The output image is renamed to an adler32 hash of
    the source so repeated builds do not pollute ``output/images`` with a
    growing number of files.  On any failure a docutils error node is
    returned instead of an image.
    """
    path = os.path.abspath(os.path.join('output', 'images'))
    os.makedirs(path, exist_ok=True)  # race-free vs. exists()+makedirs()

    nodes = []
    body = '\n'.join(self.content)

    # :format: option selects the plantuml backend (default: png).
    imgformat = self.options.get('format', 'png')
    formats = {'png': ('.png', '-tpng'), 'svg': ('.svg', '-tsvg')}
    if imgformat not in formats:
        # BUG FIX: the original only logged here and then crashed with a
        # NameError on the undefined imgext/outopt — report a proper
        # docutils error node instead.
        logger.error("Bad uml image format: " + imgformat)
        error = self.state_machine.reporter.error(
            'Error in "%s" directive: unknown format %r'
            % (self.name, imgformat),
            literal_block(self.block_text, self.block_text),
            line=self.lineno)
        nodes.append(error)
        return nodes
    imgext, outopt = formats[imgformat]

    alt = self.options.get('alt', 'uml diagram')
    classes = self.options.pop('class', ['uml'])

    # delete=True removes the temp source when closed; keep it open while
    # plantuml reads it, and close it in the finally below (the original
    # never closed it, leaking the file descriptor until GC).
    tf = tempfile.NamedTemporaryFile(delete=True)
    try:
        tf.write('@startuml\n'.encode('utf8'))
        if preamble:
            tf.write((preamble + '\n').encode('utf8'))
        tf.write(body.encode('utf8'))
        tf.write('\n@enduml'.encode('utf8'))
        tf.flush()

        # plantuml writes its output next to the input as <input><ext>
        name = tf.name + imgext
        cmdline = ['plantuml', '-o', path, outopt, tf.name]
        try:
            p = Popen(cmdline, stdout=PIPE, stderr=PIPE)
            out, err = p.communicate()
        except Exception as exc:
            error = self.state_machine.reporter.error(
                'Failed to run plantuml: {}'.format(exc),
                literal_block(self.block_text, self.block_text),
                line=self.lineno)
            nodes.append(error)
        else:
            if p.returncode == 0:
                # renaming output image using an hash code, just to not
                # pollute output directory with a growing number of images
                name = os.path.join(path, os.path.basename(name))
                filename = "%08x" % (adler32(body.encode('utf8')) & 0xffffffff)
                newname = os.path.join(path, filename) + imgext
                try:
                    # os.rename refuses to overwrite on Windows
                    os.remove(newname)
                except OSError:
                    logger.debug('File ' + newname
                                 + ' does not exist, not deleted')
                os.rename(name, newname)
                # global_siteurl is captured from Pelican settings by
                # the custom_url() signal handler
                url = global_siteurl + '/images/' + os.path.basename(newname)
                imgnode = image(uri=url, classes=classes, alt=alt)
                nodes.append(imgnode)
            else:
                error = self.state_machine.reporter.error(
                    'Error in "%s" directive: %s' % (self.name, err),
                    literal_block(self.block_text, self.block_text),
                    line=self.lineno)
                nodes.append(error)
    finally:
        tf.close()  # delete=True deletes the temp source here
    return nodes
    # NOTE(review): the original also computed source/source_dir via
    # state_machine.input_lines + utils.relative_path but never used
    # them; dropped as dead locals.
def run(self, **kwargs):
    # type: ignore[override]
    """Transform each :class:`~sphinxcontrib.bibtex.nodes.bibliography`
    node into a list of citations.
    """
    env = cast("BuildEnvironment", self.document.settings.env)
    domain = cast("BibtexDomain", env.get_domain('cite'))
    for bibnode in self.document.traverse(bibliography_node):
        # reminder: env.docname may be equal to 'index' instead of
        # bibnode['docname'] in post-transform phase (e.g. latex builder)
        bib_key = BibliographyKey(docname=bibnode['docname'],
                                  id_=bibnode['ids'][0])
        bibliography = domain.bibliographies[bib_key]
        # only the citations that belong to this bibliography directive
        citations = [
            citation for citation in domain.citations
            if citation.bibliography_key == bib_key
        ]
        # create citation nodes for all references
        if bibliography.list_ == "enumerated":
            nodes = docutils.nodes.enumerated_list()
            nodes['enumtype'] = bibliography.enumtype
            if bibliography.start >= 1:
                # explicit start value; remember it so a later
                # bibliography in the same document continues counting
                nodes['start'] = bibliography.start
                env.temp_data['bibtex_enum_count'] = bibliography.start
            else:
                # continue numbering from the previous bibliography
                nodes['start'] = env.temp_data.setdefault(
                    'bibtex_enum_count', 1)
        elif bibliography.list_ == "bullet":
            nodes = docutils.nodes.bullet_list()
        else:  # "citation"
            nodes = []
        for citation in citations:
            citation_node = bibliography.citation_nodes[citation.key]
            if bibliography.list_ in {"enumerated", "bullet"}:
                citation_node += self.backend.paragraph(
                    citation.formatted_entry)
            else:  # "citation"
                # backrefs only supported in same document
                backrefs = [
                    citation_ref.citation_ref_id
                    for citation_ref in domain.citation_refs
                    if bib_key.docname == citation_ref.docname
                    and citation.key in citation_ref.keys
                ]
                if backrefs:
                    citation_node['backrefs'] = backrefs
                citation_node += docutils.nodes.label(
                    '', citation.formatted_entry.label,
                    support_smartquotes=False)
                citation_node += self.backend.paragraph(
                    citation.formatted_entry)
            citation_node['docname'] = bib_key.docname
            # post-process text children (e.g. \url{...} commands)
            node_text_transform(citation_node, transform_url_command)
            nodes.append(citation_node)
            if bibliography.list_ == "enumerated":
                env.temp_data['bibtex_enum_count'] += 1
        if citations:
            final_node = domain.bibliography_header.deepcopy()
            final_node += nodes
            bibnode.replace_self(final_node)
        else:
            # nothing cited here: replace placeholder with an empty target
            bibnode.replace_self(docutils.nodes.target())
def run(self):
    """Emit one ``faicon`` node per space-separated icon name found on
    the directive's first content line."""
    icon_names = self.content[0].split(' ')
    return [faicon(name, fa_name=name) for name in icon_names]