class AutoFunction(ObjectDescription):
    def handle_signature(self, sig, signode):
        cache = _APP_CACHES.get(self.env.app, {})
        key = CursorKind.FUNCTION_DECL, (sig, )
        if key in cache:
            node, comment, start, end, _ = cache[key]
            result_type = node.type.get_result()
            signode += addnodes.desc_type(result_type.spelling, result_type.spelling + ' ')
            signode += addnodes.desc_name(node.spelling, node.spelling)
            paramlist = addnodes.desc_parameterlist()
            for argument in node.get_arguments():
                parameter = addnodes.desc_parameter()
                parameter += addnodes.desc_type(argument.type.spelling, argument.type.spelling + ' ')
                parameter += nodes.Text(argument.spelling, argument.spelling)
                paramlist += parameter
            signode += paramlist
            self.content = ViewList()
            for lineno, line in enumerate(comment.splitlines(), start[0]):
                self.content.append(line, '<unknown>', lineno)
        return sig

    def get_index_text(self, name):
        return '%s (C function)' % name

    def add_target_and_index(self, name, sig, signode):
        CObject.add_target_and_index.__func__(self, name, sig, signode)

def append_class_hierarchy(node, state, cls, level=0, clstree=None):
    if clstree is None:
        clstree = []
    name = normalize_class_name(cls.__module__, cls.__name__)
    clstree.append((level, name))
    for c in cls.__bases__:
        if c != object:
            append_class_hierarchy(node, state, c, level+1, clstree)
    if level == 0:
        clstree = sorted(set(clstree), key=lambda x: -x[0])
        depth = max(clstree, key=lambda x: x[0])[0]
        for level, name in [(abs(level-depth), cls) for level, cls in clstree]:
            row = hierarchy_row()
            row.level, row.depth = level, depth
            if level != depth:
                name = ':class:`%s`' % name
            list = ViewList()
            list.append(name, '')
            state.nested_parse(list, 0, row)
            node.append(row)

def run(self):
    module_path, class_name, attribute_name = self.arguments
    mod = importlib.import_module(module_path)
    klass = getattr(mod, class_name)
    options = getattr(klass(), attribute_name)

    if not isinstance(options, OptionsDictionary):
        raise TypeError("Object '%s' is not an OptionsDictionary." % attribute_name)

    lines = ViewList()
    n = 0
    for line in options.__rst__():
        lines.append(line, "options table", n)
        n += 1

    # Note applicable to System, Solver and Driver 'options', but not to 'recording_options'
    if attribute_name != 'recording_options':
        lines.append("", "options table", n+1)  # Blank line required after table.

    # Create a node.
    node = nodes.section()
    node.document = self.state.document

    # Parse the rst.
    nested_parse_with_titles(self.state, lines, node)

    # And return the result.
    return node.children

def directive(dirname, arguments, options, content, lineno,
              content_offset, block_text, state, state_machine):
    reverse_mapping = {}
    for name, func in mapping.items():
        reverse_mapping.setdefault(func, []).append(name)
    filters = []
    compare_ops = set(('lt', 'le', 'eq', 'ne', 'ge', 'gt'))
    for func, names in reverse_mapping.items():
        aliases = sorted(names, key=len)
        aliases = sorted(aliases, key=lambda x: x in compare_ops)
        name = aliases.pop()
        filters.append((name, aliases, func))
    filters.sort()

    result = ViewList()
    for name, aliases, func in filters:
        for item in format_function(name, aliases, func):
            result.append(item, '<jinjaext>')
    node = nodes.paragraph()
    state.nested_parse(result, content_offset, node)
    return node.children

def run(self):
    env = self.state.document.settings.env
    app = env.app

    namespace = " ".join(self.content).strip()
    app.info("documenting plugins from %r" % namespace)
    overline_style = self.options.get("overline-style", "")
    underline_style = self.options.get("underline-style", "=")

    def report_load_failure(mgr, ep, err):
        app.warn("Failed to load %s: %s" % (ep.module_name, err))

    mgr = extension.ExtensionManager(namespace,
                                     on_load_failure_callback=report_load_failure)

    result = ViewList()
    if "detailed" in self.options:
        data = _detailed_list(mgr, over=overline_style, under=underline_style)
    else:
        data = _simple_list(mgr)
    for text, source in data:
        for line in text.splitlines():
            result.append(line, source)

    # Parse what we have into a new section.
    node = nodes.section()
    node.document = self.state.document
    nested_parse_with_titles(self.state, result, node)

    return node.children

def run(self):
    # figure out what attributes to exclude:
    obj = import_obj_from_args(self.arguments)
    if not issubclass(obj, traitlets.HasTraits):
        raise ValueError('altair-class directive should only be used '
                         'on altair classes; not {0}'.format(obj))
    exclude = ['skip']
    exclude.extend(getattr(obj, 'skip', []))
    exclude.extend([attr for attr in obj.class_traits()])
    exclude.extend([attr for attr in dir(traitlets.HasTraits)
                    if not attr.startswith('_')])

    # generate the documentation string
    rst_text = ALTAIR_CLASS_TEMPLATE.render(
        classname=self.arguments[0],
        exclude_members=','.join(exclude)
    )

    # parse and return documentation
    result = ViewList()
    for line in rst_text.split("\n"):
        result.append(line, "<altair-class>")
    node = nodes.paragraph()
    node.document = self.state.document
    nested_parse_with_titles(self.state, result, node)

    return node.children

def run(self):
    namespace = ' '.join(self.content).strip()
    LOG.info('documenting plugins from %r' % namespace)
    overline_style = self.options.get('overline-style', '')
    underline_style = self.options.get('underline-style', '=')

    def report_load_failure(mgr, ep, err):
        LOG.warning(u'Failed to load %s: %s' % (ep.module_name, err))

    mgr = extension.ExtensionManager(
        namespace,
        on_load_failure_callback=report_load_failure,
    )

    result = ViewList()

    titlecase = 'titlecase' in self.options

    if 'detailed' in self.options:
        data = _detailed_list(
            mgr, over=overline_style, under=underline_style,
            titlecase=titlecase)
    else:
        data = _simple_list(mgr)

    for text, source in data:
        for line in text.splitlines():
            result.append(line, source)

    # Parse what we have into a new section.
    node = nodes.section()
    node.document = self.state.document
    nested_parse_with_titles(self.state, result, node)

    return node.children

def run(self):
    path_to_model = self.arguments[0]
    np = os.path.normpath(os.path.join(os.getcwd(), path_to_model))

    # check that the file exists
    if not os.path.isfile(np):
        raise IOError('File does not exist({0})'.format(np))

    html_name = os.path.join(os.getcwd(),
                             (os.path.basename(path_to_model).split('.')[0] + "_n2.html"))

    cmd = subprocess.Popen(['openmdao', 'view_model', np, '--no_browser',
                            '--embed', '-o' + html_name])
    cmd_out, cmd_err = cmd.communicate()

    rst = ViewList()

    # Add the content one line at a time.
    # Second argument is the filename to report in any warnings
    # or errors, third argument is the line number.
    env = self.state.document.settings.env
    docname = env.doc2path(env.docname)

    rst.append(".. raw:: html", docname, self.lineno)
    rst.append("   :file: %s" % html_name, docname, self.lineno)

    # Create a node.
    node = nodes.section()

    # Parse the rst.
    nested_parse_with_titles(self.state, rst, node)

    # And return the result.
    return node.children

def run(self):
    env = self.state.document.settings.env
    app = env.app

    split_namespaces = 'split-namespaces' in self.options

    config_file = self.options.get('config-file')

    if config_file:
        app.info('loading config file %s' % config_file)
        conf = cfg.ConfigOpts()
        conf.register_opts(generator._generator_opts)
        conf(
            args=['--config-file', config_file],
            project='oslo.config.sphinxext',
        )
        namespaces = conf.namespace[:]
    else:
        namespaces = [
            c.strip()
            for c in self.content
            if c.strip()
        ]

    result = ViewList()
    source_name = '<' + __name__ + '>'
    for line in _format_option_help(app, namespaces, split_namespaces):
        result.append(line, source_name)

    node = nodes.section()
    node.document = self.state.document
    nested_parse_with_titles(self.state, result, node)

    return node.children

def run(self):
    size = self.options.get('size', 4)
    shuffle = 'shuffle' in self.options
    seed = self.options.get('seed', 42)
    titles = self.options.get('titles', False)
    width = self.options.get('width', None)

    env = self.state.document.settings.env
    app = env.app

    gallery_dir = app.builder.config.altair_gallery_dir
    gallery_ref = app.builder.config.altair_gallery_ref

    examples = populate_examples(shuffle=shuffle,
                                 shuffle_seed=seed,
                                 num_examples=size,
                                 gallery_dir=gallery_dir,
                                 gallery_ref=gallery_ref,
                                 code_below=True)

    include = MINIGALLERY_TEMPLATE.render(image_dir='/_images',
                                          gallery_dir=gallery_dir,
                                          examples=examples,
                                          titles=titles,
                                          width=width)

    # parse and return documentation
    result = ViewList()
    for line in include.split('\n'):
        result.append(line, "<altair-minigallery>")
    node = nodes.paragraph()
    node.document = self.state.document
    nested_parse_with_titles(self.state, result, node)

    return node.children

def run(self):
    env = self.state.document.settings.env
    app = env.app

    classname = self.arguments[0].split('(')[0].strip()

    try:
        obj = import_obj(classname, default_module='altair')
    except ImportError:
        raise
        warnings.warn('Could not make table for {0}. Unable to import'
                      ''.format(object))

    # create the table from the object
    include_vl_link = ('include-vegalite-link' in self.options)
    table = altair_rst_table(obj, include_description=include_vl_link)

    # parse and return documentation
    result = ViewList()
    for line in table:
        result.append(line, "<altair-class>")
    node = nodes.paragraph()
    node.document = self.state.document
    nested_parse_with_titles(self.state, result, node)

    return node.children

def run(self): """For each file in noseOfYeti/specs, output nodes to represent each spec file""" with hp.a_temp_file() as fle: fle.write(dedent(""" --- environments: { dev: {account_id: "123"} } stacks: { app: {} } """).encode('utf-8')) fle.seek(0) collector = Collector() collector.prepare(fle.name, {'bespin': {'extra': ""}, "command": None, "bash": None}) section = nodes.section() section['ids'].append("available-tasks") title = nodes.title() title += nodes.Text("Default tasks") section += title for name, task in sorted(collector.configuration['task_finder'].tasks.items(), key=lambda x: len(x[0])): lines = [name] + [" {0}".format(line.strip()) for line in task.description.split('\n')] viewlist = ViewList() for line in lines: viewlist.append(line, name) self.state.nested_parse(viewlist, self.content_offset, section) return [section]
def run(self):
    node = nodes.section()
    node.document = self.state.document
    result = ViewList()
    for line in self.make_rst():
        result.append(line, '<autoroutr>')
    nested_parse_with_titles(self.state, result, node)
    return node.children

def _parse(self, rst_text, annotation):
    result = ViewList()
    for line in rst_text.split("\n"):
        result.append(line, annotation)
    node = nodes.paragraph()
    node.document = self.state.document
    nested_parse_with_titles(self.state, result, node)
    return node.children

def run(self):
    node = nodes.section()
    node.document = self.state.document
    result = ViewList()
    for line in self.make_rst():
        result.append(line, '<{0}>'.format(self.__class__.__name__))
    nested_parse_with_titles(self.state, result, node)
    return node.children

def run(self):
    # Construct an empty node
    new_content = ViewList()
    ref = u":ref:`{0} <{1}>`".format(
        self.options.get("label", "Link To"),
        "".join(self.arguments))
    new_content.append(ref, source=self.content)
    self.content = new_content
    return super(LinkToBlock, self).run()

def run(self):
    assert self.document_type in ["yaml", "json"]
    node = nodes.section()
    node.document = self.state.document
    result = ViewList()
    for line in self.make_rst():
        result.append(line, "<rx>")
    nested_parse_with_titles(self.state, result, node)
    return node.children

def append_row(*column_texts):
    row = nodes.row('')
    for text in column_texts:
        node = nodes.paragraph('')
        vl = ViewList()
        vl.append(text, '<autosummary>')
        state.nested_parse(vl, 0, node)
        row.append(nodes.entry('', node))
    body.append(row)

class Extractor(object):
    """
    Main extraction class
    """

    def __init__(self):
        """ """
        self.content = ViewList("", 'comment')
        self.lineno = 0

    def extract(self, source):
        """
        Process the source file and fill in the content.

        SOURCE is a fileobject.
        """
        for l in source:
            self.lineno = self.lineno + 1
            l = l.strip()
            m = re_comment.match(l)
            if m:
                self.comment(m.group(1), source)

    def comment(self, cur, source):
        """
        Read the whole comment and strip the stars.

        CUR is currently read line and SOURCE is a fileobject
        with the source code.
        """
        self.content.append(cur.strip(), "comment")
        for line in source:
            self.lineno = self.lineno + 1
            line = line.strip()
            if re_cmtend.match(line):
                break
            if line.startswith("/*"):
                raise ExtractError("%d: Nested comments are not supported yet."
                                   % self.lineno)
            if line.startswith(".. "):
                self.content.append(line, "comment")
                continue
            m = re_cmtnext.match(line)
            if m:
                self.content.append(" " + m.group(1).strip(), "comment")
                continue
            self.content.append(line, "comment")
        self.content.append('\n', "comment")

def run(self):
    node = nodes.paragraph()
    node.document = self.state.document
    desc = ViewList()
    description = dedent(commands[self.arguments[0]].__doc__)
    for line in description.split('\n'):
        desc.append(line, "")
    self.state.nested_parse(desc, 0, node)
    return [node]

def run(self):
    env = self.state.document.settings.env
    app = env.app

    spec_path = self.arguments[0]

    env.note_reread()

    dest_dir = join(dirname(self.state_machine.node.source), "gallery")
    if not exists(dest_dir):
        makedirs(dest_dir)

    target_id = "bokeh-plot-%d" % env.new_serialno('bokeh-plot')
    target_node = nodes.target('', '', ids=[target_id])
    result = [target_node]

    source_position = self.options.get('source-position', 'below')

    spec = json.load(open(spec_path))
    details = spec['details']

    for i, detail in enumerate(details):
        path = detail['path']
        name = detail['name']
        prev_ref, next_ref = None, None
        if i > 0:
            prev_ref = "gallery_" + details[i-1]['name']
        if i < len(details)-1:
            next_ref = "gallery_" + details[i+1]['name']
        rst = DETAIL_TEMPLATE.render(
            name=name,
            underline="#"*len(name),
            path=abspath("../" + path),
            symbol=detail.get('symbol'),
            prev_ref=prev_ref,
            up_ref="gallery",
            next_ref=next_ref,
            source_position=source_position,
        )
        with open(join(dest_dir, "%s.rst" % name), "w") as f:
            f.write(rst)
        env.clear_doc(join("docs", "gallery", name))
        env.read_doc(join("docs", "gallery", name), app=app)

    result = ViewList()
    names = [detail['name'] for detail in details]
    env.gallery_names = [join("docs", "gallery", n) for n in names]
    text = GALLERY_TEMPLATE.render(names=names)
    for line in text.split("\n"):
        result.append(line, "<bokeh-gallery>")

    node = nodes.paragraph()
    node.document = self.state.document
    self.state.nested_parse(result, 0, node)

    return node.children

def parse_text(directive, text, node_type=nodes.paragraph):
    """Parses text in ReST format and returns a node with the content."""
    vl = ViewList()
    for line in text.split('\n'):
        vl.append(line, line)
    node = node_type(rawsource=text)
    directive.state.nested_parse(vl, 0, node)
    return node

def run(self):
    result = ViewList()
    for line in _get_cogbin_data():
        result.append(line, '<cogbin>')
    node = nodes.section()
    node.document = self.state.document
    nested_parse_with_titles(self.state, result, node)
    return node.children

def parse(self, text):
    node = nodes.paragraph()
    vl = ViewList()
    for line in text.splitlines():
        vl.append(line, '<xmlschema>')
    nested_parse_with_titles(self.state, vl, node)
    try:
        return node[0]
    except IndexError:
        return build_paragraph(text)

def jinja_changelog(dirname, arguments, options, content, lineno,
                    content_offset, block_text, state, state_machine):
    doc = ViewList()
    changelog = file(os.path.join(os.path.dirname(jinja2.__file__), '..', 'CHANGES'))
    try:
        for line in islice(changelog, 3, None):
            doc.append(line.rstrip(), '<jinjaext>')
    finally:
        changelog.close()
    return parse_rst(state, content_offset, doc)

def run(self):
    doc = ViewList()
    info = self.get_scenario_info()
    for request in info['requests']:
        self.write_request(doc, request['request'])
        doc.append('', '')
        self.write_response(doc, request['response'])
        doc.append('', '')
    return parse_rst(self.state, self.content_offset, doc)

def parse_text(directive, text, node_type=nodes.paragraph, where=None):
    """Parses text in ReST format and returns a node with the content."""
    assert text is not None, "Missing text during parse_text in %s" % where
    vl = ViewList()
    for line in text.split("\n"):
        vl.append(line, line)
    node = node_type(rawsource=text)
    directive.state.nested_parse(vl, 0, node)
    return node

def _nested_parse(state, text, node, with_titles=False):
    result = ViewList()
    if isinstance(text, str):
        for line in text.split("\n"):
            result.append(line, "<nested>")
    else:
        for line in text:
            result.append(line, "<nested>")
    if with_titles:
        _nested_parse_with_titles(state, result, node)
    else:
        state.nested_parse(result, 0, node)

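A minimal usage sketch for a helper like the one above, assuming `_nested_parse` is in scope; the directive name and class are illustrative only, not from the original source:

from docutils import nodes
from docutils.parsers.rst import Directive


class ExampleDirective(Directive):
    # Illustrative only: renders the directive body through the _nested_parse helper above.
    has_content = True

    def run(self):
        node = nodes.paragraph()
        node.document = self.state.document
        # self.content is iterable, so the non-str branch of the helper is used.
        _nested_parse(self.state, self.content, node)
        return [node]
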
def _get_source_nodes(self, source):
    linenos = 'linenos' in self.options
    emphasize_lines = self.options.get('emphasize-lines', False)
    if emphasize_lines:
        linenos = True
    result = ViewList()
    text = SOURCE_TEMPLATE.render(source=source,
                                  linenos=linenos,
                                  emphasize_lines=emphasize_lines)
    for line in text.split("\n"):
        result.append(line, "<bokeh-plot>")
    node = nodes.paragraph()
    node.document = self.state.document
    self.state.nested_parse(result, 0, node)
    return node.children

def run(self):
    env = self.state.document.settings.env
    app = env.app

    def info(msg):
        app.info('[reno] %s' % (msg,))

    title = ' '.join(self.content)
    branch = self.options.get('branch')
    reporoot_opt = self.options.get('reporoot', '.')
    reporoot = os.path.abspath(reporoot_opt)
    relnotessubdir = self.options.get('relnotessubdir',
                                      defaults.RELEASE_NOTES_SUBDIR)
    notessubdir = self.options.get('notesdir', defaults.NOTES_SUBDIR)
    version_opt = self.options.get('version')
    # FIXME(dhellmann): Force this flag True for now and figure
    # out how Sphinx passes a "false" flag later.
    collapse = True  # 'collapse-pre-releases' in self.options
    earliest_version = self.options.get('earliest-version')

    notesdir = os.path.join(relnotessubdir, notessubdir)
    info('scanning %s for %s release notes' %
         (os.path.join(reporoot, notesdir), branch or 'current branch'))

    ldr = loader.Loader(
        reporoot=reporoot,
        notesdir=notesdir,
        branch=branch,
        collapse_pre_releases=collapse,
        earliest_version=earliest_version,
    )

    if version_opt is not None:
        versions = [
            v.strip()
            for v in version_opt.split(',')
        ]
    else:
        versions = ldr.versions

    text = formatter.format_report(
        ldr,
        versions,
        title=title,
    )

    source_name = '<' + __name__ + '>'
    result = ViewList()
    for line in text.splitlines():
        result.append(line, source_name)

    node = nodes.section()
    node.document = self.state.document
    nested_parse_with_titles(self.state, result, node)

    return node.children

def container_wrapper(directive, literal_node, caption):
    # type: (Directive, nodes.Node, unicode) -> nodes.container
    container_node = nodes.container('', literal_block=True,
                                     classes=['literal-block-wrapper'])
    parsed = nodes.Element()
    directive.state.nested_parse(ViewList([caption], source=''),
                                 directive.content_offset, parsed)
    if isinstance(parsed[0], nodes.system_message):
        msg = __('Invalid caption: %s' % parsed[0].astext())
        raise ValueError(msg)
    caption_node = nodes.caption(parsed[0].rawsource, '',
                                 *parsed[0].children)
    caption_node.source = literal_node.source
    caption_node.line = literal_node.line
    container_node += caption_node
    container_node += literal_node
    return container_node

def run(self): """Run method for the directive""" doc_nodes = AutodocDirective.run(self) if 'autosummary' not in self.options: return doc_nodes try: self.env = self.state.document.settings.env except AttributeError: pass # is set automatically with sphinx >= 1.8.0 if sphinx_version < [2, 0]: self.warnings = [] self.result = ViewList() documenter = self.autosummary_documenter grouped_documenters = documenter.get_grouped_documenters() summ_nodes = self.autosumm_nodes(documenter, grouped_documenters) dn = summ_nodes.pop(documenter.fullname) if self.name == 'automodule': doc_nodes = self.inject_summ_nodes(doc_nodes, summ_nodes) # insert the nodes directly after the paragraphs if self.name == 'autoclass': for node in dn[::-1]: self._insert_after_paragraphs(doc_nodes[1], node) dn = [] elif self.name == 'automodule': # insert table before the documentation of the members istart = 2 if 'noindex' not in self.options else 0 # if we have a title in the module, we look for the section if (len(doc_nodes) >= istart + 1 and isinstance(doc_nodes[istart], nodes.section)): others = doc_nodes[istart] istart = 2 # skip the title else: others = doc_nodes found = False if len(others[istart:]) >= 2: for i in range(istart, len(others)): if isinstance(others[i], sphinx.addnodes.index): found = True break if found: for node in dn[::-1]: others.insert(i, node) dn = [] return self.warnings + dn + doc_nodes
def run(self):
    env = self.state.document.settings.env
    app = env.app

    group_name = ' '.join(self.content)
    namespace = self.options.get('namespace')

    cached_groups = env.domaindata['oslo.config']['groups']

    # Store the current group for use later in option directives
    env.temp_data['oslo.config:group'] = group_name
    app.info('oslo.config group %r' % group_name)

    # Store the location where this group is being defined
    # for use when resolving cross-references later.
    # FIXME: This should take the source namespace into account, too
    cached_groups[group_name] = env.docname

    result = ViewList()
    source_name = '<' + __name__ + '>'

    def _add(text):
        "Append some text to the output result view to be parsed."
        result.append(text, source_name)

    if namespace:
        title = '%s: %s' % (namespace, group_name)
    else:
        title = group_name

    _add(title)
    _add('-' * len(title))
    node = nodes.section()
    node.document = self.state.document
    nested_parse_with_titles(self.state, result, node)

    first_child = node.children[0]

    # Compute the normalized target and set the node to have that
    # as an id
    target_name = cfg._normalize_group_name(group_name)
    first_child['ids'].append(target_name)

    indexnode = addnodes.index(entries=[])
    return [indexnode] + node.children

def run(self):
    rst = []
    files = self.get_files()
    for ff in files:
        rst.append(".. image:: {}".format(ff))
        rst.append("   :target: _images/{}".format(ff.name))
        rst.append("   :scale: 25%")
        rst.append("   :align: left")
        rst.append("")

    vl = ViewList(rst, "fakefile.rst")

    # Create a node.
    node = nodes.section()
    node.document = self.state.document

    # Parse the rst.
    nested_parse_with_titles(self.state, vl, node)

    return node.children

def run(self):
    env = self.state.document.settings.env

    name = None
    if self.arguments:
        m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0])
        name = m.group(2).strip()
    if not name:
        name = self.arguments[0]

    lines = list(self.content)
    mangle_docstrings(env.app, objtype, name, None, None, lines)

    # local import to avoid testing dependency
    from docutils.statemachine import ViewList
    self.content = ViewList(lines, self.content.parent)

    return base_directive.run(self)

def run(self):
    tc_type = self.arguments[0]

    try:
        registry = TEST_CASE_REGISTRIES[tc_type]
    except KeyError:
        raise Exception(
            "test-case-documentation first argument must be {}".format(
                " or ".join(TEST_CASE_REGISTRIES.keys())))

    test_cases = [(
        tc_function.__name__,
        dedent(inspect.getdoc(tc_function)),
        inspect.getsourcefile(tc_function),
    ) for tc_function in registry.iter_registered_functions()]

    out = []
    for tc_name, tc_docs, tc_file in sorted(test_cases):
        # This is a crude hack (i.e. creating the directive by adding some
        # RST to a string...) but docutils/sphinx are sufficiently
        # poorly documented to make this the only viable option after
        # several hours of searching...
        section = nodes.section(
            ids=["test-case-{}-{}".format(tc_type, tc_name)])

        title = nodes.title()
        title += nodes.Text("{} test case: ".format(tc_type.title()))
        title += nodes.literal(text=tc_name)
        section += title

        section += nodes.Text("")  # Required to make ref below work...

        nested_parse_with_titles(
            self.state,
            ViewList(tc_docs.splitlines(), tc_file),
            section,
        )

        out.append(
            make_from_rst(self.state,
                          ".. {}-test-case:: {}".format(tc_type, tc_name)))
        out.append(section)

    return out

def build_node(self):
    srclang = self.arguments[0].strip()
    if srclang not in RENDER_MARKUP_EXT.keys():
        return [self.state_machine.reporter.warning(
            'Unknown source language "%s", use one of: %s.' % (
                srclang, ",".join(RENDER_MARKUP_EXT.keys())),
            line=self.lineno)]

    code = '\n'.join(self.content)
    if not code.strip():
        return [self.state_machine.reporter.warning(
            'Ignoring "%s" directive without content.' % (self.name),
            line=self.lineno)]

    node = kernel_render()
    node['alt'] = self.options.get('alt', '')
    node['srclang'] = srclang
    literal_node = nodes.literal_block(code, code)
    node += literal_node

    caption = self.options.get('caption')
    if caption:
        # parse caption's content
        parsed = nodes.Element()
        self.state.nested_parse(ViewList([caption], source=''),
                                self.content_offset, parsed)
        caption_node = nodes.caption(parsed[0].rawsource, '',
                                     *parsed[0].children)
        caption_node.source = parsed[0].source
        caption_node.line = parsed[0].line

        figure_node = nodes.figure('', node)
        for k, v in self.options.items():
            figure_node[k] = v
        figure_node += caption_node

        node = figure_node

    return node

def run(self):
    series = self.options.get('series', 'cleaning')
    if series != 'cleaning':
        raise NotImplementedError('Showing deploy steps not implemented')

    source_name = '<{}>'.format(__name__)

    result = ViewList()

    for interface_name in ['power', 'management', 'deploy', 'bios', 'raid']:
        interface_info = _clean_steps.get(interface_name, {})
        if not interface_info:
            continue

        title = '{} Interface'.format(interface_name.capitalize())
        result.append(title, source_name)
        result.append('~' * len(title), source_name)

        for driver_name, steps in sorted(interface_info.items()):
            _list_table(
                title='{} cleaning steps'.format(driver_name),
                add=lambda x: result.append(x, source_name),
                headers=['Name', 'Details', 'Priority', 'Stoppable',
                         'Arguments'],
                columns=[20, 30, 10, 10, 30],
                data=((
                    '``{}``'.format(s['step']),
                    s['doc'],
                    s['priority'],
                    'yes' if s['abortable'] else 'no',
                    _format_args(s['argsinfo']),
                ) for s in steps),
            )

    # NOTE(dhellmann): Useful for debugging.
    # print('\n'.join(result))

    node = nodes.section()
    node.document = self.state.document
    nested_parse_with_titles(self.state, result, node)

    return node.children

def run(self):
    self.filename_set = set()  # a set of dependent filenames
    self.reporter = self.state.document.reporter
    self.env = self.state.document.settings.env
    self.warnings = []
    self.result = ViewList()

    # find out what documenter to call
    objtype = self.name[4:]
    doc_class = self._registry[objtype]
    # process the options with the selected documenter's option_spec
    self.genopt = Options(
        assemble_option_dict(self.options.items(), doc_class.option_spec)
    )
    # generate the output
    documenter = doc_class(self, self.arguments[0])
    documenter.generate(more_content=self.content)
    if not self.result:
        return self.warnings

    # record all filenames as dependencies -- this will at least
    # partially make automatic invalidation possible
    for fn in self.filename_set:
        self.env.note_dependency(fn)

    # use a custom reporter that correctly assigns lines to source
    # filename/description and lineno
    old_reporter = self.state.memo.reporter
    self.state.memo.reporter = AutodocReporter(
        self.result, self.state.memo.reporter
    )

    if self.name == "automodule":
        node = nodes.section()
        # necessary so that the child nodes get the right source/line set
        node.document = self.state.document
        nested_parse_with_titles(self.state, self.result, node)
    else:
        node = nodes.paragraph()
        node.document = self.state.document
        self.state.nested_parse(self.result, 0, node)
    self.state.memo.reporter = old_reporter
    return self.warnings + node.children

def _generate_nodes(
    self,
    name: str,
    command: click.Command,
    parent: Optional[click.Context],
    nested: NestedOption,
    commands: Optional[str] = None,
) -> List[nodes.Node]:
    """
    Generate the relevant Sphinx nodes.

    Format a :class:`click.Group` or :class:`click.Command`.

    :param name: Name of command, as used on the command line.
    :param command: Instance of `click.Group` or `click.Command`.
    :param parent: Instance of `click.Context`, or None
    :param nested: The granularity of subcommand details.
    :param commands: Display only listed commands or skip the section if empty.
    :returns: A list of nested docutils nodes.
    """
    if command.hidden:
        return []

    targetid = f"click-{self.env.new_serialno('click'):d}"
    targetnode = nodes.target('', '', ids=[targetid])

    # Summary
    ctx = click.Context(command, info_name=name, parent=parent)
    content = list(_format_command(ctx, nested, commands))
    view = ViewList(content)

    click_node = nodes.paragraph(rawsource='\n'.join(content))
    self.state.nested_parse(
        view,  # type: ignore[arg-type]
        self.content_offset,
        click_node,
    )

    click_purger.add_node(self.env, click_node, targetnode, self.lineno)

    return [targetnode, click_node]

def run(self):
    env = self.state.document.settings.env

    name = None
    if self.arguments:
        m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0])
        name = m.group(2).strip()
    if not name:
        name = self.arguments[0]

    lines = list(self.content)
    mangle_docstrings(env.app, objtype, name, None, None, lines)
    if self.content:
        items = match_items(lines, self.content)
        self.content = ViewList(lines, items=items,
                                parent=self.content.parent)

    return base_directive.run(self)

def run(self):
    config = self.state.document.settings.env.config
    reporter = self.state.document.reporter

    api = load_api(config['api_path'])
    obj_type = self.name[3:]
    objs = filter_by_type(obj_type, find_by_path(self.arguments[0], api))
    parent = get_parent(self.arguments[0])

    render_fn = self.get_writer(obj_type)
    text = '\n'.join(self.content)
    content = '\n\n'.join(map(lambda x: render_fn(x, parent=parent,
                                                  more_content=text,
                                                  brief=False,
                                                  source=reporter.source).render(),
                              objs))

    node = nodes.section()
    node.document = self.state.document
    nested_parse_with_titles(self.state, ViewList(content.split('\n')), node)
    return node.children

def run(self):
    # type: () -> List[N_co]
    self.genopt = Options()
    self.warnings = []  # type: List[nodes.Node]
    self.result = ViewList()

    names = [x.strip().split()[0] for x in self.content
             if x.strip() and re.search(r'^[~a-zA-Z_]', x.strip()[0])]
    items = self.get_items(names)
    nodes = self.get_table(items)

    if 'toctree' in self.options:
        dirname = posixpath.dirname(self.env.docname)

        tree_prefix = self.options['toctree'].strip()
        docnames = []
        excluded = Matcher(self.config.exclude_patterns)
        for name, sig, summary, real_name in items:
            docname = posixpath.join(tree_prefix, real_name)
            docname = posixpath.normpath(posixpath.join(dirname, docname))
            if docname not in self.env.found_docs:
                if excluded(self.env.doc2path(docname, None)):
                    self.warn('toctree references excluded document %r'
                              % docname)
                else:
                    self.warn('toctree references unknown document %r'
                              % docname)
            docnames.append(docname)

        tocnode = addnodes.toctree()
        tocnode['includefiles'] = docnames
        tocnode['entries'] = [(None, docn) for docn in docnames]
        tocnode['maxdepth'] = -1
        tocnode['glob'] = None

        tocnode = autosummary_toc('', '', tocnode)
        nodes.append(tocnode)

    return self.warnings + nodes

def run(self):
    # The rose command to document (i.e. rose / rosie)
    cli_help_format, command = self.arguments[0:2]

    if cli_help_format == 'rose':
        # Generate CLI documentation as a list of rst formatted text lines.
        lines = []
        write = lines.append
        write_command_reference(write, get_rose_command_reference(command))

        # Parse these lines into a docutils node.
        node = nodes.section()
        node.document = self.state.document
        nested_parse_with_titles(self.state, ViewList(lines), node)

        # Return the children of this node (the generated nodes).
        return node.children
    else:
        raise Exception('Invalid/Unsupported CLI help format "%s"' %
                        cli_help_format)

def make_caption_for_directive(directive, caption):
    """
    Creates a caption for the given directive

    Based on sphinx.directives.code.container_wrapper()

    :param directive: Directive
    :param caption: str
    :return: nodes.caption
    """
    assert isinstance(directive, Directive)
    parsed = nodes.Element()
    directive.state.nested_parse(ViewList([caption], source=''),
                                 directive.content_offset, parsed)
    caption_node = nodes.caption(parsed[0].rawsource, '',
                                 *parsed[0].children)
    caption_node.source = parsed[0].source
    caption_node.line = parsed[0].line
    return caption_node

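A hedged usage sketch for a caption helper like the one above: the directive class, its options, and the surrounding container layout are illustrative (mirroring the container_wrapper pattern shown earlier), not taken from the original project:

from docutils import nodes
from docutils.parsers.rst import Directive, directives


class CodeWithCaption(Directive):
    # Illustrative only: a content-bearing directive with an optional caption.
    has_content = True
    option_spec = {'caption': directives.unchanged}

    def run(self):
        code = '\n'.join(self.content)
        literal = nodes.literal_block(code, code)
        caption = self.options.get('caption')
        if not caption:
            return [literal]
        # Wrap caption + literal block in a container, as container_wrapper() does.
        container = nodes.container('', literal_block=True,
                                    classes=['literal-block-wrapper'])
        container += make_caption_for_directive(self, caption)
        container += literal
        return [container]
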
def run(self):
    self.env = env = self.state.document.settings.env
    self.genopt = Options()
    self.warnings = []
    self.result = ViewList()

    names = [x.strip().split()[0] for x in self.content
             if x.strip() and re.search(r'^[~a-zA-Z_]', x.strip()[0])]
    items = self.get_items(names)
    if 'hidden' in self.options:
        nodes = []
    else:
        nodes = self.get_table(items)

    if 'toctree' in self.options:
        dirname = posixpath.dirname(env.docname)

        tree_prefix = self.options['toctree'].strip()
        docnames = []
        for name, sig, summary, real_name in items:
            docname = posixpath.join(tree_prefix, real_name)
            docname = posixpath.normpath(posixpath.join(dirname, docname))
            if docname not in env.found_docs:
                self.warn('toctree references unknown document %r'
                          % docname)
            docnames.append(docname)

        tocnode = addnodes.toctree()
        tocnode['includefiles'] = docnames
        tocnode['entries'] = [(None, docn) for docn in docnames]
        tocnode['maxdepth'] = -1
        tocnode['glob'] = None

        tocnode = autosummary_toc('', '', tocnode)
        if not 'hidden' in self.options:
            nodes.append(tocnode)

    return self.warnings + nodes

def run(self):
    full_path = self.arguments[0]
    project = self.state.document.settings.env.config.github_project

    def insert_github_link(reobj):
        line = reobj.string
        instr = line[reobj.start():reobj.end()]
        issue = instr.strip("#()")
        link = "https://github.com/{}/issues/".format(project)
        rstlink = "(`#{issue} <{link}{issue}>`_)".format(issue=issue, link=link)
        return rstlink

    with io.open(full_path, "r") as myfile:
        text = myfile.readlines()

    rst = []
    for line in text:
        line = line.strip("\n")
        line = line.replace(r"_", r"\_")
        if line.startswith(" ") and line.strip().startswith("-"):
            # list in list:
            rst.append("")
        if not line.startswith(" "):
            rst.append("")
            line = "version " + line
            rst.append(line)
            rst.append("-" * len(line))
        elif not line.strip():
            rst.append(line)
        else:
            line = re.sub(r"\(#[0-9]*\)", insert_github_link, line)
            rst.append(line)

    vl = ViewList(rst, "fakefile.rst")

    # Create a node.
    node = nodes.section()
    node.document = self.state.document

    # Parse the rst.
    nested_parse_with_titles(self.state, vl, node)

    return node.children

def run(self, *, srcdir: Path, destdir: Path = os.curdir,
        syntax_style: Style = None, output_style: Style = None,
        ):
    env = self.state.document.settings.env
    if syntax_style is None:
        syntax_style = self.options["syntax-style"] = \
            Style(env.config.exhibit_syntax_style)
    if output_style is None:
        output_style = self.options["output-style"] = \
            Style(env.config.exhibit_output_style)
    e_state = env.exhibit_state

    if e_state.stage is Stage.RstGeneration:
        for src_path, docname in self.get_src_paths_and_docnames():
            dest_path = Path(env.doc2path(docname))
            dest_path.parent.mkdir(parents=True, exist_ok=True)
            doc_info = doc_info_from_py_source(
                src_path,
                syntax_style=self.options["syntax-style"],
                output_style=self.options["output-style"])
            dest_path.write_text(doc_info.rst)
            # NOTE: We don't actually need this source; it is only copied
            # for compat with s-g and its use by the .. plot:: directive.
            # FIXME: Also arrange to delete this file.
            shutil.copyfile(str(src_path),
                            str(dest_path.parent / src_path.name))
            e_state.docnames[docname] = doc_info
        return []

    else:  # Read stage, either ExhibitExecution or ExecutionDone.
        cur_dir = self.get_current_source().parent
        lines = ([".. toctree::",
                  "   :titlesonly:",
                  ""]
                 + ["   /{}".format(docname)
                    for _, docname in self.get_src_paths_and_docnames()])
        node = rst.nodes.Element()
        self.state.nested_parse(ViewList(lines), 0, node)
        return node.children

def run(self):
    # If the user specifies a team, track only the deliverables
    # for that team.
    self.team_name = self.options.get('name')
    if not self.team_name:
        error = self.state_machine.reporter.error(
            'No team name in team directive',
            nodes.literal_block(self.block_text, self.block_text),
            line=self.lineno)
        return [error]

    self.team_deliverables = _deliverables.get_team_deliverables(
        self.team_name)

    all_series = reversed(
        sorted(_deliverables.get_team_series(self.team_name)))
    # If independent is in the list, it should be sorted last
    all_series = sorted(all_series, key='independent'.__eq__)

    result = ViewList()

    def _add(text):
        result.append(text, '<team tag>')

    for series in all_series:
        series_title = series.lstrip('_').title()
        _add(series_title)
        _add('=' * len(series_title))
        _add('')
        _add('.. deliverable::')
        _add('   :series: %s' % series)
        _add('   :team: %s' % self.team_name)
        _add('')

    # NOTE(dhellmann): Useful for debugging.
    # print('\n'.join(result))

    node = nodes.section()
    node.document = self.state.document
    nested_parse_with_titles(self.state, result, node)

    return node.children

def run(self):  # pragma: no cover
    """Called by Sphinx to generate documentation for this directive."""
    if self.directive_name is None:
        raise NotImplementedError('directive_name must be implemented by '
                                  'subclasses of BaseDirective')
    env, state = self._prepare_env()
    state.doc_names.add(env.docname)
    directive_name = '<{}>'.format(self.directive_name)
    node = nodes.section()
    node.document = self.state.document
    result = ViewList()

    for line in self._render_rst():
        if line.startswith(HEADING_TOKEN):
            # Remove heading token, then append 2 lines, one with
            # the heading text, and the other with the dashes to
            # underline the heading.
            heading = line[HEADING_TOKEN_LENGTH:]
            result.append(heading, directive_name)
            result.append('-' * len(heading), directive_name)
        else:
            result.append(line, directive_name)

    nested_parse_with_titles(self.state, result, node)
    return node.children

def run(self) -> List[nodes.Node]: """ Create the extras_require node. """ extra: str = self.arguments[0] targetid = f'extras_require-{self.env.new_serialno("extras_require"):d}' targetnode = nodes.target('', '', ids=[targetid]) valid_requirements = get_requirements( env=self.env, extra=extra, options=self.options, content=self.content, ) if not valid_requirements: return self._problematic( "No requirements specified! No notice will be shown in the documentation." ) scope = self.options.get("scope", "module") pypi_name = self.env.config.pypi_name or self.env.config.project content = make_node_content(valid_requirements, pypi_name, extra, scope=scope) view = ViewList(content.split('\n')) extras_require_node = nodes.attention(rawsource=content) self.state.nested_parse(view, self.content_offset, extras_require_node) # type: ignore[arg-type] extras_require_purger.add_node(self.env, extras_require_node, targetnode, self.lineno) return [targetnode, extras_require_node]
def run(self):
    table_code, test_code = CodeDiffParser().parse(list(self.content), **self.options)

    # Create a test node as a comment node so it won't show up in the docs.
    # We add attribute "testnodetype" so it is picked up by the doctest
    # builder. This functionality is not officially documented but can be found
    # in the source code:
    # https://github.com/sphinx-doc/sphinx/blob/3.x/sphinx/ext/doctest.py
    # (search for 'testnodetype').
    test_code = '\n'.join(test_code)
    test_node = nodes.comment(test_code, test_code, testnodetype='testcode')
    # Set the source info so the error message is correct when testing.
    self.set_source_info(test_node)
    test_node['options'] = {}
    test_node['language'] = 'python3'

    # The table node is the side-by-side diff view that will be shown on RTD.
    table_node = nodes.paragraph()
    self.content = ViewList(table_code, self.content.parent)
    self.state.nested_parse(self.content, self.content_offset, table_node)

    return [table_node, test_node]

def setup_test():
    global options, directive
    global processed_signatures

    options = Options(
        inherited_members=False,
        undoc_members=False,
        private_members=False,
        special_members=False,
        imported_members=False,
        show_inheritance=False,
        noindex=False,
        annotation=None,
        synopsis='',
        platform='',
        deprecated=False,
        members=[],
        member_order='alphabetic',
        exclude_members=set(),
        ignore_module_all=False,
    )

    directive = Struct(
        env=app.builder.env,
        genopt=options,
        result=ViewList(),
        filename_set=set(),
        state=Mock(),
    )
    directive.state.document.settings.tab_width = 8

    processed_signatures = []

    app._status.truncate(0)
    app._warning.truncate(0)

    yield

    app.registry.autodoc_attrgettrs.clear()

def run(self) -> List[nodes.Node]: """ Process the content of the directive. """ summary = getattr(self.config, "documentation_summary", None) if self.env.app.builder.format.lower() == "latex" or not summary: return [] targetid = f'documentation-summary-{self.env.new_serialno("documentation-summary"):d}' content = f'**{summary}**' targetnode = nodes.paragraph(rawsource=f'**{summary}**', ids=[targetid]) self.state.nested_parse(ViewList([content]), self.content_offset, targetnode) # type: ignore summary_node_purger.add_node(self.env, targetnode, targetnode, self.lineno) return [targetnode]
def generate_func_autodoc(app, func):
    ad = AutoDirective(name='autofunc',
                       arguments=[FULL_NAMES[func]],
                       options={'noindex': True},
                       content=StringList([], items=[]),
                       lineno=0,
                       content_offset=1,
                       block_text='',
                       state=None,
                       state_machine=None)

    ad.env = BuildEnvironment(app)
    ad.genopt = Options(noindex=True)
    ad.filename_set = set()
    ad.result = ViewList()

    documenter = FunctionDocumenter(ad, ad.arguments[0])
    documenter.generate(all_members=True)

    with open(OUTPUT_FILES[func], 'a') as fid:
        for line in ad.result:
            fid.write(line + '\n')

def _setup(**kw):
    global options, directive, _warnings, app
    from schedula.ext.dispatcher import PLOT
    _warnings = []
    options = Struct(
        des=True,
        opt=PLOT,
        data=True,
        func=True,
        code=True,
        dsp=True,
        height=None,
        width=None,
        inherited_members=False,
        undoc_members=False,
        private_members=False,
        special_members=False,
        imported_members=False,
        show_inheritance=False,
        noindex=False,
        annotation=None,
        synopsis='',
        platform='',
        deprecated=False,
        members=[],
        member_order='alphabetic',
        exclude_members=set(),
    )

    settings = Struct(tab_width=8)
    document = Struct(settings=settings)
    from docutils.statemachine import ViewList
    directive = Struct(
        env=app.builder.env,
        genopt=options,
        result=ViewList(),
        warn=warnfunc,
        filename_set=set(),
        state=Struct(document=document),
        **kw
    )

def nestedParse(self, lines, fname):
    env = self.state.document.settings.env
    content = ViewList()
    node = nodes.section()

    if "debug" in self.options:
        code_block = "\n\n.. code-block:: rst\n    :linenos:\n"
        for l in lines.split("\n"):
            code_block += "\n    " + l
        lines = code_block + "\n\n"

    line_regex = re.compile("^\.\. LINENO (\S+)\#([0-9]+)$")
    ln = 0
    n = 0
    f = fname

    for line in lines.split("\n"):
        n = n + 1
        match = line_regex.search(line)
        if match:
            new_f = match.group(1)

            # Sphinx parser is lazy: it stops parsing contents in the
            # middle, if it is too big. So, handle it per input file
            if new_f != f and content:
                self.do_parse(content, node)
                content = ViewList()

                # Add the file to Sphinx build dependencies
                env.note_dependency(os.path.abspath(f))

            f = new_f

            # sphinx counts lines from 0
            ln = int(match.group(2)) - 1
        else:
            content.append(line, f, ln)

    kernellog.info(self.state.document.settings.env.app,
                   "%s: parsed %i lines" % (fname, n))

    if content:
        self.do_parse(content, node)

    return node.children

def run(self) -> List[nodes.Node]: """ Create the rest_example node. """ targetid = f'example-{self.env.new_serialno("sphinx-toolbox rest_example"):d}' targetnode = nodes.target('', '', ids=[targetid]) content = make_rest_example( self.options, self.env, self.content, # type: ignore ) view = ViewList(content) example_node = nodes.paragraph(rawsource=content) # type: ignore self.state.nested_parse(view, self.content_offset, example_node) # type: ignore rest_example_purger.add_node(self.env, example_node, targetnode, self.lineno) return [targetnode, example_node]
def setup_test():
    global options, directive
    global processed_docstrings, processed_signatures, _warnings

    options = Struct(
        inherited_members=False,
        undoc_members=False,
        private_members=False,
        special_members=False,
        imported_members=False,
        show_inheritance=False,
        noindex=False,
        annotation=None,
        synopsis='',
        platform='',
        deprecated=False,
        members=[],
        member_order='alphabetic',
        exclude_members=set(),
        ignore_module_all=False,
    )

    directive = Struct(
        env=app.builder.env,
        genopt=options,
        result=ViewList(),
        warn=warnfunc,
        filename_set=set(),
    )

    processed_docstrings = []
    processed_signatures = []
    _warnings = []

    yield

    AutoDirective._special_attrgetters.clear()

def _sections(self, motors):
    """Generate a section for each sensor"""
    for m in motors:
        dummy = nodes.section()
        result = ViewList()
        result.append('.. _{}:'.format(m['name']),
                      source=m['source_file'], offset=m['source_line'])
        if m['name'] != m['url_name']:
            result.append('.. _{}:'.format(m['url_name']),
                          source=m['source_file'], offset=m['source_line'])
        self.state.nested_parse(result, 0, dummy)
        for c in dummy.children:
            yield c
        # FIXME: not sure why this does not have the same effect as above
        # target = nodes.target(ids=[s['url_name']], names=[s['url_name']])
        # yield target

        section = nodes.section(ids=[m['name']], names=[m['name']])

        title_text = m.get('vendor_part_name', None) or m['vendor_part_number']
        if 'vendor_name' in m:
            title_text = '{} {}'.format(m['vendor_name'], title_text)
        title = nodes.title(text=title_text)
        section += title

        info_section = nodes.section(ids=[m['name'] + '-info'],
                                     names=[m['name'] + '\\ info'])
        info_title = nodes.title(text='General Info')
        info_section += info_title
        info_table = self._table([1, 1], None, [r for r in self._info_rows(m)])
        info_section += info_table
        section += info_section

        notes = self._notes(m)
        if notes:
            section += notes

        yield section

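The snippets above share one pattern: build a ViewList of generated reST lines, parse it into a container node with nested_parse or nested_parse_with_titles, and return the node's children. A minimal, self-contained sketch of that pattern; the directive name and generated text are illustrative only:

from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.statemachine import ViewList
from sphinx.util.nodes import nested_parse_with_titles


class HelloDirective(Directive):
    """Illustrative directive: emits a heading and a paragraph of generated reST."""
    required_arguments = 1

    def run(self):
        title = self.arguments[0]
        lines = [title, '=' * len(title), '', 'Generated content for *%s*.' % title]

        result = ViewList()
        for line in lines:
            # Second argument is the source name reported in any warnings/errors.
            result.append(line, '<hello-directive>')

        # Parse the generated reST into a throwaway section and return its children.
        node = nodes.section()
        node.document = self.state.document
        nested_parse_with_titles(self.state, result, node)
        return node.children
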