def _render_service(self, path, service, methods):
    """Build a docutils section documenting *service* and its *methods*.

    Returns a ``nodes.section`` containing a title, the optional service
    description, and one subsection per HTTP method with validator docs,
    accepted content types and the response renderer.
    """
    env = self.state.document.settings.env
    # unique section id per service, stable within a single build
    service_id = "service-%d" % env.new_serialno('service')
    service_node = nodes.section(ids=[service_id])
    service_node += nodes.title(text='Service at %s' % service.route_name)

    if service.description is not None:
        service_node += rst2node(_dedent(service.description))

    for method, info in methods.items():
        method_id = '%s-%s' % (service_id, method)
        method_node = nodes.section(ids=[method_id])
        method_node += nodes.title(text=method)

        # `or ""` guarantees docstring is always a str from here on, so the
        # original's redundant `if docstring is not None:` guard was removed.
        docstring = info['func'].__doc__ or ""

        if 'validator' in info:
            # each validator may contribute its own docstring paragraph
            for validator in to_list(info['validator']):
                if validator.__doc__ is not None:
                    docstring += '\n' + validator.__doc__.strip()

        if 'accept' in info:
            accept = info['accept']
            if callable(accept):
                # a callable 'accept' documents itself via its docstring
                if accept.__doc__ is not None:
                    docstring += accept.__doc__.strip()
            else:
                # a static list of content types becomes a bullet list
                accept = to_list(accept)
                accept_node = nodes.strong(text='Accepted content types:')
                node_accept_list = nodes.bullet_list()
                accept_node += node_accept_list
                for item in accept:
                    temp = nodes.list_item()
                    temp += nodes.inline(text=item)
                    node_accept_list += temp
                method_node += accept_node

        node = rst2node(docstring)
        if node is not None:
            method_node += node

        renderer = info['renderer']
        if renderer == 'simplejson':
            renderer = 'json'  # normalize renderer name for display
        response = nodes.paragraph()
        response += nodes.strong(text='Response: %s' % renderer)
        method_node += response

        service_node += method_node
    return service_node
def _render_service(self, path, service, methods):
    """Render *service* as a docutils section with one subsection per method.

    Each method subsection holds the rendered docstring (if any) and a
    'Response: <renderer>' paragraph.
    """
    build_env = self.state.document.settings.env
    section_id = "service-%d" % build_env.new_serialno('service')
    root = nodes.section(ids=[section_id])
    root += nodes.title(text='Service at %s' % service.route_name)

    if service.description is not None:
        root += rst2node(_dedent(service.description))

    for verb, details in methods.items():
        sub = nodes.section(ids=['%s-%s' % (section_id, verb)])
        sub += nodes.title(text=verb)

        doc_node = rst2node(_dedent(details['docstring']))
        if doc_node is not None:
            sub += doc_node

        renderer_name = details['renderer']
        if renderer_name == 'simplejson':
            renderer_name = 'json'
        response_para = nodes.paragraph()
        response_para += nodes.strong(text='Response: %s' % renderer_name)
        sub += response_para

        root += sub
    return root
def test_ids_generated(self):
    """Round-trip a hand-built doctree through the rst writer and verify
    that classes, ids, and nested section titles are emitted."""
    from docutils import utils, nodes
    from docutils.core import publish_from_doctree
    doc = utils.new_document('<program>')
    docsect = nodes.section('')
    docsect['classes'] = ('c1 c2',)
    docsect['ids'] = ('my-test-id',)
    docsect['target-ids'] = ('my-test-id',)
    docsect.append(nodes.title('', '', nodes.Text('Title')))
    docsect.append(nodes.paragraph('', '', nodes.Text('some text.')))
    docsect.append(
        nodes.section(
            '',
            nodes.title('', '', nodes.Text('Sub-Title')),
            nodes.paragraph('', '', nodes.Text('some more text'))))
    doc.append(docsect)
    # NOTE(review): the expected-output literal below was reconstructed from
    # a whitespace-mangled source; confirm the exact blank lines and any
    # title indentation against the original file.
    chk = '''\
.. class:: c1 c2

.. _`my-test-id`:

======
Title
======

some text.

---------
Sub-Title
---------

some more text
'''
    out = publish_from_doctree(doc, writer=rst.Writer())
    self.assertMultiLineEqual(out, chk)
def formatComponent(self, moduleName, name, X):
    """Build the doctree section documenting component *X* named *name*.

    The section contains the class statement title, the class docstring,
    inbox/outbox tables, optionally the methods defined on the component,
    and finally the inherited methods.
    """
    # no class bases available from repository scanner
    CLASSNAME = self.formatClassStatement(name, X.bases)
    CLASSDOC = self.docString(X.doc)
    INBOXES = self.boxes(name,"Inboxes", X.inboxes)
    OUTBOXES = self.boxes(name,"Outboxes", X.outboxes)

    # Method docs are optional (config flag) and only emitted when the
    # component actually defines functions.
    if self.config.includeMethods and len(X.listAllFunctions()):
        METHODS = [ nodes.section('',
                        nodes.title('', 'Methods defined here'),
                        # warning box steering readers to the inbox/outbox API
                        boxright('',
                            nodes.paragraph('', '',
                                nodes.strong('', nodes.Text("Warning!"))
                                ),
                            nodes.paragraph('', '',
                                nodes.Text("You should be using the inbox/outbox interface, not these methods (except construction). This documentation is designed as a roadmap as to their functionalilty for maintainers and new component developers.")
                                ),
                            ),
                        * self.formatMethodDocStrings(name,X)
                        )
                    ]
    else:
        METHODS = []

    # Assemble everything into a single section; CLASSDOC is already a list.
    return \
        nodes.section('',
            * [ nodes.title('', CLASSNAME, ids=["symbol-"+name]) ]
              + CLASSDOC
              + [ INBOXES, OUTBOXES ]
              + METHODS
              + [ self.formatInheritedMethods(name,X) ]
            )
def render(service, service_id):
    """Render *service* into a docutils section identified by *service_id*.

    One subsection per (method, pattern) route, titled by the first line of
    the view docstring (falling back to 'METHOD - pattern'), followed by the
    URL, description, Accept/Content-Type headers, and the JSON validation
    schema when present.
    """
    service_node = nodes.section(ids=[service_id])
    title = "%s service" % service.baseRouteName
    service_node += nodes.title(text=title)
    if service.description is not None:
        service_node += create_node(trim(service.description))
    for pattern, route_kw, view_kw, func, schema in service.methods:
        method = route_kw.get('request_method', 'GET')
        method_id = "%s_%s" % (service_id, method)
        method_node = nodes.section(ids=[method_id])
        desc = func.__doc__
        title = None
        if desc:
            # use the first line as title for the endpoint
            sp = desc.split('\n', 1)
            title = sp[0].strip()
            if len(sp) > 1:
                desc = sp[1].strip()
            else:
                desc = ''
        if not title:
            title = '%s - %s' % (method, pattern)
        method_node += nodes.title(text=title)
        # NOTE(review): the exact run of spaces inside this literal (and the
        # join/indent literals below) was reconstructed from a
        # whitespace-mangled source -- confirm against the original.
        url = "::\n\n \n %s - %s\n\n" % (method, pattern)
        method_node += create_node(url)
        # render description from docstring
        if desc:
            method_node += create_node(trim(desc))
        accept = route_kw.get('accept')
        content_type = route_kw.get('content_type')
        # Render Accept Header documentation
        if accept:
            accept_desc = 'Accept: %s' % accept
            method_node += create_node(accept_desc)
        # Render Content-Type Header documentation
        if content_type:
            content_desc = 'Content-Type: %s' % content_type
            method_node += create_node(content_desc)
        # Render Validator
        if schema:
            schema_id = "%s_%s_%s" % (service_id, method, 'validator')
            node = nodes.section(ids=[schema_id])
            title = nodes.title(text='Validation Schema')
            node += title
            text = json.dumps(schema, indent=4)
            # indent the text block
            text = '\n '.join([l for l in text.splitlines()])
            text = '::\n\n ' + text + '\n\n'
            node += create_node(text)
            method_node += node
        service_node += method_node
    return service_node
def print_subcommands(data, nested_content, markDownHelp=False, settings=None):
    """
    Each subcommand is a dictionary with the following keys:

    ['usage', 'action_groups', 'bare_usage', 'name', 'help']

    In essence, this is all tossed in a new section with the title 'name'.
    Apparently there can also be a 'description' entry.
    """
    definitions = map_nested_definitions(nested_content)
    items = []
    if 'children' in data:
        subCommands = nodes.section(ids=["Sub-commands:"])
        subCommands += nodes.title('Sub-commands:', 'Sub-commands:')
        for child in data['children']:
            sec = nodes.section(ids=[child['name']])
            sec += nodes.title(child['name'], child['name'])
            # prefer the explicit description, then the help text
            if 'description' in child and child['description']:
                desc = [child['description']]
            elif child['help']:
                desc = [child['help']]
            else:
                desc = ['Undocumented']
            # Handle nested content: user-supplied @replace/@after/@before
            # directives override or augment the generated description.
            subContent = []
            if child['name'] in definitions:
                classifier, s, subContent = definitions[child['name']]
                if classifier == '@replace':
                    desc = [s]
                elif classifier == '@after':
                    desc.append(s)
                elif classifier == '@before':
                    desc.insert(0, s)
            for element in renderList(desc, markDownHelp):
                sec += element
            sec += nodes.literal_block(text=child['bare_usage'])
            # recurse into the child's own action groups and sub-commands
            for x in print_action_groups(child, nested_content + subContent,
                                         markDownHelp, settings=settings):
                sec += x
            for x in print_subcommands(child, nested_content + subContent,
                                       markDownHelp, settings=settings):
                sec += x
            if 'epilog' in child and child['epilog']:
                for element in renderList([child['epilog']], markDownHelp):
                    sec += element
            subCommands += sec
        items.append(subCommands)
    return items
def run(self):
    """Build man-page sections (Synopsis/Description/Author/Reporting bugs)
    from the target module's ``get_argparser()``.

    With the ``minimal`` option set, only the usage and description are
    emitted in a plain container.
    """
    minimal = self.options.get('minimal')
    module = self.arguments[0]
    # The original built a `template_args` dict from get_authors() here but
    # never used it; those dead locals were removed (get_authors() is still
    # called below where its result is actually consumed).
    get_argparser = __import__(str(module), fromlist=[str('get_argparser')]).get_argparser
    parser = get_argparser(AutoManParser)
    if minimal:
        container = nodes.container()
        container += parser.automan_usage(self.options['prog'])
        container += parser.automan_description()
        return [container]
    synopsis_section = nodes.section(
        '',
        nodes.title(text='Synopsis'),
        ids=['synopsis-section'],
    )
    synopsis_section += parser.automan_usage(self.options['prog'])
    description_section = nodes.section(
        '',
        nodes.title(text='Description'),
        ids=['description-section'],
    )
    description_section += parser.automan_description()
    author_section = nodes.section(
        '',
        nodes.title(text='Author'),
        nodes.paragraph(
            '',
            nodes.Text('Written by {authors} and contributors. The glyphs in the font patcher are created by {glyphs_author}.'.format(
                **get_authors()
            ))
        ),
        ids=['author-section']
    )
    issues_url = 'https://github.com/powerline/powerline/issues'
    reporting_bugs_section = nodes.section(
        '',
        nodes.title(text='Reporting bugs'),
        nodes.paragraph(
            '',
            nodes.Text('Report {prog} bugs to '.format(
                prog=self.options['prog'])),
            nodes.reference(
                issues_url, issues_url,
                refuri=issues_url,
                internal=False,
            ),
            nodes.Text('.'),
        ),
        ids=['reporting-bugs-section']
    )
    return [synopsis_section, description_section, author_section,
            reporting_bugs_section]
def run(self):
    """Parse the directive content and regroup its literal blocks into a
    Bootstrap-style tabbed code box (nav list + tab-content panes)."""
    env = self.state.document.settings.env  # kept for the serialno variant below
    node = nodes.Element()
    node.document = self.state.document
    self.state.nested_parse(self.content, self.content_offset, node)
    nav = []
    blocks = []
    for i, child in enumerate(node):
        if isinstance(child, nodes.literal_block):
            # tab id derived from the language's display name
            targetid = "%s" % self.formats[child['language']]
            # targetid = "code-box-%d" % env.new_serialno('code-box')
            targetnode = nodes.target('', '', ids=[targetid])
            block = nodes.section('')
            block.append(child)
            block.set_class('code')
            block.set_class('code-'+self.formats[child['language']])
            block.set_class('tab-pane')
            # first block is the initially visible tab
            if i == 0:
                block.set_class('active')
            para = nodes.paragraph()
            para += nodes.reference(self.formats[child['language']],
                                    self.formats[child['language']],
                                    refuri='#' + targetid)
            entry = nodes.list_item('')
            entry.append(para)
            entry.set_class('codebox-tab')
            entry.set_class('codebox-tab-'+self.formats[child['language']])
            if i == 0:
                entry.set_class('active')
            nav.append(entry)
            blocks.append(targetnode)
            blocks.append(block)
    resultnode = codebox()
    navList = nodes.bullet_list('', *nav, ids=['myTab'])
    navList.set_class('nav')
    resultnode.append(navList)
    tabContent = nodes.section('')
    tabContent.set_class('tab-content')
    tabContent.extend(blocks)
    resultnode.append(tabContent)
    return [resultnode]
def build_http_method_section(self, resource, http_method):
    """Build the doc nodes for one HTTP method of *resource*.

    Returns a list of nodes: the parsed description, then optional
    'Request Parameters' and 'Errors' sections derived from attributes
    attached to the view function.
    """
    doc = self.get_doc_for_http_method(resource, http_method)
    http_method_func = self.get_http_method_func(resource, http_method)

    # Description text
    returned_nodes = [
        parse_text(self, doc, where='HTTP %s doc' % http_method)
    ]

    # Request Parameters section -- fields are declared as attributes on
    # the handler function; absent attributes default to empty.
    required_fields = getattr(http_method_func, 'required_fields', [])
    optional_fields = getattr(http_method_func, 'optional_fields', [])
    if required_fields or optional_fields:
        all_fields = dict(required_fields)
        all_fields.update(optional_fields)
        fields_section = nodes.section(ids=['%s_params' % http_method])
        returned_nodes.append(fields_section)
        fields_section += nodes.title(text='Request Parameters')
        table = self.build_fields_table(all_fields,
                                        required_fields=required_fields,
                                        show_requirement_labels=True)
        fields_section += table

    # Errors section -- one bullet per error, sorted by numeric code
    errors = getattr(http_method_func, 'response_errors', [])
    if errors:
        errors_section = nodes.section(ids=['%s_errors' % http_method])
        returned_nodes.append(errors_section)
        errors_section += nodes.title(text='Errors')
        bullet_list = nodes.bullet_list()
        errors_section += bullet_list
        for error in sorted(errors, key=lambda x: x.code):
            item = nodes.list_item()
            bullet_list += item
            paragraph = nodes.paragraph()
            item += paragraph
            paragraph += get_ref_to_error(error)
    return returned_nodes
def formatDeclarationPage(self, moduleName, name, method, item):
    """Build the documentation page for one declaration.

    Renders the breadcrumb-trail title, a back-link paragraph to the
    module-level docs, a transition, and the item's own doc tree.
    """
    module_uri = self.renderer.makeURI(item.module)
    trail_title = self.formatTrailAsTitle(moduleName + "." + name)
    body_nodes = method(moduleName, name, item)
    back_link = nodes.paragraph(
        '', '',
        nodes.Text("For examples and more explanations, see the "),
        nodes.reference('', 'module level docs.', refuri=module_uri),
    )
    page_children = [
        trail_title,
        back_link,
        nodes.transition(),
        nodes.section('', *body_nodes),
    ]
    return nodes.section('', *page_children)
def _section(self, parent, title, id_pattern):
    """Create a titled docutils section, append it to *parent*, return it.

    The section id is *id_pattern* interpolated with the resource type.
    """
    section_id = id_pattern % self.resource_type
    new_section = nodes.section(ids=[section_id])
    parent.append(new_section)
    new_section.append(nodes.title('', title))
    return new_section
def render_cmd(app, node, usage, description):
    """Replace *node* with a section showing a command's usage line and its
    description parsed as reStructuredText."""
    title = node.get('title')
    titleid = idregex.sub('-', title).lower()
    section = nodes.section('', ids=[titleid])
    if title:
        section.append(nodes.title(title, title))
    # shell-prompt style usage line rendered as a plain-text literal block
    output = "$ {}".format(usage)
    new_node = nodes.literal_block(output, output)
    new_node['language'] = 'text'
    section.append(new_node)
    # build a standalone docutils document to parse the description into
    settings = docutils.frontend.OptionParser(
        components=(docutils.parsers.rst.Parser,)
    ).get_default_values()
    document = docutils.utils.new_document('', settings)
    parser = docutils.parsers.rst.Parser()
    # rewrite the raw help text into rst: inline literals, hyperlinks,
    # and flag defaults
    description = inline_literal_regex.sub('``', description)
    description = link_regex.sub(r'`\1 <\2>`_', description)
    description = flag_default_regex.sub(r'\1 (\2)', description)
    parser.parse(description, document)
    for el in document.children:
        section.append(el)
    node.replace_self(section)
def _auto_directive(dirname, arguments, options, content, lineno,
                    content_offset, block_text, state, state_machine):
    """Shared implementation for the auto* directives.

    Generates rst for the named object and parses it back into nodes;
    ``automodule`` output is parsed at top level (titles allowed), other
    directives inside a paragraph.
    """
    what = dirname[4:]  # strip the 'auto' prefix to get the object kind
    name = arguments[0]
    members = options.get('members', [])
    undoc = 'undoc-members' in options
    filename_set = set()
    warnings, result = generate_rst(what, name, members, undoc, content,
                                    state.document, lineno,
                                    filename_set=filename_set)
    # record all filenames as dependencies -- this will at least partially make
    # automatic invalidation possible
    for fn in filename_set:
        state.document.settings.env.note_dependency(fn)
    if dirname == 'automodule':
        node = nodes.section()
        # hack around title style bookkeeping: reset the parser's section
        # state so generated titles don't clash with the surrounding document
        surrounding_title_styles = state.memo.title_styles
        surrounding_section_level = state.memo.section_level
        state.memo.title_styles = []
        state.memo.section_level = 0
        state.nested_parse(result, content_offset, node, match_titles=1)
        state.memo.title_styles = surrounding_title_styles
        state.memo.section_level = surrounding_section_level
    else:
        node = nodes.paragraph()
        state.nested_parse(result, content_offset, node)
    return warnings + node.children
def new_subsection(self, level, title, lineno, messages):
    """Append new subsection to document tree. On return, check level."""
    memo = self.memo
    mylevel = memo.section_level
    memo.section_level += 1
    section_node = nodes.section()
    self.parent += section_node
    # parse inline markup in the title text
    textnodes, title_messages = self.inline_text(title, lineno)
    titlenode = nodes.title(title, '', *textnodes)
    name = normalize_name(titlenode.astext())
    section_node['names'].append(name)
    section_node += titlenode
    section_node += messages
    section_node += title_messages
    self.document.note_implicit_target(section_node, section_node)
    # parse the section body (everything after the title line) into the
    # new section node
    offset = self.state_machine.line_offset + 1
    absoffset = self.state_machine.abs_line_offset() + 1
    newabsoffset = self.nested_parse(
        self.state_machine.input_lines[offset:], input_offset=absoffset,
        node=section_node, match_titles=1)
    self.goto_line(newabsoffset)
    if memo.section_level <= mylevel:  # can't handle next section?
        raise EOFError              # bubble up to supersection
    # reset section_level; next pass will detect it properly
    memo.section_level = mylevel
def run(self): if "READTHEDOCS" in os.environ: project = os.environ["READTHEDOCS_PROJECT"] version = os.environ["READTHEDOCS_VERSION"] is_rtd = os.environ["READTHEDOCS"] == "True" link = "https://readthedocs.org/projects/" \ + "{}/downloads/pdf/{}/".format(project, version) else: is_rtd = False rst = [] if is_rtd: rst = "This documentation is also available as a " \ + "`PDF <{}>`_.".format(link) rst = [rst] vl = ViewList(rst, "fakefile.rst") # Create a node. node = nodes.section() node.document = self.state.document # Parse the rst. nested_parse_with_titles(self.state, vl, node) return node.children
def autohelp_directive(dirname, arguments, options, content, lineno,
                       content_offset, block_text, state, state_machine):
    """produces rst from nose help"""
    config = Config(parserClass=OptBucket,
                    plugins=BuiltinPluginManager())
    parser = config.getParser(TestProgram.usage())
    rst = ViewList()
    # copy the formatted help verbatim, line by line
    for line in parser.format_help().split('\n'):
        rst.append(line, '<autodoc>')

    # then an 'Options' section listing each option with its help text
    rst.append('Options', '<autodoc>')
    rst.append('-------', '<autodoc>')
    rst.append('', '<autodoc>')
    for opt in parser:
        rst.append(opt.options(), '<autodoc>')
        rst.append('   \n', '<autodoc>')
        rst.append('   ' + opt.help + '\n', '<autodoc>')
        rst.append('\n', '<autodoc>')
    node = nodes.section()
    node.document = state.document
    # temporarily reset the parser's title bookkeeping so the generated
    # section titles don't conflict with the enclosing document
    surrounding_title_styles = state.memo.title_styles
    surrounding_section_level = state.memo.section_level
    state.memo.title_styles = []
    state.memo.section_level = 0
    state.nested_parse(rst, 0, node, match_titles=1)
    state.memo.title_styles = surrounding_title_styles
    state.memo.section_level = surrounding_section_level
    return node.children
def run(self):
    """Render oslo.config option help for the requested namespaces.

    Namespaces come either from a config file (``config-file`` option) or
    from the directive content, one namespace per line.
    """
    env = self.state.document.settings.env
    app = env.app
    split_namespaces = 'split-namespaces' in self.options
    config_file = self.options.get('config-file')
    if config_file:
        app.info('loading config file %s' % config_file)
        conf = cfg.ConfigOpts()
        conf.register_opts(generator._generator_opts)
        conf(
            args=['--config-file', config_file],
            project='oslo.config.sphinxext',
        )
        namespaces = conf.namespace[:]
    else:
        # fall back to the directive body: one namespace per non-blank line
        namespaces = [
            c.strip()
            for c in self.content
            if c.strip()
        ]
    result = ViewList()
    source_name = '<' + __name__ + '>'
    for line in _format_option_help(app, namespaces, split_namespaces):
        result.append(line, source_name)
    node = nodes.section()
    node.document = self.state.document
    nested_parse_with_titles(self.state, result, node)
    return node.children
def run(self):
    """Render one or all services for the configured package.

    Without the ``service`` option, emit a single 'Services' section
    containing every service sorted by index; otherwise emit only the
    named service (or nothing when it is unknown).
    """
    env = self.state.document.settings.env

    # getting the options
    pkg = self.options['package']
    service_name = self.options.get('service')
    all_services = service_name is None

    # listing the services for the package
    services = self._get_services(pkg)

    if all_services:
        # we want to list all of them
        services_id = "services-%d" % env.new_serialno('services')
        services_node = nodes.section(ids=[services_id])
        services_node += nodes.title(text='Services')

        # Sort on (index, path) explicitly: the original sorted full tuples,
        # which falls through to comparing Service objects on ties and
        # raises TypeError on Python 3.
        services_ = sorted(
            ((service.index, path, service, methods)
             for (path, service), methods in services.items()),
            key=lambda entry: (entry[0], entry[1]))

        for _, path, service, methods in services_:
            services_node += self._render_service(path, service, methods)

        return [services_node]
    else:
        # we just want a single service
        #
        # XXX not efficient -- linear scan over all services
        for (path, service), methods in services.items():
            if service.name != service_name:
                continue
            return [self._render_service(path, service, methods)]
        return []
def apply(self):
    """Locate (or create) a trailing 'References' section and queue the
    TargetNotes and cleanup transforms to run inside it."""
    doc = self.document
    # scan trailing sections from the end for 'References'/'Copyright'
    i = len(doc) - 1
    refsect = copyright = None
    while i >= 0 and isinstance(doc[i], nodes.section):
        title_words = doc[i][0].astext().lower().split()
        if 'references' in title_words:
            refsect = doc[i]
            break
        elif 'copyright' in title_words:
            # remember the index so References can be inserted before it
            # NOTE(review): `if copyright:` below is falsy when the index is
            # 0 -- presumably fine for real documents, but worth confirming.
            copyright = i
        i -= 1
    if not refsect:
        refsect = nodes.section()
        refsect += nodes.title('', 'References')
        doc.set_id(refsect)
        if copyright:
            # Put the new "References" section before "Copyright":
            doc.insert(copyright, refsect)
        else:
            # Put the new "References" section at end of doc:
            doc.append(refsect)
    # queue TargetNotes first (priority 0), then the cleanup callback (1)
    pending = nodes.pending(references.TargetNotes)
    refsect.append(pending)
    self.document.note_pending(pending, 0)
    pending = nodes.pending(misc.CallBack,
                            details={'callback': self.cleanup_callback})
    refsect.append(pending)
    self.document.note_pending(pending, 1)
def run(self):
    """Create a titled, indexed section for a language.

    Emits an index node (pair entries per indexed name), an explicit
    target, and a section containing the parsed directive body.
    """
    language = self.arguments[0]
    # index under alternate names when :index_as: is given
    indexed_languages = self.options.get('index_as') or language
    index_specs = ['pair: {}; language'.format(l)
                   for l in indexed_languages.splitlines()]

    name = nodes.fully_normalize_name(language)
    target = 'language-{}'.format(name)
    targetnode = nodes.target('', '', ids=[target])
    self.state.document.note_explicit_target(targetnode)

    indexnode = addnodes.index()
    indexnode['entries'] = []
    indexnode['inline'] = False
    set_source_info(self, indexnode)
    for spec in index_specs:
        indexnode['entries'].extend(process_index_entry(spec, target))

    sectionnode = nodes.section()
    sectionnode['names'].append(name)

    # the language name itself becomes the section title (inline markup ok)
    title, messages = self.state.inline_text(language, self.lineno)
    titlenode = nodes.title(language, '', *title)

    sectionnode += titlenode
    sectionnode += messages
    self.state.document.note_implicit_target(sectionnode, sectionnode)

    self.state.nested_parse(self.content, self.content_offset, sectionnode)

    return [indexnode, targetnode, sectionnode]
def run(self):
    """Parse '<METHOD> <URL>' from the directive body into a rest_method
    node wrapped in a detail-control section, plus a temporary target used
    later to make the TOC resolve correctly.
    """
    lineno = self.state_machine.abs_line_number()
    section = nodes.section(classes=["detail-control"])

    node = rest_method()

    # TODO(sdague): this is a super simplistic parser, should be
    # more robust.
    method, sep, url = self.content[0].partition(' ')

    node['method'] = method
    node['url'] = url
    node['target'] = self.state.parent.attributes['ids'][0]

    # We need to build a temporary target that we can replace
    # later in the processing to get the TOC to resolve correctly.
    # (The original also created a throwaway nodes.target() up front; that
    # dead assignment was removed.)
    temp_target = "%s-selector" % node['target']
    target = nodes.target(ids=[temp_target])
    self.state.add_target(temp_target, '', target, lineno)

    section += node

    return [target, section]
def run(self):
    """Document a single Trakt service interface.

    The interface name is taken from the directive content; its methods
    are formatted to rst and parsed back into nodes.
    """
    env = self.state.document.settings.env
    app = env.app

    iface_type = ' '.join(self.content).strip()
    app.info('documenting service interface %r' % iface_type)
    source_name = '<' + __name__ + '>'

    # restrict the full API map to just the requested interface
    api_map = interfaces.construct_map(trakt.Trakt.client)
    iface_map = {iface_type: api_map.get(iface_type)}

    result = ViewList()

    for api_path, api_ref, api_methods in _format_apis(iface_map):
        result.append(api_path, source_name)
        result.append('', source_name)

        result.append(api_ref, source_name)
        result.append('', source_name)

        for method in api_methods:
            result.append(method, source_name)
            result.append('', source_name)

    # Parse what we have into a new section.
    node = nodes.section()
    node.document = self.state.document
    nested_parse_with_titles(self.state, result, node)

    return node.children
def visit_nextslide(self, node, building_slides):
    """Split the enclosing section at a ``nextslide`` marker by hoisting
    everything after the marker into a new sibling section."""
    index = node.parent.index(node)
    if (not building_slides or
        not node.parent.children[index+1:]):
        node.replace_self([])
    # NOTE(review): there is no early return after replace_self() above, so
    # the hoisting below still executes in that case -- confirm against the
    # original source that this is intended.

    # figure out where to hoist the subsequent content to
    parent = node.parent
    grandparent = node.parent.parent
    insertion_point = grandparent.index(node.parent) + 1

    # truncate siblings, storing a reference to the rest of the
    # content
    new_children = parent.children[index+1:]
    parent.children = parent.children[:index+1]

    # create the next section
    new_section = nodes.section()
    new_section += self._make_title_node(node)
    new_section.extend(new_children)
    self.document.set_id(new_section)

    # attach the section and delete the nextslide node
    grandparent.insert(insertion_point, new_section)
    del node.parent[index]
def _notes(self, device):
    """Extract and combine notes from a device

    Returns a section, including a title or ``None`` if there are no
    notes.
    """
    section = nodes.section(ids=[device['name'] + '-notes'],
                            names=[device['name'] + '\\ notes'])
    section += nodes.title(text='Notes')
    result = ViewList()
    has_notes = False

    def _collect(notes):
        # all note sources are attributed to the device's own source location
        for line in notes:
            result.append(line, device['source_file'], device['source_line'])

    # device-level notes, then per-mode and per-command notes
    if 'notes' in device:
        has_notes = True
        _collect(device['notes'])
    for mode in device.get('mode_info', []):
        if 'notes' in mode:
            has_notes = True
            _collect(mode['notes'])
    for cmd in device.get('cmd_info', []):
        if 'notes' in cmd:
            has_notes = True
            _collect(cmd['notes'])

    self.state.nested_parse(result, 0, section)
    # Conditional expression instead of the fragile `and/or` idiom the
    # original used (which also depends on the section's truthiness).
    return section if has_notes else None
def test_assert_having_only(self):
    """assert_having_only() must pass while children are only of the given
    custom types (standard docutils nodes and descendants ignored), and
    fail once a disallowed custom child appears."""
    node = addnodes.Section()
    node.assert_having_only(addnodes.Action)
    node.assert_having_only((addnodes.Action, addnodes.Parameters))

    # standard nodes are ignored
    node += nodes.section()
    node += nodes.paragraph()
    node += nodes.bullet_list()
    node.assert_having_only(addnodes.Action)

    # success
    node += addnodes.Action()
    node.assert_having_only(addnodes.Action)
    node += addnodes.Parameters()
    node.assert_having_only((addnodes.Action, addnodes.Parameters))

    # failed
    with self.assertRaises(AssertionError):
        node.assert_having_only(addnodes.Action)

    # descendants are ignored
    node[0] += addnodes.Body()
    node.assert_having_only((addnodes.Action, addnodes.Parameters))
def test_assert_having_any(self): node = addnodes.Section() # standard nodes are ignored node += nodes.section() node += nodes.paragraph() node += nodes.bullet_list() node.assert_having_any(addnodes.Action) # success node.assert_having_any(addnodes.Action) # no items node += addnodes.Action() node.assert_having_any(addnodes.Action) # one item node += addnodes.Action() node.assert_having_any(addnodes.Action) # two items # descendants are ignored node.pop() node[0] += addnodes.Action() node.assert_having_any(addnodes.Action) # other sections are ignored node[0] += addnodes.Parameters() node[0] += addnodes.Body() node.assert_having_any(addnodes.Action)
def run(self):
    """Generate an embedded N2 model viewer for an OpenMDAO model file and
    include the resulting HTML via a ``raw:: html`` directive."""
    path_to_model = self.arguments[0]

    # NOTE(review): local name `np` shadows the common numpy alias --
    # harmless here but easy to misread.
    np = os.path.normpath(os.path.join(os.getcwd(), path_to_model))

    # check that the file exists
    if not os.path.isfile(np):
        raise IOError('File does not exist({0})'.format(np))

    # output file name: model basename (up to the first dot) + "_n2.html"
    html_name = os.path.join(os.getcwd(),
                             (os.path.basename(path_to_model).split('.')[0]
                              + "_n2.html"))

    # run the openmdao CLI to produce the embedded viewer HTML
    cmd = subprocess.Popen(['openmdao', 'view_model', np, '--no_browser',
                            '--embed', '-o' + html_name])
    cmd_out, cmd_err = cmd.communicate()

    rst = ViewList()

    # Add the content one line at a time.
    # Second argument is the filename to report in any warnings
    # or errors, third argument is the line number.
    env = self.state.document.settings.env
    docname = env.doc2path(env.docname)
    rst.append(".. raw:: html", docname, self.lineno)
    # NOTE(review): the option line's leading whitespace was reconstructed
    # from a whitespace-mangled source -- confirm the indent against the
    # original (rst requires the option to be indented under the directive).
    rst.append(" :file: %s" % html_name, docname, self.lineno)

    # Create a node.
    node = nodes.section()

    # Parse the rst.
    nested_parse_with_titles(self.state, rst, node)

    # And return the result.
    return node.children
def _construct_main_sections(self, parser):
    """
    Construct Synopsis, Description and Options sections

    :param parser: pre-configured ArgumentParser instance
    :return: list of section nodes
    """
    cc_sections, cc_options = self._get_custom_content()
    result = []
    for section in ['synopsis', 'description', 'options']:
        # dispatch to _build_program_synopsis/_description/_options
        method = '_build_program_{}'.format(section)
        method = getattr(self, method)
        args = [parser]
        if section == 'options':
            args.append(cc_options)
        title = nodes.title(text=section.upper())
        content = method(*args)
        if section in cc_sections:
            # merge user-provided custom content per its action marker
            cc = cc_sections[section]
            if cc['action'] == 'append':
                content.extend(cc['content'].children)
            elif cc['action'] == 'prepend':
                content[0:0] = cc['content'].children
            elif cc['action'] == 'replace':
                # NOTE(review): unlike append/prepend this assigns
                # cc['content'] directly rather than its .children --
                # confirm this asymmetry is intentional.
                content[:] = cc['content']
        else:
            # append empty paragraph to ensure separation from consecutive section
            content.append(nodes.paragraph(text=''))
        result.append(nodes.section('', title, content, ids=[section.lower()]))
    return result
def test_assert_at_least_one(self): node = addnodes.Section() # failed with self.assertRaises(AssertionError): node.assert_having_at_least_one(addnodes.Action) # success node += addnodes.Action() node.assert_having_at_least_one(addnodes.Action) # one item node += addnodes.Action() node.assert_having_at_least_one(addnodes.Action) # two items # standard nodes are ignored node += nodes.section() node += nodes.paragraph() node += nodes.bullet_list() node.assert_having_at_least_one(addnodes.Action) # descendants are ignored node.pop() node[0] += addnodes.Action() node.assert_having_at_least_one(addnodes.Action) # other sections are ignored node[0] += addnodes.Parameters() node[0] += addnodes.Body() node.assert_having_at_least_one(addnodes.Action)
def _build_program_options(self, parser, custom_content):
    """
    Build list of program options

    :param parser: pre-configured ArgumentParser instance
    :param custom_content: custom content for options
    :return: node forming program options
    """
    result = nodes.container()
    if self.ignore_option_groups:
        # flat list: positionals then optionals, suppressed actions skipped
        actions = parser._get_positional_actions() + parser._get_optional_actions()
        actions = [a for a in actions if a.help is not SUPPRESS]
        for action in actions:
            # custom content is matched by any of the action's option strings
            cc = [v for k, v in custom_content.items() if k in action.option_strings]
            result.append(self._build_option(parser, action, cc[0] if cc else None))
    else:
        # one titled section per argparse action group with visible actions
        for group in parser._action_groups:
            actions = [a for a in group._group_actions if a.help is not SUPPRESS]
            if actions:
                title = nodes.title(text=group.title.capitalize())
                options = nodes.container()
                for action in actions:
                    cc = [v for k, v in custom_content.items() if k in action.option_strings]
                    options.append(self._build_option(parser, action, cc[0] if cc else None))
                result.append(nodes.section('', title, options, ids=[group.title.lower()]))
    return result
def visit_headings(self, element):
    """Convert an ``<hN>`` element into a docutils section of level N,
    with the heading content as the section title."""
    heading_level = int(element.tag[1])
    new_section = nodes.section(level=heading_level)
    new_section += self.make_node(nodes.title, element)
    return new_section
def assemble_doctree(self, indexfile, toctree_only, appendices):
    """Merge the index document, its toctree-included files, and the
    appendices into one large doctree for single-document (LaTeX) output."""
    self.docnames = set([indexfile] + appendices)
    self.info(darkgreen(indexfile) + " ", nonl=1)

    def process_tree(docname, tree):
        # Recursively inline every toctree reference as a start_of_file
        # node carrying the included document's children.
        tree = tree.deepcopy()
        for toctreenode in tree.traverse(addnodes.toctree):
            newnodes = []
            includefiles = map(str, toctreenode['includefiles'])
            for includefile in includefiles:
                try:
                    self.info(darkgreen(includefile) + " ", nonl=1)
                    subtree = process_tree(
                        includefile, self.env.get_doctree(includefile))
                    self.docnames.add(includefile)
                except Exception:
                    self.warn(
                        'toctree contains ref to nonexisting '
                        'file %r' % includefile,
                        self.env.doc2path(docname))
                else:
                    sof = addnodes.start_of_file(docname=includefile)
                    sof.children = subtree.children
                    newnodes.append(sof)
            toctreenode.parent.replace(toctreenode, newnodes)
        return tree

    tree = self.env.get_doctree(indexfile)
    tree['docname'] = indexfile
    if toctree_only:
        # extract toctree nodes from the tree and put them in a
        # fresh document
        new_tree = new_document('<latex output>')
        new_sect = nodes.section()
        new_sect += nodes.title(u'<Set title in conf.py>',
                                u'<Set title in conf.py>')
        new_tree += new_sect
        for node in tree.traverse(addnodes.toctree):
            new_sect += node
        tree = new_tree
    largetree = process_tree(indexfile, tree)
    largetree['docname'] = indexfile
    for docname in appendices:
        appendix = self.env.get_doctree(docname)
        appendix['docname'] = docname
        largetree.append(appendix)
    self.info()
    self.info("resolving references...")
    self.env.resolve_references(largetree, indexfile, self)
    # resolve :ref:s to distant tex files -- we can't add a cross-reference,
    # but append the document name
    for pendingnode in largetree.traverse(addnodes.pending_xref):
        docname = pendingnode['refdocname']
        sectname = pendingnode['refsectname']
        newnodes = [nodes.emphasis(sectname, sectname)]
        # annotate the emphasized section name with "(in <title>)" when the
        # target lives under a known subdirectory
        for subdir, title in self.titles:
            if docname.startswith(subdir):
                newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
                newnodes.append(nodes.emphasis(title, title))
                newnodes.append(nodes.Text(')', ')'))
                break
        else:
            pass
        pendingnode.replace_self(newnodes)
    return largetree
def _generate_nodes(self, name, command, parent=None, show_nested=False,
                    commands=None):
    """Generate the relevant Sphinx nodes.

    Format a `click.Group` or `click.Command`.

    :param name: Name of command, as used on the command line
    :param command: Instance of `click.Group` or `click.Command`
    :param parent: Instance of `click.Context`, or None
    :param show_nested: Whether subcommands should be included in output
    :param commands: Display only listed commands or skip the section if
        empty
    :returns: A list of nested docutil nodes
    """
    ctx = click.Context(command, info_name=name, parent=parent)

    # click grew the `hidden` attribute in 7.0; skip hidden commands.
    if CLICK_VERSION >= (7, 0) and command.hidden:
        return []

    # Title
    item = nodes.section(
        "",
        nodes.title(text=name),
        ids=[nodes.make_id(ctx.command_path)],
        names=[nodes.fully_normalize_name(ctx.command_path)],
    )

    # Summary
    source_name = ctx.command_path
    result = statemachine.ViewList()

    lines = _format_command(ctx, show_nested, commands)
    for line in lines:
        LOG.debug(line)
        result.append(line, source_name)

    self.state.nested_parse(result, 0, item)

    # Subcommands
    if not show_nested:
        return [item]

    commands = _filter_commands(ctx, commands)
    commands = self._sort_commands(command, commands)
    for help_section, subcommands in self._group_commands(
            command, commands):
        group_name = help_section.name
        if group_name == doc.UNSECTIONED:
            # Ungrouped subcommands are attached directly under the
            # command's own section instead of a named group section.
            for subcommand in subcommands:
                item.extend(
                    self._generate_nodes(subcommand.name, subcommand, ctx,
                                         show_nested))
            self.state.nested_parse(result, 0, item)
            continue
        group_item = nodes.section(
            "",
            nodes.title(text=group_name),
            ids=[nodes.make_id(group_name)],
            names=[nodes.fully_normalize_name(group_name)],
        )
        group_list = statemachine.ViewList()
        # pylint: disable=fixme
        # XXX This is supposed to add documentation lines to each group,
        # but it doesn't seem to work.
        for line in help_section.doc.splitlines():
            group_list.append(line, group_name)
        for subcommand in subcommands:
            group_item.extend(
                self._generate_nodes(subcommand.name, subcommand, ctx,
                                     show_nested))
        self.state.nested_parse(group_list, 0, group_item)
        item += group_item
    return [item]
def run(self): rst = USAGE_DETAILS_TEMPLATE.format(content="\n".join(self.content)) string_list = StringList(rst.split('\n')) node = nodes.section() self.state.nested_parse(string_list, self.content_offset, node) return [node]
def run(self):
    """Invoke the kernel-doc script on one source file and parse its reST.

    Builds the kernel-doc command line from the directive options
    (``export``/``internal``/``doc``/``functions``), registers file
    dependencies with the Sphinx environment, runs the script, and maps
    ``#define LINENO n`` markers in the output back to source line offsets
    so warnings point at the original C file.

    :returns: the parsed child nodes, or a single error node on failure
    """
    env = self.state.document.settings.env
    cmd = env.config.kerneldoc_bin + ['-rst', '-enable-lineno']

    # Pass the version string to kernel-doc, as it needs to use a different
    # dialect, depending what the C domain supports for each specific
    # Sphinx versions
    cmd += ['-sphinx-version', sphinx.__version__]

    filename = env.config.kerneldoc_srctree + '/' + self.arguments[0]
    export_file_patterns = []

    # Tell sphinx of the dependency
    env.note_dependency(os.path.abspath(filename))

    tab_width = self.options.get(
        'tab-width', self.state.document.settings.tab_width)

    # FIXME: make this nicer and more robust against errors
    if 'export' in self.options:
        cmd += ['-export']
        export_file_patterns = str(self.options.get('export')).split()
    elif 'internal' in self.options:
        cmd += ['-internal']
        export_file_patterns = str(self.options.get('internal')).split()
    elif 'doc' in self.options:
        cmd += ['-function', str(self.options.get('doc'))]
    elif 'functions' in self.options:
        functions = self.options.get('functions').split()
        if functions:
            for f in functions:
                cmd += ['-function', f]
        else:
            # An empty :functions: option means "everything except the
            # free-form DOC sections".
            cmd += ['-no-doc-sections']

    # Every file matching an export pattern is both a dependency and an
    # -export-file argument for kernel-doc.
    for pattern in export_file_patterns:
        for f in glob.glob(env.config.kerneldoc_srctree + '/' + pattern):
            env.note_dependency(os.path.abspath(f))
            cmd += ['-export-file', f]

    cmd += [filename]

    try:
        kernellog.verbose(env.app,
                          'calling kernel-doc \'%s\'' % (" ".join(cmd)))

        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        out, err = p.communicate()

        out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8')

        if p.returncode != 0:
            sys.stderr.write(err)

            kernellog.warn(env.app,
                           'kernel-doc \'%s\' failed with return code %d'
                           % (" ".join(cmd), p.returncode))
            return [nodes.error(None, nodes.paragraph(
                text = "kernel-doc missing"))]
        elif env.config.kerneldoc_verbosity > 0:
            # On success, only surface stderr when verbosity is requested.
            sys.stderr.write(err)

        lines = statemachine.string2lines(out, tab_width,
                                          convert_whitespace=True)
        result = ViewList()

        lineoffset = 0;
        line_regex = re.compile("^#define LINENO ([0-9]+)$")
        for line in lines:
            match = line_regex.search(line)
            if match:
                # sphinx counts lines from 0
                lineoffset = int(match.group(1)) - 1
                # we must eat our comments since the upset the markup
            else:
                result.append(line, filename, lineoffset)
                lineoffset += 1

        node = nodes.section()
        self.do_parse(result, node)

        return node.children

    except Exception as e:  # pylint: disable=W0703
        kernellog.warn(env.app,
                       'kernel-doc \'%s\' processing failed with: %s'
                       % (" ".join(cmd), str(e)))
        return [nodes.error(None, nodes.paragraph(
            text = "kernel-doc missing"))]
def parse(self, input, document): section = nodes.section(ids=['id1']) section += nodes.title('Generated section', 'Generated section') document += section
def create_section(self, title): section = nodes.section(ids=[title]) section += nodes.title(title, title) return section
def run(self): path_to_model = self.arguments[0] n2_dims = [1200, 700] show_toolbar = False if len(self.arguments) > 1 and self.arguments[1]: n2_dim_idx = 0 for idx in range(1, len(self.arguments)): if self.arguments[idx] == "toolbar": show_toolbar = True else: n2_dims[n2_dim_idx] = self.arguments[idx] n2_dim_idx = 1 np = os.path.normpath(os.path.join(os.getcwd(), path_to_model)) # check that the file exists if not os.path.isfile(np): raise IOError("File does not exist({0})".format(np)) # Generate N2 files into the target_dir. Those files are later copied # into the top of the HTML hierarchy, so the HTML doc file needs a # relative path to them. target_dir = os.path.join(os.getcwd(), "_n2html") rel_dir = os.path.relpath( os.getcwd(), os.path.dirname(self.state.document.settings._source)) html_base_name = os.path.basename(path_to_model).split( ".")[0] + "_n2.html" html_name = os.path.join(target_dir, html_base_name) html_rel_name = os.path.join(rel_dir, html_base_name) if show_toolbar: html_rel_name += "#toolbar" cmd = subprocess.Popen([ "openmdao", "n2", np, "--no_browser", "--embed", "-o" + html_name ]) cmd_out, cmd_err = cmd.communicate() rst = ViewList() # Add the content one line at a time. # Second argument is the filename to report in any warnings # or errors, third argument is the line number. env = self.state.document.settings.env docname = env.doc2path(env.docname) object_tag = ("<iframe width='" + str(n2_dims[0]) + "'" " height='" + str(n2_dims[1]) + "'" " style='border: 1px solid lightgray; resize: both;'" " src='" + html_rel_name + "'></iframe>") rst.append(".. raw:: html", docname, self.lineno) rst.append("", docname, self.lineno) # leave an empty line rst.append(" %s" % object_tag, docname, self.lineno) # Create a node. node = nodes.section() # Parse the rst. nested_parse_with_titles(self.state, rst, node) # And return the result. return node.children
def process_exercise_nodes(app, doctree, fromdocname):
    """
    Once the doctree is resolved, the exercises are injected where they
    need to.

    Runs several passes over *doctree*: restore per-exercise metadata,
    sort and group all exercises by chapter, rewrite exercise titles,
    expand each ``solutions`` placeholder into chapter sections with
    links back to the exercises, and finally strip ``solution`` children
    from the exercises themselves.
    """
    # Copy saved arguments to nodes, restore node pointers
    for node in doctree.traverse(exercise):
        node_id = (node['docname'], node['ids'][0])
        meta = app.env.exercises_exercises_map[node_id]
        node['label'] = meta['label']
        node['number'] = meta['number']
        node['title'] = meta['title']
        meta['node'] = node

    # Sort exercises in ascending order
    all_exercises = app.env.exercises_all_exercises
    all_exercises.sort(key=lambda ex: ex['number'])

    # Regroup exercises organized by chapters
    # (chapter = first component of the exercise number)
    hierarchy = OrderedDict()
    for ex in all_exercises:
        chapter = ex['number'][0]
        if chapter not in hierarchy:
            hierarchy[chapter] = []
        hierarchy[chapter].append(ex)

    # Update exercise titles
    for node in doctree.traverse(exercise):
        label = node['label']
        if node['title']:
            label += ' ' + node['title']
        node.children[0].replace_self(exercise_title(label, label))

    # Populate the solutions directive
    for node in doctree.traverse(solutions):
        content = []
        for chapter, exs in hierarchy.items():
            # Create a section per chapter
            section = nodes.section(ids=[f'solution-chapter-{chapter}'],
                                    auto=0)
            name = _('Chapter Solutions') + ' ' + str(chapter)
            section.append(nodes.title(name, name))
            content.append(section)
            # Insert the solutions
            for ex in [e for e in exs if e['solution']]:
                description = ex['label']
                para = nodes.paragraph()
                title = exercise_title(description, description)
                if app.builder.format in ['latex', 'html']:
                    # Make the solution heading a link back to the
                    # exercise it solves.
                    ref = nodes.reference('', '')
                    ref['refdocname'] = ex['docname']
                    ref['refuri'] = app.builder.get_relative_uri(
                        fromdocname, ex['docname'])
                    ref['refuri'] += '#' + ex['target']['refid']
                    ref.append(title)
                    title = ref
                para.append(title)
                content.append(para)
                content.extend(ex['solution'].children)
        node.replace_self(content)

    # Remove solution from the exercises
    for ex in doctree.traverse(exercise):
        ex.children = list(
            filter(lambda x: not isinstance(x, solution), ex.children))

    # Inject LaTeX header
    if all_exercises and hasattr(app.builder, 'context'):
        inject_latex_header(app, app.builder.context)
def run(self):
    """Render an options dictionary of a class attribute as an RST table.

    Directive arguments: module path, class name, and the attribute
    holding the options object. The class is imported and instantiated
    dynamically, its options are collected, and a simple-grid reST table
    (Option / Default / Acceptable Values / Acceptable Types /
    Description) is built and parsed.

    :returns: the parsed children of a section node
    """
    if self.arguments and self.arguments[0]:
        module_path = self.arguments[0]
    if self.arguments and self.arguments[1]:
        class_name = self.arguments[1]
    if self.arguments and self.arguments[2]:
        attribute_name = self.arguments[2]

    # NOTE(review): exec on directive arguments — only safe because the
    # arguments come from trusted documentation sources, not user input.
    exec('from {} import {}'.format(module_path, class_name), globals())
    exec('obj = {}()'.format(class_name), globals())
    options = getattr(obj, attribute_name)

    outputs = []
    for option_name, option_data in sorted(iteritems(options._dict)):
        name = option_name
        default = option_data['value']
        values = option_data['values']
        types = option_data['types']
        desc = option_data['desc']

        # Normalize types/values to lists of display strings, or "N/A".
        if types is None:
            types = "N/A"
        elif types is not None:
            if not isinstance(types, (tuple, list)):
                types = (types, )
            types = [type_.__name__ for type_ in types]

        if values is None:
            values = "N/A"
        elif values is not None:
            if not isinstance(values, (tuple, list)):
                values = (values, )
            values = [value for value in values]

        outputs.append([name, default, values, types, desc])

    lines = ViewList()

    col_heads = [
        'Option', 'Default', 'Acceptable Values', 'Acceptable Types',
        'Description'
    ]

    # Column widths: the max of header width and widest cell per column.
    max_sizes = {}
    for j, col in enumerate(col_heads):
        max_sizes[j] = len(col)

    for output in outputs:
        for j, item in enumerate(output):
            length = len(str(item))
            if max_sizes[j] < length:
                max_sizes[j] = length

    header = ""
    titles = ""
    for key, val in iteritems(max_sizes):
        header += '=' * val + ' '

    for j, head in enumerate(col_heads):
        titles += "%s " % head
        size = max_sizes[j]
        space = size - len(head)
        if space > 0:
            titles += space * ' '

    lines.append(header, "options table", 1)
    lines.append(titles, "options table", 2)
    lines.append(header, "options table", 3)

    n = 3
    for output in outputs:
        line = ""
        for j, item in enumerate(output):
            line += "%s " % str(item)
            size = max_sizes[j]
            space = size - len(str(item))
            if space > 0:
                line += space * ' '
        lines.append(line, "options table", n)
        n += 1

    lines.append(header, "options table", n)

    # Create a node.
    node = nodes.section()
    node.document = self.state.document
    # Parse the rst.
    nested_parse_with_titles(self.state, lines, node)
    # And return the result.
    return node.children
def autoplugin_directive(dirname, arguments, options, content, lineno, content_offset, block_text, state, state_machine): mod_name = arguments[0] mod = resolve_name(mod_name) plug_name = options.get('plugin', None) if plug_name: obj = getattr(mod, plug_name) else: for entry in dir(mod): obj = getattr(mod, entry) if isclass(obj) and issubclass(obj, Plugin) and obj is not Plugin: plug_name = '%s.%s' % (mod_name, entry) break # mod docstring rst = ViewList() rst.append('.. automodule :: %s\n' % mod_name, '<autodoc>') rst.append('', '<autodoc>') # options rst.append('Options', '<autodoc>') rst.append('-------', '<autodoc>') rst.append('', '<autodoc>') plug = obj() opts = OptBucket() plug.options(opts, {}) for opt in opts: rst.append(opt.options(), '<autodoc>') rst.append(' \n', '<autodoc>') rst.append(' ' + opt.help + '\n', '<autodoc>') rst.append('\n', '<autodoc>') # plugin class rst.append('Plugin', '<autodoc>') rst.append('------', '<autodoc>') rst.append('', '<autodoc>') rst.append('.. autoclass :: %s\n' % plug_name, '<autodoc>') rst.append(' :members:\n', '<autodoc>') rst.append(' :show-inheritance:\n', '<autodoc>') rst.append('', '<autodoc>') # source rst.append('Source', '<autodoc>') rst.append('------', '<autodoc>') rst.append( '.. include :: %s\n' % utils.relative_path( state_machine.document['source'], os.path.abspath(mod.__file__.replace('.pyc', '.py'))), '<autodoc>') rst.append(' :literal:\n', '<autodoc>') rst.append('', '<autodoc>') node = nodes.section() node.document = state.document surrounding_title_styles = state.memo.title_styles surrounding_section_level = state.memo.section_level state.memo.title_styles = [] state.memo.section_level = 0 state.nested_parse(rst, 0, node, match_titles=1) state.memo.title_styles = surrounding_title_styles state.memo.section_level = surrounding_section_level return node.children
def create_section(name): section = nodes.section(ids=[name]) section.append(nodes.title(text=name)) return section
def _make_section(self, title): """Return a section node with optional title""" section = nodes.section(ids=[self._sphinx_directive.new_serialno()]) if title: section += nodes.title(title, title) return section
def _generate_nodes( self, name: str, command: click.Command, parent: ty.Optional[click.Context], nested: str, commands: ty.Optional[ty.List[str]] = None, semantic_group: bool = False, ) -> ty.List[nodes.section]: """Generate the relevant Sphinx nodes. Format a `click.Group` or `click.Command`. :param name: Name of command, as used on the command line :param command: Instance of `click.Group` or `click.Command` :param parent: Instance of `click.Context`, or None :param nested: The granularity of subcommand details. :param commands: Display only listed commands or skip the section if empty :param semantic_group: Display command as title and description for `click.CommandCollection`. :returns: A list of nested docutil nodes """ ctx = click.Context(command, info_name=name, parent=parent) if command.hidden: return [] # Title section = nodes.section( '', nodes.title(text=name), ids=[nodes.make_id(ctx.command_path)], names=[nodes.fully_normalize_name(ctx.command_path)], ) # Summary source_name = ctx.command_path result = statemachine.ViewList() if semantic_group: lines = _format_description(ctx) else: lines = _format_command(ctx, nested, commands) for line in lines: LOG.debug(line) result.append(line, source_name) sphinx_nodes.nested_parse_with_titles(self.state, result, section) # Subcommands if nested == NESTED_FULL: if isinstance(command, click.CommandCollection): for source in command.sources: section.extend( self._generate_nodes( source.name, source, parent=ctx, nested=nested, semantic_group=True, )) else: commands = _filter_commands(ctx, commands) for command in commands: parent = ctx if not semantic_group else ctx.parent section.extend( self._generate_nodes(command.name, command, parent=parent, nested=nested)) return [section]
def section(id, title): node = nodes.section('', nodes.title('', title)) node['ids'].append(id) return node
def __init__(self, sphinx_directive): self._cur_doc = None self._sphinx_directive = sphinx_directive self._top_node = nodes.section() self._active_headings = [self._top_node]
class ErrorDirective(Directive): has_content = True final_argument_whitespace = True option_spec = { 'instance': directives.unchanged_required, 'example-data': directives.unchanged, 'title': directives.unchanged, } MIMETYPES = [ 'application/json', 'application/xml', ] def run(self): try: error_obj = self.get_error_object(self.options['instance']) except ErrorNotFound, e: return e.error_node # Add the class's file and this extension to the dependencies. self.state.document.settings.env.note_dependency(__file__) self.state.document.settings.env.note_dependency( sys.modules[error_obj.__module__].__file__) docname = 'webapi2.0-error-%s' % error_obj.code error_title = self.get_error_title(error_obj) targetnode = nodes.target('', '', ids=[docname], names=[docname]) self.state.document.note_explicit_target(targetnode) main_section = nodes.section(ids=[docname]) # Details section main_section += nodes.title(text=error_title) main_section += self.build_details_table(error_obj) # Example section examples_section = nodes.section(ids=['examples']) examples_section += nodes.title(text='Examples') extra_params = {} if 'example-data' in self.options: extra_params = json.loads(self.options['example-data']) has_examples = False for mimetype in self.MIMETYPES: headers, data = \ fetch_response_data(WebAPIResponseError, mimetype, err=error_obj, extra_params=extra_params) example_node = build_example(headers, data, mimetype) if example_node: example_section = nodes.section(ids=['example_' + mimetype]) examples_section += example_section example_section += nodes.title(text=mimetype) example_section += example_node has_examples = True if has_examples: main_section += examples_section return [targetnode, main_section]
def process_feed(app, doctree, fromdocname):
    """Expand ``feed`` placeholders into inlined entries and write RSS.

    When Disqus is configured and the document contains entry metadata, a
    Disqus node is appended first. Then each ``feed`` node is replaced by
    the referenced entry documents (title linked back to the source doc,
    body truncated at any ``entrycut`` with a "Read more" link), while in
    parallel an RSS item is built per entry. For the HTML builder the RSS
    feed is finally serialized to the configured file.
    """
    env = app.builder.env
    if env.config.disqus_shortname and doctree.traverse(entrymeta):
        node = disqus(classes=['feed-disqus'])
        node['shortname'] = env.config.disqus_shortname
        node['identifier'] = "/%s" % fromdocname
        node['title'] = env.titles[fromdocname][0]
        node['developer'] = env.config.disqus_developer
        doctree += node
    for node in doctree.traverse(feed):
        rss_filename = node['rss']
        rss_title = node['title']
        rss_link = node['link']
        rss_description = node['description']
        rss_date = datetime.datetime.utcnow()
        rss_items = []
        replacement = []
        for docname in node['entries']:
            entry = env.get_doctree(docname)
            for meta in entry.traverse(entrymeta):
                # Build the inlined section: a title that links back to
                # the original entry document...
                section_node = nodes.section()
                title = env.titles[docname]
                section_node['ids'] = entry[0]['ids']
                title_node = nodes.title()
                ref_node = nodes.reference(classes=['feed-ref'])
                ref_node['internal'] = True
                ref_node['refdocname'] = docname
                ref_node['refuri'] = \
                    app.builder.get_relative_uri(fromdocname, docname)
                ref_node['refuri'] += '#' + section_node['ids'][0]
                ref_node += title[0]
                title_node += ref_node
                section_node += title_node
                rss_item_title = "%s" % title[0]
                rss_item_link = rss_link + app.builder.get_target_uri(docname)
                rss_item_description = nodes.compound()
                # ...followed by the entry body, stopping at an entrycut.
                for subnode in entry[0]:
                    if isinstance(subnode, (nodes.title, disqus)):
                        continue
                    if isinstance(subnode, entrycut):
                        para_node = nodes.paragraph()
                        ref_node = nodes.reference(classes=['feed-more'])
                        ref_node['internal'] = True
                        ref_node['refdocname'] = docname
                        ref_node['refuri'] = \
                            app.builder.get_relative_uri(fromdocname, docname)
                        ref_node['refuri'] += '#' + section_node['ids'][0]
                        ref_node += nodes.Text('Read more\u2026')
                        para_node += ref_node
                        section_node += para_node
                        break
                    section_node += subnode.deepcopy()
                    # Metadata nodes stay out of the RSS description.
                    if isinstance(subnode, entrymeta):
                        continue
                    rss_item_description += subnode.deepcopy()
                env.resolve_references(section_node, fromdocname,
                                       app.builder)
                replacement.append(section_node)
                env.resolve_references(rss_item_description,
                                       docname, app.builder)
                if app.builder.format == 'html':
                    rss_item_description = app.builder.render_partial(
                        rss_item_description)['body']
                rss_item_date = meta['date']
                rss_item = RSSItem(rss_item_title, rss_item_link,
                                   rss_item_description, rss_item_date)
                rss_items.append(rss_item)
        node.replace_self(replacement)
        if app.builder.format == 'html':
            rss_feed = RSSFeed(rss_title, rss_link, rss_description,
                               rss_date, rss_items)
            if rss_filename:
                rss_path = os.path.join(app.builder.outdir, rss_filename)
                rss_stream = open(rss_path, 'wb')
                write_rss(rss_feed, rss_stream)
                rss_stream.close()
def run(self):
    """Document every LASIF CLI command, grouped by command group.

    For each group a titled reST section is generated; each command's
    parser is coaxed into a fully-initialized state (by monkey-patching
    ``parse_args``), scanned, and rendered with usage, docstring, and a
    hand-built grid table of positional/optional arguments. (Python 2
    code: ``iteritems``, ``izip_longest``.)

    :returns: the accumulated child nodes of all group sections
    """
    fcts = lasif_cli._get_functions()

    # Group the functions. Functions with no group will be placed in the
    # group "Misc".
    fct_groups = {}
    for fct_name, fct in fcts.iteritems():
        group_name = fct.group_name \
            if hasattr(fct, "group_name") else "Misc"
        fct_groups.setdefault(group_name, {})
        fct_groups[group_name][fct_name] = fct

    all_nodes = []

    # Print in a grouped manner.
    for group_name, fcts in sorted(fct_groups.iteritems()):
        node = nodes.section()
        node.document = self.state.document
        result = ViewList()

        title = group_name + " Functions"

        result.append("", "<lasif_cli_list>")
        result.append("------------------", "<lasif_cli_list>")
        result.append("", "<lasif_cli_list>")
        result.append(title, "<lasif_cli_list>")
        result.append("-" * len(title), "<lasif_cli_list>")
        result.append("", "<lasif_cli_list>")

        if group_name in lasif_cli.COMMAND_GROUP_DOCS:
            result.append(".. admonition:: %s" % group_name,
                          "<lasif_cli_list>")
            result.append("", "<lasif_cli_list>")
            for line in lasif_cli.COMMAND_GROUP_DOCS[group_name]\
                    .splitlines():
                result.append("    " + line, "<lasif_cli_list>")

        for fct_name, fct in fcts.iteritems():
            parser = lasif_cli._get_argument_parser(fct)

            # The parser receive all their options just before they are
            # being parsed. Therefore monkey patch the parse_args() method
            # to get a fully ready parser object.
            class TempException(Exception):
                pass

            def wild_monkey_patch(*args, **kwargs):
                raise TempException

            parser.parse_args = wild_monkey_patch

            try:
                fct(parser, [])
            except TempException:
                pass

            for i in scan_programs(parser):
                cmd_name = "lasif %s" % fct_name

                positional_args, optional_args, desc, usage = i

                title = cmd_name

                result.append("", "<lasif_cli_list>")
                result.append(".. program:: " + title, "<lasif_cli_list>")
                result.append("", "<lasif_cli_list>")
                result.append(title, "<lasif_cli_list>")
                result.append("*" * len(title), "<lasif_cli_list>")
                result.append("", "<lasif_cli_list>")

                if hasattr(fct, "_is_mpi_enabled") and fct._is_mpi_enabled:
                    result.append("**This function can be used with MPI**",
                                  "<lasif_cli_list>")
                    result.append("", "<lasif_cli_list>")

                result.append("    .. code-block:: none", "<lasif_cli_list>")
                result.append("", "<lasif_cli_list>")
                result.append("        " + "\n        ".join(
                    usage.splitlines()), "<lasif_cli_list>")
                result.append("", "<lasif_cli_list>")

                for line in textwrap.dedent(fct.__doc__).splitlines():
                    result.append(line, "<lasif_cli_list>")
                result.append("", "<lasif_cli_list>")

                # Collect arguments in table and render it.
                table = []
                if positional_args:
                    table.append(("**Positional Arguments**", ))
                    for option_strings, help_ in positional_args:
                        # Pair each option string with its wrapped help
                        # text, one table row per wrapped line.
                        for i, j in itertools.izip_longest(
                                (", ".join(
                                    ["``%s``" % _i
                                     for _i in option_strings]), ),
                                textwrap.wrap(help_, 50), fillvalue=""):
                            table.append((i, j))
                if optional_args:
                    table.append(("**Optional Arguments**", ))
                    for option_strings, help_ in optional_args:
                        for i, j in itertools.izip_longest(
                                (", ".join(
                                    ["``%s``" % _i
                                     for _i in option_strings]), ),
                                textwrap.wrap(help_, 20), fillvalue=""):
                            table.append((i, j))

                # Render table.
                padding = 1
                max_length = max(len(_i) for _i in table)
                # Column widths: widest cell per column plus padding.
                rows = []
                for i in range(max_length):
                    max_i = 0
                    for row in table:
                        if len(row) < max_length:
                            continue
                        max_i = max(max_i, len(row[i]) + 2 * padding)
                    rows.append(max_i)

                separator = "+" + "+".join("-" * _i for _i in rows) + "+"

                final_table = [
                    separator,
                ]
                for row in table:
                    if len(row) == 1:
                        # Single-cell header row spanning all columns.
                        final_table.append(
                            "|%-{0}s|".format(sum(rows) + len(rows) - 1)
                            % (" " * padding + row[0]))
                    elif len(row) == max_length:
                        # Super special case handling for LASIF!
                        if row[0] == "":
                            # Continuation of wrapped help text: merge with
                            # the previous row by dropping its separator.
                            final_table.pop(-1)
                        final_table.append("|" + "|".join(
                            "%-{0}s".format(length) % (" " * padding + _i)
                            for _i, length in zip(row, rows)) + "|")
                    else:
                        warnings.warn("Table cannot be rendered!")
                    final_table.append(separator)

                for line in final_table:
                    result.append(line, "<lasif_cli_list>")

        self.state.nested_parse(result, 0, node, match_titles=1)
        all_nodes.extend(node.children)

    return all_nodes
class ResourceDirective(Directive):
    """Directive documenting a Web API resource class.

    Renders the resource docstring plus Details, Fields, Links, per-HTTP-
    method, and Examples sections. (Python 2 code: ``unicode``, ``file``,
    ``except X, e``.)
    """

    has_content = True
    required_arguments = 0
    option_spec = {
        'classname': directives.unchanged_required,
        'is-list': directives.flag,
        'hide-links': directives.flag,
        'hide-examples': directives.flag,
    }

    # HTTP methods documented for item vs. list resources.
    item_http_methods = set(['GET', 'DELETE', 'PUT'])
    list_http_methods = set(['GET', 'POST'])

    # Generic mimetypes excluded from the examples section (resources
    # advertise more specific vendor mimetypes).
    FILTERED_MIMETYPES = [
        'application/json',
        'application/xml',
    ]

    # Python type -> human-readable field type label.
    type_mapping = {
        int: 'Integer',
        str: 'String',
        unicode: 'String',
        bool: 'Boolean',
        dict: 'Dictionary',
        file: 'Uploaded File',
    }

    def run(self):
        try:
            resource_class = self.get_resource_class(
                self.options['classname'])
        except ResourceNotFound, e:
            return e.error_node

        # Add the class's file and this extension to the dependencies.
        self.state.document.settings.env.note_dependency(__file__)
        self.state.document.settings.env.note_dependency(
            sys.modules[resource_class.__module__].__file__)

        resource = get_resource_from_class(resource_class)
        is_list = 'is-list' in self.options

        docname = 'webapi2.0-%s-resource' % \
            get_resource_docname(resource, is_list)
        resource_title = get_resource_title(resource, is_list)

        # Explicit target so the resource can be cross-referenced.
        targetnode = nodes.target('', '', ids=[docname], names=[docname])
        self.state.document.note_explicit_target(targetnode)
        main_section = nodes.section(ids=[docname])

        # Main section
        main_section += nodes.title(text=resource_title)
        main_section += parse_text(
            self, inspect.getdoc(resource),
            where='%s class docstring' % self.options['classname'])

        # Details section
        details_section = nodes.section(ids=['details'])
        main_section += details_section

        details_section += nodes.title(text='Details')
        details_section += self.build_details_table(resource)

        # Fields section
        if (resource.fields and (not is_list or resource.singleton)):
            fields_section = nodes.section(ids=['fields'])
            main_section += fields_section

            fields_section += nodes.title(text='Fields')
            fields_section += self.build_fields_table(resource.fields)

        # Links section
        if 'hide-links' not in self.options:
            fields_section = nodes.section(ids=['links'])
            main_section += fields_section

            fields_section += nodes.title(text='Links')
            fields_section += self.build_links_table(resource)

        # HTTP method descriptions
        for http_method in self.get_http_methods(resource, is_list):
            method_section = nodes.section(ids=[http_method])
            main_section += method_section

            method_section += nodes.title(text='HTTP %s' % http_method)
            method_section += self.build_http_method_section(resource,
                                                            http_method)

        if 'hide-examples' not in self.options:
            examples_section = nodes.section(ids=['examples'])
            examples_section += nodes.title(text='Examples')

            has_examples = False

            if is_list:
                mimetype_key = 'list'
            else:
                mimetype_key = 'item'

            for mimetype in resource.allowed_mimetypes:
                try:
                    mimetype = mimetype[mimetype_key]
                except KeyError:
                    continue

                if mimetype in self.FILTERED_MIMETYPES:
                    # Resources have more specific mimetypes. We want to
                    # filter out the general ones (like application/json)
                    # so we don't show redundant examples.
                    continue

                if mimetype.endswith('xml'):
                    # JSON is preferred. While we support XML, let's not
                    # continue to advertise it.
                    continue

                url, headers, data = \
                    self.fetch_resource_data(resource, mimetype)
                example_node = build_example(headers, data, mimetype)

                if example_node:
                    example_section = \
                        nodes.section(ids=['example_' + mimetype],
                                      classes=['examples',
                                               'requests-example'])
                    examples_section += example_section

                    example_section += nodes.title(text=mimetype)

                    accept_mimetype = mimetype

                    if (mimetype.startswith('application/') and
                        mimetype.endswith('+json')):
                        # Instead of telling the user to ask for a specific
                        # mimetype on the request, show them that asking for
                        # application/json works fine.
                        accept_mimetype = 'application/json'

                    curl_text = (
                        '$ curl http://reviews.example.com%s -H "Accept: %s"'
                        % (url, accept_mimetype)
                    )

                    example_section += nodes.literal_block(
                        curl_text, curl_text, classes=['cmdline'])

                    example_section += nodes.literal_block(
                        headers, headers, classes=['http-headers'])
                    example_section += example_node
                    has_examples = True

            # Only attach the examples section when something rendered.
            if has_examples:
                main_section += examples_section

        return [targetnode, main_section]
def _generate_nodes(self, name, command, parent=None, show_nested=False): """Generate the relevant Sphinx nodes. Format a `click.Group` or `click.Command`. :param name: Name of command, as used on the command line :param command: Instance of `click.Group` or `click.Command` :param parent: Instance of `click.Context`, or None :param show_nested: Whether subcommands should be included in output :returns: A list of nested docutil nodes """ ctx = click.Context(command, info_name=name, parent=parent) # Title # We build this with plain old docutils nodes section = nodes.section( '', nodes.title(text=name), ids=[nodes.make_id(ctx.command_path)], names=[nodes.fully_normalize_name(ctx.command_path)]) source_name = ctx.command_path result = statemachine.ViewList() # Description # We parse this as reStructuredText, allowing users to embed rich # information in their help messages if they so choose. if ctx.command.help: for line in statemachine.string2lines( ctx.command.help, tab_width=4, convert_whitespace=True): result.append(line, source_name) result.append('', source_name) # Summary if isinstance(command, click.Command): summary = _format_command(ctx, show_nested) else: # TODO(stephenfin): Do we care to differentiate? Perhaps we # shouldn't show usage for groups? summary = _format_command(ctx, show_nested) for line in summary: result.append(line, source_name) self.state.nested_parse(result, 0, section) # Commands if show_nested: commands = getattr(ctx.command, 'commands', {}) for command_name, command_obj in sorted(commands.items()): section.extend(self._generate_nodes( command_name, command_obj, ctx, show_nested)) return [section]
def add_section(): new = nodes.section() new.children = list(new_section_content) self.document.insert(index, new) new_section_content[:] = []
def run(self): event = self.arguments[0] anchor = event.lower().replace('_', '-') kind = self.options.get('type') inType = self.options.get('in') outType = self.options.get('out') or 'void' subject = self.options.get('subject') params = self.options.get('params') or '' since = self.options.get('since') or '' desc = u'\n'.join(self.content) # create section # optionally insert zero-width breaks: # event.replace('_', u"_\u200B") sec = nodes.section() sec.append(nodes.title('', event)) sec['names'].append(anchor) self.state.document.note_implicit_target(sec, sec) # the signature sig = '%s %s(%s)' % (outType, event, inType) if kind == 'until': sig += ' BREAKS' # additional params for this event paramlist = None if len(params) > 0: paramlist = self._buildParamList(nodes.bullet_list(), params) # create actual definition list dl = nodes.definition_list('', nodes.definition_list_item('', nodes.term('', '', nodes.strong('', 'Signatur:')), nodes.definition('', nodes.literal('', sig)) ), nodes.definition_list_item('', nodes.term('', '', nodes.strong('', 'Beschreibung:')), nodes.definition('', self._parseInline(desc)) ), nodes.definition_list_item('', nodes.term('', '', nodes.strong('', 'Subject:')), nodes.definition('', self._parseInline(subject)) ) ) if paramlist: dl.append(nodes.definition_list_item('', nodes.term('', '', nodes.strong('', 'Weitere Parameter:')), nodes.definition('', paramlist) )) if len(since) > 0: since = 'v%s' % since dl.append(nodes.definition_list_item('', nodes.term('', '', nodes.strong('', u'Hinzugefügt in:')), nodes.definition('', self._parseInline(since)) )) sec.append(dl) return [sec]
def format_path(self, path_doc): container = n.section(ids=[n.make_id(path_doc['path'])], names=[]) container += n.title(text=path_doc['path']) container.append(n.paragraph(text=path_doc['description'])) container.append(self.format_operation(path_doc['operations'])) return container
def _construct_manpage_specific_structure(self, parser_info):
    """
    Construct a typical man page consisting of the following elements:
        NAME (automatically generated, out of our control)
        SYNOPSIS
        DESCRIPTION
        OPTIONS
        FILES
        SEE ALSO
        BUGS

    ``parser_info`` is a dict describing an argparse parser; keys read
    here include ``bare_usage``, ``description``/``help``, ``epilog``,
    ``args``, ``action_groups`` and ``children``.  Returns the list of
    top-level section nodes to emit.
    """
    # SYNOPSIS section: the bare usage string as a literal block.
    synopsis_section = nodes.section(
        '',
        nodes.title(text='Synopsis'),
        nodes.literal_block(text=parser_info["bare_usage"]),
        ids=['synopsis-section'])
    # DESCRIPTION section: prefer the parser description, fall back to
    # its help text, then to a placeholder.
    description_section = nodes.section(
        '',
        nodes.title(text='Description'),
        nodes.paragraph(text=parser_info.get(
            'description', parser_info.get(
                'help', "undocumented").capitalize())),
        ids=['description-section'])
    # Let the directive's own content (ReST) be parsed into the
    # description so titles inside it are handled correctly.
    nested_parse_with_titles(
        self.state, self.content, description_section)
    if parser_info.get('epilog'):
        # TODO: do whatever sphinx does to understand ReST inside
        # docstrings magically imported from other places. The nested
        # parse method invoked above seem to be able to do this but
        # I haven't found a way to do it for arbitrary text
        description_section += nodes.paragraph(
            text=parser_info['epilog'])
    # OPTIONS section: positional arguments first, then one subtitle
    # per argparse action group.
    options_section = nodes.section(
        '',
        nodes.title(text='Options'),
        ids=['options-section'])
    if 'args' in parser_info:
        options_section += nodes.paragraph()
        options_section += nodes.subtitle(text='Positional arguments:')
        options_section += self._format_positional_arguments(parser_info)
    for action_group in parser_info['action_groups']:
        # NOTE(review): this guard tests 'options' in parser_info on
        # every iteration even though it never changes inside the loop
        # — presumably it should test the action_group; confirm intent.
        if 'options' in parser_info:
            options_section += nodes.paragraph()
            options_section += nodes.subtitle(text=action_group['title'])
            options_section += self._format_optional_arguments(
                action_group)
    items = [
        # NOTE: we cannot generate NAME ourselves. It is generated by
        # docutils.writers.manpage
        synopsis_section,
        description_section,
        # TODO: files
        # TODO: see also
        # TODO: bugs
    ]
    # Only emit OPTIONS if it gained content beyond its title.
    if len(options_section.children) > 1:
        items.append(options_section)
    if 'nosubcommands' not in self.options:
        # SUBCOMMANDS section (non-standard)
        subcommands_section = nodes.section(
            '',
            nodes.title(text='Sub-Commands'),
            ids=['subcommands-section'])
        if 'children' in parser_info:
            subcommands_section += self._format_subcommands(parser_info)
        # Again, only emit if there is more than just the title.
        if len(subcommands_section) > 1:
            items.append(subcommands_section)
    # Opt-in debugging aid: dump the raw parser_info as JSON.
    if os.getenv("INCLUDE_DEBUG_SECTION"):
        import json
        # DEBUG section (non-standard)
        debug_section = nodes.section(
            '',
            nodes.title(text="Argparse + Sphinx Debugging"),
            nodes.literal_block(text=json.dumps(parser_info, indent='  ')),
            ids=['debug-section'])
        items.append(debug_section)
    return items
def handle_package(self, package_decl, content):
    # type: (lal.BasicDecl, List[nodes.Node])
    """
    Generate documentation for one Ada package declaration.

    Walks the public part of ``package_decl``, groups subprograms and
    objects under the type they belong to, emits a section node for the
    package into ``content`` and recurses into nested packages.
    """

    # Each declaration can group the documentation of several other
    # declarations. This mapping (decl -> list[decl]) describes this
    # grouping.
    associated_decls = defaultdict(list)

    # List of top-level declarations to document
    toplevel_decls = []

    # Set mirroring toplevel_decls, used to check whether a decl is part
    # of it already.
    toplevel_decls_set = set()

    def append_decl(decl):
        """
        Append ``decl`` to ``toplevel_decls`` if it's not there yet.
        """
        if decl not in toplevel_decls_set:
            toplevel_decls.append(decl)
            toplevel_decls_set.add(decl)

    # Go through all declarations that appear in the top-level package and
    # organize them in sections the way we want to document them.
    decls = [d for d in package_decl.f_public_part.f_decls
             if d.is_a(lal.BasicDecl)]
    types = {}
    for decl in decls:
        _, annotations = self.get_documentation(decl)

        # Skip documentation for this entity if it opted out.
        if annotations.get('no-document'):
            continue
        if decl.is_a(lal.BasicSubpDecl, lal.ExprFunction):
            # Look for the type under which this subprogram should be
            # documented ("owning_type"). This is either the explicitly
            # asked type ("belongs-to" annotation) or the type that is a
            # primitive for this subprogram (if the type is declared in the
            # same file).
            owning_type = None
            if annotations.get('belongs-to'):
                owning_type = types[annotations['belongs-to']]
            else:
                prim_type = decl.f_subp_spec.p_primitive_subp_first_type()
                if prim_type and prim_type.unit == self.unit:
                    owning_type = prim_type
                    append_decl(owning_type)

            # If we found a relevant type, document the subprogram under
            # it, otherwise document it at the top-level.
            if owning_type:
                associated_decls[owning_type].append(decl)
            else:
                append_decl(decl)
        elif decl.is_a(lal.BaseTypeDecl):
            # New type declaration: document it and register it as a type
            types[decl.p_defining_name.text] = decl
            append_decl(decl)
        elif decl.is_a(lal.ObjectDecl):
            # Try to associate object declarations to their type, if there
            # is one in the current package.
            type_name = (decl.f_type_expr.p_designated_type_decl
                         .p_defining_name)
            t = types.get(type_name.text) if type_name else None
            if t:
                associated_decls[t].append(decl)
            else:
                append_decl(decl)
        elif decl.is_a(lal.BasicDecl):
            # Catch-all: warn on declaration kinds we have no special
            # handling for, except a few known-benign ones.
            if not decl.is_a(lal.ExceptionDecl, lal.PackageRenamingDecl,
                             lal.GenericPackageInstantiation,
                             lal.GenericSubpInstantiation):
                self.warn('default entity handling for {}:{}',
                          decl.unit.filename, decl)
            append_decl(decl)

    # Get documentation for the top-level package itself
    pkg_doc, annotations = self.get_documentation(package_decl)

    # Create the documentation's content

    # Create a section whose id/name is the package name with dots and
    # underscores normalized to dashes.
    pn = package_decl.p_defining_name.text
    normalize_pn = pn.replace(".", "-").replace("_", "-").lower()
    section = nodes.section(ids=normalize_pn)
    section['names'].append(normalize_pn)

    # we create a title and we add it to section
    section += nodes.title(text=package_decl.p_defining_name.text)
    content.append(section)

    self.parse_into(pkg_doc, section)

    # Go through all entities to generate their documentation
    for decl in toplevel_decls:
        if decl.is_a(lal.PackageDecl):
            self.handle_package(decl, section)
        elif decl.is_a(lal.GenericPackageDecl):
            self.handle_package(decl.f_package_decl, section)
        else:
            # Document the declaration itself, then every declaration
            # grouped under it (primitives, objects of the type, ...).
            n, content_node = self.handle_signature_decl(decl)
            section += n
            for assoc_decls in associated_decls[decl]:
                assoc_nodes, _ = self.handle_signature_decl(assoc_decls)
                content_node += assoc_nodes
def test_emit_with_nonascii_name_node(app, status, warning):
    """Emitting an event with a node whose name is non-ASCII must not fail."""
    section = nodes.section(names=['\u65e5\u672c\u8a9e'])
    app.emit('my_event', section)
def run(self):
    """Run the kernel-doc tool on a source file and parse its ReST output.

    Builds the kernel-doc command line from the directive options,
    invokes it as a subprocess, strips the ``#define LINENO`` markers it
    emits (using them to keep source line numbers accurate), and nested-
    parses the remaining ReST.  Returns the resulting child nodes, or an
    error node if the tool fails.
    """
    env = self.state.document.settings.env
    cmd = [env.config.kerneldoc_bin, '-rst', '-enable-lineno']

    filename = env.config.kerneldoc_srctree + '/' + self.arguments[0]
    export_file_patterns = []

    # Tell sphinx of the dependency
    env.note_dependency(os.path.abspath(filename))

    tab_width = self.options.get(
        'tab-width', self.state.document.settings.tab_width)

    # FIXME: make this nicer and more robust against errors
    # Exactly one mode option is honored, in this priority order.
    if 'export' in self.options:
        cmd += ['-export']
        export_file_patterns = str(self.options.get('export')).split()
    elif 'internal' in self.options:
        cmd += ['-internal']
        export_file_patterns = str(self.options.get('internal')).split()
    elif 'doc' in self.options:
        cmd += ['-function', str(self.options.get('doc'))]
    elif 'functions' in self.options:
        for f in str(self.options.get('functions')).split():
            cmd += ['-function', f]

    # Every file matching an export pattern is both a sphinx dependency
    # and an -export-file argument to kernel-doc.
    for pattern in export_file_patterns:
        for f in glob.glob(env.config.kerneldoc_srctree + '/' + pattern):
            env.note_dependency(os.path.abspath(f))
            cmd += ['-export-file', f]

    cmd += [filename]

    try:
        env.app.verbose('calling kernel-doc \'%s\'' % (" ".join(cmd)))

        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             universal_newlines=True)
        out, err = p.communicate()

        # python2 needs conversion to unicode.
        # python3 with universal_newlines=True returns strings.
        if sys.version_info.major < 3:
            out, err = unicode(out, 'utf-8'), unicode(err, 'utf-8')

        if p.returncode != 0:
            sys.stderr.write(err)

            env.app.warn('kernel-doc \'%s\' failed with return code %d'
                         % (" ".join(cmd), p.returncode))
            return [nodes.error(None, nodes.paragraph(
                text="kernel-doc missing"))]
        elif env.config.kerneldoc_verbosity > 0:
            # Surface kernel-doc's warnings even on success.
            sys.stderr.write(err)

        lines = statemachine.string2lines(out, tab_width,
                                          convert_whitespace=True)
        result = ViewList()

        lineoffset = 0
        line_regex = re.compile("^#define LINENO ([0-9]+)$")
        for line in lines:
            match = line_regex.search(line)
            if match:
                # sphinx counts lines from 0
                lineoffset = int(match.group(1)) - 1
                # we must eat our comments since they upset the markup
            else:
                result.append(line, filename, lineoffset)
                lineoffset += 1

        node = nodes.section()
        node.document = self.state.document
        self.state.nested_parse(result, self.content_offset, node)

        return node.children

    except Exception as e:
        env.app.warn('kernel-doc \'%s\' processing failed with: %s'
                     % (" ".join(cmd), str(e)))
        return [nodes.error(None, nodes.paragraph(
            text="kernel-doc missing"))]
def doctree_read(app, doctree):
    """Add "edit on bitbucket" links to a parsed document.

    Appends an "edit this page" link to the page itself and, for every
    Python object description in the doctree, a docstring-editing link
    that points at the object's source line on Bitbucket.

    :param app: the Sphinx application object (configuration source).
    :param doctree: the docutils document tree to decorate in place.
    :raises ValueError: if ``edit_on_bitbucket_project`` was not set.
    """
    # Get the configuration parameters
    if app.config.edit_on_bitbucket_project == 'REQUIRED':
        raise ValueError(
            "The edit_on_bitbucket_project configuration variable must be "
            "provided in the conf.py")
    # Normalize the two roots so they always end with exactly one '/'.
    source_root = app.config.edit_on_bitbucket_source_root
    if source_root != '' and not source_root.endswith('/'):
        source_root += '/'
    doc_root = app.config.edit_on_bitbucket_doc_root
    if doc_root != '' and not doc_root.endswith('/'):
        doc_root += '/'
    url = 'http://bitbucket.org/%s/src/tip/' % (
        app.config.edit_on_bitbucket_project)
    docstring_message = app.config.edit_on_bitbucket_docstring_message
    page_message = app.config.edit_on_bitbucket_page_message

    # Handle the "edit this page" link
    doc_path = os.path.relpath(doctree.get('source'), app.builder.srcdir)
    if not re.match(app.config.edit_on_bitbucket_skip_regex, doc_path):
        path = url + doc_root + doc_path
        # The link only makes sense in HTML output.
        onlynode = addnodes.only(expr='html')
        onlynode += nodes.reference(
            reftitle=app.config.edit_on_bitbucket_help_message,
            refuri=path)
        onlynode[0] += nodes.inline('', page_message,
                                    classes=['edit-on-bitbucket'])
        para = nodes.paragraph()
        para.update_basic_atts({'classes': ['edit-on-bitbucket-para']})
        para += onlynode
        # Reuse an existing trailing edit-section, otherwise create one.
        if 'edit-section' in doctree[-1].attributes['classes']:
            doctree[-1] += para
        else:
            section = nodes.section()
            section.update_basic_atts({'classes': ['edit-section']})
            section += para
            doctree += section

    # Handle the docstring-editing links
    for objnode in doctree.traverse(addnodes.desc):
        if objnode.get('domain') != 'py':
            continue
        names = set()
        for signode in objnode:
            if not isinstance(signode, addnodes.desc_signature):
                continue
            modname = signode.get('module')
            if not modname:
                continue
            fullname = signode.get('fullname')
            if fullname in names:
                # only one link per name, please
                continue
            names.add(fullname)
            obj = import_object(modname, fullname)
            anchor = None
            if obj is not None:
                try:
                    lines, lineno = inspect.getsourcelines(obj)
                except (IOError, OSError, TypeError):
                    # getsourcelines raises OSError/IOError when the
                    # source is unavailable and TypeError for built-ins;
                    # in either case we simply emit no link.
                    pass
                else:
                    anchor = '#cl-%d' % lineno
            if anchor:
                path = '%s%s%s.py%s' % (url, source_root,
                                        modname.replace('.', '/'), anchor)
                onlynode = addnodes.only(expr='html')
                onlynode += nodes.reference(
                    reftitle=app.config.edit_on_bitbucket_help_message,
                    refuri=path)
                onlynode[0] += nodes.inline(
                    '', '', nodes.raw('', ' ', format='html'),
                    nodes.Text(docstring_message),
                    classes=['edit-on-bitbucket', 'viewcode-link'])
                signode += onlynode
def _render_service(self, service):
    """Render one cornice service as a docutils section.

    Produces a titled section for the service with one subsection per
    HTTP method, documenting the view's docstring, schema attributes,
    validators, accepted content types and the response renderer.

    :param service: the cornice service to document.
    :returns: the assembled ``nodes.section``.
    """
    service_id = "service-%d" % self.env.new_serialno('service')
    service_node = nodes.section(ids=[service_id])

    title = '%s service at %s' % (service.name.title(), service.path)
    service_node += nodes.title(text=title)

    if service.description is not None:
        service_node += rst2node(trim(service.description))

    for method, view, args in service.definitions:
        if method == 'HEAD':
            # Skip head - this is essentially duplicating the get docs.
            continue
        method_id = '%s-%s' % (service_id, method)
        method_node = nodes.section(ids=[method_id])
        method_node += nodes.title(text=method)

        # Fix: always start from an empty docstring so the later
        # "+=" accumulations cannot hit an unbound name when
        # is_string(view) is true but no 'klass' was supplied.
        docstring = ''
        if is_string(view):
            if 'klass' in args:
                ob = args['klass']
                view_ = getattr(ob, view.lower())
                docstring = trim(view_.__doc__ or "") + '\n'
        else:
            docstring = trim(view.__doc__ or "") + '\n'

        if 'schema' in args:
            schema = args['schema']

            attrs_node = nodes.inline()
            for location in ('header', 'querystring', 'body'):
                attributes = schema.get_attributes(location=location)
                if attributes:
                    attrs_node += nodes.inline(
                        text='values in the %s' % location)
                    location_attrs = nodes.bullet_list()

                    for attr in attributes:
                        temp = nodes.list_item()

                        # Get attribute data-type
                        if hasattr(attr, 'type'):
                            attr_type = attr.type
                        elif hasattr(attr, 'typ'):
                            attr_type = attr.typ.__class__.__name__
                        else:
                            attr_type = None

                        temp += nodes.strong(text=attr.name)
                        if attr_type is not None:
                            temp += nodes.inline(text=' (%s)' % attr_type)
                        if not attr.required or attr.description:
                            temp += nodes.inline(text=' - ')
                        if not attr.required:
                            if attr.missing is not None:
                                default = json.dumps(attr.missing)
                                temp += nodes.inline(
                                    text='(default: %s) ' % default)
                            else:
                                temp += nodes.inline(
                                    text='(optional) ')
                        if attr.description:
                            temp += nodes.inline(text=attr.description)

                        location_attrs += temp

                    attrs_node += location_attrs
            method_node += attrs_node

        for validator in args.get('validators', ()):
            if validator.__doc__ is not None:
                docstring += trim(validator.__doc__)

        if 'accept' in args:
            # Fix: test callability BEFORE to_list — to_list wraps the
            # value in a list, which is never callable, so the callable
            # branch was unreachable (see the sibling _render_service).
            accept = args['accept']
            if callable(accept):
                if accept.__doc__ is not None:
                    docstring += accept.__doc__.strip()
            else:
                accept = to_list(accept)
                accept_node = nodes.strong(text='Accepted content types:')
                node_accept_list = nodes.bullet_list()
                accept_node += node_accept_list

                for item in accept:
                    temp = nodes.list_item()
                    temp += nodes.inline(text=item)
                    node_accept_list += temp

                method_node += accept_node

        node = rst2node(docstring)
        # Fix: guard against rst2node returning None BEFORE running the
        # field transformer — transform_all(None) would raise.
        if node is not None:
            DocFieldTransformer(self).transform_all(node)
            method_node += node

        renderer = args['renderer']
        if renderer == 'simplejson':
            renderer = 'json'

        response = nodes.paragraph()
        response += nodes.strong(text='Response: %s' % renderer)
        method_node += response

        service_node += method_node

    return service_node