def make_node(self, title, contents, content_type=None):
    """Build a docutils section for *contents*.

    :param title: title of the generated section.
    :param contents: in the default (doc) mode, a mapping with ``docs``,
        ``options`` and ``author`` keys; in ``yaml`` mode, an iterable of
        YAML strings.
    :param content_type: ``None`` for the doc layout, ``'yaml'`` for
        literal blocks.
    :return: the populated section node.
    """
    section = self._section_block(title=title)
    if not content_type:
        # Doc section: one raw-HTML paragraph per docstring line.
        for content in contents['docs']:
            for paragraph in content.split('\n'):
                retnode = nodes.paragraph()
                retnode.append(self._raw_html_block(data=paragraph))
                section.append(retnode)
        # Options section.
        options_list = nodes.field_list()
        options_section = self._section_block(title='Options')
        for key, value in contents['options'].items():
            options_list.append(
                self._raw_fields(data=value['description'], field_name=key))
        # FIX: the original used a ``for``/``else`` here; the loop body
        # contains no ``break`` so the else-clause always executed --
        # append unconditionally (identical behavior, clearer intent).
        options_section.append(options_list)
        section.append(options_section)
        # Authors section.
        authors_list = nodes.field_list()
        authors_list.append(self._raw_fields(data=contents['author']))
        authors_section = self._section_block(title='Authors')
        authors_section.append(authors_list)
        section.append(authors_section)
    elif content_type == 'yaml':
        # YAML mode: each entry becomes a literal block, rendered verbatim.
        for content in contents:
            section.append(
                self._literal_block(data=content, dump_data=False))
    return section
def run(self):
    """Generate one docutils section per imported ``Thing`` class,
    preceded by an index of links.

    Each section carries the class title, its base class, its docstring
    (when present) and a field list describing every own field.
    """
    env = self.state.document.settings.env  # NOTE(review): unused here -- confirm before removing
    module = importlib.import_module(self.options['module'])
    things = tuple(self._import(module))
    sections = []
    sections.append(self.links(things))  # Make index
    for thng in things:  # type: Thing
        # Generate a section for each class, with a title,
        # fields description and a paragraph
        section = n.section(ids=[self._id(thng)])
        section += n.title(thng.__name__, thng.__name__)
        section += self.parse('*Extends {}*'.format(thng._base_class))
        if thng.__doc__:
            section += self.parse(thng.__doc__)
        fields = n.field_list()
        for key, f in thng._own:
            # Prefer the serialized key name over the attribute name.
            name = n.field_name(text=f.data_key or key)
            body = [
                self.parse('{} {}'.format(
                    self.type(f), f.metadata.get('description', '')))
            ]
            if isinstance(f, EnumField):
                # Enum members get their own nested listing.
                body.append(self._parse_enum_field(f))
            attrs = n.field_list()
            if f.dump_only:
                attrs += self.field('Submit', 'No.')
            if f.required:
                attrs += self.field('Required', f.required)
            fields += n.field('', name, n.field_body('', *body, attrs))
        section += fields
        sections.append(section)
    return sections
def get_field_list(self, node): """Return the node's field list, if there isn't one then create it first.""" # Add a field list if there isn't one if not node[1][-1].children: node[1][-1].append(nodes.field_list()) if not isinstance(node[1][-1][0], nodes.field_list): node[1][-1].append(nodes.field_list()) return node[1][-1][-1]
def make_content(self, all_members):
    """Build the content for a callable's description.

    Parses the item's doc text, then emits a field list holding
    Parameters, Returns and Return Type (each only when present),
    followed by any documented subtypes.
    """
    ret = nodes.section()
    doc = self.item
    if doc.doc:
        self.directive.state.nested_parse(to_list(doc.doc), 0, ret)
    check_parameters(self, doc)
    params, subtypes = extract_subtypes(self.item.name, self.item)
    rdoc = doc.return_val.doc
    rtype = doc.return_val.type
    if params or rtype or rdoc:
        with addto(ret, nodes.field_list()) as fields:
            if params:
                with addto(fields, nodes.field()) as field:
                    field += nodes.field_name('Parameters', 'Parameters')
                    # Parameters are rendered as a bullet list in the body.
                    with addto(field, nodes.field_body()) as body,\
                            addto(body, nodes.bullet_list()) as holder:
                        holder.extend(make_parameters(
                            params, mod=doc['sourcemodule'].name))
            if rdoc:
                with addto(fields, nodes.field()) as field:
                    field += nodes.field_name("Returns", "Returns")
                    with addto(field, nodes.field_body()) as body,\
                            addto(body, nodes.paragraph()) as p:
                        p += nodes.inline(rdoc, rdoc)
            if rtype:
                with addto(fields, nodes.field()) as field:
                    field += nodes.field_name("Return Type", "Return Type")
                    with addto(field, nodes.field_body()) as body, \
                            addto(body, nodes.paragraph()) as p:
                        p += make_types(rtype, mod=doc['sourcemodule'].name)
    ret += self.document_subtypes(subtypes)
    return ret.children
def make_content(self, all_members):
    """Build the desc_content for a JS module: exports/dependencies
    fields, the module doc, the directive's own content, and member
    property documentation."""
    doc = self.item
    content = addnodes.desc_content()
    if doc.exports or doc.dependencies:
        with addto(content, nodes.field_list()) as fields:
            if doc.exports:
                with addto(fields, nodes.field()) as field:
                    field += nodes.field_name('Exports', 'Exports')
                    with addto(field, nodes.field_body()) as body:
                        ref = doc['exports']  # warning: not the same as doc.exports
                        label = ref or '<anonymous>'
                        # Cross-reference the exported symbol in the js
                        # domain; resolved later by Sphinx.
                        link = addnodes.pending_xref(
                            ref, nodes.paragraph(ref, label),
                            refdomain='js', reftype='any', reftarget=ref,
                        )
                        link['js:module'] = doc.name
                        body += link
            if doc.dependencies:
                with addto(fields, nodes.field()) as field:
                    self.make_dependencies(field, doc)
    if doc.doc:
        # FIXME: source offset
        self.directive.state.nested_parse(
            to_list(doc.doc, source=doc['sourcefile']), 0, content)
    self.directive.state.nested_parse(self.directive.content, 0, content)
    content += self.document_properties(all_members)
    return content
def _process_definitions(self, schema):
    """Render every named definition in *schema* as a field list entry."""
    definitions = field_list()
    for defn_name, defn_schema in schema.items():
        entry = self._create_field(
            defn_name, 'jsonschema-definition', self._dispatch(defn_schema))
        definitions += entry
    return definitions
def _build_markup(self):
    """Assemble the repository-info markup: a field list (branch, commit)
    plus optional warnings about uncommitted/untracked changes."""
    container = nodes.paragraph()
    info_fields = nodes.field_list()
    container.append(info_fields)

    def append_field(label, child):
        # One field = a name node plus a body wrapping the given child.
        body = nodes.field_body()
        body.append(child)
        entry = nodes.field()
        entry += [nodes.field_name(text=label), body]
        info_fields.append(entry)

    if 'branch' in self.options and self.branch_name is not None:
        append_field("Branch", nodes.emphasis(text=self.branch_name))
    if 'commit' in self.options:
        if 'no_github_link' in self.options:
            commit_child = self._commit_text_node()
        else:
            commit_child = self._github_link()
        append_field("Commit", commit_child)
    if 'uncommitted' in self.options and self.repo.is_dirty():
        container.append(nodes.warning('', nodes.inline(
            text="There were uncommitted changes when this was compiled."
        )))
    if 'untracked' in self.options and self.repo.untracked_files:
        container.append(nodes.warning('', nodes.inline(
            text="There were untracked files when this was compiled."
        )))
    return [container]
def _process_singleobjects(self, label, schema):
    """Render a single combined schema as Combination/Types fields."""
    result = field_list()
    result += self._create_field(
        'Combination', 'json-combinatortype', paragraph(text=label))
    result += self._create_field(
        'Types', 'jsonschema-combinedtypes', self._dispatch(schema))
    return result
def make_content(self, all_members):
    """Build a class description: super/mixin/param fields, parsed doc,
    directive content, member properties and constructor subtypes.

    :return: the children of the assembled section node.
    """
    doc = self.item
    ret = nodes.section()
    ctor = self.item.constructor
    # FIX: the original `params = subtypes = []` bound BOTH names to the
    # same list object; use two distinct lists so a mutation of one can
    # never leak into the other.
    params, subtypes = [], []
    if ctor:
        check_parameters(self, ctor)
        params, subtypes = extract_subtypes(doc.name, ctor)
    fields = nodes.field_list()
    fields += self.make_super()
    fields += self.make_mixins()
    fields += self.make_params(params)
    # Only attach the field list when it actually gained content.
    if fields.children:
        ret += fields
    if doc.doc:
        self.directive.state.nested_parse(to_list(doc.doc), 0, ret)
    self.directive.state.nested_parse(self.directive.content, 0, ret)
    ret += self.document_properties(all_members)
    ret += self.document_subtypes(subtypes)
    return ret.children
def process_motor_nodes(app, doctree):
    # Search doctree for Motor's methods and attributes whose docstrings were
    # copied from PyMongo, and fix them up for Motor:
    # 1. Add a 'callback' param (sometimes optional, sometimes required) to
    #    all async methods. If the PyMongo method took no params, we create
    #    a parameter-list from scratch, otherwise we edit PyMongo's list.
    # 2. Remove all version annotations like "New in version 2.0" since
    #    PyMongo's version numbers are meaningless in Motor's docs.
    # 3. Remove "seealso" directives that reference PyMongo's docs.
    #
    # We do this here, rather than by registering a callback to Sphinx's
    # 'autodoc-process-signature' event, because it's way easier to handle the
    # parsed doctree before it's turned into HTML than it is to update the RST.
    for objnode in doctree.traverse(desc):
        if objnode["objtype"] in ("method", "attribute"):
            signature_node = find_by_path(objnode, [desc_signature])[0]
            name = ".".join([signature_node["module"], signature_node["fullname"]])
            assert name.startswith("motor.")
            obj_motor_info = motor_info.get(name)
            if obj_motor_info:
                desc_content_node = find_by_path(objnode, [desc_content])[0]
                if obj_motor_info.get("is_async_method"):
                    try:
                        # Find the parameter list, a bullet_list instance
                        parameters_node = find_by_path(
                            desc_content_node,
                            [field_list, field, field_body, bullet_list])[0]
                    except IndexError:
                        # PyMongo method has no parameters, create an empty
                        # params list
                        parameters_node = bullet_list()
                        parameters_field_list_node = field_list(
                            "",
                            field("",
                                  field_name("", "Parameters "),
                                  field_body("", parameters_node)))
                        desc_content_node.append(parameters_field_list_node)
                    insert_callback(parameters_node)
                    # Implicit string concatenation joins the two literals
                    # into one sentence.
                    callback_future_text = "If a callback is passed, returns None, else returns a" " Future."
                    desc_content_node.append(paragraph("", Text(callback_future_text)))
                if obj_motor_info["is_pymongo_docstring"]:
                    # Remove all "versionadded", "versionchanged" and
                    # "deprecated" directives from the docs we imported from
                    # PyMongo
                    version_nodes = find_by_path(desc_content_node, [versionmodified])
                    for version_node in version_nodes:
                        version_node.parent.remove(version_node)
                    # Remove all "seealso" directives that contain :doc:
                    # references from PyMongo's docs
                    seealso_nodes = find_by_path(desc_content_node, [seealso])
                    for seealso_node in seealso_nodes:
                        if 'reftype="doc"' in str(seealso_node):
                            seealso_node.parent.remove(seealso_node)
def _build_markup(self):
    """Assemble the repository-info markup: a field list (branch, commit)
    plus optional warnings about uncommitted/untracked changes."""
    field_list = nodes.field_list()
    item = nodes.paragraph()
    item.append(field_list)
    # FIX: also require branch_name to be set, matching the sibling
    # implementation of this method elsewhere in the codebase --
    # nodes.emphasis(text=None) raises when branch_name is None.
    if 'branch' in self.options and self.branch_name is not None:
        name = nodes.field_name(text="Branch")
        body = nodes.field_body()
        body.append(nodes.emphasis(text=self.branch_name))
        field = nodes.field()
        field += [name, body]
        field_list.append(field)
    if 'commit' in self.options:
        name = nodes.field_name(text="Commit")
        body = nodes.field_body()
        if 'no_github_link' in self.options:
            body.append(self._commit_text_node())
        else:
            body.append(self._github_link())
        field = nodes.field()
        field += [name, body]
        field_list.append(field)
    if 'uncommitted' in self.options and self.repo.is_dirty():
        item.append(nodes.warning('', nodes.inline(
            text="There were uncommitted changes when this was compiled."
        )))
    if 'untracked' in self.options and self.repo.untracked_files:
        item.append(nodes.warning('', nodes.inline(
            text="There were untracked files when this was compiled."
        )))
    return [item]
def make_field_list(self, data):
    """Build a field list from ``(name, text, citations)`` triples,
    appending a parenthesized, semicolon-separated citation run when
    citations are present."""
    result = nodes.field_list()
    for name, body_text, citations in data:
        para = nodes.paragraph(text=body_text)
        if citations is not None and len(citations) > 0:
            para += nodes.Text(" (")
            last = len(citations) - 1
            for idx, citation in enumerate(citations):
                label = f"{citation.author}, {citation.year}"
                para += nodes.reference(
                    internal=False, refuri=citation.doi, text=label)
                if idx != last:
                    para += nodes.Text("; ")
            para += nodes.Text(")")
        body = nodes.field_body()
        body += para
        entry = nodes.field()
        entry += nodes.field_name(text=name)
        entry += body
        result += entry
    return result
def run(self):
    """Render the fields of the configured action as a field list.

    Waits up to 30 s for the background field fetch; emits a reporter
    error on timeout and a warning when no fields were found.
    NOTE(review): Python 2 code (``Queue.Empty``, ``iteritems``).
    """
    try:
        fields = self.future_fields.get(timeout=30)
    except Queue.Empty:
        return [self.state_machine.reporter.error(
            "Timed out while fetching fields related to action [%s]" % self.arguments[0]
        )]
    if fields is None:
        return [self.state_machine.reporter.warning(
            "Could not find any field related to the action [%s]" % self.arguments[0]
        )]
    # Optional ':only:' option restricts which fields get displayed.
    whitelist = set(self.options.get('only', '').split())
    return [nodes.field_list('', *(
        nodes.field('', nodes.field_name(text=v['string'] or k),
                    nodes.field_body('',
                        # keep help formatting around (e.g. newlines for lists)
                        nodes.line_block('', *(
                            nodes.line(text=line)
                            for line in v['help'].split('\n')
                        ))
                    )
        )
        for k, v in fields.iteritems()
        # if there's a whitelist, only display whitelisted fields
        if not whitelist or k in whitelist
        # only display if there's a help text
        if v.get('help')
    ))]
def run(self):
    """Render host metadata options as a single docutils field list,
    in a fixed display order."""
    entries = [text_field('Name', self.arguments[0])]
    if 'model' in self.options:
        entries.append(text_field('Model', self.options['model']))
    # Options rendered as bullet lists, in display order.
    for option, label in (
        ('aliases', 'Aliases'),
        ('services', 'Services'),
        ('cpu', 'CPU'),
        ('ram', 'RAM'),
        ('storage', 'Storage'),
        ('network', 'Network'),
    ):
        if option in self.options:
            entries.append(bullet_list_field(label, self.options[option]))
    # Options rendered as plain text, in display order.
    for option, label in (
        ('os', 'Operating System'),
        ('provider', 'Provider'),
        ('location', 'Location'),
    ):
        if option in self.options:
            entries.append(text_field(label, self.options[option]))
    entries.append(boolean_field('Virtual Machine',
                                 'virtual-machine' in self.options))
    entries.append(boolean_field('Publicly accessible',
                                 'public' in self.options))
    return [nodes.field_list('', *entries)]
def run(self):
    """Build the description node, field list, nested content and index
    entry for an extractor directive."""
    env = self.state.document.settings.env
    txb_name = self.arguments[0]
    txb_id = nodes.make_id(txb_name)
    # First, make a generic desc() node to be the parent.
    node = sphinx.addnodes.desc()
    node.document = self.state.document
    node['objtype'] = 'extractor'
    # Next, make a signature node. This creates a permalink and a
    # highlighted background when the link is selected.
    title = sphinx.addnodes.desc_signature(txb_name, '')
    title['ids'].append(txb_id)
    title['names'].append(txb_name)
    title['first'] = False
    title['objtype'] = 'extractor'
    self.add_name(title)
    title.set_class('directive-title')
    # Finally, add a desc_name() node to display the name of the
    # configuration variable.
    title += sphinx.addnodes.desc_name(txb_name, txb_name)
    node.append(title)
    if ('class' in self.options):
        title.set_class(self.options.get('class'))
    # This has to be a distinct node before the title. if nested then the
    # browser will scroll forward to just past the title.
    # NOTE(review): `anchor` is created but never attached or returned.
    anchor = nodes.target('', '', names=[txb_name])
    # Second (optional) arg is 'msgNode' - no idea what I should pass for
    # that or if it even matters, although I now think it should not be used.
    self.state.document.note_explicit_target(title)
    env.domaindata['txb']['extractor'][txb_name] = env.docname
    fl = nodes.field_list()
    if ('result' in self.options):
        fl.append(
            self.make_field(
                'Result',
                sphinx.addnodes.literal_emphasis(
                    text=self.options['result'])))
        # fl.append(self.make_field('Result', self.options['result']))
    if ('arg' in self.options):
        fl.append(self.make_field('Argument', self.options['arg']))
    # Get any contained content
    nn = nodes.compound()
    self.state.nested_parse(self.content, self.content_offset, nn)
    # Create an index node so that Sphinx adds this directive to the index.
    indexnode = sphinx.addnodes.index(entries=[])
    indexnode['entries'].append(
        ('single', _('%s') % txb_name, txb_id, '', ''))
    return [indexnode, node, fl, nn]
def make_properties_list(self, field):
    """Fill the ``field`` into a properties list and return it.

    :param dict field: the content of the property list to make
    :return: field_list instance filled with given field
    :rtype: nodes.field_list
    """
    result = nodes.field_list()
    # The display order is fixed by this tuple.
    ordered_names = (
        "label", "type", "description", "required", "disabled",
        "hidden", "default", "placeholder", "validate_regex",
        "choices", "collapse", "group",
    )
    literal_names = frozenset(
        ("type", "default", "placeholder", "validate_regex"))
    for name in ordered_names:
        if name not in field:
            continue
        value = field[name]
        if name in literal_names:
            # Code-style (literal) rendering for these properties.
            result += self.make_field(
                name, nodes.literal(str(value), str(value)))
        elif name == "choices":
            # Each choice becomes a "<label>: <literal value>" bullet.
            choice_list = nodes.bullet_list()
            for choice in value:
                entry = nodes.paragraph()
                entry += nodes.Text(choice["label"] + ": ")
                entry += nodes.literal(choice["value"], choice["value"])
                bullet = nodes.list_item()
                bullet += entry
                choice_list += bullet
            result += self.make_field(name, choice_list)
        else:
            # Everything else renders as plain text.
            result += self.make_field(name, str(value))
    return result
def run(self):
    """Run the base directive, then prepend 'Since'/'Deprecated' fields
    to the description's field list when the Cabal metadata has them."""
    self.cabal_meta = self.get_meta()
    result = super(CabalObject, self).run()
    if self.cabal_meta.since is not None \
            or self.cabal_meta.deprecated is not None:
        # find content part of description
        for item in result:
            if isinstance(item, addnodes.desc):
                desc = item
                break
        else:
            # No description node found: nothing to annotate.
            return result
        for item in desc:
            if isinstance(item, addnodes.desc_content):
                contents = item
                break
        else:
            return result
        # find exsting field list and add to it
        # or create new one
        for item in contents:
            if isinstance(item, nodes.field_list):
                field_list = item
                break
        else:
            field_list = nodes.field_list('')
            contents.insert(0, field_list)
        if self.cabal_meta.since is not None:
            # docutils horror
            field = nodes.field('')
            field_name = nodes.field_name('Since', 'Since')
            since = 'Cabal ' + str(self.cabal_meta.since)
            field_body = nodes.field_body(since, nodes.paragraph(since, since))
            field += field_name
            field += field_body
            field_list.insert(0, field)
        if self.cabal_meta.deprecated is not None:
            field = nodes.field('')
            field_name = nodes.field_name('Deprecated', 'Deprecated')
            # A bare True (no version) renders an empty body.
            if isinstance(self.cabal_meta.deprecated, StrictVersion):
                since = 'Cabal ' + str(self.cabal_meta.deprecated)
            else:
                since = ''
            field_body = nodes.field_body(since, nodes.paragraph(since, since))
            field += field_name
            field += field_body
            field_list.insert(0, field)
    return result
def make_node(self, lang='en'):
    """Build an admonition node describing an Ansible task.

    :param lang: language key into ``texts``; falls back to English.
    """
    if lang not in texts.keys():
        lang = 'en'
    arg_map = texts[lang]["arg_map"]
    task_title = texts[lang]["task_title"]  # NOTE(review): unused here -- confirm
    module_title = texts[lang]["module_title"]
    module_args = {}
    # Search task definition for modules and associated arguments.
    for key, value in self.args.items():
        if key not in arg_map.keys():
            module_args[key] = value
    # Create task node (using type: admonition)
    item = nodes.admonition()
    title = nodes.title(text=self.name)
    item.append(title)
    # Add modules and arguments to task node
    for module, args in module_args.items():
        field_list = nodes.field_list()  # wrap module header in field_list
        field_list.append(self.make_arg_simple(module_title, module))
        item.append(field_list)
        if isinstance(args, str):
            item.append(nodes.literal_block(text=args))
        else:
            item.append(self.make_list_representation(args))
    # Handle non-module task parameters.
    field_list = nodes.field_list()
    for arg, txt in arg_map.items():
        if not txt:  # skip name etc...
            continue
        if arg not in self.args:
            continue
        value = self.args[arg]  # value of that task arg
        if isinstance(value, list) or isinstance(value, dict):
            field_list.append(self.make_arg_complex(txt, value))
        else:
            field_list.append(self.make_arg_simple(txt, value))
    item.append(field_list)
    return item
def _process_object_properties(self, schema, prop_name):
    """Render every property under ``schema[prop_name]`` as a field.

    Each property gets a nested field list stating whether it is
    required and its dispatched type rendering; required properties
    additionally carry the ``jsonschema-required`` CSS class.
    """
    body = field_list()
    # FIX: hoist the 'required' lookup -- the original evaluated
    # ``key in schema.get('required', [])`` (an O(n) list scan) up to
    # three times per property.
    required_names = schema.get('required', [])
    for key, item in schema[prop_name].items():
        is_required = key in required_names
        required_class = ' jsonschema-required' if is_required else ''
        inner_body = field_list()
        inner_body += self._create_field(
            'Required', 'jsonschema-required',
            paragraph(text='Yes' if is_required else 'No'))
        inner_body += self._create_field(
            'Type', 'jsonschema-type', self._dispatch(item))
        body += self._create_field(
            key, 'jsonschema-property' + required_class, inner_body)
    return body
def handle_doc_fields(node):
    """Normalize docstring field lists in *node*'s immediate children:
    group ':param:' entries into one bullet-listed Parameters field,
    splice ':type:' info into the matching parameter, and canonicalize
    the remaining field names.

    NOTE(review): Python 2 code (``iteritems``).
    """
    # don't traverse, only handle field lists that are immediate children
    for child in node.children:
        if not isinstance(child, nodes.field_list):
            continue
        params = None
        param_nodes = {}
        param_types = {}
        new_list = nodes.field_list()
        for field in child:
            fname, fbody = field
            try:
                typ, obj = fname.astext().split(None, 1)
                typ = doc_fields_with_arg[typ]
                # Unwrap a single-paragraph body to its inline children.
                if len(fbody.children) == 1 and \
                        isinstance(fbody.children[0], nodes.paragraph):
                    children = fbody.children[0].children
                else:
                    children = fbody.children
                if typ == 'param':
                    if not params:
                        # First param: create the shared Parameters field.
                        pfield = nodes.field()
                        pfield += nodes.field_name('Parameters', 'Parameters')
                        pfield += nodes.field_body()
                        params = nodes.bullet_list()
                        pfield[1] += params
                        new_list += pfield
                    dlitem = nodes.list_item()
                    dlpar = nodes.paragraph()
                    dlpar += nodes.emphasis(obj, obj)
                    dlpar += nodes.Text(' -- ', ' -- ')
                    dlpar += children
                    param_nodes[obj] = dlpar
                    dlitem += dlpar
                    params += dlitem
                elif typ == 'type':
                    # Recorded now, spliced into the param entry below.
                    param_types[obj] = fbody.astext()
                else:
                    fieldname = typ + ' ' + obj
                    nfield = nodes.field()
                    nfield += nodes.field_name(fieldname, fieldname)
                    nfield += nodes.field_body()
                    nfield[1] += fbody.children
                    new_list += nfield
            except (KeyError, ValueError):
                # Field without an argument, or unknown field type.
                fnametext = fname.astext()
                try:
                    typ = doc_fields_without_arg[fnametext]
                except KeyError:
                    # at least capitalize the field name
                    typ = fnametext.capitalize()
                fname[0] = nodes.Text(typ)
                new_list += field
        # Splice the recorded types into each parameter entry.
        for param, type in param_types.iteritems():
            if param in param_nodes:
                param_nodes[param].insert(1, nodes.Text(' (%s)' % type))
        child.replace_self(new_list)
def _parse(self, tree):
    """Recursively convert an epytext DOM *tree* into docutils nodes.

    NOTE(review): Python 2 code (old-style ``raise`` syntax); must stay
    on a Python 2 interpreter.
    """
    if isinstance(tree, minidom.Document):
        return self._parse(tree.childNodes[0])
    if isinstance(tree, minidom.Text):
        return nodes.Text(tree.data)
    # Get children.
    children = [self._parse(c) for c in tree.childNodes]
    if tree.tagName == 'epytext':
        return children
    if tree.tagName == 'para':
        return nodes.paragraph('','', *children)
    if tree.tagName == 'section':
        return nodes.section('', *children)
    if tree.tagName == 'heading':
        return nodes.title('','', *children)
    if tree.tagName == 'fieldlist':
        return nodes.field_list('', *children)
    if tree.tagName == 'field':
        return nodes.field('', *self._parse_field(tree, children))
    if tree.tagName == 'literalblock':
        return nodes.literal_block('','', *children)
    if tree.tagName == 'doctestblock':
        return nodes.doctest_block('','', *children)
    if tree.tagName == 'ulist':
        return nodes.bullet_list('', *children)
    if tree.tagName == 'olist':
        return nodes.enumerated_list('', *children)
    if tree.tagName == 'li':
        return nodes.list_item('', *children)
    if tree.tagName == 'link':
        # [XX] discards link target.
        name, target = children
        return nodes.title_reference('','', name)
    if tree.tagName == 'uri':
        name, target = children
        return nodes.reference('','', name, refuid=target.astext())
    if tree.tagName == 'code':
        return nodes.literal('','', *children)
    if tree.tagName == 'math':
        return nodes.emphasis('','', *children)
    if tree.tagName == 'italic':
        return nodes.emphasis('','', *children)
    if tree.tagName == 'bold':
        return nodes.strong('','', *children)
    if tree.tagName == 'indexed':
        # [XX] doesn't mark the fact that it's indexedd
        return nodes.emphasis('','', *children)
    if tree.tagName == 'symbol':
        # use substitutions.
        # [XX] this needs to be fixed!
        return nodes.Text(children[0])
    elif tree.tagName in ('tag', 'arg', 'name', 'target'):
        return children[0]
    else:
        raise ValueError, ('unknown %s' % tree.tagName)
def _parse(self, tree):
    """Recursively convert an epytext DOM *tree* into docutils nodes
    (PEP8-spaced variant).

    NOTE(review): Python 2 code (old-style ``raise`` syntax); must stay
    on a Python 2 interpreter.
    """
    if isinstance(tree, minidom.Document):
        return self._parse(tree.childNodes[0])
    if isinstance(tree, minidom.Text):
        return nodes.Text(tree.data)
    # Get children.
    children = [self._parse(c) for c in tree.childNodes]
    if tree.tagName == 'epytext':
        return children
    if tree.tagName == 'para':
        return nodes.paragraph('', '', *children)
    if tree.tagName == 'section':
        return nodes.section('', *children)
    if tree.tagName == 'heading':
        return nodes.title('', '', *children)
    if tree.tagName == 'fieldlist':
        return nodes.field_list('', *children)
    if tree.tagName == 'field':
        return nodes.field('', *self._parse_field(tree, children))
    if tree.tagName == 'literalblock':
        return nodes.literal_block('', '', *children)
    if tree.tagName == 'doctestblock':
        return nodes.doctest_block('', '', *children)
    if tree.tagName == 'ulist':
        return nodes.bullet_list('', *children)
    if tree.tagName == 'olist':
        return nodes.enumerated_list('', *children)
    if tree.tagName == 'li':
        return nodes.list_item('', *children)
    if tree.tagName == 'link':
        # [XX] discards link target.
        name, target = children
        return nodes.title_reference('', '', name)
    if tree.tagName == 'uri':
        name, target = children
        return nodes.reference('', '', name, refuid=target.astext())
    if tree.tagName == 'code':
        return nodes.literal('', '', *children)
    if tree.tagName == 'math':
        return nodes.emphasis('', '', *children)
    if tree.tagName == 'italic':
        return nodes.emphasis('', '', *children)
    if tree.tagName == 'bold':
        return nodes.strong('', '', *children)
    if tree.tagName == 'indexed':
        # [XX] doesn't mark the fact that it's indexedd
        return nodes.emphasis('', '', *children)
    if tree.tagName == 'symbol':
        # use substitutions.
        # [XX] this needs to be fixed!
        return nodes.Text(children[0])
    elif tree.tagName in ('tag', 'arg', 'name', 'target'):
        return children[0]
    else:
        raise ValueError, ('unknown %s' % tree.tagName)
def fieldlist(fields, *classes):
    """Build a field_list node from ``(name, value)`` pairs, tagging it
    with the given CSS *classes*."""
    result = nodes.field_list('')
    result['classes'].extend(classes)
    for label, text in fields:
        body = nodes.field_body('', nodes.paragraph('', text))
        result += nodes.field('', nodes.field_name('', label), body)
    return result
def annotation_parser(argument):
    """Parse *argument* as reST and return its docinfo as a field list.

    Returns an empty list when the argument is empty or parsing fails.
    """
    if not argument:
        return []
    doc = sandbox_rst_parser(argument)
    if doc is None:
        return []
    docinfo = doc[0]
    annotations = nodes.field_list()
    # Preserve source position info for error reporting.
    annotations.source, annotations.line = docinfo.source, docinfo.line
    annotations.extend(docinfo.children)
    return annotations
def run(self):
    """Run the base ObjectDescription, then splice rendered option
    fields into the content node right after the signature."""
    index, content = ObjectDescription.run(self)
    option_fields = nodes.field_list()
    for field_spec in self.fields:
        rendered = field_spec.make_field(self)
        if rendered:
            option_fields += rendered
    # Insert option fields right after signature
    content.insert(1, option_fields)
    return [index, content]
def fieldlist(fields, *classes):
    """Create a classed field_list from an iterable of (name, value) pairs."""
    entries = [
        nodes.field(
            '',
            nodes.field_name('', name),
            nodes.field_body('', nodes.paragraph('', value)),
        )
        for name, value in fields
    ]
    node = nodes.field_list('', *entries)
    node['classes'].extend(classes)
    return node
def _parse_enum_field(self, f):
    """Render an EnumField: well-known enums use their docstring, any
    other enum becomes a name/value field list."""
    from ereuse_devicehub.resources.device import states
    documented_enums = (Subdivision, Currency, Country, Layouts, states.State)
    if issubclass(f.enum, documented_enums):
        return self.parse(f.enum.__doc__)
    listing = n.field_list()
    for member in f.enum:
        listing += self.field(member.name, member.value)
    return listing
def make_node(self, lang='en'):
    """Build an admonition node for a task (single-module variant).

    :param lang: language key into ``texts``; falls back to English.
    """
    if lang not in texts.keys():
        lang = 'en'
    arg_map = texts[lang]["arg_map"]
    task_title = texts[lang]["task_title"]  # NOTE(review): unused here -- confirm
    module_title = texts[lang]["module_title"]
    module = ""
    module_args = []
    # first, search module
    for arg, m in self.args.items():
        if arg not in arg_map.keys():
            module = arg
            module_args.append(m)
    item = nodes.admonition()
    title = nodes.title(text=self.name)
    item.append(title)
    # Render module args: strings verbatim, dicts as "k=v" joined by spaces.
    for m in module_args:
        if isinstance(m, str):
            item.append(nodes.paragraph(text=m))
        else:
            mlist = []
            for k, v in m.items():
                mlist.append("%s=%s" % (k, v))
            item.append(nodes.paragraph(text=" ".join(mlist)))
    field_list = nodes.field_list()
    field_list.append(self.make_arg(module_title, module))
    # second, create node tree
    for arg, txt in arg_map.items():
        if not txt:  # skip name etc...
            continue
        if arg not in self.args:
            continue
        value = self.args[arg]  # value of that task arg
        if isinstance(value, list):
            # List values become a bullet list of emphasized items.
            bl = nodes.bullet_list()
            for v in value:
                body = nodes.emphasis(text=v)
                bl.append(nodes.list_item('', body))
            name = nodes.field_name(text=txt)
            body = nodes.field_body()
            body.append(bl)
            field = nodes.field()
            field += [name, body]
            field_list.append(field)
        else:
            field_list.append(self.make_arg(txt, value))
    item.append(field_list)
    return item
def insert_field_list(node: Element) -> nodes.field_list:
    """Create an empty field list, splice it into *node*, and return it
    so the caller can populate it."""
    field_list = nodes.field_list()
    desc = [n for n in node if isinstance(n, addnodes.desc)]
    if desc:
        # insert just before sub object descriptions (ex. methods, nested classes, etc.)
        # NOTE(review): ``index - 1`` inserts one slot before the element
        # preceding the first desc -- confirm intended offset.
        index = node.index(desc[0])
        node.insert(index - 1, [field_list])
    else:
        node += field_list
    return field_list
def construct_property_description_list(name):
    """ Construct a skeleton for sphinx member description block """
    group = nodes.field_list()
    heading = nodes.field()
    heading += nodes.field_name(_(name), _(name))
    group += heading
    items = nodes.bullet_list()
    # NB: the field_body is appended to the field_list itself (matching
    # the original layout), not nested under the heading field.
    group += nodes.field_body('', items)
    return group, items
def run(self):
    """Expose the TAGS table as a reST field list."""
    tag_fields = nodes.field_list()
    for tag, description in TAGS:
        name_node = nodes.field_name()
        name_node += nodes.Text(tag)
        para = nodes.paragraph()
        para += nodes.Text(description)
        body_node = nodes.field_body()
        body_node += para
        tag_fields += nodes.field('', name_node, body_node)
    return [tag_fields]
def _process_combinator(self, label, schema):
    """Render a combinator schema (anyOf/allOf/...) as a field list:
    the combination label plus a bullet list of the combined subtypes."""
    subtype_items = bullet_list()
    for subschema in schema:
        item = list_item()
        item += self._dispatch(subschema)
        subtype_items += item
    result = field_list()
    result += self._create_field(
        'Combination', 'json-combinatortype', paragraph(text=label))
    result += self._create_field(
        'Types', 'jsonschema-combinedtypes', subtype_items)
    return result
def process_motor_nodes(app, doctree):
    # Search doctree for Motor's methods and attributes whose docstrings were
    # copied from PyMongo, and fix them up for Motor:
    # 1. Add a 'callback' param (sometimes optional, sometimes required) to
    #    all async methods. If the PyMongo method took no params, we create
    #    a parameter-list from scratch, otherwise we edit PyMongo's list.
    # 2. Remove all version annotations like "New in version 2.0" since
    #    PyMongo's version numbers are meaningless in Motor's docs.
    #
    # We do this here, rather than by registering a callback to Sphinx's
    # 'autodoc-process-signature' event, because it's way easier to handle the
    # parsed doctree before it's turned into HTML than it is to update the RST.
    for objnode in doctree.traverse(desc):
        if objnode['objtype'] in ('method', 'attribute'):
            signature_node = find_by_path(objnode, [desc_signature])[0]
            name = '.'.join([
                signature_node['module'], signature_node['fullname']])
            assert name.startswith('motor.')
            obj_motor_info = motor_info.get(name)
            if obj_motor_info:
                desc_content_node = find_by_path(objnode, [desc_content])[0]
                if obj_motor_info.get('is_async_method'):
                    try:
                        # Find the parameter list, a bullet_list instance
                        parameters_node = find_by_path(
                            desc_content_node,
                            [field_list, field, field_body, bullet_list])[0]
                    except IndexError:
                        # PyMongo method has no parameters, create an empty
                        # params list
                        parameters_node = bullet_list()
                        parameters_field_list_node = field_list(
                            '',
                            field('',
                                  field_name('', 'Parameters '),
                                  field_body('', parameters_node)))
                        desc_content_node.append(parameters_field_list_node)
                    insert_callback(
                        parameters_node, obj_motor_info['callback_required'])
                if obj_motor_info['is_pymongo_docstring']:
                    # Remove all "versionadded", "versionchanged" and
                    # "deprecated" directives from the docs we imported from
                    # PyMongo
                    version_nodes = find_by_path(
                        desc_content_node, [versionmodified])
                    for version_node in version_nodes:
                        version_node.parent.remove(version_node)
def to_fields(x):
    """Convert a (possibly nested) dict into docutils nodes.

    A flat dict becomes a field_list.  A dict containing nested dict
    values becomes a definition_list; consecutive string-valued entries
    share one embedded field_list, while dict-valued entries recurse.
    """
    has_nested = any(isinstance(v, dict) for v in x.values())
    if not has_nested:
        flat = nodes.field_list()
        for key, v in x.items():
            entry = nodes.field()
            entry.append(nodes.field_name(text=key))
            entry.append(nodes.field_body(v, nodes.Text(v)))
            flat.append(entry)
        return flat
    result = nodes.definition_list()
    open_fieldlist = None
    for key, v in x.items():
        dl_item = nodes.definition_list_item()
        if isinstance(v, str):
            # Consecutive string values share one embedded field_list;
            # only the run's first item is attached to the result tree.
            if open_fieldlist is None:
                open_fieldlist = nodes.field_list()
                dl_item.append(open_fieldlist)
                result.append(dl_item)
            entry = nodes.field()
            open_fieldlist.append(entry)
            entry.append(nodes.field_name(text=key))
            entry.append(nodes.field_body(v, nodes.Text(v)))
        else:
            open_fieldlist = None
            dl_item.append(nodes.term(text=key))
            definition = nodes.definition()
            definition.append(to_fields(v))
            dl_item.append(definition)
            result.append(dl_item)
    return result
def _process_reftype(self, schema):
    """Render a ``$ref`` schema: optional description, the reference
    itself, and any inline definitions."""
    result = field_list()
    if 'description' in schema:
        result += self._create_field(
            'Description', 'jsonschema-description',
            self._parse_text(schema['description']))
    ref_text = ':ref:`' + schema['$ref'] + '`'
    result += self._create_field(
        'Reference', 'jsonschema-reference', self._parse_text(ref_text))
    if 'definitions' in schema:
        result += self._create_field(
            'Definitions', 'jsonschema-definitions',
            self._process_definitions(schema['definitions']))
    return result
def transform(self, node):
    """Transform named field(s) into desired manner."""
    fields = []
    fieldargs = {}
    # The input can be in arbitrary order so sort it out
    for field in node:
        fieldname, fieldbody = field
        try:
            fieldtype, fieldarg = fieldname.astext().split(None, 1)
        except ValueError:
            # Field name without an argument part.
            fieldtype, fieldarg = fieldname.astext(), ''
        if fieldtype == 'field':
            fields.append((fieldarg, fieldbody))
        else:
            # Option fields (e.g. 'type', 'requires') keyed by field name.
            assert fieldarg is not None
            if not fieldarg in fieldargs:
                fieldargs[fieldarg] = {}
            fieldargs[fieldarg][fieldtype] = fieldbody
    if len(fields) == 0:
        return
    new_list = nodes.field_list()
    field_name = nodes.field_name('', _('Fields'))
    # NOTE(review): ``list`` shadows the builtin; left unchanged here.
    list = nodes.bullet_list()
    for name, body in fields:
        header = []
        if not name in fieldargs:
            fieldargs[name] = {}
        options = fieldargs[name]
        # Required fields render bold, optional ones emphasized.
        if 'requires' in options and \
                options['requires'].astext().lower() == 'yes':
            header.append(nodes.strong('', name))
        else:
            header.append(nodes.emphasis('', name))
        if 'type' in options:
            header.append(nodes.inline('', ' ('))
            n = nodes.inline()
            _describe_type(options['type'].astext(), n, **fieldargs[name])
            header.extend(n)
            header.append(nodes.inline('', ')'))
        header.append(nodes.inline('', ' -- '))
        list += nodes.list_item('', *(header + body.children[0].children))
    field_body = nodes.field_body('', list)
    new_list += nodes.field('', field_name, field_body)
    node.replace_self(new_list)
def _objectproperties(self, schema, key):
    """Render the mapping stored under ``schema[key]`` as a field list.

    Used for the ``properties``, ``patternProperties`` and
    ``definitions`` keys of an object-type schema.  Properties listed
    in the schema's ``required`` array get a `` required`` suffix on
    their field classification.
    """
    body = field_list()
    if key in schema:
        required_names = schema['required'] if 'required' in schema else ()
        for prop_name, prop_schema in schema[key].items():
            suffix = ' required' if prop_name in required_names else ''
            body += self._create_field(
                key, 'type' + suffix, self._dispatch(prop_schema))
    return body
def transform(self, node):
    """This one should be used with unnamed fields."""
    bodies = []
    options_by_index = {}

    # Pair each ``field`` entry with the option fields that follow it;
    # options are keyed by the position of their owning field.
    position = -1
    for field in node:
        field_name, field_body = field
        field_type = field_name.astext()
        if field_type == 'field':
            position += 1
            bodies.append(field_body)
        else:
            assert position >= 0
            options_by_index.setdefault(position, {})[field_type] = field_body

    if not bodies:
        return

    replacement = nodes.field_list()
    caption = nodes.field_name('', _('Fields'))
    items = nodes.bullet_list()
    for position, body in enumerate(bodies):
        options = options_by_index.setdefault(position, {})
        footer = []
        if 'type' in options:
            footer.append(nodes.inline('', ' ('))
            holder = nodes.inline()
            _describe_type(options['type'].astext(), holder, **options)
            footer.extend(holder)
            footer.append(nodes.inline('', ')'))
        # Required fields are emphasised with bold text.
        if ('requires' in options and
                options['requires'].astext().lower() == 'yes'):
            content = nodes.strong()
        else:
            content = nodes.inline()
        content += body.children[0].children
        items += nodes.list_item('', content, *footer)

    replacement += nodes.field('', caption, nodes.field_body('', items))
    node.replace_self(replacement)
def make_properties_list(self, field):
    """Fill the ``field`` into a properties list and return it.

    :param dict field: the content of the property list to make
    :return: field_list instance filled with given field
    :rtype: nodes.field_list
    """
    properties_list = nodes.field_list()
    # The order of this tuple determines the display order.
    display_order = ('label', 'type', 'description', 'required',
                     'disabled', 'hidden', 'default', 'placeholder',
                     'validate_regex', 'choices', 'collapse', 'group')
    # These properties read best in code (=literal) style.
    literal_names = ('type', 'default', 'placeholder', 'validate_regex')

    for name in display_order:
        if name not in field:
            continue
        value = field[name]
        if name in literal_names:
            text = str(value)
            properties_list += self.make_field(name,
                                               nodes.literal(text, text))
        elif name == 'choices':
            # ``choices`` is a sequence of {label, value} mappings;
            # render each one as a bullet item "label: value".
            bullets = nodes.bullet_list()
            for choice in value:
                paragraph = nodes.paragraph()
                paragraph += nodes.Text(choice['label'] + ': ')
                paragraph += nodes.literal(choice['value'], choice['value'])
                item = nodes.list_item()
                item += paragraph
                bullets += item
            properties_list += self.make_field(name, bullets)
        else:
            properties_list += self.make_field(name, str(value))
    return properties_list
def rfc2822(self, match, context, next_state):
    """RFC2822-style field list item."""
    field_list = nodes.field_list(classes=['rfc2822'])
    self.parent += field_list
    first_field, blank_finish = self.rfc2822_field(match)
    field_list += first_field
    start = self.state_machine.line_offset + 1  # next line
    # Parse the remainder of the list with the dedicated nested state.
    end_offset, blank_finish = self.nested_list_parse(
        self.state_machine.input_lines[start:],
        input_offset=self.state_machine.abs_line_offset() + 1,
        node=field_list,
        initial_state='RFC2822List',
        blank_finish=blank_finish)
    self.goto_line(end_offset)
    if not blank_finish:
        self.parent += self.unindent_warning('RFC2822-style field list')
    return [], next_state, []
def __insert_field_list(self, form_class):
    """Return the document's leading field list, creating it if needed.

    Inspects the first child that is not PreBibliographic: an existing
    ``field_list`` there is reused; a ``docinfo`` pushes the insertion
    point past itself.  Otherwise a new ``field_list`` carrying the CSS
    class *form_class* is inserted at that position (or appended when
    no position was found).
    """
    index = self.document.first_child_not_matching_class(
        nodes.PreBibliographic)
    # NOTE(review): ``if index:`` conflates "no match" (None) with
    # "match at position 0", so a field_list that happens to be the
    # document's first child would not be reused and a new list would
    # be appended instead; ``index is not None`` may have been the
    # intent — confirm whether position 0 can occur before changing.
    if index:
        candidate = self.document[index]
        #logger.info('Candidate %s', candidate)
        if isinstance(candidate, nodes.field_list):
            return candidate
        elif isinstance(candidate, nodes.docinfo):
            index += 1
    else:
        index = 0
    field_list = nodes.field_list(classes=[form_class])
    if index:
        self.document.insert(index, field_list)
    else:
        self.document.append(field_list)
    return field_list
def run(self):
    """Render a function with doc comments harvested by _populate().

    Delegates the actual signature rendering to ``CPPFunctionObject.run``
    and, when ``_populate`` found documentation, prepends a field list of
    parameters / return / pre / post briefs (and a "Brief:" paragraph)
    to the rendered content node.
    """
    populated = CPPAutoDocObject._populate(self)
    self.name = 'function'
    res, obj = CPPFunctionObject.run(self), self._get_obj()
    if populated:
        # _empty tracks whether any documentation field was emitted.
        fieldlist, _empty = nodes.field_list(), True
        # Only parameters that actually have a 'param_<name>' brief.
        doc_args = [it for it in obj.signature
                    if obj.brief('param_' + str(it.get_name()))]
        if doc_args:
            tmp = []
            for it in doc_args:
                param_name = 'param_' + str(it.get_name())
                node = addnodes.compact_paragraph()
                # Prefix the direction marker (e.g. [in]/[out]) if known.
                if obj.param_ways.get(param_name, None) is not None:
                    node += nodes.literal(text='[{}] '.format(
                        obj.param_ways[param_name]
                    ))
                node += nodes.Text(obj.brief(param_name)[0])
                tmp.append((it.name, node))
            fieldlist += self.doc_field_types[0].make_field(
                [],  # [it.type for it in doc_args],
                self._get_domain(),
                tmp,
            )
            _empty = False

        def _simple_field(fieldlist, name, nb_):
            # Emit one grouped field (e.g. return/pre/post) when a brief
            # exists; returns True when nothing was emitted.
            if obj.brief(name):
                fieldlist += self.doc_field_types[nb_].make_field(
                    None,
                    self._get_domain(),
                    (None, [nodes.Text(it) for it in obj.brief(name)])
                )
                return False
            return True

        _empty = _simple_field(fieldlist, 'return', 1) and _empty
        _empty = _simple_field(fieldlist, 'pre', 3) and _empty
        _empty = _simple_field(fieldlist, 'post', 4) and _empty
        if not _empty:
            res[1][1].insert(0, fieldlist)
        # When details exist, the brief text is surfaced as a leading
        # "Brief:" paragraph above the field list.
        if obj.details() and not _empty:
            para = nodes.paragraph()
            para += nodes.emphasis(text='Brief: ')
            para += nodes.Text(''.join(obj.brief()))
            res[1][1].insert(0, para)
    return res
def field_marker(self, match, context, next_state):
    """Field list item."""
    field_list = nodes.field_list()
    self.parent += field_list
    first_field, blank_finish = self.field(match)
    field_list += first_field
    start = self.state_machine.line_offset + 1  # next line
    end_offset, blank_finish = self.nested_list_parse(
        self.state_machine.input_lines[start:],
        input_offset=self.state_machine.abs_line_offset() + 1,
        node=field_list,
        initial_state='FieldList',
        blank_finish=blank_finish)
    self.goto_line(end_offset)
    if not blank_finish:
        self.parent += self.unindent_warning('Field list')
    # Stash the raw source text of the whole list on the node.
    raw_lines = self.state_machine.input_lines[start - 1:end_offset - 2].data
    field_list.realsource = '\n'.join(raw_lines)
    return [], next_state, []
def removeComments(self, bulletList):
    """Return a copy of *bulletList* without decorative comment fields.

    A field whose name is a single repeated character drawn from
    COMMENT_DRAWING_CHARS (e.g. ``-----``) is treated as a visual
    separator and dropped.  List items left with no fields at all are
    omitted from the result.
    """
    result = nodes.bullet_list()
    for item in bulletList:
        kept_fields = nodes.field_list()
        for field in item[0]:
            name_text = field[0].astext()
            first = name_text[0]
            is_drawing = (first in COMMENT_DRAWING_CHARS and
                          name_text == first * len(name_text))
            if not is_drawing:
                kept_fields += field
        if len(kept_fields):
            new_item = nodes.list_item()
            new_item += kept_fields
            result += new_item
    return result
def options_to_field_list(self, option_map):
    """Build a field list from the directive options named in *option_map*.

    :param option_map: mapping of option key -> display name for the field
    :return: a ``nodes.field_list``, or ``None`` when no mapped option
        is present in ``self.options``
    """
    field_list = None
    # FIX: ``iteritems()`` is Python-2-only; ``items()`` works on both.
    for option, name in option_map.items():
        if option not in self.options:
            continue
        # Create the list lazily so we can return None when empty.
        if field_list is None:  # FIX: was ``== None``
            field_list = nodes.field_list()
        option_field = nodes.field()
        option_field_name = nodes.field_name()
        option_field_name += nodes.Text(name)
        option_field += option_field_name
        option_field_body = nodes.field_body()
        option_field += option_field_body
        # Don't attempt to split integer fields:
        # FIX: use isinstance instead of ``type(...) != int``.
        if not isinstance(self.options[option], int):
            items = self.options[option].split(",")
            if len(items) > 1:
                # Force a LaTeX line break before a multi-valued field.
                option_field_body += nodes.raw('', ' \\ \n\n',
                                               format="latex")
            for item in items:
                # Backtick-delimited segments become emphasised
                # references to the matching parameter anchor.
                parts = item.split("`")
                paragraph = nodes.paragraph()
                is_reference = False
                for part in parts:
                    if is_reference:
                        wrapper = nodes.emphasis()
                        refid = nodes.make_id(
                            self.arguments[0].strip() + "param" + part)
                        wrapper += nodes.reference('', part, refid=refid)
                        paragraph += wrapper
                    else:
                        paragraph += nodes.Text(part)
                    is_reference = not is_reference
                option_field_body += paragraph
        field_list += option_field
    return field_list
def run(self):
    """Build an option-info section node plus its index target node."""
    name = self.options.get('name')
    env = self.state.document.settings.env
    targetid = "indigo-option-%d" % env.new_serialno('indigo-option')
    targetnode = nodes.target('', '', ids=[targetid])

    section_node = optioninfo()
    normalized = normalize_name(name)
    section_node['names'].append(normalized)
    section_node['ids'].append(normalized)
    section_node += nodes.title('',
                                name + ' = ' + self.options.get('default'))

    # Summary fields shown under the title.
    info = nodes.field_list()
    for label, key in (('type', 'type'),
                       ('default', 'default'),
                       ('description', 'short')):
        info += self.make_field(label, nodes.Text(self.options.get(key)))
    section_node += info

    # Parse any directive body into the section.
    if '\n'.join(self.content):
        self.state.nested_parse(self.content, self.content_offset,
                                section_node)

    # Record the option so an aggregating directive can list it later.
    if not hasattr(env, 'indigo_options'):
        env.indigo_options = []
    env.indigo_options.append({
        'docname': env.docname,
        'lineno': self.lineno,
        'name': name,
        'type': self.options.get('type'),
        'default': self.options.get('default'),
        'short': self.options.get('short'),
        'target': targetnode,
    })
    return [targetnode, section_node]
def apply(self):
    """Append a 'Docutils settings' section listing all settings,
    but only when the ``expose_settings`` setting is enabled."""
    settings = self.document.settings
    if not getattr(settings, 'expose_settings', 0):
        return
    logging.debug("DEBUG: Writing settings field-list to document. ")
    field_list = nodes.field_list()
    for name in dir(settings):
        # Skip private attributes.
        if name.startswith('_'):
            continue
        value = getattr(settings, name)
        if callable(value):
            # Render callables as "module:name()" instead of their repr.
            value = value.__module__ + ':' + value.__name__ + '()'
        body = nodes.field_body(
            '', nodes.paragraph('', nodes.Text(str(value))))
        field_list += nodes.field('', nodes.field_name('', name), body)
    self.document += nodes.section(
        '', nodes.title('', 'Docutils settings'), field_list)
def create_attribute_list_node(self, app, docname, traceable):
    """Build a field list of *traceable*'s relationships and attributes."""
    relationships = traceable.relationships

    def make_field(name, body_node):
        # One field row: name on the left, body content on the right.
        entry = nodes.field()
        entry += nodes.field_name(text=name)
        entry += nodes.field_body("", body_node)
        return entry

    # Plain attributes are everything that is neither a relationship
    # nor the title.
    plain = traceable.attributes.copy()
    for relationship_name in relationships.keys():
        plain.pop(relationship_name, None)
    plain.pop("title", None)

    result = nodes.field_list()

    # Relationship attributes first: comma-separated reference links.
    for relationship_name, relatives in sorted(relationships.items()):
        content = nodes.inline()
        for relative in sorted(relatives, key=lambda t: t.tag):
            if len(content):
                content += nodes.inline(text=", ")
            content += relative.make_reference_node(app.builder, docname)
        result += make_field(relationship_name, content)

    # Then the plain attributes as text values.  The leading space works
    # around a bug in the LaTeX builder of Sphinx v1.4 which can cause
    # \leavevmode to be stuck to following text.
    for attribute_name, attribute_value in sorted(plain.items()):
        result += make_field(attribute_name,
                             nodes.inline(text=" " + attribute_value))

    return result
def create_attribute_list_node(self, docname, placeholder_node):
    """Build a field list for the traceable named by *placeholder_node*."""
    tag = placeholder_node["traceable-tag"]
    traceable = self.storage.get_traceable_by_tag(tag)
    relationships = traceable.relationships

    def make_field(name, body_node):
        # One field row: name on the left, body content on the right.
        entry = nodes.field()
        entry += nodes.field_name(text=name)
        entry += nodes.field_body("", body_node)
        return entry

    # Plain attributes are whatever the placeholder carries, minus
    # relationships and the title.
    plain = placeholder_node["traceable-attributes"].copy()
    for relationship_name in relationships.keys():
        plain.pop(relationship_name, None)
    plain.pop("title", None)

    result = nodes.field_list()

    # Relationship attributes first: comma-separated reference links.
    for relationship_name, relatives in sorted(relationships.items()):
        content = nodes.inline()
        for relative in sorted(relatives, key=lambda t: t.tag):
            if len(content):
                content += nodes.inline(text=", ")
            content += relative.make_reference_node(self.app.builder,
                                                    docname)
        result += make_field(relationship_name, content)

    # Then the plain attributes as text values.
    for attribute_name, attribute_value in sorted(plain.items()):
        result += make_field(attribute_name,
                             nodes.inline(text=attribute_value))

    return result
def run(self):
    """Document a configuration variable for the ``ts`` domain.

    Positional arguments: scope, name, type, and an optional default.
    Returns an index node, the signature ``desc`` node, a field list
    of the variable's properties, and any nested directive content.
    """
    env = self.state.document.settings.env
    cv_default = None
    cv_scope, cv_name, cv_type = self.arguments[0:3]
    if len(self.arguments) > 3:
        cv_default = self.arguments[3]

    # First, make a generic desc() node to be the parent.
    node = sphinx.addnodes.desc()
    node.document = self.state.document
    node["objtype"] = "cv"

    # Next, make a signature node. This creates a permalink and a
    # highlighted background when the link is selected.
    title = sphinx.addnodes.desc_signature(cv_name, "")
    title["ids"].append(nodes.make_id(cv_name))
    title["ids"].append(cv_name)
    title["names"].append(cv_name)
    title["first"] = False
    title["objtype"] = "cv"
    self.add_name(title)
    title.set_class("ts-cv-title")

    # Finally, add a desc_name() node to display the name of the
    # configuration variable.
    title += sphinx.addnodes.desc_name(cv_name, cv_name)
    node.append(title)

    if "class" in self.options:
        title.set_class(self.options.get("class"))

    # This has to be a distinct node before the title. if nested then
    # the browser will scroll forward to just past the title.
    anchor = nodes.target("", "", names=[cv_name])
    # Second (optional) arg is 'msgNode' - no idea what I should pass
    # for that or if it even matters, although I now think it should
    # not be used.
    # NOTE(review): ``anchor`` is created but never attached or
    # returned — confirm whether it is still needed.
    self.state.document.note_explicit_target(title)
    env.domaindata["ts"]["cv"][cv_name] = env.docname

    # Field list of the variable's properties.
    fl = nodes.field_list()
    fl.append(self.make_field("Scope", cv_scope))
    fl.append(self.make_field("Type", cv_type))
    if cv_default:
        fl.append(self.make_field("Default", cv_default))
    else:
        fl.append(self.make_field(
            "Default", sphinx.addnodes.literal_emphasis(text="*NONE*")))
    if "metric" in self.options:
        fl.append(self.make_field("Metric", self.options["metric"]))
    if "reloadable" in self.options:
        fl.append(self.make_field("Reloadable", "Yes"))
    if "overridable" in self.options:
        fl.append(self.make_field("Overridable", "Yes"))
    if "deprecated" in self.options:
        fl.append(self.make_field("Deprecated", "Yes"))

    # Get any contained content
    nn = nodes.compound()
    self.state.nested_parse(self.content, self.content_offset, nn)

    # Create an index node so that Sphinx adds this config variable to the
    # index. nodes.make_id() specifies the link anchor name that is
    # implicitly generated by the anchor node above.
    indexnode = sphinx.addnodes.index(entries=[])
    indexnode["entries"].append(
        ("single", _("%s") % cv_name, nodes.make_id(cv_name), ""))

    return [indexnode, node, fl, nn]
def transform(self, node):
    """Transform a single field list *node*.

    Collects typed/grouped documentation fields (params, types, etc.)
    according to ``self.typemap`` and rebuilds the field list with
    grouped entries merged and type information attached; unknown
    fields pass through with a capitalized name.
    """
    typemap = self.typemap

    entries = []       # resulting entries: raw fields or [typedesc, content]
    groupindices = {}  # typename -> index of its group entry in `entries`
    types = {}         # typename -> {arg name -> list of type nodes}

    # step 1: traverse all fields and collect field types and content
    for field in node:
        fieldname, fieldbody = field
        try:
            # split into field type and argument
            fieldtype, fieldarg = fieldname.astext().split(None, 1)
        except ValueError:
            # maybe an argument-less field type?
            fieldtype, fieldarg = fieldname.astext(), ''
        typedesc, is_typefield = typemap.get(fieldtype, (None, None))

        # sort out unknown fields
        if typedesc is None or typedesc.has_arg != bool(fieldarg):
            # either the field name is unknown, or the argument doesn't
            # match the spec; capitalize field name and be done with it
            new_fieldname = fieldtype[0:1].upper() + fieldtype[1:]
            if fieldarg:
                new_fieldname += ' ' + fieldarg
            fieldname[0] = nodes.Text(new_fieldname)
            entries.append(field)
            continue

        typename = typedesc.name

        # collect the content, trying not to keep unnecessary paragraphs
        if _is_single_paragraph(fieldbody):
            content = fieldbody.children[0].children
        else:
            content = fieldbody.children

        # if the field specifies a type, put it in the types collection
        if is_typefield:
            # filter out only inline nodes; others will result in invalid
            # markup being written out
            content = [n for n in content
                       if isinstance(n, nodes.Inline) or
                       isinstance(n, nodes.Text)]
            if content:
                types.setdefault(typename, {})[fieldarg] = content
            continue

        # also support syntax like ``:param type name:``
        if typedesc.is_typed:
            try:
                argtype, argname = fieldarg.split(None, 1)
            except ValueError:
                pass
            else:
                types.setdefault(typename, {})[argname] = \
                    [nodes.Text(argtype)]
                fieldarg = argname

        # Wrap content so it is picked up by the i18n machinery.
        translatable_content = nodes.inline(fieldbody.rawsource,
                                            translatable=True)
        translatable_content.source = fieldbody.parent.source
        translatable_content.line = fieldbody.parent.line
        translatable_content += content

        # grouped entries need to be collected in one entry, while others
        # get one entry per field
        if typedesc.is_grouped:
            if typename in groupindices:
                group = entries[groupindices[typename]]
            else:
                groupindices[typename] = len(entries)
                group = [typedesc, []]
                entries.append(group)
            entry = typedesc.make_entry(fieldarg, [translatable_content])
            group[1].append(entry)
        else:
            entry = typedesc.make_entry(fieldarg, [translatable_content])
            entries.append([typedesc, entry])

    # step 2: all entries are collected, construct the new field list
    new_list = nodes.field_list()
    for entry in entries:
        if isinstance(entry, nodes.field):
            # pass-through old field
            new_list += entry
        else:
            fieldtype, content = entry
            fieldtypes = types.get(fieldtype.name, {})
            new_list += fieldtype.make_field(fieldtypes, self.domain,
                                             content)

    node.replace_self(new_list)
def handle_doc_fields(node, env):
    """Rewrite info-field lists (``:param:``, ``:type:``, ...) in *node*.

    Only field lists that are immediate children are handled; the tree
    is not traversed further.  ``:param name:`` entries are merged into
    a single bulleted "Parameters" field, ``:type name:`` entries are
    spliced into the matching parameter's line, and other known field
    types get their translated display name (optionally with a linked
    argument).  Unknown fields keep their body and get a capitalized
    name.
    """
    # don't traverse, only handle field lists that are immediate children
    for child in node.children:
        if not isinstance(child, nodes.field_list):
            continue
        params = None
        param_nodes = {}
        param_types = {}
        new_list = nodes.field_list()
        for field in child:
            fname, fbody = field
            try:
                typ, obj = fname.astext().split(None, 1)
                typdesc = _(doc_fields_with_arg[typ])
                if _is_only_paragraph(fbody):
                    children = fbody.children[0].children
                else:
                    children = fbody.children
                if typdesc == '%param':
                    if not params:
                        # Lazily create the shared "Parameters" field.
                        pfield = nodes.field()
                        pfield += nodes.field_name('', _('Parameters'))
                        pfield += nodes.field_body()
                        params = nodes.bullet_list()
                        pfield[1] += params
                        new_list += pfield
                    dlitem = nodes.list_item()
                    dlpar = nodes.paragraph()
                    dlpar += nodes.emphasis(obj, obj)
                    dlpar += nodes.Text(' -- ', ' -- ')
                    dlpar += children
                    param_nodes[obj] = dlpar
                    dlitem += dlpar
                    params += dlitem
                elif typdesc == '%type':
                    typenodes = fbody.children
                    if _is_only_paragraph(fbody):
                        typenodes = ([nodes.Text(' (')] +
                                     typenodes[0].children +
                                     [nodes.Text(')')])
                    param_types[obj] = typenodes
                else:
                    fieldname = typdesc + ' '
                    nfield = nodes.field()
                    nfieldname = nodes.field_name(fieldname, fieldname)
                    nfield += nfieldname
                    # FIX: the original reused the name ``node`` here,
                    # clobbering the function parameter.
                    argnode = nfieldname
                    if typ in doc_fields_with_linked_arg:
                        argnode = addnodes.pending_xref(
                            obj, reftype='obj', refcaption=False,
                            reftarget=obj, modname=env.currmodule,
                            classname=env.currclass)
                        nfieldname += argnode
                    argnode += nodes.Text(obj, obj)
                    nfield += nodes.field_body()
                    nfield[1] += fbody.children
                    new_list += nfield
            except (KeyError, ValueError):
                fnametext = fname.astext()
                try:
                    typ = _(doc_fields_without_arg[fnametext])
                except KeyError:
                    # at least capitalize the field name
                    typ = fnametext.capitalize()
                fname[0] = nodes.Text(typ)
                new_list += field
        # Splice each collected type annotation into its parameter line.
        # FIX: ``iteritems()`` is Python-2-only; also avoid shadowing
        # the ``type`` builtin.
        for param, ptype in param_types.items():
            if param in param_nodes:
                param_nodes[param][1:1] = ptype
        child.replace_self(new_list)