def make_definition_list(self, term, definition=None, classifier=None):
    """Build a ``definition_list_item`` from *term* plus optionals.

    *term* may be a docutils node or plain text (wrapped in ``strong``);
    *definition* may be a single node or a list/tuple of nodes.
    """
    item = nodes.definition_list_item()
    if not isinstance(term, nodes.Node):
        term = nodes.strong(text=term)
    item.append(term)
    if classifier is not None:
        item.append(nodes.classifier(text=classifier))
    if definition is not None:
        # Normalize to a sequence so a single definition child works too.
        children = (definition if isinstance(definition, (list, tuple))
                    else [definition])
        item.append(nodes.definition('', *children))
    return item
def print_command_args_and_opts(arg_list, opt_list, sub_list=None):
    """Build a definition list with up to three sections: positional
    arguments, options, and sub-commands (each skipped when empty)."""
    def _section(title, body):
        return nodes.definition_list_item(
            '', nodes.term(text=title), nodes.definition('', body))

    items = []
    if arg_list:
        items.append(_section('Positional arguments:', arg_list))
    if opt_list:
        items.append(_section('Options:', opt_list))
    if sub_list and len(sub_list):
        items.append(_section('Sub-commands:', sub_list))
    return nodes.definition_list('', *items)
def contribute_property(self, prop_list, prop_key, prop):
    """Append a definition-list item documenting *prop* (a property
    schema object) under *prop_list*, recursing into nested MAP/LIST
    schemas."""
    prop_item = nodes.definition_list_item(
        '', nodes.term('', prop_key))
    prop_list.append(prop_item)
    # The property's type is rendered as the docutils term classifier.
    prop_item.append(nodes.classifier('', prop.type))
    definition = nodes.definition()
    prop_item.append(definition)
    if not prop.implemented:
        # Unimplemented properties get only a note; nothing else.
        para = nodes.inline('', _('Not implemented.'))
        warning = nodes.note('', para)
        definition.append(warning)
        return
    if prop.description:
        para = nodes.paragraph('', prop.description)
        definition.append(para)
    # Update semantics: in-place update vs. resource replacement.
    if prop.update_allowed:
        para = nodes.paragraph('',
                               _('Can be updated without replacement.'))
        definition.append(para)
    else:
        para = nodes.paragraph('', _('Updates cause replacement.'))
        definition.append(para)
    # Required / optional (with default) status.
    if prop.required:
        para = nodes.paragraph('', _('Required property.'))
    elif prop.default is not None:
        para = nodes.paragraph(
            '', _('Optional property, defaults to "%s".') % prop.default)
    else:
        para = nodes.paragraph('', _('Optional property.'))
    definition.append(para)
    # One paragraph per declared constraint (str() yields its text).
    for constraint in prop.constraints:
        para = nodes.paragraph('', str(constraint))
        definition.append(para)
    sub_schema = None
    if prop.schema and prop.type == properties.MAP:
        para = nodes.emphasis('', _('Map properties:'))
        definition.append(para)
        sub_schema = prop.schema
    elif prop.schema and prop.type == properties.LIST:
        para = nodes.emphasis(
            '', _('List contents:'))
        definition.append(para)
        sub_schema = prop.schema
    if sub_schema:
        # Recurse: nested schemas become a nested definition list.
        sub_prop_list = nodes.definition_list()
        definition.append(sub_prop_list)
        for sub_prop_key in sorted(sub_schema.keys()):
            sub_prop = sub_schema[sub_prop_key]
            self.contribute_property(sub_prop_list, sub_prop_key,
                                     sub_prop)
def print_subcommand_list(data, nested_content):
    """Render each sub-command in *data* as a definition-list entry with
    its help text, usage block and (recursively) its own args/opts."""
    definitions = map_nested_definitions(nested_content)
    items = []
    if 'children' in data:
        for child in data['children']:
            my_def = [nodes.paragraph(
                text=child['help'])] if child['help'] else []
            name = child['name']
            # Let reST-supplied definitions override/extend the help.
            my_def = apply_definition(definitions, my_def, name)
            if len(my_def) == 0:
                my_def.append(nodes.paragraph(text='Undocumented'))
            my_def.append(nodes.literal_block(text=child['usage']))
            # NOTE(review): four positional arguments are passed here,
            # but other print_command_args_and_opts definitions in this
            # code take (arg_list, opt_list, sub_list=None) — confirm
            # the intended signature.
            my_def.append(print_command_args_and_opts(
                print_arg_list(child, nested_content),
                print_opt_list(child, nested_content),
                text_from_rst(child.get('description', ""), is_rst=True),
                print_subcommand_list(child, nested_content),
            ))
            items.append(
                nodes.definition_list_item(
                    '',
                    nodes.term('', '', nodes.strong(text=name)),
                    nodes.definition('', *my_def)
                )
            )
    return nodes.definition_list('', *items)
def run(self):
    """Render directive options as a definition list, linking church,
    preacher and tag values to their overview pages."""
    dlist = nodes.definition_list()
    link_templates = {'kerk': SERMONCHURHLINK,
                      'predikant': SERMONREFERENTLINK,
                      'tags': SERMONTAGLINK}
    for name in sorted(self.options):
        if name == 'added':
            continue
        dlist += nodes.term(text=name.capitalize())
        body = nodes.definition()
        para = nodes.paragraph()
        if name in link_templates:
            template = link_templates[name]
            # Comma-separated values each become a reference node.
            parts = [part.strip()
                     for part in self.options[name].split(',')]
            for idx, part in enumerate(parts):
                para += nodes.reference(refuri=template % part, text=part)
                if idx != len(parts) - 1:
                    para += nodes.inline(text=', ')
        else:
            para += nodes.inline(text=self.options[name])
        body += para
        dlist += body
    return [dlist]
def contribute_attributes(self, parent):
    """Append an 'Attributes' section documenting each entry of
    ``attrs_schemata``, flagging non-SUPPORTED support statuses."""
    if not self.attrs_schemata:
        return
    section = self._section(parent, _('Attributes'), '%s-attrs')
    attr_list = nodes.definition_list()
    section.append(attr_list)
    for key, schema in sorted(self.attrs_schemata.items()):
        item = nodes.definition_list_item('', nodes.term('', key))
        attr_list.append(item)
        body = nodes.definition()
        item.append(body)
        if schema.support_status.status != support.SUPPORTED:
            # Flag anything that isn't plainly supported with a note.
            details = schema.support_status.to_dict()
            if details['message'] is not None:
                template = _('%(status)s - %(message)s')
            else:
                template = _('%(status)s')
            body.append(nodes.note('', nodes.inline('',
                                                    template % details)))
        if schema.description:
            body.append(nodes.paragraph('', schema.description))
def run(self):
    """Parse a `configuration` directive into a target node plus a
    definition list pairing the bold configuration name with the parsed
    body and an optional field list of options.

    Fixes: `field_list != None` → `is not None` (identity comparison
    with None per PEP 8); removed the unused `text` local.
    """
    # Raise an error if the directive does not have contents.
    self.assert_has_content()
    self.document = self.state_machine.document
    # Record the configuration name for later cross-referencing.
    self.name = self.arguments[0].strip()
    term = nodes.term()
    term += nodes.strong(text=self.arguments[0])
    targetnode = self.make_targetnode()
    deflist = nodes.definition_list()
    configuration_def = nodes.definition_list_item()
    configuration_def += term
    defn = nodes.definition()
    configuration_def += defn
    deflist += configuration_def
    # Parse the directive contents into the definition body.
    self.state.nested_parse(self.content, self.content_offset, defn)
    option_map = {'features': 'Required for features'}
    field_list = self.options_to_field_list(option_map)
    if field_list is not None:
        defn += field_list
    self.parsed('configuration').append(self)
    return [targetnode, deflist]
def add_coqtop_output(self):
    """Add coqtop's responses to a Sphinx AST

    Finds nodes to process using is_coqtop_block."""
    with CoqTop(color=True) as repl:
        for node in self.document.traverse(
                CoqtopBlocksTransform.is_coqtop_block):
            options = node['coqtop_options']
            opt_undo, opt_reset, opt_input, opt_output = \
                self.parse_options(options)
            if opt_reset:
                repl.sendone("Reset Initial.")
            # Run each sentence through the REPL, pairing it with output.
            pairs = []
            for sentence in self.split_sentences(node.rawsource):
                pairs.append((sentence, repl.sendone(sentence)))
            if opt_undo:
                # Roll back every sentence just executed in this block.
                repl.sendone("Undo {}.".format(len(pairs)))
            dli = nodes.definition_list_item()
            for sentence, output in pairs:
                # Use Coqdoq to highlight input
                in_chunks = highlight_using_coqdoc(sentence)
                dli += nodes.term(sentence, '', *in_chunks,
                                  classes=self.block_classes(opt_input))
                # Parse ANSI sequences to highlight output
                out_chunks = AnsiColorsParser().colorize_str(output)
                dli += nodes.definition(
                    output, *out_chunks,
                    classes=self.block_classes(opt_output, output))
            # Replace the placeholder node's children with the rendered
            # input/output definition list.
            node.clear()
            node.rawsource = self.make_rawsource(
                pairs, opt_input, opt_output)
            node['classes'].extend(
                self.block_classes(opt_input or opt_output))
            # Marker inline is present only when a reset was requested.
            node += nodes.inline('', '',
                                 classes=['coqtop-reset'] * opt_reset)
            node += nodes.definition_list(node.rawsource, dli)
def print_command_args_and_opts(arg_list, opt_list, sub_list=None):
    """Build a definition list covering positional arguments, each
    titled option group in *opt_list*, and sub-commands."""
    def _entry(title, body):
        return nodes.definition_list_item(
            '', nodes.term(text=title), nodes.definition('', body))

    items = []
    if arg_list:
        items.append(_entry('Positional arguments:', arg_list))
    # opt_list is a sequence of {'title': ..., 'options': ...} groups.
    for group in opt_list:
        if group['options'] is not None:
            items.append(_entry(group['title'], group['options']))
    if sub_list and len(sub_list):
        items.append(_entry('Sub-commands:', sub_list))
    return nodes.definition_list('', *items)
def process_form(self, formclass, parent):
    """Render every field of an instantiated *formclass* into a
    definition list attached under *parent*."""
    assert issubclass(formclass, Form)
    field_container = nodes.definition_list()
    parent.append(nodes.definition('', field_container))
    for field in formclass():
        self.process_field_delegate(field, field_container)
def run(self):
    """Parse a `test_procedure` directive into a target node and a
    definition list; decodes the optional comma-separated `direction`
    option into a bitmask (input=1, output=2, both=3) on ``self``.

    Fixes: `!= None` → `is not None`; locals `input`/`output` renamed so
    they no longer shadow builtins; removed unused `text` local and
    commented-out debug prints.
    """
    # Raise an error if the directive does not have contents.
    self.assert_has_content()
    self.document = self.state_machine.document
    self.name = self.arguments[0]
    term = nodes.term()
    term += nodes.strong(text=self.arguments[0])
    targetnode = self.make_targetnode()
    deflist = nodes.definition_list()
    test_def = nodes.definition_list_item()
    test_def += term
    defn = nodes.definition()
    test_def += defn
    deflist += test_def
    # Direction bitmask: "input" contributes 1, "output" contributes 2.
    if 'direction' in self.options:
        in_flag = 0
        out_flag = 0
        for part in self.options['direction'].split(","):
            if part == "input":
                in_flag = 1
            if part == "output":
                out_flag = 2
        self.direction = in_flag + out_flag
    # Parse the directive contents into the definition body.
    self.state.nested_parse(self.content, self.content_offset, defn)
    option_map = {
        'setup': 'Required setup',
        'direction': 'Direction (input|output|both)',
    }
    field_list = self.options_to_field_list(option_map)
    if field_list is not None:
        defn += field_list
    if 'setup' in self.options:
        self.setup = self.options['setup']
    self.parsed('test_procedure').append(self)
    return [targetnode, deflist]
def run(self):
    """List the options of the I/O format named by the first argument
    as a definition list of name -> description."""
    fmt_options = io.get_options_for_format(self.arguments[0])
    dlist = nodes.definition_list()
    for name, description, value in fmt_options:
        entry = nodes.definition_list_item()
        label = name + ' '
        entry.append(nodes.term(label, label))
        entry.append(nodes.definition('',
                                      nodes.paragraph('', description)))
        dlist.append(entry)
    return [dlist]
def codeitem_directive(dirname, arguments, options, content, lineno,
                       content_offset, block_text, state, state_machine):
    """Directive handler: format a code item (callable or other object)
    as a definition list with a bold, target-linked name and the parsed
    directive body as the definition."""
    if not content:
        content = [u""]
    # Try the callable pattern first, then the generic object pattern.
    m = _CALLABLE_RE.match(u"".join(arguments))
    m2 = _OTHER_RE.match(u"".join(arguments))
    if m:
        g = m.groupdict()
        if g['rest'] is None:
            g['rest'] = ''
        # Escape '*' so the prefix survives inline-markup parsing.
        if g['args'].strip():
            firstline = "%s%s **%s** (``%s``) %s" % (
                g['pre'].replace('*', r'\*'), g['module'], g['name'],
                g['args'], g['rest'])
        else:
            # No-argument callables render with empty parentheses.
            firstline = "%s%s **%s** () %s" % (
                g['pre'].replace('*', r'\*'), g['module'], g['name'],
                g['rest'])
        if g['module']:
            target = '%s%s' % (g['module'], g['name'])
        else:
            target = g['name']
    elif m2:
        g = m2.groupdict()
        firstline = "%s%s **%s**" % (g['pre'].replace('*', r'\*'),
                                     g['module'], g['name'])
        if g['module']:
            target = '%s%s' % (g['module'], g['name'])
        else:
            target = g['name']
    else:
        # Unrecognized signature: use the raw argument text, no target.
        firstline = u"".join(arguments)
        target = None
    dl = nodes.definition_list()
    di = nodes.definition_list_item()
    dl += di
    # Parse the formatted first line as inline reST for the term.
    title_stuff, messages = state.inline_text(firstline, lineno)
    dt = nodes.term(firstline, *title_stuff)
    di += dt
    dd = nodes.definition()
    di += dd
    if target:
        dt['ids'] += [rst.make_target_id(target)]
    dl['classes'] += [dirname, 'code-item']
    # Parse the directive body into the definition node.
    _nested_parse(state, content, dd)
    return [dl]
def render(self):
    """Yield either a 'None.' paragraph or a definition list mapping
    each exception name to its description.

    Fix: ``dict.iteritems()`` is Python-2-only; ``items()`` behaves the
    same on both Python 2 and 3.
    """
    symbol = self.symbol()
    if not symbol.exceptions:
        yield nodes.paragraph(text=_('None.'))
    else:
        definition_list = nodes.definition_list()
        for name, description in symbol.exceptions.items():
            definition_list.append(nodes.definition_list_item(
                '',
                nodes.term('', '', nodes.literal('', name)),
                nodes.definition('',
                                 nodes.paragraph(text=description))))
        yield definition_list
def codeitem_directive(dirname, arguments, options, content, lineno,
                       content_offset, block_set, state, state_machine):
    """Directive handler: format a code item (callable or other object)
    as a definition list; unlike the sibling variant, the raw target
    string is used directly as the node id."""
    if not content:
        content = [u""]
    # Try the callable pattern first, then the generic object pattern.
    m = _CALLABLE_RE.match(u"".join(arguments))
    m2 = _OTHER_RE.match(u"".join(arguments))
    if m:
        g = m.groupdict()
        if g["rest"] is None:
            g["rest"] = ""
        # Escape '*' so the prefix survives inline-markup parsing.
        if g["args"].strip():
            firstline = "%s%s **%s** (``%s``) %s" % (
                g["pre"].replace("*", r"\*"),
                g["module"],
                g["name"],
                g["args"],
                g["rest"],
            )
        else:
            # No-argument callables render with empty parentheses.
            firstline = "%s%s **%s** () %s" % (
                g["pre"].replace("*", r"\*"), g["module"], g["name"],
                g["rest"])
        if g["module"]:
            target = "%s%s" % (g["module"], g["name"])
        else:
            target = g["name"]
    elif m2:
        g = m2.groupdict()
        firstline = "%s%s **%s**" % (g["pre"].replace("*", r"\*"),
                                     g["module"], g["name"])
        if g["module"]:
            target = "%s%s" % (g["module"], g["name"])
        else:
            target = g["name"]
    else:
        # Unrecognized signature: use the raw argument text, no target.
        firstline = u"".join(arguments)
        target = None
    dl = nodes.definition_list()
    di = nodes.definition_list_item()
    dl += di
    # Parse the formatted first line as inline reST for the term.
    title_stuff, messages = state.inline_text(firstline, lineno)
    dt = nodes.term(firstline, *title_stuff)
    di += dt
    dd = nodes.definition()
    di += dd
    if target:
        dt["ids"] += [target]
    dl["classes"] += [dirname, "code-item"]
    # Parse the directive body into the definition node.
    _nested_parse(state, content, dd)
    return [dl]
def run(self):
    """Emit a 'propparams' definition list describing a property:
    its Type, optional Values, and Default."""
    proptype = self.arguments[0]
    default = self.arguments[1]
    params = nodes.definition_list()
    params['classes'].append('propparams')

    def _row(label, content):
        return nodes.definition_list_item(
            '', nodes.term('', label),
            nodes.definition('', nodes.paragraph('', content)))

    params += _row('Type', proptype)
    if 'values' in self.options:
        params += _row('Values', self.options['values'])
    params += _row('Default', default)
    return [params]
def format_arguments(arguments):
    """Render argparse-style *arguments* as a single definition list,
    appending a synthetic trailing entry for ``-h``/``--help``."""
    return [nodes.definition_list(
        '', *[
            nodes.definition_list_item(
                '',
                nodes.term(
                    # node.Text('') is required because otherwise for some
                    # reason first name node is seen in HTML output as
                    # `<strong>abc</strong>`.
                    '',
                    *([nodes.Text('')] + (
                        # Options: every alias in bold, comma-separated.
                        insert_separators([
                            nodes.strong('', '', *[nodes.Text(ch)
                                                   for ch in name])
                            for name in argument.names
                        ], ', ')
                        if argument.is_option else
                        # Unless node.Text('') is here metavar is written in
                        # bold in the man page.
                        [nodes.Text(''),
                         nodes.emphasis(text=argument.metavar)]
                    ) + (
                        # Value-taking options also show their metavar.
                        []
                        if not argument.is_option or not argument.nargs else
                        [nodes.Text(' '),
                         nodes.emphasis('', argument.metavar)]
                    ))
                ),
                nodes.definition('', nodes.paragraph(
                    '', *parse_argparse_text(argument.help or ''))),
            )
            for argument in flatten_groups(arguments)
        ] + [
            # Synthetic entry so every page documents -h/--help.
            nodes.definition_list_item(
                '',
                nodes.term(
                    '',
                    nodes.Text(''),
                    nodes.strong(text='-h'),
                    nodes.Text(', '),
                    nodes.strong('', '', nodes.Text('-'),
                                 nodes.Text('-help')),
                ),
                nodes.definition('', nodes.paragraph(
                    '', nodes.Text('Display help and exit.')))
            )
        ]
    )]
def run(self):
    """Create a linkable single-entry definition list documenting one
    command-line argument, preceded by its target node."""
    refid = 'cmdoption-arg-' + nodes.make_id(self.arguments[0])
    target = nodes.target(names=[refid], ids=[refid])
    dlist = nodes.definition_list()
    item = nodes.definition_list_item()
    term = nodes.term()
    term += nodes.literal(self.arguments[0], self.arguments[0],
                          classes=["descname"])
    item += term
    body = nodes.definition()
    item += body
    # nested_parse needs the document set on the parse target.
    body.document = self.state.document
    self.state.nested_parse(self.content, self.content_offset, body)
    dlist += item
    return [target, dlist]
def described_as(self):
    """Generator helper: temporarily redirect output into a fresh
    definition paragraph.

    Appends a definition/paragraph pair to the current definition-list
    item, points ``_current_node`` at the paragraph for the duration of
    the ``yield``, then restores it to the enclosing list.
    """
    item = self._current_node
    assert isinstance(item, nodes.definition_list_item), item
    para = nodes.paragraph()
    body = nodes.definition()
    body += para
    item += body
    self._current_node = para
    yield
    # When this is done, we're back to the enclosing list.
    self._current_node = item.parent
def run(self):
    """Parse a `prepare_setup` directive; expand its comma-separated
    `runtests` option into the matching already-parsed test objects.

    Fixes: `!= None` → `is not None`; bare `exit(1)` → `sys.exit(1)`
    (the `exit` builtin comes from the `site` module and is absent under
    ``python -S``); removed commented-out debug prints.
    """
    self.document = self.state_machine.document
    self.name = self.arguments[0]
    term = nodes.term()
    term += nodes.strong(text=self.arguments[0])
    targetnode = self.make_targetnode()
    deflist = nodes.definition_list()
    test_def = nodes.definition_list_item()
    test_def += term
    defn = nodes.definition()
    test_def += defn
    deflist += test_def
    # Parse the directive contents into the definition body.
    self.state.nested_parse(self.content, self.content_offset, defn)
    option_map = {'runtests': 'Tests to run'}
    field_list = self.options_to_field_list(option_map)
    # Expand `runtests` names into the parsed test objects they match.
    self.runtests = []
    if 'runtests' in self.options:
        for raw_name in self.options['runtests'].split(","):
            matches = [t for t in self.parsed('test')
                       if raw_name.strip() == t.name]
            if len(matches) == 0:
                sys.stderr.write(
                    "ERROR : runtests field couldn't expand to any tests"
                    " for name `" + raw_name.strip() + "'\n")
                if self.check_errors():
                    sys.exit(1)
            self.runtests.extend(matches)
    if field_list is not None:
        defn += field_list
    self.parsed('prepare_setup').append(self)
    return [targetnode, deflist]
def render(self):
    """Yield either a 'None.' paragraph or a definition list of the
    symbol's named parameters with their brief descriptions."""
    symbol = self.symbol()
    if not symbol.params:
        yield nodes.paragraph(text=_('None.'))
    else:
        dlist = nodes.definition_list()
        for param in symbol.params:
            name = param.get('declname')
            if name is None:
                # Unnamed parameters (e.g. bare `void`) are skipped.
                continue
            brief = param.get('briefdescription', '')
            dlist.append(nodes.definition_list_item(
                '',
                nodes.term('', '', nodes.literal('', name)),
                nodes.definition('', nodes.paragraph(text=brief))))
        yield dlist
def _prop_section(self, parent, title, id_pattern):
    """Create a titled, permalink-anchored section under *parent* and
    return its definition node for the caller to populate."""
    section_id = id_pattern % self.resource_type
    section = nodes.section(ids=[section_id])
    parent.append(section)
    # Ignore title generated for list items
    if title != '*':
        heading = nodes.term('', title)
        permalink = nodes.reference('', u'\xb6')
        permalink['classes'] = ['headerlink']
        permalink['refid'] = section_id
        heading.append(permalink)
        section.append(heading)
    field = nodes.definition()
    section.append(field)
    return field
def handle_item(fieldarg: str,
                content: List[nodes.inline]) -> nodes.definition_list_item:
    """Build one field entry: a cross-referenced term (with an optional
    ' : type' suffix) paired with the field body."""
    head = nodes.term()
    head += makerefs(self.rolename, fieldarg, addnodes.literal_strong)
    fieldtype = types.pop(fieldarg, None)
    if fieldtype is not None:
        head += nodes.Text(' : ')
        # A single Text node is a plain type name we can cross-reference;
        # anything richer is inserted verbatim.
        if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
            typename = ''.join(n.astext() for n in fieldtype)
            head += makerefs(self.typerolename, typename,
                             addnodes.literal_emphasis)
        else:
            head += fieldtype
    body = nodes.definition('', nodes.paragraph('', '', *content))
    return nodes.definition_list_item('', head, body)
def rst_nodes(self):
    """Render this parameter as a single definition-list item: the
    parameter name(s) as the term, the description as the definition."""
    name_nodes = []
    for entry in self.parameternamelist:
        name_nodes.extend(entry.rst_nodes())
    term = nodes.term("", "", *name_nodes)
    desc_nodes = []
    if self.parameterdescription:
        desc_nodes.extend(self.parameterdescription.rst_nodes())
    definition = nodes.definition("", *desc_nodes)
    return [nodes.definition_list_item("", term, definition)]
def _format_subcommands(self, parser_info):
    """Render each sub-command as `bare_usage -> help` in a definition
    list, substituting 'Undocumented' when no help text exists."""
    assert 'children' in parser_info
    entries = []
    for subcmd in parser_info['children']:
        help_text = subcmd['help'] or 'Undocumented'
        entries.append(nodes.definition_list_item(
            '',
            nodes.term('', '', nodes.strong(text=subcmd['bare_usage'])),
            nodes.definition('', nodes.paragraph(text=help_text))))
    return nodes.definition_list('', *entries)
def document(self):
    """Produce the doctree nodes documenting this schema entity, plus
    (for top-level entities only) the documentation of every unique
    dependency; already-documented entities become cross-references."""
    eid = (self.tag, self.fqname)
    if eid in self.documented:
        # Already emitted once: just link back to the earlier docs.
        return [build_paragraph(get_xref(self.tag, eid[1]))]
    else:
        self.documented.append(eid)
    rv = [self.target_node(self.tag, self.ns_name, self.name)]
    data = addnodes.desc(objtype=self.tag)
    targetid = get_target_id(self.tag, self.ns_name, self.name)
    header = addnodes.desc_signature('', '', first=True, ids=[targetid])
    if self.include['doc']:
        header.extend([nodes.emphasis(self.tag, self.tag), text(" "),
                       self.tname])
    data.append(header)
    contents = nodes.definition()
    if self.include['doc']:
        contents.append(self.get_doc(self.entity))
    # Tag-specific body, dispatched to document_<tag>().
    contents.extend(getattr(self, "document_%s" % self.tag)())
    data.append(contents)
    rv.append(data)
    if self.parent is None:
        # avoid adding duplicate dependencies
        added = [(self.type, self.name)]
        for typ, name, entity in self.dependencies:
            if not name:
                name = entity.get('name')
            if (typ, name) in added:
                continue
            ns_name, name = self.split_ns(name)
            ns_uri = self.namespaces[ns_name]
            if not entity:
                # Dependency named but not resolved: look it up by type.
                try:
                    entity = self.entities[ns_uri][typ][name]
                except KeyError:
                    self.app.warn("Dependency %s not found in schemas" %
                                  get_target_id(typ, ns_name, name))
                    continue
            doc = self.get_documentor(entity, name=name, ns_uri=ns_uri)
            rv.extend(doc.document())
            added.append((typ, name))
    return rv
def run(self):
    """Document an Emacs Lisp keymap: its docstring followed by one
    `desc` node per keybinding, honoring `exclude` and `replace`
    options."""
    env = self.state.document.settings.env
    package = env.temp_data.get('el:package')
    keymap_list = DATA.get(package, {}).get('keymap', [])
    keymap_name = self.arguments[0]
    for keymap in keymap_list:
        if keymap['name'] == keymap_name:
            break
    else:
        # No matching keymap in the loaded package data.
        return [self.state.reporter.warning(
            "Keymap {0} not found".format(keymap_name))]
    nodelist = []
    mapdoc = keymap['doc']
    if mapdoc:
        nd = nodes.paragraph()
        lines = string2lines(doc_to_rst(mapdoc))
        # Strip the trailing "(fn ...)" usage hint Emacs appends.
        if lines and lines[-1].startswith('(fn '):
            lines = lines[:-1]
        self.state.nested_parse(StringList(lines), 0, nd)
        nodelist.append(nd)
    exclude = self.options.get('exclude', [])
    replace = self.options.get('replace', [])
    for keybind in filter_by_exclude_regexp_list(
            keymap['data'], exclude, lambda x: x['func']):
        desc = addnodes.desc()
        desc['domain'] = 'el'
        desc['objtype'] = 'keybind'
        desc['noindex'] = False
        signode = addnodes.desc_signature()
        # signode += addnodes.desc_annotation("", 'keybind ')
        # Apply user-supplied sed-style replacements to the key string.
        key = simple_sed(replace, keybind['key'])
        signode += addnodes.desc_name("", key)
        signode += addnodes.desc_addname("", " " + keybind['func'])
        desc += signode
        if keybind['doc']:
            nd = addnodes.desc_content()
            lines = string2lines(doc_to_rst(keybind['doc']))
            # Strip the trailing "(fn ...)" usage hint here as well.
            if lines and lines[-1].startswith('(fn '):
                lines = lines[:-1]
            self.state.nested_parse(StringList(lines), 0, nd)
            desc += nodes.definition("", nd)
        nodelist.append(desc)
    return nodelist
def contribute_attributes(self, parent):
    """Append an 'Attributes' section listing each attribute of the
    resource class's attributes_schema with its description."""
    schema = self.resource_class.attributes_schema
    if not schema:
        return
    section = self._section(parent, _("Attributes"), "%s-attrs")
    attr_list = nodes.definition_list()
    section.append(attr_list)
    for key in sorted(schema):
        item = nodes.definition_list_item("", nodes.term("", key))
        attr_list.append(item)
        body = nodes.definition()
        item.append(body)
        description = schema[key]
        if description:
            body.append(nodes.paragraph("", description))
def generate_flag_summary(flags, category):
    """Summarize *flags* under the heading for *category* as one
    definition-list item listing space-separated flag names."""
    summary = nodes.definition_list_item()
    summary += nodes.term(text=categories[category])
    names_block = nodes.definition()
    summary += names_block
    # Emit every flag name (aliases included), each followed by a space.
    for flag_info in flags:
        for name in flag_info['names']:
            names_block += nodes.literal(text=name)
            names_block += nodes.inline(text=' ')
    names_block += nodes.inline(text='\n')
    return summary
def generate_flag_list(flags, category):
    """Turn the table-style *flags* data into a definition list mapping
    flag name to its description cell."""
    dlist = nodes.definition_list()
    for flag_info in flags:
        entry = nodes.definition_list_item()
        term = nodes.term()
        # The man writer is picky, so we have to remove the outer
        # paragraph node to get just the flag name
        term += flag_info['cells'][0][0]
        entry += term
        body = nodes.definition()
        body += flag_info['cells'][1]
        entry += body
        dlist += entry
    return dlist
def run(self):
    """Render a mrjob option directive: a linkback target plus a
    single-item definition list (bold config name, literal switches,
    type classifier, default, parsed description); also records the
    option in the build env for the optionlist directive to collect."""
    env = self.state.document.settings.env
    # generate the linkback node for this option
    targetid = "option-%s" % self.options['config']
    targetnode = nodes.target('', '', ids=[targetid])
    # Each option will be outputted as a single-item definition list
    # (just like it was doing before we used this extension)
    dl = nodes.definition_list()
    dl['classes'].append('mrjob-opt')
    dli = nodes.definition_list_item()
    term = nodes.term()
    # config option shall be bold
    if 'config' in self.options:
        cfg = self.options['config']
        term.append(nodes.strong(cfg, cfg))
        # Open the parenthesis only when switches follow the name.
        if 'switch' in self.options:
            term.append(nodes.Text(' (', ' ('))
    # switch shall be comma-separated literals
    if 'switch' in self.options:
        switches = self.options['switch'].split(', ')
        for i, s in enumerate(switches):
            if i > 0:
                term.append(nodes.Text(', ', ', '))
            term.append(nodes.literal(s, s))
        if 'config' in self.options:
            term.append(nodes.Text(')', ')'))
    dli.append(term)
    # classifier is either plain text or a link to some more docs, so
    # parse its contents
    classifier = nodes.classifier()
    type_nodes, messages = self.state.inline_text(
        self.options.get('type', ''), self.lineno)
    classifier.extend(type_nodes)
    dli.append(classifier)
    # definition holds the description
    defn = nodes.definition()
    # add a default if any
    default_nodes = []
    if 'default' in self.options:
        default_par = nodes.paragraph()
        default_par.append(nodes.strong('Default: ', 'Default: '))
        textnodes, messages = self.state.inline_text(
            self.options['default'], self.lineno)
        default_nodes = textnodes
        default_par.extend(textnodes)
        defn.append(default_par)
    # parse the description like a nested block (see
    # sphinx.compat.make_admonition)
    desc_par = nodes.paragraph()
    self.state.nested_parse(self.content, self.content_offset, desc_par)
    defn.append(desc_par)
    dli.append(defn)
    dl.append(dli)
    # Lazily create the env-level registries on first use.
    if not hasattr(env, 'optionlist_all_options'):
        env.optionlist_all_options = []
        env.optionlist_indexed_options = {}
    # store info for the optionlist traversal to find
    info = {
        'docname': env.docname,
        'lineno': self.lineno,
        'options': self.options,
        'content': self.content,
        'target': targetnode,
        'type_nodes': type_nodes,
        'default_nodes': default_nodes,
    }
    env.optionlist_all_options.append(info)
    env.optionlist_indexed_options[self.options['config']] = info
    return [targetnode, dl]
def contribute_property(self, prop_list, prop_key, prop):
    """Document a CloudFormation-style property dict (Type, bounds,
    description, default, patterns, allowed values), recursing into
    nested Map/List schemas."""
    prop_item = nodes.definition_list_item('', nodes.term('', prop_key))
    prop_list.append(prop_item)
    prop_type = prop.get('Type')
    # Classifier combines the type with any numeric/length bounds.
    classifier = prop_type
    if prop.get('MinValue'):
        classifier += _(' from %s') % prop.get('MinValue')
    if prop.get('MaxValue'):
        classifier += _(' up to %s') % prop.get('MaxValue')
    if prop.get('MinLength'):
        classifier += _(' from length %s') % prop.get('MinLength')
    if prop.get('MaxLength'):
        classifier += _(' up to length %s') % prop.get('MaxLength')
    prop_item.append(nodes.classifier('', classifier))
    definition = nodes.definition()
    prop_item.append(definition)
    if not prop.get('Implemented', True):
        # Unimplemented properties get only a note; nothing else.
        para = nodes.inline('', _('Not implemented.'))
        warning = nodes.note('', para)
        definition.append(warning)
        return
    description = prop.get('Description')
    if description:
        para = nodes.paragraph('', description)
        definition.append(para)
    # Required / optional (with default) status.
    if prop.get('Required'):
        para = nodes.paragraph('', _('Required property.'))
    elif prop.get('Default'):
        para = nodes.paragraph(
            '',
            _('Optional property, defaults to "%s".') % prop.get('Default'))
    else:
        para = nodes.paragraph('', _('Optional property.'))
    definition.append(para)
    if prop.get('AllowedPattern'):
        para = nodes.paragraph(
            '',
            _('Value must match pattern: %s') % prop.get('AllowedPattern'))
        definition.append(para)
    if prop.get('AllowedValues'):
        # Drop None entries before joining the allowed values.
        allowed = [str(a) for a in prop.get('AllowedValues')
                   if a is not None]
        para = nodes.paragraph(
            '', _('Allowed values: %s') % ', '.join(allowed))
        definition.append(para)
    sub_schema = None
    if prop.get('Schema') and prop_type == 'Map':
        para = nodes.emphasis('', _('Map properties:'))
        definition.append(para)
        sub_schema = prop.get('Schema')
    elif prop_type == 'List' and prop.get('Schema', {}).get('Schema'):
        # Lists of maps nest the map schema one level deeper.
        para = nodes.emphasis(
            '', _('List contains maps with the properties:'))
        definition.append(para)
        sub_schema = prop.get('Schema').get('Schema')
    if sub_schema:
        # Recurse: nested schemas become a nested definition list.
        sub_prop_list = nodes.definition_list()
        definition.append(sub_prop_list)
        for sub_prop_key in sorted(sub_schema.keys()):
            sub_prop = sub_schema[sub_prop_key]
            self.contribute_property(sub_prop_list, sub_prop_key,
                                     sub_prop)
def run(self) -> List[Node]:
    """Parse the glossary directive: collect multi-term entries, build
    index-targeted term nodes, and emit a (optionally sorted)
    definition list wrapped in a glossary node."""
    node = addnodes.glossary()
    node.document = self.state.document

    # This directive implements a custom format of the reST definition list
    # that allows multiple lines of terms before the definition.  This is
    # easy to parse since we know that the contents of the glossary *must
    # be* a definition list.

    # first, collect single entries
    entries = []  # type: List[Tuple[List[Tuple[str, str, int]], StringList]]
    in_definition = True
    in_comment = False
    was_empty = True
    messages = []  # type: List[Node]
    for line, (source, lineno) in zip(self.content, self.content.items):
        # empty line -> add to last definition
        if not line:
            if in_definition and entries:
                entries[-1][1].append('', source, lineno)
            was_empty = True
            continue
        # unindented line -> a term
        if line and not line[0].isspace():
            # enable comments
            if line.startswith('.. '):
                in_comment = True
                continue
            else:
                in_comment = False

            # first term of definition
            if in_definition:
                if not was_empty:
                    messages.append(
                        self.state.reporter.warning(_(
                            'glossary term must be preceded by empty line'
                        ), source=source, line=lineno))
                entries.append(([(line, source, lineno)], StringList()))
                in_definition = False
            # second term and following
            else:
                if was_empty:
                    messages.append(
                        self.state.reporter.warning(_(
                            'glossary terms must not be separated by empty lines'
                        ), source=source, line=lineno))
                if entries:
                    entries[-1][0].append((line, source, lineno))
                else:
                    messages.append(
                        self.state.reporter.warning(_(
                            'glossary seems to be misformatted, check indentation'
                        ), source=source, line=lineno))
        elif in_comment:
            pass
        else:
            if not in_definition:
                # first line of definition, determines indentation
                in_definition = True
                indent_len = len(line) - len(line.lstrip())
            if entries:
                entries[-1][1].append(line[indent_len:], source, lineno)
            else:
                messages.append(
                    self.state.reporter.warning(_(
                        'glossary seems to be misformatted, check indentation'
                    ), source=source, line=lineno))
        was_empty = False

    # now, parse all the entries into a big definition list
    items = []
    for terms, definition in entries:
        termtexts = []  # type: List[str]
        termnodes = []  # type: List[Node]
        system_messages = []  # type: List[Node]
        for line, source, lineno in terms:
            parts = split_term_classifiers(line)
            # parse the term with inline markup
            # classifiers (parts[1:]) will not be shown on doctree
            textnodes, sysmsg = self.state.inline_text(parts[0], lineno)

            # use first classifier as a index key
            term = make_glossary_term(self.env, textnodes, parts[1],
                                      source, lineno,
                                      document=self.state.document)
            term.rawsource = line
            system_messages.extend(sysmsg)
            termtexts.append(term.astext())
            termnodes.append(term)

        termnodes.extend(system_messages)

        defnode = nodes.definition()
        if definition:
            self.state.nested_parse(definition, definition.items[0][1],
                                    defnode)
        termnodes.append(defnode)
        items.append(
            (termtexts, nodes.definition_list_item('', *termnodes)))

    if 'sorted' in self.options:
        # Sort case-insensitively on the first term, NFD-normalized so
        # accented terms collate with their base characters.
        items.sort(
            key=lambda x: unicodedata.normalize('NFD', x[0][0].lower()))

    dlist = nodes.definition_list()
    dlist['classes'].append('glossary')
    dlist.extend(item[1] for item in items)
    node += dlist
    return messages + [node]
def run(self):
    """Render documentation for one gRPC method (looked up by symbol).

    Emits, in order: the method's leading comment, streaming warnings,
    a URI-template code block, a definition list of route parameters,
    and (for GET routes) a definition list of query parameters.
    """
    result = []
    symbol = self.arguments[0]
    descriptor = descriptors_by_symbol[symbol]

    # Leading comment attached to the method itself, if any.
    comment = find_comment(symbol, prefix='')
    if comment:
        result += produce_nodes(self.state, comment)

    if descriptor.client_streaming:
        text = ('This method uses client-streaming.')
        result.append(
            nodes.warning(
                '',
                nodes.paragraph('', '', nodes.Text(text)),
            ))
    if descriptor.server_streaming:
        text = ('This method uses server-streaming. ' +
                'Yamcs sends an unspecified amount of data ' +
                'using chunked transfer encoding.')
        result.append(
            nodes.warning(
                '',
                nodes.paragraph('', '', nodes.Text(text)),
            ))

    route_options = descriptor.options.Extensions[annotations_pb2.route]
    route_text = get_route_for_method_descriptor(descriptor)

    # Show the HTTP route as a literal uritemplate block.
    raw = '.. rubric:: URI Template\n'
    raw += '.. code-block:: uritemplate\n\n'
    raw += ' ' + route_text + '\n'
    result += produce_nodes(self.state, raw)

    input_descriptor = descriptors_by_symbol[descriptor.input_type]

    # Path parameters: each documented by the comment on the matching
    # field of the request message.
    route_params = get_route_params(route_text)
    if route_params:
        dl_items = []
        for param in route_params:
            param_template = get_route_param_template(route_text, param)
            comment = find_comment(descriptor.input_type + '.' + param,
                                   prefix='') or ''
            dl_items.append(
                nodes.definition_list_item(
                    '',
                    nodes.term('', '', nodes.literal('', param_template)),
                    nodes.definition('', nodes.paragraph(text=comment)),
                ))
        result += [nodes.definition_list('', *dl_items)]

    # Query parameters: only for GET routes; every request field that is
    # not already consumed as a path parameter.
    if route_options.get:
        query_param_fields = []
        for field in input_descriptor.field:
            if field.json_name not in route_params:
                query_param_fields.append(field)
        if query_param_fields:
            dl_items = []
            for field in query_param_fields:
                field_symbol = descriptor.input_type + '.' + field.name
                comment_node = nodes.section()
                comment = find_comment(field_symbol, prefix='')
                if comment:
                    for child in produce_nodes(self.state, comment):
                        comment_node += child
                dl_items.append(
                    nodes.definition_list_item(
                        '',
                        nodes.term('', '',
                                   nodes.literal('', field.json_name)),
                        nodes.definition('', comment_node),
                    ))
            result += [
                nodes.rubric('', 'Query Parameters'),
                nodes.definition_list('', *dl_items),
            ]
    return result
def run(self):
    """Parse a ``feature`` directive into a definition list.

    Builds the feature's doctree representation (target + definition
    list), resolves ``parents``/``config_options`` options into Feature
    objects, and registers everything in ``self.parsed('feature')``.

    Returns:
        [targetnode, deflist] — the link target and the rendered list.
    """
    # Content is optional, so don't raise an error if it's missing.
    self.document = self.state_machine.document
    self.is_config_option = False

    self.name = self.arguments[0].strip()
    title = self.arguments[0]
    if 'parent' in self.options:
        title += " (" + self.options['parent'] + ")"
    term = nodes.term()
    n = nodes.strong(text=title)
    term += n
    targetnode = self.make_targetnode()

    deflist = nodes.definition_list()
    feature_def = nodes.definition_list_item()
    feature_def += term
    defn = nodes.definition()
    feature_def += defn
    deflist += feature_def

    # Parse the directive contents.
    self.state.nested_parse(self.content, self.content_offset, defn)

    option_map = {
        'parents': 'Parent features',
        'config_options': 'Options',
    }
    field_list = self.options_to_field_list(option_map)

    if 'parents' in self.options:
        self.parents = []
        # First pass: validate every referenced parent exists.
        for p in self.options['parents'].split(","):
            found = False
            for f in self.parsed('feature'):
                if p.strip() == f.name:
                    found = True
            if not found:
                sys.stderr.write("ERROR: Feature `" + self.name +
                                 "' refers to unknown parent `" +
                                 p.strip() + "'")
        if self.check_errors():
            # sys.exit instead of the bare exit() builtin: exit() comes
            # from the site module and may be absent in embedded runs.
            sys.exit(1)
        # Second pass: collect the actual parent Feature objects.
        for p in self.options['parents'].split(","):
            self.parents.extend(
                [f for f in self.parsed('feature') if p.strip() == f.name])
        ancestors = set(self.parents)
        for p in self.parents:
            ancestors = ancestors | set(p.ancestors)
        self.ancestors = list(ancestors)
    else:
        self.parents = []
        self.ancestors = []

    self.summarize = ('summarize' in self.options)

    if 'config_options' in self.options:
        self.choices = []
        found_default = False
        optstr = self.options['config_options'].strip()

        # Optional trailing ", default=<value>" suffix.
        # Raw strings: "\." / "\(" are invalid escapes in plain strings
        # on modern Python.
        p = re.compile(r"(.*), *default *= *(.*)")
        m = p.match(optstr)
        if m:
            optstr = m.groups(0)[0]
            default = m.groups(0)[1]
        else:
            default = None

        # "<lb>..<ub>" declares a numeric range option.
        p = re.compile(r"(.*)\.\.(.*)")
        m = p.match(optstr)
        if m:
            # int() conversions validate the bounds (raise on non-numeric);
            # the values themselves are not used further here.
            lb = int(m.groups(0)[0])
            ub = int(m.groups(0)[1])
            name = self.name + "__RANGE"
            choice = Feature(name)
            choice.summarize = ('summarize_options' in self.options)
            self.choices.append(choice)
            self.parsed('feature').append(choice)
            choice.is_range = True
            choice.is_config_option = True
            choice.is_default = False
            choice.parents = [self]
            choice.ancestors = [self]
            choice.ancestors.extend(self.ancestors)
        else:
            # "|"-separated enumeration; one entry may carry "(default)".
            for o in self.options['config_options'].split('|'):
                p = re.compile(r"(.*) \(default\)")
                is_default = False
                name = o.strip()
                m = p.match(name)
                if m:
                    name = m.groups(0)[0]
                    name = name.strip()
                    is_default = True
                    found_default = True
                if (name == default):
                    is_default = True
                    found_default = True
                name = self.name + "_" + name
                choice = Feature(name)
                choice.is_range = False
                choice.is_default = is_default
                choice.summarize = ('summarize_options' in self.options)
                self.choices.append(choice)
                self.parsed('feature').append(choice)
                choice.is_config_option = True
                choice.parents = [self]
                choice.ancestors = [self]
                choice.ancestors.extend(self.ancestors)
            if not found_default:
                # No explicit default: the first choice wins.
                self.choices[0].is_default = True
    else:
        self.choices = []

    if field_list is not None:
        defn += field_list

    self.parsed('feature').append(self)
    return [targetnode, deflist]
def process_reftargetslist_nodes(app, doctree, fromdocname):
    """Replace each ``reftargetslist_node`` with a listing of ref targets.

    For every cached document, emits a definition-list entry whose
    definition is a bullet list of (line number, refid) links: one link
    into the .txt source and one into the rendered .html page.  A summary
    paragraph with label counts is appended after the list.
    """
    env = app.builder.env
    etc = env.ext_targets_cache
    cntLabels = 0
    cntAnonLabels = 0
    for node in doctree.traverse(reftargetslist_node):
        # srcdir = folder path to document with the ref-targets-list
        # directive (the node text carries that document's path).
        srcdir = os.path.split(node.astext())[0]
        definition_list = nodes.definition_list(classes=['ref-targets-list'])
        labels = env.domains['std'].data['labels']
        anonlabels = env.domains['std'].data['anonlabels']
        for doc in sorted(etc.keys(), key=keyfunc):
            # Path to the rendered HTML page, relative to srcdir.
            relpath = getRelPath(srcdir, doc).replace('\\', '/')
            relpath = os.path.splitext(relpath)[0] + '.html'
            # Path to the copied reST source under _sources/.
            rstrelpath = os.path.join('_sources', doc)
            rstrelpath = getRelPath(srcdir, rstrelpath).replace('\\', '/')
            rstrelpath = os.path.splitext(rstrelpath)[0] + '.txt'

            bullet_list = nodes.bullet_list(rawsource='', bullet='-')
            for lineno, refid in sorted(etc[doc], key=itemgetter(0)):
                # BUG FIX: dict.has_key() was removed in Python 3; the
                # "in" operator is the portable spelling.
                if refid in labels:
                    flag = 'label'
                    cntLabels += 1
                elif refid in anonlabels:
                    flag = 'anonlabel'
                    cntAnonLabels += 1
                else:
                    flag = None
                if flag:
                    # Link 1: jump to the line in the reST source.
                    linktext1 = '%04d' % lineno
                    refuri1 = '%s?refid=%s&line=%s' % (rstrelpath, refid,
                                                       lineno)
                    reference1 = nodes.reference(text=linktext1,
                                                 internal=True,
                                                 refuri=refuri1,
                                                 classes=['e2'])
                    # Link 2: jump to the target in the rendered HTML;
                    # named labels also get their title as a tooltip.
                    if flag == 'label':
                        linktext2 = ':ref:`%s`' % refid
                        reftitle2 = labels[refid][2]
                    else:
                        linktext2 = ':ref:`... <%s>`' % refid
                        reftitle2 = None
                    refuri2 = '%s#%s' % (relpath, refid)
                    if reftitle2:
                        reference2 = nodes.reference(text=linktext2,
                                                     internal=True,
                                                     refuri=refuri2,
                                                     reftitle=reftitle2,
                                                     classes=['e4'])
                    else:
                        reference2 = nodes.reference(text=linktext2,
                                                     internal=True,
                                                     refuri=refuri2,
                                                     classes=['e4'])
                    paragraph = nodes.paragraph()
                    paragraph.append(nodes.inline(text='[', classes=['e1']))
                    paragraph.append(reference1)
                    paragraph.append(nodes.inline(text='] ', classes=['e3']))
                    paragraph.append(reference2)
                    list_item = nodes.list_item()
                    list_item.append(paragraph)
                    bullet_list.append(list_item)
            # Only documents that contributed at least one target appear.
            if len(bullet_list):
                term = nodes.term(text=doc)
                definition = nodes.definition()
                definition.append(bullet_list)
                definition_list_item = nodes.definition_list_item()
                definition_list_item.append(term)
                definition_list_item.append(definition)
                definition_list.append(definition_list_item)
        summary = ('Summary: %s targets (%s with link text, %s without).'
                   % (cntLabels + cntAnonLabels, cntLabels, cntAnonLabels))
        summaryP = nodes.paragraph(text=summary,
                                   classes=['ref-targets-list-summary'])
        node.replace_self([definition_list, summaryP])
def run(self):
    """Render one mrjob option as a single-item definition list.

    Also records the option's metadata on the build environment so the
    ``optionlist`` traversal can collect and link back to it later.
    """
    env = self.state.document.settings.env

    # Anchor node the option list can point back at.
    targetid = "option-%d" % env.new_serialno('mrjob-opt')
    targetnode = nodes.target('', '', ids=[targetid])

    has_config = 'config' in self.options
    has_switch = 'switch' in self.options

    # Term line: **config_name** (`--switch-a`, `--switch-b`)
    term = nodes.term()
    if has_config:
        cfg = self.options['config']
        term.append(nodes.strong(cfg, cfg))
        if has_switch:
            term.append(nodes.Text(' (', ' ('))
    if has_switch:
        for idx, switch in enumerate(self.options['switch'].split(', ')):
            if idx:
                term.append(nodes.Text(', ', ', '))
            term.append(nodes.literal(switch, switch))
        if has_config:
            term.append(nodes.Text(')', ')'))

    # Classifier: the option's type, parsed for inline markup so it can
    # contain links to the data-type docs.
    classifier = nodes.classifier()
    type_nodes, messages = self.state.inline_text(
        self.options.get('type', ''), self.lineno)
    classifier.extend(type_nodes)

    # Definition body: optional "Default:" paragraph, then the
    # directive content parsed as a nested block.
    body = nodes.definition()
    default_nodes = []
    if 'default' in self.options:
        default_par = nodes.paragraph()
        default_par.append(nodes.strong('Default: ', 'Default: '))
        textnodes, messages = self.state.inline_text(
            self.options['default'], self.lineno)
        default_nodes = textnodes
        default_par.extend(textnodes)
        body.append(default_par)

    desc_par = nodes.paragraph()
    self.state.nested_parse(self.content, self.content_offset, desc_par)
    body.append(desc_par)

    # Assemble the one-item definition list.
    item = nodes.definition_list_item()
    item.append(term)
    item.append(classifier)
    item.append(body)
    option_list = nodes.definition_list()
    option_list.append(item)

    # Stash everything the optionlist traversal needs to find us again.
    if not hasattr(env, 'optionlist_all_options'):
        env.optionlist_all_options = []
    env.optionlist_all_options.append({
        'docname': env.docname,
        'lineno': self.lineno,
        'options': self.options,
        'content': self.content,
        'target': targetnode,
        'type_nodes': [n.deepcopy() for n in type_nodes],
        'default_nodes': [n.deepcopy() for n in default_nodes]
    })

    return [targetnode, option_list]
def run(self):
    """Render a documented CMake command: brief text, a signature box,
    and a definition list describing each parameter.

    Children of the parsed content are sorted into positional, required
    and optional parameter nodes plus free-form output nodes.
    """
    env = self.state.document.settings.env

    # Parse the content of the directive recursively
    node = nodes.Element()
    node.document = self.state.document
    self.state.nested_parse(self.content, self.content_offset, node)

    brief_nodes = []
    output_nodes = []
    positional_params = []
    required_params = {}
    optional_params = {}
    # Classify the parsed children by node type / flags.
    for child in node:
        if isinstance(child, CMakeParamNode):
            if child["positional"]:
                positional_params.append(child)
            elif child["required"]:
                required_params[child["name"]] = child
            else:
                optional_params[child["name"]] = child
        elif isinstance(child, CMakeBriefNode):
            par = nodes.paragraph()
            self.state.nested_parse(child['content'], self.content_offset,
                                    par)
            brief_nodes.append(par)
        else:
            output_nodes.append(child)

    # Both renderers append signature lines to `sl`, which is bound
    # later (closures capture it at call time, not definition time).
    def render_required(paramnode):
        # Required parameters: no surrounding brackets, 5-space indent.
        if paramnode["multi"]:
            sl.append(" " * 5 + paramnode['name'] + ' ' +
                      paramnode['argname'] + '1 [' + paramnode['argname'] +
                      '2 ...]\n')
        if paramnode["single"]:
            sl.append(" " * 5 + paramnode['name'] + ' ' +
                      paramnode['argname'] + '\n')
        if paramnode["option"]:
            sl.append(" " * 5 + paramnode['name'] + '\n')
        if paramnode["special"]:
            sl.append(" " * 5 + paramnode['argname'] + '\n')

    def render_optional(paramnode):
        # Optional parameters: bracketed, 4-space indent.
        if paramnode["multi"]:
            sl.append(' ' * 4 + '[' + paramnode['name'] + ' ' +
                      paramnode['argname'] + '1 [' + paramnode['argname'] +
                      '2 ...]' + ']\n')
        if paramnode["single"]:
            sl.append(" " * 4 + '[' + paramnode['name'] + ' ' +
                      paramnode['argname'] + ']\n')
        if paramnode["option"]:
            sl.append(" " * 4 + '[' + paramnode['name'] + ']\n')
        if paramnode["special"]:
            sl.append(" " * 4 + '[' + paramnode['argname'] + ']\n')

    # Build the content of the box
    sl = [self.arguments[0] + '(\n']
    for paramnode in positional_params:
        if paramnode["required"]:
            render_required(paramnode)
        else:
            render_optional(paramnode)
    for rp, paramnode in required_params.items():
        render_required(paramnode)
    for op, paramnode in optional_params.items():
        render_optional(paramnode)
    sl.append(")\n")
    lb = nodes.literal_block(''.join(sl), ''.join(sl))
    brief_nodes.append(lb)

    # One definition-list entry per parameter, with its content parsed
    # as the definition body.
    dl = nodes.definition_list()
    for paramnode in chain(positional_params, required_params.values(),
                           optional_params.values()):
        dli = nodes.definition_list_item()
        dl += dli
        dlit = nodes.term(text=paramnode["name"])
        dli += dlit
        dlic = nodes.definition()
        dli += dlic
        self.state.nested_parse(paramnode['content'], self.content_offset,
                                dlic)

    # add the parameter list to the output
    brief_nodes.append(dl)

    return brief_nodes + output_nodes
def add_df_item(root, term, *contents): root += nodes.definition_list_item('', nodes.term('', term), nodes.definition('', *contents))
def process_item_nodes(app, doctree, fromdocname):
    """
    This function should be triggered upon ``doctree-resolved event``

    Replace all ItemList nodes with a list of the collected items.
    Augment each item with a backlink to the original location.
    """
    env = app.builder.env

    # Older Sphinx versions run the collection self-test here; newer
    # ones are assumed to do it elsewhere (string comparison of
    # versions — works for these values, though not in general).
    if sphinx_version < '1.6.0':
        try:
            env.traceability_collection.self_test(fromdocname)
        except TraceabilityException as err:
            report_warning(env, err, fromdocname)
        except MultipleTraceabilityExceptions as errs:
            for err in errs.iter():
                report_warning(env, err, err.get_document())

    # Processing of the item-link items.
    for node in doctree.traverse(ItemLink):
        # The ItemLink node has no final representation, so is removed
        # from the tree
        node.replace_self([])

    # Item matrix:
    # Create table with related items, printing their target references.
    # Only source and target items matching respective regexp shall be
    # included
    for node in doctree.traverse(ItemMatrix):
        showcaptions = not node['nocaptions']
        source_ids = env.traceability_collection.get_items(node['source'])
        target_ids = env.traceability_collection.get_items(node['target'])
        top_node = create_top_node(node['title'])
        table = nodes.table()
        tgroup = nodes.tgroup()
        left_colspec = nodes.colspec(colwidth=5)
        right_colspec = nodes.colspec(colwidth=5)
        tgroup += [left_colspec, right_colspec]
        tgroup += nodes.thead(
            '',
            nodes.row(
                '',
                nodes.entry('', nodes.paragraph('', node['sourcetitle'])),
                nodes.entry('', nodes.paragraph('', node['targettitle']))))
        tbody = nodes.tbody()
        tgroup += tbody
        table += tgroup
        # Empty 'type' option means: consider every known relationship.
        relationships = node['type']
        if not relationships:
            relationships = env.traceability_collection.iter_relations()
        count_total = 0
        count_covered = 0
        for source_id in source_ids:
            source_item = env.traceability_collection.get_item(source_id)
            count_total += 1
            covered = False
            row = nodes.row()
            left = nodes.entry()
            left += make_internal_item_ref(app, node, fromdocname,
                                           source_id, showcaptions)
            right = nodes.entry()
            # External targets are matched per relationship by regexp.
            for relationship in relationships:
                if REGEXP_EXTERNAL_RELATIONSHIP.search(relationship):
                    for target_id in source_item.iter_targets(relationship):
                        right += make_external_item_ref(
                            app, target_id, relationship)
                        covered = True
            # Internal targets: any relation between source and target.
            for target_id in target_ids:
                if env.traceability_collection.are_related(
                        source_id, relationships, target_id):
                    right += make_internal_item_ref(app, node, fromdocname,
                                                    target_id, showcaptions)
                    covered = True
            if covered:
                count_covered += 1
            row += left
            row += right
            tbody += row
        try:
            percentage = int(100 * count_covered / count_total)
        except ZeroDivisionError:
            # No source items at all -> report 0% instead of crashing.
            percentage = 0
        disp = 'Statistics: {cover} out of {total} covered: {pct}%'.format(
            cover=count_covered, total=count_total, pct=percentage)
        if node['stats']:
            p_node = nodes.paragraph()
            txt = nodes.Text(disp)
            p_node += txt
            top_node += p_node
        top_node += table
        node.replace_self(top_node)

    # Item attribute matrix:
    # Create table with items, printing their attribute values.
    for node in doctree.traverse(ItemAttributesMatrix):
        docname, lineno = get_source_line(node)
        showcaptions = not node['nocaptions']
        item_ids = env.traceability_collection.get_items(
            node['filter'], sortattributes=node['sort'],
            reverse=node['reverse'])
        top_node = create_top_node(node['title'])
        table = nodes.table()
        tgroup = nodes.tgroup()
        colspecs = [nodes.colspec(colwidth=5)]
        # First header cell stays empty (items column).
        hrow = nodes.row('', nodes.entry('', nodes.paragraph('', '')))
        for attr in node['attributes']:
            colspecs.append(nodes.colspec(colwidth=5))
            p_node = nodes.paragraph()
            if attr in app.config.traceability_attribute_to_string:
                attrstr = app.config.traceability_attribute_to_string[attr]
            else:
                report_warning(
                    env,
                    'Traceability: attribute {attr} cannot be translated to string'
                    .format(attr=attr),
                    docname, lineno)
                attrstr = attr
            p_node += nodes.Text(attrstr)
            hrow.append(nodes.entry('', p_node))
        tgroup += colspecs
        tgroup += nodes.thead('', hrow)
        tbody = nodes.tbody()
        for item_id in item_ids:
            item = env.traceability_collection.get_item(item_id)
            row = nodes.row()
            cell = nodes.entry()
            cell += make_internal_item_ref(app, node, fromdocname, item_id,
                                           showcaptions)
            row += cell
            for attr in node['attributes']:
                cell = nodes.entry()
                p_node = nodes.paragraph()
                txt = item.get_attribute(attr)
                p_node += nodes.Text(txt)
                cell += p_node
                row += cell
            tbody += row
        tgroup += tbody
        table += tgroup
        top_node += table
        node.replace_self(top_node)

    # Item 2D matrix:
    # Create table with related items, printing their target references.
    # Only source and target items matching respective regexp shall be
    # included
    for node in doctree.traverse(Item2DMatrix):
        source_ids = env.traceability_collection.get_items(node['source'])
        target_ids = env.traceability_collection.get_items(node['target'])
        top_node = create_top_node(node['title'])
        table = nodes.table()
        tgroup = nodes.tgroup()
        colspecs = [nodes.colspec(colwidth=5)]
        hrow = nodes.row('', nodes.entry('', nodes.paragraph('', '')))
        # Sources across the top, targets down the side.
        for source_id in source_ids:
            colspecs.append(nodes.colspec(colwidth=5))
            src_cell = make_internal_item_ref(app, node, fromdocname,
                                              source_id, False)
            hrow.append(nodes.entry('', src_cell))
        tgroup += colspecs
        tgroup += nodes.thead('', hrow)
        tbody = nodes.tbody()
        for target_id in target_ids:
            row = nodes.row()
            tgt_cell = nodes.entry()
            tgt_cell += make_internal_item_ref(app, node, fromdocname,
                                               target_id, False)
            row += tgt_cell
            for source_id in source_ids:
                cell = nodes.entry()
                p_node = nodes.paragraph()
                # 'hit'/'miss' markers configured on the directive.
                if env.traceability_collection.are_related(
                        source_id, node['type'], target_id):
                    txt = node['hit']
                else:
                    txt = node['miss']
                p_node += nodes.Text(txt)
                cell += p_node
                row += cell
            tbody += row
        tgroup += tbody
        table += tgroup
        top_node += table
        node.replace_self(top_node)

    # Item list:
    # Create list with target references. Only items matching list regexp
    # shall be included
    for node in doctree.traverse(ItemList):
        item_ids = env.traceability_collection.get_items(
            node['filter'], node['attributes'])
        showcaptions = not node['nocaptions']
        top_node = create_top_node(node['title'])
        ul_node = nodes.bullet_list()
        for i in item_ids:
            bullet_list_item = nodes.list_item()
            p_node = nodes.paragraph()
            p_node.append(
                make_internal_item_ref(app, node, fromdocname, i,
                                       showcaptions))
            bullet_list_item.append(p_node)
            ul_node.append(bullet_list_item)
        top_node += ul_node
        node.replace_self(top_node)

    # Item tree:
    # Create list with target references. Only items matching list regexp
    # shall be included
    for node in doctree.traverse(ItemTree):
        top_item_ids = env.traceability_collection.get_items(node['top'])
        showcaptions = not node['nocaptions']
        top_node = create_top_node(node['title'])
        ul_node = nodes.bullet_list()
        # 'bonsai' class is presumably styled by the theme's JS/CSS tree
        # widget — TODO confirm.
        ul_node.set_class('bonsai')
        for i in top_item_ids:
            # Only render roots; children come via the recursive tree
            # generator.
            if is_item_top_level(env, i, node['top'],
                                 node['top_relation_filter']):
                ul_node.append(
                    generate_bullet_list_tree(app, env, node, fromdocname,
                                              i, showcaptions))
        top_node += ul_node
        node.replace_self(top_node)

    # Resolve item cross references (from ``item`` role)
    for node in doctree.traverse(PendingItemXref):
        # Create a dummy reference to be used if target reference fails
        new_node = make_refnode(app.builder, fromdocname, fromdocname,
                                'ITEM_NOT_FOUND', node[0].deepcopy(),
                                node['reftarget'] + '??')
        # If target exists, try to create the reference
        item_info = env.traceability_collection.get_item(node['reftarget'])
        if item_info:
            if item_info.is_placeholder():
                docname, lineno = get_source_line(node)
                report_warning(
                    env,
                    'Traceability: cannot link to %s, item is not defined'
                    % item_info.get_id(),
                    docname, lineno)
            else:
                try:
                    new_node = make_refnode(app.builder, fromdocname,
                                            item_info.docname,
                                            item_info.node['refid'],
                                            node[0].deepcopy(),
                                            node['reftarget'])
                except NoUri:
                    # ignore if no URI can be determined, e.g. for LaTeX
                    # output :(
                    pass
        else:
            docname, lineno = get_source_line(node)
            report_warning(
                env,
                'Traceability: item %s not found' % node['reftarget'],
                docname, lineno)
        node.replace_self(new_node)

    # Item: replace item nodes, with admonition, list of relationships
    for node in doctree.traverse(Item):
        docname, lineno = get_source_line(node)
        currentitem = env.traceability_collection.get_item(node['id'])
        showcaptions = not node['nocaptions']
        header = currentitem.get_id()
        if currentitem.caption:
            header += ' : ' + currentitem.caption
        top_node = create_top_node(header)
        if app.config.traceability_render_relationship_per_item:
            par_node = nodes.paragraph()
            dl_node = nodes.definition_list()
            # Attributes block: one term with a definition per attribute.
            if currentitem.iter_attributes():
                li_node = nodes.definition_list_item()
                dt_node = nodes.term()
                txt = nodes.Text('Attributes')
                dt_node.append(txt)
                li_node.append(dt_node)
                for attr in currentitem.iter_attributes():
                    dd_node = nodes.definition()
                    p_node = nodes.paragraph()
                    if attr in app.config.traceability_attribute_to_string:
                        attrstr = app.config.traceability_attribute_to_string[
                            attr]
                    else:
                        report_warning(
                            env,
                            'Traceability: attribute {attr} cannot be translated to string'
                            .format(attr=attr),
                            docname, lineno)
                        attrstr = attr
                    txt = nodes.Text('{attr}: {value}'.format(
                        attr=attrstr,
                        value=currentitem.get_attribute(attr)))
                    p_node.append(txt)
                    dd_node.append(p_node)
                    li_node.append(dd_node)
                dl_node.append(li_node)
            # One block per relationship that has targets.
            for rel in env.traceability_collection.iter_relations():
                tgts = currentitem.iter_targets(rel)
                if tgts:
                    li_node = nodes.definition_list_item()
                    dt_node = nodes.term()
                    if rel in app.config.traceability_relationship_to_string:
                        relstr = app.config.traceability_relationship_to_string[
                            rel]
                    else:
                        report_warning(
                            env,
                            'Traceability: relation {rel} cannot be translated to string'
                            .format(rel=rel),
                            docname, lineno)
                        relstr = rel
                    txt = nodes.Text(relstr)
                    dt_node.append(txt)
                    li_node.append(dt_node)
                    for tgt in tgts:
                        dd_node = nodes.definition()
                        p_node = nodes.paragraph()
                        if REGEXP_EXTERNAL_RELATIONSHIP.search(rel):
                            link = make_external_item_ref(app, tgt, rel)
                        else:
                            link = make_internal_item_ref(
                                app, node, fromdocname, tgt, showcaptions)
                        p_node.append(link)
                        dd_node.append(p_node)
                        li_node.append(dd_node)
                    dl_node.append(li_node)
            par_node.append(dl_node)
            top_node.append(par_node)
        # Note: content should be displayed during read of RST file, as it
        # contains other RST objects
        node.replace_self(top_node)
def run(self):
    """Parse the directive body as a glossary (older Sphinx variant).

    Besides building the definition list, registers every term as a
    'term' cross-reference target in the std domain and adds an index
    entry for it.
    """
    env = self.state.document.settings.env
    objects = env.domaindata['std']['objects']
    # Set of ids already handed out, to keep term anchors unique.
    gloss_entries = env.temp_data.setdefault('gloss_entries', set())
    node = addnodes.glossary()
    node.document = self.state.document

    # This directive implements a custom format of the reST definition list
    # that allows multiple lines of terms before the definition.  This is
    # easy to parse since we know that the contents of the glossary *must
    # be* a definition list.

    # first, collect single entries
    entries = []
    in_definition = True
    was_empty = True
    messages = []
    # self.content.items carries the (source, lineno) pair for each line.
    for line, (source, lineno) in zip(self.content, self.content.items):
        # empty line -> add to last definition
        if not line:
            if in_definition and entries:
                entries[-1][1].append('', source, lineno)
            was_empty = True
            continue
        # unindented line -> a term
        if line and not line[0].isspace():
            # first term of definition
            if in_definition:
                if not was_empty:
                    messages.append(self.state.reporter.system_message(
                        2, 'glossary term must be preceded by empty line',
                        source=source, line=lineno))
                entries.append(([(line, source, lineno)], ViewList()))
                in_definition = False
            # second term and following
            else:
                if was_empty:
                    messages.append(self.state.reporter.system_message(
                        2, 'glossary terms must not be separated by empty '
                        'lines', source=source, line=lineno))
                if entries:
                    entries[-1][0].append((line, source, lineno))
                else:
                    messages.append(self.state.reporter.system_message(
                        2, 'glossary seems to be misformatted, check '
                        'indentation', source=source, line=lineno))
        else:
            if not in_definition:
                # first line of definition, determines indentation
                in_definition = True
                indent_len = len(line) - len(line.lstrip())
            if entries:
                entries[-1][1].append(line[indent_len:], source, lineno)
            else:
                messages.append(self.state.reporter.system_message(
                    2, 'glossary seems to be misformatted, check '
                    'indentation', source=source, line=lineno))
        was_empty = False

    # now, parse all the entries into a big definition list
    items = []
    for terms, definition in entries:
        termtexts = []
        termnodes = []
        system_messages = []
        ids = []
        for line, source, lineno in terms:
            # parse the term with inline markup
            res = self.state.inline_text(line, lineno)
            system_messages.extend(res[1])

            # get a text-only representation of the term and register it
            # as a cross-reference target
            tmp = nodes.paragraph('', '', *res[0])
            termtext = tmp.astext()
            new_id = 'term-' + nodes.make_id(termtext)
            if new_id in gloss_entries:
                # fall back to a numeric id on collision
                new_id = 'term-' + str(len(gloss_entries))
            gloss_entries.add(new_id)
            ids.append(new_id)
            objects['term', termtext.lower()] = env.docname, new_id
            termtexts.append(termtext)
            # add an index entry too
            indexnode = addnodes.index()
            indexnode['entries'] = [('single', termtext, new_id, 'main')]
            termnodes.append(indexnode)
            termnodes.extend(res[0])
            termnodes.append(addnodes.termsep())
        # make a single "term" node with all the terms, separated by termsep
        # nodes (remove the dangling trailing separator)
        term = nodes.term('', '', *termnodes[:-1])
        term['ids'].extend(ids)
        term['names'].extend(ids)
        term += system_messages

        defnode = nodes.definition()
        # definition.items[0][1] is the line offset of the first
        # definition line, used as the parse origin.
        self.state.nested_parse(definition, definition.items[0][1],
                                defnode)

        items.append((termtexts,
                      nodes.definition_list_item('', term, defnode)))

    if 'sorted' in self.options:
        # NFD normalization sorts accented terms next to their base letters.
        items.sort(key=lambda x:
                   unicodedata.normalize('NFD', x[0][0].lower()))

    dlist = nodes.definition_list()
    dlist['classes'].append('glossary')
    dlist.extend(item[1] for item in items)
    node += dlist
    return messages + [node]
def generate_properties(self, node, properties): """ Add the properties to the node """ for name, property in properties.items(): default = property.get('default') type = property.get('type') info = '**type:** :cfy:datatype:`{}`'.format(type) if type else '' if default is not None: if default != '': info += ' **default:** ``{}``'.format(property['default']) elif property.get('required', True): info += ' **required**' try: description = property['description'] except KeyError: if type in { 'string', 'boolean', 'list', 'integer', None, }: # only custom defined types are allowed to not have a # description self.state.document.settings.env.app.warn( '{type} property {name} has no description'.format( type=self.arguments[0], name=name, )) description = '' lines = ViewList(prepare_docstring( info + '\n\n' + description + '\n\n')) term = nodes.term('', name) definition = nodes.definition() self.state.nested_parse( lines, self.content_offset + 4, definition, ) if type not in [ 'string', 'boolean', 'list', 'integer', ]: # Try tp get the nested properties of the type data_type = types.get('data_types', {}).get(type) if data_type: sub_props = nodes.definition_list() definition.append(sub_props) self.generate_properties( sub_props, data_type['properties'] ) node.append(nodes.definition_list_item( '', term, definition, ))
def contribute_property(self, prop_list, prop_key, prop): prop_item = nodes.definition_list_item( '', nodes.term('', prop_key)) prop_list.append(prop_item) prop_item.append(nodes.classifier('', prop.type)) definition = nodes.definition() prop_item.append(definition) if prop.support_status.status != support.SUPPORTED: sstatus = prop.support_status.to_dict() msg = _('%(status)s') if sstatus['message'] is not None: msg = _('%(status)s - %(message)s') para = nodes.inline('', msg % sstatus) warning = nodes.note('', para) definition.append(warning) if not prop.implemented: para = nodes.inline('', _('Not implemented.')) warning = nodes.note('', para) definition.append(warning) return if prop.description: para = nodes.paragraph('', prop.description) definition.append(para) if prop.update_allowed: para = nodes.paragraph('', _('Can be updated without replacement.')) definition.append(para) else: para = nodes.paragraph('', _('Updates cause replacement.')) definition.append(para) if prop.required: para = nodes.paragraph('', _('Required property.')) elif prop.default is not None: para = nodes.paragraph( '', _('Optional property, defaults to "%s".') % prop.default) else: para = nodes.paragraph('', _('Optional property.')) definition.append(para) for constraint in prop.constraints: para = nodes.paragraph('', str(constraint)) definition.append(para) sub_schema = None if prop.schema and prop.type == properties.Schema.MAP: para = nodes.emphasis('', _('Map properties:')) definition.append(para) sub_schema = prop.schema elif prop.schema and prop.type == properties.Schema.LIST: para = nodes.emphasis( '', _('List contents:')) definition.append(para) sub_schema = prop.schema if sub_schema: sub_prop_list = nodes.definition_list() definition.append(sub_prop_list) for sub_prop_key, sub_prop in sorted(sub_schema.items(), self.cmp_prop): self.contribute_property( sub_prop_list, sub_prop_key, sub_prop)
def run(self): event = self.arguments[0] anchor = event.lower().replace('_', '-') kind = self.options.get('type') inType = self.options.get('in') outType = self.options.get('out') or 'void' subject = self.options.get('subject') params = self.options.get('params') or '' since = self.options.get('since') or '' desc = u'\n'.join(self.content) # create section # optionally insert zero-width breaks: # event.replace('_', u"_\u200B") sec = nodes.section() sec.append(nodes.title('', event)) sec['names'].append(anchor) self.state.document.note_implicit_target(sec, sec) # the signature sig = '%s %s(%s)' % (outType, event, inType) if kind == 'until': sig += ' BREAKS' # additional params for this event paramlist = None if len(params) > 0: paramlist = self._buildParamList(nodes.bullet_list(), params) # create actual definition list dl = nodes.definition_list('', nodes.definition_list_item('', nodes.term('', '', nodes.strong('', 'Signatur:')), nodes.definition('', nodes.literal('', sig)) ), nodes.definition_list_item('', nodes.term('', '', nodes.strong('', 'Beschreibung:')), nodes.definition('', self._parseInline(desc)) ), nodes.definition_list_item('', nodes.term('', '', nodes.strong('', 'Subject:')), nodes.definition('', self._parseInline(subject)) ) ) if paramlist: dl.append(nodes.definition_list_item('', nodes.term('', '', nodes.strong('', 'Weitere Parameter:')), nodes.definition('', paramlist) )) if len(since) > 0: since = 'v%s' % since dl.append(nodes.definition_list_item('', nodes.term('', '', nodes.strong('', u'Hinzugefügt in:')), nodes.definition('', self._parseInline(since)) )) sec.append(dl) return [sec]
def run(self):
    """Document a colander schema class.

    Emits the parsed directive content as a description, then a
    definition list with one entry per schema attribute (name, type and
    required/optional classifiers, description, default), and finally a
    paragraph naming the original class.
    """
    content = []

    # First, add a short description supplied by the directive.
    text = '\n'.join(self.content)
    text_node = nodes.paragraph(rawsource=text)
    # Parse the directive contents.
    self.state.nested_parse(self.content, self.content_offset, text_node)
    content.append(text_node)

    klass_name = self.arguments[0]
    klass = get_class(klass_name)
    definition_list = nodes.definition_list()

    # Now, go over the class and find and interrogate all the properties.
    # (FIX: dropped the unused local ``raw_text``.)
    for attribute in klass.__class_schema_nodes__:
        list_item = nodes.definition_list_item()
        # TODO: Make it parse in restructuredtext from the nodes themselves.
        term = nodes.term(text=attribute.name)

        # Add two classifiers; one for the type and the other
        # if it's required or optional.
        node_type = nodes.classifier(text=attribute.typ.__class__.__name__)
        required_text = 'Optional'
        if attribute.required:
            required_text = 'Required'
        required = nodes.classifier(text=required_text)

        # Set up the description, adding in full stops if needed.
        definition = nodes.definition()
        description_text = attribute.title
        if not attribute.title.endswith('.'):
            description_text += '.'
        description_text += ' ' + attribute.description
        if not description_text.endswith('.'):
            description_text += '.'
        description = nodes.paragraph(text=description_text)
        definition += description

        if attribute.default != colander.null:
            # There is a default set. Add it.
            if isinstance(attribute.default, bool):
                # Convert boolean's str() output to lowercase.
                # Why? In yaml it must be lowercase, so having it in the
                # docs as uppercase is confusing to users.
                default_text = "Default value: %s" % str(
                    attribute.default).lower()
            else:
                default_value = str(attribute.default)
                if len(default_value) == 0:
                    default_value = "''"
                default_text = "Default value: %s" % default_value
            default = nodes.paragraph(text=default_text)
            definition += default

        list_item += term
        list_item += node_type
        list_item += required
        list_item += definition
        definition_list += list_item

    content.append(definition_list)
    content.append(nodes.paragraph(text="Original class: %s" % klass))
    return content
def visit_dd(self, node):
    """Translate an HTML ``<dd>`` element into a docutils definition node.

    Any HTML entities in the element's text are unescaped in place; a
    fresh ``nodes.definition`` container is returned.
    """
    text = node.text
    if text:
        node.text = html.unescape(text)
    return nodes.definition()
def run(self):
    """Render a meta-ini command.

    Produces a usage literal block, a definition list documenting every
    argument, and any remaining parsed child nodes, all preceded by a
    target section so the command can be referenced.
    """
    # Parse the content of the directive recursively.
    # (FIX: dropped the unused local ``env``.)
    node = nodes.Element()
    node.document = self.state.document
    self.state.nested_parse(self.content, self.content_offset, node)

    # Define defaults for this command node.
    node['name'] = self.arguments[0]
    node['operates_on_value'] = self.options.get('operates_on_value', False)
    node['content'] = self.content

    # Split the parsed children into argument nodes and everything else.
    arg_nodes = []
    other_nodes = []
    required_params = {}
    optional_params = {}
    for child in node:
        if isinstance(child, MetaIniArgNode):
            if child["required"]:
                required_params[child["name"]] = child
            else:
                optional_params[child["name"]] = child
        else:
            other_nodes.append(child)

    # Build the content of the usage box.
    if node["operates_on_value"]:
        prefix = '<value> | '
    else:
        prefix = '<key> = <value> | '
    sl = [prefix + self.arguments[0] + ' ']
    # FIX: iterate .values() — the keys were bound but never used.
    for paramnode in required_params.values():
        if paramnode["multi"]:
            sl.append('<' + paramnode['name'] + '1 [' +
                      paramnode['name'] + '2 ...]' + '> ')
        if paramnode["single"]:
            sl.append('<' + paramnode['name'] + '> ')
    for paramnode in optional_params.values():
        if paramnode["multi"]:
            sl.append('[<' + paramnode['name'] + '1 [' +
                      paramnode['name'] + '2 ...]' + '>] ')
        if paramnode["single"]:
            sl.append('[<' + paramnode['name'] + '>] ')
    lb = nodes.literal_block(''.join(sl), ''.join(sl))
    arg_nodes.append(lb)

    # Provide a definition list for the arguments.
    dl = nodes.definition_list()
    for param, paramnode in chain(required_params.items(),
                                  optional_params.items()):
        dli = nodes.definition_list_item()
        dl += dli
        dlit = nodes.term(text=param)
        dli += dlit
        dlic = nodes.definition()
        dli += dlic
        self.state.nested_parse(paramnode['content'], self.content_offset,
                                dlic)
    # Add the parameter list to the output.
    arg_nodes.append(dl)

    # Add a target for referencing!
    section = nodes.section(names=[node['name']])
    section += nodes.subtitle(text="The " + node['name'] + " command")
    return [section] + arg_nodes + other_nodes
def run(self):
    """Parse the glossary directive body into a glossary node.

    This directive implements a custom format of the reST definition list
    that allows multiple lines of terms before the definition.  This is
    easy to parse since we know that the contents of the glossary *must
    be* a definition list.

    Returns the accumulated system messages followed by the glossary node.
    """
    env = self.state.document.settings.env

    node = addnodes.glossary()
    node.document = self.state.document

    # first, collect single entries
    # Each entry is a ([(term_line, source, lineno), ...], ViewList) pair:
    # one or more term lines followed by the (indented) definition body.
    entries = []
    # State machine flags: are we currently inside a definition body, and
    # was the previous line empty?
    in_definition = True
    was_empty = True
    messages = []
    for line, (source, lineno) in zip(self.content, self.content.items):
        # empty line -> add to last definition
        if not line:
            if in_definition and entries:
                entries[-1][1].append('', source, lineno)
            was_empty = True
            continue
        # unindented line -> a term
        if line and not line[0].isspace():
            # enable comments
            if line.startswith('.. '):
                continue
            # first term of definition
            if in_definition:
                if not was_empty:
                    messages.append(self.state.reporter.system_message(
                        2, 'glossary term must be preceded by empty line',
                        source=source, line=lineno))
                entries.append(([(line, source, lineno)], ViewList()))
                in_definition = False
            # second term and following
            else:
                if was_empty:
                    messages.append(self.state.reporter.system_message(
                        2, 'glossary terms must not be separated by empty '
                        'lines', source=source, line=lineno))
                if entries:
                    entries[-1][0].append((line, source, lineno))
                else:
                    messages.append(self.state.reporter.system_message(
                        2, 'glossary seems to be misformatted, check '
                        'indentation', source=source, line=lineno))
        else:
            if not in_definition:
                # first line of definition, determines indentation
                in_definition = True
                indent_len = len(line) - len(line.lstrip())
            if entries:
                # Strip the common indent so the body can be nested-parsed.
                entries[-1][1].append(line[indent_len:], source, lineno)
            else:
                messages.append(self.state.reporter.system_message(
                    2, 'glossary seems to be misformatted, check '
                    'indentation', source=source, line=lineno))
        was_empty = False

    # now, parse all the entries into a big definition list
    items = []
    for terms, definition in entries:
        termtexts = []
        termnodes = []
        system_messages = []
        ids = []
        for line, source, lineno in terms:
            # parse the term with inline markup
            res = self.state.inline_text(line, lineno)
            system_messages.extend(res[1])
            # get a text-only representation of the term and register it
            # as a cross-reference target
            tmp = nodes.paragraph('', '', *res[0])
            tmp.source = source
            tmp.line = lineno
            new_id, termtext, new_termnodes = \
                make_termnodes_from_paragraph_node(env, tmp)
            ids.append(new_id)
            termtexts.append(termtext)
            termnodes.extend(new_termnodes)
        # One term node carries all alternative spellings and their ids.
        term = make_term_from_paragraph_node(termnodes, ids)
        term += system_messages

        defnode = nodes.definition()
        if definition:
            # Parse the collected body at its original source position.
            self.state.nested_parse(definition, definition.items[0][1],
                                    defnode)
        items.append((termtexts,
                      nodes.definition_list_item('', term, defnode)))

    # Sort entries by their first term (accent-insensitive via NFD).
    if 'sorted' in self.options:
        items.sort(key=lambda x:
                   unicodedata.normalize('NFD', x[0][0].lower()))

    dlist = nodes.definition_list()
    dlist['classes'].append('glossary')
    dlist.extend(item[1] for item in items)
    node += dlist
    return messages + [node]
def run(self):
    """Build an admonition listing core extensions as a definition list.

    Filters ``extension_json['extensions']`` to core extensions (and, when
    the ``list`` option is given, to a single category), linking each name
    to its extension page and following it with its description.  A hidden
    "community extensions" paragraph is appended at the end.
    """
    extension_list_name = self.options.pop('list', '')
    set_classes(self.options)
    admonition_node = nodes.admonition('', **self.options)
    self.add_name(admonition_node)

    title_text = self.arguments[0]
    textnodes, _ = self.state.inline_text(title_text, self.lineno)
    title = nodes.title(title_text, '', *textnodes)
    title.line = 0
    title.source = 'extension_list_' + extension_list_name
    admonition_node += title

    # FIX: idiomatic membership test (was ``not 'classes' in ...``).
    if 'classes' not in self.options:
        admonition_node['classes'] += ['admonition', 'note']
    admonition_node['classes'] += ['extension_list']
    admonition_node['ids'] += ['extensionlist-' + extension_list_name]

    definition_list = nodes.definition_list()
    definition_list.line = 0

    # FIX: initialize ``num`` so the community paragraph below does not
    # raise NameError when the extensions list is empty.
    num = 0
    for num, extension in enumerate(extension_json['extensions']):
        if not extension.get('core'):
            continue
        category = extension.get('category')
        if extension_list_name and category != extension_list_name:
            continue

        name = extension['name']['en']
        description = extension['description']['en']
        some_term, _ = self.state.inline_text(name, self.lineno)
        some_def, _ = self.state.inline_text(description, self.lineno)

        # Link the extension name to its page, relative to the docs root.
        link = nodes.reference(name, '', *some_term)
        path_split = self.state.document.attributes['source'].split('/')
        root_path = "../" * (len(path_split) - path_split.index('docs') - 2)
        link['refuri'] = root_path + 'extensions/' + extension.get(
            'slug', '')
        link['translatable'] = True
        link.source = 'extension_list_' + extension_list_name
        link.line = num + 1

        term = nodes.term(name, '', link)
        definition_list += term

        text = nodes.paragraph(description, '', *some_def)
        text.source = 'extension_list_' + extension_list_name
        text.line = num + 1
        definition_list += nodes.definition(description, text)

    admonition_node += definition_list

    community = "The following are community extensions and are not maintained by Open Contracting Partnership."
    community_text, _ = self.state.inline_text(community, self.lineno)
    community_paragraph = nodes.paragraph(community, *community_text)
    community_paragraph['classes'] += ['hide']
    community_paragraph.source = 'extension_list_' + extension_list_name
    community_paragraph.line = num + 2
    admonition_node += community_paragraph

    return [admonition_node]
def run(self):
    """Build a test definition: a target node plus a definition list with
    the test's parameters and option fields.

    Registers the test (and, when built implicitly, its procedure) via
    ``self.parsed(...)`` so they can be collected later.
    """
    # Tests don't have to have contents now, so check for it but don't
    # assert.  (FIX: dropped the unused ``text`` local that joined
    # self.content without ever being read.)
    self.document = self.state_machine.document

    self.name = self.arguments[0]
    if 'test_time' in self.options:
        self.test_time = self.options['test_time']

    # Either reference an existing test procedure by name, or build an
    # implicit one from the directive contents.
    if 'test_procedure' in self.options:
        self.test_procedure = self.options['test_procedure']
    else:
        self.assert_has_content()
        proc = TestProcedure(self.name + "_procedure")
        self.test_procedure = proc.name
        if 'setup' in self.options:
            proc.setup = self.options['setup']
        else:
            proc.setup = ""
        proc.content = self.content
        self.parsed('test_procedure').append(proc)

    term = nodes.term()
    term += nodes.strong(text=self.arguments[0])
    targetnode = self.make_targetnode()

    deflist = nodes.definition_list()
    test_def = nodes.definition_list_item()
    test_def += term
    defn = nodes.definition()
    test_def += defn
    deflist += test_def

    if 'parameters' in self.options:
        params = self.parse_parameters()
        defn += nodes.paragraph(text="Parameters:")
        for param in params:
            name = param['param']
            field_list = nodes.field_list()
            param_field = nodes.field()
            param_field_name = nodes.field_name()
            param_field_name += nodes.Text(name)
            param_field += param_field_name
            param_field_body = nodes.field_body()
            choices_str = param['choices_str']
            # Short choice lists fit in one paragraph; long ones get one
            # paragraph per choice, with a LaTeX line break first.
            # (FIX: dropped redundant parentheses around the condition.)
            if len(choices_str) < 50:
                param_field_body += nodes.paragraph(text=choices_str)
            else:
                choices = param['choices']
                param_field_body += nodes.raw('', ' \\ \n\n', format="latex")
                for choice in choices:
                    param_field_body += nodes.paragraph(text=choice)
            param_field += param_field_body
            field_list += param_field

            # Register an explicit cross-reference target per parameter.
            name = self.arguments[0].strip() + "param" + name
            param_target = nodes.target('', '', ids=[nodes.make_id(name)])
            name = nodes.fully_normalize_name(name)
            param_target['names'].append(name)
            self.state_machine.document.note_explicit_target(
                param_target, param_target)
            defn += param_target
            defn += field_list

    # Parse the directive contents.
    self.state.nested_parse(self.content, self.content_offset, defn)

    # Map option keys to human-readable field labels.
    option_map = {}
    option_map['configurations'] = 'Valid configurations'
    option_map['setup'] = 'Required setup'
    option_map['test_time'] = 'Test time (min)'
    option_map['priority'] = 'Priority'
    option_map['test_procedure'] = 'Test procedure'
    field_list = self.options_to_field_list(option_map)
    # FIX: identity comparison with None (was ``!= None``).
    if field_list is not None:
        defn += field_list

    self.parsed('test').append(self)
    return [targetnode, deflist]
def after_contentnode(self, node):
    """Append a 'Derived from' cross-reference and, when present, a
    definition list of the type's properties to *node*.
    """
    # derived_from:
    parent = self.data['derived_from']
    xref = addnodes.pending_xref(
        '',
        refdomain='cfy',
        reftype=self.kind,
        reftarget=parent,
        modname=None,
        classname=None,
    )
    xref += nodes.Text(parent, parent)
    node.append(
        nodes.paragraph(
            'Derived from: ',
            'Derived from: ',
            xref,
        ))

    if 'properties' not in self.data:
        return

    node.append(nodes.rubric('', 'Properties:'))
    props = nodes.definition_list()
    node.append(props)

    # Renamed from ``property`` to avoid shadowing the builtin.
    for name, prop_spec in self.data['properties'].items():
        try:
            desc = prop_spec['description']
        except KeyError:
            # Surface which property is missing documentation, then fail.
            print('{type} property {name} has no description'.format(
                type=self.arguments[0],
                name=name,
            ))
            print(get_doc(self.arguments[0].strip(), name))
            raise

        # Annotate the description with default / required markers.
        info = ''
        default = prop_spec.get('default', None)
        if default is not None:
            if default != '':
                info += ' **default:** {}'.format(prop_spec['default'])
        elif prop_spec.get('required', True):
            info += ' **required**'

        term = nodes.term('', name)
        lines = ViewList(
            prepare_docstring(info + '\n\n' + desc + '\n\n'))
        definition = nodes.definition()
        self.state.nested_parse(
            lines,
            self.content_offset + 4,
            definition,
        )
        props.append(nodes.definition_list_item(
            '', term, definition,
        ))
def run(self):
    """Build an admonition listing the pinned core extensions.

    Resolves each ``extension_versions`` pin from ``conf.py`` against the
    extension registry, optionally filters by the ``list`` (category)
    option, and renders name-link/description pairs into a definition
    list.  A hidden "community extensions" paragraph is appended.
    """
    config = self.state.document.settings.env.config
    extension_versions = config.extension_versions
    language = config.overrides.get('language', 'en')

    extension_list_name = self.options.pop('list', '')
    set_classes(self.options)
    admonition_node = nodes.admonition('', **self.options)
    self.add_name(admonition_node)

    title_text = self.arguments[0]
    textnodes, _ = self.state.inline_text(title_text, self.lineno)
    title = nodes.title(title_text, '', *textnodes)
    title.line = 0
    title.source = 'extension_list_' + extension_list_name
    admonition_node += title

    if 'classes' not in self.options:
        admonition_node['classes'] += ['admonition', 'note']
    admonition_node['classes'] += ['extension_list']
    admonition_node['ids'] += ['extensionlist-' + extension_list_name]

    definition_list = nodes.definition_list()
    definition_list.line = 0

    # Only list core extensions whose version matches the version specified
    # in `conf.py` and whose category matches the category specified by the
    # directive's `list` option.
    registry = ExtensionRegistry(extension_versions_url, extensions_url)
    num = 0
    for identifier, version in extension_versions.items():
        extension = registry.get(id=identifier, core=True, version=version)
        if extension_list_name and extension.category != extension_list_name:
            continue

        # Avoid "403 Client Error: rate limit exceeded for url" on
        # development branches.
        try:
            metadata = extension.metadata
        except requests.exceptions.HTTPError:
            if live_branch:
                raise
            metadata = {
                'name': {
                    'en': identifier
                },
                'description': {
                    'en': identifier
                }
            }

        name = metadata['name']['en']
        description = metadata['description']['en']
        some_term, _ = self.state.inline_text(name, self.lineno)
        some_def, _ = self.state.inline_text(description, self.lineno)

        link = nodes.reference(name, '', *some_term)
        link['refuri'] = extension_explorer_template.format(
            language, identifier, version)
        link['translatable'] = True
        link.source = 'extension_list_' + extension_list_name
        link.line = num + 1

        term = nodes.term(name, '', link)
        definition_list += term

        text = nodes.paragraph(description, '', *some_def)
        text.source = 'extension_list_' + extension_list_name
        text.line = num + 1
        definition_list += nodes.definition(description, text)

        # FIX: advance the per-entry counter — it was initialized but never
        # incremented, so every entry reported line 1.
        num += 1

    if extension_list_name and not registry.filter(
            category=extension_list_name):
        raise self.warning(
            f'No extensions have category {extension_list_name} in extensionlist directive'
        )

    admonition_node += definition_list

    community = "The following are community extensions and are not maintained by Open Contracting Partnership."
    community_text, _ = self.state.inline_text(community, self.lineno)
    community_paragraph = nodes.paragraph(community, *community_text)
    community_paragraph['classes'] += ['hide']
    community_paragraph.source = 'extension_list_' + extension_list_name
    community_paragraph.line = num + 2
    admonition_node += community_paragraph

    return [admonition_node]
def contribute_property(self, prop_list, prop_key, prop, upd_para=None):
    """Append a definition-list entry documenting one resource property.

    :param prop_list: ``nodes.definition_list`` to append to
    :param prop_key: property name, used as the term text
    :param prop: property schema object (from the ``properties`` module)
    :param upd_para: optional pre-built update-policy paragraph; when given
        it is reused so sub-properties inherit the parent's update policy
    """
    prop_item = nodes.definition_list_item(
        '', nodes.term('', prop_key))
    prop_list.append(prop_item)

    prop_item.append(nodes.classifier('', prop.type))

    definition = nodes.definition()
    prop_item.append(definition)

    self._status_str(prop.support_status, definition)

    # Unimplemented properties get only the note; nothing else is documented.
    if not prop.implemented:
        para = nodes.paragraph('', _('Not implemented.'))
        note = nodes.note('', para)
        definition.append(note)
        return

    if prop.description:
        para = nodes.paragraph('', prop.description)
        definition.append(para)

    if upd_para is not None:
        definition.append(upd_para)
    else:
        if prop.update_allowed:
            upd_para = nodes.paragraph(
                '', _('Can be updated without replacement.'))
            definition.append(upd_para)
        elif prop.immutable:
            upd_para = nodes.paragraph('', _('Updates are not supported. '
                                             'Resource update will fail on'
                                             ' any attempt to update this '
                                             'property.'))
            definition.append(upd_para)
        else:
            upd_para = nodes.paragraph('', _('Updates cause replacement.'))
            definition.append(upd_para)

    if prop.default is not None:
        para = nodes.paragraph('', _('Defaults to "%s".') % prop.default)
        definition.append(para)

    for constraint in prop.constraints:
        para = nodes.paragraph('', str(constraint))
        definition.append(para)

    sub_schema = None
    if prop.schema and prop.type == properties.Schema.MAP:
        para = nodes.paragraph()
        emph = nodes.emphasis('', _('Map properties:'))
        para.append(emph)
        definition.append(para)
        sub_schema = prop.schema
    elif prop.schema and prop.type == properties.Schema.LIST:
        para = nodes.paragraph()
        emph = nodes.emphasis('', _('List contents:'))
        para.append(emph)
        definition.append(para)
        sub_schema = prop.schema

    if sub_schema:
        sub_prop_list = nodes.definition_list()
        definition.append(sub_prop_list)
        # FIX: Python 3's sorted() has no positional ``cmp`` argument, so
        # ``sorted(..., self.cmp_prop)`` raises TypeError there.  Adapt the
        # comparison function with functools.cmp_to_key, which also exists
        # on Python 2.7, so the change is backward-compatible.
        from functools import cmp_to_key
        for sub_prop_key, sub_prop in sorted(sub_schema.items(),
                                             key=cmp_to_key(self.cmp_prop)):
            if sub_prop.support_status.status != support.HIDDEN:
                self.contribute_property(
                    sub_prop_list, sub_prop_key, sub_prop, upd_para)
def make_document(self, doc_strings):
    """make doctree representation of collected fragments

    ``doc_strings`` items look like (name, ?, element_list) — the plugin
    name is ``doc[0]`` and its docstring's parsed elements are ``doc[2]``;
    ``doc[1]`` is unused here.  Builds one section per plugin, plus an
    optional contents topic and an optional alphabetical summary list.
    """
    opt = self.opt
    # Start from an empty doctree and grow it.
    big_doc = publish_doctree("")
    self.document = big_doc
    big_doc += nodes.title(text="Plugins listing generated %s" %
                           time.asctime())
    contents = nodes.container()
    if opt.include_contents:
        big_doc += nodes.topic('', nodes.title(text='Contents'), contents)
    if not opt.no_summary:
        # Summary section: a quick A-Z index paragraph plus a definition
        # list of plugin name -> first paragraph.
        def_list = nodes.definition_list()
        alpha_list = nodes.paragraph()
        big_doc += nodes.section('', nodes.title(text="Plugins summary"),
                                 alpha_list, def_list)
    last_alpha = ''
    for doc in doc_strings:
        section = nodes.section()
        big_doc += section
        section += nodes.title(text=doc[0])
        self.add_ids(section)
        if not opt.no_summary:
            firstpara = (self.first_text(doc[2]) or
                         nodes.paragraph(text='No summary found'))
            reference = nodes.reference('', refid=section['ids'][0],
                                        name=doc[0], anonymous=1)
            reference += nodes.Text(doc[0])
            def_list += nodes.definition_list_item(
                '', nodes.term('', '', reference),
                nodes.definition('', firstpara))
            # add letter quick index entry if needed
            if doc[0][0].upper() != last_alpha:
                last_alpha = doc[0][0].upper()
                self.add_ids(reference)
                alpha_list += nodes.reference('',
                                              nodes.Text(last_alpha + ' '),
                                              refid=reference['ids'][0],
                                              name=doc[0], anonymous=1)
        for element in doc[2]:
            # if the docstring has titles, we need another level
            # (re-point ``section`` at a subsection so the plugin's own
            # titled structure nests below the plugin heading)
            if element.tagname == 'title':
                subsection = nodes.section()
                section += subsection
                section = subsection
                break
        for element in doc[2]:
            try:
                section += element.deepcopy()
            except TypeError:
                err('Element.deepcopy() failed, dropped element for %s\n'
                    % doc[0])
    if opt.include_contents:
        # NOTE(review): the Contents transform fills the ``contents``
        # container attached to the topic above; add_ids first so every
        # section is referenceable — confirm this branch's exact extent
        # against upstream, the collapsed source is ambiguous here.
        contents.details = {'text': 'Contents here'}
        self.add_ids(big_doc)
        transform = Contents(big_doc, contents)
        transform.apply()
    return big_doc