Example #1
def print_command_args_and_opts(arg_list, opt_list, sub_list=None):
    items = []
    if arg_list:
        items.append(nodes.definition_list_item(
            '', nodes.term(text='Positional arguments:'),
            nodes.definition('', arg_list)))
    if opt_list:
        items.append(nodes.definition_list_item(
            '', nodes.term(text='Options:'),
            nodes.definition('', opt_list)))
    if sub_list and len(sub_list):
        items.append(nodes.definition_list_item(
            '', nodes.term(text='Sub-commands:'),
            nodes.definition('', sub_list)))
    return nodes.definition_list('', *items)
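Examples #1 and #8 both wrap pre-built child nodes in term/definition pairs. A minimal, self-contained sketch of that node structure (not taken from any project listed here; the helper name and the entries argument are illustrative) looks like this:

from docutils import nodes

def make_plain_definition_list(entries):
    # entries: iterable of (term_text, definition_text) pairs (hypothetical input).
    items = []
    for term_text, definition_text in entries:
        items.append(nodes.definition_list_item(
            '', nodes.term(text=term_text),
            nodes.definition('', nodes.paragraph(text=definition_text))))
    # The definition_list is just the container for the items built above.
    return nodes.definition_list('', *items)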
Example #2
    def run(self):
        # Raise an error if the directive does not have contents.
        self.assert_has_content()
        self.document = self.state_machine.document

        text = '\n'.join(self.content)
        # Create the admonition node, to be populated by `nested_parse`.

        self.name = self.arguments[0].strip()

        term = nodes.term()
        term += nodes.strong(text=self.arguments[0]) 

        targetnode = self.make_targetnode()

        deflist = nodes.definition_list()
        configuration_def = nodes.definition_list_item()
        configuration_def += term
        defn = nodes.definition()
        configuration_def += defn
        deflist += configuration_def

        # Parse the directive contents.
        self.state.nested_parse(self.content, self.content_offset,
                                defn)
        
        option_map = {}
        option_map['features'] = 'Required for features'
        field_list = self.options_to_field_list(option_map)

        if field_list is not None:
            defn += field_list

        self.parsed('configuration').append(self)
        return [targetnode, deflist]
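Example #2, like the similar run() methods in Examples #9, #17, #33 and #42, follows a common Sphinx directive pattern: build a one-item definition list, then parse the directive body into the definition node with nested_parse. A condensed sketch of just that pattern, assuming a standard docutils/Sphinx setup (the directive class name is hypothetical):

from docutils import nodes
from docutils.parsers.rst import Directive

class NamedBlockDirective(Directive):
    required_arguments = 1
    has_content = True

    def run(self):
        # The term shows the directive argument in bold.
        term = nodes.term()
        term += nodes.strong(text=self.arguments[0])

        # The definition receives the parsed directive body.
        defn = nodes.definition()
        self.state.nested_parse(self.content, self.content_offset, defn)

        item = nodes.definition_list_item('', term, defn)
        return [nodes.definition_list('', item)]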
Example #3
    def add_coqtop_output(self):
        """Add coqtop's responses to a Sphinx AST

        Finds nodes to process using is_coqtop_block."""
        with CoqTop(color=True) as repl:
            for node in self.document.traverse(CoqtopBlocksTransform.is_coqtop_block):
                options = node['coqtop_options']
                opt_undo, opt_reset, opt_input, opt_output = self.parse_options(options)

                if opt_reset:
                    repl.sendone("Reset Initial.")
                pairs = []
                for sentence in self.split_sentences(node.rawsource):
                    pairs.append((sentence, repl.sendone(sentence)))
                if opt_undo:
                    repl.sendone("Undo {}.".format(len(pairs)))

                dli = nodes.definition_list_item()
                for sentence, output in pairs:
                    # Use Coqdoc to highlight input
                    in_chunks = highlight_using_coqdoc(sentence)
                    dli += nodes.term(sentence, '', *in_chunks, classes=self.block_classes(opt_input))
                    # Parse ANSI sequences to highlight output
                    out_chunks = AnsiColorsParser().colorize_str(output)
                    dli += nodes.definition(output, *out_chunks, classes=self.block_classes(opt_output, output))
                node.clear()
                node.rawsource = self.make_rawsource(pairs, opt_input, opt_output)
                node['classes'].extend(self.block_classes(opt_input or opt_output))
                node += nodes.inline('', '', classes=['coqtop-reset'] * opt_reset)
                node += nodes.definition_list(node.rawsource, dli)
Example #4
 def _definition_item(self, term, classifier):
     item = nodes.definition_list_item()
     term = nodes.term(text=term)
     item.append(term)
     classifier = nodes.classifier(text=classifier)
     item.append(classifier)
     return item
Example #5
def print_subcommand_list(data, nested_content):
    definitions = map_nested_definitions(nested_content)
    items = []
    if 'children' in data:
        for child in data['children']:
            my_def = [nodes.paragraph(
                text=child['help'])] if child['help'] else []
            name = child['name']
            my_def = apply_definition(definitions, my_def, name)
            if len(my_def) == 0:
                my_def.append(nodes.paragraph(text='Undocumented'))
            my_def.append(nodes.literal_block(text=child['usage']))
            my_def.append(print_command_args_and_opts(
                print_arg_list(child, nested_content),
                print_opt_list(child, nested_content),
                text_from_rst(child.get('description', ""), is_rst=True),
                print_subcommand_list(child, nested_content)))
            items.append(
                nodes.definition_list_item(
                    '',
                    nodes.term('', '', nodes.strong(text=name)),
                    nodes.definition('', *my_def)
                )
            )
    return nodes.definition_list('', *items)
Example #6
    def contribute_attributes(self, parent):
        if not self.attrs_schemata:
            return
        section = self._section(parent, _('Attributes'), '%s-attrs')
        prop_list = nodes.definition_list()
        section.append(prop_list)
        for prop_key, prop in sorted(self.attrs_schemata.items()):
            description = prop.description
            prop_item = nodes.definition_list_item(
                '', nodes.term('', prop_key))
            prop_list.append(prop_item)

            definition = nodes.definition()
            prop_item.append(definition)

            if prop.support_status.status != support.SUPPORTED:
                sstatus = prop.support_status.to_dict()
                msg = _('%(status)s')
                if sstatus['message'] is not None:
                    msg = _('%(status)s - %(message)s')
                para = nodes.inline('', msg % sstatus)
                warning = nodes.note('', para)
                definition.append(warning)

            if description:
                def_para = nodes.paragraph('', description)
                definition.append(def_para)
Example #7
    def contribute_property(self, prop_list, prop_key, prop):
        prop_item = nodes.definition_list_item(
            '', nodes.term('', prop_key))
        prop_list.append(prop_item)

        prop_item.append(nodes.classifier('', prop.type))

        definition = nodes.definition()
        prop_item.append(definition)

        if not prop.implemented:
            para = nodes.inline('', _('Not implemented.'))
            warning = nodes.note('', para)
            definition.append(warning)
            return

        if prop.description:
            para = nodes.paragraph('', prop.description)
            definition.append(para)

        if prop.update_allowed:
            para = nodes.paragraph('',
                                   _('Can be updated without replacement.'))
            definition.append(para)
        else:
            para = nodes.paragraph('', _('Updates cause replacement.'))
            definition.append(para)

        if prop.required:
            para = nodes.paragraph('', _('Required property.'))
        elif prop.default is not None:
            para = nodes.paragraph(
                '',
                _('Optional property, defaults to "%s".') % prop.default)
        else:
            para = nodes.paragraph('', _('Optional property.'))
        definition.append(para)

        for constraint in prop.constraints:
            para = nodes.paragraph('', str(constraint))
            definition.append(para)

        sub_schema = None
        if prop.schema and prop.type == properties.MAP:
            para = nodes.emphasis('', _('Map properties:'))
            definition.append(para)
            sub_schema = prop.schema

        elif prop.schema and prop.type == properties.LIST:
            para = nodes.emphasis(
                '', _('List contents:'))
            definition.append(para)
            sub_schema = prop.schema

        if sub_schema:
            sub_prop_list = nodes.definition_list()
            definition.append(sub_prop_list)
            for sub_prop_key in sorted(sub_schema.keys()):
                sub_prop = sub_schema[sub_prop_key]
                self.contribute_property(sub_prop_list, sub_prop_key, sub_prop)
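The recursion in Example #7 (and in the fuller Examples #43 and #44) nests a new definition_list inside the parent property's definition whenever a sub-schema is present. Stripped of the Heat-specific details, the shape of that recursion is roughly the following sketch; the schema layout (a dict with 'description' and an optional nested 'schema') is an assumption for illustration only:

from docutils import nodes

def schema_to_definition_list(schema):
    # schema: {key: {'description': str, 'schema': dict or None}} (assumed shape).
    dl = nodes.definition_list()
    for key, spec in sorted(schema.items()):
        defn = nodes.definition()
        if spec.get('description'):
            defn += nodes.paragraph('', spec['description'])
        if spec.get('schema'):
            # Recurse: a nested schema becomes a definition_list inside this definition.
            defn += schema_to_definition_list(spec['schema'])
        dl += nodes.definition_list_item('', nodes.term('', key), defn)
    return dl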
Example #8
def print_command_args_and_opts(arg_list, opt_list, sub_list=None):
    items = []
    if arg_list:
        items.append(nodes.definition_list_item(
            '', nodes.term(text='Positional arguments:'),
            nodes.definition('', arg_list)))
    for opt_dict in opt_list:
        opts = opt_dict['options']
        if opts is not None:
            items.append(nodes.definition_list_item(
                '', nodes.term(text=opt_dict['title']),
                nodes.definition('', opts)))
    if sub_list and len(sub_list):
        items.append(nodes.definition_list_item(
            '', nodes.term(text='Sub-commands:'),
            nodes.definition('', sub_list)))
    return nodes.definition_list('', *items)
Example #9
    def run(self):
        # Raise an error if the directive does not have contents.
        self.assert_has_content()
        self.document = self.state_machine.document

        text = '\n'.join(self.content)
        # Create the admonition node, to be populated by `nested_parse`.

        self.name = self.arguments[0]
        
        term = nodes.term()
        term += nodes.strong(text=self.arguments[0]) 
        
        targetnode = self.make_targetnode()

        deflist = nodes.definition_list()
        test_def = nodes.definition_list_item()
        test_def += term
        defn = nodes.definition()
        test_def += defn
        deflist += test_def


        # CURRENT : Parse direction list if provided, which is comma-separated
        if 'direction' in self.options:
            input = 0
            output = 0

            for p in self.options['direction'].split(","):
#                print "Testing `" + p.strip() + "' in test_procedure `" + self.name + "'..."

                if p.strip() == "input":
                    input = 1

                if p.strip() == "output":
                    output = 2

            self.direction = input + output


        # Parse the directive contents.
        self.state.nested_parse(self.content, self.content_offset, defn)

        option_map = {}
        option_map['setup'] = 'Required setup'
        option_map['direction'] = 'Direction (input|output|both)'
        field_list = self.options_to_field_list(option_map)

        if field_list is not None:
            defn += field_list

        #print "*** TestProcedure options setup = " + self.options['setup']
        if 'setup' in self.options:
            self.setup = self.options['setup']

        self.parsed('test_procedure').append(self)
        return [targetnode, deflist]
Example #10
 def run(self):
     options = io.get_options_for_format(self.arguments[0])
     field_list_node = nodes.definition_list()
     for name, description, value in options:
         item = nodes.definition_list_item()
     item.append(nodes.term(name + ' ', name + ' '))
         item.append(nodes.definition('', nodes.paragraph('', description)))
         field_list_node.append(item)
     return [field_list_node]
Example #11
 def render_simple(self, app, doctree, objs, n1, n2):
     if objs:
         #ni  = nodes.list_item()
         ni  = nodes.definition_list_item()
         rub = nodes.rubric(text=n1)
         st = nodes.strong()
         st.append(rub)
         ren = filebrief_replace_node(app, doctree, n2, objs)
         ni.append(st)
         ni.append(ren)
         return ni
Example #12
def codeitem_directive(dirname, arguments, options, content, lineno,
                       content_offset, block_text, state, state_machine):
    if not content:
        content = [u""]

    m = _CALLABLE_RE.match(u"".join(arguments))
    m2 = _OTHER_RE.match(u"".join(arguments))
    if m:
        g = m.groupdict()
        if g['rest'] is None:
            g['rest'] = ''
        if g['args'].strip():
            firstline = "%s%s **%s** (``%s``) %s" % (g['pre'].replace('*', r'\*'),
                                                     g['module'], g['name'],
                                                     g['args'], g['rest'])
        else: 
            firstline = "%s%s **%s** () %s" % (g['pre'].replace('*', r'\*'),
                                               g['module'], g['name'],
                                               g['rest'])
        if g['module']:
            target = '%s%s' % (g['module'], g['name'])
        else:
            target = g['name']
    elif m2:
        g = m2.groupdict()
        firstline = "%s%s **%s**" % (g['pre'].replace('*', r'\*'),
                                     g['module'], g['name'])
        if g['module']:
            target = '%s%s' % (g['module'], g['name'])
        else:
            target = g['name']
    else:
        firstline = u"".join(arguments)
        target = None


    dl = nodes.definition_list()
    di = nodes.definition_list_item()
    dl += di
    
    title_stuff, messages = state.inline_text(firstline, lineno)
    dt = nodes.term(firstline, *title_stuff)
    di += dt

    dd = nodes.definition()
    di += dd
    
    if target:
        dt['ids'] += [rst.make_target_id(target)]

    dl['classes'] += [dirname, 'code-item']
    _nested_parse(state, content, dd)
    
    return [dl]
Example #13
def codeitem_directive(dirname, arguments, options, content, lineno, content_offset, block_text, state, state_machine):
    if not content:
        content = [u""]

    m = _CALLABLE_RE.match(u"".join(arguments))
    m2 = _OTHER_RE.match(u"".join(arguments))
    if m:
        g = m.groupdict()
        if g["rest"] is None:
            g["rest"] = ""
        if g["args"].strip():
            firstline = "%s%s **%s** (``%s``) %s" % (
                g["pre"].replace("*", r"\*"),
                g["module"],
                g["name"],
                g["args"],
                g["rest"],
            )
        else:
            firstline = "%s%s **%s** () %s" % (g["pre"].replace("*", r"\*"), g["module"], g["name"], g["rest"])
        if g["module"]:
            target = "%s%s" % (g["module"], g["name"])
        else:
            target = g["name"]
    elif m2:
        g = m2.groupdict()
        firstline = "%s%s **%s**" % (g["pre"].replace("*", r"\*"), g["module"], g["name"])
        if g["module"]:
            target = "%s%s" % (g["module"], g["name"])
        else:
            target = g["name"]
    else:
        firstline = u"".join(arguments)
        target = None

    dl = nodes.definition_list()
    di = nodes.definition_list_item()
    dl += di

    title_stuff, messages = state.inline_text(firstline, lineno)
    dt = nodes.term(firstline, *title_stuff)
    di += dt

    dd = nodes.definition()
    di += dd

    if target:
        dt["ids"] += [target]

    dl["classes"] += [dirname, "code-item"]
    _nested_parse(state, content, dd)

    return [dl]
Example #14
    def run(self):
        proptype = self.arguments[0]
        default = self.arguments[1]
        dl = nodes.definition_list()
        dl['classes'].append('propparams')

        term = nodes.term('', 'Type')
        defnode = nodes.definition('', nodes.paragraph('', proptype))
        dl += nodes.definition_list_item('', term, defnode)

        if 'values' in self.options:
            term = nodes.term('', 'Values')
            defnode = nodes.definition('',  nodes.paragraph('',
                                       self.options['values']))
            dl += nodes.definition_list_item('', term, defnode)

        term = nodes.term('', 'Default')
        defnode = nodes.definition('',  nodes.paragraph('', default))
        dl += nodes.definition_list_item('', term, defnode)

        return [dl]
Example #15
 def render(self):
   symbol = self.symbol()
   if not symbol.exceptions:
     yield nodes.paragraph(text=_('None.'))
   else:
     definition_list = nodes.definition_list()
     for k, v in symbol.exceptions.iteritems():
       definition_list_item = nodes.definition_list_item('',
         nodes.term('', '', nodes.literal('', k)),
         nodes.definition('', nodes.paragraph(text=v)))
       definition_list.append(definition_list_item)
     yield definition_list
Example #16
def format_arguments(arguments):
	return [nodes.definition_list(
		'', *[
			nodes.definition_list_item(
				'',
				nodes.term(
					# node.Text('') is required because otherwise for some 
					# reason first name node is seen in HTML output as 
					# `<strong>abc</strong>`.
					'', *([nodes.Text('')] + (
						insert_separators([
							nodes.strong('', '', *[nodes.Text(ch) for ch in name])
							for name in argument.names
						], ', ')
						if argument.is_option else
						# Unless node.Text('') is here metavar is written in 
						# bold in the man page.
						[nodes.Text(''), nodes.emphasis(text=argument.metavar)]
					) + (
						[] if not argument.is_option or not argument.nargs else
						[nodes.Text(' '), nodes.emphasis('', argument.metavar)]
					))
				),
				nodes.definition('', nodes.paragraph('', *parse_argparse_text(argument.help or ''))),
			)
			for argument in flatten_groups(arguments)
		] + [
			nodes.definition_list_item(
				'',
				nodes.term(
					'', nodes.Text(''),
					nodes.strong(text='-h'),
					nodes.Text(', '),
					nodes.strong('', '', nodes.Text('-'), nodes.Text('-help')),
				),
				nodes.definition('', nodes.paragraph('', nodes.Text('Display help and exit.')))
			)
		]
	)]
Example #17
    def run(self):
        #self.assert_has_content()
        self.document = self.state_machine.document
        #text = '\n'.join(self.content)
        # Create the admonition node, to be populated by `nested_parse`.

        self.name = self.arguments[0]
        
        term = nodes.term()
        term += nodes.strong(text=self.arguments[0]) 
        
        targetnode = self.make_targetnode()

        deflist = nodes.definition_list()
        test_def = nodes.definition_list_item()
        test_def += term
        defn = nodes.definition()
        test_def += defn
        deflist += test_def

        # Parse the directive contents.
        self.state.nested_parse(self.content, self.content_offset, defn)

        option_map = {}
        option_map['runtests'] = 'Tests to run'
        field_list = self.options_to_field_list(option_map)

        if 'runtests' in self.options:
            self.runtests = []
            for p in self.options['runtests'].split(","):
#                print "Testing for `" + p.strip() + "' in prepare_setup `" + self.name + "'..."
                newruntests = [t for t in self.parsed('test') if p.strip() == t.name]
                if len(newruntests) == 0:
                    sys.stderr.write("ERROR : runtests field couldn't expand to any tests for name `" + p.strip() + "'\n")
                    if (self.check_errors()):
                        exit(1)

#                for t in newruntests :
#                    print "Runtests adding test : " + t.name
    
                self.runtests.extend(newruntests)

        else:
            self.runtests = []

        if field_list is not None:
            defn += field_list

        self.parsed('prepare_setup').append(self)
        return [targetnode, deflist]
Example #18
    def make_definition_list(self, term, definition=None, classifier=None):
        definition_list = nodes.definition_list_item()
        if not isinstance(term, nodes.Node):
            term = nodes.strong(text=term)
        definition_list.append(term)
        if classifier is not None:
            definition_list.append(nodes.classifier(text=classifier))
        if definition is not None:
            if isinstance(definition, (list, tuple)):
                definition_list.append(nodes.definition('', *definition))
            else:
                definition_list.append(nodes.definition('', definition))

        return definition_list
Example #19
 def run(self):
     refid = 'cmdoption-arg-' + nodes.make_id(self.arguments[0])
     target = nodes.target(names=[refid], ids=[refid])
     dl = nodes.definition_list()
     dt = nodes.definition_list_item()
     term = nodes.term()
     term += nodes.literal(self.arguments[0], self.arguments[0], classes=["descname"])
     dt += term
     definition = nodes.definition()
     dt += definition
     definition.document = self.state.document
     self.state.nested_parse(self.content, self.content_offset, definition)
     dl += dt
     return [target, dl]
Example #20
 def render(self):
   symbol = self.symbol()
   if not symbol.params:
     yield nodes.paragraph(text=_('None.'))
   else:
     definition_list = nodes.definition_list()
     for param in symbol.params:
       param_name = param.get('declname')
       if param_name is not None:
         param_desc = param.get('briefdescription', '')
         definition_list_item = nodes.definition_list_item('',
           nodes.term('', '', nodes.literal('', param_name)),
           nodes.definition('', nodes.paragraph(text=param_desc)))
         definition_list.append(definition_list_item)
     yield definition_list
Example #21
        def handle_item(fieldarg: str, content: List[nodes.inline]) -> nodes.definition_list_item:
            head = nodes.term()
            head += makerefs(self.rolename, fieldarg, addnodes.literal_strong)
            fieldtype = types.pop(fieldarg, None)
            if fieldtype is not None:
                head += nodes.Text(' : ')
                if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
                    typename = ''.join(n.astext() for n in fieldtype)
                    head += makerefs(self.typerolename, typename, addnodes.literal_emphasis)
                else:
                    head += fieldtype

            body_content = nodes.paragraph('', '', *content)
            body = nodes.definition('', body_content)

            return nodes.definition_list_item('', head, body)
Example #22
    def run(self):
        self.assert_has_content()

        title = self.arguments[0]
        content = '\n'.join(self.content)
        math_node = self.make_math_node(self.prepare_latex(content))

        tid = nodes.make_id(title)
        target = nodes.target('', '', ids=['inference-' + tid])
        self.state.document.note_explicit_target(target)

        term, desc = nodes.term('', title), nodes.description('', math_node)
        dli = nodes.definition_list_item('', term, desc)
        dl = nodes.definition_list(content, target, dli)
        set_source_info(self, dl)
        return [dl]
Example #23
    def rst_nodes(self):

        nodelist = []
        for entry in self.parameternamelist:
            nodelist.extend(entry.rst_nodes())

        term = nodes.term("","", *nodelist)

        nodelist = []

        if self.parameterdescription:
            nodelist.extend(self.parameterdescription.rst_nodes())

        definition = nodes.definition("", *nodelist)

        return [nodes.definition_list_item("", term, definition)]
Example #24
 def _format_subcommands(self, parser_info):
     assert 'children' in parser_info
     items = []
     for subcmd in parser_info['children']:
         subcmd_items = []
         if subcmd['help']:
             subcmd_items.append(nodes.paragraph(text=subcmd['help']))
         else:
             subcmd_items.append(nodes.paragraph(text='Undocumented'))
         items.append(
             nodes.definition_list_item(
                 '',
                 nodes.term('', '', nodes.strong(
                     text=subcmd['bare_usage'])),
                 nodes.definition('', *subcmd_items)))
     return nodes.definition_list('', *items)
Example #25
        def describing(self, description=MARKER, after=None):
            dl = self._current_node
            assert isinstance(dl, nodes.definition_list), dl
            item = nodes.definition_list_item()
            dl += item
            term = nodes.term()
            item += term
            self._current_node = term

            yield

            # We must now have either a description (so we call
            # described_as) or they must call described_as
            # des
            self._current_node = item

            self._describing(description, after)
Example #26
    def contribute_attributes(self, parent):
        schema = self.resource_class.attributes_schema
        if not schema:
            return
        section = self._section(parent, _("Attributes"), "%s-attrs")
        prop_list = nodes.definition_list()
        section.append(prop_list)
        for prop_key in sorted(schema.keys()):
            description = schema[prop_key]
            prop_item = nodes.definition_list_item("", nodes.term("", prop_key))
            prop_list.append(prop_item)

            definition = nodes.definition()
            prop_item.append(definition)

            if description:
                def_para = nodes.paragraph("", description)
                definition.append(def_para)
Example #27
 def render_sub(self, app, doctree, objs, n1, n2):
     if objs:
         #pni  = nodes.list_item()
         pni  = nodes.paragraph()
         pni  = nodes.definition_list_item()
         prub = nodes.rubric(text=n1)
         st = nodes.strong()
         st.append(prub)
         pni.append(st)
         pbl = nodes.bullet_list()
         pni.append(pbl)
         for (n, be) in objs.iteritems():
             ni  = nodes.list_item()
             prub = nodes.paragraph(text=n2 + n)
             pbl.append(ni)
             ni.append(prub)
             ni.append(be.render(app, doctree))
         return pni
Example #28
def generate_flag_summary(flags, category):

    summary_node = nodes.definition_list_item()
    term_node = nodes.term(text=categories[category])
    summary_node += term_node
    block = nodes.definition()
    summary_node += block

    # Fill block with flags
    for flag_info in flags:

        for name in flag_info['names']:
            block += nodes.literal(text=name)
            block += nodes.inline(text=' ')

    block += nodes.inline(text='\n')

    return summary_node
Example #29
def generate_flag_list(flags, category):

    list_node = nodes.definition_list()

    for flag_info in flags:

        dl_item_node = nodes.definition_list_item()
        term_node = nodes.term()
        # The man writer is picky, so we have to remove the outer
        # paragraph node to get just the flag name
        term_node += flag_info['cells'][0][0]
        dl_item_node += term_node
        def_node = nodes.definition()
        def_node += flag_info['cells'][1]
        dl_item_node += def_node

        list_node += dl_item_node

    return list_node
Example #30
    def contribute_attributes(self, parent):
        if not self.attrs_schemata:
            return
        section = self._section(parent, _('Attributes'), '%s-attrs')
        prop_list = nodes.definition_list()
        section.append(prop_list)
        for prop_key, prop in sorted(self.attrs_schemata.items()):
            description = prop.description
            prop_item = nodes.definition_list_item(
                '', nodes.term('', prop_key))
            prop_list.append(prop_item)

            definition = nodes.definition()
            prop_item.append(definition)

            self._status_str(prop.support_status, definition)

            if description:
                def_para = nodes.paragraph('', description)
                definition.append(def_para)
Example #31
def to_fields(x):
    to_definition_list = False
    for v in x.values():
        if isinstance(v, dict):
            to_definition_list = True
            break
    if to_definition_list:
        node = nodes.definition_list()
        previous_fieldlist = None
        for key, v in x.items():
            df = nodes.definition_list_item()
            if isinstance(v, str):  # embed field_list inside definition_list
                if previous_fieldlist is None:
                    fv = previous_fieldlist = nodes.field_list()
                    df.append(fv)
                    node.append(df)
                else:
                    fv = previous_fieldlist
                fvf = nodes.field()
                fv.append(fvf)
                fvf.append(nodes.field_name(text=key))
                fvf.append(nodes.field_body(v, nodes.Text(v)))
            else:
                previous_fieldlist = None
                df.append(nodes.term(text=key))
                dfv = nodes.definition()
                dfv.append(to_fields(v))
                df.append(dfv)
                node.append(df)
    else:
        node = nodes.field_list()
        for key, v in x.items():
            df = nodes.field()
            df.append(nodes.field_name(text=key))
            dfv = nodes.field_body(v, nodes.Text(v))
            df.append(dfv)
            node.append(df)
    return node
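A hypothetical call to to_fields() above, to make the branching concrete: nested dict values land in the definition_list branch, while flat string values are folded into an embedded field_list (the config/doc_node names are illustrative only).

config = {
    'server': {'host': 'localhost', 'port': '8080'},  # dict value -> definition_list branch
    'debug': 'false',                                 # str value -> embedded field_list
}
doc_node = to_fields(config)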
Example #32
 def render_dl_open(self, token):
     """Render a definition list."""
     node = nodes.definition_list(classes=["simple", "myst"])
     self.add_line_and_source_path(node, token)
     with self.current_node_context(node, append=True):
         item = None
         for child in token.children:
             if child.opening.type == "dt_open":
                 item = nodes.definition_list_item()
                 self.add_line_and_source_path(item, child)
                 with self.current_node_context(item, append=True):
                     term = nodes.term()
                     self.add_line_and_source_path(term, child)
                     with self.current_node_context(term, append=True):
                         self.render_children(child)
             elif child.opening.type == "dd_open":
                 if item is None:
                     error = self.reporter.error(
                         ("Found a definition in a definition list, "
                          "with no preceding term"),
                         # nodes.literal_block(content, content),
                         line=child.map[0],
                     )
                     self.current_node += [error]
                 with self.current_node_context(item):
                     definition = nodes.definition()
                     self.add_line_and_source_path(definition, child)
                     with self.current_node_context(definition,
                                                    append=True):
                         self.render_children(child)
             else:
                 error = self.reporter.error(
                     ("Expected a term/definition as a child of a definition list"
                      f", but found a: {child.opening.type}"),
                     # nodes.literal_block(content, content),
                     line=child.map[0],
                 )
                 self.current_node += [error]
Example #33
    def run(self):
        # Raise an error if the directive does not have contents.
        self.assert_has_content()
        self.document = self.state_machine.document

        self.name = self.arguments[0]
        #print "*** SETUP RUN HIT!"

        self.setup_time = int(self.options['setup_time'])

        text = '\n'.join(self.content)
        # Create the admonition node, to be populated by `nested_parse`.

        term = nodes.term()
        term += nodes.strong(text=self.arguments[0])

        targetnode = self.make_targetnode()

        deflist = nodes.definition_list()
        behaviour_def = nodes.definition_list_item()
        behaviour_def += term
        defn = nodes.definition()
        behaviour_def += defn
        deflist += behaviour_def

        # Parse the directive contents.
        self.state.nested_parse(self.content, self.content_offset, defn)

        option_map = {}
        option_map['setup_time'] = 'Setup time'
        field_list = self.options_to_field_list(option_map)

        if field_list is not None:
            defn += field_list

        #print "*** self.parsed('setup').append(self)!"
        self.parsed('setup').append(self)
        return [targetnode, deflist]
Example #34
  def run(self):
    """ Generate the definition list that displays the actual references """
    env = self.state.document.settings.env
    keys = env.domaindata['cite']['keys']
    env.domaindata['cite']['refdoc'] = env.docname

    citations = env.domains['cite'].citations

    # TODO: implement
    #env.domaindata['cite']['refdocs'][env.docname] = Citations(env, path)

    # Build the references list
    # TODO: Make this an enumerated_list or field_list maybe?
    node = nodes.definition_list()
    node.document = self.state.document
    node['classes'].append('references')

    items = []
    for i, key in enumerate(keys):
      term = nodes.term('', '')

      # TODO: Allow the format of the reference list be configurable
      if env.domaindata['cite']['conf']['style'] == 'super':
        term.children = [nodes.superscript('', i+1)]
      else:
        term.children = [nodes.inline('', "%s) " % (i+1))]

      nid = "citation-%s" % nodes.make_id(key)
      definition = self.get_reference_node(citations.get(key))

      li = nodes.definition_list_item('', term, definition)
      li[0]['ids'].append(nid)
      li[0]['names'].append(nid)
      items.append(li)
    node.extend(items)

    return [node]
Example #35
File: ext.py Project: EVMosaic/BAM
def print_subcommand_list(data, nested_content):
    definitions = map_nested_definitions(nested_content)
    items = []
    if 'children' in data:
        for child in data['children']:
            my_def = [nodes.paragraph(
                text=child['help'])] if child['help'] else []
            name = child['name']
            my_def = apply_definition(definitions, my_def, name)
            if len(my_def) == 0:
                my_def.append(nodes.paragraph(text='Undocumented'))
            my_def.append(nodes.literal_block(text=child['usage']))
            my_def.append(
                print_command_args_and_opts(
                    print_arg_list(child, nested_content),
                    print_opt_list(child, nested_content),
                    text_from_rst(child.get('description', ""), is_rst=True),
                    print_subcommand_list(child, nested_content),
                ))
            items.append(
                nodes.definition_list_item(
                    '', nodes.term('', '', nodes.strong(text=name)),
                    nodes.definition('', *my_def)))
    return nodes.definition_list('', *items)
Example #36
    def contribute_attributes(self, parent):
        if not self.attrs_schemata:
            return
        section = self._section(parent, _('Attributes'), '%s-attrs')
        prop_list = nodes.definition_list()
        section.append(prop_list)
        for prop_key, prop in sorted(self.attrs_schemata.items()):
            description = prop.description
            prop_item = nodes.definition_list_item(
                '', nodes.term('', prop_key))
            prop_list.append(prop_item)

            definition = nodes.definition()
            prop_item.append(definition)

            if prop.support_status.status != support.SUPPORTED:
                para = nodes.paragraph('',
                                       self._status_str(prop.support_status))
                note = nodes.note('', para)
                definition.append(note)

            if description:
                def_para = nodes.paragraph('', description)
                definition.append(def_para)
Example #37
    def add_coq_output_1(self, repl, node):
        options = self.parse_options(node)

        pairs = []

        if options['restart']:
            repl.sendone('Restart.')
        if options['reset']:
            repl.sendone('Reset Initial.')
            repl.send_initial_options()
        if options['fail']:
            repl.sendone('Unset Coqtop Exit On Error.')
        if options['warn']:
            repl.sendone('Set Warnings "default".')
        for sentence in self.split_sentences(node.rawsource):
            pairs.append((sentence, repl.sendone(sentence)))
        if options['abort']:
            repl.sendone('Abort All.')
        if options['fail']:
            repl.sendone('Set Coqtop Exit On Error.')
        if options['warn']:
            repl.sendone('Set Warnings "+default".')

        dli = nodes.definition_list_item()
        for sentence, output in pairs:
            # Use Coqdoc to highlight input
            in_chunks = highlight_using_coqdoc(sentence)
            dli += nodes.term(sentence, '', *in_chunks, classes=self.block_classes(options['input']))
            # Parse ANSI sequences to highlight output
            out_chunks = AnsiColorsParser().colorize_str(output)
            dli += nodes.definition(output, *out_chunks, classes=self.block_classes(options['output'], output))
        node.clear()
        node.rawsource = self.make_rawsource(pairs, options['input'], options['output'])
        node['classes'].extend(self.block_classes(options['input'] or options['output']))
        node += nodes.inline('', '', classes=['coqtop-reset'] * options['reset'])
        node += nodes.definition_list(node.rawsource, dli)
Example #38
	def run(self):
		event   = self.arguments[0]
		anchor  = event.lower().replace('_', '-')
		kind    = self.options.get('type')
		inType  = self.options.get('in')
		outType = self.options.get('out') or 'void'
		subject = self.options.get('subject')
		params  = self.options.get('params') or ''
		since   = self.options.get('since') or ''
		desc    = u'\n'.join(self.content)

		# create section

		# optionally insert zero-width breaks:
		# event.replace('_', u"_\u200B")

		sec = nodes.section()
		sec.append(nodes.title('', event))
		sec['names'].append(anchor)

		self.state.document.note_implicit_target(sec, sec)

		# the signature

		sig = '%s %s(%s)' % (outType, event, inType)

		if kind == 'until':
			sig += ' BREAKS'

		# additional params for this event

		paramlist = None

		if len(params) > 0:
			paramlist = self._buildParamList(nodes.bullet_list(), params)

		# create actual definition list

		dl = nodes.definition_list('',
			nodes.definition_list_item('',
				nodes.term('', '', nodes.strong('', 'Signatur:')),
				nodes.definition('', nodes.literal('', sig))
			),
			nodes.definition_list_item('',
				nodes.term('', '', nodes.strong('', 'Beschreibung:')),
				nodes.definition('', self._parseInline(desc))
			),
			nodes.definition_list_item('',
				nodes.term('', '', nodes.strong('', 'Subject:')),
				nodes.definition('', self._parseInline(subject))
			)
		)

		if paramlist:
			dl.append(nodes.definition_list_item('',
				nodes.term('', '', nodes.strong('', 'Weitere Parameter:')),
				nodes.definition('', paramlist)
			))

		if len(since) > 0:
			since = 'v%s' % since

			dl.append(nodes.definition_list_item('',
				nodes.term('', '', nodes.strong('', u'Hinzugefügt in:')),
				nodes.definition('', self._parseInline(since))
			))

		sec.append(dl)

		return [sec]
Example #39
def add_df_item(root, term, *contents):
    root += nodes.definition_list_item('', nodes.term('', term),
                                       nodes.definition('', *contents))
Example #40
    def run(self):
        env = self.state.document.settings.env

        # generate the linkback node for this option
        targetid = "option-%d" % env.new_serialno('mrjob-opt')
        targetnode = nodes.target('', '', ids=[targetid])

        # Each option will be outputted as a single-item definition list
        # (just like it was doing before we used this extension)
        dl = nodes.definition_list()
        dli = nodes.definition_list_item()

        term = nodes.term()

        # config option shall be bold
        if 'config' in self.options:
            cfg = self.options['config']
            term.append(nodes.strong(cfg, cfg))
            if 'switch' in self.options:
                term.append(nodes.Text(' (', ' ('))

        # switch shall be comma-separated literals
        if 'switch' in self.options:
            switches = self.options['switch'].split(', ')
            for i, s in enumerate(switches):
                if i > 0:
                    term.append(nodes.Text(', ', ', '))
                term.append(nodes.literal(s, s))
            if 'config' in self.options:
                term.append(nodes.Text(')', ')'))

        dli.append(term)

        # classifier is either plain text or a link to some more docs, so parse
        # its contents
        classifier = nodes.classifier()
        type_nodes, messages = self.state.inline_text(
            self.options.get('type', ''), self.lineno)

        # failed attempt at a markup shortcut; may be able to make this work
        # later
        #t = option_info['options']['type']
        #refnode = addnodes.pending_xref(
        #    t, reftarget='data-type-%s' % t,
        #    refexplicit=True, reftype='ref')
        #print refnode
        #refnode += nodes.Text(t, t)
        #type_nodes = [refnode]

        classifier.extend(type_nodes)
        dli.append(classifier)

        # definition holds the description
        defn = nodes.definition()

        # add a default if any
        default_nodes = []
        if 'default' in self.options:
            default_par = nodes.paragraph()
            default_par.append(nodes.strong('Default: ', 'Default: '))
            textnodes, messages = self.state.inline_text(
                self.options['default'], self.lineno)
            default_nodes = textnodes
            default_par.extend(textnodes)
            defn.append(default_par)

        # parse the description like a nested block (see
        # sphinx.compat.make_admonition)
        desc_par = nodes.paragraph()
        self.state.nested_parse(self.content, self.content_offset, desc_par)
        defn.append(desc_par)

        dli.append(defn)
        dl.append(dli)

        if not hasattr(env, 'optionlist_all_options'):
            env.optionlist_all_options = []

        # store info for the optionlist traversal to find
        env.optionlist_all_options.append({
            'docname': env.docname,
            'lineno': self.lineno,
            'options': self.options,
            'content': self.content,
            'target': targetnode,
            'type_nodes': [n.deepcopy() for n in type_nodes],
            'default_nodes': [n.deepcopy() for n in default_nodes]
        })

        return [targetnode, dl]
Example #41
    def _document_member(self, name, member):
        def _add_member_note(definition, member, qualifiers, format_text):
            if not type(qualifiers) == list:
                qualifiers = [qualifiers]
            format_param = {
                qualifier: getattr(member, qualifier)
                for qualifier in qualifiers
                if hasattr(member, qualifier) and getattr(member, qualifier)
                and type(getattr(member, qualifier)) != types.FunctionType
            }

            if len(qualifiers) == len(format_param):
                definition += _note(text=format_text.format(**format_param))

        def _add_member_note_if(definition, member, qualifier, text):
            if getattr(member, qualifier, False):
                definition += _note(text=text)

        node = nodes.definition_list_item()
        node += nodes.term(text=name)

        definition = nodes.definition()

        if member.help_text and len(member.help_text):
            definition += rst2node(member.help_text)

        if isinstance(member, StringField):
            node += nodes.classifier(text="String")
        elif isinstance(member, EmbeddedDocumentField):
            node += nodes.classifier(text="JSON object")
            doctype = member.document_type.__name__
            definition += rst2node("This member is an object of type "
                                   "`{}`".format(doctype))
            # definition += nodes.paragraph(self._document_class(member.document_type)
        elif isinstance(member, ReferenceField):
            node += nodes.classifier(text="String containing a "
                                     "MongoDB ObjectID")
        elif isinstance(member, DateTimeField):
            node += nodes.classifier(text="String containing a Date/Time")

        _add_member_note(
            definition, member, 'min_length', "The minimum length for this "
            "member is {min_length} characters.")

        _add_member_note(
            definition, member, 'max_length', "The maximum length for this "
            "member is {max_length} characters.")

        _add_member_note(definition, member, 'choices',
                         "Valid values for this member are {choices}.")

        _add_member_note(
            definition, member, 'default', "The default value for this "
            "member is {default!r}.")

        _add_member_note_if(
            definition, member, 'required',
            "This member is required for the creation "
            "of a new object.")

        _add_member_note_if(
            definition, member, 'unique',
            "Values for this member must be unique to all "
            "objects.")

        node += definition

        return node
Example #42
    def run(self):
        # Content is optional, so don't raise an error if it's missing...
        #        print self.state_machine.document.current_source

        self.document = self.state_machine.document
        self.is_config_option = False

        text = '\n'.join(self.content)
        # Create the admonition node, to be populated by `nested_parse`.

        self.name = self.arguments[0].strip()

        title = self.arguments[0]
        if 'parent' in self.options:
            title += " (" + self.options['parent'] + ")"

        term = nodes.term()
        n = nodes.strong(text=title)
        term += n

        targetnode = self.make_targetnode()

        deflist = nodes.definition_list()
        feature_def = nodes.definition_list_item()
        feature_def += term
        defn = nodes.definition()
        feature_def += defn
        deflist += feature_def
        # Parse the directive contents.
        self.state.nested_parse(self.content, self.content_offset, defn)
        option_map = {}
        option_map['parents'] = 'Parent features'
        option_map['config_options'] = 'Options'
        field_list = self.options_to_field_list(option_map)

        if 'parents' in self.options:
            self.parents = []
            for p in self.options['parents'].split(","):
                found = False
                for f in self.parsed('feature'):
                    if p.strip() == f.name:
                        found = True
                if not found:
                    sys.stderr.write("ERROR: Feature `" + self.name +
                                     "' refers to unknown parent `" +
                                     p.strip() + "'")
                    if self.check_errors():
                        exit(1)

            for p in self.options['parents'].split(","):
                self.parents.extend(
                    [f for f in self.parsed('feature') if p.strip() == f.name])

            ancestors = set(self.parents)
            for p in self.parents:
                ancestors = ancestors | set(p.ancestors)
            self.ancestors = list(ancestors)
        else:
            self.parents = []
            self.ancestors = []

        self.summarize = ('summarize' in self.options)

        if 'config_options' in self.options:
            self.choices = []
            found_default = False
            optstr = self.options['config_options'].strip()
            p = re.compile("(.*), *default *= *(.*)")
            m = p.match(optstr)
            if m:
                optstr = m.groups(0)[0]
                default = m.groups(0)[1]
            else:
                default = None
            p = re.compile(r"(.*)\.\.(.*)")
            m = p.match(optstr)
            if m:
                lb = int(m.groups(0)[0])
                ub = int(m.groups(0)[1])
                name = self.name + "__RANGE"
                choice = Feature(name)
                choice.summarize = ('summarize_options' in self.options)
                self.choices.append(choice)
                self.parsed('feature').append(choice)
                choice.is_range = True
                choice.is_config_option = True
                choice.is_default = False
                choice.parents = [self]
                choice.ancestors = [self]
                choice.ancestors.extend(self.ancestors)
            else:
                for o in self.options['config_options'].split('|'):
                    p = re.compile(r"(.*) \(default\)")
                    is_default = False
                    name = o.strip()
                    if (p.match(name)):
                        name = p.match(name).groups(0)[0]
                        name = name.strip()
                        is_default = True
                        found_default = True
                    if (name == default):
                        is_default = True
                        found_default = True
                    name = self.name + "_" + name
                    choice = Feature(name)
                    choice.is_range = False
                    choice.is_default = is_default
                    choice.summarize = ('summarize_options' in self.options)
                    self.choices.append(choice)
                    self.parsed('feature').append(choice)
                    choice.is_config_option = True
                    choice.parents = [self]
                    choice.ancestors = [self]
                    choice.ancestors.extend(self.ancestors)
                if not found_default:
                    self.choices[0].is_default = True
        else:
            self.choices = []

        if field_list is not None:
            defn += field_list

        self.parsed('feature').append(self)
        return [targetnode, deflist]
Example #43
    def contribute_property(self, prop_list, prop_key, prop):
        prop_item = nodes.definition_list_item(
            '', nodes.term('', prop_key))
        prop_list.append(prop_item)

        prop_item.append(nodes.classifier('', prop.type))

        definition = nodes.definition()
        prop_item.append(definition)

        if prop.support_status.status != support.SUPPORTED:
            para = nodes.paragraph('', self._status_str(prop.support_status))
            note = nodes.note('', para)
            definition.append(note)

        if (prop.support_status.status == support.SUPPORTED and
            prop.support_status.version is not None):
            tag = prop.support_status.version.title()
            message = (_('Available since %s.') % self._version_str(tag))
            para = nodes.paragraph('', message)
            note = nodes.note('', para)
            definition.append(note)

        if not prop.implemented:
            para = nodes.paragraph('', _('Not implemented.'))
            note = nodes.note('', para)
            definition.append(note)
            return

        if prop.description:
            para = nodes.paragraph('', prop.description)
            definition.append(para)

        if prop.update_allowed:
            para = nodes.paragraph('',
                                   _('Can be updated without replacement.'))
            definition.append(para)
        elif prop.immutable:
            para = nodes.paragraph('', _('Updates are not supported. '
                                         'Resource update will fail on any '
                                         'attempt to update this property.'))
            definition.append(para)
        else:
            para = nodes.paragraph('', _('Updates cause replacement.'))
            definition.append(para)

        if prop.required:
            para = nodes.paragraph('', _('Required property.'))
        elif prop.default is not None:
            para = nodes.paragraph(
                '',
                _('Optional property, defaults to "%s".') % prop.default)
        else:
            para = nodes.paragraph('', _('Optional property.'))
        definition.append(para)

        for constraint in prop.constraints:
            para = nodes.paragraph('', str(constraint))
            definition.append(para)

        sub_schema = None
        if prop.schema and prop.type == properties.Schema.MAP:
            para = nodes.paragraph()
            emph = nodes.emphasis('', _('Map properties:'))
            para.append(emph)
            definition.append(para)
            sub_schema = prop.schema

        elif prop.schema and prop.type == properties.Schema.LIST:
            para = nodes.paragraph()
            emph = nodes.emphasis('', _('List contents:'))
            para.append(emph)
            definition.append(para)
            sub_schema = prop.schema

        if sub_schema:
            sub_prop_list = nodes.definition_list()
            definition.append(sub_prop_list)
            for sub_prop_key, sub_prop in sorted(sub_schema.items(),
                                                 self.cmp_prop):
                self.contribute_property(
                    sub_prop_list, sub_prop_key, sub_prop)
Example #44
    def contribute_property(self, prop_list, prop_key, prop):
        prop_item = nodes.definition_list_item(
            '', nodes.term('', prop_key))
        prop_list.append(prop_item)

        prop_item.append(nodes.classifier('', prop.type))

        definition = nodes.definition()
        prop_item.append(definition)

        if prop.support_status.status != support.SUPPORTED:
            sstatus = prop.support_status.to_dict()
            msg = _('%(status)s')
            if sstatus['message'] is not None:
                msg = _('%(status)s - %(message)s')
            para = nodes.inline('', msg % sstatus)
            warning = nodes.note('', para)
            definition.append(warning)

        if not prop.implemented:
            para = nodes.inline('', _('Not implemented.'))
            warning = nodes.note('', para)
            definition.append(warning)
            return

        if prop.description:
            para = nodes.paragraph('', prop.description)
            definition.append(para)

        if prop.update_allowed:
            para = nodes.paragraph('',
                                   _('Can be updated without replacement.'))
            definition.append(para)
        else:
            para = nodes.paragraph('', _('Updates cause replacement.'))
            definition.append(para)

        if prop.required:
            para = nodes.paragraph('', _('Required property.'))
        elif prop.default is not None:
            para = nodes.paragraph(
                '',
                _('Optional property, defaults to "%s".') % prop.default)
        else:
            para = nodes.paragraph('', _('Optional property.'))
        definition.append(para)

        for constraint in prop.constraints:
            para = nodes.paragraph('', str(constraint))
            definition.append(para)

        sub_schema = None
        if prop.schema and prop.type == properties.Schema.MAP:
            para = nodes.emphasis('', _('Map properties:'))
            definition.append(para)
            sub_schema = prop.schema

        elif prop.schema and prop.type == properties.Schema.LIST:
            para = nodes.emphasis(
                '', _('List contents:'))
            definition.append(para)
            sub_schema = prop.schema

        if sub_schema:
            sub_prop_list = nodes.definition_list()
            definition.append(sub_prop_list)
            for sub_prop_key, sub_prop in sorted(sub_schema.items(),
                                                 self.cmp_prop):
                self.contribute_property(
                    sub_prop_list, sub_prop_key, sub_prop)
Example #45
    def run(self):
        env = self.state.document.settings.env

        # Parse the content of the directive recursively
        node = nodes.Element()
        node.document = self.state.document
        self.state.nested_parse(self.content, self.content_offset, node)

        # define defaults
        node['name'] = self.arguments[0]
        node['operates_on_value'] = self.options.get('operates_on_value',
                                                     False)
        node['content'] = self.content

        arg_nodes = []
        other_nodes = []
        required_params = {}
        optional_params = {}

        for child in node:
            if isinstance(child, MetaIniArgNode):
                if child["required"]:
                    required_params[child["name"]] = child
                else:
                    optional_params[child["name"]] = child
            else:
                other_nodes.append(child)

        # Build the content of the box
        prefix = ''
        if node["operates_on_value"]:
            prefix = '<value> | '
        else:
            prefix = '<key> = <value> | '

        sl = [prefix + self.arguments[0] + ' ']

        for rp, paramnode in required_params.items():
            if paramnode["multi"]:
                sl.append('<' + paramnode['name'] + '1 [' + paramnode['name'] +
                          '2 ...]' + '> ')
            if paramnode["single"]:
                sl.append('<' + paramnode['name'] + '> ')

        for op, paramnode in optional_params.items():
            if paramnode["multi"]:
                sl.append('[<' + paramnode['name'] + '1 [' +
                          paramnode['name'] + '2 ...]' + '>] ')
            if paramnode["single"]:
                sl.append('[<' + paramnode['name'] + '>] ')

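        # Render the assembled usage string as a literal block.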
        lb = nodes.literal_block(''.join(sl), ''.join(sl))
        arg_nodes.append(lb)

        # provide a definition list for the arguments
        dl = nodes.definition_list()
        for param, paramnode in chain(required_params.items(),
                                      optional_params.items()):
            dli = nodes.definition_list_item()
            dl += dli

            dlit = nodes.term(text=param)
            dli += dlit

            dlic = nodes.definition()
            dli += dlic
            self.state.nested_parse(paramnode['content'], self.content_offset,
                                    dlic)

        # add the parameter list to the output
        arg_nodes.append(dl)

        # Add a target for referencing!
        section = nodes.section(names=[node['name']])
        section += nodes.subtitle(text="The " + node['name'] + " command")

        return [section] + arg_nodes + other_nodes
Example No. 46
    def run(self):
        # Tests don't have to have contents now, so check for it but don't assert:
        self.document = self.state_machine.document
        text = []
        if self.content:
            text = '\n'.join(self.content)

        # Create the admonition node, to be populated by `nested_parse`.

        self.name = self.arguments[0]

        if 'test_time' in self.options:
            self.test_time = self.options['test_time']

        # test_procedure name
        if 'test_procedure' in self.options:
            self.test_procedure = self.options['test_procedure']
        else:
            self.assert_has_content()
            proc = TestProcedure(self.name + "_procedure")
            self.test_procedure = proc.name
            if 'setup' in self.options:
                proc.setup = self.options['setup']
            else:
                proc.setup = ""
            proc.content = self.content
            self.parsed('test_procedure').append(proc)

        term = nodes.term()
        term += nodes.strong(text=self.arguments[0])

        targetnode = self.make_targetnode()

        deflist = nodes.definition_list()
        test_def = nodes.definition_list_item()
        test_def += term
        defn = nodes.definition()
        test_def += defn
        deflist += test_def

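        # Render declared parameters as a field list; long choice lists get one paragraph per choice.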
        if 'parameters' in self.options:
            params = self.parse_parameters()

            defn += nodes.paragraph(text="Parameters:")
            for param in params:
                name = param['param']
                field_list = nodes.field_list()
                param_field = nodes.field()
                param_field_name = nodes.field_name()
                param_field_name += nodes.Text(name)
                param_field += param_field_name
                param_field_body = nodes.field_body()
                choices_str = param['choices_str']
                if len(choices_str) < 50:
                    param_field_body += nodes.paragraph(text=choices_str)
                else:
                    choices = param['choices']
                    param_field_body += nodes.raw('',
                                                  ' \\ \n\n',
                                                  format="latex")
                    for choice in choices:
                        param_field_body += nodes.paragraph(text=choice)
                param_field += param_field_body
                field_list += param_field
                name = self.arguments[0].strip() + "param" + name
                param_target = nodes.target('', '', ids=[nodes.make_id(name)])
                name = nodes.fully_normalize_name(name)
                param_target['names'].append(name)
                self.state_machine.document.note_explicit_target(
                    param_target, param_target)
                defn += param_target
                defn += field_list

        # Parse the directive contents.
        self.state.nested_parse(self.content, self.content_offset, defn)

        option_map = {}
        option_map['configurations'] = 'Valid configurations'
        option_map['setup'] = 'Required setup'
        option_map['test_time'] = 'Test time (min)'
        option_map['priority'] = 'Priority'
        option_map['test_procedure'] = 'Test procedure'
        field_list = self.options_to_field_list(option_map)

        if field_list is not None:
            defn += field_list

        self.parsed('test').append(self)
        return [targetnode, deflist]
Example No. 47
    def make_document(self, doc_strings):
        """make doctree representation of collected fragments"""

        opt = self.opt

        big_doc = publish_doctree("")
        self.document = big_doc
        big_doc += nodes.title(text="Plugins listing generated %s" %
                               time.asctime())

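        # Optional table of contents, filled in by the Contents transform at the end.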
        contents = nodes.container()
        if opt.include_contents:
            big_doc += nodes.topic('', nodes.title(text='Contents'), contents)

        if not opt.no_summary:
            def_list = nodes.definition_list()
            alpha_list = nodes.paragraph()
            big_doc += nodes.section('', nodes.title(text="Plugins summary"),
                                     alpha_list, def_list)

        last_alpha = ''

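        # One section per collected docstring, plus an optional summary entry and A-Z quick index.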
        for doc in doc_strings:

            section = nodes.section()
            big_doc += section
            section += nodes.title(text=doc[0])

            self.add_ids(section)

            if not opt.no_summary:
                firstpara = (self.first_text(doc[2])
                             or nodes.paragraph(text='No summary found'))
                reference = nodes.reference('',
                                            refid=section['ids'][0],
                                            name=doc[0],
                                            anonymous=1)
                reference += nodes.Text(doc[0])
                def_list += nodes.definition_list_item(
                    '', nodes.term('', '', reference),
                    nodes.definition('', firstpara))

                # add letter quick index entry if needed
                if doc[0][0].upper() != last_alpha:
                    last_alpha = doc[0][0].upper()
                    self.add_ids(reference)
                    alpha_list += nodes.reference('',
                                                  nodes.Text(last_alpha + ' '),
                                                  refid=reference['ids'][0],
                                                  name=doc[0],
                                                  anonymous=1)

            for element in doc[2]:
                # if the docstring has titles, we need another level
                if element.tagname == 'title':
                    subsection = nodes.section()
                    section += subsection
                    section = subsection
                    break

            for element in doc[2]:
                try:
                    section += element.deepcopy()
                except TypeError:
                    err('Element.deepcopy() failed, dropped element for %s\n' %
                        doc[0])

        if opt.include_contents:
            contents.details = {'text': 'Contents here'}

            self.add_ids(big_doc)
            transform = Contents(big_doc, contents)
            transform.apply()

        return big_doc
Example No. 48
    def run(self) -> List[Node]:
        node = addnodes.glossary()
        node.document = self.state.document
        node['sorted'] = ('sorted' in self.options)

        # This directive implements a custom format of the reST definition list
        # that allows multiple lines of terms before the definition.  This is
        # easy to parse since we know that the contents of the glossary *must
        # be* a definition list.

        # first, collect single entries
        entries: List[Tuple[List[Tuple[str, str, int]], StringList]] = []
        in_definition = True
        in_comment = False
        was_empty = True
        messages: List[Node] = []
        for line, (source, lineno) in zip(self.content, self.content.items):
            # empty line -> add to last definition
            if not line:
                if in_definition and entries:
                    entries[-1][1].append('', source, lineno)
                was_empty = True
                continue
            # unindented line -> a term
            if line and not line[0].isspace():
                # enable comments
                if line.startswith('.. '):
                    in_comment = True
                    continue
                else:
                    in_comment = False

                # first term of definition
                if in_definition:
                    if not was_empty:
                        messages.append(self.state.reporter.warning(
                            _('glossary term must be preceded by empty line'),
                            source=source, line=lineno))
                    entries.append(([(line, source, lineno)], StringList()))
                    in_definition = False
                # second term and following
                else:
                    if was_empty:
                        messages.append(self.state.reporter.warning(
                            _('glossary terms must not be separated by empty lines'),
                            source=source, line=lineno))
                    if entries:
                        entries[-1][0].append((line, source, lineno))
                    else:
                        messages.append(self.state.reporter.warning(
                            _('glossary seems to be misformatted, check indentation'),
                            source=source, line=lineno))
            elif in_comment:
                pass
            else:
                if not in_definition:
                    # first line of definition, determines indentation
                    in_definition = True
                    indent_len = len(line) - len(line.lstrip())
                if entries:
                    entries[-1][1].append(line[indent_len:], source, lineno)
                else:
                    messages.append(self.state.reporter.warning(
                        _('glossary seems to be misformatted, check indentation'),
                        source=source, line=lineno))
            was_empty = False

        # now, parse all the entries into a big definition list
        items: List[nodes.definition_list_item] = []
        for terms, definition in entries:
            termnodes: List[Node] = []
            system_messages: List[Node] = []
            for line, source, lineno in terms:
                parts = split_term_classifiers(line)
                # parse the term with inline markup
                # classifiers (parts[1:]) will not be shown on doctree
                textnodes, sysmsg = self.state.inline_text(parts[0], lineno)

                # use first classifier as a index key
                term = make_glossary_term(self.env, textnodes, parts[1], source, lineno,
                                          node_id=None, document=self.state.document)
                term.rawsource = line
                system_messages.extend(sysmsg)
                termnodes.append(term)

            termnodes.extend(system_messages)

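            # Parse the definition body (if any) into a definition node placed after the terms.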
            defnode = nodes.definition()
            if definition:
                self.state.nested_parse(definition, definition.items[0][1],
                                        defnode)
            termnodes.append(defnode)
            items.append(nodes.definition_list_item('', *termnodes))

        dlist = nodes.definition_list('', *items)
        dlist['classes'].append('glossary')
        node += dlist
        return messages + [node]
Example No. 49
    def run(self):
        result = []
        symbol = self.arguments[0]
        descriptor = descriptors_by_symbol[symbol]

        comment = find_comment(symbol, prefix='')
        if comment:
            result += produce_nodes(self.state, comment)

        if descriptor.client_streaming:
            text = 'This method uses client-streaming.'
            result.append(
                nodes.warning(
                    '',
                    nodes.paragraph('', '', nodes.Text(text)),
                ))

        if descriptor.server_streaming:
            text = ('This method uses server-streaming. ' +
                    'Yamcs sends an unspecified amount of data ' +
                    'using chunked transfer encoding.')
            result.append(
                nodes.warning(
                    '',
                    nodes.paragraph('', '', nodes.Text(text)),
                ))

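        # Read the HTTP route annotation and render it as a URI template code block.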
        route_options = descriptor.options.Extensions[annotations_pb2.route]
        route_text = get_route_for_method_descriptor(descriptor)

        raw = '.. rubric:: URI Template\n'
        raw += '.. code-block:: uritemplate\n\n'
        raw += '    ' + route_text + '\n'

        result += produce_nodes(self.state, raw)

        input_descriptor = descriptors_by_symbol[descriptor.input_type]

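        # Describe each URI template parameter using the comment of the matching input field.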
        route_params = get_route_params(route_text)
        if route_params:
            dl_items = []
            for param in route_params:
                param_template = get_route_param_template(route_text, param)
                comment = find_comment(descriptor.input_type + '.' + param,
                                       prefix='') or ''

                dl_items.append(
                    nodes.definition_list_item(
                        '',
                        nodes.term('', '', nodes.literal('', param_template)),
                        nodes.definition('', nodes.paragraph(text=comment)),
                    ))

            result += [nodes.definition_list('', *dl_items)]

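        # For GET routes, input fields not bound in the URI template are documented as query parameters.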
        if route_options.get:
            query_param_fields = []
            for field in input_descriptor.field:
                if field.json_name not in route_params:
                    query_param_fields.append(field)

            if query_param_fields:
                dl_items = []
                for field in query_param_fields:
                    field_symbol = descriptor.input_type + '.' + field.name

                    comment_node = nodes.section()
                    comment = find_comment(field_symbol, prefix='')
                    if comment:
                        for child in produce_nodes(self.state, comment):
                            comment_node += child

                    dl_items.append(
                        nodes.definition_list_item(
                            '',
                            nodes.term('', '',
                                       nodes.literal('', field.json_name)),
                            nodes.definition('', comment_node),
                        ))
                result += [
                    nodes.rubric('', 'Query Parameters'),
                    nodes.definition_list('', *dl_items),
                ]

        return result
Example No. 50
    def run(self):
        env = self.state.document.settings.env

        # Parse the content of the directive recursively
        node = nodes.Element()
        node.document = self.state.document
        self.state.nested_parse(self.content, self.content_offset, node)

        brief_nodes = []
        output_nodes = []
        positional_params = []
        required_params = {}
        optional_params = {}

        for child in node:
            if isinstance(child, CMakeParamNode):
                if child["positional"]:
                    positional_params.append(child)
                elif child["required"]:
                    required_params[child["name"]] = child
                else:
                    optional_params[child["name"]] = child
            elif isinstance(child, CMakeBriefNode):
                par = nodes.paragraph()
                self.state.nested_parse(child['content'], self.content_offset,
                                        par)
                brief_nodes.append(par)
            else:
                output_nodes.append(child)

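        # Helpers that emit one usage line per parameter form (multi, single, option, special).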
        def render_required(paramnode):
            if paramnode["multi"]:
                sl.append(" " * 5 + paramnode['name'] + ' ' +
                          paramnode['argname'] + '1 [' + paramnode['argname'] +
                          '2 ...]\n')
            if paramnode["single"]:
                sl.append(" " * 5 + paramnode['name'] + ' ' +
                          paramnode['argname'] + '\n')
            if paramnode["option"]:
                sl.append(" " * 5 + paramnode['name'] + '\n')
            if paramnode["special"]:
                sl.append(" " * 5 + paramnode['argname'] + '\n')

        def render_optional(paramnode):
            if paramnode["multi"]:
                sl.append(' ' * 4 + '[' + paramnode['name'] + ' ' +
                          paramnode['argname'] + '1 [' + paramnode['argname'] +
                          '2 ...]' + ']\n')
            if paramnode["single"]:
                sl.append(" " * 4 + '[' + paramnode['name'] + ' ' +
                          paramnode['argname'] + ']\n')
            if paramnode["option"]:
                sl.append(" " * 4 + '[' + paramnode['name'] + ']\n')
            if paramnode["special"]:
                sl.append(" " * 4 + '[' + paramnode['argname'] + ']\n')

        # Build the content of the box
        sl = [self.arguments[0] + '(\n']

        for paramnode in positional_params:
            if paramnode["required"]:
                render_required(paramnode)
            else:
                render_optional(paramnode)

        for rp, paramnode in required_params.items():
            render_required(paramnode)
        for op, paramnode in optional_params.items():
            render_optional(paramnode)

        sl.append(")\n")
        lb = nodes.literal_block(''.join(sl), ''.join(sl))
        brief_nodes.append(lb)

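        # Definition list describing each parameter; its directive content is parsed recursively.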
        dl = nodes.definition_list()
        for paramnode in chain(positional_params, required_params.values(),
                               optional_params.values()):
            dli = nodes.definition_list_item()
            dl += dli

            dlit = nodes.term(text=paramnode["name"])
            dli += dlit

            dlic = nodes.definition()
            dli += dlic
            self.state.nested_parse(paramnode['content'], self.content_offset,
                                    dlic)

        # add the parameter list to the output
        brief_nodes.append(dl)

        return brief_nodes + output_nodes
Example No. 51
    def run(self):
        env = self.state.document.settings.env
        objects = env.domaindata['std']['objects']
        gloss_entries = env.temp_data.setdefault('gloss_entries', set())
        node = addnodes.glossary()
        node.document = self.state.document

        # This directive implements a custom format of the reST definition list
        # that allows multiple lines of terms before the definition.  This is
        # easy to parse since we know that the contents of the glossary *must
        # be* a definition list.

        # first, collect single entries
        entries = []
        in_definition = True
        was_empty = True
        messages = []
        for line, (source, lineno) in zip(self.content, self.content.items):
            # empty line -> add to last definition
            if not line:
                if in_definition and entries:
                    entries[-1][1].append('', source, lineno)
                was_empty = True
                continue
            # unindented line -> a term
            if line and not line[0].isspace():
                # first term of definition
                if in_definition:
                    if not was_empty:
                        messages.append(self.state.reporter.system_message(
                            2, 'glossary term must be preceded by empty line',
                            source=source, line=lineno))
                    entries.append(([(line, source, lineno)], ViewList()))
                    in_definition = False
                # second term and following
                else:
                    if was_empty:
                        messages.append(self.state.reporter.system_message(
                            2, 'glossary terms must not be separated by empty '
                            'lines', source=source, line=lineno))
                    if entries:
                        entries[-1][0].append((line, source, lineno))
                    else:
                        messages.append(self.state.reporter.system_message(
                            2, 'glossary seems to be misformatted, check '
                        'indentation', source=source, line=lineno))
            else:
                if not in_definition:
                    # first line of definition, determines indentation
                    in_definition = True
                    indent_len = len(line) - len(line.lstrip())
                if entries:
                    entries[-1][1].append(line[indent_len:], source, lineno)
                else:
                    messages.append(self.state.reporter.system_message(
                        2, 'glossary seems to be misformatted, check '
                    'indentation', source=source, line=lineno))
            was_empty = False

        # now, parse all the entries into a big definition list
        items = []
        for terms, definition in entries:
            termtexts = []
            termnodes = []
            system_messages = []
            ids = []
            for line, source, lineno in terms:
                # parse the term with inline markup
                res = self.state.inline_text(line, lineno)
                system_messages.extend(res[1])

                # get a text-only representation of the term and register it
                # as a cross-reference target
                tmp = nodes.paragraph('', '', *res[0])
                termtext = tmp.astext()
                new_id = 'term-' + nodes.make_id(termtext)
                if new_id in gloss_entries:
                    new_id = 'term-' + str(len(gloss_entries))
                gloss_entries.add(new_id)
                ids.append(new_id)
                objects['term', termtext.lower()] = env.docname, new_id
                termtexts.append(termtext)
                # add an index entry too
                indexnode = addnodes.index()
                indexnode['entries'] = [('single', termtext, new_id, 'main')]
                termnodes.append(indexnode)
                termnodes.extend(res[0])
                termnodes.append(addnodes.termsep())
            # make a single "term" node with all the terms, separated by termsep
            # nodes (remove the dangling trailing separator)
            term = nodes.term('', '', *termnodes[:-1])
            term['ids'].extend(ids)
            term['names'].extend(ids)
            term += system_messages

            defnode = nodes.definition()
            self.state.nested_parse(definition, definition.items[0][1], defnode)

            items.append((termtexts,
                          nodes.definition_list_item('', term, defnode)))

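        # Honour the :sorted: option by ordering entries on their first term text.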
        if 'sorted' in self.options:
            items.sort(key=lambda x:
                       unicodedata.normalize('NFD', x[0][0].lower()))

        dlist = nodes.definition_list()
        dlist['classes'].append('glossary')
        dlist.extend(item[1] for item in items)
        node += dlist
        return messages + [node]
Example No. 52
    def contribute_property(self, prop_list, prop_key, prop):
        prop_item = nodes.definition_list_item('', nodes.term('', prop_key))
        prop_list.append(prop_item)

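        # Build the classifier text from the type plus any min/max value or length bounds.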
        prop_type = prop.get('Type')
        classifier = prop_type
        if prop.get('MinValue'):
            classifier += _(' from %s') % prop.get('MinValue')
        if prop.get('MaxValue'):
            classifier += _(' up to %s') % prop.get('MaxValue')
        if prop.get('MinLength'):
            classifier += _(' from length %s') % prop.get('MinLength')
        if prop.get('MaxLength'):
            classifier += _(' up to length %s') % prop.get('MaxLength')
        prop_item.append(nodes.classifier('', classifier))

        definition = nodes.definition()
        prop_item.append(definition)

        if not prop.get('Implemented', True):
            para = nodes.inline('', _('Not implemented.'))
            warning = nodes.note('', para)
            definition.append(warning)
            return

        description = prop.get('Description')
        if description:
            para = nodes.paragraph('', description)
            definition.append(para)

        if prop.get('Required'):
            para = nodes.paragraph('', _('Required property.'))
        elif prop.get('Default'):
            para = nodes.paragraph(
                '',
                _('Optional property, defaults to "%s".') %
                prop.get('Default'))
        else:
            para = nodes.paragraph('', _('Optional property.'))
        definition.append(para)

        if prop.get('AllowedPattern'):
            para = nodes.paragraph(
                '',
                _('Value must match pattern: %s') % prop.get('AllowedPattern'))
            definition.append(para)

        if prop.get('AllowedValues'):
            allowed = [
                str(a) for a in prop.get('AllowedValues') if a is not None
            ]
            para = nodes.paragraph(
                '',
                _('Allowed values: %s') % ', '.join(allowed))
            definition.append(para)

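        # Recurse into the nested schema of Map properties and Lists of maps.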
        sub_schema = None
        if prop.get('Schema') and prop_type == 'Map':
            para = nodes.emphasis('', _('Map properties:'))
            definition.append(para)
            sub_schema = prop.get('Schema')

        elif prop_type == 'List' and prop.get('Schema', {}).get('Schema'):
            para = nodes.emphasis('',
                                  _('List contains maps with the properties:'))
            definition.append(para)
            sub_schema = prop.get('Schema').get('Schema')

        if sub_schema:
            sub_prop_list = nodes.definition_list()
            definition.append(sub_prop_list)
            for sub_prop_key in sorted(sub_schema.keys()):
                sub_prop = sub_schema[sub_prop_key]
                self.contribute_property(sub_prop_list, sub_prop_key, sub_prop)
Example No. 53
def process_item_nodes(app, doctree, fromdocname):
    """
    This function should be triggered upon the ``doctree-resolved`` event.

    Replace all ItemList nodes with a list of the collected items.
    Augment each item with a backlink to the original location.

    """
    env = app.builder.env

    if sphinx_version < '1.6.0':
        try:
            env.traceability_collection.self_test(fromdocname)
        except TraceabilityException as err:
            report_warning(env, err, fromdocname)
        except MultipleTraceabilityExceptions as errs:
            for err in errs.iter():
                report_warning(env, err, err.get_document())

    # Processing of the item-link items.
    for node in doctree.traverse(ItemLink):
        # The ItemLink node has no final representation, so is removed from the tree
        node.replace_self([])

    # Item matrix:
    # Create table with related items, printing their target references.
    # Only source and target items matching respective regexp shall be included
    for node in doctree.traverse(ItemMatrix):
        showcaptions = not node['nocaptions']
        source_ids = env.traceability_collection.get_items(node['source'])
        target_ids = env.traceability_collection.get_items(node['target'])
        top_node = create_top_node(node['title'])
        table = nodes.table()
        tgroup = nodes.tgroup()
        left_colspec = nodes.colspec(colwidth=5)
        right_colspec = nodes.colspec(colwidth=5)
        tgroup += [left_colspec, right_colspec]
        tgroup += nodes.thead(
            '',
            nodes.row(
                '', nodes.entry('', nodes.paragraph('', node['sourcetitle'])),
                nodes.entry('', nodes.paragraph('', node['targettitle']))))
        tbody = nodes.tbody()
        tgroup += tbody
        table += tgroup

        relationships = node['type']
        if not relationships:
            relationships = env.traceability_collection.iter_relations()

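        # Coverage statistics: a source item counts as covered once at least one target reference is added to its row.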
        count_total = 0
        count_covered = 0

        for source_id in source_ids:
            source_item = env.traceability_collection.get_item(source_id)
            count_total += 1
            covered = False
            row = nodes.row()
            left = nodes.entry()
            left += make_internal_item_ref(app, node, fromdocname, source_id,
                                           showcaptions)
            right = nodes.entry()
            for relationship in relationships:
                if REGEXP_EXTERNAL_RELATIONSHIP.search(relationship):
                    for target_id in source_item.iter_targets(relationship):
                        right += make_external_item_ref(
                            app, target_id, relationship)
                        covered = True
            for target_id in target_ids:
                if env.traceability_collection.are_related(
                        source_id, relationships, target_id):
                    right += make_internal_item_ref(app, node, fromdocname,
                                                    target_id, showcaptions)
                    covered = True
            if covered:
                count_covered += 1
            row += left
            row += right
            tbody += row

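        # Compute the coverage percentage, guarding against an empty source set.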
        try:
            percentage = int(100 * count_covered / count_total)
        except ZeroDivisionError:
            percentage = 0
        disp = 'Statistics: {cover} out of {total} covered: {pct}%'.format(
            cover=count_covered, total=count_total, pct=percentage)
        if node['stats']:
            p_node = nodes.paragraph()
            txt = nodes.Text(disp)
            p_node += txt
            top_node += p_node

        top_node += table
        node.replace_self(top_node)

    # Item attribute matrix:
    # Create table with items, printing their attribute values.
    for node in doctree.traverse(ItemAttributesMatrix):
        docname, lineno = get_source_line(node)
        showcaptions = not node['nocaptions']
        item_ids = env.traceability_collection.get_items(
            node['filter'],
            sortattributes=node['sort'],
            reverse=node['reverse'])
        top_node = create_top_node(node['title'])
        table = nodes.table()
        tgroup = nodes.tgroup()
        colspecs = [nodes.colspec(colwidth=5)]
        hrow = nodes.row('', nodes.entry('', nodes.paragraph('', '')))
        for attr in node['attributes']:
            colspecs.append(nodes.colspec(colwidth=5))
            p_node = nodes.paragraph()
            if attr in app.config.traceability_attribute_to_string:
                attrstr = app.config.traceability_attribute_to_string[attr]
            else:
                report_warning(
                    env,
                    'Traceability: attribute {attr} cannot be translated to string'
                    .format(attr=attr), docname, lineno)
                attrstr = attr
            p_node += nodes.Text(attrstr)
            hrow.append(nodes.entry('', p_node))
        tgroup += colspecs
        tgroup += nodes.thead('', hrow)
        tbody = nodes.tbody()
        for item_id in item_ids:
            item = env.traceability_collection.get_item(item_id)
            row = nodes.row()
            cell = nodes.entry()
            cell += make_internal_item_ref(app, node, fromdocname, item_id,
                                           showcaptions)
            row += cell
            for attr in node['attributes']:
                cell = nodes.entry()
                p_node = nodes.paragraph()
                txt = item.get_attribute(attr)
                p_node += nodes.Text(txt)
                cell += p_node
                row += cell
            tbody += row
        tgroup += tbody
        table += tgroup
        top_node += table
        node.replace_self(top_node)

    # Item 2D matrix:
    # Create table with related items, printing their target references.
    # Only source and target items matching respective regexp shall be included
    for node in doctree.traverse(Item2DMatrix):
        source_ids = env.traceability_collection.get_items(node['source'])
        target_ids = env.traceability_collection.get_items(node['target'])
        top_node = create_top_node(node['title'])
        table = nodes.table()
        tgroup = nodes.tgroup()
        colspecs = [nodes.colspec(colwidth=5)]
        hrow = nodes.row('', nodes.entry('', nodes.paragraph('', '')))
        for source_id in source_ids:
            colspecs.append(nodes.colspec(colwidth=5))
            src_cell = make_internal_item_ref(app, node, fromdocname,
                                              source_id, False)
            hrow.append(nodes.entry('', src_cell))
        tgroup += colspecs
        tgroup += nodes.thead('', hrow)
        tbody = nodes.tbody()
        for target_id in target_ids:
            row = nodes.row()
            tgt_cell = nodes.entry()
            tgt_cell += make_internal_item_ref(app, node, fromdocname,
                                               target_id, False)
            row += tgt_cell
            for source_id in source_ids:
                cell = nodes.entry()
                p_node = nodes.paragraph()
                if env.traceability_collection.are_related(
                        source_id, node['type'], target_id):
                    txt = node['hit']
                else:
                    txt = node['miss']
                p_node += nodes.Text(txt)
                cell += p_node
                row += cell
            tbody += row
        tgroup += tbody
        table += tgroup
        top_node += table
        node.replace_self(top_node)

    # Item list:
    # Create list with target references. Only items matching list regexp
    # shall be included
    for node in doctree.traverse(ItemList):
        item_ids = env.traceability_collection.get_items(
            node['filter'], node['attributes'])
        showcaptions = not node['nocaptions']
        top_node = create_top_node(node['title'])
        ul_node = nodes.bullet_list()
        for i in item_ids:
            bullet_list_item = nodes.list_item()
            p_node = nodes.paragraph()
            p_node.append(
                make_internal_item_ref(app, node, fromdocname, i,
                                       showcaptions))
            bullet_list_item.append(p_node)
            ul_node.append(bullet_list_item)
        top_node += ul_node
        node.replace_self(top_node)

    # Item tree:
    # Create list with target references. Only items matching list regexp
    # shall be included
    for node in doctree.traverse(ItemTree):
        top_item_ids = env.traceability_collection.get_items(node['top'])
        showcaptions = not node['nocaptions']
        top_node = create_top_node(node['title'])
        ul_node = nodes.bullet_list()
        ul_node.set_class('bonsai')
        for i in top_item_ids:
            if is_item_top_level(env, i, node['top'],
                                 node['top_relation_filter']):
                ul_node.append(
                    generate_bullet_list_tree(app, env, node, fromdocname, i,
                                              showcaptions))
        top_node += ul_node
        node.replace_self(top_node)

    # Resolve item cross references (from ``item`` role)
    for node in doctree.traverse(PendingItemXref):
        # Create a dummy reference to be used if target reference fails
        new_node = make_refnode(app.builder, fromdocname, fromdocname,
                                'ITEM_NOT_FOUND', node[0].deepcopy(),
                                node['reftarget'] + '??')
        # If target exists, try to create the reference
        item_info = env.traceability_collection.get_item(node['reftarget'])
        if item_info:
            if item_info.is_placeholder():
                docname, lineno = get_source_line(node)
                report_warning(
                    env,
                    'Traceability: cannot link to %s, item is not defined' %
                    item_info.get_id(), docname, lineno)
            else:
                try:
                    new_node = make_refnode(app.builder, fromdocname,
                                            item_info.docname,
                                            item_info.node['refid'],
                                            node[0].deepcopy(),
                                            node['reftarget'])
                except NoUri:
                    # ignore if no URI can be determined, e.g. for LaTeX output :(
                    pass

        else:
            docname, lineno = get_source_line(node)
            report_warning(
                env, 'Traceability: item %s not found' % node['reftarget'],
                docname, lineno)

        node.replace_self(new_node)

    # Item: replace item nodes, with admonition, list of relationships
    for node in doctree.traverse(Item):
        docname, lineno = get_source_line(node)
        currentitem = env.traceability_collection.get_item(node['id'])
        showcaptions = not node['nocaptions']
        header = currentitem.get_id()
        if currentitem.caption:
            header += ' : ' + currentitem.caption
        top_node = create_top_node(header)
        par_node = nodes.paragraph()
        dl_node = nodes.definition_list()
        if app.config.traceability_render_attributes_per_item:
            if currentitem.iter_attributes():
                li_node = nodes.definition_list_item()
                dt_node = nodes.term()
                txt = nodes.Text('Attributes')
                dt_node.append(txt)
                li_node.append(dt_node)
                for attr in currentitem.iter_attributes():
                    dd_node = nodes.definition()
                    p_node = nodes.paragraph()
                    if attr in app.config.traceability_attribute_to_string:
                        attrstr = app.config.traceability_attribute_to_string[
                            attr]
                    else:
                        report_warning(
                            env,
                            'Traceability: attribute {attr} cannot be translated to string'
                            .format(attr=attr), docname, lineno)
                        attrstr = attr
                    txt = nodes.Text('{attr}: {value}'.format(
                        attr=attrstr, value=currentitem.get_attribute(attr)))
                    p_node.append(txt)
                    dd_node.append(p_node)
                    li_node.append(dd_node)
                dl_node.append(li_node)
        if app.config.traceability_render_relationship_per_item:
            for rel in env.traceability_collection.iter_relations():
                tgts = currentitem.iter_targets(rel)
                if tgts:
                    li_node = nodes.definition_list_item()
                    dt_node = nodes.term()
                    if rel in app.config.traceability_relationship_to_string:
                        relstr = app.config.traceability_relationship_to_string[
                            rel]
                    else:
                        report_warning(
                            env,
                            'Traceability: relation {rel} cannot be translated to string'
                            .format(rel=rel), docname, lineno)
                        relstr = rel
                    txt = nodes.Text(relstr)
                    dt_node.append(txt)
                    li_node.append(dt_node)
                    for tgt in tgts:
                        dd_node = nodes.definition()
                        p_node = nodes.paragraph()
                        if REGEXP_EXTERNAL_RELATIONSHIP.search(rel):
                            link = make_external_item_ref(app, tgt, rel)
                        else:
                            link = make_internal_item_ref(
                                app, node, fromdocname, tgt, showcaptions)
                        p_node.append(link)
                        dd_node.append(p_node)
                        li_node.append(dd_node)
                    dl_node.append(li_node)
        par_node.append(dl_node)
        top_node.append(par_node)
        # Note: content should be displayed during read of RST file, as it contains other RST objects
        node.replace_self(top_node)
Example No. 54
    def contribute_property(self, prop_list, prop_key, prop, upd_para=None):
        prop_item = nodes.definition_list_item(
            '', nodes.term('', prop_key))
        prop_list.append(prop_item)

        prop_item.append(nodes.classifier('', prop.type))

        definition = nodes.definition()
        prop_item.append(definition)

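        # Emit the support status information ahead of the description.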
        self._status_str(prop.support_status, definition)

        if not prop.implemented:
            para = nodes.paragraph('', _('Not implemented.'))
            note = nodes.note('', para)
            definition.append(note)
            return

        if prop.description:
            para = nodes.paragraph('', prop.description)
            definition.append(para)

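        # Reuse the caller's update paragraph when recursing into sub-properties; otherwise derive it from the property flags.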
        if upd_para is not None:
            definition.append(upd_para)
        else:
            if prop.update_allowed:
                upd_para = nodes.paragraph(
                    '', _('Can be updated without replacement.'))
                definition.append(upd_para)
            elif prop.immutable:
                upd_para = nodes.paragraph('', _('Updates are not supported. '
                                                 'Resource update will fail on'
                                                 ' any attempt to update this '
                                                 'property.'))
                definition.append(upd_para)
            else:
                upd_para = nodes.paragraph('', _('Updates cause replacement.'))
                definition.append(upd_para)

        if prop.default is not None:
            para = nodes.paragraph('', _('Defaults to "%s".') % prop.default)
            definition.append(para)

        for constraint in prop.constraints:
            para = nodes.paragraph('', str(constraint))
            definition.append(para)

        sub_schema = None
        if prop.schema and prop.type == properties.Schema.MAP:
            para = nodes.paragraph()
            emph = nodes.emphasis('', _('Map properties:'))
            para.append(emph)
            definition.append(para)
            sub_schema = prop.schema

        elif prop.schema and prop.type == properties.Schema.LIST:
            para = nodes.paragraph()
            emph = nodes.emphasis('', _('List contents:'))
            para.append(emph)
            definition.append(para)
            sub_schema = prop.schema

        if sub_schema:
            sub_prop_list = nodes.definition_list()
            definition.append(sub_prop_list)
            for sub_prop_key, sub_prop in sorted(sub_schema.items(),
                                                 self.cmp_prop):
                if sub_prop.support_status.status != support.HIDDEN:
                    self.contribute_property(
                        sub_prop_list, sub_prop_key, sub_prop, upd_para)
Example No. 55
    def run(self):
        # type: () -> List[nodes.Node]
        node = addnodes.glossary()
        node.document = self.state.document

        # This directive implements a custom format of the reST definition list
        # that allows multiple lines of terms before the definition.  This is
        # easy to parse since we know that the contents of the glossary *must
        # be* a definition list.

        # first, collect single entries
        entries = []  # type: List[Tuple[List[Tuple[unicode, unicode, int]], ViewList]]
        in_definition = True
        was_empty = True
        messages = []
        for line, (source, lineno) in zip(self.content, self.content.items):
            # empty line -> add to last definition
            if not line:
                if in_definition and entries:
                    entries[-1][1].append('', source, lineno)
                was_empty = True
                continue
            # unindented line -> a term
            if line and not line[0].isspace():
                # enable comments
                if line.startswith('.. '):
                    continue
                # first term of definition
                if in_definition:
                    if not was_empty:
                        messages.append(
                            self.state.reporter.system_message(
                                2,
                                'glossary term must be preceded by empty line',
                                source=source,
                                line=lineno))
                    entries.append(([(line, source, lineno)], ViewList()))
                    in_definition = False
                # second term and following
                else:
                    if was_empty:
                        messages.append(
                            self.state.reporter.system_message(
                                2,
                                'glossary terms must not be separated by empty '
                                'lines',
                                source=source,
                                line=lineno))
                    if entries:
                        entries[-1][0].append((line, source, lineno))
                    else:
                        messages.append(
                            self.state.reporter.system_message(
                                2, 'glossary seems to be misformatted, check '
                                'indentation',
                                source=source,
                                line=lineno))
            else:
                if not in_definition:
                    # first line of definition, determines indentation
                    in_definition = True
                    indent_len = len(line) - len(line.lstrip())
                if entries:
                    entries[-1][1].append(line[indent_len:], source, lineno)
                else:
                    messages.append(
                        self.state.reporter.system_message(
                            2, 'glossary seems to be misformatted, check '
                            'indentation',
                            source=source,
                            line=lineno))
            was_empty = False

        # now, parse all the entries into a big definition list
        items = []
        for terms, definition in entries:
            termtexts = []
            termnodes = []
            system_messages = []  # type: List[unicode]
            for line, source, lineno in terms:
                parts = split_term_classifiers(line)
                # parse the term with inline markup
                # classifiers (parts[1:]) will not be shown on doctree
                textnodes, sysmsg = self.state.inline_text(parts[0], lineno)

                # use first classifier as a index key
                term = make_glossary_term(self.env, textnodes, parts[1],
                                          source, lineno)
                term.rawsource = line
                system_messages.extend(sysmsg)
                termtexts.append(term.astext())
                termnodes.append(term)

            termnodes.extend(system_messages)

            defnode = nodes.definition()
            if definition:
                self.state.nested_parse(definition, definition.items[0][1],
                                        defnode)
            termnodes.append(defnode)
            items.append(
                (termtexts, nodes.definition_list_item('', *termnodes)))

        if 'sorted' in self.options:
            items.sort(
                key=lambda x: unicodedata.normalize('NFD', x[0][0].lower()))

        dlist = nodes.definition_list()
        dlist['classes'].append('glossary')
        dlist.extend(item[1] for item in items)
        node += dlist
        return messages + [node]
Example No. 56
def process_item_nodes(app, doctree, fromdocname):
    """
    This function should be triggered upon the ``doctree-resolved`` event.

    Replace all ItemList nodes with a list of the collected items.
    Augment each item with a backlink to the original location.

    """
    env = app.builder.env

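    # All collected item IDs in natural sort order, used by every matrix, list and tree below.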
    all_item_ids = sorted(env.traceability_all_items, key=naturalsortkey)

    # Item matrix:
    # Create table with related items, printing their target references.
    # Only source and target items matching respective regexp shall be included
    for node in doctree.traverse(ItemMatrix):
        table = nodes.table()
        tgroup = nodes.tgroup()
        left_colspec = nodes.colspec(colwidth=5)
        right_colspec = nodes.colspec(colwidth=5)
        tgroup += [left_colspec, right_colspec]
        tgroup += nodes.thead(
            '',
            nodes.row('', nodes.entry('', nodes.paragraph('', 'Source')),
                      nodes.entry('', nodes.paragraph('', 'Target'))))
        tbody = nodes.tbody()
        tgroup += tbody
        table += tgroup

        for source_id in all_item_ids:
            source_item = env.traceability_all_items[source_id]
            # placeholders don't end up in any item-matrix (less duplicate warnings for missing items)
            if source_item['placeholder'] is True:
                continue
            if re.match(node['source'], source_id):
                row = nodes.row()
                left = nodes.entry()
                left += make_internal_item_ref(app, node, fromdocname,
                                               source_id)
                right = nodes.entry()
                for relationship in node['type']:
                    if REGEXP_EXTERNAL_RELATIONSHIP.search(relationship):
                        for target_id in source_item[relationship]:
                            right += make_external_item_ref(
                                app, target_id, relationship)
                for target_id in all_item_ids:
                    target_item = env.traceability_all_items[target_id]
                    # placeholders don't end up in any item-matrix (less duplicate warnings for missing items)
                    if target_item['placeholder'] is True:
                        continue
                    if (re.match(node['target'], target_id) and are_related(
                            env, source_id, target_id, node['type'])):
                        right += make_internal_item_ref(
                            app, node, fromdocname, target_id)
                row += left
                row += right
                tbody += row

        node.replace_self(table)

    # Item list:
    # Create list with target references. Only items matching list regexp
    # shall be included
    for node in doctree.traverse(ItemList):
        ul_node = nodes.bullet_list()
        for i in all_item_ids:
            # placeholders don't end up in any item-list (less duplicate warnings for missing items)
            if env.traceability_all_items[i]['placeholder'] is True:
                continue
            if re.match(node['filter'], i):
                bullet_list_item = nodes.list_item()
                p_node = nodes.paragraph()
                p_node.append(make_internal_item_ref(app, node, fromdocname,
                                                     i))
                bullet_list_item.append(p_node)
                ul_node.append(bullet_list_item)

        node.replace_self(ul_node)

    # Item tree:
    # Create list with target references. Only items matching list regexp
    # shall be included
    for node in doctree.traverse(ItemTree):
        ul_node = nodes.bullet_list()
        ul_node.set_class('bonsai')
        for i in all_item_ids:
            # placeholders don't end up in any item-tree (less duplicate warnings for missing items)
            if env.traceability_all_items[i]['placeholder'] is True:
                continue
            if re.match(node['top'], i):
                if is_item_top_level(env, i, node['top'],
                                     node['top_relation_filter']):
                    ul_node.append(
                        generate_bullet_list_tree(app, env, node, fromdocname,
                                                  i))
        node.replace_self(ul_node)

    # Resolve item cross references (from ``item`` role)
    for node in doctree.traverse(PendingItemXref):
        # Create a dummy reference to be used if target reference fails
        new_node = make_refnode(app.builder, fromdocname, fromdocname,
                                'ITEM_NOT_FOUND', node[0].deepcopy(),
                                node['reftarget'] + '??')
        # If target exists, try to create the reference
        if node['reftarget'] in env.traceability_all_items:
            item_info = env.traceability_all_items[node['reftarget']]
            if item_info['placeholder'] is True:
                report_warning(
                    env,
                    'Traceability: cannot link to %s, item is not defined' %
                    item_info['id'], fromdocname, get_source_line(node))
            else:
                try:
                    new_node = make_refnode(app.builder, fromdocname,
                                            item_info['docname'],
                                            item_info['target']['refid'],
                                            node[0].deepcopy(),
                                            node['reftarget'])
                except NoUri:
                    # ignore if no URI can be determined, e.g. for LaTeX output :(
                    pass

        else:
            report_warning(
                env, 'Traceability: item %s not found' % node['reftarget'],
                fromdocname, get_source_line(node))

        node.replace_self(new_node)

    # Item: replace item nodes, with admonition, list of relationships
    for node in doctree.traverse(Item):
        currentitem = env.traceability_all_items[node['id']]
        cont = nodes.container()
        admon = nodes.admonition()
        title = nodes.title()
        header = currentitem['id']
        if currentitem['caption']:
            header += ' : ' + currentitem['caption']
        txt = nodes.Text(header)
        title.append(txt)
        admon.append(title)
        cont.append(admon)
        if app.config.traceability_render_relationship_per_item:
            par_node = nodes.paragraph()
            dl_node = nodes.definition_list()
            for rel in sorted(list(env.relationships.keys())):
                if rel in currentitem and currentitem[rel]:
                    li_node = nodes.definition_list_item()
                    dt_node = nodes.term()
                    if rel in app.config.traceability_relationship_to_string:
                        relstr = app.config.traceability_relationship_to_string[
                            rel]
                    else:
                        continue
                    txt = nodes.Text(relstr)
                    dt_node.append(txt)
                    li_node.append(dt_node)
                    for tgt in currentitem[rel]:
                        dd_node = nodes.definition()
                        p_node = nodes.paragraph()
                        if REGEXP_EXTERNAL_RELATIONSHIP.search(rel):
                            link = make_external_item_ref(app, tgt, rel)
                        else:
                            link = make_internal_item_ref(
                                app, node, fromdocname, tgt, True)
                        p_node.append(link)
                        dd_node.append(p_node)
                        li_node.append(dd_node)
                    dl_node.append(li_node)
            par_node.append(dl_node)
            cont.append(par_node)
        # Note: the item's content is rendered while the RST file is read, since it may contain other RST objects
        node.replace_self(cont)
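
# Hedged aside (not part of the extension above): a minimal, standalone sketch of
# the admonition plus relationship definition list that the Item handler builds,
# using plain docutils nodes. The item id, caption, relationships and label map
# below are made-up placeholders.
from docutils import nodes

item_id = 'SW_REQ_001'                                # hypothetical item id
caption = 'The software shall log errors'             # hypothetical caption
relationships = {'fulfills': ['SYS_REQ_010', 'SYS_REQ_011']}
labels = {'fulfills': 'Fulfills'}                     # relationship-to-string map

cont = nodes.container()
admon = nodes.admonition()
admon.append(nodes.title(text='%s : %s' % (item_id, caption)))
cont.append(admon)

dl_node = nodes.definition_list()
for rel, targets in relationships.items():
    li_node = nodes.definition_list_item()
    li_node.append(nodes.term(text=labels[rel]))
    for tgt in targets:
        dd_node = nodes.definition()
        dd_node.append(nodes.paragraph(text=tgt))
        li_node.append(dd_node)
    dl_node.append(li_node)
cont.append(dl_node)

print(cont.pformat())   # inspect the resulting docutils node tree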
Exemplo n.º 57
0
Arquivo: std.py Projeto: th0/test2
    def run(self):
        env = self.state.document.settings.env
        node = addnodes.glossary()
        node.document = self.state.document

        # This directive implements a custom format of the reST definition list
        # that allows multiple lines of terms before the definition.  This is
        # easy to parse since we know that the contents of the glossary *must
        # be* a definition list.

        # first, collect single entries
        entries = []
        in_definition = True
        was_empty = True
        messages = []
        for line, (source, lineno) in zip(self.content, self.content.items):
            # empty line -> add to last definition
            if not line:
                if in_definition and entries:
                    entries[-1][1].append('', source, lineno)
                was_empty = True
                continue
            # unindented line -> a term
            if line and not line[0].isspace():
                # enable comments
                if line.startswith('.. '):
                    continue
                # first term of definition
                if in_definition:
                    if not was_empty:
                        messages.append(self.state.reporter.system_message(
                            2, 'glossary term must be preceded by empty line',
                            source=source, line=lineno))
                    entries.append(([(line, source, lineno)], ViewList()))
                    in_definition = False
                # second term and following
                else:
                    if was_empty:
                        messages.append(self.state.reporter.system_message(
                            2, 'glossary terms must not be separated by empty '
                            'lines', source=source, line=lineno))
                    if entries:
                        entries[-1][0].append((line, source, lineno))
                    else:
                        messages.append(self.state.reporter.system_message(
                            2, 'glossary seems to be misformatted, check '
                            'indentation', source=source, line=lineno))
            else:
                if not in_definition:
                    # first line of definition, determines indentation
                    in_definition = True
                    indent_len = len(line) - len(line.lstrip())
                if entries:
                    entries[-1][1].append(line[indent_len:], source, lineno)
                else:
                    messages.append(self.state.reporter.system_message(
                        2, 'glossary seems to be misformatted, check '
                        'indentation', source=source, line=lineno))
            was_empty = False

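        # At this point ``entries`` is a list of (terms, definition) pairs: each
        # ``terms`` is a list of (line, source, lineno) tuples, one per term line,
        # and ``definition`` is a ViewList holding the indented body lines.
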
        # now, parse all the entries into a big definition list
        items = []
        for terms, definition in entries:
            termtexts = []
            termnodes = []
            system_messages = []
            ids = []
            for line, source, lineno in terms:
                # parse the term with inline markup
                res = self.state.inline_text(line, lineno)
                system_messages.extend(res[1])

                # get a text-only representation of the term and register it
                # as a cross-reference target
                tmp = nodes.paragraph('', '', *res[0])
                tmp.source = source
                tmp.line = lineno
                new_id, termtext, new_termnodes = \
                    make_termnodes_from_paragraph_node(env, tmp)
                ids.append(new_id)
                termtexts.append(termtext)
                termnodes.extend(new_termnodes)

            term = make_term_from_paragraph_node(termnodes, ids)
            term += system_messages

            defnode = nodes.definition()
            if definition:
                self.state.nested_parse(definition, definition.items[0][1],
                                        defnode)

            items.append((termtexts,
                          nodes.definition_list_item('', term, defnode)))

        if 'sorted' in self.options:
            items.sort(key=lambda x:
                       unicodedata.normalize('NFD', x[0][0].lower()))

        dlist = nodes.definition_list()
        dlist['classes'].append('glossary')
        dlist.extend(item[1] for item in items)
        node += dlist
        return messages + [node]
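
# Hedged aside on the ``:sorted:`` branch above: entries are ordered by the
# NFD-normalized, lowercased text of their first term, so accented terms sort
# next to their unaccented forms. A tiny standalone illustration (terms made up):
import unicodedata

terms = ['Zope', 'Émigré', 'apple', 'eclair']
terms.sort(key=lambda t: unicodedata.normalize('NFD', t.lower()))
print(terms)   # ['apple', 'eclair', 'Émigré', 'Zope']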
Exemplo n.º 58
0
    def generate_properties(self, node, properties):
        """
        Add the properties to the node
        """
        for name, property in properties.items():
            default = property.get('default')
            type = property.get('type')

            info = '**type:** :cfy:datatype:`{}`'.format(type) if type else ''

            if default is not None:
                if default != '':
                    info += ' **default:** ``{}``'.format(property['default'])
            elif property.get('required', True):
                info += ' **required**'

            try:
                description = property['description']
            except KeyError:
                if type in {
                        'string',
                        'boolean',
                        'list',
                        'integer',
                        None,
                        }:
                    # only custom defined types are allowed to not have a
                    # description
                    self.state.document.settings.env.app.warn(
                        '{type} property {name} has no description'.format(
                            type=self.arguments[0],
                            name=name,
                        ))
                description = ''

            lines = ViewList(prepare_docstring(
                info + '\n\n' + description + '\n\n'))

            term = nodes.term('', name)
            definition = nodes.definition()
            self.state.nested_parse(
                    lines,
                    self.content_offset + 4,
                    definition,
                    )

            if type not in [
                    'string',
                    'boolean',
                    'list',
                    'integer',
                    ]:
                # Try to get the nested properties of the type
                data_type = types.get('data_types', {}).get(type)
                if data_type:
                    sub_props = nodes.definition_list()
                    definition.append(sub_props)
                    self.generate_properties(
                            sub_props,
                            data_type['properties']
                            )

            node.append(nodes.definition_list_item(
                '',
                term,
                definition,
                ))
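
# Hedged sketch (assumed shapes, not taken from any plugin) of the data that
# ``generate_properties`` consumes: ``properties`` maps each property name to a
# dict with optional ``type``, ``default``, ``required`` and ``description``
# keys, while a surrounding ``types`` mapping supplies ``data_types`` whose
# nested ``properties`` drive the recursive definition lists.
types = {
    'data_types': {
        'my.datatypes.Endpoint': {                    # hypothetical custom type
            'properties': {
                'host': {'type': 'string', 'description': 'Host name.'},
                'port': {'type': 'integer', 'default': 8080,
                         'description': 'TCP port.'},
            },
        },
    },
}
properties = {
    'endpoint': {'type': 'my.datatypes.Endpoint',
                 'description': 'Connection endpoint.'},
    'retries': {'type': 'integer', 'default': 3,
                'description': 'Number of retry attempts.'},
}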
Exemplo n.º 59
0
    def run(self):
        content = []

        # First, add a short description supplied by the directive.
        text = '\n'.join(self.content)
        text_node = nodes.paragraph(rawsource=text)
        # Parse the directive contents.
        self.state.nested_parse(self.content, self.content_offset, text_node)
        content.append(text_node)

        klass_name = self.arguments[0]
        klass = get_class(klass_name)

        definition_list = nodes.definition_list()

        # Now, go over the class and interrogate all of its schema properties.
        # ``__class_schema_nodes__`` is assumed to hold the colander SchemaNode
        # objects declared on the class.
        for attribute in klass.__class_schema_nodes__:
            list_item = nodes.definition_list_item()

            # TODO: parse reStructuredText from the nodes themselves.
            term = nodes.term(text=attribute.name)

            # Add two classifiers: one for the type and one for whether the
            # attribute is required or optional.
            node_type = nodes.classifier(text=attribute.typ.__class__.__name__)
            required_text = 'Optional'
            if attribute.required:
                required_text = 'Required'
            required = nodes.classifier(text=required_text)

            # Set up the description, adding in full stops if needed.
            definition = nodes.definition()
            description_text = attribute.title
            if not attribute.title.endswith('.'):
                description_text += '.'
            description_text += ' ' + attribute.description
            if not description_text.endswith('.'):
                description_text += '.'

            description = nodes.paragraph(text=description_text)

            definition += description

            if attribute.default != colander.null:
                # There is a default set. Add it.
                if isinstance(attribute.default, bool):
                    # Convert boolean's str() output to lowercase.
                    # Why? In yaml it must be lowercase, so having it in the
                    # docs as uppercase is confusing to users.
                    default_text = "Default value: %s" % str(
                        attribute.default).lower()
                else:
                    default_value = str(attribute.default)
                    if len(default_value) == 0:
                        default_value = "''"
                    default_text = "Default value: %s" % default_value
                default = nodes.paragraph(text=default_text)

                definition += default

            list_item += term
            list_item += node_type
            list_item += required
            list_item += definition

            definition_list += list_item

        content.append(definition_list)

        content.append(nodes.paragraph(text="Original class: %s" % klass))

        return content
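
# Hedged aside on the default-value branch above: ``str()`` renders Python
# booleans as ``True``/``False`` while YAML expects lowercase, hence the
# ``.lower()`` call; empty-string defaults are shown as ``''``. Standalone
# illustration with made-up default values:
def format_default(value):
    if isinstance(value, bool):
        return 'Default value: %s' % str(value).lower()
    return 'Default value: %s' % (str(value) or "''")

print(format_default(True))     # Default value: true
print(format_default(''))       # Default value: ''
print(format_default('auto'))   # Default value: auto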
Exemplo n.º 60
0
    def run(self):
        env = self.state.document.settings.env

        # generate the linkback node for this option
        targetid = "option-%s" % self.options['config']
        targetnode = nodes.target('', '', ids=[targetid])

        # Each option will be outputted as a single-item definition list
        # (just like it was doing before we used this extension)
        dl = nodes.definition_list()
        dl['classes'].append('mrjob-opt')
        dli = nodes.definition_list_item()

        term = nodes.term()

        # config option shall be bold
        if 'config' in self.options:
            cfg = self.options['config']
            term.append(nodes.strong(cfg, cfg))
            if 'switch' in self.options:
                term.append(nodes.Text(' (', ' ('))

        # switch shall be comma-separated literals
        if 'switch' in self.options:
            switches = self.options['switch'].split(', ')
            for i, s in enumerate(switches):
                if i > 0:
                    term.append(nodes.Text(', ', ', '))
                term.append(nodes.literal(s, s))
            if 'config' in self.options:
                term.append(nodes.Text(')', ')'))

        dli.append(term)

        # classifier is either plain text or a link to some more docs, so parse
        # its contents
        classifier = nodes.classifier()
        type_nodes, messages = self.state.inline_text(
            self.options.get('type', ''), self.lineno)

        classifier.extend(type_nodes)
        dli.append(classifier)

        # definition holds the description
        defn = nodes.definition()

        # add a default if any
        default_nodes = []
        if 'default' in self.options:
            default_par = nodes.paragraph()
            default_par.append(nodes.strong('Default: ', 'Default: '))
            textnodes, messages = self.state.inline_text(
                self.options['default'], self.lineno)
            default_nodes = textnodes
            default_par.extend(textnodes)
            defn.append(default_par)

        # parse the description like a nested block (see
        # sphinx.compat.make_admonition)
        desc_par = nodes.paragraph()
        self.state.nested_parse(self.content, self.content_offset, desc_par)
        defn.append(desc_par)

        dli.append(defn)
        dl.append(dli)

        if not hasattr(env, 'optionlist_all_options'):
            env.optionlist_all_options = []
            env.optionlist_indexed_options = {}

        # store info for the optionlist traversal to find
        info = {
            'docname': env.docname,
            'lineno': self.lineno,
            'options': self.options,
            'content': self.content,
            'target': targetnode,
            'type_nodes': type_nodes,
            'default_nodes': default_nodes,
        }
        env.optionlist_all_options.append(info)
        env.optionlist_indexed_options[self.options['config']] = info

        return [targetnode, dl]
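
# Hedged aside (hypothetical option data): a standalone sketch of the term built
# above, i.e. the config key in bold followed by its command-line switches as
# comma-separated literals in parentheses.
from docutils import nodes

cfg = 'jobconf'                      # hypothetical config option name
switches = ['--jobconf', '-D']       # hypothetical switches

term = nodes.term()
term.append(nodes.strong(cfg, cfg))
term.append(nodes.Text(' ('))
for i, s in enumerate(switches):
    if i > 0:
        term.append(nodes.Text(', '))
    term.append(nodes.literal(s, s))
term.append(nodes.Text(')'))

print(term.pformat())   # inspect the assembled term node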