Example #1
0
def toctree_directive(dirname, arguments, options, content, lineno,
                      content_offset, block_text, state, state_machine):
    """Render a toctree-style admonition: a bullet list of document links.

    Content lines that are blank or look like options (leading ':') are
    skipped.  Each remaining line becomes either a resolved reference —
    preferring the docstring's recorded title — or, when resolution
    fails, a named reference left for later :ref: resolution.
    """
    box = nodes.admonition()
    box['classes'] += ['admonition-toctree']
    box += nodes.title('', 'Toctree')

    body = nodes.paragraph('')
    box += body

    link_list = nodes.bullet_list()
    body += link_list

    for raw in content:
        target = raw.strip()
        if not target or target.startswith(':'):
            continue

        try:
            uri, name = resolve_name(target, state.inliner)
            display = name
            # Prefer the docstring's own title when one is recorded.
            try:
                doc = models.Docstring.on_site.get(name=name)
                if doc.title:
                    display = doc.title
            except models.Docstring.DoesNotExist:
                pass
            link = nodes.reference('', display, refuri=uri)
        except ValueError:
            # Unresolvable now: emit a named reference to be resolved later.
            link = nodes.reference('', target, name=target,
                                   refname=':ref:`%s`' % target)

        link_list += nodes.list_item('', nodes.paragraph('', '', link))

    return [box]
Example #2
0
    def _dialect_node(self):
        """Register this dialect and return its introductory section node.

        The section contains a support blurb, a "DBAPI Support" title and
        a paragraph holding ``self.bullets``, which DBAPI sub-sections
        append to later (hence the registration in ``self._dialects``).
        """
        self._dialects[self.dialect_name] = self

        parsed = self._parse_content()
        self.database_name = parsed['name']

        self.bullets = nodes.bullet_list()

        intro = "Support for the %s database." % parsed['name']
        blurb = "The following dialect/DBAPI options are available.  "\
                "Please refer to individual DBAPI sections for connect information."

        section = nodes.section(
            '',
            nodes.paragraph('', '', nodes.Text(intro, intro)),
            nodes.title("DBAPI Support", "DBAPI Support"),
            nodes.paragraph('', '', nodes.Text(blurb, blurb), self.bullets),
            ids=["dialect-%s" % self.dialect_name],
        )

        return [section]
Example #3
0
 def run(self):
     """Build the node tree for a version* directive (e.g. ``versionadded``).

     Produces a paragraph labelled "<label>: <explanation>", where the
     label comes from ``versionlabels`` for this directive name and the
     explanation comes from the optional second argument and/or the
     directive body.
     """
     node = nodes.paragraph()
     node['classes'] = ['versionadded']
     node.document = self.state.document
     set_source_info(self, node)
     node['type'] = self.name
     node['version'] = self.arguments[0]
     # Label template (e.g. "New in version %s") filled with the version.
     text = versionlabels[self.name] % self.arguments[0]
     if len(self.arguments) == 2:
         # Optional inline explanation supplied as a second argument.
         inodes, messages = self.state.inline_text(self.arguments[1],
                                                   self.lineno + 1)
         para = nodes.paragraph(self.arguments[1], '', *inodes)
         set_source_info(self, para)
         node.append(para)
     else:
         messages = []
     if self.content:
         # Directive body is parsed into the node as additional children.
         self.state.nested_parse(self.content, self.content_offset, node)
     if len(node):
         if isinstance(node[0], nodes.paragraph) and node[0].rawsource:
             # Re-wrap the first paragraph's children in a translatable
             # inline so i18n extracts the whole sentence.
             content = nodes.inline(node[0].rawsource, translatable=True)
             content.source = node[0].source
             content.line = node[0].line
             content += node[0].children
             node[0].replace_self(nodes.paragraph('', '', content))
         node[0].insert(0, nodes.inline('', '%s: ' % text,
                                        classes=['versionmodified']))
     else:
         # No explanation at all: emit just "<label>."
         para = nodes.paragraph('', '', nodes.inline('', '%s.' % text, classes=['versionmodified']))
         node.append(para)
     language = languages.get_language(self.state.document.settings.language_code,
                                       self.state.document.reporter)
     language.labels.update(versionlabels)
     return [node] + messages
Example #4
0
def print_arg_list(data, nested_content):
    """Build a docutils option list describing positional arguments.

    ``data['args']`` supplies the argument records; *nested_content* may
    override or extend each argument's description.  Returns ``None``
    when there is nothing to render.
    """
    overrides = map_nested_definitions(nested_content)
    entries = []
    if 'args' in data:
        for arg in data['args']:
            arg_name = arg['name']
            # Start from the parser-supplied help text, if any.
            description = [nodes.paragraph(text=arg['help'])] if arg['help'] else []
            description = apply_definition(overrides, description, arg_name)
            if not description:
                description.append(nodes.paragraph(text='Undocumented'))
            entries.append(nodes.option_list_item(
                '',
                nodes.option_group('', nodes.option_string(text=arg_name)),
                nodes.description('', *description),
            ))
    return nodes.option_list('', *entries) if entries else None
 def run(self):
   oldStdout, sys.stdout = sys.stdout, StringIO()
   try:
     exec '\n'.join(self.content)
     return [nodes.paragraph(text = sys.stdout.getvalue())]
   except Exception, e:
     return [nodes.error(None, nodes.paragraph(text = "Unable to execute python code at %s:%d:" % (basename(self.src), self.srcline)), nodes.paragraph(text = str(e)))]
Example #6
0
 def run(self):
     """Build a "deprecated since X, removed in Y" versionmodified node.

     The first two arguments are the deprecation and removal versions; an
     optional third argument supplies an inline explanation.  Registers
     the change with the build environment before returning.
     """
     node = addnodes.versionmodified()
     node.document = self.state.document
     node["type"] = "deprecated-removed"
     version = (self.arguments[0], self.arguments[1])
     node["version"] = version
     # Label template expects the (deprecated, removed) version pair.
     text = self._label % version
     if len(self.arguments) == 3:
         # Optional inline explanation after the two versions.
         inodes, messages = self.state.inline_text(self.arguments[2], self.lineno + 1)
         para = nodes.paragraph(self.arguments[2], "", *inodes)
         node.append(para)
     else:
         messages = []
     if self.content:
         self.state.nested_parse(self.content, self.content_offset, node)
         if isinstance(node[0], nodes.paragraph) and node[0].rawsource:
             # Re-wrap the first paragraph in a translatable inline so
             # i18n extracts the whole sentence.
             content = nodes.inline(node[0].rawsource, translatable=True)
             content.source = node[0].source
             content.line = node[0].line
             content += node[0].children
             node[0].replace_self(nodes.paragraph("", "", content))
         if not SPHINX11:
             # NOTE(review): label insertion is skipped under SPHINX11 —
             # presumably Sphinx 1.1 adds it itself; confirm.
             node[0].insert(0, nodes.inline("", "%s: " % text, classes=["versionmodified"]))
     elif not SPHINX11:
         para = nodes.paragraph("", "", nodes.inline("", "%s." % text, classes=["versionmodified"]))
         if len(node):
             node.insert(0, para)
         else:
             node.append(para)
     env = self.state.document.settings.env
     env.note_versionchange("deprecated", version[0], node, self.lineno)
     return [node] + messages
Example #7
0
    def formatComponent(self, moduleName, name, X):
        """Build a documentation section for component *X*: class statement
        title, docstring, inbox/outbox tables and (optionally) methods.
        """
        # no class bases available from repository scanner 
        CLASSNAME = self.formatClassStatement(name, X.bases)
        CLASSDOC = self.docString(X.doc)
        INBOXES = self.boxes(name,"Inboxes", X.inboxes)
        OUTBOXES = self.boxes(name,"Outboxes", X.outboxes)
        
        if self.config.includeMethods and len(X.listAllFunctions()):
            # Method docs are prefixed with a boxed warning steering users
            # toward the inbox/outbox interface instead of direct calls.
            METHODS = [ nodes.section('',
                          nodes.title('', 'Methods defined here'),
                          boxright('',
                              nodes.paragraph('', '',
                                  nodes.strong('', nodes.Text("Warning!"))
                              ),
                              nodes.paragraph('', '',
                                  nodes.Text("You should be using the inbox/outbox interface, not these methods (except construction). This documentation is designed as a roadmap as to their functionalilty for maintainers and new component developers.")
                              ),
                          ),
                          * self.formatMethodDocStrings(name,X)
                        )
                      ]
        else:
            METHODS = []

        # Assemble: title, docstring children, box tables, methods,
        # inherited methods — in that fixed order.
        return \
                nodes.section('',
                * [ nodes.title('', CLASSNAME, ids=["symbol-"+name]) ]
                  + CLASSDOC
                  + [ INBOXES, OUTBOXES ]
                  + METHODS
                  + [ self.formatInheritedMethods(name,X) ]
                )
Example #8
0
def test_NodeMatcher():
    """NodeMatcher should filter traversal by node class and/or attributes."""
    document = nodes.document(None, None)
    for child in (nodes.paragraph('', 'Hello'),
                  nodes.paragraph('', 'Sphinx', block=1),
                  nodes.paragraph('', 'World', block=2),
                  nodes.literal_block('', 'blah blah blah', block=3)):
        document += child

    # a single node class
    assert len(document.traverse(NodeMatcher(nodes.paragraph))) == 3

    # several node classes at once
    assert len(document.traverse(NodeMatcher(nodes.paragraph,
                                             nodes.literal_block))) == 4

    # an exact attribute value
    assert len(document.traverse(NodeMatcher(block=1))) == 1

    # any value for the attribute
    assert len(document.traverse(NodeMatcher(block=Any))) == 3

    # class and attribute combined
    assert len(document.traverse(NodeMatcher(nodes.paragraph, block=Any))) == 2

    # nothing matches
    assert len(document.traverse(NodeMatcher(nodes.title))) == 0
Example #9
0
    def run(self):
        """Build a three-column table (package, version, reason) from the
        directive body.

        Each content line has the form ``<cabal-file-path>: <reason>``;
        the reason is parsed as reST and the package name/version are read
        from the cabal file.  Raises RuntimeError for a line with no reason.
        """
        self.assert_has_content()

        packages = []
        for line in self.content:
            (pkg_path, _, reason) = line.partition(':')
            if len(reason) == 0:
                raise RuntimeError("Missing reason for inclusion of package %s"
                                   % pkg_path)

            # Parse reason
            from docutils.statemachine import ViewList
            reason_vl = ViewList(initlist=[reason.strip()])
            reason_node = nodes.paragraph()
            self.state.nested_parse(reason_vl, 0, reason_node)
            packages.append((pkg_path, reason_node))

        # Create column headers for table
        header = [ nodes.inline(text=h)
                   for h in ["Package", "Version", "Reason for inclusion"] ]
        package_list = [header]

        # One row per package, sorted by cabal-file path.
        for (pkg_path, reason) in sorted(packages):
            (pkg_name, pkg_version) = read_cabal_file(pkg_path)
            cells = [ nodes.paragraph(text=pkg_name),
                      nodes.inline(text=pkg_version),
                      reason ]
            package_list.append(cells)

        table = build_table_from_list(package_list, [20, 20, 40])
        table['classes'].append('longtable')
        return [table]
Example #10
0
  def test_ids_generated(self):
    """Round-trip a small doctree through the rst writer and verify that
    classes and ids emerge as ``.. class::`` / ``.. _target:`` directives.
    """
    from docutils import utils, nodes
    from docutils.core import publish_from_doctree
    doc = utils.new_document('<program>')
    docsect = nodes.section('')
    docsect['classes'] = ('c1 c2',)
    docsect['ids'] = ('my-test-id',)
    docsect['target-ids'] = ('my-test-id',)
    docsect.append(nodes.title('', '', nodes.Text('Title')))
    docsect.append(nodes.paragraph('', '', nodes.Text('some text.')))
    docsect.append(
      nodes.section(
        '',
        nodes.title('', '', nodes.Text('Sub-Title')),
        nodes.paragraph('', '', nodes.Text('some more text'))))
    doc.append(docsect)
    # Expected rst output, byte for byte.
    chk = '''\
.. class:: c1 c2

.. _`my-test-id`:

======
Title
======

some text.

---------
Sub-Title
---------

some more text
'''
    out = publish_from_doctree(doc, writer=rst.Writer())
    self.assertMultiLineEqual(out, chk)
Example #11
0
    def run(self):
        """Render a talent block: title, specs line, then the parsed body."""
        self.assert_has_content()

        # Outer container; extra CSS class comes from the optional :type:
        # option, defaulting to 'talent'.
        container = nodes.container(classes=['talent-container'])
        container['classes'].append(self.options.get('type') or 'talent')

        title = nodes.paragraph(text=self.options['name'], classes=['title'])

        # Optional requirements "[...]" and tags "(...)" joined on one line.
        req_part = '[' + format_dict(self.options['reqs']) + ']' if self.options.get('reqs') else ''
        tag_part = '(' + format_list(self.options['tags']) + ')' if self.options.get('tags') else ''
        spec_line = nodes.paragraph(text=' '.join([req_part, tag_part]), classes=['specs'])

        body = nodes.paragraph()
        self.state.nested_parse(self.content, self.content_offset, body)

        container += [title, spec_line, body]
        return [container]
def description_table(descriptions, widths, headers):
    """Build a docutils table: *headers* on top, one row per description.

    Non-string cell values are converted with ``str`` before insertion.
    """
    table = nodes.table()
    group = nodes.tgroup(cols=len(widths))
    table += group
    for width in widths:
        group += nodes.colspec(colwidth=width)

    # Header row.
    head = nodes.thead()
    header_row = nodes.row()
    for caption in headers:
        cell = nodes.entry()
        cell += nodes.paragraph(text=caption)
        header_row += cell
    head += header_row
    group += head

    # Body rows.
    body = nodes.tbody()
    for description in descriptions:
        current_row = nodes.row()
        for value in description:
            if not isinstance(value, basestring):
                value = str(value)
            holder = nodes.paragraph()
            holder += nodes.Text(value)
            cell = nodes.entry()
            cell += holder
            current_row += cell
        body += current_row
    group += body

    return table
Example #13
0
 def run(self):
     """Render directive options as a definition list.

     'kerk', 'predikant' and 'tags' become comma-separated link lists;
     every other option (except 'added', which is skipped) becomes plain
     text.
     """
     listing = nodes.definition_list()
     link_templates = {'kerk': SERMONCHURHLINK,
                       'predikant': SERMONREFERENTLINK,
                       'tags': SERMONTAGLINK}
     for option in sorted(self.options.keys()):
         if option == 'added':
             continue
         listing += nodes.term(text=option.capitalize())
         definition = nodes.definition()
         paragraph = nodes.paragraph()
         if option in link_templates:
             template = link_templates[option]
             parts = [part.strip() for part in self.options[option].split(',')]
             for index, part in enumerate(parts):
                 paragraph += nodes.reference(refuri=template % part, text=part)
                 # Comma separator between links, but not after the last.
                 if index != len(parts) - 1:
                     paragraph += nodes.inline(text=', ')
         else:
             paragraph += nodes.inline(text=self.options[option])
         definition += paragraph
         listing += definition
     return [listing]
Example #14
0
 def _create_content(self):
     """
     Generate table node tree based on data stored in pending elements.

     Returns a 3-column table: action id, test step, expected result.
     Step/result cells fall back to an empty paragraph when the pending
     node for that action is missing.
     """
     row_nodes = []
     for action_id, action_nodes in sorted(self._actions_dict.items()):
         row_data = []
         # add action_id into row_data as 1st entry
         row_data.append([nodes.paragraph(text=str(action_id))])
         # for each action check if we have step and result pending node
         # this defines order of columns in resulting table
         for col_name in ('test_step', 'test_result'):
             if col_name in action_nodes:
                 row_data.append(action_nodes[col_name].details['nodes'])
             else:
                 row_data.append(nodes.paragraph())
         row_node = build_row(row_data)
         row_nodes.append(row_node)
     # Header row: empty corner cell, then the two column captions.
     headrow_data = [
         nodes.paragraph(),
         [nodes.paragraph(text="Step")],
         [nodes.paragraph(text="Expected Result")],
         ]
     table_node = build_table(row_nodes, [2, 44, 44], headrow_data)
     return table_node
Example #15
0
def synopsis_directive(name, arguments, options, content, lineno, content_offset, block_text, state, state_machine):
    """Build the synopsis section for the class currently being documented.

    Relies on ``env._kaa_current_class`` / ``_kaa_current_class_name``
    having been set by an earlier directive.
    """
    env = state.document.settings.env
    cls = env._kaa_current_class
    clsname = env._kaa_current_class_name
    env.currmodule, env.currclass = clsname.rsplit(".", 1)

    para = nodes.paragraph()

    section_synopsis = subsection(title="Synopsis")
    para.append(section_synopsis)

    state.nested_parse(content, 0, para)

    # Class hierarchy rendered into its own synopsis sub-block.
    syn = synopsis(title="Class Hierarchy")
    syn_para = nodes.paragraph(classes=["hierarchy"])
    section_synopsis.append(syn)
    append_class_hierarchy(syn_para, state, cls)
    syn.append(syn_para)

    # The final boolean presumably toggles auto-generation of each section
    # — confirm against append_synopsis_section.
    ci = env._kaa_class_info
    append_synopsis_section(state, section_synopsis, para, "Class Attributes", "attr", "classattrs" not in ci)
    append_synopsis_section(state, section_synopsis, para, "Methods", "meth", "automethods" not in ci)
    append_synopsis_section(state, section_synopsis, para, "Properties", "attr", "autoproperties" not in ci)
    #    append_synopsis_section(state, section_synopsis, para, 'Signals', 'attr', 'autosignals' not in ci)
    return [para]
Example #16
0
def build_toc(descinfo, env):
    """Return a desc table of contents node tree.

    One row per child: reference, em-dash separator, summary.  Returns
    ``None`` when there are no children.
    """
    child_ids = descinfo['children']
    if not child_ids:
        return None

    separator = EMDASH
    widest_name = 0
    widest_summary = 0
    body_rows = []
    for fullname, refid, summary in ichild_ids(child_ids, env):
        widest_name = max(widest_name, len(fullname))
        widest_summary = max(widest_summary, len(summary))
        body_rows.append(row(
            '',
            entry('', paragraph('', '', toc_ref(fullname, refid))),
            entry('', paragraph('', separator)),
            entry('', paragraph('', summary)),
        ))

    # Column widths: names and summaries get an error margin; the
    # separator column is exactly as wide as the separator (no padding).
    tgroup_node = tgroup('',
                         colspec(colwidth=widest_name + 2),
                         colspec(colwidth=len(separator)),
                         colspec(colwidth=widest_summary + 10),
                         tbody('', *body_rows),
                         cols=3)
    return TocTable('', tgroup_node, classes=['toc'])
Example #17
0
    def create_cross_table(self, app, docname, node, matrix, options):
        """Build a cross-reference table for *matrix*: primaries as rows,
        secondaries as columns, a checkmark wherever the pair is related.

        Returns a traceable_matrix_crosstable container wrapping the table.
        """
        table = nodes.table()
        table["classes"].append("traceables-crosstable")
        tgroup = nodes.tgroup(cols=len(matrix.secondaries), colwidths="auto")
        table += tgroup

        # Add column specifications.
        tgroup += nodes.colspec(colwidth=1)
        for column in matrix.secondaries:
            tgroup += nodes.colspec(colwidth=1)

        # Add heading row.
        thead = nodes.thead()
        tgroup += thead
        row = nodes.row()
        thead += row
        # Empty corner cell above the primary column.
        entry = nodes.entry()
        row += entry
        for secondary in matrix.secondaries:
            entry = nodes.entry()
            row += entry
            container = nodes.container()
            entry += container
            inline = nodes.inline()
            container += inline
            # NOTE(review): a paragraph nested inside an inline is unusual
            # docutils structure — kept as-is.
            paragraph = nodes.paragraph()
            inline += paragraph
            paragraph += secondary.make_reference_node(app.builder, docname)

        # Add table body.
        tbody = nodes.tbody()
        tgroup += tbody
        for primary in matrix.primaries:
            row = nodes.row()
            tbody += row
            entry = nodes.entry()
            row += entry
            paragraph = nodes.paragraph()
            entry += paragraph
            paragraph += primary.make_reference_node(app.builder, docname)

            # One cell per secondary; related pairs get a checkmark,
            # unrelated pairs get an empty cell.
            for is_related in matrix.get_boolean_row(primary):
                entry = nodes.entry()
                row += entry
                if is_related:
                    checkmark = traceable_checkmark()
                    entry += checkmark
                    checkmark += nodes.inline(u"\u2714", u"\u2714")
                else:
                    continue

        container = traceable_matrix_crosstable()
        container += table
        container["traceables-matrix"] = matrix
#        backward = matrix.backward_relationship.capitalize()
#        forward = matrix.forward_relationship.capitalize()
#        container["relationships"] = (forward, backward)
#        container["boolean_matrix"] = 0#boolean_matrix
#        container["secondaries"] = matrix.secondaries
        return container
Example #18
0
def print_opt_list(data, nested_content):
    """Build a docutils option list for the optional arguments in *data*.

    Each option shows its flags (with the default value appended unless
    suppressed), its help text (possibly overridden by *nested_content*)
    and any declared choices.  Returns ``None`` when there are no options.
    """
    overrides = map_nested_definitions(nested_content)
    rendered = []
    if 'options' in data:
        for opt in data['options']:
            flags = []
            description = [nodes.paragraph(text=opt['help'])] if opt['help'] else []
            show_default = (opt['default'] is not None
                            and opt['default'] != '==SUPPRESS==')
            for flag in opt['name']:
                declaration = [nodes.option_string(text=flag)]
                if show_default:
                    declaration += nodes.option_argument(
                        '', text='=' + str(opt['default']))
                flags.append(nodes.option('', *declaration))
                description = apply_definition(overrides, description, flag)
            if not description:
                description.append(nodes.paragraph(text='Undocumented'))
            if 'choices' in opt:
                description.append(nodes.paragraph(
                    text=('Possible choices: %s' % ', '.join([str(c) for c in opt['choices']]))))
            rendered.append(nodes.option_list_item(
                '', nodes.option_group('', *flags),
                nodes.description('', *description)))
    return nodes.option_list('', *rendered) if rendered else None
Example #19
0
 def run(self):
     """Build a versionmodified node for a version* directive.

     The label text comes from ``versionlabels`` for this directive name;
     the optional second argument and/or the directive body provide the
     explanation.  The change is registered with the build environment.
     """
     node = addnodes.versionmodified()
     node.document = self.state.document
     set_source_info(self, node)
     node['type'] = self.name
     node['version'] = self.arguments[0]
     # Label template (e.g. "New in version %s") filled with the version.
     text = versionlabels[self.name] % self.arguments[0]
     if len(self.arguments) == 2:
         # Optional inline explanation supplied as a second argument.
         inodes, messages = self.state.inline_text(self.arguments[1],
                                                   self.lineno+1)
         para = nodes.paragraph(self.arguments[1], '', *inodes)
         set_source_info(self, para)
         node.append(para)
     else:
         messages = []
     if self.content:
         self.state.nested_parse(self.content, self.content_offset, node)
     if len(node):
         if isinstance(node[0], nodes.paragraph) and node[0].rawsource:
             # Re-wrap the first paragraph's children in a translatable
             # inline so i18n extracts the whole sentence.
             content = nodes.inline(node[0].rawsource, translatable=True)
             content.source = node[0].source
             content.line = node[0].line
             content += node[0].children
             node[0].replace_self(nodes.paragraph('', '', content))
         node[0].insert(0, nodes.inline('', '%s: ' % text,
                                        classes=['versionmodified']))
     else:
         # No explanation at all: emit just "<label>."
         para = nodes.paragraph('', '',
                                nodes.inline('', '%s.' % text,
                                             classes=['versionmodified']))
         node.append(para)
     env = self.state.document.settings.env
     # XXX should record node.source as well
     env.note_versionchange(node['type'], node['version'], node, node.line)
     return [node] + messages
Example #20
0
def synopsis_directive(name, arguments, options, content, lineno,
                       content_offset, block_text, state, state_machine):
    """Build the synopsis section for the class named by the argument.

    Records the class on the build environment (for later directives) and
    assembles hierarchy, attribute, method, property and signal sections.
    """
    env = state.document.settings.env
    cls = get_class(arguments[0])
    env._kaa_current_class = cls
    env._kaa_current_class_name = clsname = arguments[0]
    env.currmodule, env.currclass = clsname.rsplit('.', 1)

    para = nodes.paragraph()

    section_synopsis = subsection(title='Synopsis')
    para.append(section_synopsis)

    state.nested_parse(content, 0, para)

    # Class hierarchy rendered into its own synopsis sub-block.
    syn = synopsis(title='Class Hierarchy')
    syn_para = nodes.paragraph(classes=['hierarchy'])
    section_synopsis.append(syn)
    append_class_hierarchy(syn_para, state, cls)
    syn.append(syn_para)

    # The final boolean presumably toggles auto-generation of each section
    # — confirm against append_synopsis_section.
    ci = env._kaa_class_info
    append_synopsis_section(state, section_synopsis, para, 'Class Attributes', 'attr', 'classattrs' not in ci)
    append_synopsis_section(state, section_synopsis, para, 'Methods', 'meth', 'automethods' not in ci)
    append_synopsis_section(state, section_synopsis, para, 'Properties', 'attr', 'autoproperties' not in ci)
    append_synopsis_section(state, section_synopsis, para, 'Signals', 'attr', 'autosignals' not in ci)
    return [para]
Example #21
0
def print_subcommand_list(data, nested_content):
    """Build a definition list documenting each subcommand in *data*.

    Every child gets its (possibly overridden) help text, a usage block
    and a recursively rendered body of args, options and nested
    subcommands.
    """
    overrides = map_nested_definitions(nested_content)
    rendered = []
    if 'children' in data:
        for child in data['children']:
            name = child['name']
            body = [nodes.paragraph(text=child['help'])] if child['help'] else []
            body = apply_definition(overrides, body, name)
            if not body:
                body.append(nodes.paragraph(text='Undocumented'))
            body.append(nodes.literal_block(text=child['usage']))
            # Recurse into the subcommand's own args/options/children.
            body.append(print_command_args_and_opts(
                print_arg_list(child, nested_content),
                print_opt_list(child, nested_content),
                text_from_rst(child.get('description', ""), is_rst=True),
                print_subcommand_list(child, nested_content),
            ))
            rendered.append(nodes.definition_list_item(
                '',
                nodes.term('', '', nodes.strong(text=name)),
                nodes.definition('', *body),
            ))
    return nodes.definition_list('', *rendered)
Example #22
0
 def _format_optional_arguments(self, parser_info):
     """Render ``parser_info['options']`` as a docutils option list.

     Each option shows its flags (with default values unless suppressed),
     its help text or 'Undocumented', and any declared choices.
     """
     assert 'options' in parser_info
     rendered = []
     for opt in parser_info['options']:
         flags = []
         show_default = (opt['default'] is not None
                         and opt['default'] != '==SUPPRESS==')
         for flag in opt['name']:
             declaration = [nodes.option_string(text=flag)]
             if show_default:
                 declaration += nodes.option_argument(
                     '', text='=' + str(opt['default']))
             flags.append(nodes.option('', *declaration))
         # Help text, falling back to an explicit "Undocumented" marker.
         if opt['help']:
             details = [nodes.paragraph(text=opt['help'])]
         else:
             details = [nodes.paragraph(text='Undocumented')]
         if 'choices' in opt:
             details.append(
                 nodes.paragraph(
                     text='Possible choices: ' + ', '.join(opt['choices'])))
         rendered.append(
             nodes.option_list_item(
                 '', nodes.option_group('', *flags),
                 nodes.description('', *details)))
     return nodes.option_list('', *rendered)
Example #23
0
def filebrief_replace_node(app, doctree, itemtype, items):
    """Build a bullet list linking every item in *items*.

    For functions, the reference is spliced into the signature node
    previously collected by filebrief_nodefuncmap so the whole signature
    is shown; classes are prefixed with "class ".
    """
    env = app.builder.env  # NOTE(review): env is unused here
    nodefuncmap = dict()

    if itemtype == "functions":
        nodefuncmap = filebrief_nodefuncmap(doctree)

    listnode = nodes.bullet_list()
    for refname, (docname, type, theid) in sorted(items.iteritems()):
        pnode   = nodes.paragraph()
        if itemtype == "classes":
            pnode   = nodes.paragraph("class ", "class ")
        refnode = nodes.reference(refname, refname, refdocname=docname, refid=theid)

        retnode = refnode
        cnode = nodefuncmap.get(theid)
        if cnode:
            # Replace the desc_name child of the collected signature with a
            # reference using the unqualified function name.
            (_, fname) = split_func_name(refname)
            refnode = nodes.reference(fname, fname, refdocname=docname, refid=theid)
            i = cnode[0].first_child_matching_class(addnodes.desc_name)
            cnode[0][i] = refnode
            cnode.children = cnode[0].children
            retnode = cnode
        pnode.append(retnode)
        listnode.append(pnode)
    return listnode
Example #24
0
def gen_table(columns, data):
    """Build a one-element table node list: a header row labelled by
    ``column.header()``, then one row per object in *data* whose cells
    are filled by ``column.data(obj)``.
    """
    table = nodes.table()
    group = nodes.tgroup(cols=len(columns))
    table += group
    for _ in columns:
        group += nodes.colspec(colwidth=1)

    # Header row.
    head = nodes.thead()
    group += head
    header_row = nodes.row()
    for column in columns:
        caption = column.header()
        holder = nodes.paragraph()
        holder += nodes.Text(caption, caption)
        cell = nodes.entry()
        cell += holder
        header_row += cell
    head += header_row

    # Body: one row per data object.
    body = nodes.tbody()
    group += body
    for obj in data:
        body_row = nodes.row()
        for column in columns:
            holder = nodes.paragraph()
            holder += column.data(obj)
            cell = nodes.entry()
            cell += holder
            body_row += cell
        body += body_row

    return [table]
    def run(self):
        """Build the node list for a bokeh-plot directive.

        Reads the plot source (from the argument file or the directive
        body — never both), optionally surrounds the plot node with the
        rendered source, and maintains a per-build temp dir used as a
        plot cache.
        """
        # filename *or* python code content, but not both
        if self.arguments and self.content:
            raise RuntimeError("bokeh-plot:: directive can't have both args and content")

        env = self.state.document.settings.env
        app = env.app

        # Lazily create (or re-create, if it vanished) the shared temp dir
        # used to cache generated plots across this build.
        if not hasattr(env, 'bokeh_plot_tmpdir'):
            env.bokeh_plot_tmpdir = mkdtemp()
            app.verbose("creating new temp dir for bokeh-plot cache: %s" % env.bokeh_plot_tmpdir)
        else:
            tmpdir = env.bokeh_plot_tmpdir
            if not exists(tmpdir) or not isdir(tmpdir):
                app.verbose("creating new temp dir for bokeh-plot cache: %s" % env.bokeh_plot_tmpdir)
                env.bokeh_plot_tmpdir = mkdtemp()
            else:
                app.verbose("using existing temp dir for bokeh-plot cache: %s" % env.bokeh_plot_tmpdir)

        # TODO (bev) verify that this is always the correct thing
        rst_source = self.state_machine.node.document['source']
        rst_dir = dirname(rst_source)
        rst_filename = basename(rst_source)

        # Unique anchor for this plot within the current document.
        target_id = "%s.bokeh-plot-%d" % (rst_filename, env.new_serialno('bokeh-plot'))
        target_node = nodes.target('', '', ids=[target_id])
        result = [target_node]

        try:
            source = self._get_source()
        except Exception:
            # Surface the failure in the rendered output instead of aborting.
            node = nodes.error(None,
                               nodes.paragraph(text="Unable to generate Bokeh plot at %s:%d:" % (basename(rst_source), self.lineno)),
                               nodes.paragraph(text=str(sys.exc_info()[1])))
            return [node]

        # Where to show the source code relative to the plot: above/below.
        source_position = self.options.get('source-position', 'below')

        if source_position == 'above':
            result += self._get_source_nodes(source)

        node = bokeh_plot()
        node['target_id'] = target_id
        node['source'] = source
        node['relpath'] = relpath(rst_dir, env.srcdir)
        node['rst_source'] = rst_source
        node['rst_lineno'] = self.lineno
        if 'alt' in self.options:
            node['alt'] = self.options['alt']
        if self.arguments:
            # Rebuild this document when the referenced plot file changes.
            node['path'] = self.arguments[0]
            env.note_dependency(node['path'])
        if len(self.arguments) == 2:
            node['symbol'] = self.arguments[1]
        result += [node]

        if source_position == 'below':
            result += self._get_source_nodes(source)

        return result
def test_parse_list_items(get_list):
    """parse_list_items flattens bullet-list paragraphs into strings."""
    parent, bullet_list = get_list

    # An empty parent parses to no items.
    assert parse_list_items(parent) == []

    parent.append(bullet_list)
    first_item = nodes.list_item()
    first_para = nodes.paragraph()
    bullet_list.append(first_item)
    # A list item with no paragraph child is rejected.
    with pytest.raises(ValueError):
        parse_list_items(parent)

    first_item.append(first_para)
    first_para.append(nodes.Text('test'))
    assert parse_list_items(parent) == ['test']

    # Multiple Text nodes inside one paragraph are joined with a space.
    second_item = nodes.list_item()
    second_para = nodes.paragraph()
    bullet_list.append(second_item)
    second_item.append(second_para)
    second_para.append(nodes.Text('test2'))
    second_para.append(nodes.Text('test3'))

    assert parse_list_items(parent) == ['test', 'test2 test3']
Example #27
0
def format_parser_error(name, error, filename, state, lineno, do_unicode_warning):
    """Build nodes reporting a failure to parse a doxygen xml file.

    Returns a warning admonition node followed by a system message
    emitted through the document reporter.
    """
    warning = '%s: Unable to parse xml file "%s". ' % (name, filename)
    explanation = "Reported error: %s. " % error

    # Optional extra paragraph explaining the common unicode root cause.
    unicode_explanation_text = ""
    extra_paragraphs = []
    if do_unicode_warning:
        dedented = textwrap.dedent(
            """
        Parsing errors are often due to unicode errors associated with the encoding of the original
        source files. Doxygen propagates invalid characters from the input source files to the
        output xml."""
        )
        unicode_explanation_text = dedented.strip().replace("\n", " ")
        extra_paragraphs = [nodes.paragraph("", "", nodes.Text(unicode_explanation_text))]

    admonition = nodes.warning(
        "",
        nodes.paragraph("", "", nodes.Text(warning)),
        nodes.paragraph("", "", nodes.Text(explanation)),
        *extra_paragraphs
    )
    reported = state.document.reporter.warning(
        warning + explanation + unicode_explanation_text, line=lineno
    )
    return [admonition, reported]
Example #28
0
 def run(self):
     """Build a combined 'deprecated-removed' versionmodified node.

     arguments[0] is the deprecation version, arguments[1] the removal
     version, and the optional arguments[2] an inline explanation.
     Returns the node plus any inline-parse system messages.
     """
     node = addnodes.versionmodified()
     node.document = self.state.document
     node['type'] = 'deprecated-removed'
     version = (self.arguments[0], self.arguments[1])
     node['version'] = version
     # Localize the label template, then fill in both version numbers.
     label = translators['sphinx'].gettext(self._label)
     text = label.format(deprecated=self.arguments[0], removed=self.arguments[1])
     if len(self.arguments) == 3:
         # Optional third argument: an inline-parsed explanation message.
         inodes, messages = self.state.inline_text(self.arguments[2],
                                                   self.lineno+1)
         para = nodes.paragraph(self.arguments[2], '', *inodes, translatable=False)
         node.append(para)
     else:
         messages = []
     if self.content:
         self.state.nested_parse(self.content, self.content_offset, node)
     if len(node):
         # Prepend the label to the first paragraph; wrap the paragraph's
         # content in an inline so the body stays translatable while the
         # label itself is not.
         if isinstance(node[0], nodes.paragraph) and node[0].rawsource:
             content = nodes.inline(node[0].rawsource, translatable=True)
             content.source = node[0].source
             content.line = node[0].line
             content += node[0].children
             node[0].replace_self(nodes.paragraph('', '', content, translatable=False))
         node[0].insert(0, nodes.inline('', '%s: ' % text,
                                        classes=['versionmodified']))
     else:
         # No explanation given: emit the label alone as a sentence.
         para = nodes.paragraph('', '',
                                nodes.inline('', '%s.' % text,
                                             classes=['versionmodified']),
                                translatable=False)
         node.append(para)
     # Record the change so it shows up in the version-change index.
     env = self.state.document.settings.env
     env.note_versionchange('deprecated', version[0], node, self.lineno)
     return [node] + messages
    def make_node(self, cls, element):
        """Convert an element-tree *element* into a docutils node of class *cls*.

        Text and tails are unescaped; fragments matching HTML_PLACEHOLDER_RE
        become raw-HTML nodes.  For block-level classes (members of
        HAVING_BLOCK_NODE) inline content is wrapped in a paragraph child.
        """
        node = cls()
        # Block-level containers may not hold bare inline/text children.
        having_block_node = cls in HAVING_BLOCK_NODE
        if element.text and element.text != "\n":
            text = self.unescape_char(element.text)
            if HTML_PLACEHOLDER_RE.search(text):
                node += nodes.raw(format='html', text=self.unescape_char(text, rawHtml=True))
            elif having_block_node:
                node += nodes.paragraph(text=text)
            else:
                node += nodes.Text(text)
        for child in element:
            subnode = self.visit(child)
            if having_block_node and isinstance(subnode, INLINE_NODES):
                # Inline children of a block container go into its first
                # paragraph (created lazily on the first inline child).
                all_nodes_is_in_paragraph = True
                if len(node) == 0:
                    node += nodes.paragraph()
                node[0] += subnode
            else:
                all_nodes_is_in_paragraph = False
                node += subnode

            # The tail is the text between this child and the next sibling.
            if child.tail and child.tail != "\n":
                tail = self.unescape_char(child.tail)
                if HTML_PLACEHOLDER_RE.search(tail):
                    # NOTE(review): unlike element.text above, the tail is not
                    # re-unescaped with rawHtml=True -- confirm intentional.
                    node += nodes.raw(format='html', text=tail)
                elif all_nodes_is_in_paragraph:
                    node[0] += nodes.Text(tail)
                elif having_block_node:
                    node += nodes.paragraph(text=tail)
                else:
                    node += nodes.Text(tail)

        return node
Example #30
0
def print_opt_list(data, nested_content):
    """Build an option list for each action group of a parsed command.

    :param data: parsed command description; may contain an
        'action_groups' list, each group with 'options', 'title' and
        'description' keys
    :param nested_content: directive body content supplying manual
        per-option definition overrides
    :return: list of dicts, one per action group, each with the built
        'options' node (or None when the group has none), its 'title'
        and its 'description'
    """
    definitions = map_nested_definitions(nested_content)
    nodes_list = []  # one entry per action group; the group title identifies it
    if 'action_groups' in data:
        for action_group in data['action_groups']:
            # Reset per group: a shared accumulator would leak earlier
            # groups' options into every later group's option list.
            items = []
            if 'options' in action_group:
                for opt in action_group['options']:
                    names = []
                    # Start the description with the help text, if any.
                    my_def = [nodes.paragraph(text=opt['help'])] if opt['help'] else []
                    for name in opt['name']:
                        option_declaration = [nodes.option_string(text=name)]
                        # Show a non-suppressed default as '=<default>'.
                        if opt['default'] is not None \
                                and opt['default'] != '==SUPPRESS==':
                            option_declaration += nodes.option_argument(
                                '', text='=' + str(opt['default']))
                        names.append(nodes.option('', *option_declaration))
                        # Directive-body definitions override/extend the help.
                        my_def = apply_definition(definitions, my_def, name)
                    if len(my_def) == 0 and 'choices' not in opt:
                        my_def.append(nodes.paragraph(text='Undocumented'))
                    if 'choices' in opt:
                        my_def.append(nodes.paragraph(
                            text=('Possible choices: %s' % ', '.join([str(c) for c in opt['choices']]))))
                    items.append(
                        nodes.option_list_item(
                            '', nodes.option_group('', *names),
                            nodes.description('', *my_def)))
            opts = nodes.option_list('', *items) if items else None
            nodes_list.append({'options': opts,
                               'title': action_group['title'],
                               'description': action_group['description']})
    return nodes_list
Example #31
0
    def contribute_property(self, prop_list, prop_key, prop):
        """Append a definition-list item describing *prop* (named
        *prop_key*) to *prop_list*, recursing into nested MAP/LIST schemas.
        """
        # py3 compatibility: sorted() no longer accepts a bare cmp function,
        # so wrap the comparator (cmp_to_key also exists on py2.7).  This
        # matches the sibling implementation that already uses cmp_to_key.
        from functools import cmp_to_key

        prop_item = nodes.definition_list_item('', nodes.term('', prop_key))
        prop_list.append(prop_item)

        prop_item.append(nodes.classifier('', prop.type))

        definition = nodes.definition()
        prop_item.append(definition)

        self._status_str(prop.support_status, definition)

        if not prop.implemented:
            para = nodes.paragraph('', _('Not implemented.'))
            note = nodes.note('', para)
            definition.append(note)
            return

        if prop.description:
            para = nodes.paragraph('', prop.description)
            definition.append(para)

        # Update policy: in-place update, immutable, or replacement.
        if prop.update_allowed:
            para = nodes.paragraph('',
                                   _('Can be updated without replacement.'))
            definition.append(para)
        elif prop.immutable:
            para = nodes.paragraph(
                '',
                _('Updates are not supported. '
                  'Resource update will fail on any '
                  'attempt to update this property.'))
            definition.append(para)
        else:
            para = nodes.paragraph('', _('Updates cause replacement.'))
            definition.append(para)

        # Requirement / default value.
        if prop.required:
            para = nodes.paragraph('', _('Required property.'))
        elif prop.default is not None:
            para = nodes.paragraph(
                '',
                _('Optional property, defaults to "%s".') % prop.default)
        else:
            para = nodes.paragraph('', _('Optional property.'))
        definition.append(para)

        for constraint in prop.constraints:
            para = nodes.paragraph('', str(constraint))
            definition.append(para)

        # MAP and LIST properties carry a nested schema to document.
        sub_schema = None
        if prop.schema and prop.type == properties.Schema.MAP:
            para = nodes.paragraph()
            emph = nodes.emphasis('', _('Map properties:'))
            para.append(emph)
            definition.append(para)
            sub_schema = prop.schema

        elif prop.schema and prop.type == properties.Schema.LIST:
            para = nodes.paragraph()
            emph = nodes.emphasis('', _('List contents:'))
            para.append(emph)
            definition.append(para)
            sub_schema = prop.schema

        if sub_schema:
            sub_prop_list = nodes.definition_list()
            definition.append(sub_prop_list)
            for sub_prop_key, sub_prop in sorted(sub_schema.items(),
                                                 key=cmp_to_key(self.cmp_prop)):
                self.contribute_property(sub_prop_list, sub_prop_key, sub_prop)
Example #32
0
def _note(*args, **kwargs):
    """Wrap a paragraph built from the given arguments in a note admonition."""
    return nodes.note('', nodes.paragraph(*args, **kwargs))
Example #33
0
    def contribute_property(self, parent, prop_key, prop, upd_para=None,
                            id_pattern_prefix=None, sub_prop=False):
        """Emit a documentation section for property *prop_key*/*prop* under
        *parent*, recursing into nested MAP/LIST schemas.

        :param upd_para: pre-built update-policy paragraph reused for
            sub-properties; built from the property itself when None
        :param id_pattern_prefix: pattern used to build section ids;
            defaults to '%s-prop'
        :param sub_prop: True when documenting a nested schema entry
        """
        if not id_pattern_prefix:
            id_pattern_prefix = '%s-prop'
        id_pattern = id_pattern_prefix + '-' + prop_key

        definition = self._section(parent, prop_key, id_pattern)

        self._status_str(prop.support_status, definition)

        if not prop.implemented:
            para = nodes.paragraph('', _('Not implemented.'))
            note = nodes.note('', para)
            definition.append(note)
            return

        # Scalar sub-properties state their requiredness explicitly.
        if sub_prop and prop.type != properties.Schema.LIST and prop.type\
                != properties.Schema.MAP:
            if prop.required:
                para = nodes.paragraph('', _('Required.'))
                definition.append(para)
            else:
                para = nodes.paragraph('', _('Optional.'))
                definition.append(para)

        if prop.description:
            para = nodes.paragraph('', prop.description)
            definition.append(para)

        # Renamed from 'type' to avoid shadowing the builtin.
        type_para = nodes.paragraph('', _('%s value expected.') % prop.type)
        definition.append(type_para)

        # Update policy: reuse the caller-supplied paragraph for nested
        # properties, otherwise derive it from the property itself.
        if upd_para is not None:
            definition.append(upd_para)
        else:
            if prop.update_allowed:
                upd_para = nodes.paragraph(
                    '', _('Can be updated without replacement.'))
                definition.append(upd_para)
            elif prop.immutable:
                upd_para = nodes.paragraph('', _('Updates are not supported. '
                                                 'Resource update will fail on'
                                                 ' any attempt to update this '
                                                 'property.'))
                definition.append(upd_para)
            else:
                upd_para = nodes.paragraph('', _('Updates cause replacement.'))
                definition.append(upd_para)

        if prop.default is not None:
            para = nodes.paragraph('', _('Defaults to "%s".') % prop.default)
            definition.append(para)

        for constraint in prop.constraints:
            para = nodes.paragraph('', str(constraint))
            definition.append(para)

        # MAP and LIST properties carry a nested schema to document.
        sub_schema = None
        if prop.schema and prop.type == properties.Schema.MAP:
            para = nodes.paragraph()
            emph = nodes.emphasis('', _('Map properties:'))
            para.append(emph)
            definition.append(para)
            sub_schema = prop.schema

        elif prop.schema and prop.type == properties.Schema.LIST:
            para = nodes.paragraph()
            emph = nodes.emphasis('', _('List contents:'))
            para.append(emph)
            definition.append(para)
            sub_schema = prop.schema

        if sub_schema:
            # Hidden sub-properties are skipped; visible ones are indented
            # under the parent via a block quote.
            for _key, _prop in sorted(sub_schema.items(),
                                      key=cmp_to_key(self.cmp_prop)):
                if _prop.support_status.status != support.HIDDEN:
                    indent = nodes.block_quote()
                    definition.append(indent)
                    self.contribute_property(
                        indent, _key, _prop, upd_para, id_pattern,
                        sub_prop=True)
Example #34
0
    def build_links_table(self, resource):
        """Build a 3-column table (Name, Method, Resource) of the links
        exposed by *resource*, covering child resources and related links.
        """
        is_list = 'is-list' in self.options

        table = nodes.table()

        tgroup = nodes.tgroup(cols=3)
        table += tgroup

        tgroup += nodes.colspec(colwidth=25)
        tgroup += nodes.colspec(colwidth=15)
        tgroup += nodes.colspec(colwidth=60)

        thead = nodes.thead()
        tgroup += thead
        append_row(thead, ['Name', 'Method', 'Resource'])

        tbody = nodes.tbody()
        tgroup += tbody

        request = DummyRequest()

        # List resources expose their list children; item resources their
        # item children.
        if is_list:
            child_resources = resource.list_child_resources
        else:
            child_resources = resource.item_child_resources

        names_to_resource = {}

        for child in child_resources:
            names_to_resource[child.name_plural] = (child, True)

        # For item resources backed by a model, fetch a sample object so
        # related links can be resolved against real data.
        if not is_list and resource.model:
            child_keys = {}
            create_fake_resource_path(request, resource, child_keys, True)
            obj = resource.get_queryset(request, **child_keys)[0]
        else:
            obj = None

        related_links = resource.get_related_links(request=request, obj=obj)

        for key, info in related_links.iteritems():
            if 'resource' in info:
                names_to_resource[key] = \
                    (info['resource'], info.get('list-resource', False))

        links = resource.get_links(child_resources, request=DummyRequest(),
                                   obj=obj)

        for linkname in sorted(links.iterkeys()):
            info = links[linkname]
            # Link names with no known resource fall back to this resource.
            child, is_child_link = \
                names_to_resource.get(linkname, (resource, is_list))

            paragraph = nodes.paragraph()
            paragraph += get_ref_to_resource(child, is_child_link)

            append_row(tbody,
                       [nodes.strong(text=linkname),
                        info['method'],
                        paragraph])

        return table
Example #35
0
    def build_fields_table(self, fields, required_fields={},
                           show_requirement_labels=False):
        def get_type_name(field_type):
            # We may be dealing with a forward-declared class.
            if isinstance(field_type, basestring) and field_type is not str:
                field_type = self.get_resource_class(field_type)

            if type(field_type) is list:
                return ([nodes.inline(text='List of ')] +
                        get_type_name(field_type[0]))
            elif type(field_type) is tuple:
                value_nodes = []

                for value in field_type:
                    if value_nodes:
                        value_nodes.append(nodes.inline(text=', '))

                    value_nodes.append(nodes.literal(text=value))

                return [nodes.inline(text='One of ')] + value_nodes
            elif (inspect.isclass(field_type) and
                  issubclass(field_type, WebAPIResource)):
                return [get_ref_to_resource(field_type, False)]
            elif field_type in self.type_mapping:
                return [nodes.inline(text=self.type_mapping[field_type])]
            else:
                print "Unknown type %s" % (field_type,)
                assert False

        table = nodes.table(classes=['resource-fields'])

        tgroup = nodes.tgroup(cols=3)
        table += tgroup

        tgroup += nodes.colspec(colwidth=15, classes=['field'])
        tgroup += nodes.colspec(colwidth=25, classes=['type'])
        tgroup += nodes.colspec(colwidth=60, classes=['description'])

        thead = nodes.thead()
        tgroup += thead
        append_row(thead, ['Field', 'Type', 'Description'])

        tbody = nodes.tbody()
        tgroup += tbody

        if isinstance(fields, dict):
            for field in sorted(fields.iterkeys()):
                info = fields[field]

                name_node = nodes.inline()
                name_node += nodes.strong(text=field)

                if show_requirement_labels:
                    if field in required_fields:
                        name_node += nodes.inline(text=" (required)")
                    else:
                        name_node += nodes.inline(text=" (optional)")

                type_node = nodes.inline()

                if info.get('supports_text_types'):
                    type_node += get_ref_to_doc('webapi2.0-text-fields',
                                                'Rich Text')
                else:
                    type_node += get_type_name(info['type'])

                description_node = parse_text(
                    self, info['description'],
                    where='%s field description' % field)

                if 'added_in' in info:
                    paragraph = nodes.paragraph()
                    paragraph += nodes.emphasis(
                        text='Added in %s\n' % info['added_in'],
                        classes=['field-versioning'])
                    description_node += paragraph

                if 'deprecated_in' in info:
                    paragraph = nodes.paragraph()
                    paragraph += nodes.emphasis(
                        text='Deprecated in %s\n' % info['deprecated_in'],
                        classes=['field-versioning'])
                    description_node += paragraph

                if 'removed_in' in info:
                    paragraph = nodes.paragraph()
                    paragraph += nodes.emphasis(
                        text='Removed in %s\n' % info['removed_in'],
                        classes=['field-versioning'])
                    description_node += paragraph

                append_row(tbody, [name_node, type_node, description_node])
        else:
            for field in sorted(fields):
                name = field

                if show_requirement_labels:
                    if field in required_fields:
                        name += " (required)"
                    else:
                        name += " (optional)"

                append_row(tbody, [name, "", ""])

        return table
Example #36
0
    def _render_service(self, service):
        """Render a service (title, description, one sub-section per HTTP
        method with schema attributes, accepted types and response renderer)
        into a docutils section node.
        """
        service_id = "service-%d" % self.env.new_serialno('service')
        service_node = nodes.section(ids=[service_id])

        title = '%s service at %s' % (service.name.title(), service.path)
        service_node += nodes.title(text=title)

        if service.description is not None:
            service_node += rst2node(trim(service.description))

        for method, view, args in service.definitions:
            if method == 'HEAD':
                # Skip head - this is essentially duplicating the get docs.
                continue
            method_id = '%s-%s' % (service_id, method)
            method_node = nodes.section(ids=[method_id])
            method_node += nodes.title(text=method)

            # Default to an empty docstring: previously, a string view with
            # no 'klass' in args left `docstring` unbound and crashed with
            # NameError at the later `docstring +=` / rst2node calls.
            docstring = ''
            if is_string(view):
                if 'klass' in args:
                    ob = args['klass']
                    view_ = getattr(ob, view.lower())
                    docstring = trim(view_.__doc__ or "") + '\n'
            else:
                docstring = trim(view.__doc__ or "") + '\n'

            if 'schema' in args:
                schema = args['schema']

                # Document validated attributes, grouped by location.
                attrs_node = nodes.inline()
                for location in ('header', 'querystring', 'body'):
                    attributes = schema.get_attributes(location=location)
                    if attributes:
                        attrs_node += nodes.inline(text='values in the %s' %
                                                   location)
                        location_attrs = nodes.bullet_list()

                        for attr in attributes:
                            temp = nodes.list_item()

                            # Get attribute data-type
                            if hasattr(attr, 'type'):
                                attr_type = attr.type
                            elif hasattr(attr, 'typ'):
                                attr_type = attr.typ.__class__.__name__
                            else:
                                attr_type = None

                            temp += nodes.strong(text=attr.name)
                            if attr_type is not None:
                                temp += nodes.inline(text=' (%s)' % attr_type)
                            if not attr.required or attr.description:
                                temp += nodes.inline(text=' - ')
                                if not attr.required:
                                    if attr.missing is not None:
                                        default = json.dumps(attr.missing)
                                        temp += nodes.inline(
                                            text='(default: %s) ' % default)
                                    else:
                                        temp += nodes.inline(
                                            text='(optional) ')
                                if attr.description:
                                    temp += nodes.inline(text=attr.description)

                            location_attrs += temp

                        attrs_node += location_attrs
                method_node += attrs_node

            # Validators may contribute their own documentation.
            for validator in args.get('validators', ()):
                if validator.__doc__ is not None:
                    docstring += trim(validator.__doc__)

            if 'accept' in args:
                accept = to_list(args['accept'])

                if callable(accept):
                    if accept.__doc__ is not None:
                        docstring += accept.__doc__.strip()
                else:
                    accept_node = nodes.strong(text='Accepted content types:')
                    node_accept_list = nodes.bullet_list()
                    accept_node += node_accept_list

                    for item in accept:
                        temp = nodes.list_item()
                        temp += nodes.inline(text=item)
                        node_accept_list += temp

                    method_node += accept_node

            node = rst2node(docstring)
            DocFieldTransformer(self).transform_all(node)
            if node is not None:
                method_node += node

            renderer = args['renderer']
            if renderer == 'simplejson':
                renderer = 'json'

            response = nodes.paragraph()

            response += nodes.strong(text='Response: %s' % renderer)
            method_node += response

            service_node += method_node

        return service_node
 def visit_td(self, node):
     """Open a table entry and hand back a fresh paragraph for its content."""
     entry = nodes.entry()
     self.append_node(entry)
     return nodes.paragraph()
Example #38
0
def row_col_maker(app, fromdocname, all_needs, need_info, need_key, make_ref=False, ref_lookup=False, prefix=''):
    """
    Creates and returns a column.

    :param app: current sphinx app
    :param fromdocname: current document
    :param all_needs: Dictionary of all need objects
    :param need_info: need_info object, which stores all related need data
    :param need_key: The key to access the needed data from need_info
    :param make_ref: If true, creates a reference for the given data in need_key
    :param ref_lookup: If true, it uses the data to lookup for a related need and uses its data to create the reference
    :param prefix: string, which is used as prefix for the text output
    :return: column object (nodes.entry)
    """
    row_col = nodes.entry(classes=['needs_' + need_key])
    para_col = nodes.paragraph()

    if need_key in need_info and need_info[need_key] is not None:
        # Normalize scalars to a one-element list.
        if not isinstance(need_info[need_key], (list, set)):
            data = [need_info[need_key]]
        else:
            data = need_info[need_key]

        # Hoisted out of the datum loop: the configured link option names
        # (forward and '_back') are loop-invariant.
        link_list = []
        for link_type in app.env.config.needs_extra_links:
            link_list.append(link_type["option"])
            link_list.append(link_type["option"] + '_back')

        for index, datum in enumerate(data):
            link_id = datum
            link_part = None

            if need_key in link_list and '.' in datum:
                # 'need_id.part_id' addresses a part of another need;
                # split once instead of twice.
                split_datum = datum.split('.')
                link_id = split_datum[0]
                link_part = split_datum[1]

            datum_text = prefix + str(datum)
            text_col = nodes.Text(datum_text, datum_text)
            if make_ref or ref_lookup:
                try:
                    ref_col = nodes.reference("", "")
                    if not ref_lookup:
                        # Link to the datum's anchor in its own document.
                        ref_col['refuri'] = app.builder.get_relative_uri(fromdocname, need_info['docname'])
                        ref_col['refuri'] += "#" + datum
                    else:
                        # Resolve the referenced need and link to it.
                        temp_need = all_needs[link_id]
                        ref_col['refuri'] = app.builder.get_relative_uri(fromdocname, temp_need['docname'])
                        ref_col['refuri'] += "#" + temp_need["id"]
                        if link_part is not None:
                            ref_col['refuri'] += '.' + link_part

                except KeyError:
                    # Unknown need id: fall back to plain text.
                    para_col += text_col
                else:
                    ref_col.append(text_col)
                    para_col += ref_col
            else:
                para_col += text_col

            # Separate multiple values with '; '.
            if index + 1 < len(data):
                para_col += nodes.emphasis("; ", "; ")

    row_col += para_col

    return row_col
 def render_paragraph_open(self, token: NestedTokens):
     """Create a paragraph node for *token* and render its children into it."""
     # Use the first child's raw content as the node's rawsource, if any.
     para = nodes.paragraph(
         token.children[0].content if token.children else "")
     self.add_line_and_source_path(para, token)
     with self.current_node_context(para, append=True):
         self.render_children(token)
Example #40
0
def process_audit_events(app, doctree, fromdocname):
    """Replace every audit_event_list placeholder in *doctree* with a table
    of all audit events recorded in the build environment.
    """
    # for/else: return early unless the document contains at least one
    # audit_event_list placeholder node.
    for node in doctree.traverse(audit_event_list):
        break
    else:
        return

    env = app.builder.env

    # Three columns: event name, arguments, back-references.
    table = nodes.table(cols=3)
    group = nodes.tgroup(
        '',
        nodes.colspec(colwidth=30),
        nodes.colspec(colwidth=55),
        nodes.colspec(colwidth=15),
        cols=3,
    )
    head = nodes.thead()
    body = nodes.tbody()

    table += group
    group += head
    group += body

    row = nodes.row()
    row += nodes.entry('', nodes.paragraph('', nodes.Text('Audit event')))
    row += nodes.entry('', nodes.paragraph('', nodes.Text('Arguments')))
    row += nodes.entry('', nodes.paragraph('', nodes.Text('References')))
    head += row

    for name in sorted(getattr(env, "all_audit_events", ())):
        audit_event = env.all_audit_events[name]

        row = nodes.row()
        node = nodes.paragraph('', nodes.Text(name))
        row += nodes.entry('', node)

        # Comma-separated argument names, each rendered as a literal.
        node = nodes.paragraph()
        for i, a in enumerate(audit_event['args']):
            if i:
                node += nodes.Text(", ")
            node += nodes.literal(a, nodes.Text(a))
        row += nodes.entry('', node)

        # Numbered [n] links back to each place the event is documented.
        node = nodes.paragraph()
        backlinks = enumerate(sorted(set(audit_event['source'])), start=1)
        for i, (doc, label) in backlinks:
            if isinstance(label, str):
                ref = nodes.reference("",
                                      nodes.Text("[{}]".format(i)),
                                      internal=True)
                try:
                    ref['refuri'] = "{}#{}".format(
                        app.builder.get_relative_uri(fromdocname, doc),
                        label,
                    )
                except NoUri:
                    # Builder cannot produce a URI for this target: skip it.
                    continue
                node += ref
        row += nodes.entry('', node)

        body += row

    # Replace every placeholder with the (single, shared) table node.
    for node in doctree.traverse(audit_event_list):
        node.replace_self(table)
Example #41
0
    def run(self):
        """Render the releases YAML file (first directive argument) as a
        table of active releases: codename, first release date, latest
        version and estimated end of life.
        """
        filename = self.arguments[0]
        document = self.state.document
        env = document.settings.env
        rel_filename, filename = env.relfn2path(filename)
        # Rebuild this document when the YAML file changes.
        env.note_dependency(filename)
        try:
            with open(filename, 'r') as fp:
                releases = yaml.safe_load(fp)
                releases = releases["releases"]
        except Exception as e:
            return [
                document.reporter.warning(
                    "Failed to open Ceph releases file {}: {}".format(
                        filename, e),
                    line=self.lineno)
            ]

        table = nodes.table()
        tgroup = nodes.tgroup(cols=3)
        table += tgroup

        # NOTE(review): four colspecs are created although cols=3 above --
        # confirm whether cols should be 4.
        tgroup.extend(
            nodes.colspec(colwidth=30, colname='c' + str(idx))
            for idx, _ in enumerate(range(4)))

        thead = nodes.thead()
        tgroup += thead
        row_node = nodes.row()
        thead += row_node
        row_node.extend(
            nodes.entry(h, nodes.paragraph(text=h)) for h in [
                "Version", "Initial release", "Latest",
                "End of life (estimated)"
            ])

        # Newest codenames first (sorted by name, descending).
        releases = six.iteritems(releases)
        releases = sorted(releases, key=lambda t: t[0], reverse=True)

        tbody = nodes.tbody()
        tgroup += tbody

        rows = []
        for code_name, info in releases:
            # Skip releases that have already reached end of life.
            actual_eol = info.get("actual_eol", None)
            if actual_eol and actual_eol <= datetime.datetime.now().date():
                continue
            trow = nodes.row()

            # Codename cell, nested-parsed so the `name`_ reference resolves.
            entry = nodes.entry()
            para = nodes.paragraph(text="`{}`_".format(code_name))
            sphinx.util.nodes.nested_parse_with_titles(self.state, para, entry)
            #entry += para
            trow += entry

            # Order point releases by date, then numeric version components.
            sorted_releases = sorted(
                info["releases"],
                key=lambda t: [t["released"]] + list(
                    map(lambda v: int(v), t["version"].split("."))))
            oldest_release = sorted_releases[0]
            newest_release = sorted_releases[-1]

            entry = nodes.entry()
            para = nodes.paragraph(
                text="{}".format(oldest_release["released"].strftime("%b %Y")))
            entry += para
            trow += entry

            # Latest version, optionally without a reference link.
            entry = nodes.entry()
            if newest_release.get("skip_ref", False):
                para = nodes.paragraph(
                    text="{}".format(newest_release["version"]))
            else:
                para = nodes.paragraph(
                    text="`{}`_".format(newest_release["version"]))
            sphinx.util.nodes.nested_parse_with_titles(self.state, para, entry)
            #entry += para
            trow += entry

            entry = nodes.entry()
            para = nodes.paragraph(
                text="{}".format(info.get("target_eol", "--")))
            entry += para
            trow += entry

            rows.append(trow)

        tbody.extend(rows)

        return [table]
Example #42
0
    def run(self):
        """Render a timeline table of releases: a Date column plus one
        column per release codename given as the remaining directive
        arguments.
        """
        filename = self.arguments[0]
        document = self.state.document
        env = document.settings.env
        rel_filename, filename = env.relfn2path(filename)
        # Rebuild this document when the YAML file changes.
        env.note_dependency(filename)
        try:
            with open(filename, 'r') as fp:
                releases = yaml.safe_load(fp)
        except Exception as e:
            return [
                document.reporter.warning(
                    "Failed to open Ceph releases file {}: {}".format(
                        filename, e),
                    line=self.lineno)
            ]

        display_releases = self.arguments[1:]

        # Collect (date, codename, version, skip_ref) tuples for the
        # requested codenames.
        timeline = []
        for code_name, info in six.iteritems(releases["releases"]):
            if code_name in display_releases:
                for release in info.get("releases", []):
                    released = release["released"]
                    timeline.append((released, code_name, release["version"],
                                     release.get("skip_ref", False)))

        # "development" lives outside the regular releases mapping.
        assert "development" not in releases["releases"]
        if "development" in display_releases:
            for release in releases["development"]["releases"]:
                released = release["released"]
                timeline.append((released, "development", release["version"],
                                 release.get("skip_ref", False)))

        # Newest entries first.
        timeline = sorted(timeline, key=lambda t: t[0], reverse=True)

        table = nodes.table()
        tgroup = nodes.tgroup(cols=3)
        table += tgroup

        # NOTE(review): one colspec per column is created although cols=3 is
        # fixed above -- confirm whether cols should be len(columns).
        columns = ["Date"] + display_releases
        tgroup.extend(
            nodes.colspec(colwidth=30, colname='c' + str(idx))
            for idx, _ in enumerate(range(len(columns))))

        thead = nodes.thead()
        tgroup += thead
        row_node = nodes.row()
        thead += row_node
        for col in columns:
            entry = nodes.entry()
            # Plain text for Date/development; `name`_ references otherwise.
            if col.lower() in ["date", "development"]:
                para = nodes.paragraph(text=col)
            else:
                para = nodes.paragraph(text="`{}`_".format(col))
            sphinx.util.nodes.nested_parse_with_titles(self.state, para, entry)
            row_node += entry

        tbody = nodes.tbody()
        tgroup += tbody

        rows = []
        for row_info in timeline:
            trow = nodes.row()

            entry = nodes.entry()
            para = nodes.paragraph(text=row_info[0].strftime("%b %Y"))
            entry += para
            trow += entry

            # One cell per displayed codename; only the matching column
            # shows the version, the others show '--'.
            for release in display_releases:
                entry = nodes.entry()
                if row_info[1] == release:
                    if row_info[3]:  # if skip ref
                        para = nodes.paragraph(text=row_info[2])
                    else:
                        para = nodes.paragraph(
                            text="`{}`_".format(row_info[2]))
                    sphinx.util.nodes.nested_parse_with_titles(
                        self.state, para, entry)
                else:
                    para = nodes.paragraph(text="--")
                    entry += para
                trow += entry
            rows.append(trow)

        tbody.extend(rows)

        return [table]
Example #43
0
 def run(self):
     """Return a single paragraph node containing a greeting."""
     return [nodes.paragraph(text='Hello World!')]
def process_needfilters(app, doctree, fromdocname):
    """Resolve every ``Needfilter`` placeholder node in *doctree*.

    Depending on the filter's ``layout`` option, each placeholder is
    replaced with a flat list, a table, or a PlantUML diagram of the needs
    that pass the configured status/tag/type filters.  When the
    ``needs_include_needs`` config value is falsy, the placeholders are
    simply removed instead.
    """
    # Replace all needlist nodes with a list of the collected needs.
    # Augment each need with a backlink to the original location.
    env = app.builder.env

    # NEEDFILTER
    for node in doctree.traverse(Needfilter):
        if not app.config.needs_include_needs:
            # Ok, this is really dirty.
            # If we replace a node, docutils checks, if it will not lose any attributes.
            # But this is here the case, because we are using the attribute "ids" of a node.
            # However, I do not understand, why losing an attribute is such a big deal, so we delete everything
            # before docutils claims about it.
            for att in ('ids', 'names', 'classes', 'dupnames'):
                node[att] = []
            node.replace_self([])
            continue

        # NOTE(review): ``id`` shadows the builtin of the same name; it is
        # harmless here but a rename would be cleaner.
        id = node.attributes["ids"][0]
        current_needfilter = env.need_all_needfilters[id]
        all_needs = env.needs_all_needs

        # Prepare the layout-specific container that collects the output.
        if current_needfilter["layout"] == "list":
            content = []

        elif current_needfilter["layout"] == "diagram":
            content = []
            try:
                if "sphinxcontrib.plantuml" not in app.config.extensions:
                    raise ImportError
                from sphinxcontrib.plantuml import plantuml
            except ImportError:
                # Diagrams need PlantUML; emit an inline error node instead.
                content = nodes.error()
                para = nodes.paragraph()
                text = nodes.Text("PlantUML is not available!", "PlantUML is not available!")
                para += text
                content.append(para)
                node.replace_self(content)
                continue

            # Placeholder directive text only; the real UML source is
            # assembled below and assigned to puml_node["uml"].
            plantuml_block_text = ".. plantuml::\n" \
                                  "\n" \
                                  "   @startuml" \
                                  "   @enduml"
            puml_node = plantuml(plantuml_block_text, **dict())
            puml_node["uml"] = "@startuml\n"
            puml_connections = ""

        elif current_needfilter["layout"] == "table":
            # Build the table skeleton: six columns plus one header row.
            content = nodes.table()
            tgroup = nodes.tgroup()
            id_colspec = nodes.colspec(colwidth=5)
            title_colspec = nodes.colspec(colwidth=15)
            type_colspec = nodes.colspec(colwidth=5)
            status_colspec = nodes.colspec(colwidth=5)
            links_colspec = nodes.colspec(colwidth=5)
            tags_colspec = nodes.colspec(colwidth=5)
            tgroup += [id_colspec, title_colspec, type_colspec, status_colspec, links_colspec, tags_colspec]
            tgroup += nodes.thead('', nodes.row(
                '',
                nodes.entry('', nodes.paragraph('', 'ID')),
                nodes.entry('', nodes.paragraph('', 'Title')),
                nodes.entry('', nodes.paragraph('', 'Type')),
                nodes.entry('', nodes.paragraph('', 'Status')),
                nodes.entry('', nodes.paragraph('', 'Links')),
                nodes.entry('', nodes.paragraph('', 'Tags'))
            ))
            tbody = nodes.tbody()
            tgroup += tbody
            content += tgroup

        all_needs = list(all_needs.values())
        # sic: the filtering helper really is spelled "procces_filters"
        # elsewhere in this extension.
        found_needs = procces_filters(all_needs, current_needfilter)

        line_block = nodes.line_block()
        for need_info in found_needs:
            if current_needfilter["layout"] == "list":
                # One line per need: "ID: title (status) [tags]".
                para = nodes.line()
                description = "%s: %s" % (need_info["id"], need_info["title"])

                if current_needfilter["show_status"] and need_info["status"] is not None:
                    description += " (%s)" % need_info["status"]

                if current_needfilter["show_tags"] and need_info["tags"] is not None:
                    description += " [%s]" % "; ".join(need_info["tags"])

                title = nodes.Text(description, description)

                # Create a reference
                if not need_info["hide"]:
                    ref = nodes.reference('', '')
                    ref['refdocname'] = need_info['docname']
                    ref['refuri'] = app.builder.get_relative_uri(
                        fromdocname, need_info['docname'])
                    ref['refuri'] += '#' + need_info['target_node']['refid']
                    ref.append(title)
                    para += ref
                else:
                    para += title

                line_block.append(para)
            elif current_needfilter["layout"] == "table":
                row = nodes.row()
                row += row_col_maker(app, fromdocname, env.needs_all_needs, need_info, "id", make_ref=True)
                row += row_col_maker(app, fromdocname, env.needs_all_needs, need_info, "title")
                row += row_col_maker(app, fromdocname, env.needs_all_needs, need_info, "type_name")
                row += row_col_maker(app, fromdocname, env.needs_all_needs, need_info, "status")
                row += row_col_maker(app, fromdocname, env.needs_all_needs, need_info, "links", ref_lookup=True)
                row += row_col_maker(app, fromdocname, env.needs_all_needs, need_info, "tags")
                tbody += row
            elif current_needfilter["layout"] == "diagram":
                # Link calculation
                # All links we can get from docutils functions will be relative.
                # But the generated link in the svg will be relative to the svg-file location
                # (e.g. server.com/docs/_images/sqwxo499cnq329439dfjne.svg)
                # and not to current documentation. Therefore we need to add ../ to get out of the _image folder.
                try:
                    link = "../" + app.builder.get_target_uri(need_info['docname']) \
                           + "?highlight={0}".format(urlParse(need_info['title'])) \
                           + "#" \
                           + need_info['target_node']['refid'] \
                        # Gets mostly called during latex generation
                except NoUri:
                    link = ""

                diagram_template = Template(env.config.needs_diagram_template)
                node_text = diagram_template.render(**need_info)

                puml_node["uml"] += '{style} "{node_text}" as {id} [[{link}]] {color}\n'.format(
                    id=need_info["id"], node_text=node_text, link=link, color=need_info["type_color"],
                    style=need_info["type_style"])
                for link in need_info["links"]:
                    puml_connections += '{id} --> {link}\n'.format(id=need_info["id"], link=link)

        if current_needfilter["layout"] == "list":
            content.append(line_block)

        if current_needfilter["layout"] == "diagram":
            puml_node["uml"] += puml_connections

            # Create a legend

            if current_needfilter["show_legend"]:
                # One legend row per configured need type: color plus title.
                puml_node["uml"] += "legend\n"
                puml_node["uml"] += "|= Color |= Type |\n"
                for need in app.config.needs_types:
                    puml_node["uml"] += "|<back:{color}> {color} </back>| {name} |\n".format(
                        color=need["color"], name=need["title"])
                puml_node["uml"] += "endlegend\n"
            puml_node["uml"] += "@enduml"
            puml_node["incdir"] = os.path.dirname(current_needfilter["docname"])
            puml_node["filename"] = os.path.split(current_needfilter["docname"])[1]  # Needed for plantuml >= 0.9
            content.append(puml_node)

        # NOTE(review): for the "list" layout content always holds the
        # line_block at this point (len == 1), even when no need matched,
        # so this fallback only triggers for empty table/diagram output.
        if len(content) == 0:
            nothing_found = "No needs passed the filters"
            para = nodes.line()
            nothing_found_node = nodes.Text(nothing_found, nothing_found)
            para += nothing_found_node
            content.append(para)
        if current_needfilter["show_filters"]:
            # Render the active filters, joined with AND between categories
            # and OR within one.  The augmented assignments below rely on
            # ``x += a if cond else ""`` binding as ``x += (a if cond else "")``.
            para = nodes.paragraph()
            filter_text = "Used filter:"
            filter_text += " status(%s)" % " OR ".join(current_needfilter["status"]) if len(
                current_needfilter["status"]) > 0 else ""
            if len(current_needfilter["status"]) > 0 and len(current_needfilter["tags"]) > 0:
                filter_text += " AND "
            filter_text += " tags(%s)" % " OR ".join(current_needfilter["tags"]) if len(
                current_needfilter["tags"]) > 0 else ""
            if (len(current_needfilter["status"]) > 0 or len(current_needfilter["tags"]) > 0) and len(
                    current_needfilter["types"]) > 0:
                filter_text += " AND "
            filter_text += " types(%s)" % " OR ".join(current_needfilter["types"]) if len(
                current_needfilter["types"]) > 0 else ""

            filter_node = nodes.emphasis(filter_text, filter_text)
            para += filter_node
            content.append(para)

        node.replace_self(content)
Example #45
0
 def apply(self):
     """Validate and enrich the RFC-2822 header of a PEP document.

     Checks that the document begins with an RFC-2822 field list, extracts
     the PEP number, masks author e-mail addresses, and turns PEP
     references, dates and the content type into hyperlinks.  Raises
     :exc:`DataError` on any structural problem.
     """
     if not len(self.document):
         # @@@ replace these DataErrors with proper system messages
         raise DataError('Document tree is empty.')
     header = self.document[0]
     if not isinstance(header, nodes.field_list) or \
           'rfc2822' not in header['classes']:
         raise DataError('Document does not begin with an RFC-2822 '
                         'header; it is not a PEP.')
     pep = None
     for field in header:
         if field[0].astext().lower() == 'pep':  # should be the first field
             value = field[1].astext()
             try:
                 pep = int(value)
                 cvs_url = self.pep_cvs_url % pep
             except ValueError:
                 # Non-numeric PEP number: keep the raw value, warn, and
                 # mark the field body as problematic with a backlink to
                 # the warning message.
                 pep = value
                 cvs_url = None
                 msg = self.document.reporter.warning(
                     '"PEP" header must contain an integer; "%s" is an '
                     'invalid value.' % pep,
                     base_node=field)
                 msgid = self.document.set_id(msg)
                 prb = nodes.problematic(value,
                                         value or '(none)',
                                         refid=msgid)
                 prbid = self.document.set_id(prb)
                 msg.add_backref(prbid)
                 if len(field[1]):
                     field[1][0][:] = [prb]
                 else:
                     field[1] += nodes.paragraph('', '', prb)
             break
     if pep is None:
         raise DataError('Document does not contain an RFC-2822 "PEP" '
                         'header.')
     if pep == 0:
         # Special processing for PEP 0.
         pending = nodes.pending(PEPZero)
         self.document.insert(1, pending)
         self.document.note_pending(pending)
     if len(header) < 2 or header[1][0].astext().lower() != 'title':
         raise DataError('No title!')
     for field in header:
         name = field[0].astext().lower()
         body = field[1]
         if len(body) > 1:
             raise DataError('PEP header field body contains multiple '
                             'elements:\n%s' % field.pformat(level=1))
         elif len(body) == 1:
             if not isinstance(body[0], nodes.paragraph):
                 raise DataError('PEP header field body may only contain '
                                 'a single paragraph:\n%s' %
                                 field.pformat(level=1))
         # NOTE(review): because of the elif chain above, this branch only
         # runs when the field body is *empty* (len(body) == 0); the date
         # paragraph it may append is what ``para = body[0]`` below picks
         # up — confirm this is the intended flow.
         elif name == 'last-modified':
             date = time.strftime(
                 '%d-%b-%Y',
                 time.localtime(os.stat(self.document['source'])[8]))
             if cvs_url:
                 body += nodes.paragraph(
                     '', '', nodes.reference('', date, refuri=cvs_url))
         else:
             # empty
             continue
         para = body[0]
         if name == 'author':
             # Obfuscate e-mail addresses in the Author field.
             for node in para:
                 if isinstance(node, nodes.reference):
                     node.replace_self(mask_email(node))
         elif name == 'discussions-to':
             # Same, but pass the PEP number through to the masker.
             for node in para:
                 if isinstance(node, nodes.reference):
                     node.replace_self(mask_email(node, pep))
         elif name in ('replaces', 'replaced-by', 'requires'):
             # Turn comma/whitespace-separated PEP numbers into links,
             # separated by single spaces.
             newbody = []
             space = nodes.Text(' ')
             for refpep in re.split(',?\s+', body.astext()):
                 pepno = int(refpep)
                 newbody.append(
                     nodes.reference(
                         refpep,
                         refpep,
                         refuri=(self.document.settings.pep_base_url +
                                 self.pep_url % pepno)))
                 newbody.append(space)
             para[:] = newbody[:-1]  # drop trailing space
         elif name == 'last-modified':
             utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions)
             if cvs_url:
                 date = para.astext()
                 para[:] = [nodes.reference('', date, refuri=cvs_url)]
         elif name == 'content-type':
             # Link the Content-Type value to PEP 12.
             pep_type = para.astext()
             uri = self.document.settings.pep_base_url + self.pep_url % 12
             para[:] = [nodes.reference('', pep_type, refuri=uri)]
         elif name == 'version' and len(body):
             utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions)
 def visit_p(self, node):
     """Translate an HTML ``<p>`` element into a fresh docutils paragraph."""
     para = nodes.paragraph()
     return para
Example #47
0
def create_hoverlist(app, doctree, fromdocname):
    """Replace every ``hoverlist`` node in *doctree* with the list of hover
    terms and their translations.

    Terms are read from the ``LIST_OF_HOVER_TERMS`` file in the current
    working directory (semicolon-separated fields, each value following an
    apostrophe).  Depending on ``hover_miniTranslationList`` the output is
    one paragraph or one line per term.  If ``hover_translationList`` is
    falsy, all hoverlist nodes are removed instead.
    """
    # If translation lists are disabled, drop every hoverlist node and stop.
    # (The original re-checked this config inside both loops; the early
    # return makes those inner checks redundant.)
    if not app.config.hover_translationList:
        for node in doctree.traverse(hoverlist):
            node.replace_self([])
        return

    # words maps each translated term (citation form) to its translation.
    words = {}
    content = []

    # The term list is UTF-8 (a commented-out codecs.open in the original
    # showed the intent); open it explicitly so parsing does not depend on
    # the locale, and use a context manager so the handle is always closed.
    with open("LIST_OF_HOVER_TERMS", 'r', encoding="utf-8") as listfile:
        listcontents = listfile.readlines()

    for line in listcontents:
        # Clean up the strings: each field looks like "...'value"; strip
        # everything up to and including the first apostrophe.
        line = line.split(";")
        for idx, entry in enumerate(line):
            beginindex = entry.find("'")
            line[idx] = entry[beginindex + 1:]

        citationform = line[2]
        translation = line[3]

        # First occurrence wins; later duplicates are ignored.
        if citationform not in words:
            words[citationform] = translation

    # Add words and translations (sorted) to nodes.
    for key, value in sorted(words.items()):
        wordnode = nodes.emphasis(key, key)
        translationstring = " : " + value

        # Add linebreak if smaller version of list is used.
        if app.config.hover_miniTranslationList:
            translationstring += "\n"

        translationnode = nodes.Text(translationstring)

        # Larger list: one paragraph per term; smaller list: one line.
        if not app.config.hover_miniTranslationList:
            para = nodes.paragraph()
        else:
            para = nodes.line()
        # Append the line/paragraph.
        para += wordnode
        para += translationnode
        content.append(para)

    # Replace all hoverlist nodes with the translations.
    for node in doctree.traverse(hoverlist):
        node.replace_self(content)
    return
Example #48
0
def envy_resolve(app, doctree, fromdocname):
    """Resolve envy-domain placeholder nodes in *doctree*.

    ``uplink_placeholder`` nodes become a list of signature lines linking
    back to each parent object; ``sub_placeholder`` nodes become a table of
    the object's children (address, optional variants, name, description).
    """
    objects = app.env.domaindata['envy']['objects']

    # add uplink info
    for holder in doctree.traverse(uplink_placeholder):
        obj = objects[holder.name]
        links = []
        for sp, pos, name, variants in obj.uplinks:
            # One signature line per uplink: cross-reference to the parent,
            # then offset/name text, then an optional variants annotation.
            signode = addnodes.desc_signature('', '')
            signode['first'] = False
            signode += make_refnode(app.builder, fromdocname, sp.docname, sp.iname + '-' + sp.name, addnodes.desc_addname(sp.name, sp.name), sp.name)
            text = ' {}: {}{}'.format(pos.format_offset(), name, pos.format_square())
            post = pos.format_post()
            if post:
                text += ' ({})'.format(', '.join(post))
            signode += addnodes.desc_name(text, text)
            if variants is not None:
                text = ' [{}]'.format(variants)
                signode += addnodes.desc_annotation(text, text)
            links.append(signode)
        holder.replace_self(links)

    # add subnode list
    for holder in doctree.traverse(sub_placeholder):
        obj = objects[holder.name]
        # Only show a Variants column if at least one child declares one.
        add_variant = False
        for pos, name, child, variants in obj.subs:
            if variants is not None:
                add_variant = True
        if obj.subs:
            table = nodes.table()
            # (colwidth, header title) pairs; Description gets most width.
            headers = [(1, 'Address'), (1, 'Name'), (10, 'Description')]
            if add_variant:
                headers.insert(1, (1, 'Variants'))
            tgroup = nodes.tgroup(cols=len(headers))
            table += tgroup
            # Column widths only; ``header`` is unused in this first loop.
            for colwidth, header in headers:
                tgroup += nodes.colspec(colwidth=colwidth)
            thead = nodes.thead()
            tgroup += thead
            headrow = nodes.row()
            for colwidth, header in headers:
                entry = nodes.entry()
                para = nodes.paragraph()
                entry += para
                para += nodes.Text(header, header)
                headrow += entry
            thead += headrow
            tbody = nodes.tbody()
            tgroup += tbody
            for pos, name, child, variants in obj.subs:
                row = nodes.row()
                ptext = pos.format_offset()
                post = pos.format_post()
                if post:
                    ptext += ' ({})'.format(', '.join(post))
                row += wrap_text_entry(ptext)
                if add_variant:
                    row += wrap_text_entry('all' if variants is None else variants)
                row += wrap_text_entry(name + pos.format_square())
                # Description column: a cross-reference to the child whose
                # visible text is the child's brief description.
                entry = nodes.entry()
                para = nodes.paragraph()
                entry += para
                para += make_refnode(app.builder, fromdocname, child.docname, child.iname + '-' + child.name, nodes.Text(child.brief, child.brief), child.brief)
                row += entry
                tbody += row
            holder.replace_self([table])
        else:
            holder.replace_self([])
Example #49
0
    def run(self):
        """Render build-command documentation for the requested tool(s).

        Parses the directive options, validates their combinations, and
        emits literal blocks with the west and/or CMake commands needed to
        build the application.
        """
        # Re-run on the current document if this directive's source changes.
        self.state.document.settings.env.note_dependency(__file__)

        # Parse directive options.  Don't use os.path.sep or os.path.join here!
        # That would break if building the docs on Windows.
        opts = self.options
        tool = opts.get('tool', 'west').lower()
        app = opts.get('app', None)
        zephyr_app = opts.get('zephyr-app', None)
        cd_into = 'cd-into' in opts
        generator = opts.get('generator', 'ninja').lower()
        host_os = opts.get('host-os', 'all').lower()
        board = opts.get('board', None)
        shield = opts.get('shield', None)
        conf = opts.get('conf', None)
        gen_args = opts.get('gen-args', None)
        build_args = opts.get('build-args', None)
        build_dir_append = opts.get('build-dir', '').strip('/')
        goals = opts.get('goals').split()
        skip_config = 'maybe-skip-config' in opts
        compact = 'compact' in opts

        # Validate option combinations before doing any real work.
        if tool not in self.TOOLS:
            raise self.error('Unknown tool {}; choose from: {}'.format(
                tool, self.TOOLS))
        if app and zephyr_app:
            raise self.error('Both app and zephyr-app options were given.')
        if generator not in self.GENERATORS:
            raise self.error('Unknown generator {}; choose from: {}'.format(
                generator, self.GENERATORS))
        if host_os not in self.HOST_OS:
            raise self.error('Unknown host-os {}; choose from: {}'.format(
                host_os, self.HOST_OS))
        if compact and skip_config:
            raise self.error(
                'Both compact and maybe-skip-config options were given.')

        app = app or zephyr_app
        in_tree = self.IN_TREE_STR if zephyr_app else None
        # Allow build directories which are nested.
        build_dir = ('build' + '/' + build_dir_append).rstrip('/')

        # "all" expands to every concrete host OS / tool.
        if host_os == "all":
            host_os = [v for v in self.HOST_OS if v != 'all']
        else:
            host_os = [host_os]
        if tool == "all":
            tools = [v for v in self.TOOLS if v != 'all']
        else:
            tools = [tool]

        # Build the command content as a list, then convert to string.
        content = []
        # With more than one tool, each section gets a "Using <tool>:" lead-in.
        tool_comment = 'Using {}:' if len(tools) > 1 else None

        run_config = dict(
            host_os=host_os,
            app=app,
            in_tree=in_tree,
            cd_into=cd_into,
            board=board,
            shield=shield,
            conf=conf,
            gen_args=gen_args,
            build_args=build_args,
            build_dir=build_dir,
            goals=goals,
            compact=compact,
            skip_config=skip_config,
            generator=generator,
        )

        if 'west' in tools:
            west_content = self._generate_west(**run_config)
            if tool_comment:
                paragraph = nodes.paragraph()
                paragraph += nodes.Text(tool_comment.format('west'))
                content.append(paragraph)
                content.append(self._lit_block(west_content))
            else:
                content.extend(west_content)

        if 'cmake' in tools:
            cmake_content = self._generate_cmake(**run_config)
            if tool_comment:
                paragraph = nodes.paragraph()
                paragraph += nodes.Text(
                    tool_comment.format('CMake and {}'.format(generator)))
                content.append(paragraph)
                content.append(self._lit_block(cmake_content))
            else:
                content.extend(cmake_content)

        # A single tool produces one combined literal block.
        if not tool_comment:
            content = [self._lit_block(content)]

        return content
Example #50
0
    def run(self):
        """Run the external kernel-doc script on a kernel source file and
        parse its reST output into this document.

        The source path comes from the directive argument, relative to
        ``kerneldoc_srctree``.  Options select what to extract (``export``,
        ``internal``, ``doc``, ``functions``).  Any failure degrades to an
        inline error node instead of aborting the build.
        """
        env = self.state.document.settings.env
        cmd = [env.config.kerneldoc_bin, '-rst', '-enable-lineno']

        filename = env.config.kerneldoc_srctree + '/' + self.arguments[0]
        export_file_patterns = []

        # Tell sphinx of the dependency
        env.note_dependency(os.path.abspath(filename))

        tab_width = self.options.get('tab-width',
                                     self.state.document.settings.tab_width)

        # FIXME: make this nicer and more robust against errors
        if 'export' in self.options:
            cmd += ['-export']
            export_file_patterns = str(self.options.get('export')).split()
        elif 'internal' in self.options:
            cmd += ['-internal']
            export_file_patterns = str(self.options.get('internal')).split()
        elif 'doc' in self.options:
            cmd += ['-function', str(self.options.get('doc'))]
        elif 'functions' in self.options:
            functions = self.options.get('functions').split()
            if functions:
                for f in functions:
                    cmd += ['-function', f]
            else:
                # An empty :functions: value means "all, minus DOC: sections".
                cmd += ['-no-doc-sections']

        # Each file matching an export pattern is both a Sphinx dependency
        # and an explicit -export-file argument.
        for pattern in export_file_patterns:
            for f in glob.glob(env.config.kerneldoc_srctree + '/' + pattern):
                env.note_dependency(os.path.abspath(f))
                cmd += ['-export-file', f]

        cmd += [filename]

        try:
            kernellog.verbose(env.app,
                              'calling kernel-doc \'%s\'' % (" ".join(cmd)))

            p = subprocess.Popen(cmd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            out, err = p.communicate()

            out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8')

            if p.returncode != 0:
                sys.stderr.write(err)

                kernellog.warn(
                    env.app, 'kernel-doc \'%s\' failed with return code %d' %
                    (" ".join(cmd), p.returncode))
                return [
                    nodes.error(None,
                                nodes.paragraph(text="kernel-doc missing"))
                ]
            elif env.config.kerneldoc_verbosity > 0:
                sys.stderr.write(err)

            lines = statemachine.string2lines(out,
                                              tab_width,
                                              convert_whitespace=True)
            result = ViewList()

            # kernel-doc emits "#define LINENO <n>" markers so diagnostics
            # can point at the right source line; consume the markers and
            # track the offset instead of copying them into the output.
            lineoffset = 0
            line_regex = re.compile("^#define LINENO ([0-9]+)$")
            for line in lines:
                match = line_regex.search(line)
                if match:
                    # sphinx counts lines from 0
                    lineoffset = int(match.group(1)) - 1
                    # we must eat our comments since the upset the markup
                else:
                    result.append(line, filename, lineoffset)
                    lineoffset += 1

            node = nodes.section()
            self.do_parse(result, node)

            return node.children

        except Exception as e:  # pylint: disable=W0703
            # Catch-all: any runtime failure becomes an inline error node.
            kernellog.warn(
                env.app, 'kernel-doc \'%s\' processing failed with: %s' %
                (" ".join(cmd), str(e)))
            return [
                nodes.error(None, nodes.paragraph(text="kernel-doc missing"))
            ]
Example #51
0
    def run(self):
        """Build an inflated image-grid container from the directive content.

        Each non-empty content line is an image URI ({filename}/{static}
        placeholders are substituted with the static path); an empty line
        starts a new grid row.  Every image in a row is sized to its share
        of the row's total relative width, and JPEG EXIF data (aperture,
        exposure time, ISO) becomes the figure caption when available.
        """
        grid_node = nodes.container()
        grid_node['classes'] += ['m-imagegrid', 'm-container-inflate']

        # rows[i] holds (uri, relative width, caption) tuples for grid row
        # i; total_widths[i] is the sum of that row's relative widths.
        rows = [[]]
        total_widths = [0]
        for uri in self.content:
            # New line, calculating width from 0 again
            if not uri:
                rows.append([])
                total_widths.append(0)
                continue

            # Open the files and calculate the overall width
            # Support both {filename} (3.7.1) and {static} (3.8) placeholders
            file = os.path.join(os.getcwd(), settings['PATH'])
            absuri = uri.format(filename=file, static=file)
            # Close each image as soon as its metadata has been read — the
            # original left one file handle open per image.
            with PIL.Image.open(absuri) as im:
                # Get EXIF info, if it's there
                if hasattr(im, '_getexif') and im._getexif() is not None:
                    exif = {
                        PIL.ExifTags.TAGS[k]: v
                        for k, v in im._getexif().items()
                        if k in PIL.ExifTags.TAGS and len(str(v)) < 256
                    }

                    # Not all info might be present
                    caption = []
                    if 'FNumber' in exif:
                        # FNumber is a (numerator, denominator) rational.
                        caption += [
                            "F{}".format(
                                float(exif['FNumber'][0]) /
                                float(exif['FNumber'][1]))
                        ]
                    if 'ExposureTime' in exif:
                        numerator, denominator = exif['ExposureTime']
                        if int(numerator) > int(denominator):
                            # Long exposure: show as decimal seconds.
                            caption += [
                                "{} s".format(
                                    float(numerator) / float(denominator))
                            ]
                        else:
                            caption += ["{}/{} s".format(numerator, denominator)]
                    if 'ISOSpeedRatings' in exif:
                        caption += ["ISO {}".format(exif['ISOSpeedRatings'])]
                    caption = ', '.join(caption)

                # It's not (e.g. a PNG file), empty caption
                else:
                    caption = ""

                rel_width = float(im.width) / im.height
            total_widths[-1] += rel_width
            rows[-1].append((uri, rel_width, caption))

        for i, row in enumerate(rows):
            row_node = nodes.container()

            for uri, rel_width, caption in row:
                image_reference = rst.directives.uri(uri)
                image_node = nodes.image('', uri=image_reference)

                # <figurecaption> in case there's a caption
                if caption:
                    text_nodes, _ = self.state.inline_text(
                        caption, self.lineno)
                    text_node = nodes.paragraph('', '', *text_nodes)
                    overlay_node = nodes.caption()
                    overlay_node.append(text_node)

                # Otherwise an empty <div>
                else:
                    overlay_node = nodes.container()

                link_node = nodes.reference('', refuri=image_reference)
                link_node.append(image_node)
                link_node.append(overlay_node)
                # Width is this image's share of the row's total width.
                wrapper_node = nodes.figure(
                    width="{:.3f}%".format(rel_width * 100.0 /
                                           total_widths[i]))
                wrapper_node.append(link_node)
                row_node.append(wrapper_node)

            grid_node.append(row_node)

        return [grid_node]
Example #52
0
def wrap_text_entry(txt):
    """Wrap *txt* in a table entry containing a single paragraph."""
    para = nodes.paragraph()
    para += nodes.Text(txt, txt)
    entry = nodes.entry()
    entry += para
    return entry
Example #53
0
def _add_source_link(pep_source_path: Path) -> nodes.paragraph:
    """Return a paragraph linking to the PEP's source file on GitHub."""
    url = f"https://github.com/python/peps/blob/main/{pep_source_path.name}"
    anchor = nodes.reference("", url, refuri=url)
    return nodes.paragraph("", "Source: ", anchor)
Example #54
0
    def transform(self, node: nodes.field_list) -> None:
        """Transform a single field list *node*.

        Walks every ``:name arg: body`` field in the list, matches the
        field name against ``self.typemap`` (the domain's declared
        ``Field`` descriptors), collects per-argument type information,
        and finally replaces *node* in the doctree with a freshly built
        field list in which known fields are grouped/rendered by their
        descriptor and unknown fields are passed through with a
        capitalized name.
        """
        typemap = self.typemap

        # Collected output: either an untouched ``nodes.field`` (unknown
        # field, passed through as-is) or a ``(Field, entry/entries)``
        # pair to be rendered in step 2.
        entries = []        # type: List[Union[nodes.field, Tuple[Field, Any]]]
        # Maps a grouped field's type name to its index in ``entries``,
        # so later fields of the same group are merged into one entry.
        groupindices = {}   # type: Dict[str, int]
        # Maps type name -> {argument name: list of type nodes}.
        types = {}          # type: Dict[str, Dict]

        # step 1: traverse all fields and collect field types and content
        for field in cast(List[nodes.field], node):
            # A docutils field is always a (field_name, field_body) pair.
            assert len(field) == 2
            field_name = cast(nodes.field_name, field[0])
            field_body = cast(nodes.field_body, field[1])
            try:
                # split into field type and argument
                fieldtype_name, fieldarg = field_name.astext().split(None, 1)
            except ValueError:
                # maybe an argument-less field type?
                fieldtype_name, fieldarg = field_name.astext(), ''
            typedesc, is_typefield = typemap.get(fieldtype_name, (None, None))

            # collect the content, trying not to keep unnecessary paragraphs
            if _is_single_paragraph(field_body):
                paragraph = cast(nodes.paragraph, field_body[0])
                content = paragraph.children
            else:
                content = field_body.children

            # sort out unknown fields
            if typedesc is None or typedesc.has_arg != bool(fieldarg):
                # either the field name is unknown, or the argument doesn't
                # match the spec; capitalize field name and be done with it
                new_fieldname = fieldtype_name[0:1].upper() + fieldtype_name[1:]
                if fieldarg:
                    new_fieldname += ' ' + fieldarg
                field_name[0] = nodes.Text(new_fieldname)
                entries.append(field)

                # but if this has a type then we can at least link it
                if (typedesc and is_typefield and content and
                        len(content) == 1 and isinstance(content[0], nodes.Text)):
                    typed_field = cast(TypedField, typedesc)
                    target = content[0].astext()
                    xrefs = typed_field.make_xrefs(
                        typed_field.typerolename,
                        self.directive.domain,
                        target,
                        contnode=content[0],
                        env=self.directive.state.document.settings.env
                    )
                    # Replace the plain text with cross-reference nodes,
                    # preserving the single-paragraph shape when present.
                    if _is_single_paragraph(field_body):
                        paragraph = cast(nodes.paragraph, field_body[0])
                        paragraph.clear()
                        paragraph.extend(xrefs)
                    else:
                        field_body.clear()
                        field_body += nodes.paragraph('', '', *xrefs)

                continue

            typename = typedesc.name

            # if the field specifies a type, put it in the types collection
            if is_typefield:
                # filter out only inline nodes; others will result in invalid
                # markup being written out
                content = [n for n in content if isinstance(n, nodes.Inline) or
                           isinstance(n, nodes.Text)]
                if content:
                    types.setdefault(typename, {})[fieldarg] = content
                continue

            # also support syntax like ``:param type name:``
            if typedesc.is_typed:
                try:
                    argtype, argname = fieldarg.split(None, 1)
                except ValueError:
                    # no space in the argument: just a name, no inline type
                    pass
                else:
                    types.setdefault(typename, {})[argname] = \
                        [nodes.Text(argtype)]
                    fieldarg = argname

            # Wrap the body content in an inline marked translatable so the
            # i18n machinery can pick it up; carry over source location.
            translatable_content = nodes.inline(field_body.rawsource,
                                                translatable=True)
            translatable_content.document = field_body.parent.document
            translatable_content.source = field_body.parent.source
            translatable_content.line = field_body.parent.line
            translatable_content += content

            # grouped entries need to be collected in one entry, while others
            # get one entry per field
            if typedesc.is_grouped:
                if typename in groupindices:
                    group = cast(Tuple[Field, List], entries[groupindices[typename]])
                else:
                    groupindices[typename] = len(entries)
                    group = (typedesc, [])
                    entries.append(group)
                new_entry = typedesc.make_entry(fieldarg, [translatable_content])
                group[1].append(new_entry)
            else:
                new_entry = typedesc.make_entry(fieldarg, [translatable_content])
                entries.append((typedesc, new_entry))

        # step 2: all entries are collected, construct the new field list
        new_list = nodes.field_list()
        for entry in entries:
            if isinstance(entry, nodes.field):
                # pass-through old field
                new_list += entry
            else:
                fieldtype, items = entry
                fieldtypes = types.get(fieldtype.name, {})
                env = self.directive.state.document.settings.env
                new_list += fieldtype.make_field(fieldtypes, self.directive.domain,
                                                 items, env=env)

        node.replace_self(new_list)
    def dict_to_fm_field_list(self,
                              data: Dict[str, Any],
                              language_code: str,
                              line: int = 0) -> nodes.field_list:
        """Render each key/val pair of *data* as a docutils ``field`` node.

        Values whose key is a bibliographic field for *language_code*
        (author, date, copyright, ...) are parsed as inline Markdown;
        all other values are inserted as literal text.  Non-scalar
        values are JSON-encoded first.

        The returned field list is intended to sit at the start of the
        document, where the ``docutils.transforms.frontmatter.DocInfo``
        transform (priority 340) converts it to a ``docinfo`` node and
        maps bibliographic keys to their dedicated node classes
        (``nodes.author``, ``nodes.date``, ...; 'dedication' and
        'abstract' become topics outside the docinfo).  Under sphinx the
        docinfo is later harvested into
        ``app.env.metadata[app.env.docname]`` by
        ``MetadataCollector.process_doc``.

        See
        https://www.sphinx-doc.org/en/master/usage/restructuredtext/field-lists.html
        for docinfo fields used by sphinx.
        """
        output = nodes.field_list()

        biblio_keys = get_language(language_code).bibliographic_fields
        mock_machine = MockStateMachine(self, line)
        mock_state = MockState(self, mock_machine, line)

        for key, raw in data.items():
            # Scalars pass through; anything else is serialized to JSON.
            if isinstance(raw, (str, int, float, date, datetime)):
                text = str(raw)
            else:
                text = json.dumps(raw)

            # Bibliographic fields get inline Markdown parsing; the rest
            # stay literal so arbitrary metadata is not re-interpreted.
            if key in biblio_keys:
                inline_nodes, _ = mock_state.inline_text(text, line)
                body = [nodes.paragraph("", "", *inline_nodes)]
            else:
                body = [nodes.Text(text, text)]

            field = nodes.field()
            field.source = text
            field += nodes.field_name(key, "", nodes.Text(key, key))
            field += nodes.field_body(text, *body)
            output += field

        return output
Example #56
0
class ExcelTableDirective(ListTable, DirectiveTemplate):
    """
  ExcelTableDirective implements the directive.
  Directive allows to create RST tables from the contents
  of the Excel sheet. The functionality is very similar to
  csv-table (docutils) and xmltable (:mod:`sphinxcontrib.xmltable`).

  Example of the directive:

  .. code-block:: rest

    .. exceltable::
       :file: path/to/document.xls
       :header: 1

  """
    #required_arguments = 0
    #optional_arguments = 0
    has_content = False
    option_spec = {
        'file': directives.path,
        'selection': directives.unchanged_required,
        'encoding': directives.unchanged,
        'header': directives.unchanged,
        'sheet': directives.unchanged,
        'class': directives.class_option,
        'widths': directives.unchanged,
    }

    def run(self):
        """Implements the directive.

        Reads the configured Excel file, extracts the selected cell
        range, and builds docutils row/cell data from it.  Returns a
        list of nodes on error paths.

        Fixes over the previous revision: Python 2 ``print`` statement
        and ``except X, e`` syntax (syntax errors under Python 3, which
        the rest of this file targets), dead ``pass`` after ``return``,
        and a leaked file handle around ``ExcelTable``.
        """
        # Get content and options
        file_path = self.options.get('file', None)
        selection = self.options.get('selection', 'A1:')
        sheet = self.options.get('sheet', '0')
        header = self.options.get('header', '0')
        col_widths = self.options.get('widths', None)

        # Divide the selection into from and to values
        if ':' not in selection:
            selection += ':'
        fromcell, tocell = selection.split(':')

        # Default the range ends: start at A1, open-ended upper bound.
        if not fromcell:
            fromcell = 'A1'

        if not tocell:
            tocell = None

        if not file_path:
            return [self._report('file_path -option missing')]

        # Header option: a digit means "use N sheet rows as the header".
        header_rows = 0
        if header and header.isdigit():
            header_rows = int(header)

        # Transform the path suitable for processing
        file_path = self._get_directive_path(file_path)

        print('file path: %s' % file_path)

        try:
            # NOTE(review): opened in the default text mode as before;
            # .xls files are binary, so 'rb' may be required — confirm
            # against the ExcelTable implementation.
            with open(file_path) as source:
                et = ExcelTable(source)
                table = et.create_table(fromcell=fromcell,
                                        tocell=tocell,
                                        nheader=header_rows,
                                        sheet=sheet)
        except Exception as e:
            return [msgr.error('Error occured while creating table: %s' % e)]

        title, messages = self.make_title()

        # If empty table is created
        if not table:
            self._report('The table generated from queries is empty')
            return [nodes.paragraph(text='')]

        try:
            table_data = []

            # A non-numeric :header: is a comma-separated list of column
            # names; synthesize one header row from it.
            if header and not header.isdigit():
                header_rows = 1
                table_data.append([
                    nodes.paragraph(text=hcell.strip())
                    for hcell in header.split(',')
                ])

            # Put the given data in rst elements: paragraph
            for row in table['headers']:
                table_data.append(
                    [nodes.paragraph(text=cell['value']) for cell in row])

            # Iterates rows: put the given data in rst elements
            for row in table['rows']:
                row_data = []
                for cell in row:
                    class_data = ['']
                    # Node based on formatting rules
                    # NOTE: rst does not support nested, use class attribute instead
                    if cell['italic']:
                        class_data.append('italic')

                    if cell['bold']:
                        node = nodes.strong(text=cell['value'])
                    else:
                        node = nodes.paragraph(text=cell['value'])

                    # Add additional formatting as class attributes
                    node['classes'] = class_data
                    row_data.append([node])

                    # FIXME: style attribute does not get into writer
                    if cell['bgcolor']:
                        rgb = [str(val) for val in cell['bgcolor']]
                        node.attributes[
                            'style'] = 'background-color: rgb(%s);' % ','.join(
                                rgb)

                table_data.append(row_data)

            # If there is no data at this point, throw an error
            if not table_data:
                return [msgr.error('Selection did not return any data')]

            # Get params from data
            num_cols = len(table_data[0])

            # Column widths, in priority order:
            # 1. the :widths: option, 2. widths from the sheet's first row
            # (normalized to percentages), 3. directive defaults.
            if col_widths:
                col_widths = [int(width) for width in col_widths.split(',')]
            else:
                col_widths = [int(col['width']) for col in table['rows'][0]]
                col_width_total = sum(col_widths)
                col_widths = [
                    int(width * 100 / col_width_total) for width in col_widths
                ]

            # If still empty for some reason, use default widths
            if not col_widths:
                col_widths = self.get_column_widths(num_cols)

            stub_columns = 0

            # Sanity check: cell-count mismatch between the first and
            # second row (possibly header and first data row).
            # NOTE(review): ``header`` is always a str here (option value),
            # so this guard always fires — kept as in the original.
            if not isinstance(header, int):
                if len(table_data) > 1 and len(table_data[0]) != len(
                        table_data[1]):
                    error = msgr.error(
                        'Data amount mismatch: check the directive data and params'
                    )
                    return [error]

            self.check_table_dimensions(table_data, header_rows, stub_columns)

        except SystemMessagePropagation as detail:
            return [detail.args[0]]
    def _sections(self, sensors):
        """Generate a section for each sensor.

        For every sensor dict in *sensors*, yields first the link-target
        nodes produced by parsing explicit ``.. _name:`` directives, then
        a ``nodes.section`` containing the title, a "General Info" table,
        optional "Modes"/"Commands" subsections, and any notes.
        """

        for s in sensors:
            # Build ``.. _name:`` targets through the RST state machine so
            # they register with the document; parse into a throwaway
            # section and yield its children ahead of the real section.
            dummy = nodes.section()
            result = ViewList()
            result.append('.. _{}:'.format(s['name']),
                          source=s['source_file'],
                          offset=s['source_line'])
            # Add a second target when the URL-safe name differs.
            if s['name'] != s['url_name']:
                result.append('.. _{}:'.format(s['url_name']),
                              source=s['source_file'],
                              offset=s['source_line'])
            self.state.nested_parse(result, 0, dummy)
            for c in dummy.children:
                yield c

            # FIXME: not sure why this does not have the same effect as above
            # target = nodes.target(ids=[s['url_name']], names=[s['url_name']])
            # yield target

            section = nodes.section(ids=[s['name']], names=[s['name']])

            # Title is "<vendor> <part name>", falling back to the part
            # number when no part name is given.
            title_text = s.get('vendor_part_name', None) or s['vendor_part_number']
            if 'vendor_name' in s:
                title_text = '{} {}'.format(s['vendor_name'], title_text)
            title = nodes.title(text=title_text)
            section += title

            # "General Info": a two-column key/value table.  The ``\\ `` in
            # the names attribute escapes the space for docutils.
            info_section = nodes.section(ids=[s['name'] + '-info'],
                                         names=[s['name'] + '\\ info'])
            info_title = nodes.title(text='General Info')
            info_section += info_title
            info_table = self._table([1, 1], None, [
                r for r in self._info_rows(s)
            ])
            info_section += info_table
            section += info_section

            # "Modes" and "Commands" subsections only exist when the sensor
            # metadata includes mode information.
            if 'mode_info' in s:
                modes_section = nodes.section(ids=[s['name'] + '-modes'],
                                              names=[s['name'] + '\\ modes'])
                modes_title = nodes.title(text='Modes')
                modes_section += modes_title
                modes_header = self._row([
                    'Mode',
                    'Description',
                    'Units',
                    'Decimals',
                    'Num. Values',
                    'Values'
                ])
                modes_rows = [r for r in self._modes_rows(s)]
                modes_table = self._table([3, 6, 3, 1, 1, 6], modes_header, modes_rows)
                modes_section += modes_table
                section += modes_section

                cmds_section = nodes.section(ids=[s['name'] + '-commands'],
                                             names=[s['name'] + '\\ commands'])
                cmds_title = nodes.title(text='Commands')
                cmds_section += cmds_title
                # Table of commands when present, otherwise a placeholder.
                if 'cmd_info' in s:
                    cmds_table = self._table([1, 6], self._row(['Command', 'Description']),
                                             [r for r in self._cmds_rows(s)])
                    cmds_section += cmds_table
                else:
                    cmds_paragraph = nodes.paragraph(
                        text='This sensor does not support commands.')
                    cmds_section += cmds_paragraph
                section += cmds_section

            # Free-form notes, when the sensor has any.
            notes = self._notes(s)
            if notes:
                section += notes

            yield section
Example #58
0
    def build_details_table(self, resource):
        """Build a two-column "details" table node describing *resource*.

        Emits one row per piece of metadata (name, URI, required
        features, policy ID, HTTP methods, parent/child resources,
        anonymous access) via the ``append_detail_row`` helper and
        returns the finished ``nodes.table``.
        """
        # List resources (collections) use plural naming and different
        # URI/child handling than item resources.
        is_list = 'is-list' in self.options

        table = nodes.table(classes=['resource-info'])

        tgroup = nodes.tgroup(cols=2)
        table += tgroup

        # 30/70 split between the field-name and value columns.
        tgroup += nodes.colspec(colwidth=30, classes=['field'])
        tgroup += nodes.colspec(colwidth=70, classes=['value'])

        tbody = nodes.tbody()
        tgroup += tbody

        # Name
        if is_list:
            resource_name = resource.name_plural
        else:
            resource_name = resource.name

        append_detail_row(tbody, "Name", nodes.literal(text=resource_name))

        # URI
        uri_template = get_resource_uri_template(resource, not is_list)
        append_detail_row(tbody, "URI", nodes.literal(text=uri_template))

        # Required features (row only present when the resource has any)
        if getattr(resource, 'required_features', False):
            feature_list = nodes.bullet_list()

            for feature in resource.required_features:
                item = nodes.list_item()
                paragraph = nodes.paragraph()

                paragraph += nodes.inline(text=feature.feature_id)
                item += paragraph
                feature_list += item

            append_detail_row(tbody, 'Required Features', feature_list)

        # Token Policy ID
        if hasattr(resource, 'policy_id'):
            append_detail_row(tbody, "Token Policy ID",
                              nodes.literal(text=resource.policy_id))

        # HTTP Methods: bullet list of intra-page links to each handler's
        # section, followed by the first sentence of its docstring.
        allowed_http_methods = self.get_http_methods(resource, is_list)
        bullet_list = nodes.bullet_list()

        for http_method in allowed_http_methods:
            item = nodes.list_item()
            bullet_list += item

            paragraph = nodes.paragraph()
            item += paragraph

            ref = nodes.reference(text=http_method, refid=http_method)
            paragraph += ref

            # Keep only the summary: truncate at the first period.
            doc_summary = self.get_doc_for_http_method(resource, http_method)
            i = doc_summary.find('.')

            if i != -1:
                doc_summary = doc_summary[:i + 1]

            paragraph += nodes.inline(text=" - ")
            paragraph += parse_text(
                self, doc_summary, nodes.inline,
                where='HTTP %s handler summary for %s'
                      % (http_method, self.options['classname']))

        append_detail_row(tbody, "HTTP Methods", bullet_list)

        # Parent Resource
        # NOTE(review): an item resource with a URI object key reports
        # itself (as a list) for the parent row -- confirm intended.
        if is_list or resource.uri_object_key is None:
            parent_resource = resource._parent_resource
            is_parent_list = False
        else:
            parent_resource = resource
            is_parent_list = True

        if parent_resource:
            paragraph = nodes.paragraph()
            paragraph += get_ref_to_resource(parent_resource, is_parent_list)
        else:
            # Plain string; append_detail_row evidently accepts text too.
            paragraph = 'None.'

        append_detail_row(tbody, "Parent Resource", paragraph)

        # Child Resources: rendered as a hidden toctree so the builder
        # links the child resource documents.
        if is_list:
            child_resources = list(resource.list_child_resources)

            if resource.name != resource.name_plural:
                # The item resource itself is addressable as a child of
                # the list resource when it has a URI object key.
                if resource.uri_object_key:
                    child_resources.append(resource)

                are_children_lists = False
            else:
                are_children_lists = True
        else:
            child_resources = resource.item_child_resources
            are_children_lists = True

        if child_resources:
            tocnode = addnodes.toctree()
            tocnode['glob'] = None
            tocnode['maxdepth'] = 1
            tocnode['hidden'] = False

            docnames = sorted([
                docname_join(self.state.document.settings.env.docname,
                             get_resource_docname(child_resource,
                                                  are_children_lists))
                for child_resource in child_resources
            ])

            tocnode['includefiles'] = docnames
            tocnode['entries'] = [(None, docname) for docname in docnames]
        else:
            tocnode = nodes.paragraph(text="None")

        append_detail_row(tbody, "Child Resources", tocnode)

        # Anonymous Access: derived from the GET handler's login flags.
        if is_list and not resource.singleton:
            getter = resource.get_list
        else:
            getter = resource.get

        if getattr(getter, 'login_required', False):
            anonymous_access = 'No'
        elif getattr(getter, 'checks_login_required', False):
            anonymous_access = 'Yes, if anonymous site access is enabled'
        else:
            anonymous_access = 'Yes'

        append_detail_row(tbody, "Anonymous Access", anonymous_access)

        return table
Example #59
0
    def run(self):
        """Process the image directive and return a single ``image_node``.

        Reads directive options (falling back to the ``images_config``
        app config for defaults), resolves remote vs. local image URIs,
        and fills in the custom node's attributes.
        """
        env = self.state.document.settings.env
        conf = env.app.config.images_config

        #TODO get defaults from config
        # A missing group gets a random UUID so ungrouped images do not
        # end up sharing one lightbox group.
        group = self.options.get('group',
            conf['default_group'] if conf['default_group'] else uuid.uuid4())
        classes = self.options.get('class', '')
        width = self.options.get('width', conf['default_image_width'])
        height = self.options.get('height', conf['default_image_height'])
        alt = self.options.get('alt', '')
        # title semantics: None = no title; '' = use content as title.
        title = self.options.get('title', '' if conf['default_show_title'] else None)
        align = self.options.get('align', '')
        show_caption = self.options.get('show_caption', False)
        legacy_classes = self.options.get('legacy_class', '')

        #TODO get default from config
        download = self.options.get('download', conf['download'])

        # parse nested content
        #TODO: something is broken here, not parsed as expected
        description = nodes.paragraph()
        content = nodes.paragraph()
        content += [nodes.Text(u"%s" % x) for x in self.content]
        self.state.nested_parse(content,
                                0,
                                description)

        img = image_node()

        if self.is_remote(self.arguments[0]):
            img['remote'] = True
            if download:
                # Downloaded copies are stored under _images/<sha1-of-url>
                # and registered so the builder copies them into place.
                img['uri'] = os.path.join('_images', hashlib.sha1(self.arguments[0].encode()).hexdigest())
                img['remote_uri'] = self.arguments[0]
                env.remote_images[img['remote_uri']] = img['uri']
                env.images.add_file('', img['uri'])
            else:
                img['uri'] = self.arguments[0]
                img['remote_uri'] = self.arguments[0]
        else:
            img['uri'] = self.arguments[0]
            img['remote'] = False
            env.images.add_file('', img['uri'])

        img['content'] = description.astext()

        # Title resolution: explicit title wins; None disables the title;
        # '' promotes the parsed content text into the title slot.
        if title is None:
            img['title'] = ''
        elif title:
            img['title'] = title
        else:
            img['title'] = img['content']
            img['content'] = ''

        img['show_caption'] = show_caption
        img['legacy_classes'] = legacy_classes
        img['group'] = group
        img['size'] = (width, height)
        # NOTE(review): the 'class' option yields a list but the default
        # is '' (a str); ``list += str`` iterates characters -- harmless
        # only because the default is empty. Confirm intended.
        img['classes'] += classes
        img['alt'] = alt
        img['align'] = align
        return [img]
Example #60
0
def process_motor_nodes(app, doctree):
    # Search doctree for Motor's methods and attributes whose docstrings were
    # copied from PyMongo, and fix them up for Motor:
    #   1. Add a 'callback' param (sometimes optional, sometimes required) to
    #      all async methods. If the PyMongo method took no params, we create
    #      a parameter-list from scratch, otherwise we edit PyMongo's list.
    #   2. Remove all version annotations like "New in version 2.0" since
    #      PyMongo's version numbers are meaningless in Motor's docs.
    #   3. Remove "seealso" directives that reference PyMongo's docs.
    #
    # We do this here, rather than by registering a callback to Sphinx's
    # 'autodoc-process-signature' event, because it's way easier to handle the
    # parsed doctree before it's turned into HTML than it is to update the RST.
    for objnode in doctree.traverse(desc):
        if objnode['objtype'] in ('method', 'attribute'):
            signature_node = find_by_path(objnode, [desc_signature])[0]
            name = '.'.join(
                [signature_node['module'], signature_node['fullname']])

            assert name.startswith('motor.')
            obj_motor_info = motor_info.get(name)
            if obj_motor_info:
                desc_content_node = find_by_path(objnode, [desc_content])[0]
                if obj_motor_info.get('is_async_method'):
                    try:
                        # Find the parameter list, a bullet_list instance
                        parameters_node = find_by_path(
                            desc_content_node,
                            [field_list, field, field_body, bullet_list])[0]
                    except IndexError:
                        # PyMongo method has no parameters, create an empty
                        # params list
                        parameters_node = bullet_list()
                        parameters_field_list_node = field_list(
                            '',
                            field('', field_name('', 'Parameters '),
                                  field_body('', parameters_node)))

                        desc_content_node.append(parameters_field_list_node)

                    insert_callback(parameters_node)

                    callback_future_text = (
                        "If a callback is passed, returns None, else returns a"
                        " Future.")

                    desc_content_node.append(
                        paragraph('', Text(callback_future_text)))

                if obj_motor_info['is_pymongo_docstring']:
                    # Remove all "versionadded", "versionchanged" and
                    # "deprecated" directives from the docs we imported from
                    # PyMongo
                    version_nodes = find_by_path(desc_content_node,
                                                 [versionmodified])

                    for version_node in version_nodes:
                        version_node.parent.remove(version_node)

                    # Remove all "seealso" directives that contain :doc:
                    # references from PyMongo's docs
                    seealso_nodes = find_by_path(desc_content_node, [seealso])

                    for seealso_node in seealso_nodes:
                        if 'reftype="doc"' in str(seealso_node):
                            seealso_node.parent.remove(seealso_node)