def _generate_documentation(self, tree):
     unvisited = [tree]
     while len(unvisited) > 0:
         node = unvisited[-1]
         if len(node.children) > 0:
             unvisited.append(node.remove_child())
             continue
         if node.parent is None or node.comment is None:
             unvisited.pop()
             continue
         with switch_source_input(self.state, node.comment):
             definition = nodes.definition()
             if isinstance(node.comment, ViewList):
                 self.state.nested_parse(node.comment, 0, definition)
             else:
                 definition += node.comment
             node.comment = nodes.definition_list_item(
                 "",
                 nodes.term("", node.value.value),
                 definition,
             )
             if node.parent.comment is None:
                 node.parent.comment = nodes.definition_list()
             elif not isinstance(node.parent.comment,
                                 nodes.definition_list):
                 with switch_source_input(self.state, node.parent.comment):
                     dlist = nodes.definition_list()
                     self.state.nested_parse(node.parent.comment, 0, dlist)
                     node.parent.comment = dlist
             node.parent.comment += node.comment
         unvisited.pop()
     return tree.comment
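Every example below follows the same basic shape: collect generated reST into a ViewList/StringList together with a source name and line number for each line, run the nested parse inside switch_source_input so warnings and errors point at the generated source rather than the directive, and return the children of a throwaway container node. A minimal sketch of that pattern (the directive and its content are illustrative; only the switch_source_input/nested_parse calls mirror the examples):

from docutils import nodes
from docutils.statemachine import StringList
from sphinx.util.docutils import SphinxDirective, switch_source_input


class MinimalGeneratedContentDirective(SphinxDirective):
    def run(self):
        # Record a source name and line for every generated line so that
        # diagnostics point at the generated text, not the directive.
        content = StringList()
        for lineno, line in enumerate(['Some *generated* reST.', '']):
            content.append(line, '<minimal-generated-content>', lineno)

        # Parse with the reporter switched to the generated content, then
        # return the children of the scratch node.
        with switch_source_input(self.state, content):
            node = nodes.paragraph()
            node.document = self.state.document
            self.state.nested_parse(content, 0, node)
            return node.children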
Example #2
    def build_app_doc(self, module, app):
        """
        Build overall Chalice app documentation.

        Heading comes from app name.
        Body comes from directive content or module docstring.
        """
        # See RSTState.section for regular section creation logic.
        root = App()
        root['names'].append(nodes.fully_normalize_name(app.app_name))
        root += AppName(app.app_name, app.app_name.replace('_', ' ').title())
        self.state.document.note_implicit_target(root, root)
        # Add cross-reference
        self.add_xref('app', app.app_name, root['ids'][0])
        # If content is given use that, otherwise use module docstring.
        if self.content:
            nodeutils.nested_parse_with_titles(self.state, self.content, root)
        else:
            _, content = get_content(module)
            with docutils.switch_source_input(self.state, content):
                # Necessary so that the child nodes get the right source/line
                root.document = self.state.document
                nodeutils.nested_parse_with_titles(self.state, content, root)

        return root
Example #3
    def do_parse(self, rstlist, node):
        """Parse rST source lines and add them to the specified node

        Take the list of rST source lines rstlist, parse them as
        rST, and add the resulting docutils nodes as children of node.
        The nodes are parsed in a way that allows them to include
        subheadings (titles) without confusing the rendering of
        anything else.
        """
        # This is from kerneldoc.py -- it works around an API change in
        # Sphinx between 1.6 and 1.7. Unlike kerneldoc.py, we use
        # sphinx.util.nodes.nested_parse_with_titles() rather than the
        # plain self.state.nested_parse(), and so we can drop the saving
        # of title_styles and section_level that kerneldoc.py does,
        # because nested_parse_with_titles() does that for us.
        if Use_SSI:
            with switch_source_input(self.state, rstlist):
                nested_parse_with_titles(self.state, rstlist, node)
        else:
            save = self.state.memo.reporter
            self.state.memo.reporter = AutodocReporter(
                rstlist, self.state.memo.reporter)
            try:
                nested_parse_with_titles(self.state, rstlist, node)
            finally:
                self.state.memo.reporter = save
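The comment above refers to the Sphinx 1.6 to 1.7 API change but never shows how the Use_SSI flag is set. One plausible definition (an assumption, not taken from kerneldoc.py or any of these examples) is to probe for switch_source_input at import time:

# Assumed definition of the Use_SSI flag used above: switch_source_input
# only exists in Sphinx >= 1.7, so fall back to the pre-1.7
# AutodocReporter machinery when the import fails.
try:
    from sphinx.util.docutils import switch_source_input
    Use_SSI = True
except ImportError:
    from sphinx.ext.autodoc import AutodocReporter
    Use_SSI = False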
Example #4
    def run(self):
        structure_xml = self.get_opt('structure_xml')

        parent = docutils.nodes.section()
        parent.document = self.state.document
        content = StringList()
        objtype = RE_AUTOSTRIP.sub('', self.name)  # strip prefix
        visited = set()

        try:
            tree = etree.parse(structure_xml)
            self.state.document.settings.record_dependencies.add(structure_xml)

            self.functions = set(
                tree.xpath('//function/full_name/text ()',
                           smart_strings=False))
            self.classes = set(
                tree.xpath('//class/full_name/text ()', smart_strings=False))
            self.methods = set(
                tree.xpath('//method/full_name/text ()', smart_strings=False))
            self.properties = set(
                tree.xpath('//property/full_name/text ()',
                           smart_strings=False))

            for k in list(self.functions):
                self.functions.add(strip_braces(k))
            for k in list(self.methods):
                self.methods.add(strip_braces(k))

            for argument in self.arguments:
                xpath_query = "//file[re:test (@path, '%s')]" % argument

                filenodes = {}
                for node in tree.xpath(xpath_query, namespaces=NS):
                    filenodes[node.get('path')] = node

                for path in sorted(filenodes.keys()):
                    if path in visited:
                        continue
                    visited.add(path)
                    node = filenodes[path]

                    if objtype == 'module':
                        PHPModule(node, 0, self).run(content)
                    if objtype == 'class':
                        PHPClass(node, 0, self).run(content)
                    if objtype == 'method':
                        PHPMethod(node, 0, self).run(content)
                    if objtype == 'function':
                        PHPFunction(node, 0, self).run(content)

            with switch_source_input(self.state, content):
                # logger.info (content.pprint ())
                nested_parse_with_titles(self.state, content, parent)

        except etree.LxmlError as exc:
            logger.error('LXML Error in "%s" directive: %s.' %
                         (self.name, str(exc)))

        return parent.children
Example #5
    def run(self):
        env = self.state.document.settings.env

        result = ViewList()

        for pattern in self.arguments[0].split():
            filenames = glob.glob(env.config.cautodoc_root + '/' + pattern)
            if len(filenames) == 0:
                fmt = 'Pattern "{pat}" does not match any files.'
                env.app.warn(fmt.format(pat=pattern),
                             location=(env.docname, self.lineno))
                continue

            for filename in filenames:
                mode = os.stat(filename).st_mode
                if stat.S_ISDIR(mode):
                    fmt = 'Path "{name}" matching pattern "{pat}" is a directory.'
                    env.app.warn(fmt.format(name=filename, pat=pattern),
                                 location=(env.docname, self.lineno))
                    continue

                # Tell Sphinx about the dependency and parse the file
                env.note_dependency(os.path.abspath(filename))
                self.__parse(result, filename)

        # Parse the extracted reST
        with switch_source_input(self.state, result):
            node = nodes.section()
            nested_parse_with_titles(self.state, result, node)

        return node.children
Example #6
    def nestedParse(self, lines, fname):
        content = ViewList()
        node    = nodes.section()

        if "debug" in self.options:
            code_block = "\n\n.. code-block:: rst\n    :linenos:\n"
            for l in lines.split("\n"):
                code_block += "\n    " + l
            lines = code_block + "\n\n"

        for c, l in enumerate(lines.split("\n")):
            content.append(l, fname, c)

        buf  = self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter

        if Use_SSI:
            with switch_source_input(self.state, content):
                self.state.nested_parse(content, 0, node, match_titles=1)
        else:
            self.state.memo.title_styles  = []
            self.state.memo.section_level = 0
            self.state.memo.reporter      = AutodocReporter(content, self.state.memo.reporter)
            try:
                self.state.nested_parse(content, 0, node, match_titles=1)
            finally:
                self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter = buf

        return node.children
Example #7
    def run(self) -> List[Node]:
        # look up target Documenter
        objtype = self.name[5:]  # strip prefix (fibre-).
        documenter = documenters[objtype]()

        registry = self.env.app.fibre_registry
        
        for file in self.config.fibre_interface_files:
            self.env.note_dependency(file)

        decl_ns, obj = documenter.load_object(registry, self.arguments[0])
        lines = documenter.generate(registry, decl_ns.get_path()[:2], obj, self.options)

        result_rest = StringList()
        for line in lines:
            result_rest.append(line, 'fibre autogen output', 0)

        #print("reST output: ", result_rest)
        
        # Parse nested reST
        with switch_source_input(self.state, result_rest):
            node = nodes.paragraph()
            node.document = self.state.document
            self.state.nested_parse(result_rest, 0, node)
            return node.children
Example #8
 def transform_content(self, contentnode: addnodes.desc_content) -> None:
     name = self.arguments[0]
     source, lineno = self.get_source_info()
     source = f'{source}:{lineno}:<confval>'
     fields = StringList(self._render_option(name).splitlines() + [''],
                         source=source, parent_offset=lineno)
     with switch_source_input(self.state, fields):
         self.state.nested_parse(fields, 0, contentnode)
Example #9
def parse_generated_content(state: RSTState,
                            content: StringList) -> List[Node]:
    """Parse a generated content by Documenter."""
    with switch_source_input(state, content):
        node = nodes.paragraph()
        node.document = state.document
        state.nested_parse(content, 0, node)

        return node.children
Example #10
 def do_parse(self, result, node):
     if Use_SSI:
         with switch_source_input(self.state, result):
             nested_parse_with_titles(self.state, result, node)
     else:
         save = self.state.memo.reporter
         self.state.memo.reporter = AutodocReporter(result, self.state.memo.reporter)
         try:
             nested_parse_with_titles(self.state, result, node)
         finally:
             self.state.memo.reporter = save
Example #11
    def build_route_doc(self, view_function, methods, path, basepath=None):
        """Build documentation for an individual route from view_function docstring."""
        basepath = basepath or ''
        section = Route()
        srccontent, doccontent = get_content(view_function)
        with docutils.switch_source_input(self.state, srccontent):
            # Necessary so that the child nodes get the right source/line
            section.document = self.state.document
            section['names'].extend([
                view_function.__name__,
                nodes.fully_normalize_name(path),
            ])
            self.state.document.note_implicit_target(section, section)
            sid = section['ids'][0]
            # Add title
            title_src = ' '.join(methods + [basepath + path])
            title = RouteName(title_src)
            methodlist = MethodList(' '.join(methods))
            for i, method in enumerate(methods):
                mnode = Method(method, method)
                mnode.setdefault('classes', []).append(method.lower())
                if i > 0:
                    methodlist += nodes.Text(' ')

                methodlist += mnode
                # ...add cross-reference
                self.add_xref('route',
                              '{} {}{}'.format(method, basepath, path), sid)

            title += [methodlist, nodes.Text(' ')]
            if basepath:
                title += BasePath(basepath, basepath)

            title += Path(path, path)
            section += title
            # Add content
            with docutils.switch_source_input(self.state, doccontent):
                nodeutils.nested_parse_with_titles(self.state, doccontent,
                                                   section)

        return section
Example #12
    def run(self):
        result = ViewList()

        for filename in self._get_filenames():
            self.__get_docstrings(result, filename)

        # Parse the extracted reST
        with switch_source_input(self.state, result):
            node = nodes.section()
            nested_parse_with_titles(self.state, result, node)

        return node.children
Example #13
 def do_parse(self, result, node):
     if Use_SSI:
         with switch_source_input(self.state, result):
             self.state.nested_parse(result, 0, node, match_titles=1)
     else:
         save = self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter
         self.state.memo.reporter = AutodocReporter(result, self.state.memo.reporter)
         self.state.memo.title_styles, self.state.memo.section_level = [], 0
         try:
             self.state.nested_parse(result, 0, node, match_titles=1)
         finally:
             self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter = save
Example #15
    def do_parse(self, content, node):
        if Use_SSI:
            with switch_source_input(self.state, content):
                self.state.nested_parse(content, 0, node, match_titles=1)
        else:
            buf = self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter

            self.state.memo.title_styles = []
            self.state.memo.section_level = 0
            self.state.memo.reporter = AutodocReporter(
                content, self.state.memo.reporter)
            try:
                self.state.nested_parse(content, 0, node, match_titles=1)
            finally:
                self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter = buf
Example #16
def parse_generated_content(state, content, documenter):
    # type: (State, StringList, Documenter) -> List[nodes.Node]
    """Parse a generated content by Documenter."""
    with switch_source_input(state, content):
        if documenter.titles_allowed:
            node = nodes.section()
            # necessary so that the child nodes get the right source/line set
            node.document = state.document
            nested_parse_with_titles(state, content, node)
        else:
            node = nodes.paragraph()
            node.document = state.document
            state.nested_parse(content, 0, node)

        return node.children
Example #17
def parse_generated_content(state, content, documenter):
    # type: (State, StringList, Documenter) -> List[nodes.Node]
    """Parse a generated content by Documenter."""
    with switch_source_input(state, content):
        if documenter.titles_allowed:
            node = nodes.section()
            # necessary so that the child nodes get the right source/line set
            node.document = state.document
            nested_parse_with_titles(state, content, node)
        else:
            node = nodes.paragraph()
            node.document = state.document
            state.nested_parse(content, 0, node)

        return node.children
Example #18
def parse_generated_content(state: RSTState, content: StringList,
                            documenter: Documenter) -> List[Node]:
    """Parse an item of content generated by Documenter."""
    with switch_source_input(state, content):
        if documenter.titles_allowed:
            node: Element = nodes.section()
            # necessary so that the child nodes get the right source/line set
            node.document = state.document
            nested_parse_with_titles(state, content, node)
        else:
            node = nodes.paragraph()
            node.document = state.document
            state.nested_parse(content, 0, node)

        return node.children
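For completeness, a hypothetical caller of the parse_generated_content function shown above (the documenter stub and directive wiring are assumptions, not taken from these examples):

from docutils.statemachine import StringList
from sphinx.util.docutils import SphinxDirective


class _StubDocumenter:
    # Stand-in exposing only the attribute parse_generated_content checks.
    titles_allowed = True


class AutoStubDirective(SphinxDirective):
    def run(self):
        result = StringList()
        for i, line in enumerate(['Title', '=====', '', 'Body text.']):
            result.append(line, '<auto-stub>', i)
        return parse_generated_content(self.state, result, _StubDocumenter())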
Example #19
 def append_row(*column_texts: str) -> None:
     row = nodes.row('')
     source, line = self.state_machine.get_source_and_line()
     for text in column_texts:
         node = nodes.paragraph('')
         vl = StringList()
         vl.append(text, '%s:%d:<autosummary>' % (source, line))
         with switch_source_input(self.state, vl):
             self.state.nested_parse(vl, 0, node)
             try:
                 if isinstance(node[0], nodes.paragraph):
                     node = node[0]
             except IndexError:
                 pass
             row.append(nodes.entry('', node))
     body.append(row)
Example #20
 def _create_row(self, *column_texts):
     row = nodes.row('')
     source, line = self.state_machine.get_source_and_line()
     for text_line in column_texts:
         node = nodes.paragraph('')
         vl = ViewList()
         for text in text_line.split('\n'):
             vl.append(text, '%s:%d' % (source, line))
         with switch_source_input(self.state, vl):
             self.state.nested_parse(vl, 0, node)
             try:
                 if isinstance(node[0], nodes.paragraph) and len(node.children) == 1:
                     node = node[0]
             except IndexError:
                 pass
             row.append(nodes.entry('', node))
     return row
Example #21
 def append_row(*column_texts):
     # type: (unicode) -> None
     row = nodes.row('')
     source, line = self.state_machine.get_source_and_line()
     for text in column_texts:
         node = nodes.paragraph('')
         vl = ViewList()
         vl.append(text, '%s:%d:<autosummary>' % (source, line))
         with switch_source_input(self.state, vl):
             self.state.nested_parse(vl, 0, node)
             try:
                 if isinstance(node[0], nodes.paragraph):
                     node = node[0]
             except IndexError:
                 pass
             row.append(nodes.entry('', node))
     body.append(row)
Example #22
    def _append_row(self, body, column_texts):
        # type: (Any, str) -> None
        table_row = nodes.row('')
        source, line = self.state_machine.get_source_and_line()

        for text in column_texts:
            node = nodes.paragraph('')

            vl = ViewList()
            vl.append(text, f'{source}:{line}:<{self.name}>')

            with switch_source_input(self.state, vl):
                self.state.nested_parse(vl, 0, node)
                while len(node) > 0 and isinstance(node[0], nodes.paragraph):
                    node = node[0]

                table_row.append(nodes.entry('', node))

        body.append(table_row)
Example #23
    def run(self):
        tags_arg = self.options.get("tags")
        tags = {t.strip() for t in tags_arg.split(",")} if tags_arg else None

        header_separator = self.options.get('header-separator')
        new_content = self.render_content(tags=tags, header_separator=header_separator)

        with switch_source_input(self.state, self.content):
            new_content = StringList(new_content.splitlines(), source='')
            node = nodes.section()  # type: Element
            # necessary so that the child nodes get the right source/line set
            node.document = self.state.document
            nested_parse_with_titles(self.state, new_content, node)

        # record all filenames as dependencies -- this will at least
        # partially make automatic invalidation possible
        for filepath in get_provider_yaml_paths():
            self.state.document.settings.record_dependencies.add(filepath)

        return node.children
Example #24
    def _parse_string(self, s: str):
        """Adapted from https://github.com/sphinx-doc/sphinx/blob/5559e5af1ff6f5fc2dc706
        79bdd6dc089cfff388/sphinx/ext/autosummary/__init__.py#L425."""
        node = nodes.paragraph("")

        vl = StringList()

        source, line = self.state_machine.get_source_and_line()
        vl.append(s, f"{source}:{line}:<probnum-config-options>")

        with switch_source_input(self.state, vl):
            self.state.nested_parse(vl, 0, node)

            try:
                if isinstance(node[0], nodes.paragraph):
                    node = node[0]
            except IndexError:
                pass

        return node
Example #25
    def run(self):
        env = self.state.document.settings.env
        filename = os.path.join(env.config.cdoc_srcdir, self.arguments[0])
        env.note_dependency(os.path.abspath(filename))

        ## create a (view) list from the extracted doc
        lst = docutils.statemachine.ViewList()
        f = open(filename, 'r')
        for (lineno, lines) in extract(f, filename):
            for l in lines.split('\n'):
                lst.append(l.expandtabs(8), filename, lineno)
                lineno += 1

        ## let parse this new reST content
        memo = self.state.memo
        save = memo.title_styles, memo.section_level
        node = docutils.nodes.section()
        try:
            with switch_source_input(self.state, lst):
                self.state.nested_parse(lst, 0, node, match_titles=1)
        finally:
            memo.title_styles, memo.section_level = save
        return node.children
Example #26
 def _parse_file(self, source):
     comments = {}
     with open(source, 'r') as src:
         doc = src.read()
     in_docstring = False
     for linenum, line in enumerate(doc.splitlines(), start=1):
         line = line.lstrip()
         if line.startswith(self.config.autoyaml_doc_delimiter):
             in_docstring = True
             comment = ViewList()
         elif line.startswith(self.config.autoyaml_comment) \
                 and in_docstring:
             line = line[len(self.config.autoyaml_comment):]
             # strip preceding whitespace
             if line and line[0] == ' ':
                 line = line[1:]
             comment.append(line, source, linenum)
         elif in_docstring:
             comments[linenum] = comment
             in_docstring = False
     loader = Loader(doc)
     token = None
     while True:
         last_token, token = token, loader.get_token()
         if token is None:
             break
         end_line = token.end_mark.line
         if isinstance(last_token, tokens.KeyToken) \
                 and isinstance(token, tokens.ScalarToken):
             comment = comments.get(end_line + 1)
             if comment:
                 with switch_source_input(self.state, comment):
                     node = nodes.paragraph(text=token.value)
                     definition = nodes.definition()
                     node += definition
                     self.state.nested_parse(comment, 0, definition)
                     yield node
Example #27
 def do_parse(self, content, node):
     with switch_source_input(self.state, content):
         self.state.nested_parse(content, 0, node, match_titles=1)
Example #28
    def get_table(self, items: List[Tuple[str, str, str, str,
                                          str]]) -> List[Node]:
        """Generate a proper list of table nodes for autosummary:: directive.
        *items* is a list produced by :meth:`get_items`.
        """

        has_config_type = any([item[-1] is not None for item in items])
        if has_config_type:
            n_cols = 3
        else:
            n_cols = 2

        table_spec = addnodes.tabular_col_spec()
        table_spec["spec"] = r"\X{1}{2}\X{1}{2}"

        table = autosummary_table("")
        real_table = nodes.table("", classes=["longtable"])
        table.append(real_table)
        group = nodes.tgroup("", cols=n_cols)
        real_table.append(group)
        group.append(nodes.colspec("", colwidth=10))
        if has_config_type:
            group.append(nodes.colspec("", colwidth=10))
        group.append(nodes.colspec("", colwidth=90))

        head = nodes.thead("")
        cols = ["Class/method name", "type", "Summary"]
        if not has_config_type:
            del cols[1]
        row = nodes.row("")
        source, line = self.state_machine.get_source_and_line()
        for text in cols:
            node = nodes.paragraph("")
            vl = StringList()
            vl.append(text, "%s:%d:<autosummary>" % (source, line))
            with switch_source_input(self.state, vl):
                self.state.nested_parse(vl, 0, node)
                try:
                    if isinstance(node[0], nodes.paragraph):
                        node = node[0]
                except IndexError:
                    pass
                row.append(nodes.entry("", node))
        head.append(row)
        group.append(head)

        body = nodes.tbody("")
        group.append(body)

        def append_row(*column_texts: str) -> None:
            row = nodes.row("")
            source, line = self.state_machine.get_source_and_line()
            for text in column_texts:
                node = nodes.paragraph("")
                vl = StringList()
                vl.append(text, "%s:%d:<autosummary>" % (source, line))
                with switch_source_input(self.state, vl):
                    self.state.nested_parse(vl, 0, node)
                    try:
                        if isinstance(node[0], nodes.paragraph):
                            node = node[0]
                    except IndexError:
                        pass
                    row.append(nodes.entry("", node))
            body.append(row)

        for name, sig, summary, real_name, config_type in items:
            qualifier = "obj"
            if "nosignatures" not in self.options:
                col1 = ":%s:`%s <%s>`\\ %s" % (
                    qualifier,
                    name,
                    real_name,
                    rst.escape(sig),
                )
            else:
                col1 = ":%s:`%s <%s>`" % (qualifier, name, real_name)
            col2 = summary
            if has_config_type:
                col3 = config_type if config_type else ""
                append_row(col1, col3, col2)
            else:
                append_row(col1, col2)
        return [table_spec, table]
Example #29
    def run(self):
        self.doclets = []
        self.names = {}
        self.longnames = {}

        structure_json = self.get_opt('structure_json', True)

        parent = docutils.nodes.section()
        parent.document = self.state.document
        objtype = strip_directive(self.name)  # 'js:automodule' => 'module'

        self.content = StringList()

        def obj_factory(d):
            """ Transmogrify the dictionaries read from the json file into objects.
            If the object has a known kind, make it into a JS<kind> class;
            else if it has an unknown kind, make it into an Obj;
            else if it has no kind (substructures of doclets), make it an obj.
            """
            try:
                kind = d['kind']
                o = self.vtable.get(kind, Obj)(d)
            except KeyError:
                o = obj(d)
            return o

        # load and cache structure file
        if structure_json not in loaded_structure_files:
            with open(structure_json, 'r') as fp:
                self.state.document.settings.record_dependencies.add(
                    structure_json)
                loaded_structure_files[structure_json] = \
                    self.merge_doclets (jsonlib.load (fp, object_hook = obj_factory))

        # get cached structure file
        self.doclets, self.names, self.longnames = loaded_structure_files[
            structure_json]

        try:
            visited = set()  # remember which objects we have already output
            for argument in self.arguments:
                rex = re.compile(argument)

                # grep the list of doclets
                doclets = (d for d in self.doclets
                           if d.kind == objtype and rex.search(d.longname)
                           and d.longname not in visited)

                for d in sorted(doclets, key=operator.attrgetter('longname')):
                    visited.add(d.longname)
                    d.run(self, 0)

            with switch_source_input(self.state, self.content):
                # logger.info (self.content.pprint ())
                try:
                    nested_parse_with_titles(self.state, self.content, parent)
                except:
                    logger.error(self.content.pprint())
                    raise

        except AutoJSDocError as exc:
            logger.error('Error in "%s" directive: %s.' %
                         (self.name, str(exc)))

        return parent.children
Example #30
 def do_parse(self, result, node):
     with switch_source_input(self.state, result):
         self.state.nested_parse(result, 0, node, match_titles=1)
Example #31
    def run(self):
        env = self.state.document.settings.env
        cmd = [env.config.kerneldoc_bin, '-rst', '-enable-lineno']

        filename = env.config.kerneldoc_srctree + '/' + self.arguments[0]
        export_file_patterns = []

        # Tell sphinx of the dependency
        env.note_dependency(os.path.abspath(filename))

        tab_width = self.options.get('tab-width',
                                     self.state.document.settings.tab_width)

        # FIXME: make this nicer and more robust against errors
        if 'export' in self.options:
            cmd += ['-export']
            export_file_patterns = str(self.options.get('export')).split()
        elif 'internal' in self.options:
            cmd += ['-internal']
            export_file_patterns = str(self.options.get('internal')).split()
        elif 'doc' in self.options:
            cmd += ['-function', str(self.options.get('doc'))]
        elif 'functions' in self.options:
            functions = self.options.get('functions').split()
            if functions:
                for f in functions:
                    cmd += ['-function', f]
            else:
                cmd += ['-no-doc-sections']

        for pattern in export_file_patterns:
            for f in glob.glob(env.config.kerneldoc_srctree + '/' + pattern):
                env.note_dependency(os.path.abspath(f))
                cmd += ['-export-file', f]

        cmd += [filename]

        try:
            logger.verbose('calling kernel-doc \'%s\'' % (" ".join(cmd)))

            p = subprocess.Popen(cmd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            out, err = p.communicate()

            out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8')

            if p.returncode != 0:
                sys.stderr.write(err)

                logger.warning('kernel-doc \'%s\' failed with return code %d' %
                               (" ".join(cmd), p.returncode))
                return [
                    nodes.error(None,
                                nodes.paragraph(text="kernel-doc missing"))
                ]
            elif env.config.kerneldoc_verbosity > 0:
                sys.stderr.write(err)

            lines = statemachine.string2lines(out,
                                              tab_width,
                                              convert_whitespace=True)
            result = ViewList()

            lineoffset = 0
            line_regex = re.compile("^#define LINENO ([0-9]+)$")
            for line in lines:
                match = line_regex.search(line)
                if match:
                    # sphinx counts lines from 0
                    lineoffset = int(match.group(1)) - 1
                    # we must eat our comments since they upset the markup
                else:
                    result.append(line, filename, lineoffset)
                    lineoffset += 1

            node = nodes.section()
            buf = self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter
            self.state.memo.title_styles, self.state.memo.section_level = [], 0
            try:
                with switch_source_input(self.state, result):
                    self.state.nested_parse(result, 0, node, match_titles=1)
            finally:
                self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter = buf

            return node.children

        except Exception as e:  # pylint: disable=W0703
            logger.warning('kernel-doc \'%s\' processing failed with: %s' %
                           (" ".join(cmd), str(e)))
            return [
                nodes.error(None, nodes.paragraph(text="kernel-doc missing"))
            ]
Example #32
    def getNodes(self):  # pylint: disable=too-many-branches, too-many-statements, too-many-locals

        translator = kerneldoc.ReSTTranslator()
        lines = ""
        content = WriterList(self.parser)

        # translate

        if "debug" in self.options:
            rstout = StringIO()
            self.parser.options.out = rstout
            self.parser.parse_dump_storage(translator=translator)
            code_block = "\n\n.. code-block:: rst\n    :linenos:\n"
            for l in rstout.getvalue().split("\n"):
                code_block += "\n    " + l
            lines = code_block + "\n\n"

        elif "snippets" in self.options:
            selected = self.options["snippets"].replace(",", " ").split()
            names = self.parser.ctx.snippets.keys()
            not_found = [s for s in selected if s not in names]
            found = [s for s in selected if s in names]
            if not_found:
                self.errMsg("selected snippets(s) not found:\n    %s" %
                            "\n    ,".join(not_found))

            if found:
                code_block = "\n\n.. code-block:: %s\n" % self.options.get(
                    "language", "c")
                if "linenos" in self.options:
                    code_block += "    :linenos:\n"
                snipsnap = ""
                while found:
                    snipsnap += self.parser.ctx.snippets[found.pop(0)] + "\n\n"
                for l in snipsnap.split("\n"):
                    code_block += "\n    " + l
                lines = code_block + "\n\n"

        else:
            self.parser.options.out = content
            self.parser.parse_dump_storage(translator=translator)

        # check translation

        if "functions" in self.options:
            selected = self.options["functions"].replace(",", " ").split()
            names = translator.translated_names
            not_found = [s for s in selected if s not in names]
            if not_found:
                self.errMsg("selected section(s) not found:\n    %s" %
                            "\n    ,".join(not_found))

        if "export" in self.options:
            selected = self.parser.options.use_names
            names = translator.translated_names
            not_found = [s for s in selected if s not in names]
            if not_found:
                self.errMsg("exported definitions not found:\n    %s" %
                            "\n    ,".join(not_found))

        # add lines to content list
        reSTfname = self.state.document.current_source

        content.flush()
        if lines:
            for l in lines.split("\n"):
                content.append(l, reSTfname, self.lineno)

        node = nodes.section()
        # necessary so that the child nodes get the right source/line set
        node.document = self.state.document
        with switch_source_input(self.state, content):
            # hack around title style bookkeeping
            buf = self.state.memo.title_styles, self.state.memo.section_level
            self.state.memo.title_styles, self.state.memo.section_level = [], 0
            try:
                self.state.nested_parse(content, 0, node, match_titles=1)
            finally:
                self.state.memo.title_styles, self.state.memo.section_level = buf
        return node.children