Example #1
    def write(self, *ignored):
        if self.config.man_pages:
            # build manpages from config.man_pages as usual
            ManualPageBuilder.write(self, *ignored)

        self.info(bold("scan master tree for kernel-doc man-pages ... ") + darkgreen("{"), nonl=True)

        master_tree = self.env.get_doctree(self.config.master_doc)
        master_tree = inline_all_toctrees(
            self, set(), self.config.master_doc, master_tree, darkgreen, [self.config.master_doc])
        self.info(darkgreen("}"))
        man_nodes   = master_tree.traverse(condition=self.is_manpage)
        if not man_nodes and not self.config.man_pages:
            self.warn('no "man_pages" config value nor manual section found; no manual pages '
                      'will be written')
            return

        self.info(bold('writing man pages ... '), nonl=True)

        for man_parent in man_nodes:

            doc_tree = self.get_partial_document(man_parent)
            Section2Manpage(doc_tree).apply()

            if not doc_tree.man_info["authors"] and self.config.author:
                doc_tree.man_info["authors"].append(self.config.author)

            doc_writer   = ManualPageWriter(self)
            doc_settings = OptionParser(
                defaults=self.env.settings,
                components=(doc_writer,),
                read_config_files=True).get_default_values()

            doc_settings.__dict__.update(doc_tree.man_info)
            doc_tree.settings = doc_settings
            targetname  = '%s.%s' % (doc_tree.man_info.title, doc_tree.man_info.section)
            if doc_tree.man_info.decl_type in [
                    "struct", "enum", "union", "typedef"]:
                targetname = "%s_%s" % (doc_tree.man_info.decl_type, targetname)

            destination = FileOutput(
                destination_path=path.join(self.outdir, targetname),
                encoding='utf-8')

            self.info(darkgreen(targetname) + " ", nonl=True)
            self.env.resolve_references(doc_tree, doc_tree.man_info.manpage, self)

            # remove pending_xref nodes
            for pendingnode in doc_tree.traverse(addnodes.pending_xref):
                pendingnode.replace_self(pendingnode.children)
            doc_writer.write(doc_tree, destination)
        self.info()
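Taken together, this write() override first emits the ordinary man_pages output and then generates one extra manual page per kernel-doc section found in the master tree. A minimal sketch of how such a builder might be registered in a Sphinx extension; the class name KernelDocManBuilder is an assumption, only app.add_builder() is real Sphinx API:

def setup(app):
    # KernelDocManBuilder is a hypothetical name for the subclass of
    # ManualPageBuilder whose write() is shown above.
    app.add_builder(KernelDocManBuilder)
    return {"parallel_read_safe": True}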
Example #2
def doctree_read(app, doctree):
    """
    Inject AutoAPI into the TOC Tree dynamically.
    """
    if app.env.docname == "index":
        all_docs = set()
        insert = True
        nodes = doctree.traverse(toctree)
        toc_entry = "%s/index" % app.config.autoapi_root
        add_entry = (
            nodes
            and app.config.autoapi_generate_api_docs
            and app.config.autoapi_add_toctree_entry
        )
        if not add_entry:
            return
        # Capture all existing toctree entries
        for node in nodes:
            for entry in node["entries"]:
                all_docs.add(entry[1])
        # Don't insert autoapi if it's already present
        for doc in all_docs:
            if doc.find(app.config.autoapi_root) != -1:
                insert = False
        if insert and app.config.autoapi_add_toctree_entry:
            # Insert AutoAPI index
            nodes[-1]["entries"].append((None, u"%s/index" % app.config.autoapi_root))
            nodes[-1]["includefiles"].append(u"%s/index" % app.config.autoapi_root)
            message_prefix = bold("[AutoAPI] ")
            message = darkgreen(
                "Adding AutoAPI TOCTree [{0}] to index.rst".format(toc_entry)
            )
            LOGGER.info(message_prefix + message)
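This handler only runs if the extension subscribes it to Sphinx's doctree-read event, which fires once per parsed document; a minimal wiring sketch:

def setup(app):
    # The handler itself returns early unless the document is "index".
    app.connect("doctree-read", doctree_read)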
Example #3
    def finish(self):
        I18nBuilder.finish(self)
        data = dict(
            version = self.config.version,
            copyright = self.config.copyright,
            project = self.config.project,
            # XXX should supply tz
            ctime = datetime.now().strftime('%Y-%m-%d %H:%M%z'),
        )
        self._create_project_folder()
        for section, messages in self.status_iterator(
                self.catalogs.iteritems(), "writing message catalogs... ",
                lambda (section, _): darkgreen(section), len(self.catalogs)):

            pofn = path.join(self.config.omegat_project_path,
                             "source", section + '.po')
            pofile = open(pofn, 'w', encoding='utf-8')
            try:
                pofile.write(POHEADER % data)
                for message, positions in messages.iteritems():
                    # message contains *one* line of text ready for translation
                    position = ", ".join(["%s(%s)" % (source, line) 
                                              for (source, line) in positions])
                    message = message.replace(u'\\', ur'\\'). \
                                      replace(u'"', ur'\"')
                    pomsg = u'#%s\nmsgid "%s"\nmsgstr ""\n\n' % (position, message)
                    pofile.write(pomsg)
            finally:
                pofile.close()
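The replace() chain above escapes backslashes and double quotes so each message is a valid double-quoted PO msgid. A standalone illustration of the same transformation:

message = 'say "hi" to C:\\temp'
escaped = message.replace('\\', '\\\\').replace('"', '\\"')
print('msgid "%s"' % escaped)  # msgid "say \"hi\" to C:\\temp"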
Example #4
    def find_files(self, patterns, dirs, ignore):
        # pylint: disable=too-many-nested-blocks
        if not ignore:
            ignore = []
        files_to_read = []
        for _dir in dirs:
            for root, dirnames, filenames in os.walk(_dir):
                for pattern in patterns:
                    for filename in fnmatch.filter(filenames, pattern):
                        skip = False

                        # Skip ignored files
                        for ignore_pattern in ignore:
                            if fnmatch.fnmatch(os.path.join(root, filename), ignore_pattern):
                                self.app.info(
                                    bold('[AutoAPI] ') +
                                    darkgreen("Ignoring %s/%s" % (root, filename))
                                )
                                skip = True

                        if skip:
                            continue
                        # Make sure the path is full
                        if os.path.isabs(filename):
                            files_to_read.append(filename)
                        else:
                            files_to_read.append(os.path.join(root, filename))

        for _path in self.app.status_iterator(
                files_to_read,
                '[AutoAPI] Reading files... ',
                darkgreen,
                len(files_to_read)):
            yield _path
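A hypothetical call to this generator; the mapper object and the glob patterns are assumptions for illustration:

# Yields one full path per matching file, logging any ignored ones.
for source_path in mapper.find_files(patterns=["*.py"],
                                     dirs=["src"],
                                     ignore=["*test*"]):
    print(source_path)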
Example #5
 def assemble_doctree(self, indexfile):
     docnames = set([indexfile])
     self.info(darkgreen(indexfile) + " ", nonl=1)
     tree = self.env.get_doctree(indexfile)
     tree['docname'] = indexfile
     # extract toctree nodes from the tree and put them in a fresh document
     new_tree = docutils.utils.new_document('<rinoh output>')
     for node in tree.traverse(addnodes.toctree):
         new_tree += node
     largetree = inline_all_toctrees(self, docnames, indexfile, new_tree,
                                     darkgreen)
     largetree['docname'] = indexfile
     self.info()
     self.info("resolving references...")
     self.env.resolve_references(largetree, indexfile, self)
     # resolve :ref:s to distant tex files -- we can't add a cross-reference,
     # but append the document name
     for pendingnode in largetree.traverse(addnodes.pending_xref):
         docname = pendingnode['refdocname']
         sectname = pendingnode['refsectname']
         newnodes = [nodes.emphasis(sectname, sectname)]
         for subdir, title in self.titles:
             if docname.startswith(subdir):
                 newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
                 newnodes.append(nodes.emphasis(title, title))
                 newnodes.append(nodes.Text(')', ')'))
                 break
         pendingnode.replace_self(newnodes)
     return largetree
Example #6
 def process_result(self, result):
     uri, docname, lineno, status, info, code = result
     if status == 'unchecked':
         return
     if status == 'working' and info != 'new':
         return
     if lineno:
         self.info('(line %4d) ' % lineno, nonl=1)
     if status == 'ignored':
         self.info(darkgray('-ignored- ') + uri)
     elif status == 'local':
         self.info(darkgray('-local-   ') + uri)
         self.write_entry('local', docname, lineno, uri)
     elif status == 'working':
         self.info(darkgreen('ok        ')  + uri)
     elif status == 'broken':
         self.info(red('broken    ') + uri + red(' - ' + info))
         self.write_entry('broken', docname, lineno, uri + ': ' + info)
         if self.app.quiet:
             self.warn('broken link: %s' % uri,
                       '%s:%s' % (self.env.doc2path(docname), lineno))
     elif status == 'redirected':
         text, color = {
             301: ('permanently', darkred),
             302: ('with Found', purple),
             303: ('with See Other', purple),
             307: ('temporarily', turquoise),
             0:   ('with unknown code', purple),
         }[code]
         self.write_entry('redirected ' + text, docname, lineno,
                          uri + ' to ' + info)
         self.info(color('redirect  ') + uri + color(' - ' + text + ' to '  + info))
Example #7
    def check(self, node, docname):
        uri = node['refuri']

        if '#' in uri:
            uri = uri.split('#')[0]

        if uri in self.good:
            return

        lineno = None
        while lineno is None:
            node = node.parent
            if node is None:
                break
            lineno = node.line

        if len(uri) == 0 or uri[0:7] == 'mailto:' or uri[0:4] == 'ftp:':
            return

        if lineno:
            self.info('(line %3d) ' % lineno, nonl=1)
        for rex in self.to_ignore:
            if rex.match(uri):
                self.info(uri + ' - ' + darkgray('ignored'))
                return
        if uri[0:5] == 'http:' or uri[0:6] == 'https:':
            self.info(uri, nonl=1)

            if uri in self.broken:
                (r, s) = self.broken[uri]
            elif uri in self.redirected:
                (r, s) = self.redirected[uri]
            else:
                (r, s) = self.resolve(uri)

            if r == 0:
                self.info(' - ' + darkgreen('working'))
                self.good.add(uri)
            elif r == 2:
                self.info(' - ' + red('broken: ') + s)
                self.write_entry('broken', docname, lineno, uri + ': ' + s)
                self.broken[uri] = (r, s)
                if self.app.quiet:
                    self.warn('broken link: %s' % uri,
                              '%s:%s' % (self.env.doc2path(docname), lineno))
            else:
                self.info(' - ' + purple('redirected') + ' to ' + s)
                self.write_entry('redirected', docname,
                                 lineno, uri + ' to ' + s)
                self.redirected[uri] = (r, s)
        else:
            self.info(uri + ' - ' + darkgray('local'))
            self.write_entry('local', docname, lineno, uri)

        if self.broken:
            self.app.statuscode = 1
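The branches above imply the contract of self.resolve(uri): it returns a pair (r, s) where r == 0 means working, r == 2 means broken with s as the reason, and any other value is treated as a redirect to s. A hedged sketch of such a resolver built on the requests library (an assumption; the snippet does not show the real implementation):

import requests

def resolve(self, uri):
    # Assumed convention: 0 = working, 1 = redirected, 2 = broken.
    try:
        response = requests.head(uri, allow_redirects=False, timeout=10)
    except requests.RequestException as exc:
        return (2, str(exc))
    if 300 <= response.status_code < 400:
        return (1, response.headers.get("Location", ""))
    if response.status_code >= 400:
        return (2, "HTTP %d" % response.status_code)
    return (0, "")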
Example #8
def build_finished(app, exception):
    if not app.config.autoapi_keep_files:
        normalized_root = os.path.normpath(os.path.join(app.confdir, app.config.autoapi_root))
        if app.verbosity > 1:
            app.info(bold('[AutoAPI] ') + darkgreen('Cleaning generated .rst files'))
        shutil.rmtree(normalized_root)

        mapper = default_backend_mapping[app.config.autoapi_type]
        if hasattr(mapper, 'build_finished'):
            mapper.build_finished(app, exception)
Example #9
def build_finished(app, exception):
    if not app.config.autoapi_keep_files and app.config.autoapi_generate_api_docs:
        normalized_root = os.path.normpath(
            os.path.join(app.confdir, app.config.autoapi_root)
        )
        if app.verbosity > 1:
            LOGGER.info(bold("[AutoAPI] ") + darkgreen("Cleaning generated .rst files"))
        shutil.rmtree(normalized_root)

        sphinx_mapper = default_backend_mapping[app.config.autoapi_type]
        if hasattr(sphinx_mapper, "build_finished"):
            sphinx_mapper.build_finished(app, exception)
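As with the doctree-read handler earlier, this cleanup only happens if the extension connects it to Sphinx's build-finished event, which passes the application and any exception raised during the build:

def setup(app):
    app.connect("build-finished", build_finished)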
Example #10
File: xlatex.py Project: xcore/xdoc
    def assemble_doctree(self, indexfile, toctree_only, appendices):
        self.docnames = set([indexfile] + appendices)
        self.info(darkgreen(indexfile) + " ", nonl=1)
        tree = self.env.get_doctree(indexfile)
        tree['docname'] = indexfile
        if toctree_only:
            # extract toctree nodes from the tree and put them in a
            # fresh document
            new_tree = new_document('<latex output>')
            new_sect = nodes.section()
            new_sect += nodes.title(u'<Set title in conf.py>',
                                    u'<Set title in conf.py>')
            new_tree += new_sect
            for node in tree.traverse(addnodes.toctree):
                new_sect += node
            tree = new_tree
        largetree = inline_all_toctrees(self, self.docnames, indexfile, tree,
                                        darkgreen)
        largetree['docname'] = indexfile
        for docname in appendices:
            appendix = self.env.get_doctree(docname)
            appendix['docname'] = docname
            largetree.append(appendix)
        self.info()
        self.info("resolving references...")
        self.env.resolve_references(largetree, indexfile, self)
        # resolve :ref:s to distant tex files -- we can't add a cross-reference,
        # but append the document name
        for pendingnode in largetree.traverse(addnodes.pending_xref):
            docname = pendingnode['refdocname']
            sectname = pendingnode['refsectname']
            newnodes = [nodes.emphasis(sectname, sectname)]
            for subdir, title in self.titles:
                if docname.startswith(subdir):
                    newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
                    newnodes.append(nodes.emphasis(title, title))
                    newnodes.append(nodes.Text(')', ')'))
                    break
            pendingnode.replace_self(newnodes)

        return largetree
Example #11
    def check(self, node, docname):
        uri = node["refuri"]

        if "#" in uri:
            uri = uri.split("#")[0]

        if uri in self.good:
            return

        lineno = None
        while lineno is None:
            node = node.parent
            if node is None:
                break
            lineno = node.line

        if len(uri) == 0 or uri[0:7] == "mailto:" or uri[0:4] == "ftp:":
            return

        if lineno:
            self.info("(line %3d) " % lineno, nonl=1)
        if uri[0:5] == "http:" or uri[0:6] == "https:":
            self.info(uri, nonl=1)

            if uri in self.broken:
                (r, s) = self.broken[uri]
            elif uri in self.redirected:
                (r, s) = self.redirected[uri]
            else:
                (r, s) = self.resolve(uri)

            if r == 0:
                self.info(" - " + darkgreen("working"))
                self.good.add(uri)
            elif r == 2:
                self.info(" - " + red("broken: ") + s)
                self.write_entry("broken", docname, lineno, uri + ": " + s)
                self.broken[uri] = (r, s)
                if self.app.quiet:
                    self.warn("broken link: %s" % uri, "%s:%s" % (self.env.doc2path(docname), lineno))
            else:
                self.info(" - " + purple("redirected") + " to " + s)
                self.write_entry("redirected", docname, lineno, uri + " to " + s)
                self.redirected[uri] = (r, s)
        else:
            self.info(uri + " - " + darkgray("local"))
            self.write_entry("local", docname, lineno, uri)

        if self.broken:
            self.app.statuscode = 1
Example #12
    def check(self, node, docname):
        uri = node['refuri']

        if '#' in uri:
            uri = uri.split('#')[0]

        if uri in self.good:
            return

        lineno = None
        while lineno is None:
            node = node.parent
            if node is None:
                break
            lineno = node.line

        if uri[0:5] == 'http:' or uri[0:6] == 'https:':
            self.info(uri, nonl=1)

            if uri in self.broken:
                (r, s) = self.broken[uri]
            elif uri in self.redirected:
                (r, s) = self.redirected[uri]
            else:
                (r, s) = self.resolve(uri)

            if r == 0:
                self.info(' - ' + darkgreen('working'))
                self.good.add(uri)
            elif r == 2:
                self.info(' - ' + red('broken: ') + s)
                self.write_entry('broken', docname, lineno, uri + ': ' + s)
                self.broken[uri] = (r, s)
                if self.app.quiet:
                    self.warn('%s:%s: broken link: %s' % (docname, lineno, uri))
            else:
                self.info(' - ' + purple('redirected') + ' to ' + s)
                self.write_entry('redirected', docname, lineno, uri + ' to ' + s)
                self.redirected[uri] = (r, s)
        elif len(uri) == 0 or uri[0:7] == 'mailto:' or uri[0:4] == 'ftp:':
            return
        else:
            self.warn(uri + ' - ' + red('malformed!'))
            self.write_entry('malformed', docname, lineno, uri)
            if self.app.quiet:
                self.warn('%s:%s: malformed link: %s' % (docname, lineno, uri))
            self.app.statuscode = 1

        if self.broken:
            self.app.statuscode = 1
Example #13
    def write_doc(self, docname, doctree):
        self.checker.push_filters(self.env.spelling_document_filters[docname])

        for node in doctree.traverse(docutils.nodes.Text):
            if (node.tagname == '#text'
                    and node.parent
                    and node.parent.tagname in self.TEXT_NODES):

                # Figure out the line number for this node by climbing the
                # tree until we find a node that has a line number.
                lineno = None
                parent = node
                seen = set()
                while lineno is None:
                    #self.info('looking for line number on %r' % node)
                    seen.add(parent)
                    parent = parent.parent
                    if parent is None or parent in seen:
                        break
                    lineno = parent.line

                # Check the text of the node.
                for word, suggestions in self.checker.check(node.astext()):
                    msg_parts = [docname + '.po']
                    if lineno:
                        msg_parts.append(darkgreen(str(lineno)))
                    msg_parts.append(' %s ' % red(word))
                    msg_parts.append(self.format_suggestions(suggestions))
                    msg_parts.append(node.astext())
                    msg = ':'.join(msg_parts)
                    self.info(msg)

                    if False:  # To generate simple output.txt for spell_statistic command
                        self.output.write('%s\n' % word)
                    else:
                        self.output.write(u"%s:%s: (%s) %s | %s\n" % (
                            docname + '.po',
                            lineno, word,
                            self.format_suggestions(suggestions),
                            node.astext()
                        ))

                    # We found at least one bad spelling, so set the status
                    # code for the app to a value that indicates an error.
                    self.app.statuscode = 1

        self.checker.pop_filters()
        return
Example #14
    def write(self, *ignored):
        # type: (Any) -> None
        docwriter = ManualPageWriter(self)
        docsettings = OptionParser(
            defaults=self.env.settings,
            components=(docwriter,),
            read_config_files=True).get_default_values()  # type: Any

        logger.info(bold(__('writing... ')), nonl=True)

        for info in self.config.man_pages:
            docname, name, description, authors, section = info
            if docname not in self.env.all_docs:
                logger.warning(__('"man_pages" config value references unknown '
                                  'document %s'), docname)
                continue
            if isinstance(authors, str):
                if authors:
                    authors = [authors]
                else:
                    authors = []

            docsettings.title = name
            docsettings.subtitle = description
            docsettings.authors = authors
            docsettings.section = section

            targetname = '%s.%s' % (name, section)
            logger.info(darkgreen(targetname) + ' { ', nonl=True)
            destination = FileOutput(
                destination_path=path.join(self.outdir, targetname),
                encoding='utf-8')

            tree = self.env.get_doctree(docname)
            docnames = set()  # type: Set[str]
            largetree = inline_all_toctrees(self, docnames, docname, tree,
                                            darkgreen, [docname])
            largetree.settings = docsettings
            logger.info('} ', nonl=True)
            self.env.resolve_references(largetree, docname, self)
            # remove pending_xref nodes
            for pendingnode in largetree.traverse(addnodes.pending_xref):
                pendingnode.replace_self(pendingnode.children)

            docwriter.write(largetree, destination)
        logger.info('')
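The 5-tuples consumed above follow Sphinx's documented man_pages format of (startdocname, name, description, authors, section). For example, in conf.py (the values are placeholders):

man_pages = [
    # (startdocname, name, description, authors, manual section)
    ("index", "myproject", "MyProject Documentation", ["Jane Doe"], 1),
]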
Example #15
    def run(self):
        if self.has_run:
            return

        App.builder.info("running test %s ..." % darkgreen(self.path))

        self.rst_output = os.path.join(BTestTmp, "%s" % self.tag)
        os.environ["BTEST_RST_OUTPUT"] = self.rst_output

        self.cleanTmps()

        try:
            subprocess.check_call("btest -S %s" % self.path, shell=True)
        except (OSError, IOError, subprocess.CalledProcessError) as e:
            # Equivalent to Directive.error(); we don't have a
            # directive object here and can't pass one in because
            # it doesn't pickle.
            App.builder.warn(red("BTest error: %s" % e))
Example #16
def doctree_read(app, doctree):
    all_docs = set()
    insert = True
    if app.env.docname == 'index':
        nodes = doctree.traverse(toctree)
        if not nodes:
            return
        for node in nodes:
            for entry in node['entries']:
                all_docs.add(entry[1])
        for doc in all_docs:
            if doc.find(app.config.autoapi_root) != -1:
                insert = False
        if insert and app.config.autoapi_add_toctree_entry:
            nodes[-1]['entries'].append(
                (None, u'%s/index' % app.config.autoapi_root)
            )
            nodes[-1]['includefiles'].append(u'%s/index' % app.config.autoapi_root)
            app.info(bold('[AutoAPI] ') + darkgreen('Adding AutoAPI TOCTree to index.rst'))
Example #17
 def assemble_doctree(self, indexfile, toctree_only, appendices):
     # type: (unicode, bool, List[unicode]) -> nodes.Node
     self.docnames = set([indexfile] + appendices)
     logger.info(darkgreen(indexfile) + " ", nonl=1)
     tree = self.env.get_doctree(indexfile)
     tree['docname'] = indexfile
     if toctree_only:
         # extract toctree nodes from the tree and put them in a
         # fresh document
         new_tree = new_document('<texinfo output>')
         new_sect = nodes.section()
         new_sect += nodes.title(u'<Set title in conf.py>',
                                 u'<Set title in conf.py>')
         new_tree += new_sect
         for node in tree.traverse(addnodes.toctree):
             new_sect += node
         tree = new_tree
     largetree = inline_all_toctrees(self, self.docnames, indexfile, tree,
                                     darkgreen, [indexfile])
     largetree['docname'] = indexfile
     for docname in appendices:
         appendix = self.env.get_doctree(docname)
         appendix['docname'] = docname
         largetree.append(appendix)
     logger.info('')
     logger.info("resolving references...")
     self.env.resolve_references(largetree, indexfile, self)
     # TODO: add support for external :ref:s
     for pendingnode in largetree.traverse(addnodes.pending_xref):
         docname = pendingnode['refdocname']
         sectname = pendingnode['refsectname']
         newnodes = [nodes.emphasis(sectname, sectname)]
         for subdir, title in self.titles:
             if docname.startswith(subdir):
                 newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
                 newnodes.append(nodes.emphasis(title, title))
                 newnodes.append(nodes.Text(')', ')'))
                 break
         pendingnode.replace_self(newnodes)
     return largetree
Example #18
    def write(self, *ignored):
        # overwritten -- use our own version of the Writer
        docwriter = CyrusManualPageWriter(self)
        docsettings = OptionParser(
            defaults=self.env.settings,
            components=(docwriter,),
            read_config_files=True).get_default_values()

        self.info(bold('writing... '), nonl=True)

        for info in self.config.man_pages:
            docname, name, description, authors, section = info
            if isinstance(authors, basestring):
                if authors:
                    authors = [authors]
                else:
                    authors = []

            targetname = '%s.%s' % (name, section)
            self.info(darkgreen(targetname) + ' { ', nonl=True)
            destination = FileOutput(
                destination_path=path.join(self.outdir, targetname),
                encoding='utf-8')

            tree = self.env.get_doctree(docname)
            docnames = set()
            largetree = inline_all_toctrees(self, docnames, docname, tree,
                                            darkgreen)
            self.info('} ', nonl=True)
            self.env.resolve_references(largetree, docname, self)
            # remove pending_xref nodes
            for pendingnode in largetree.traverse(addnodes.pending_xref):
                pendingnode.replace_self(pendingnode.children)

            largetree.settings = docsettings
            largetree.settings.title = name
            largetree.settings.subtitle = description
            largetree.settings.authors = authors
            largetree.settings.section = section

            docwriter.write(largetree, destination)
        self.info()
Example #19
    def run(self):
        self.assert_has_content()

        env = self.state.document.settings.env
        btest_base = env.config.btest_base
        btest_tests = env.config.btest_tests

        tag = self.arguments[0]

        if not btest_base:
            return self.error("error: btest_base not set in config")

        if not btest_tests:
            return self.error("error: btest_tests not set in config")

        if not os.path.exists(btest_base):
            return self.error("error: btest_base directory '%s' does not exists" % btest_base)

        if not os.path.exists(os.path.join(btest_base, btest_tests)):
            return self.error("error: btest_tests directory '%s' does not exists" % os.path.join(btest_base, btest_tests))

        os.chdir(btest_base)

        tmp = tempfile.mktemp(prefix="rst-btest")
        file = os.path.join(btest_tests, tag + ".btest")

        self.message("running test %s ..." % darkgreen(file))

        # Save the test.
        out = open(file, "w")
        for line in self.content:
            print >>out, line

        out.close()

        # Run it.
        os.environ["BTEST_RST_OUTPUT"] = tmp

        try:
            subprocess.check_call("btest -qd %s" % file, shell=True)
        except (OSError, IOError, subprocess.CalledProcessError), e:
            return self.error("btest: %s" % e)
Example #20
 def process_tree(docname, tree):
     tree = tree.deepcopy()
     for toctreenode in tree.traverse(addnodes.toctree):
         newnodes = []
         includefiles = map(str, toctreenode['includefiles'])
         for includefile in includefiles:
             try:
                 self.info(darkgreen(includefile) + " ", nonl=1)
                 subtree = process_tree(includefile,
                                        self.env.get_doctree(includefile))
                 self.docnames.add(includefile)
             except Exception:
                 self.warn('%s: toctree contains ref to nonexisting file %r'
                           % (docname, includefile))
             else:
                 sof = addnodes.start_of_file(docname=includefile)
                 sof.children = subtree.children
                 newnodes.append(sof)
         toctreenode.parent.replace(toctreenode, newnodes)
     return tree
Example #21
    def write_doc(self, docname, doctree):
        self.checker.push_filters(self.env.spelling_document_filters[docname])

        for node in doctree.traverse(docutils.nodes.Text):
            if node.tagname == '#text' and node.parent and node.parent.tagname in TEXT_NODES:

                # Figure out the line number for this node by climbing the
                # tree until we find a node that has a line number.
                lineno = None
                parent = node
                seen = set()
                while lineno is None:
                    #self.info('looking for line number on %r' % node)
                    seen.add(parent)
                    parent = parent.parent
                    if parent is None or parent in seen:
                        break
                    lineno = parent.line
                filename = self.env.doc2path(docname, base=None)

                # Check the text of the node.
                for word, suggestions in self.checker.check(node.astext()):
                    msg_parts = []
                    if lineno:
                        msg_parts.append(darkgreen('(line %3d)' % lineno))
                    msg_parts.append(red(word))
                    msg_parts.append(self.format_suggestions(suggestions))
                    msg = ' '.join(msg_parts)
                    self.info(msg)
                    self.output.write(u"%s:%s: (%s) %s\n" % (
                            filename,
                            lineno, word,
                            self.format_suggestions(suggestions),
                            ))

                    # We found at least one bad spelling, so set the status
                    # code for the app to a value that indicates an error.
                    self.app.statuscode = 1

        self.checker.pop_filters()
        return
Example #22
def doctree_read(app, doctree):
    all_docs = set()
    insert = True
    if app.env.docname == 'index':
        nodes = doctree.traverse(toctree)
        if not nodes:
            return
        for node in nodes:
            for entry in node['entries']:
                all_docs.add(entry[1])
        for doc in all_docs:
            if doc.find(app.config.autoapi_root) != -1:
                insert = False
        if insert and app.config.autoapi_add_toctree_entry:
            nodes[-1]['entries'].append(
                (None, u'%s/index' % app.config.autoapi_root))
            nodes[-1]['includefiles'].append(u'%s/index' %
                                             app.config.autoapi_root)
            app.info(
                bold('[AutoAPI] ') +
                darkgreen('Adding AutoAPI TOCTree to index.rst'))
Example #23
 def process_tree(docname, tree):
     tree = tree.deepcopy()
     for toctreenode in tree.traverse(addnodes.toctree):
         newnodes = []
         includefiles = map(str, toctreenode['includefiles'])
         for includefile in includefiles:
             try:
                 self.sphinx_logger.info(darkgreen(includefile) + " ")
                 subtree = process_tree(
                     includefile, self.env.get_doctree(includefile))
                 self.docnames.add(includefile)
             except Exception:
                 self.sphinx_logger.warning(
                     '%s: toctree contains ref to nonexisting file %r' %
                     (docname, includefile))
             else:
                 sof = addnodes.start_of_file(docname=includefile)
                 sof.children = subtree.children
                 newnodes.append(sof)
         toctreenode.parent.replace(toctreenode, newnodes)
     return tree
Example #24
def extension_build_finished(app, exception):
    if (
        not app.config.autoapi_keep_files
        and app.config.autoapi_generate_api_docs
    ):
        if app.verbosity > 1:
            LOGGER.info(
                bold("[AutoAPI] ") + darkgreen("Cleaning generated .rst files")
            )
        to_remove = getattr(app, "created_api_files", [])
        for file in to_remove:
            os.remove(file)
            directory = os.path.dirname(file)
            while True:
                try:
                    if len(os.listdir(directory)) > 0:
                        break
                    os.rmdir(directory)
                    directory = os.path.dirname(directory)
                except PermissionError:
                    break
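Unlike the build_finished variants above, this hook deletes only the files the extension recorded in app.created_api_files, then prunes now-empty parent directories bottom-up, stopping at the first directory that is still populated or cannot be removed.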
Example #25
    def write(self, *ignored):
        docwriter = ManualPageWriter(self)
        docsettings = OptionParser(
            defaults=self.env.settings,
            components=(docwriter,)).get_default_values()

        self.info(bold('writing... '), nonl=True)

        for info in self.config.man_pages:
            docname, name, description, authors, section = info
            if isinstance(authors, basestring):
                if authors:
                    authors = [authors]
                else:
                    authors = []

            targetname = '%s.%s' % (name, section)
            self.info(darkgreen(targetname) + ' { ', nonl=True)
            destination = FileOutput(
                destination_path=path.join(self.outdir, targetname),
                encoding='utf-8')

            tree = self.env.get_doctree(docname)
            docnames = set()
            largetree = inline_all_toctrees(self, docnames, docname, tree,
                                            darkgreen)
            self.info('} ', nonl=True)
            self.env.resolve_references(largetree, docname, self)
            # remove pending_xref nodes
            for pendingnode in largetree.traverse(addnodes.pending_xref):
                pendingnode.replace_self(pendingnode.children)

            largetree.settings = docsettings
            largetree.settings.title = name
            largetree.settings.subtitle = description
            largetree.settings.authors = authors
            largetree.settings.section = section

            docwriter.write(largetree, destination)
        self.info()
Example #26
    def finish(self):
        I18nBuilder.finish(self)
        data = dict(
            version = self.config.version,
            copyright = self.config.copyright,
            project = self.config.project,
            # XXX should supply tz
            ctime = datetime.now().strftime('%Y-%m-%d %H:%M%z'),
        )
        for textdomain, catalog in self.status_iterator(
                self.catalogs.iteritems(), "writing message catalogs... ",
                lambda (textdomain, _): darkgreen(textdomain),
                len(self.catalogs)):

            # noop if config.gettext_compact is set
            ensuredir(path.join(self.outdir, path.dirname(textdomain)))

            pofn = path.join(self.outdir, textdomain + '.pot')
            pofile = open(pofn, 'w', encoding='utf-8')
            try:
                pofile.write(POHEADER % data)

                for message in catalog.messages:
                    positions = catalog.metadata[message]

                    # generate "#: file1:line1\n#: file2:line2 ..."
                    pofile.write(u"#: %s\n" % "\n#: ".join("%s:%s" %
                        (safe_relpath(source, self.outdir), line)
                        for source, line, _ in positions))
                    # generate "# uuid1\n# uuid2\n ..."
                    pofile.write(u"# %s\n" % "\n# ".join(uid for _, _, uid
                        in positions))

                    # message contains *one* line of text ready for translation
                    message = message.replace(u'\\', ur'\\'). \
                                      replace(u'"', ur'\"')
                    pofile.write(u'msgid "%s"\nmsgstr ""\n\n' % message)

            finally:
                pofile.close()
Example #27
 def assemble_doctree(self, indexfile, toctree_only):
     docnames = set([indexfile])
     logger.info(darkgreen(indexfile) + " ", nonl=1)
     tree = self.env.get_doctree(indexfile)
     tree['docname'] = indexfile
     new_tree = docutils.utils.new_document(tree['source'])
     if toctree_only:
         # extract toctree nodes from the tree and put them in a
         # fresh document
         for node in tree.traverse(addnodes.toctree):
             new_tree += node
     else:
         for node in tree.children:
             if node.tagname == 'section':
                 for child in node.children:
                     if child.tagname != 'title':
                         new_tree += child
             else:
                 new_tree += node
     largetree = inline_all_toctrees(self, docnames, indexfile, new_tree,
                                     darkgreen, [indexfile])
     largetree['docname'] = indexfile
     logger.info("resolving references...")
     self.env.resolve_references(largetree, indexfile, self)
     # resolve :ref:s to distant tex files -- we can't add a cross-reference,
     # but append the document name
     for pendingnode in largetree.traverse(addnodes.pending_xref):
         docname = pendingnode['refdocname']
         sectname = pendingnode['refsectname']
         newnodes = [nodes.emphasis(sectname, sectname)]
         for subdir, title in self.titles:
             if docname.startswith(subdir):
                 newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
                 newnodes.append(nodes.emphasis(title, title))
                 newnodes.append(nodes.Text(')', ')'))
                 break
         pendingnode.replace_self(newnodes)
     return largetree, docnames
Example #28
 def process_result(self, result: Tuple[str, str, int, str, str, int]) -> None:
     uri, docname, lineno, status, info, code = result
     if status == 'unchecked':
         return
     if status == 'working' and info == 'old':
         return
     if lineno:
         logger.info('(line %4d) ', lineno, nonl=True)
     if status == 'ignored':
         if info:
             logger.info(darkgray('-ignored- ') + uri + ': ' + info)
         else:
             logger.info(darkgray('-ignored- ') + uri)
     elif status == 'local':
         logger.info(darkgray('-local-   ') + uri)
         self.write_entry('local', docname, lineno, uri)
     elif status == 'working':
         logger.info(darkgreen('ok        ') + uri + info)
     elif status == 'broken':
         self.write_entry('broken', docname, lineno, uri + ': ' + info)
         if self.app.quiet or self.app.warningiserror:
             logger.warning(__('broken link: %s (%s)'), uri, info,
                            location=(self.env.doc2path(docname), lineno))
         else:
             logger.info(red('broken    ') + uri + red(' - ' + info))
     elif status == 'redirected':
         try:
             text, color = {
                 301: ('permanently', darkred),
                 302: ('with Found', purple),
                 303: ('with See Other', purple),
                 307: ('temporarily', turquoise),
                 308: ('permanently', darkred),
             }[code]
         except KeyError:
             text, color = ('with unknown code', purple)
         self.write_entry('redirected ' + text, docname, lineno,
                          uri + ' to ' + info)
         logger.info(color('redirect  ') + uri + color(' - ' + text + ' to ' + info))
Example #29
 def process_result(self, result):
     # type: (Tuple[unicode, unicode, int, unicode, unicode, int]) -> None
     uri, docname, lineno, status, info, code = result
     if status == 'unchecked':
         return
     if status == 'working' and info == 'old':
         return
     if lineno:
         self.info('(line %4d) ' % lineno, nonl=1)
     if status == 'ignored':
         if info:
             self.info(darkgray('-ignored- ') + uri + ': ' + info)
         else:
             self.info(darkgray('-ignored- ') + uri)
     elif status == 'local':
         self.info(darkgray('-local-   ') + uri)
         self.write_entry('local', docname, lineno, uri)
     elif status == 'working':
         self.info(darkgreen('ok        ') + uri + info)
     elif status == 'broken':
         self.write_entry('broken', docname, lineno, uri + ': ' + info)
         if self.app.quiet or self.app.warningiserror:
             self.warn('broken link: %s (%s)' % (uri, info),
                       '%s:%s' % (self.env.doc2path(docname), lineno))
         else:
             self.info(red('broken    ') + uri + red(' - ' + info))
     elif status == 'redirected':
         text, color = {
             301: ('permanently', darkred),
             302: ('with Found', purple),
             303: ('with See Other', purple),
             307: ('temporarily', turquoise),
             0: ('with unknown code', purple),
         }[code]
         self.write_entry('redirected ' + text, docname, lineno,
                          uri + ' to ' + info)
         self.info(
             color('redirect  ') + uri +
             color(' - ' + text + ' to ' + info))
Example #30
    def write_doc(self, docname, doctree):
        self.checker.push_filters(self.env.spelling_document_filters[docname])

        for node in doctree.traverse(docutils.nodes.Text):
            if (node.tagname == '#text' and
                    node.parent and
                    node.parent.tagname in self.TEXT_NODES):

                # Figure out the line number for this node by climbing the
                # tree until we find a node that has a line number.
                lineno = None
                parent = node
                seen = set()
                while lineno is None:
                    # self.info('looking for line number on %r' % node)
                    seen.add(parent)
                    parent = parent.parent
                    if parent is None or parent in seen:
                        break
                    lineno = parent.line

                # Check the text of the node.
                for word, suggestions in self.checker.check(node.astext()):
                    msg_parts = [docname + '.rst']
                    if lineno:
                        msg_parts.append(darkgreen(str(lineno)))
                    msg_parts.append(red(word))
                    msg_parts.append(self.format_suggestions(suggestions))
                    msg = ':'.join(msg_parts)
                    self.info(msg)
                    self.output.write(u"%s:%s: (%s) %s\n" % (
                        self.env.doc2path(docname, None),
                        lineno, word,
                        self.format_suggestions(suggestions),
                    ))
                    self.misspelling_count += 1

        self.checker.pop_filters()
        return
Example #31
    def find_files(self, patterns, dirs, ignore):
        # pylint: disable=too-many-nested-blocks
        if not ignore:
            ignore = []
        files_to_read = []
        for _dir in dirs:
            for root, dirnames, filenames in os.walk(_dir):
                for pattern in patterns:
                    for filename in fnmatch.filter(filenames, pattern):
                        skip = False

                        # Skip ignored files
                        for ignore_pattern in ignore:
                            if fnmatch.fnmatch(os.path.join(root, filename),
                                               ignore_pattern):
                                self.app.info(
                                    bold('[AutoAPI] ') +
                                    darkgreen("Ignoring %s/%s" %
                                              (root, filename)))
                                skip = True

                        if skip:
                            continue

                        # Make sure the path is full
                        if not os.path.isabs(filename):
                            filename = os.path.join(root, filename)

                        files_to_read.append(filename)

        if sphinx.version_info >= (1, 6):
            status_iterator = sphinx.util.status_iterator
        else:
            status_iterator = self.app.status_iterator

        for _path in status_iterator(files_to_read,
                                     '[AutoAPI] Reading files... ', darkgreen,
                                     len(files_to_read)):
            yield _path
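The version check exists because status_iterator became a module-level function in sphinx.util in Sphinx 1.6, while earlier releases only provided it as a method on the application object; the dispatch keeps the extension working on both.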
Example #32
 def process_result(self, result):
     # type: (Tuple[unicode, unicode, int, unicode, unicode, int]) -> None
     uri, docname, lineno, status, info, code = result
     if status == 'unchecked':
         return
     if status == 'working' and info == 'old':
         return
     if lineno:
         logger.info('(line %4d) ', lineno, nonl=1)
     if status == 'ignored':
         if info:
             logger.info(darkgray('-ignored- ') + uri + ': ' + info)
         else:
             logger.info(darkgray('-ignored- ') + uri)
     elif status == 'local':
         logger.info(darkgray('-local-   ') + uri)
         self.write_entry('local', docname, lineno, uri)
     elif status == 'working':
         logger.info(darkgreen('ok        ') + uri + info)
     elif status == 'broken':
         self.write_entry('broken', docname, lineno, uri + ': ' + info)
         if self.app.quiet or self.app.warningiserror:
             logger.warning('broken link: %s (%s)', uri, info,
                            location=(self.env.doc2path(docname), lineno))
         else:
             logger.info(red('broken    ') + uri + red(' - ' + info))
     elif status == 'redirected':
         text, color = {
             301: ('permanently', darkred),
             302: ('with Found', purple),
             303: ('with See Other', purple),
             307: ('temporarily', turquoise),
             0:   ('with unknown code', purple),
         }[code]
         self.write_entry('redirected ' + text, docname, lineno,
                          uri + ' to ' + info)
         logger.info(color('redirect  ') + uri + color(' - ' + text + ' to ' + info))
Example #33
def doctree_read(app, doctree):
    """
    Inject AutoAPI into the TOC Tree dynamically.
    """
    if app.env.docname == 'index':
        all_docs = set()
        insert = True
        nodes = doctree.traverse(toctree)
        toc_entry = '%s/index' % app.config.autoapi_root
        if not nodes:
            return
        # Capture all existing toctree entries
        for node in nodes:
            for entry in node['entries']:
                all_docs.add(entry[1])
        # Don't insert autoapi if it's already present
        for doc in all_docs:
            if doc.find(app.config.autoapi_root) != -1:
                insert = False
        if insert and app.config.autoapi_add_toctree_entry:
            if app.config.autoapi_add_api_root_toctree:
                # Insert full API TOC in root TOC
                for path in app.env.autoapi_toc_entries:
                    nodes[-1]['entries'].append((None, path[1:]))
                    nodes[-1]['includefiles'].append(path)
            else:
                # Insert AutoAPI index
                nodes[-1]['entries'].append((None, toc_entry))
                nodes[-1]['includefiles'].append(toc_entry)
                app.info(
                    bold('[AutoAPI] ') +
                    darkgreen('Adding AutoAPI TOCTree [%s] to index.rst' %
                              toc_entry))
Example #34
 def process_result(self, result):
     uri, docname, lineno, status, info = result
     if status == 'unchecked':
         return
     if status == 'working' and info != 'new':
         return
     if lineno:
         self.info('(line %3d) ' % lineno, nonl=1)
     if status == 'ignored':
         self.info(uri + ' - ' + darkgray('ignored'))
     elif status == 'local':
         self.info(uri + ' - ' + darkgray('local'))
         self.write_entry('local', docname, lineno, uri)
     elif status == 'working':
         self.info(uri + ' - ' + darkgreen('working'))
     elif status == 'broken':
         self.info(uri + ' - ' + red('broken: ') + info)
         self.write_entry('broken', docname, lineno, uri + ': ' + info)
         if self.app.quiet:
             self.warn('broken link: %s' % uri,
                       '%s:%s' % (self.env.doc2path(docname), lineno))
     elif status == 'redirected':
         self.info(uri + ' - ' + purple('redirected') + ' to ' + info)
         self.write_entry('redirected', docname, lineno, uri + ' to ' + info)
Example #35
def doctree_read(app, doctree):
    """
    Inject AutoAPI into the TOC Tree dynamically.
    """

    add_domain_to_toctree(app, doctree, app.env.docname)

    if app.env.docname == "index":
        all_docs = set()
        insert = True
        nodes = doctree.traverse(toctree)
        toc_entry = "%s/index" % app.config.autoapi_root
        add_entry = (
            nodes
            and app.config.autoapi_generate_api_docs
            and app.config.autoapi_add_toctree_entry
        )
        if not add_entry:
            return
        # Capture all existing toctree entries
        for node in nodes:
            for entry in node["entries"]:
                all_docs.add(entry[1])
        # Don't insert autoapi if it's already present
        for doc in all_docs:
            if doc.find(app.config.autoapi_root) != -1:
                insert = False
        if insert and app.config.autoapi_add_toctree_entry:
            # Insert AutoAPI index
            nodes[-1]["entries"].append((None, u"%s/index" % app.config.autoapi_root))
            nodes[-1]["includefiles"].append(u"%s/index" % app.config.autoapi_root)
            message_prefix = bold("[AutoAPI] ")
            message = darkgreen(
                "Adding AutoAPI TOCTree [{0}] to index.rst".format(toc_entry)
            )
            LOGGER.info(message_prefix + message)
Example #36
    def scan(self):
        """
        Scan the source dir for ``*.sls`` files and create an AutoSaltSLS object for each

        :return: int (count of sls objects found)
        """
        # Check the source dir exists
        if not os.path.isdir(self.full_source):
            raise ExtensionError("Source path '{0}' does not exist".format(
                self.full_source))

        logger.info(
            bold("[AutoSaltSLS] ") + "Scanning {0}".format(self.full_source))

        cwd = os.getcwd()
        os.chdir(self.full_source)

        # Clear out any old data
        self.sls_objects = []

        for dir_path, dir_names, filenames in os.walk("."):
            # Remove the source from the dir we are processing as this will give us the sls namespace
            p = pathlib.Path(dir_path)
            rel_path = dir_path
            if dir_path != ".":
                rel_path = str(pathlib.Path(*p.parts))

            source_url_path = None

            if self.settings.url_root:
                source_url_path = (self.settings.url_root + "/" +
                                   rel_path.replace("\\", "/"))

            # Skip any paths in the exclude list
            if rel_path in self.settings.exclude:
                logger.info(
                    bold("[AutoSaltSLS] ") +
                    darkgreen("Ignoring {0}".format(dir_path)))
                dir_names[:] = []
                filenames[:] = []
                continue

            # Start with an empty parent
            sls_parent = None

            # Create a parent container object if not in the top level
            if rel_path != ".":
                logger.debug(
                    "[AutoSaltSLS] Creating sls object for {0} (No file)".
                    format(rel_path))
                sls_parent = AutoSaltSLS(
                    self.app,
                    rel_path,
                    self.full_source,
                    self.settings,
                    source_url_root=source_url_path,
                )
                self.sls_objects.append(sls_parent)

            for file in filenames:
                # We only want .sls files
                if file.endswith(".sls"):
                    # init.sls files are the parent so update the right object
                    if file == "init.sls" and sls_parent:
                        sls_parent.initfile = True
                        continue

                    # Create an sls object for the file
                    logger.debug(
                        "[AutoSaltSLS] Creating sls object for {0} ({1})".
                        format(
                            rel_path if rel_path != "." else "[root]",
                            file,
                        ))
                    sls_obj = AutoSaltSLS(
                        self.app,
                        file,
                        os.path.join(self.full_source, rel_path)
                        if rel_path != "." else self.full_source,
                        self.settings,
                        parent_name=sls_parent.name if sls_parent else None,
                        source_url_root=source_url_path,
                    )

                    if sls_parent:
                        # Add the child to the parent
                        sls_parent.add_child(sls_obj)
                    else:
                        self.sls_objects.append(sls_obj)

        # Post-process the sls objects to set the initfile data correctly for true parent objects and to identify any
        # top files
        for sls_obj in self.sls_objects:
            if sls_obj.initfile:
                logger.debug(
                    "[AutoSaltSLS] Setting sls object {0} as init file".format(
                        sls_obj.basename))

                rst_filename = None
                if not sls_obj.child_count:
                    rst_filename = sls_obj.basename.replace(".",
                                                            os.sep) + ".rst"

                sls_obj.set_initfile(rst_filename=rst_filename)

        # Change back to the starting dir
        os.chdir(cwd)

        # Report the count of objects found
        logger.info(
            bold("[AutoSaltSLS] ") +
            "Found {0} top-level sls entities and {1} sub-entities".format(
                self.sls_objects_count,
                self.sls_sub_object_count,
            ))

        return self.sls_objects_count
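
The walker above leans on names defined elsewhere in the extension. A minimal sketch of the imports it assumes (Sphinx's logging and console helpers are real; AutoSaltSLS and the settings object are extension-specific):

import os
import pathlib

from sphinx.util import logging
from sphinx.util.console import bold, darkgreen

logger = logging.getLogger(__name__)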
Example #38
def main():
    # check that the linkcheck file exists
    linkcheck_file = os.path.join('build', 'linkcheck', 'output.txt')
    if not os.path.exists(linkcheck_file):
        error_exit('no linkcheck output file; run make linkcheck')

    # check that it hasn't been more than a day since the last linkcheck
    last_linkcheck = datetime.fromtimestamp(os.path.getmtime(linkcheck_file))
    if datetime.now() - last_linkcheck > timedelta(days=1):
        error_exit('linkcheck output outdated; run make linkcheck')

    # parse each line of the linkcheck output.txt file
    with open(linkcheck_file) as fp:
        lines = fp.readlines()

    local = {}
    broken = {}
    perm_redirects = {}
    temp_redirects = {}

    for line in lines:
        m = LINE_RE.match(line)
        if m is None:
            error_exit('could not parse: {!r}'.format(line))
            continue

        filename, lineno, status, url, more = m.groups()

        # ignore links with certain status messages
        if '429' in more and 'Too Many Requests' in more.title():
            continue

        # gather data for broken urls
        elif status == 'broken':
            url = url.rstrip(':')
            m = REASON_RE.match(more)
            more = m.group(1) if m else more.strip()
            broken.setdefault(filename, {})[url] = more

        # gather local links
        elif status == 'local':
            local.setdefault(filename, set()).add(url)

        # gather data for permanent redirects
        elif status == 'redirected permanently':
            dst = more.split(' to ', 1)[-1].strip()
            perm_redirects.setdefault(filename, {})[url] = dst

        # gather data for other (temporary) redirects
        elif status.startswith('redirected'):
            dst = more.split(' to ', 1)[-1].strip()
            temp_redirects.setdefault(filename, {})[url] = dst

    n = update_redirects(perm_redirects)

    print()
    urls = {x for urls in perm_redirects.values() for x in urls.items()}
    print(white('Found {} links returning 301s [{} replacements made]'.format(len(urls), n)))
    for src, dst in sorted(urls):
        print(src + darkgreen(' -> ' + dst))

    print()
    urls = {x for urls in temp_redirects.values() for x in urls.items()}
    print(white('Found {} links returning other 300 codes [no replacements made]'.format(len(urls))))
    for src, dst in sorted(urls):
        print(src + purple(' -> ' + dst))

    print()
    urls = {x for urls in broken.values() for x in urls.items()}
    print(white('Found {} links returning error codes (excluding 429)'.format(len(urls))))
    for url, reason in sorted(urls):
        print(url + darkred(' - ' + reason))
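
LINE_RE and REASON_RE are defined outside this snippet. A hypothetical pair that fits the parsing above (five groups for LINE_RE; both patterns are assumptions, not the originals):

import re

# Matches linkcheck output lines such as:
#   doc/index.rst:12: [broken] https://example.com/: 404 Client Error: Not Found
LINE_RE = re.compile(r'^(.+?):(\d+):\s+\[([^\]]+)\]\s+(\S+)(.*)$')

# Pulls the human-readable reason out of a requests-style error message.
REASON_RE = re.compile(r'\s*\d{3} \w+ Error: (.+?) for url: ')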
Example #39
    def update(self, source, destination, cmd):
        if os.path.exists(
                destination) and "UPDATE_SPICY_CODE" not in os.environ:
            destination_time = os.path.getmtime(destination)

            if os.path.exists(source):
                source_time = os.path.getmtime(source)
            elif not "UPDATE_SPICY_CODE" in os.environ:
                return

            if source_time <= destination_time:
                with open(destination) as f:
                    recorded_hash = f.readline().strip()
                if recorded_hash == self.content_hash:
                    return

        # When running from CI, all recorded output should be up to date.
        # Abort if that's not the case.
        if "CI" in os.environ:
            self.error(
                "error during CI: {} is not up to date in repository".format(
                    destination))
            return

        all_good = True
        first = True

        show_as = []
        if self.show_as:
            show_as = self.show_as.split(";")

        for one_cmd in cmd.split(";"):
            one_cmd = one_cmd.strip()

            one_cmd = one_cmd.replace("%INPUT", source)
            self.message("executing %s" % darkgreen(one_cmd))

            try:
                output = subprocess.check_output(one_cmd,
                                                 shell=True,
                                                 stderr=subprocess.STDOUT)

                if not output:
                    output = b"\n"

                if self.expect_failure:
                    self.error(
                        "execution of '%s' expected to fail, but succeeded" %
                        one_cmd)
                    all_good = False

            except subprocess.CalledProcessError as e:
                output = e.output
                if not self.expect_failure:
                    self.error("execution failed: " + e.output.decode("utf8"))
                    all_good = False

            if all_good:
                out = None
                if first:
                    out = open(destination, "wb")
                    out.write(self.content_hash.encode())
                    out.write(b"\n")
                else:
                    out = open(destination, "ab")
                    out.write(b"\n")

                if show_as:
                    one_cmd = "# %s\n" % show_as[0].strip()
                    one_cmd = one_cmd.replace("%INPUT", self.show_with)
                    output = output.replace(source.encode(),
                                            self.show_with.encode())
                    out.write(one_cmd.encode())
                    show_as = show_as[1:]

                out.write(output)
                out.close()
                first = False
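
A hypothetical call illustrating the contract of update(): cmd may hold several ;-separated commands, each %INPUT placeholder is replaced with the source path, and the recorded output lands in destination (the directive instance, paths, and commands here are made up):

# `directive` stands in for the Sphinx directive object that defines update().
directive.update(
    source="examples/hello.txt",
    destination="autogen/hello.txt.output",
    cmd="cat %INPUT; wc -l %INPUT",
)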
Example #40
    def write(self, *ignored):
        if self.config.man_pages:
            # build manpages from config.man_pages as usual
            ManualPageBuilder.write(self, *ignored)

        logger.info(bold("scan master tree for kernel-doc man-pages ... ") +
                    darkgreen("{"),
                    nonl=True)

        master_tree = self.env.get_doctree(self.config.master_doc)
        master_tree = inline_all_toctrees(self, set(), self.config.master_doc,
                                          master_tree, darkgreen,
                                          [self.config.master_doc])
        logger.info(darkgreen("}"))
        man_nodes = master_tree.traverse(condition=self.is_manpage)
        if not man_nodes and not self.config.man_pages:
            logger.warning(
                'no "man_pages" config value nor manual section found; no manual pages '
                'will be written')
            return

        logger.info(bold('START writing man pages ... '), nonl=True)

        for man_parent in man_nodes:

            doc_tree = self.get_partial_document(man_parent)
            Section2Manpage(doc_tree).apply()

            if not doc_tree.man_info["authors"] and self.config.author:
                doc_tree.man_info["authors"].append(self.config.author)

            doc_writer = ManualPageWriter(self)
            doc_settings = OptionParser(
                defaults=self.env.settings,
                components=(doc_writer, ),
                read_config_files=True,
            ).get_default_values()

            doc_settings.__dict__.update(doc_tree.man_info)
            doc_tree.settings = doc_settings
            targetname = '%s.%s' % (doc_tree.man_info.title,
                                    doc_tree.man_info.section)
            if doc_tree.man_info.decl_type in [
                    "struct", "enum", "union", "typedef"
            ]:
                targetname = "%s_%s" % (doc_tree.man_info.decl_type,
                                        targetname)

            destination = FileOutput(destination_path=path.join(
                self.outdir, targetname),
                                     encoding='utf-8')

            logger.info(darkgreen(targetname) + " ", nonl=True)
            self.env.resolve_references(doc_tree, doc_tree.man_info.manpage,
                                        self)

            # remove pending_xref nodes
            for pendingnode in doc_tree.traverse(addnodes.pending_xref):
                pendingnode.replace_self(pendingnode.children)
            doc_writer.write(doc_tree, destination)
        logger.info("END writing man pages.")
Example #41
 def format_undocumented_module(self, module):
     return MODULE_FORMAT.format(module=darkgreen(module))
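
MODULE_FORMAT lives elsewhere in the extension; a minimal sketch of what this one-liner assumes (the format string value is hypothetical, only the {module} placeholder is implied by the code):

from sphinx.util.console import darkgreen

MODULE_FORMAT = "[AutoAPI] Undocumented module: {module}"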
Example #42
 def assemble_doctree(self, indexfile, toctree_only, appendices):
     self.docnames = set([indexfile] + appendices)
     self.info(darkgreen(indexfile) + " ", nonl=1)
     def process_tree(docname, tree):
         tree = tree.deepcopy()
         for toctreenode in tree.traverse(addnodes.toctree):
             newnodes = []
             includefiles = map(str, toctreenode['includefiles'])
             for includefile in includefiles:
                 try:
                     self.info(darkgreen(includefile) + " ", nonl=1)
                     subtree = process_tree(
                         includefile, self.env.get_doctree(includefile))
                     self.docnames.add(includefile)
                 except Exception:
                     self.warn('toctree contains ref to nonexisting '
                               'file %r' % includefile,
                               self.env.doc2path(docname))
                 else:
                     sof = addnodes.start_of_file(docname=includefile)
                     sof.children = subtree.children
                     newnodes.append(sof)
             toctreenode.parent.replace(toctreenode, newnodes)
         return tree
     tree = self.env.get_doctree(indexfile)
     tree['docname'] = indexfile
     if toctree_only:
         # extract toctree nodes from the tree and put them in a
         # fresh document
         new_tree = new_document('<latex output>')
         new_sect = nodes.section()
         new_sect += nodes.title(u'<Set title in conf.py>',
                                 u'<Set title in conf.py>')
         new_tree += new_sect
         for node in tree.traverse(addnodes.toctree):
             new_sect += node
         tree = new_tree
     largetree = process_tree(indexfile, tree)
     largetree['docname'] = indexfile
     for docname in appendices:
         appendix = self.env.get_doctree(docname)
         appendix['docname'] = docname
         largetree.append(appendix)
     self.info()
     self.info("resolving references...")
     self.env.resolve_references(largetree, indexfile, self)
     # resolve :ref:s to distant tex files -- we can't add a cross-reference,
     # but append the document name
     for pendingnode in largetree.traverse(addnodes.pending_xref):
         docname = pendingnode['refdocname']
         sectname = pendingnode['refsectname']
         newnodes = [nodes.emphasis(sectname, sectname)]
         for subdir, title in self.titles:
             if docname.startswith(subdir):
                 newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
                 newnodes.append(nodes.emphasis(title, title))
                 newnodes.append(nodes.Text(')', ')'))
                 break
         pendingnode.replace_self(newnodes)
     return largetree
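
The hand-rolled process_tree above mirrors a helper that ships with Sphinx; depending on the Sphinx version, the same inlining can be expressed as (a sketch, assuming sphinx.util.nodes.inline_all_toctrees is importable):

from sphinx.util.console import darkgreen
from sphinx.util.nodes import inline_all_toctrees

# Inline every document reachable from indexfile into one large doctree,
# printing each included docname in dark green as progress output.
largetree = inline_all_toctrees(
    self, self.docnames, indexfile, tree, darkgreen, [indexfile])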
Example #43
 def build_finished(app, exception):
     if app.verbosity > 1:
         LOGGER.info(bold("[AutoAPI] ") + darkgreen("Cleaning generated .yml files"))
     if os.path.exists(DotNetSphinxMapper.DOCFX_OUTPUT_PATH):
         shutil.rmtree(DotNetSphinxMapper.DOCFX_OUTPUT_PATH)
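
For context, a handler like this is wired up in the extension's setup() via Sphinx's standard build-finished event (a sketch, not the extension's actual setup code):

def setup(app):
    # Run the cleanup once the whole build has finished.
    app.connect("build-finished", build_finished)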
Example #44
 def build_finished(app, exception):
     if app.verbosity > 1:
         app.info(bold('[AutoAPI] ') + darkgreen('Cleaning generated .yml files'))
     if os.path.exists('_api_'):
         shutil.rmtree('_api_')
Example #45
 def build_finished(app, exception):
     if app.verbosity > 1:
         app.info(bold('[AutoAPI] ') + darkgreen('Cleaning generated .yml files'))
     if os.path.exists(DotNetSphinxMapper.DOCFX_OUTPUT_PATH):
         shutil.rmtree(DotNetSphinxMapper.DOCFX_OUTPUT_PATH)
Example #46
    def assemble_doctree(self, docname, title, author, appendices):
        
        self.docnames = set([docname])
        self.info(darkgreen(docname) + " ", nonl=1)
        def process_tree(docname, tree):
            tree = tree.deepcopy()
            for toctreenode in tree.traverse(addnodes.toctree):
                newnodes = []
                includefiles = map(str, toctreenode['includefiles'])
                for includefile in includefiles:
                    try:
                        self.info(darkgreen(includefile) + " ", nonl=1)
                        subtree = process_tree(includefile,
                        self.env.get_doctree(includefile))
                        self.docnames.add(includefile)
                    except Exception:
                        self.warn('%s: toctree contains ref to nonexisting file %r'\
                                                     % (docname, includefile))
                    else:
                        sof = addnodes.start_of_file(docname=includefile)
                        sof.children = subtree.children
                        newnodes.append(sof)
                toctreenode.parent.replace(toctreenode, newnodes)
            return tree

        
        tree = self.env.get_doctree(docname)        
        tree = process_tree(docname, tree)

        if self.config.language:
            langmod = languages.get_language(self.config.language[:2])
        else:
            langmod = languages.get_language('en')
            
        if self.config.pdf_use_index:
            # Add index at the end of the document
            
            # This is a hack. create_index creates an index from 
            # ALL the documents data, not just this one.
            # So, we preserve a copy, use just what we need, then
            # restore it.
            t=copy(self.env.indexentries)
            try:
                self.env.indexentries={docname:self.env.indexentries[docname+'-gen']}
            except KeyError:
                self.env.indexentries={}
                for dname in self.docnames:
                    self.env.indexentries[dname]=t.get(dname,[])
            genindex = self.env.create_index(self)
            self.env.indexentries=t
            # EOH (End Of Hack)
            
            if genindex: # No point in creating empty indexes
                index_nodes=genindex_nodes(genindex)
                tree.append(nodes.raw(text='OddPageBreak twoColumn', format='pdf'))
                tree.append(index_nodes)

        # This is stolen from the HTML builder
        #moduleindex = self.env.domaindata['py']['modules']
        if self.config.pdf_use_modindex and self.env.modules:
            modules = sorted(((mn, ('#module-' + mn, sy, pl, dep)) 
                for (mn, (fn, sy, pl, dep)) in self.env.modules.iteritems()),
                key=lambda x: x[0].lower())
            # collect all platforms
            platforms = set()
            letters = []
            pmn = ''
            fl = '' # first letter
            modindexentries = []
            num_toplevels = 0
            num_collapsables = 0
            cg = 0 # collapse group
            for mn, (fn, sy, pl, dep) in modules:
                pl = pl and pl.split(', ') or []
                platforms.update(pl)
                ignore = self.env.config['modindex_common_prefix']
                ignore = sorted(ignore, key=len, reverse=True)
                for i in ignore:
                    if mn.startswith(i):
                        mn = mn[len(i):]
                        stripped = i
                        break
                else:
                    stripped = ''

                if fl != mn[0].lower() and mn[0] != '_':
                    # heading
                    letter = mn[0].upper()
                    if letter not in letters:
                        modindexentries.append(['', False, 0, False,
                                                letter, '', [], False, ''])
                        letters.append(letter)
                tn = mn.split('.')[0]
                if tn != mn:
                    # submodule
                    if pmn == tn:
                        # first submodule - make parent collapsable
                        modindexentries[-1][1] = True
                        num_collapsables += 1
                    elif not pmn.startswith(tn):
                        # submodule without parent in list, add dummy entry
                        cg += 1
                        modindexentries.append([tn, True, cg, False, '', '',
                                                [], False, stripped])
                else:
                    num_toplevels += 1
                    cg += 1
                modindexentries.append([mn, False, cg, (tn != mn), fn, sy, pl,
                                        dep, stripped])
                pmn = mn
                fl = mn[0].lower()
            platforms = sorted(platforms)
            # As some parts of the module names may have been stripped, those
            # names have changed, thus it is necessary to sort the entries.
            if ignore:
                def sorthelper(entry):
                    name = entry[0]
                    if name == '':
                        # heading
                        name = entry[4]
                    return name.lower()

                modindexentries.sort(key=sorthelper)
                letters.sort()

            # Now, let's try to do the same thing
            # modindex.html does, more or less
            
            output=['DUMMY','=====','',
                    '.. _modindex:\n\n']
            t=_('Global Module Index')
            t+='\n'+'='*len(t)+'\n'
            output.append(t)
            for modname, collapse, cgroup, indent,\
                fname, synops, pform, dep, stripped in modindexentries:
                if not modname: # A letter
                    output.append('.. cssclass:: heading4\n\n%s\n\n'%fname)
                else: # A module
                    if fname:
                        output.append('`%s <%s>`_ '%(stripped+modname,fname))
                        if pform and pform[0]:
                            output[-1]+='*(%s)*'%' '.join(pform)
                        if synops:
                            output[-1]+=', *%s*'%synops
                        if dep:
                            output[-1]+=' **%s**'%_('Deprecated')
                output.append('')
                
            dt = docutils.core.publish_doctree('\n'.join(output))[1:]
            dt.insert(0,nodes.raw(text='OddPageBreak twoColumn', format='pdf'))
            tree.extend(dt)
                    
        if appendices:
            tree.append(nodes.raw(text='OddPageBreak %s'%self.page_template, format='pdf'))
            self.info()
            self.info('adding appendixes...', nonl=1)
            for docname in appendices:
                self.info(darkgreen(docname) + " ", nonl=1)
                appendix = self.env.get_doctree(docname)
                appendix['docname'] = docname
                tree.append(appendix)
            self.info('done')        
        
        self.info()
        self.info("resolving references...")
        #print tree
        #print '--------------'
        self.env.resolve_references(tree, docname, self)
        #print tree

        for pendingnode in tree.traverse(addnodes.pending_xref):
            # This needs work, need to keep track of all targets
            # so I don't replace and create hanging refs, which
            # crash
            if pendingnode.get('reftarget',None) == 'genindex'\
                and self.config.pdf_use_index:
                pendingnode.replace_self(nodes.reference(text=pendingnode.astext(),
                    refuri=pendingnode['reftarget']))
            elif pendingnode.get('reftarget',None) == 'modindex'\
                and self.config.pdf_use_modindex:
                pendingnode.replace_self(nodes.reference(text=pendingnode.astext(),
                    refuri=pendingnode['reftarget']))
            else:
                # FIXME: This is from the LaTeX builder and I still don't
                # understand it well, and it doesn't seem to work
                
                # resolve :ref:s to distant tex files -- we can't add a cross-reference,
                # but append the document name
                docname = pendingnode['refdocname']
                sectname = pendingnode['refsectname']
                newnodes = [nodes.emphasis(sectname, sectname)]
                for subdir, title in self.titles:
                    if docname.startswith(subdir):
                        newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
                        newnodes.append(nodes.emphasis(title, title))
                        newnodes.append(nodes.Text(')', ')'))
                        break
                pendingnode.replace_self(newnodes)
        return tree
Example #47
 def format_undocumented_module(self, module):
     return MODULE_FORMAT.format(module=darkgreen(module))
Example #48
def run_autoapi(app):
    """
    Load AutoAPI data from the filesystem.
    """

    if not app.config.autoapi_dirs:
        raise ExtensionError('You must configure an autoapi_dirs setting')

    # Make sure the paths are full
    normalized_dirs = []
    autoapi_dirs = app.config.autoapi_dirs
    if isinstance(autoapi_dirs, str):
        autoapi_dirs = [autoapi_dirs]
    for path in autoapi_dirs:
        if os.path.isabs(path):
            normalized_dirs.append(path)
        else:
            normalized_dirs.append(
                os.path.normpath(os.path.join(app.confdir, path))
            )

    for _dir in normalized_dirs:
        if not os.path.exists(_dir):
            raise ExtensionError(
                'AutoAPI Directory `{dir}` not found. '
                'Please check your `autoapi_dirs` setting.'.format(
                    dir=_dir
                )
            )

    normalized_root = os.path.normpath(os.path.join(app.confdir, app.config.autoapi_root))
    url_root = os.path.join('/', app.config.autoapi_root)

    app.env.autoapi_data = []

    sphinx_mapper = default_backend_mapping[app.config.autoapi_type]
    sphinx_mapper_obj = sphinx_mapper(app, template_dir=app.config.autoapi_template_dir,
                                      url_root=url_root)

    if app.config.autoapi_file_patterns:
        file_patterns = app.config.autoapi_file_patterns
    else:
        file_patterns = default_file_mapping.get(app.config.autoapi_type, [])

    if app.config.autoapi_ignore:
        ignore_patterns = app.config.autoapi_ignore
    else:
        ignore_patterns = default_ignore_patterns.get(app.config.autoapi_type, [])

    if '.rst' in app.config.source_suffix:
        out_suffix = '.rst'
    elif '.txt' in app.config.source_suffix:
        out_suffix = '.txt'
    else:
        # Fallback to first suffix listed
        out_suffix = app.config.source_suffix[0]

    # Actual meat of the run.
    app.info(bold('[AutoAPI] ') + darkgreen('Loading Data'))
    sphinx_mapper_obj.load(
        patterns=file_patterns,
        dirs=normalized_dirs,
        ignore=ignore_patterns,
    )

    app.info(bold('[AutoAPI] ') + darkgreen('Mapping Data'))
    sphinx_mapper_obj.map(options=app.config.autoapi_options)

    app.info(bold('[AutoAPI] ') + darkgreen('Rendering Data'))
    sphinx_mapper_obj.output_rst(
        root=normalized_root,
        source_suffix=out_suffix,
    )
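
A minimal sketch of how such a loader is typically registered, assuming it runs on Sphinx's builder-inited event (the config default shown is an assumption):

def setup(app):
    app.add_config_value("autoapi_dirs", [], "html")
    # Load, map and render the API data before any document is read.
    app.connect("builder-inited", run_autoapi)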
Example #49
def build_finished(app, exception):
    """
    Output YAML on the file system.
    """

    # Used to get rid of the uidname field for cleaner toc file.
    def sanitize_uidname_field(toc_yaml):
        for module in toc_yaml:
            if 'items' in module:
                sanitize_uidname_field(module['items'])
            module.pop('uidname')

    # Parses the package name and returns package name and module name.
    def find_package_name(package_name):
        for name in package_name:
            if name != "google" and name != "cloud":
                return [name, package_name[-1]]

    # Used to disambiguate names that have same entries.
    def disambiguate_toc_name(toc_yaml):
        names = {}
        for module in toc_yaml:
            names[module['name']] = 1 if module['name'] not in names else 2
            if 'items' in module:
                disambiguate_toc_name(module['items'])

        for module in toc_yaml:
            if names[module['name']] > 1:
                module['name'] = ".".join(
                    find_package_name(module['uidname'].split(".")))

    def find_node_in_toc_tree(toc_yaml, to_add_node):
        for module in toc_yaml:
            if module['uidname'] == to_add_node:
                return module

            if 'items' in module:
                items = module['items']
                found_module = find_node_in_toc_tree(items, to_add_node)
                if found_module is not None:
                    return found_module
        return None

    def convert_module_to_package_if_needed(obj):
        if 'source' in obj and 'path' in obj['source'] and obj['source'][
                'path']:
            if obj['source']['path'].endswith(INITPY):
                obj['type'] = 'package'
                return

        for child_uid in obj['children']:
            if child_uid in app.env.docfx_info_uid_types:
                child_uid_type = app.env.docfx_info_uid_types[child_uid]

                if child_uid_type == MODULE:
                    obj['type'] = 'package'
                    return

    normalized_outdir = os.path.normpath(
        os.path.join(
            app.builder.outdir,  # Output Directory for Builder
            API_ROOT,
        ))
    ensuredir(normalized_outdir)

    toc_yaml = []
    # Record dumped filenames to avoid collisions caused by the
    # case-insensitive Windows file system
    file_name_set = set()

    # Order matters here, we need modules before lower level classes,
    # so that we can make sure to inject the TOC properly
    for data_set in (app.env.docfx_yaml_modules, app.env.docfx_yaml_classes,
                     app.env.docfx_yaml_functions):  # noqa

        for uid, yaml_data in iter(sorted(data_set.items())):
            if not uid:
                # Skip objects without a module
                continue

            references = []

            # Merge module data with class data
            for obj in yaml_data:
                arg_params = obj.get('syntax', {}).get('parameters', [])
                if (len(arg_params) > 0 and 'id' in arg_params[0]
                        and arg_params[0]['id'] == 'self'):
                    # Support having `self` as an arg param, but not documented
                    arg_params = arg_params[1:]
                    obj['syntax']['parameters'] = arg_params
                if obj['uid'] in app.env.docfx_info_field_data and \
                    obj['type'] == app.env.docfx_info_field_data[obj['uid']]['type']:
                    # Avoid entities with same uid and diff type.
                    # Delete `type` temporarily
                    del app.env.docfx_info_field_data[obj['uid']]['type']
                    if 'syntax' not in obj:
                        obj['syntax'] = {}
                    merged_params = []
                    if 'parameters' in app.env.docfx_info_field_data[
                            obj['uid']]:
                        doc_params = app.env.docfx_info_field_data[
                            obj['uid']].get('parameters', [])
                        if arg_params and doc_params:
                            if len(arg_params) - len(doc_params) > 0:
                                app.warn(
                                    "Documented params don't match size of params:"
                                    " {}".format(obj['uid']))
                            # Zip 2 param lists until the long one is exhausted
                            for args, docs in zip_longest(arg_params,
                                                          doc_params,
                                                          fillvalue={}):
                                if len(args) == 0:
                                    merged_params.append(docs)
                                else:
                                    args.update(docs)
                                    merged_params.append(args)
                    obj['syntax'].update(
                        app.env.docfx_info_field_data[obj['uid']])
                    if merged_params:
                        obj['syntax']['parameters'] = merged_params

                    if 'parameters' in obj['syntax'] and obj[
                            'type'] == 'method':
                        for args in obj['syntax']['parameters']:
                            if 'isRequired' not in args and 'defaultValue' not in args:
                                args['isRequired'] = True

                    # Raise up summary
                    if 'summary' in obj['syntax'] and obj['syntax']['summary']:
                        obj['summary'] = obj['syntax'].pop('summary').strip(
                            " \n\r")

                    # Raise up remarks
                    if 'remarks' in obj['syntax'] and obj['syntax']['remarks']:
                        obj['remarks'] = obj['syntax'].pop('remarks')

                    # Raise up seealso
                    if 'seealso' in obj['syntax'] and obj['syntax']['seealso']:
                        obj['seealsoContent'] = obj['syntax'].pop('seealso')

                    # Raise up example
                    if 'example' in obj['syntax'] and obj['syntax']['example']:
                        obj.setdefault('example',
                                       []).append(obj['syntax'].pop('example'))

                    # Raise up exceptions
                    if 'exceptions' in obj['syntax'] and obj['syntax'][
                            'exceptions']:
                        obj['exceptions'] = obj['syntax'].pop('exceptions')

                    # Raise up references
                    if 'references' in obj['syntax'] and obj['syntax'][
                            'references']:
                        obj.setdefault('references', []).extend(
                            obj['syntax'].pop('references'))

                    # add content of temp list 'added_attribute' to children and yaml_data
                    if 'added_attribute' in obj['syntax'] and obj['syntax'][
                            'added_attribute']:
                        added_attribute = obj['syntax'].pop('added_attribute')
                        for attrData in added_attribute:
                            existed_Data = next(
                                (n for n in yaml_data
                                 if n['uid'] == attrData['uid']), None)
                            if existed_Data:
                                # Update the existing entry that has an attribute comment in the source file
                                existed_Data.update(attrData)
                            else:
                                obj.get('children', []).append(attrData['uid'])
                                yaml_data.append(attrData)
                                if 'class' in attrData:
                                    # Get parent for attrData of Non enum class
                                    parent = attrData['class']
                                else:
                                    # Get parent for attrData of enum class
                                    parent = attrData['parent']
                                obj['references'].append(
                                    _create_reference(attrData, parent))
                    app.env.docfx_info_field_data[obj['uid']]['type'] = obj[
                        'type']  # Revert `type` for other objects to use

                if 'references' in obj:
                    # Ensure that references have no duplicate ref
                    ref_uids = [r['uid'] for r in references]
                    for ref_obj in obj['references']:
                        if ref_obj['uid'] not in ref_uids:
                            references.append(ref_obj)
                    obj.pop('references')

                if obj['type'] == 'module':
                    convert_module_to_package_if_needed(obj)

                if obj['type'] == 'method':
                    # Update the name to use shorter name to show
                    obj['name'] = obj['source']['id']

                # To distinguish distribution package and import package
                if obj.get('type', '') == 'package' and obj.get(
                        'kind', '') != 'distribution':
                    obj['kind'] = 'import'

                try:
                    if remove_inheritance_for_notfound_class:
                        if 'inheritance' in obj:
                            python_sdk_name = obj['uid'].split('.')[0]
                            obj['inheritance'] = [
                                n for n in obj['inheritance']
                                if not n['type'].startswith(python_sdk_name)
                                or n['type'] in app.env.docfx_info_uid_types
                            ]
                            if not obj['inheritance']:
                                obj.pop('inheritance')

                except NameError:
                    pass

                if 'source' in obj and (not obj['source']['remote']['repo'] or \
                    obj['source']['remote']['repo'] == 'https://apidrop.visualstudio.com/Content%20CI/_git/ReferenceAutomation'):
                    del obj['source']

            # Output file
            if uid.lower() in file_name_set:
                filename = uid + "(%s)" % app.env.docfx_info_uid_types[uid]
            else:
                filename = uid

            out_file = os.path.join(normalized_outdir, '%s.yml' % filename)
            ensuredir(os.path.dirname(out_file))
            if app.verbosity >= 1:
                app.info(
                    bold('[docfx_yaml] ') +
                    darkgreen('Outputting %s' % filename))

            with open(out_file, 'w') as out_file_obj:
                out_file_obj.write('### YamlMime:UniversalReference\n')
                try:
                    dump(
                        {
                            'items': yaml_data,
                            'references': references,
                            'api_name': [],  # Hack around docfx YAML
                        },
                        out_file_obj,
                        default_flow_style=False)
                except Exception as e:
                    raise ValueError(
                        "Unable to dump object\n{0}".format(yaml_data)) from e

            file_name_set.add(filename)

            # Parse the name of the object.
            # Some types will need additional parsing to de-duplicate their names and contain
            # a portion of their parent name for better disambiguation. This is done in
            # disambiguate_toc_name

            node_name = obj.get('class').split(".")[-1] if obj.get(
                'class') else obj['name']

            # Build nested TOC
            if uid.count('.') >= 1:
                parent_level = '.'.join(uid.split('.')[:-1])
                found_node = find_node_in_toc_tree(toc_yaml, parent_level)

                if found_node:
                    found_node.pop('uid', 'No uid found')
                    found_node.setdefault('items', [{
                        'name': 'Overview',
                        'uidname': parent_level,
                        'uid': parent_level
                    }]).append({
                        'name': node_name,
                        'uidname': uid,
                        'uid': uid
                    })
                else:
                    toc_yaml.append({
                        'name': node_name,
                        'uidname': uid,
                        'uid': uid
                    })

            else:
                toc_yaml.append({
                    'name': node_name,
                    'uidname': uid,
                    'uid': uid
                })

    if len(toc_yaml) == 0:
        raise RuntimeError("No documentation for this module.")

    # Perform additional disambiguation of the name
    disambiguate_toc_name(toc_yaml)

    # The uidname field would carry over into the toc.yml file; we still
    # need it below but don't want it in the written output
    toc_yaml_with_uid = copy.deepcopy(toc_yaml)

    sanitize_uidname_field(toc_yaml)

    toc_file = os.path.join(normalized_outdir, 'toc.yml')
    with open(toc_file, 'w') as writable:
        writable.write(
            dump(
                [{
                    'name':
                    app.config.project,
                    'items': [{
                        'name': 'Overview',
                        'uid': 'project-' + app.config.project
                    }] + toc_yaml
                }],
                default_flow_style=False,
            ))

    index_file = os.path.join(normalized_outdir, 'index.yml')
    index_children = []
    index_references = []
    for item in toc_yaml_with_uid:
        index_children.append(item.get('uidname', ''))
        index_references.append({
            'uid': item.get('uidname', ''),
            'name': item.get('name', ''),
            'fullname': item.get('uidname', ''),
            'isExternal': False
        })
    with open(index_file, 'w') as index_file_obj:
        index_file_obj.write('### YamlMime:UniversalReference\n')
        dump(
            {
                'items': [{
                    'uid': 'project-' + app.config.project,
                    'name': app.config.project,
                    'fullName': app.config.project,
                    'langs': ['python'],
                    'type': 'package',
                    'kind': 'distribution',
                    'summary': '',
                    'children': index_children
                }],
                'references':
                index_references
            },
            index_file_obj,
            default_flow_style=False)
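
To make the disambiguation step concrete: disambiguate_toc_name is a closure inside build_finished, so this is illustration only, with made-up google.cloud uidnames:

# Two leaf entries share the name 'types'; the helper rewrites each using the
# first component of its uidname that is neither "google" nor "cloud".
toc = [
    {'name': 'types', 'uidname': 'google.cloud.vision.types'},
    {'name': 'types', 'uidname': 'google.cloud.speech.types'},
]
disambiguate_toc_name(toc)
# toc[0]['name'] == 'vision.types'; toc[1]['name'] == 'speech.types'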
Example #50
def build_finished(app, exception):
    """
    Output YAML on the file system.
    """
    def find_node_in_toc_tree(toc_yaml, to_add_node):
        for module in toc_yaml:
            if module['name'] == to_add_node:
                return module

            if 'items' in module:
                items = module['items']
                found_module = find_node_in_toc_tree(items, to_add_node)
                if found_module is not None:
                    return found_module

        return None

    def convert_module_to_package_if_needed(obj):
        if 'source' in obj and 'path' in obj['source'] and obj['source'][
                'path']:
            if obj['source']['path'].endswith(INITPY):
                obj['type'] = 'package'
                return

        for child_uid in obj['children']:
            if child_uid in app.env.docfx_info_uid_types:
                child_uid_type = app.env.docfx_info_uid_types[child_uid]

                if child_uid_type == MODULE:
                    obj['type'] = 'package'
                    return

    normalized_outdir = os.path.normpath(
        os.path.join(
            app.builder.outdir,  # Output Directory for Builder
            API_ROOT,
        ))
    ensuredir(normalized_outdir)

    toc_yaml = []

    # Order matters here, we need modules before lower level classes,
    # so that we can make sure to inject the TOC properly
    for data_set in (app.env.docfx_yaml_modules,
                     app.env.docfx_yaml_classes):  # noqa
        for filename, yaml_data in iter(sorted(data_set.items())):
            if not filename:
                # Skip objects without a module
                continue

            references = []

            # Merge module data with class data
            for obj in yaml_data:
                if obj['uid'] in app.env.docfx_info_field_data:
                    if 'syntax' not in obj:
                        obj['syntax'] = {}
                    merged_params = []
                    if 'parameters' in app.env.docfx_info_field_data[
                            obj['uid']]:
                        arg_params = obj['syntax'].get('parameters', [])
                        doc_params = app.env.docfx_info_field_data[
                            obj['uid']].get('parameters', [])
                        if arg_params and doc_params:
                            if len(arg_params) - len(doc_params) > 1:
                                app.warn(
                                    "Documented params don't match size of params:"
                                    " {}".format(obj['uid']))
                            if ('id' in arg_params[0]
                                    and arg_params[0]['id'] == 'self'):
                                # Support having `self` as an arg param, but not documented
                                arg_params = arg_params[1:]
                            # Zip 2 param lists until the long one is exhausted
                            for args, docs in zip_longest(arg_params,
                                                          doc_params,
                                                          fillvalue={}):
                                args.update(docs)
                                merged_params.append(args)
                    obj['syntax'].update(
                        app.env.docfx_info_field_data[obj['uid']])
                    if merged_params:
                        obj['syntax']['parameters'] = merged_params

                    # Raise up summary
                    if 'summary' in obj['syntax'] and obj['syntax']['summary']:
                        obj['summary'] = obj['syntax'].pop('summary')

                    # Raise up seealso
                    if 'seealso' in obj['syntax'] and obj['syntax']['seealso']:
                        obj['seealsoContent'] = obj['syntax'].pop('seealso')

                    # Raise up example
                    if 'example' in obj['syntax'] and obj['syntax']['example']:
                        obj.setdefault('example',
                                       []).append(obj['syntax'].pop('example'))

                    # Raise up exceptions
                    if 'exceptions' in obj['syntax'] and obj['syntax'][
                            'exceptions']:
                        obj['exceptions'] = obj['syntax'].pop('exceptions')

                    # Raise up references
                    if 'references' in obj['syntax'] and obj['syntax'][
                            'references']:
                        obj.setdefault('references', []).extend(
                            obj['syntax'].pop('references'))

                    # add content of temp list 'added_attribute' to children and yaml_data
                    if 'added_attribute' in obj['syntax'] and obj['syntax'][
                            'added_attribute']:
                        added_attribute = obj['syntax'].pop('added_attribute')
                        for attrData in added_attribute:
                            existed_Data = next(
                                (n for n in yaml_data
                                 if n['uid'] == attrData['uid']), None)
                            if existed_Data:
                                # Update the existing entry that has an attribute comment in the source file
                                existed_Data.update(attrData)
                            else:
                                obj.get('children', []).append(attrData['uid'])
                                yaml_data.append(attrData)
                                if 'class' in attrData:
                                    # Get parent for attrData of Non enum class
                                    parent = attrData['class']
                                else:
                                    # Get parent for attrData of enum class
                                    parent = attrData['parent']
                                obj['references'].append(
                                    _create_reference(attrData, parent))

                if 'references' in obj:
                    # Ensure that references have no duplicate ref
                    ref_uids = [r['uid'] for r in references]
                    for ref_obj in obj['references']:
                        if ref_obj['uid'] not in ref_uids:
                            references.append(ref_obj)
                    obj.pop('references')

                if obj['type'] == 'module':
                    convert_module_to_package_if_needed(obj)

                try:
                    if remove_inheritance_for_notfound_class:
                        if 'inheritance' in obj:
                            python_sdk_name = obj['uid'].split('.')[0]
                            obj['inheritance'] = [
                                n for n in obj['inheritance']
                                if not n['type'].startswith(python_sdk_name)
                                or n['type'] in app.env.docfx_info_uid_types
                            ]
                            if not obj['inheritance']:
                                obj.pop('inheritance')

                except NameError:
                    pass

            # Output file
            out_file = os.path.join(normalized_outdir, '%s.yml' % filename)
            ensuredir(os.path.dirname(out_file))
            if app.verbosity >= 1:
                app.info(
                    bold('[docfx_yaml] ') +
                    darkgreen('Outputting %s' % filename))
            with open(out_file, 'w') as out_file_obj:
                out_file_obj.write('### YamlMime:UniversalReference\n')
                dump(
                    {
                        'items': yaml_data,
                        'references': references,
                        'api_name': [],  # Hack around docfx YAML
                    },
                    out_file_obj,
                    default_flow_style=False)

            # Build nested TOC
            if filename.count('.') >= 1:
                parent_level = '.'.join(filename.split('.')[:-1])
                found_node = find_node_in_toc_tree(toc_yaml, parent_level)

                if found_node:
                    found_node.setdefault('items', []).append({
                        'name': filename,
                        'uid': filename
                    })
                else:
                    toc_yaml.append({'name': filename, 'uid': filename})

            else:
                toc_yaml.append({'name': filename, 'uid': filename})

    toc_file = os.path.join(normalized_outdir, 'toc.yml')
    with open(toc_file, 'w') as writable:
        writable.write(dump(
            toc_yaml,
            default_flow_style=False,
        ))
Example #51
    def process_result(self, result: CheckResult) -> None:
        filename = self.env.doc2path(result.docname, None)

        linkstat = dict(filename=filename,
                        lineno=result.lineno,
                        status=result.status,
                        code=result.code,
                        uri=result.uri,
                        info=result.message)
        self.write_linkstat(linkstat)

        if result.status == 'unchecked':
            return
        if result.status == 'working' and result.message == 'old':
            return
        if result.lineno:
            logger.info('(%16s: line %4d) ',
                        result.docname,
                        result.lineno,
                        nonl=True)
        if result.status == 'ignored':
            if result.message:
                logger.info(
                    darkgray('-ignored- ') + result.uri + ': ' +
                    result.message)
            else:
                logger.info(darkgray('-ignored- ') + result.uri)
        elif result.status == 'local':
            logger.info(darkgray('-local-   ') + result.uri)
            self.write_entry('local', result.docname, filename, result.lineno,
                             result.uri)
        elif result.status == 'working':
            logger.info(darkgreen('ok        ') + result.uri + result.message)
        elif result.status == 'broken':
            if self.app.quiet or self.app.warningiserror:
                logger.warning(__('broken link: %s (%s)'),
                               result.uri,
                               result.message,
                               location=(result.docname, result.lineno))
            else:
                logger.info(
                    red('broken    ') + result.uri +
                    red(' - ' + result.message))
            self.write_entry('broken', result.docname, filename, result.lineno,
                             result.uri + ': ' + result.message)
            self.broken_hyperlinks += 1
        elif result.status == 'redirected':
            try:
                text, color = {
                    301: ('permanently', purple),
                    302: ('with Found', purple),
                    303: ('with See Other', purple),
                    307: ('temporarily', turquoise),
                    308: ('permanently', purple),
                }[result.code]
            except KeyError:
                text, color = ('with unknown code', purple)
            linkstat['text'] = text
            if self.config.linkcheck_allowed_redirects:
                logger.warning('redirect  ' + result.uri + ' - ' + text +
                               ' to ' + result.message,
                               location=(result.docname, result.lineno))
            else:
                logger.info(
                    color('redirect  ') + result.uri +
                    color(' - ' + text + ' to ' + result.message))
            self.write_entry('redirected ' + text, result.docname, filename,
                             result.lineno,
                             result.uri + ' to ' + result.message)
        else:
            raise ValueError("Unknown status %s." % result.status)
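
For reference, the CheckResult consumed above is a small named tuple in recent Sphinx versions; a sketch of the shape this method relies on (field names as used above, ordering assumed):

from typing import NamedTuple

class CheckResult(NamedTuple):
    uri: str
    docname: str
    lineno: int
    status: str
    message: str
    code: int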
Example #52
 def build_finished(app, _):
     if app.verbosity > 1:
         LOGGER.info(bold("[AutoAPI] ") + darkgreen("Cleaning generated .yml files"))
     if os.path.exists(DotNetSphinxMapper.DOCFX_OUTPUT_PATH):
         shutil.rmtree(DotNetSphinxMapper.DOCFX_OUTPUT_PATH)
Example #53
def run_autoapi(app):
    """
    Load AutoAPI data from the filesystem.
    """

    if not app.config.autoapi_dirs:
        raise ExtensionError('You must configure an autoapi_dirs setting')

    # Make sure the paths are full
    normalized_dirs = []
    for path in app.config.autoapi_dirs:
        if os.path.isabs(path):
            normalized_dirs.append(path)
        else:
            normalized_dirs.append(
                os.path.normpath(os.path.join(app.confdir, path))
            )

    for _dir in normalized_dirs:
        if not os.path.exists(_dir):
            raise ExtensionError(
                'AutoAPI Directory `{dir}` not found. '
                'Please check your `autoapi_dirs` setting.'.format(
                    dir=_dir
                )
            )

    normalized_root = os.path.normpath(os.path.join(app.confdir, app.config.autoapi_root))
    url_root = os.path.join('/', app.config.autoapi_root)

    app.env.autoapi_data = []

    domain = default_backend_mapping[app.config.autoapi_type]
    domain_obj = domain(app, template_dir=app.config.autoapi_template_dir,
                        url_root=url_root)

    if app.config.autoapi_file_patterns:
        file_patterns = app.config.autoapi_file_patterns
    else:
        file_patterns = default_file_mapping.get(app.config.autoapi_type, [])

    if app.config.autoapi_ignore:
        ignore_patterns = app.config.autoapi_ignore
    else:
        ignore_patterns = default_ignore_patterns.get(app.config.autoapi_type, [])

    app.info(bold('[AutoAPI] ') + darkgreen('Loading Data'))
    domain_obj.load(
        patterns=file_patterns,
        dirs=normalized_dirs,
        ignore=ignore_patterns,
    )

    app.info(bold('[AutoAPI] ') + darkgreen('Mapping Data'))
    domain_obj.map(options=app.config.autoapi_options)

    app.info(bold('[AutoAPI] ') + darkgreen('Rendering Data'))
    domain_obj.output_rst(
        root=normalized_root,
        # TODO: Better way to determine suffix?
        source_suffix=app.config.source_suffix[0],
    )
Example #54
    def assemble_doctree(self, indexfile, toctree_only, appendices):
        self.docnames = set([indexfile] + appendices)
        self.info(darkgreen(indexfile) + " ", nonl=1)

        def process_tree(docname, tree):
            tree = tree.deepcopy()
            for toctreenode in tree.traverse(addnodes.toctree):
                newnodes = []
                includefiles = map(str, toctreenode['includefiles'])
                for includefile in includefiles:
                    try:
                        self.info(darkgreen(includefile) + " ", nonl=1)
                        subtree = process_tree(
                            includefile, self.env.get_doctree(includefile))
                        self.docnames.add(includefile)
                    except Exception:
                        self.warn(
                            'toctree contains ref to nonexisting '
                            'file %r' % includefile,
                            self.env.doc2path(docname))
                    else:
                        sof = addnodes.start_of_file(docname=includefile)
                        sof.children = subtree.children
                        newnodes.append(sof)
                toctreenode.parent.replace(toctreenode, newnodes)
            return tree

        tree = self.env.get_doctree(indexfile)
        tree['docname'] = indexfile
        if toctree_only:
            # extract toctree nodes from the tree and put them in a
            # fresh document
            new_tree = new_document('<latex output>')
            new_sect = nodes.section()
            new_sect += nodes.title(u'<Set title in conf.py>',
                                    u'<Set title in conf.py>')
            new_tree += new_sect
            for node in tree.traverse(addnodes.toctree):
                new_sect += node
            tree = new_tree
        largetree = process_tree(indexfile, tree)
        largetree['docname'] = indexfile
        for docname in appendices:
            appendix = self.env.get_doctree(docname)
            appendix['docname'] = docname
            largetree.append(appendix)
        self.info()
        self.info("resolving references...")
        self.env.resolve_references(largetree, indexfile, self)
        # resolve :ref:s to distant tex files -- we can't add a cross-reference,
        # but append the document name
        for pendingnode in largetree.traverse(addnodes.pending_xref):
            docname = pendingnode['refdocname']
            sectname = pendingnode['refsectname']
            newnodes = [nodes.emphasis(sectname, sectname)]
            for subdir, title in self.titles:
                if docname.startswith(subdir):
                    newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
                    newnodes.append(nodes.emphasis(title, title))
                    newnodes.append(nodes.Text(')', ')'))
                    break
            else:
                pass
            pendingnode.replace_self(newnodes)
        return largetree
Example #55
    def assemble_doctree(self, docname, title, author, appendices):

        # FIXME: use the new inline_all_trees from Sphinx.
        # check how the LaTeX builder does it.

        self.docnames = set([docname])
        self.spinx_logger.info(darkgreen(docname) + " ")
        def process_tree(docname, tree):
            tree = tree.deepcopy()
            for toctreenode in tree.traverse(addnodes.toctree):
                newnodes = []
                includefiles = map(str, toctreenode['includefiles'])
                for includefile in includefiles:
                    try:
                        self.spinx_logger.info(darkgreen(includefile) + " ")
                        subtree = process_tree(includefile,
                        self.env.get_doctree(includefile))
                        self.docnames.add(includefile)
                    except Exception:
                        self.warn('%s: toctree contains ref to nonexisting '
                                  'file %r' % (docname, includefile))
                    else:
                        sof = addnodes.start_of_file(docname=includefile)
                        sof.children = subtree.children
                        newnodes.append(sof)
                toctreenode.parent.replace(toctreenode, newnodes)
            return tree

        tree = self.env.get_doctree(docname)
        tree = process_tree(docname, tree)

        self.docutils_languages = {}
        if self.config.language:
            self.docutils_languages[self.config.language] = \
                get_language_available(self.config.language)[2]

        if self.opts.get('pdf_use_index',self.config.pdf_use_index):
            # Add index at the end of the document

            # This is a hack. create_index creates an index from
            # ALL the documents data, not just this one.
            # So, we preserve a copy, use just what we need, then
            # restore it.
            t=copy(self.env.indexentries)
            try:
                self.env.indexentries={docname:self.env.indexentries[docname+'-gen']}
            except KeyError:
                self.env.indexentries={}
                for dname in self.docnames:
                    self.env.indexentries[dname]=t.get(dname,[])
            genindex = IndexEntries(self.env).create_index(self)
            self.env.indexentries=t
            # EOH (End Of Hack)

            if genindex: # No point in creating empty indexes
                index_nodes=genindex_nodes(genindex)
                tree.append(nodes.raw(text='OddPageBreak twoColumn', format='pdf'))
                tree.append(index_nodes)

        # This is stolen from the HTML builder's prepare_writing function
        self.domain_indices = []
        # pdf_domain_indices can be False/True or a list of index names
        indices_config = self.config.pdf_domain_indices
        if indices_config and hasattr(self.env, 'domains'):
            for domain in self.env.domains.values():
                for indexcls in domain.indices:
                    indexname = '%s-%s' % (domain.name, indexcls.name)
                    if isinstance(indices_config, list):
                        if indexname not in indices_config:
                            continue
                    # deprecated config value
                    if indexname == 'py-modindex' and \
                           not self.config.pdf_use_modindex:
                        continue
                    content, collapse = indexcls(domain).generate()
                    if content:
                        self.domain_indices.append(
                            (indexname, indexcls, content, collapse))

        # self.domain_indices contains a list of indices to generate, like
        # this:
        # [('py-modindex',
        #    <class 'sphinx.domains.python.PythonModuleIndex'>,
        #   [(u'p', [[u'parrot', 0, 'test', u'module-parrot', 'Unix, Windows',
        #   '', 'Analyze and reanimate dead parrots.']])], True)]

        # Now this in the HTML builder is passed onto write_domain_indices.
        # We handle it right here

        for indexname, indexcls, content, collapse in self.domain_indices:
            indexcontext = dict(
                indextitle = indexcls.localname,
                content = content,
                collapse_index = collapse,
            )
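            # note: indexcontext is populated for parity with the HTML
            # builder but is never consumed in the reST generation below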
            # In HTML this is handled with a Jinja template, domainindex.html
            # We have to generate docutils stuff right here in the same way.
            self.spinx_logger.info(' ' + indexname)

            output=['DUMMY','=====','',
                    '.. _modindex:\n\n']
            t=indexcls.localname
            t+='\n'+'='*len(t)+'\n'
            output.append(t)

            for letter, entries in content:
                output.append('.. cssclass:: heading4\n\n%s\n\n'%letter)
                for (name, grouptype, page, anchor,
                    extra, qualifier, description) in entries:
                    if qualifier:
                        q = '[%s]'%qualifier
                    else:
                        q = ''

                    if extra:
                        e = '(%s)'%extra
                    else:
                        e = ''
                    output.append('`%s <#%s>`_ %s %s' % (name, anchor, e, q))
                    output.append('    %s'%description)
                output.append('')

            dt = docutils.core.publish_doctree('\n'.join(output))[1:]
            dt.insert(0,nodes.raw(text='OddPageBreak twoColumn', format='pdf'))
            tree.extend(dt)


        if appendices:
            tree.append(nodes.raw(text='OddPageBreak %s'%self.page_template, format='pdf'))
            self.spinx_logger.info('')
            self.spinx_logger.info('adding appendixes...')
            for docname in appendices:
                self.spinx_logger.info(darkgreen(docname) + " ")
                appendix = self.env.get_doctree(docname)
                appendix['docname'] = docname
                tree.append(appendix)
            self.spinx_logger.info('done')

        # Replace Sphinx's HighlightLanguageTransform with our own for Sphinx
        # versions >= 1.8.0 and < 2.0.0, as Sphinx's HighlightLanguageTransform
        # breaks the linenothreshold setting in the highlight directive (see
        # issue #721). This code can be removed when we drop support for
        # Python 2.
        if sphinx.__version__ > '1.7.9' and sphinx.__version__ < '2.0.0':
            for i in range(len(self.env.app.registry.post_transforms)):
                if self.env.app.registry.post_transforms[i].__name__ == 'HighlightLanguageTransform':
                    self.env.app.registry.post_transforms[i] = HighlightLanguageTransform
                    break

        self.spinx_logger.info("resolving references...")
        self.env.resolve_references(tree, docname, self)

        for pendingnode in tree.traverse(addnodes.pending_xref):
            # This needs work: we should keep track of all targets so we
            # don't replace them and create dangling refs, which crash.
            if pendingnode.get('reftarget', None) == 'genindex' \
                    and self.config.pdf_use_index:
                pendingnode.replace_self(nodes.reference(
                    text=pendingnode.astext(),
                    refuri=pendingnode['reftarget']))
            # FIXME: probably need to handle dangling links to domain-specific indexes
            else:
                # FIXME: This is from the LaTeX builder and I still don't understand it
                # well, and doesn't seem to work

                # resolve :ref:s to distant tex files -- we can't add a cross-reference,
                # but append the document name
                docname = pendingnode['refdocname']
                sectname = pendingnode['refsectname']
                newnodes = [nodes.emphasis(sectname, sectname)]
                for subdir, title in self.titles:
                    if docname.startswith(subdir):
                        newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
                        newnodes.append(nodes.emphasis(title, title))
                        newnodes.append(nodes.Text(')', ')'))
                        break
                pendingnode.replace_self(newnodes)
        return tree
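
The block between "This is a hack" and "EOH" above saves env.indexentries, narrows it to just the documents in this PDF so create_index only sees them, and then restores the full mapping. That save/narrow/restore dance fits naturally into a context manager; the sketch below assumes indexentries behaves as a plain dict, and narrowed_indexentries is an illustrative name, not an rst2pdf or Sphinx API:

    from contextlib import contextmanager
    from copy import copy

    @contextmanager
    def narrowed_indexentries(env, docname, docnames):
        """Temporarily restrict env.indexentries to the documents that
        make up this output file, restoring the full mapping on exit."""
        saved = copy(env.indexentries)
        try:
            try:
                env.indexentries = {docname: saved[docname + '-gen']}
            except KeyError:
                env.indexentries = {d: saved.get(d, []) for d in docnames}
            yield env
        finally:
            env.indexentries = saved

With it, the hack collapses to:

    with narrowed_indexentries(self.env, docname, self.docnames):
        genindex = IndexEntries(self.env).create_index(self)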
Example #56
0
    def assemble_doctree(self, docname, title, author, appendices):

        # FIXME: use the new inline_all_trees from Sphinx.
        # check how the LaTeX builder does it.

        self.docnames = set([docname])
        self.info(darkgreen(docname) + " ", nonl=1)
        def process_tree(docname, tree):
            tree = tree.deepcopy()
            for toctreenode in tree.traverse(addnodes.toctree):
                newnodes = []
                includefiles = map(str, toctreenode['includefiles'])
                for includefile in includefiles:
                    try:
                        self.info(darkgreen(includefile) + " ", nonl=1)
                        subtree = process_tree(
                            includefile, self.env.get_doctree(includefile))
                        self.docnames.add(includefile)
                    except Exception:
                        self.warn('%s: toctree contains ref to nonexisting '
                                  'file %r' % (docname, includefile))
                    else:
                        sof = addnodes.start_of_file(docname=includefile)
                        sof.children = subtree.children
                        newnodes.append(sof)
                toctreenode.parent.replace(toctreenode, newnodes)
            return tree

        tree = self.env.get_doctree(docname)
        tree = process_tree(docname, tree)

        self.docutils_languages = {}
        if self.config.language:
            self.docutils_languages[self.config.language] = \
                get_language_available(self.config.language)[2]

        if self.opts.get('pdf_use_index',self.config.pdf_use_index):
            # Add index at the end of the document

            # This is a hack. create_index creates an index from
            # ALL the documents data, not just this one.
            # So, we preserve a copy, use just what we need, then
            # restore it.
            t=copy(self.env.indexentries)
            try:
                self.env.indexentries={docname:self.env.indexentries[docname+'-gen']}
            except KeyError:
                self.env.indexentries={}
                for dname in self.docnames:
                    self.env.indexentries[dname]=t.get(dname,[])
            genindex = self.env.create_index(self)
            self.env.indexentries=t
            # EOH (End Of Hack)

            if genindex: # No point in creating empty indexes
                index_nodes=genindex_nodes(genindex)
                tree.append(nodes.raw(text='OddPageBreak twoColumn', format='pdf'))
                tree.append(index_nodes)

        # This is stolen from the HTML builder's prepare_writing function
        self.domain_indices = []
        # pdf_domain_indices can be False/True or a list of index names
        indices_config = self.config.pdf_domain_indices
        if indices_config and hasattr(self.env, 'domains'):
            for domain in self.env.domains.itervalues():
                for indexcls in domain.indices:
                    indexname = '%s-%s' % (domain.name, indexcls.name)
                    if isinstance(indices_config, list):
                        if indexname not in indices_config:
                            continue
                    # deprecated config value
                    if indexname == 'py-modindex' and \
                           not self.config.pdf_use_modindex:
                        continue
                    content, collapse = indexcls(domain).generate()
                    if content:
                        self.domain_indices.append(
                            (indexname, indexcls, content, collapse))

        # self.domain_indices contains a list of indices to generate, like
        # this:
        # [('py-modindex',
        #    <class 'sphinx.domains.python.PythonModuleIndex'>,
        #   [(u'p', [[u'parrot', 0, 'test', u'module-parrot', 'Unix, Windows',
        #   '', 'Analyze and reanimate dead parrots.']])], True)]

        # Now this in the HTML builder is passed onto write_domain_indices.
        # We handle it right here

        for indexname, indexcls, content, collapse in self.domain_indices:
            indexcontext = dict(
                indextitle = indexcls.localname,
                content = content,
                collapse_index = collapse,
            )
            # In HTML this is handled with a Jinja template, domainindex.html
            # We have to generate docutils stuff right here in the same way.
            self.info(' ' + indexname, nonl=1)
            print

            output=['DUMMY','=====','',
                    '.. _modindex:\n\n']
            t=indexcls.localname
            t+='\n'+'='*len(t)+'\n'
            output.append(t)

            for letter, entries in content:
                output.append('.. cssclass:: heading4\n\n%s\n\n'%letter)
                for (name, grouptype, page, anchor,
                    extra, qualifier, description) in entries:
                    if qualifier:
                        q = '[%s]'%qualifier
                    else:
                        q = ''

                    if extra:
                        e = '(%s)'%extra
                    else:
                        e = ''
                    output.append('`%s <#%s>`_ %s %s' % (name, anchor, e, q))
                    output.append('    %s'%description)
                output.append('')

            dt = docutils.core.publish_doctree('\n'.join(output))[1:]
            dt.insert(0,nodes.raw(text='OddPageBreak twoColumn', format='pdf'))
            tree.extend(dt)


        if appendices:
            tree.append(nodes.raw(text='OddPageBreak %s'%self.page_template, format='pdf'))
            self.info()
            self.info('adding appendixes...', nonl=1)
            for docname in appendices:
                self.info(darkgreen(docname) + " ", nonl=1)
                appendix = self.env.get_doctree(docname)
                appendix['docname'] = docname
                tree.append(appendix)
            self.info('done')

        self.info()
        self.info("resolving references...")
        self.env.resolve_references(tree, docname, self)

        for pendingnode in tree.traverse(addnodes.pending_xref):
            # This needs work: we should keep track of all targets so we
            # don't replace them and create dangling refs, which crash.
            if pendingnode.get('reftarget', None) == 'genindex' \
                    and self.config.pdf_use_index:
                pendingnode.replace_self(nodes.reference(
                    text=pendingnode.astext(),
                    refuri=pendingnode['reftarget']))
            # FIXME: probably need to handle dangling links to domain-specific indexes
            else:
                # FIXME: This is from the LaTeX builder and I still don't understand it
                # well, and doesn't seem to work

                # resolve :ref:s to distant tex files -- we can't add a cross-reference,
                # but append the document name
                docname = pendingnode['refdocname']
                sectname = pendingnode['refsectname']
                newnodes = [nodes.emphasis(sectname, sectname)]
                for subdir, title in self.titles:
                    if docname.startswith(subdir):
                        newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
                        newnodes.append(nodes.emphasis(title, title))
                        newnodes.append(nodes.Text(')', ')'))
                        break
                pendingnode.replace_self(newnodes)
        return tree
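
Both PDF variants end with the same fallback inherited from the LaTeX builder: a pending_xref that points at a distant document cannot become a working link, so it is replaced with the target section name in emphasis, optionally followed by the title of the document that contains it. Below is a sketch of that fallback factored into a helper (localization via _() omitted for brevity); titles is assumed to be the same list of (subdir, title) pairs the builders keep on self:

    from docutils import nodes

    def replace_distant_xref(pendingnode, titles):
        """Replace an unresolvable pending_xref with
        '<section> (in <document title>)'."""
        docname = pendingnode['refdocname']
        sectname = pendingnode['refsectname']
        newnodes = [nodes.emphasis(sectname, sectname)]
        for subdir, title in titles:
            if docname.startswith(subdir):
                newnodes.append(nodes.Text(' (in '))
                newnodes.append(nodes.emphasis(title, title))
                newnodes.append(nodes.Text(')'))
                break
        pendingnode.replace_self(newnodes)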