Example #1
    def is_parallel_allowed(self, typ):
        # type: (str) -> bool
        """Check parallel processing is allowed or not.

        ``typ`` is the type of processing: ``'read'`` or ``'write'``.
        """
        if typ == 'read':
            attrname = 'parallel_read_safe'
            message = __("the %s extension does not declare if it is safe "
                         "for parallel reading, assuming it isn't - please "
                         "ask the extension author to check and make it "
                         "explicit")
        elif typ == 'write':
            attrname = 'parallel_write_safe'
            message = __("the %s extension does not declare if it is safe "
                         "for parallel writing, assuming it isn't - please "
                         "ask the extension author to check and make it "
                         "explicit")
        else:
            raise ValueError('parallel type %s is not supported' % typ)

        for ext in self.extensions.values():
            allowed = getattr(ext, attrname, None)
            if allowed is None:
                logger.warning(message, ext.name)
                logger.warning(__('doing serial %s'), typ)
                return False
            elif not allowed:
                return False

        return True
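
A minimal sketch of the tri-state check above (hypothetical names, not part of the original source): a missing attribute means "unknown" and disables parallelism, while an explicit True from every extension allows it.

# illustrative stand-ins for registered extensions
class DemoExt:
    parallel_read_safe = True  # explicit opt-in

def allowed(extensions, attrname):
    # None means the extension did not declare the flag at all
    flags = [getattr(ext, attrname, None) for ext in extensions]
    return None not in flags and all(flags)

print(allowed([DemoExt()], 'parallel_read_safe'))  # True: everyone opted in
print(allowed([object()], 'parallel_read_safe'))   # False: flag undeclared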
Example #2
 def convert_overrides(self, name, value):
     # type: (str, Any) -> Any
     if not isinstance(value, str):
         return value
     else:
         defvalue = self.values[name][0]
         if self.values[name][2] == Any:
             return value
         elif isinstance(defvalue, dict):
             raise ValueError(__('cannot override dictionary config setting %r, '
                                 'ignoring (use %r to set individual elements)') %
                              (name, name + '.key=value'))
         elif isinstance(defvalue, list):
             return value.split(',')
         elif isinstance(defvalue, int):
             try:
                 return int(value)
             except ValueError:
                 raise ValueError(__('invalid number %r for config value %r, ignoring') %
                                  (value, name))
         elif hasattr(defvalue, '__call__'):
             return value
         elif defvalue is not None and not isinstance(defvalue, str):
             raise ValueError(__('cannot override config setting %r with unsupported '
                                 'type, ignoring') % name)
         else:
             return value
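
The coercion rules are easiest to see on concrete override strings; this is what a hypothetical command-line override (always delivered as a string) turns into for each default type.

# list-typed default: the override string is split on commas
assert 'ru,en'.split(',') == ['ru', 'en']
# int-typed default: the override string is parsed as a number
assert int('4') == 4
# a non-numeric string for an int default raises the ValueError seen above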
Example #3
 def _init_i18n(self):
     # type: () -> None
     """Load translated strings from the configured localedirs if enabled in
     the configuration.
     """
     if self.config.language is not None:
         logger.info(bold(__('loading translations [%s]... ') % self.config.language),
                     nonl=True)
         user_locale_dirs = [
             path.join(self.srcdir, x) for x in self.config.locale_dirs]
         # compile mo files if the sphinx.po files in the user locale
         # directories have been updated
         for catinfo in find_catalog_source_files(
                 user_locale_dirs, self.config.language, domains=['sphinx'],
                 charset=self.config.source_encoding):
             catinfo.write_mo(self.config.language)
         locale_dirs = [None, path.join(package_dir, 'locale')] + user_locale_dirs
     else:
         locale_dirs = []
     self.translator, has_translation = locale.init(locale_dirs, self.config.language)
     if self.config.language is not None:
         if has_translation or self.config.language == 'en':
             # "en" never needs to be translated
             logger.info(__('done'))
         else:
             logger.info(__('not available for built-in messages'))
Example #4
    def convert(self, _from, _to):
        # type: (unicode, unicode) -> bool
        """Converts the image to expected one."""
        try:
            if _from.lower().endswith('.gif'):
                # when target is GIF format, pick the first frame
                _from += '[0]'

            args = ([self.config.image_converter] +
                    self.config.image_converter_args +
                    [_from, _to])
            logger.debug('Invoking %r ...', args)
            p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError as err:
            if err.errno != ENOENT:  # No such file or directory
                raise
            logger.warning(__('convert command %r cannot be run, '
                              'check the image_converter setting'),
                           self.config.image_converter)
            return False

        try:
            stdout, stderr = p.communicate()
        except (OSError, IOError) as err:
            if err.errno not in (EPIPE, EINVAL):
                raise
            stdout, stderr = p.stdout.read(), p.stderr.read()
            p.wait()
        if p.returncode != 0:
            raise ExtensionError(__('convert exited with error:\n'
                                    '[stderr]\n%s\n[stdout]\n%s') %
                                 (stderr, stdout))

        return True
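
The same Popen/communicate pattern in isolation, as a sketch assuming a POSIX system where the 'true' command exists; only standard-library calls are used.

import subprocess

p = subprocess.Popen(['true'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()   # waits for the process and drains the pipes
print(p.returncode)                # 0 on success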
Example #5
    def is_available(self):
        # type: () -> bool
        """Confirms the converter is available or not."""
        try:
            args = [self.config.image_converter, '-version']
            logger.debug('Invoking %r ...', args)
            p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except (OSError, IOError):
            logger.warning(__('convert command %r cannot be run, '
                              'check the image_converter setting'),
                           self.config.image_converter)
            return False

        try:
            stdout, stderr = p.communicate()
        except (OSError, IOError) as err:
            if err.errno not in (EPIPE, EINVAL):
                raise
            stdout, stderr = p.stdout.read(), p.stderr.read()
            p.wait()
        if p.returncode != 0:
            encoding = locale.getpreferredencoding()
            logger.warning(__('convert exited with error:\n'
                              '[stderr]\n%s\n[stdout]\n%s'),
                           stderr.decode(encoding), stdout.decode(encoding))
            return False

        return True
Example #6
    def finish(self):
        # type: () -> None
        self.copy_image_files()

        # copy TeX support files from texinputs
        context = {'latex_engine': self.config.latex_engine}
        logger.info(bold(__('copying TeX support files...')))
        staticdirname = path.join(package_dir, 'texinputs')
        for filename in os.listdir(staticdirname):
            if not filename.startswith('.'):
                copy_asset_file(path.join(staticdirname, filename),
                                self.outdir, context=context)

        # use pre-1.6.x Makefile for make latexpdf on Windows
        if os.name == 'nt':
            staticdirname = path.join(package_dir, 'texinputs_win')
            copy_asset_file(path.join(staticdirname, 'Makefile_t'),
                            self.outdir, context=context)

        # copy additional files
        if self.config.latex_additional_files:
            logger.info(bold(__('copying additional files...')), nonl=1)
            for filename in self.config.latex_additional_files:
                logger.info(' ' + filename, nonl=1)
                copy_asset_file(path.join(self.confdir, filename), self.outdir)
            logger.info('')

        # the logo is handled differently
        if self.config.latex_logo:
            if not path.isfile(path.join(self.confdir, self.config.latex_logo)):
                raise SphinxError(__('logo file %r does not exist') % self.config.latex_logo)
            else:
                copy_asset_file(path.join(self.confdir, self.config.latex_logo), self.outdir)
        logger.info(__('done'))
Example #7
    def build(self, force_all=False, filenames=None):
        # type: (bool, List[unicode]) -> None
        try:
            if force_all:
                self.builder.compile_all_catalogs()
                self.builder.build_all()
            elif filenames:
                self.builder.compile_specific_catalogs(filenames)
                self.builder.build_specific(filenames)
            else:
                self.builder.compile_update_catalogs()
                self.builder.build_update()

            status = (self.statuscode == 0 and
                      __('succeeded') or __('finished with problems'))
            if self._warncount:
                logger.info(bold(__('build %s, %s warning%s.') %
                                 (status, self._warncount,
                                  self._warncount != 1 and 's' or '')))
            else:
                logger.info(bold(__('build %s.') % status))
        except Exception as err:
            # delete the saved env to force a fresh build next time
            envfile = path.join(self.doctreedir, ENV_PICKLE_FILENAME)
            if path.isfile(envfile):
                os.unlink(envfile)
            self.emit('build-finished', err)
            raise
        else:
            self.emit('build-finished', None)
        self.builder.cleanup()
Example #8
    def __init__(self, name, theme_path, factory):
        # type: (unicode, unicode, HTMLThemeFactory) -> None
        self.name = name
        self.base = None
        self.rootdir = None

        if path.isdir(theme_path):
            # already a directory, do nothing
            self.rootdir = None
            self.themedir = theme_path
        else:
            # extract the theme to a temp directory
            self.rootdir = tempfile.mkdtemp('sxt')
            self.themedir = path.join(self.rootdir, name)
            extract_zip(theme_path, self.themedir)

        self.config = configparser.RawConfigParser()
        self.config.read(path.join(self.themedir, THEMECONF))  # type: ignore

        try:
            inherit = self.config.get('theme', 'inherit')
        except configparser.NoSectionError:
            raise ThemeError(__('theme %r doesn\'t have "theme" setting') % name)
        except configparser.NoOptionError:
            raise ThemeError(__('theme %r doesn\'t have "inherit" setting') % name)

        if inherit != 'none':
            try:
                self.base = factory.create(inherit)
            except ThemeError:
                raise ThemeError(__('no theme named %r found, inherited by %r') %
                                 (inherit, name))
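
For reference, a hypothetical theme.conf that passes both checks above needs a [theme] section with an inherit option; RawConfigParser reads it like this.

import configparser

cfg = configparser.RawConfigParser()
cfg.read_string('[theme]\ninherit = basic\n')
print(cfg.get('theme', 'inherit'))  # 'basic'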
Example #9
    def _resolve_numref_xref(self, env, fromdocname, builder, typ, target, node, contnode):
        # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element  # NOQA
        if target in self.data['labels']:
            docname, labelid, figname = self.data['labels'].get(target, ('', '', ''))
        else:
            docname, labelid = self.data['anonlabels'].get(target, ('', ''))
            figname = None

        if not docname:
            return None

        target_node = env.get_doctree(docname).ids.get(labelid)
        figtype = self.get_enumerable_node_type(target_node)
        if figtype is None:
            return None

        if figtype != 'section' and env.config.numfig is False:
            logger.warning(__('numfig is disabled. :numref: is ignored.'), location=node)
            return contnode

        try:
            fignumber = self.get_fignumber(env, builder, figtype, docname, target_node)
            if fignumber is None:
                return contnode
        except ValueError:
            logger.warning(__("no number is assigned for %s: %s"), figtype, labelid,
                           location=node)
            return contnode

        try:
            if node['refexplicit']:
                title = contnode.astext()
            else:
                title = env.config.numfig_format.get(figtype, '')

            if figname is None and '{name}' in title:
                logger.warning(__('the link has no caption: %s'), title, location=node)
                return contnode
            else:
                fignum = '.'.join(map(str, fignumber))
                if '{name}' in title or 'number' in title:
                    # new style format (cf. "Fig.{number}")
                    if figname:
                        newtitle = title.format(name=figname, number=fignum)
                    else:
                        newtitle = title.format(number=fignum)
                else:
                    # old style format (cf. "Fig.%s")
                    newtitle = title % fignum
        except KeyError as exc:
            logger.warning(__('invalid numfig_format: %s (%r)'), title, exc, location=node)
            return contnode
        except TypeError:
            logger.warning(__('invalid numfig_format: %s'), title, location=node)
            return contnode

        return self.build_reference_node(fromdocname, builder,
                                         docname, labelid, newtitle, 'numref',
                                         nodeclass=addnodes.number_reference,
                                         title=title)
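
The two numfig_format styles handled above, shown directly with illustrative values.

fignum = '.'.join(map(str, (1, 2)))   # '1.2'
# new style: str.format with {number} and {name} placeholders
print('Fig. {number} ({name})'.format(number=fignum, name='flow'))
# old style: a plain %s placeholder
print('Fig. %s' % fignum)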
Example #10
 def build_specific(self, filenames):
     # type: (List[unicode]) -> None
     """Only rebuild as much as needed for changes in the *filenames*."""
     # bring the filenames to the canonical format, that is,
     # relative to the source directory and without source_suffix.
     dirlen = len(self.srcdir) + 1
     to_write = []
      suffixes = None  # type: Tuple[unicode, ...]
     suffixes = tuple(self.config.source_suffix)  # type: ignore
     for filename in filenames:
         filename = path.normpath(path.abspath(filename))
         if not filename.startswith(self.srcdir):
             logger.warning(__('file %r given on command line is not under the '
                               'source directory, ignoring'), filename)
             continue
         if not (path.isfile(filename) or
                 any(path.isfile(filename + suffix) for suffix in suffixes)):
             logger.warning(__('file %r given on command line does not exist, '
                               'ignoring'), filename)
             continue
         filename = filename[dirlen:]
         for suffix in suffixes:
             if filename.endswith(suffix):
                 filename = filename[:-len(suffix)]
                 break
         filename = filename.replace(path.sep, SEP)
         to_write.append(filename)
     self.build(to_write, method='specific',
                summary=__('%d source files given on command line') % len(to_write))
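
A sketch of the canonicalization with hypothetical POSIX paths: strip the source-directory prefix, drop the source suffix, and normalize separators.

from os import path

srcdir = '/src/docs'                              # assumed source directory
filename = path.normpath('/src/docs/guide/intro.rst')
docname = filename[len(srcdir) + 1:]
for suffix in ('.rst', '.txt'):                   # assumed source_suffix values
    if docname.endswith(suffix):
        docname = docname[:-len(suffix)]
        break
print(docname.replace(path.sep, '/'))             # 'guide/intro'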
Example #11
 def post_process_images(self, doctree):
     # type: (nodes.Node) -> None
     """Pick the best candidate for all image URIs."""
     images = ImageAdapter(self.env)
     for node in doctree.traverse(nodes.image):
         if '?' in node['candidates']:
             # don't rewrite nonlocal image URIs
             continue
         if '*' not in node['candidates']:
             for imgtype in self.supported_image_types:
                 candidate = node['candidates'].get(imgtype, None)
                 if candidate:
                     break
             else:
                 mimetypes = sorted(node['candidates'])
                 image_uri = images.get_original_image_uri(node['uri'])
                 if mimetypes:
                     logger.warning(__('a suitable image for %s builder not found: '
                                       '%s (%s)'),
                                    self.name, mimetypes, image_uri, location=node)
                 else:
                     logger.warning(__('a suitable image for %s builder not found: %s'),
                                    self.name, image_uri, location=node)
                 continue
             node['uri'] = candidate
         else:
             candidate = node['uri']
         if candidate not in self.env.images:
             # non-existing URI; let it alone
             continue
         self.images[candidate] = self.env.images[candidate][1]
Example #12
    def write(self, build_docnames, updated_docnames, method='update'):
        # type: (Iterable[unicode], Sequence[unicode], unicode) -> None
        if build_docnames is None or build_docnames == ['__all__']:
            # build_all
            build_docnames = self.env.found_docs
        if method == 'update':
            # build updated ones as well
            docnames = set(build_docnames) | set(updated_docnames)
        else:
            docnames = set(build_docnames)
        logger.debug(__('docnames to write: %s'), ', '.join(sorted(docnames)))

        # add all toctree-containing files that may have changed
        for docname in list(docnames):
            for tocdocname in self.env.files_to_rebuild.get(docname, set()):
                if tocdocname in self.env.found_docs:
                    docnames.add(tocdocname)
        docnames.add(self.config.master_doc)

        logger.info(bold(__('preparing documents... ')), nonl=True)
        self.prepare_writing(docnames)
        logger.info(__('done'))

        if self.parallel_ok:
            # number of subprocesses is parallel-1 because the main process
            # is busy loading doctrees and doing write_doc_serialized()
            self._write_parallel(sorted(docnames),
                                 nproc=self.app.parallel - 1)
        else:
            self._write_serial(sorted(docnames))
Example #13
    def _write_parallel(self, docnames, nproc):
        # type: (Sequence[unicode], int) -> None
        def write_process(docs):
            # type: (List[Tuple[unicode, nodes.Node]]) -> None
            self.app.phase = BuildPhase.WRITING
            for docname, doctree in docs:
                self.write_doc(docname, doctree)

        # warm up caches/compile templates using the first document
        firstname, docnames = docnames[0], docnames[1:]
        self.app.phase = BuildPhase.RESOLVING
        doctree = self.env.get_and_resolve_doctree(firstname, self)
        self.app.phase = BuildPhase.WRITING
        self.write_doc_serialized(firstname, doctree)
        self.write_doc(firstname, doctree)

        tasks = ParallelTasks(nproc)
        chunks = make_chunks(docnames, nproc)

        self.app.phase = BuildPhase.RESOLVING
        for chunk in status_iterator(chunks, __('writing output... '), "darkgreen",
                                     len(chunks), self.app.verbosity):
            arg = []
            for i, docname in enumerate(chunk):
                doctree = self.env.get_and_resolve_doctree(docname, self)
                self.write_doc_serialized(docname, doctree)
                arg.append((docname, doctree))
            tasks.add_task(write_process, arg)

        # make sure all threads have finished
        logger.info(bold(__('waiting for workers...')))
        tasks.join()
Example #14
    def _read_parallel(self, docnames, nproc):
        # type: (List[str], int) -> None
        # clear all outdated docs at once
        for docname in docnames:
            self.app.emit('env-purge-doc', self.env, docname)
            self.env.clear_doc(docname)

        def read_process(docs):
            # type: (List[str]) -> bytes
            self.env.app = self.app
            for docname in docs:
                self.read_doc(docname)
            # allow pickling self to send it back
            return pickle.dumps(self.env, pickle.HIGHEST_PROTOCOL)

        def merge(docs, otherenv):
            # type: (List[str], bytes) -> None
            env = pickle.loads(otherenv)
            self.env.merge_info_from(docs, env, self.app)

        tasks = ParallelTasks(nproc)
        chunks = make_chunks(docnames, nproc)

        for chunk in status_iterator(chunks, __('reading sources... '), "purple",
                                     len(chunks), self.app.verbosity):
            tasks.add_task(read_process, chunk, merge)

        # make sure all threads have finished
        logger.info(bold(__('waiting for workers...')))
        tasks.join()
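
The worker-to-parent handoff relies on a plain pickle round trip; in isolation it is just this.

import pickle

payload = pickle.dumps({'read': ['index', 'usage']}, pickle.HIGHEST_PROTOCOL)
print(pickle.loads(payload))   # {'read': ['index', 'usage']}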
Example #15
    def setup(self, app):
        # type: (Sphinx) -> None
        """Set up BuildEnvironment object."""
        if self.version and self.version != app.registry.get_envversion(app):
            raise BuildEnvironmentError(__('build environment version not current'))
        elif self.srcdir and self.srcdir != app.srcdir:
            raise BuildEnvironmentError(__('source directory has changed'))

        if self.project:
            app.project.restore(self.project)

        self.app = app
        self.doctreedir = app.doctreedir
        self.srcdir = app.srcdir
        self.project = app.project
        self.version = app.registry.get_envversion(app)

        # initialize domains
        self.domains = {}
        for domain in app.registry.create_domains(self):
            self.domains[domain.name] = domain

        # initialize config
        self._update_config(app.config)

        # initialize settings
        self._update_settings(app.config)
Example #16
    def update_config(self, config, srcdir, doctreedir):
        # type: (Config, unicode, unicode) -> Tuple[bool, unicode]
        """Update configurations by new one."""
        changed_reason = ''
        if self.config is None:
            changed_reason = __('new config')
        else:
            # check if a config value was changed that affects how
            # doctrees are read
            for confval in config.filter('env'):
                if self.config[confval.name] != confval.value:
                    changed_reason = __('config changed')
                    break

            # this value is not covered by the above loop because it is handled
            # specially by the config class
            if self.config.extensions != config.extensions:
                changed_reason = __('extensions changed')

        # the source and doctree directories may have been relocated
        self.srcdir = srcdir
        self.doctreedir = doctreedir
        self.config = config
        self._update_settings(config)

        # return tuple of (changed, reason)
        return bool(changed_reason), changed_reason
Example #17
def eval_config_file(filename, tags):
    # type: (unicode, Tags) -> Dict[unicode, Any]
    """Evaluate a config file."""
    namespace = {}  # type: Dict[unicode, Any]
    namespace['__file__'] = filename
    namespace['tags'] = tags

    with cd(path.dirname(filename)):
        # while the config file is executing, the current dir is changed to ``confdir``.
        try:
            execfile_(filename, namespace)
        except SyntaxError as err:
            msg = __("There is a syntax error in your configuration file: %s")
            if PY3:
                msg += __("\nDid you change the syntax from 2.x to 3.x?")
            raise ConfigError(msg % err)
        except SystemExit:
            msg = __("The configuration file (or one of the modules it imports) "
                     "called sys.exit()")
            raise ConfigError(msg)
        except Exception:
            msg = __("There is a programmable error in your configuration file:\n\n%s")
            raise ConfigError(msg % traceback.format_exc())

    return namespace
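
Executing the file's source into the prepared namespace is the core of what execfile_ does; a plain-Python sketch of the same idea (illustrative values only).

namespace = {'__file__': 'conf.py', 'tags': None}   # assumed setup
exec(compile("project = 'Demo'", 'conf.py', 'exec'), namespace)
print(namespace['project'])   # 'Demo'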
Example #18
 def validate_config_value(self):
     # type: () -> None
     # <package> lang attribute, dc:language
     if not self.app.config.epub_language:
         logger.warning(__('conf value "epub_language" (or "language") '
                           'should not be empty for EPUB3'))
     # <package> unique-identifier attribute
     if not xmlname_checker().match(self.app.config.epub_uid):
         logger.warning(__('conf value "epub_uid" should be XML NAME for EPUB3'))
     # dc:title
     if not self.app.config.epub_title:
         logger.warning(__('conf value "epub_title" (or "html_title") '
                           'should not be empty for EPUB3'))
     # dc:creator
     if not self.app.config.epub_author:
         logger.warning(__('conf value "epub_author" should not be empty for EPUB3'))
     # dc:contributor
     if not self.app.config.epub_contributor:
         logger.warning(__('conf value "epub_contributor" should not be empty for EPUB3'))
     # dc:description
     if not self.app.config.epub_description:
         logger.warning(__('conf value "epub_description" should not be empty for EPUB3'))
     # dc:publisher
     if not self.app.config.epub_publisher:
         logger.warning(__('conf value "epub_publisher" should not be empty for EPUB3'))
     # dc:rights
     if not self.app.config.epub_copyright:
         logger.warning(__('conf value "epub_copyright" (or "copyright")'
                           'should not be empty for EPUB3'))
     # dc:identifier
     if not self.app.config.epub_identifier:
         logger.warning(__('conf value "epub_identifier" should not be empty for EPUB3'))
     # meta ibooks:version
     if not self.app.config.version:
         logger.warning(__('conf value "version" should not be empty for EPUB3'))
Example #19
    def build_qhp(self, outdir, outname):
        # type: (str, str) -> None
        logger.info(__('writing project file...'))

        # sections
        tocdoc = self.env.get_and_resolve_doctree(self.config.master_doc, self,
                                                  prune_toctrees=False)

        sections = []
        matcher = NodeMatcher(addnodes.compact_paragraph, toctree=True)
        for node in tocdoc.traverse(matcher):  # type: addnodes.compact_paragraph
            sections.extend(self.write_toc(node))

        for indexname, indexcls, content, collapse in self.domain_indices:
            item = section_template % {'title': indexcls.localname,
                                       'ref': '%s.html' % indexname}
            sections.append(' ' * 4 * 4 + item)
        sections = '\n'.join(sections)  # type: ignore

        # keywords
        keywords = []
        index = IndexEntries(self.env).create_index(self, group_entries=False)
        for (key, group) in index:
            for title, (refs, subitems, key_) in group:
                keywords.extend(self.build_keywords(title, refs, subitems))
        keywords = u'\n'.join(keywords)  # type: ignore

        # it seems that the "namespace" may not contain non-alphanumeric
        # characters; more than one successive dot, as well as leading or
        # trailing dots, are also forbidden
        if self.config.qthelp_namespace:
            nspace = self.config.qthelp_namespace
        else:
            nspace = 'org.sphinx.%s.%s' % (outname, self.config.version)

        nspace = re.sub(r'[^a-zA-Z0-9.\-]', '', nspace)
        nspace = re.sub(r'\.+', '.', nspace).strip('.')
        nspace = nspace.lower()

        # write the project file
        with open(path.join(outdir, outname + '.qhp'), 'w', encoding='utf-8') as f:
            body = render_file('project.qhp', outname=outname,
                               title=self.config.html_title, version=self.config.version,
                               project=self.config.project, namespace=nspace,
                               master_doc=self.config.master_doc,
                               sections=sections, keywords=keywords,
                               files=self.get_project_files(outdir))
            f.write(body)

        homepage = 'qthelp://' + posixpath.join(
            nspace, 'doc', self.get_target_uri(self.config.master_doc))
        startpage = 'qthelp://' + posixpath.join(nspace, 'doc', 'index.html')

        logger.info(__('writing collection project file...'))
        with open(path.join(outdir, outname + '.qhcp'), 'w', encoding='utf-8') as f:
            body = render_file('project.qhcp', outname=outname,
                               title=self.config.html_short_title,
                               homepage=homepage, startpage=startpage)
            f.write(body)
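
The namespace normalization above, run on a deliberately messy value (illustration only).

import re

nspace = 'org.sphinx.My Project!.1.0'
nspace = re.sub(r'[^a-zA-Z0-9.\-]', '', nspace)   # drop forbidden characters
nspace = re.sub(r'\.+', '.', nspace).strip('.')   # collapse and trim dots
print(nspace.lower())   # 'org.sphinx.myproject.1.0'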
Example #20
    def highlight_block(self, source, lang, opts=None, location=None, force=False, **kwargs):
        # type: (str, str, Any, Any, bool, Any) -> str
        if not isinstance(source, text_type):
            source = source.decode()

        # find out which lexer to use
        if lang in ('py', 'python'):
            if source.startswith('>>>'):
                # interactive session
                lexer = lexers['pycon']
            else:
                lexer = lexers['python']
        elif lang in ('py3', 'python3', 'default'):
            if source.startswith('>>>'):
                lexer = lexers['pycon3']
            else:
                lexer = lexers['python3']
        elif lang == 'guess':
            try:
                lexer = guess_lexer(source)
            except Exception:
                lexer = lexers['none']
        else:
            if lang in lexers:
                lexer = lexers[lang]
            else:
                try:
                    lexer = lexers[lang] = get_lexer_by_name(lang, **(opts or {}))
                except ClassNotFound:
                    logger.warning(__('Pygments lexer name %r is not known'), lang,
                                   location=location)
                    lexer = lexers['none']
                else:
                    lexer.add_filter('raiseonerror')

        # trim doctest options if wanted
        if isinstance(lexer, PythonConsoleLexer) and self.trim_doctest_flags:
            source = doctest.blankline_re.sub('', source)
            source = doctest.doctestopt_re.sub('', source)

        # highlight via Pygments
        formatter = self.get_formatter(**kwargs)
        try:
            hlsource = highlight(source, lexer, formatter)
        except ErrorToken:
            # this is most probably not the selected language,
            # so let it pass unhighlighted
            if lang == 'default':
                pass  # automatic highlighting failed.
            else:
                logger.warning(__('Could not lex literal_block as "%s". '
                                  'Highlighting skipped.'), lang,
                               type='misc', subtype='highlighting_failure',
                               location=location)
            hlsource = highlight(source, lexers['none'], formatter)
        if self.dest == 'html':
            return hlsource
        else:
            return hlsource.translate(tex_hl_escape_map_new)
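
The Pygments calls used above in standalone form, assuming Pygments is installed.

from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name

lexer = get_lexer_by_name('python')
print(highlight('print(1)', lexer, HtmlFormatter()))   # HTML with span markup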
Example #21
def load_mappings(app):
    # type: (Sphinx) -> None
    """Load all intersphinx mappings into the environment."""
    now = int(time.time())
    cache_time = now - app.config.intersphinx_cache_limit * 86400
    inventories = InventoryAdapter(app.builder.env)
    update = False
    for key, (name, (uri, invs)) in app.config.intersphinx_mapping.items():
        failures = []
        for inv in invs:
            if not inv:
                inv = posixpath.join(uri, INVENTORY_FILENAME)
            # decide whether the inventory must be read: always read local
            # files; remote ones only if the cache time is expired
            if '://' not in inv or uri not in inventories.cache \
                    or inventories.cache[uri][1] < cache_time:
                safe_inv_url = _get_safe_url(inv)
                logger.info(__('loading intersphinx inventory from %s...'), safe_inv_url)
                try:
                    invdata = fetch_inventory(app, uri, inv)
                except Exception as err:
                    failures.append(err.args)
                    continue

                if invdata:
                    inventories.cache[uri] = (name, now, invdata)
                    update = True
                    break

        if failures == []:
            pass
        elif len(failures) < len(invs):
            logger.info(__("encountered some issues with some of the inventories,"
                           " but they had working alternatives:"))
            for fail in failures:
                logger.info(*fail)
        else:
            issues = '\n'.join([f[0] % f[1:] for f in failures])
            logger.warning(__("failed to reach any of the inventories "
                              "with the following issues:") + "\n" + issues)

    if update:
        inventories.clear()

        # Duplicate values in different inventories will shadow each
        # other; which one will override which can vary between builds
        # since they are specified using an unordered dict.  To make
        # it more consistent, we sort the named inventories and then
        # add the unnamed inventories last.  This means that the
        # unnamed inventories will shadow the named ones but the named
        # ones can still be accessed when the name is specified.
        cached_vals = list(inventories.cache.values())
        named_vals = sorted(v for v in cached_vals if v[0])
        unnamed_vals = [v for v in cached_vals if not v[0]]
        for name, _x, invdata in named_vals + unnamed_vals:
            if name:
                inventories.named_inventory[name] = invdata
            for type, objects in invdata.items():
                inventories.main_inventory.setdefault(type, {}).update(objects)
Example #22
 def add_builder(self, builder):
     # type: (Type[Builder]) -> None
     if not hasattr(builder, 'name'):
         raise ExtensionError(__('Builder class %s has no "name" attribute') % builder)
     if builder.name in self.builders:
         raise ExtensionError(__('Builder %r already exists (in module %s)') %
                              (builder.name, self.builders[builder.name].__module__))
     self.builders[builder.name] = builder
Example #23
 def override_domain(self, domain):
     # type: (Type[Domain]) -> None
     if domain.name not in self.domains:
         raise ExtensionError(__('domain %s not yet registered') % domain.name)
     if not issubclass(domain, self.domains[domain.name]):
         raise ExtensionError(__('new domain not a subclass of registered %s '
                                 'domain') % domain.name)
     self.domains[domain.name] = domain
Example #24
 def copy_support_files(self):
     # type: () -> None
     try:
         with progress_message(__('copying Texinfo support files')):
             logger.info('Makefile ', nonl=True)
             copy_asset_file(os.path.join(template_dir, 'Makefile'), self.outdir)
     except OSError as err:
         logger.warning(__("error writing file Makefile: %s"), err)
Example #25
def render_dot(self, code, options, format, prefix='graphviz'):
    # type: (nodes.NodeVisitor, unicode, Dict, unicode, unicode) -> Tuple[unicode, unicode]
    """Render graphviz code into a PNG or PDF output file."""
    graphviz_dot = options.get('graphviz_dot', self.builder.config.graphviz_dot)
    hashkey = (code + str(options) + str(graphviz_dot) +
               str(self.builder.config.graphviz_dot_args)).encode('utf-8')

    fname = '%s-%s.%s' % (prefix, sha1(hashkey).hexdigest(), format)
    relfn = posixpath.join(self.builder.imgpath, fname)
    outfn = path.join(self.builder.outdir, self.builder.imagedir, fname)

    if path.isfile(outfn):
        return relfn, outfn

    if (hasattr(self.builder, '_graphviz_warned_dot') and
       self.builder._graphviz_warned_dot.get(graphviz_dot)):
        return None, None

    ensuredir(path.dirname(outfn))

    # graphviz expects UTF-8 by default
    if isinstance(code, text_type):
        code = code.encode('utf-8')

    dot_args = [graphviz_dot]
    dot_args.extend(self.builder.config.graphviz_dot_args)
    dot_args.extend(['-T' + format, '-o' + outfn])
    if format == 'png':
        dot_args.extend(['-Tcmapx', '-o%s.map' % outfn])
    try:
        p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE)
    except OSError as err:
        if err.errno != ENOENT:   # No such file or directory
            raise
        logger.warning(__('dot command %r cannot be run (needed for graphviz '
                          'output), check the graphviz_dot setting'), graphviz_dot)
        if not hasattr(self.builder, '_graphviz_warned_dot'):
            self.builder._graphviz_warned_dot = {}
        self.builder._graphviz_warned_dot[graphviz_dot] = True
        return None, None
    try:
        # Graphviz may close standard input when an error occurs,
        # resulting in a broken pipe on communicate()
        stdout, stderr = p.communicate(code)
    except (OSError, IOError) as err:
        if err.errno not in (EPIPE, EINVAL):
            raise
        # in this case, read the standard output and standard error streams
        # directly, to get the error message(s)
        stdout, stderr = p.stdout.read(), p.stderr.read()
        p.wait()
    if p.returncode != 0:
        raise GraphvizError(__('dot exited with error:\n[stderr]\n%s\n'
                               '[stdout]\n%s') % (stderr, stdout))
    if not path.isfile(outfn):
        raise GraphvizError(__('dot did not produce an output file:\n[stderr]\n%s\n'
                               '[stdout]\n%s') % (stderr, stdout))
    return relfn, outfn
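
The cache key is a content hash over everything that influences the output; the file-naming scheme in isolation, with illustrative inputs.

from hashlib import sha1

hashkey = b'digraph { a -> b }' + b"{'graphviz_dot': 'dot'}"  # code + options
print('graphviz-%s.png' % sha1(hashkey).hexdigest())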
Example #26
 def add_builder(self, builder, override=False):
     # type: (Type[Builder], bool) -> None
     logger.debug('[app] adding builder: %r', builder)
     if not hasattr(builder, 'name'):
         raise ExtensionError(__('Builder class %s has no "name" attribute') % builder)
     if builder.name in self.builders and not override:
         raise ExtensionError(__('Builder %r already exists (in module %s)') %
                              (builder.name, self.builders[builder.name].__module__))
     self.builders[builder.name] = builder
Example #27
 def write_file(fpath, content, newline=None):
     # type: (unicode, unicode, unicode) -> None
     if overwrite or not path.isfile(fpath):
         if 'quiet' not in d:
             print(__('Creating file %s.') % fpath)
         with open(fpath, 'wt', encoding='utf-8', newline=newline) as f:
             f.write(content)
     else:
         if 'quiet' not in d:
             print(__('File %s already exists, skipping.') % fpath)
Example #28
 def add_role_to_domain(self, domain, name, role, override=False):
     # type: (unicode, unicode, Union[RoleFunction, XRefRole], bool) -> None
     logger.debug('[app] adding role to domain: %r', (domain, name, role))
     if domain not in self.domains:
         raise ExtensionError(__('domain %s not yet registered') % domain)
     roles = self.domain_roles.setdefault(domain, {})
     if name in roles and not override:
          raise ExtensionError(__('The %r role is already registered to the %s domain') %
                              (name, domain))
     roles[name] = role
Example #29
 def add_index_to_domain(self, domain, index, override=False):
     # type: (unicode, Type[Index], bool) -> None
     logger.debug('[app] adding index to domain: %r', (domain, index))
     if domain not in self.domains:
         raise ExtensionError(__('domain %s not yet registered') % domain)
     indices = self.domain_indices.setdefault(domain, [])
     if index in indices and not override:
          raise ExtensionError(__('The %r index is already registered to the %s domain') %
                              (index.name, domain))
     indices.append(index)
Example #30
    def handle(self, node):
        # type: (nodes.Node) -> None
        try:
            basename = os.path.basename(node['uri'])
            if '?' in basename:
                basename = basename.split('?')[0]
            if basename == '' or len(basename) > MAX_FILENAME_LEN:
                filename, ext = os.path.splitext(node['uri'])
                basename = sha1(filename.encode("utf-8")).hexdigest() + ext

            dirname = node['uri'].replace('://', '/').translate({ord("?"): u"/",
                                                                 ord("&"): u"/"})
            if len(dirname) > MAX_FILENAME_LEN:
                dirname = sha1(dirname.encode('utf-8')).hexdigest()
            ensuredir(os.path.join(self.imagedir, dirname))
            path = os.path.join(self.imagedir, dirname, basename)

            headers = {}
            if os.path.exists(path):
                timestamp = ceil(os.stat(path).st_mtime)  # type: float
                headers['If-Modified-Since'] = epoch_to_rfc1123(timestamp)

            r = requests.get(node['uri'], headers=headers)
            if r.status_code >= 400:
                logger.warning(__('Could not fetch remote image: %s [%d]') %
                               (node['uri'], r.status_code))
            else:
                self.app.env.original_image_uri[path] = node['uri']

                if r.status_code == 200:
                    with open(path, 'wb') as f:
                        f.write(r.content)

                last_modified = r.headers.get('last-modified')
                if last_modified:
                    timestamp = rfc1123_to_epoch(last_modified)
                    os.utime(path, (timestamp, timestamp))

                mimetype = guess_mimetype(path, default='*')
                if mimetype != '*' and os.path.splitext(basename)[1] == '':
                    # append a suffix if URI does not contain suffix
                    ext = get_image_extension(mimetype)
                    newpath = os.path.join(self.imagedir, dirname, basename + ext)
                    movefile(path, newpath)
                    self.app.env.original_image_uri.pop(path)
                    self.app.env.original_image_uri[newpath] = node['uri']
                    path = newpath
                node['candidates'].pop('?')
                node['candidates'][mimetype] = path
                node['uri'] = path
                self.app.env.images.add_file(self.env.docname, path)
        except Exception as exc:
            logger.warning(__('Could not fetch remote image: %s [%s]') %
                           (node['uri'], text_type(exc)))
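
The conditional GET above uses the standard If-Modified-Since header; a minimal sketch with the documented requests API (network access required; a 304 status means the cached file is still current).

import requests

headers = {'If-Modified-Since': 'Sun, 06 Nov 1994 08:49:37 GMT'}
r = requests.get('https://www.example.com/', headers=headers)
print(r.status_code)   # 200 with a fresh body, or 304 if the cache is valid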
Example #31
    def generate_docs(
        self,
        source_filenames: List[str],
        output_dir: str = None,
        suffix: str = ".rst",
        base_path: str = None,
        imported_members: bool = False,
        overwrite: bool = True,
        encoding: str = "utf-8",
    ) -> None:
        """
        Generate and write stub files for objects defined in the :rst:dir:`automodapi`
        and :rst:dir:`automodsumm` directives.

        Parameters
        ----------

        source_filenames : List[str]
            A list of all filenames in which to search for the
            :rst:dir:`automodapi` and :rst:dir:`automodsumm` directives.

        output_dir : `str`
            Directory to which the stub files will be written.

        suffix : `str`
            (Default ``".rst"``) Suffix given to the written stub files.

        base_path : `str`
            The common base path for the filenames listed in ``source_filenames``.
            This is typically the source directory of the Sphinx application.

        imported_members : `bool`
            (Default `False`) Set `True` to include imported members in the
            stub file documentation for *module* object types.

        overwrite : `bool`
            (Default `True`) When `True`, existing stub files are overwritten
            when their content changes.

        encoding : `str`
            (Default: ``"utf-8"``) Encoding for the written stub files.


        .. note::  Adapted from
                   :func:`sphinx.ext.autosummary.generate.generate_autosummary_docs`.
        """
        app = self.app

        _info = self.logger.info
        _warn = self.logger.warning

        showed_sources = list(sorted(source_filenames))
        _info(__(
            f"[automodsumm] generating stub files for {len(showed_sources)} sources"
        ))

        if output_dir:
            _info(__(f"[automodsumm] writing to {output_dir}"))

        if base_path is not None:
            source_filenames = [
                os.path.join(base_path, filename)
                for filename in source_filenames
            ]

        template = AutomodsummRenderer(app)

        # read
        items = self.find_in_files(source_filenames)

        # keep track of new files
        new_files = []

        if app:
            filename_map = app.config.autosummary_filename_map
        else:
            filename_map = {}

        # write
        for entry in sorted(set(items), key=str):
            if entry.path is None:
                # The corresponding automodsumm:: directive did not have
                # a :toctree: option
                continue

            path = output_dir or os.path.abspath(entry.path)
            ensuredir(path)

            try:
                name, obj, parent, modname = import_by_name(entry.name)
                qualname = name.replace(modname + ".", "")
            except ImportError as e:
                try:
                    # try to import as an instance attribute
                    name, obj, parent, modname = import_ivar_by_name(
                        entry.name)
                    qualname = name.replace(modname + ".", "")
                except ImportError:
                    _warn(
                        __(f"[automodsumm] failed to import {entry.name}: {e}")
                    )
                    continue

            context = {}
            if app:
                context.update(app.config.autosummary_context)

            content = generate_autosummary_content(
                name,
                obj,
                parent,
                template,
                entry.template,
                imported_members,
                app,
                entry.recursive,
                context,
                modname,
                qualname,
            )

            filename = os.path.join(path,
                                    filename_map.get(name, name) + suffix)
            if os.path.isfile(filename):
                with open(filename, encoding=encoding) as f:
                    old_content = f.read()

                if content == old_content:
                    continue
                elif overwrite:  # content has changed
                    with open(filename, "w", encoding=encoding) as f:
                        f.write(content)
                    new_files.append(filename)
            else:
                with open(filename, "w", encoding=encoding) as f:
                    f.write(content)
                new_files.append(filename)

        # descend recursively to new files
        if new_files:
            self.generate_docs(
                new_files,
                output_dir=output_dir,
                suffix=suffix,
                base_path=base_path,
                imported_members=imported_members,
                overwrite=overwrite,
            )
Example #32
        def _entries_from_toctree(toctreenode: addnodes.toctree,
                                  parents: List[str],
                                  separate: bool = False,
                                  subtree: bool = False) -> List[Element]:
            """Return TOC entries for a toctree node."""
            refs = [(e[0], e[1]) for e in toctreenode['entries']]
            entries = []  # type: List[Element]
            for (title, ref) in refs:
                try:
                    refdoc = None
                    if url_re.match(ref):
                        if title is None:
                            title = ref
                        reference = nodes.reference('',
                                                    '',
                                                    internal=False,
                                                    refuri=ref,
                                                    anchorname='',
                                                    *[nodes.Text(title)])
                        para = addnodes.compact_paragraph('', '', reference)
                        item = nodes.list_item('', para)
                        toc = nodes.bullet_list('', item)
                    elif ref == 'self':
                        # 'self' refers to the document from which this
                        # toctree originates
                        ref = toctreenode['parent']
                        if not title:
                            title = clean_astext(self.env.titles[ref])
                        reference = nodes.reference('',
                                                    '',
                                                    internal=True,
                                                    refuri=ref,
                                                    anchorname='',
                                                    *[nodes.Text(title)])
                        para = addnodes.compact_paragraph('', '', reference)
                        item = nodes.list_item('', para)
                        # don't show subitems
                        toc = nodes.bullet_list('', item)
                    else:
                        if ref in parents:
                            logger.warning(__('circular toctree references '
                                              'detected, ignoring: %s <- %s'),
                                           ref,
                                           ' <- '.join(parents),
                                           location=ref)
                            continue
                        refdoc = ref
                        toc = self.env.tocs[ref].deepcopy()
                        maxdepth = self.env.metadata[ref].get('tocdepth', 0)
                        if ref not in toctree_ancestors or (prune
                                                            and maxdepth > 0):
                            self._toctree_prune(toc, 2, maxdepth, collapse)
                        process_only_nodes(toc, builder.tags)
                        if title and toc.children and len(toc.children) == 1:
                            child = toc.children[0]
                            for refnode in child.traverse(nodes.reference):
                                if refnode['refuri'] == ref and \
                                   not refnode['anchorname']:
                                    refnode.children = [nodes.Text(title)]
                    if not toc.children:
                        # empty toc means: no titles will show up in the toctree
                        logger.warning(__(
                            'toctree contains reference to document %r that '
                            'doesn\'t have a title: no link will be generated'
                        ),
                                       ref,
                                       location=toctreenode)
                except KeyError:
                    # this is raised if the included file does not exist
                    if excluded(self.env.doc2path(ref, None)):
                        message = __(
                            'toctree contains reference to excluded document %r'
                        )
                    else:
                        message = __(
                            'toctree contains reference to nonexisting document %r'
                        )

                    logger.warning(message, ref, location=toctreenode)
                else:
                    # if titles_only is given, only keep the main title and
                    # sub-toctrees
                    if titles_only:
                        # children of toc are:
                        # - list_item + compact_paragraph + (reference and subtoc)
                        # - only + subtoc
                        # - toctree
                        children = cast(Iterable[nodes.Element], toc)

                        # delete everything but the toplevel title(s)
                        # and toctrees
                        for toplevel in children:
                            # nodes with length 1 don't have any children anyway
                            if len(toplevel) > 1:
                                subtrees = toplevel.traverse(addnodes.toctree)
                                if subtrees:
                                    toplevel[1][:] = subtrees  # type: ignore
                                else:
                                    toplevel.pop(1)
                    # resolve all sub-toctrees
                    for subtocnode in toc.traverse(addnodes.toctree):
                        if not (subtocnode.get('hidden', False)
                                and not includehidden):
                            i = subtocnode.parent.index(subtocnode) + 1
                            for entry in _entries_from_toctree(
                                    subtocnode, [refdoc] + parents,
                                    subtree=True):
                                subtocnode.parent.insert(i, entry)
                                i += 1
                            subtocnode.parent.remove(subtocnode)
                    if separate:
                        entries.append(toc)
                    else:
                        children = cast(Iterable[nodes.Element], toc)
                        entries.extend(children)
            if not subtree and not separate:
                ret = nodes.bullet_list()
                ret += entries
                return [ret]
            return entries
Example #33
def handle_exception(app: Sphinx,
                     args: Any,
                     exception: BaseException,
                     stderr: IO = sys.stderr) -> None:  # NOQA
    if isinstance(exception, bdb.BdbQuit):
        return

    if args.pdb:
        print(red(__('Exception occurred while building, starting debugger:')),
              file=stderr)
        traceback.print_exc()
        pdb.post_mortem(sys.exc_info()[2])
    else:
        print(file=stderr)
        if args.verbosity or args.traceback:
            traceback.print_exc(None, stderr)
            print(file=stderr)
        if isinstance(exception, KeyboardInterrupt):
            print(__('Interrupted!'), file=stderr)
        elif isinstance(exception, SystemMessage):
            print(red(__('reST markup error:')), file=stderr)
            print(terminal_safe(exception.args[0]), file=stderr)
        elif isinstance(exception, SphinxError):
            print(red('%s:' % exception.category), file=stderr)
            print(str(exception), file=stderr)
        elif isinstance(exception, UnicodeError):
            print(red(__('Encoding error:')), file=stderr)
            print(terminal_safe(str(exception)), file=stderr)
            tbpath = save_traceback(app)
            print(red(
                __('The full traceback has been saved in %s, if you want '
                   'to report the issue to the developers.') % tbpath),
                  file=stderr)
        elif isinstance(exception,
                        RuntimeError) and 'recursion depth' in str(exception):
            print(red(__('Recursion error:')), file=stderr)
            print(terminal_safe(str(exception)), file=stderr)
            print(file=stderr)
            print(__('This can happen with very large or deeply nested source '
                     'files. You can carefully increase the default Python '
                     'recursion limit of 1000 in conf.py with e.g.:'),
                  file=stderr)
            print('    import sys; sys.setrecursionlimit(1500)', file=stderr)
        else:
            print(red(__('Exception occurred:')), file=stderr)
            print(format_exception_cut_frames().rstrip(), file=stderr)
            tbpath = save_traceback(app)
            print(red(
                __('The full traceback has been saved in %s, if you '
                   'want to report the issue to the developers.') % tbpath),
                  file=stderr)
            print(__('Please also report this if it was a user error, so '
                     'that a better error message can be provided next time.'),
                  file=stderr)
            print(__('A bug report can be filed in the tracker at '
                     '<https://github.com/sphinx-doc/sphinx/issues>. Thanks!'),
                  file=stderr)
Example #34
def build_main(argv: List[str] = sys.argv[1:]) -> int:
    """Sphinx build "main" command-line entry."""

    parser = get_parser()
    args = parser.parse_args(argv)

    if args.noconfig:
        args.confdir = None
    elif not args.confdir:
        args.confdir = args.sourcedir

    if not args.doctreedir:
        args.doctreedir = os.path.join(args.outputdir, '.doctrees')

    # handle remaining filename arguments
    filenames = args.filenames
    missing_files = []
    for filename in filenames:
        if not os.path.isfile(filename):
            missing_files.append(filename)
    if missing_files:
        parser.error(__('cannot find files %r') % missing_files)

    if args.force_all and filenames:
        parser.error(__('cannot combine -a option and filenames'))

    if args.color == 'no' or (args.color == 'auto' and not color_terminal()):
        nocolor()

    status = sys.stdout
    warning = sys.stderr
    error = sys.stderr

    if args.quiet:
        status = None

    if args.really_quiet:
        status = warning = None

    if warning and args.warnfile:
        try:
            warnfp = open(args.warnfile, 'w')
        except Exception as exc:
            parser.error(
                __('cannot open warning file %r: %s') % (args.warnfile, exc))
        warning = Tee(warning, warnfp)  # type: ignore
        error = warning

    confoverrides = {}
    for val in args.define:
        try:
            key, val = val.split('=', 1)
        except ValueError:
            parser.error(
                __('-D option argument must be in the form name=value'))
        confoverrides[key] = val

    for val in args.htmldefine:
        try:
            key, val = val.split('=')
        except ValueError:
            parser.error(
                __('-A option argument must be in the form name=value'))
        try:
            val = int(val)
        except ValueError:
            pass
        confoverrides['html_context.%s' % key] = val

    if args.nitpicky:
        confoverrides['nitpicky'] = True

    app = None
    try:
        confdir = args.confdir or args.sourcedir
        with patch_docutils(confdir), docutils_namespace():
            app = Sphinx(args.sourcedir, args.confdir, args.outputdir,
                         args.doctreedir, args.builder, confoverrides, status,
                         warning, args.freshenv, args.warningiserror,
                         args.tags, args.verbosity, args.jobs, args.keep_going)
            app.build(args.force_all, filenames)
            return app.statuscode
    except (Exception, KeyboardInterrupt) as exc:
        handle_exception(app, args, exc, error)
        return 2
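For reference, build_main() can also be driven with an explicit argv; a hedged sketch, equivalent to "sphinx-build -b html docs docs/_build/html" (builder name and paths are illustrative):

# Hedged sketch: programmatic invocation with an explicit argv
# (builder name and paths are illustrative).
import sys
from sphinx.cmd.build import build_main

sys.exit(build_main(['-b', 'html', 'docs', 'docs/_build/html']))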
Example #35
    def get_source_parser(self, filetype: str) -> "Type[Parser]":
        try:
            return self.source_parsers[filetype]
        except KeyError as exc:
            raise SphinxError(
                __('Source parser for %s not registered') % filetype) from exc
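get_source_parser() only succeeds for parsers that an extension has registered; a hedged sketch of such a registration (MyParser and the '.my' suffix are hypothetical):

# Hedged sketch: registering a custom parser so the lookup above succeeds.
# MyParser and the '.my' suffix are hypothetical.
from docutils import nodes
from docutils.parsers import Parser

class MyParser(Parser):
    supported = ('myformat',)

    def parse(self, inputstring, document):
        # a real parser would build the doctree from inputstring
        document.append(nodes.paragraph(text=inputstring))

def setup(app):
    app.add_source_suffix('.my', 'myformat')
    app.add_source_parser(MyParser)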
Example #36
class CheckExternalLinksBuilder(Builder):
    """
    Checks for broken external links.
    """
    name = 'linkcheck'
    epilog = __('Look for any errors in the above output or in '
                '%(outdir)s/output.txt')

    def init(self) -> None:
        self.to_ignore = [
            re.compile(x) for x in self.app.config.linkcheck_ignore
        ]
        self.anchors_ignore = [
            re.compile(x) for x in self.app.config.linkcheck_anchors_ignore
        ]
        self.auth = [(re.compile(pattern), auth_info)
                     for pattern, auth_info in self.app.config.linkcheck_auth]
        self.good = set()  # type: Set[str]
        self.broken = {}  # type: Dict[str, str]
        self.redirected = {}  # type: Dict[str, Tuple[str, int]]
        # set a timeout for non-responding servers
        socket.setdefaulttimeout(5.0)
        # create output file
        open(path.join(self.outdir, 'output.txt'), 'w').close()
        # create JSON output file
        open(path.join(self.outdir, 'output.json'), 'w').close()

        # create queues and worker threads
        self.rate_limits = {}  # type: Dict[str, RateLimit]
        self.wqueue = queue.PriorityQueue()  # type: queue.PriorityQueue
        self.rqueue = queue.Queue()  # type: queue.Queue
        self.workers = []  # type: List[threading.Thread]
        for i in range(self.app.config.linkcheck_workers):
            thread = threading.Thread(target=self.check_thread, daemon=True)
            thread.start()
            self.workers.append(thread)

    def check_thread(self) -> None:
        kwargs = {}
        if self.app.config.linkcheck_timeout:
            kwargs['timeout'] = self.app.config.linkcheck_timeout

        def get_request_headers() -> Dict:
            url = urlparse(uri)
            candidates = [
                "%s://%s" % (url.scheme, url.netloc),
                "%s://%s/" % (url.scheme, url.netloc), uri, "*"
            ]

            for u in candidates:
                if u in self.config.linkcheck_request_headers:
                    headers = dict(DEFAULT_REQUEST_HEADERS)
                    headers.update(self.config.linkcheck_request_headers[u])
                    return headers

            return {}

        def check_uri() -> Tuple[str, str, int]:
            # split off anchor
            if '#' in uri:
                req_url, anchor = uri.split('#', 1)
                for rex in self.anchors_ignore:
                    if rex.match(anchor):
                        anchor = None
                        break
            else:
                req_url = uri
                anchor = None

            # handle non-ASCII URIs
            try:
                req_url.encode('ascii')
            except UnicodeError:
                req_url = encode_uri(req_url)

            # Get auth info, if any
            for pattern, auth_info in self.auth:
                if pattern.match(uri):
                    break
            else:
                auth_info = None

            # update request headers for the URL
            kwargs['headers'] = get_request_headers()

            try:
                if anchor and self.app.config.linkcheck_anchors:
                    # Read the whole document and see if #anchor exists
                    response = requests.get(req_url,
                                            stream=True,
                                            config=self.app.config,
                                            auth=auth_info,
                                            **kwargs)
                    response.raise_for_status()
                    found = check_anchor(response, unquote(anchor))

                    if not found:
                        raise Exception(__("Anchor '%s' not found") % anchor)
                else:
                    try:
                        # try a HEAD request first, which should be easier on
                        # the server and the network
                        response = requests.head(req_url,
                                                 allow_redirects=True,
                                                 config=self.app.config,
                                                 auth=auth_info,
                                                 **kwargs)
                        response.raise_for_status()
                    except (HTTPError, TooManyRedirects) as err:
                        if isinstance(err, HTTPError) and err.response.status_code == 429:
                            raise
                        # retry with GET request if that fails, some servers
                        # don't like HEAD requests.
                        response = requests.get(req_url,
                                                stream=True,
                                                config=self.app.config,
                                                auth=auth_info,
                                                **kwargs)
                        response.raise_for_status()
            except HTTPError as err:
                if err.response.status_code == 401:
                    # We'll take "Unauthorized" as working.
                    return 'working', ' - unauthorized', 0
                elif err.response.status_code == 429:
                    next_check = self.limit_rate(err.response)
                    if next_check is not None:
                        self.wqueue.put((next_check, uri, docname, lineno),
                                        False)
                        return 'rate-limited', '', 0
                    return 'broken', str(err), 0
                elif err.response.status_code == 503:
                    # We'll take "Service Unavailable" as ignored.
                    return 'ignored', str(err), 0
                else:
                    return 'broken', str(err), 0
            except Exception as err:
                return 'broken', str(err), 0
            else:
                netloc = urlparse(req_url).netloc
                try:
                    del self.rate_limits[netloc]
                except KeyError:
                    pass
            if response.url.rstrip('/') == req_url.rstrip('/'):
                return 'working', '', 0
            else:
                new_url = response.url
                if anchor:
                    new_url += '#' + anchor
                # history contains any redirects, get last
                if response.history:
                    code = response.history[-1].status_code
                    return 'redirected', new_url, code
                else:
                    return 'redirected', new_url, 0

        def check(docname: str) -> Tuple[str, str, int]:
            # check for various conditions without bothering the network
            if len(uri) == 0 or uri.startswith(('#', 'mailto:', 'tel:')):
                return 'unchecked', '', 0
            elif not uri.startswith(('http:', 'https:')):
                if uri_re.match(uri):
                    # non supported URI schemes (ex. ftp)
                    return 'unchecked', '', 0
                else:
                    srcdir = path.dirname(self.env.doc2path(docname))
                    if path.exists(path.join(srcdir, uri)):
                        return 'working', '', 0
                    else:
                        for rex in self.to_ignore:
                            if rex.match(uri):
                                return 'ignored', '', 0
                        else:
                            self.broken[uri] = ''
                            return 'broken', '', 0
            elif uri in self.good:
                return 'working', 'old', 0
            elif uri in self.broken:
                return 'broken', self.broken[uri], 0
            elif uri in self.redirected:
                return ('redirected', self.redirected[uri][0],
                        self.redirected[uri][1])
            for rex in self.to_ignore:
                if rex.match(uri):
                    return 'ignored', '', 0

            # need to actually check the URI
            for _ in range(self.app.config.linkcheck_retries):
                status, info, code = check_uri()
                if status != "broken":
                    break

            if status == "working":
                self.good.add(uri)
            elif status == "broken":
                self.broken[uri] = info
            elif status == "redirected":
                self.redirected[uri] = (info, code)

            return (status, info, code)

        while True:
            next_check, uri, docname, lineno = self.wqueue.get()
            if uri is None:
                break
            netloc = urlparse(uri).netloc
            try:
                # Refresh rate limit.
                # When there are many links in the queue, workers are all stuck waiting
                # for responses, but the builder keeps queuing. Links in the queue may
                # have been queued before rate limits were discovered.
                next_check = self.rate_limits[netloc].next_check
            except KeyError:
                pass
            if next_check > time.time():
                # Sleep before putting message back in the queue to avoid
                # waking up other threads.
                time.sleep(QUEUE_POLL_SECS)
                self.wqueue.put((next_check, uri, docname, lineno), False)
                self.wqueue.task_done()
                continue
            status, info, code = check(docname)
            if status == 'rate-limited':
                logger.info(
                    darkgray('-rate limited-   ') + uri +
                    darkgray(' | sleeping...'))
            else:
                self.rqueue.put((uri, docname, lineno, status, info, code))
            self.wqueue.task_done()

    def limit_rate(self, response: Response) -> Optional[float]:
        next_check = None
        retry_after = response.headers.get("Retry-After")
        if retry_after:
            try:
                # Integer: time to wait before next attempt.
                delay = float(retry_after)
            except ValueError:
                try:
                    # An HTTP-date: time of next attempt.
                    until = parsedate_to_datetime(retry_after)
                except (TypeError, ValueError):
                    # TypeError: Invalid date format.
                    # ValueError: Invalid date, e.g. Oct 52th.
                    pass
                else:
                    next_check = datetime.timestamp(until)
                    delay = (until -
                             datetime.now(timezone.utc)).total_seconds()
            else:
                next_check = time.time() + delay
        netloc = urlparse(response.url).netloc
        if next_check is None:
            max_delay = self.app.config.linkcheck_rate_limit_timeout
            try:
                rate_limit = self.rate_limits[netloc]
            except KeyError:
                delay = DEFAULT_DELAY
            else:
                last_wait_time = rate_limit.delay
                delay = 2.0 * last_wait_time
                if delay > max_delay and last_wait_time < max_delay:
                    delay = max_delay
            if delay > max_delay:
                return None
            next_check = time.time() + delay
        self.rate_limits[netloc] = RateLimit(delay, next_check)
        return next_check

    def process_result(self, result: Tuple[str, str, int, str, str, int]) -> None:
        uri, docname, lineno, status, info, code = result

        filename = self.env.doc2path(docname, None)
        linkstat = dict(filename=filename,
                        lineno=lineno,
                        status=status,
                        code=code,
                        uri=uri,
                        info=info)
        if status == 'unchecked':
            self.write_linkstat(linkstat)
            return
        if status == 'working' and info == 'old':
            self.write_linkstat(linkstat)
            return
        if lineno:
            logger.info('(line %4d) ', lineno, nonl=True)
        if status == 'ignored':
            if info:
                logger.info(darkgray('-ignored- ') + uri + ': ' + info)
            else:
                logger.info(darkgray('-ignored- ') + uri)
            self.write_linkstat(linkstat)
        elif status == 'local':
            logger.info(darkgray('-local-   ') + uri)
            self.write_entry('local', docname, filename, lineno, uri)
            self.write_linkstat(linkstat)
        elif status == 'working':
            logger.info(darkgreen('ok        ') + uri + info)
            self.write_linkstat(linkstat)
        elif status == 'broken':
            if self.app.quiet or self.app.warningiserror:
                logger.warning(__('broken link: %s (%s)'),
                               uri,
                               info,
                               location=(filename, lineno))
            else:
                logger.info(red('broken    ') + uri + red(' - ' + info))
            self.write_entry('broken', docname, filename, lineno,
                             uri + ': ' + info)
            self.write_linkstat(linkstat)
        elif status == 'redirected':
            try:
                text, color = {
                    301: ('permanently', purple),
                    302: ('with Found', purple),
                    303: ('with See Other', purple),
                    307: ('temporarily', turquoise),
                    308: ('permanently', purple),
                }[code]
            except KeyError:
                text, color = ('with unknown code', purple)
            linkstat['text'] = text
            logger.info(
                color('redirect  ') + uri +
                color(' - ' + text + ' to ' + info))
            self.write_entry('redirected ' + text, docname, filename, lineno,
                             uri + ' to ' + info)
            self.write_linkstat(linkstat)

    def get_target_uri(self, docname: str, typ: str = None) -> str:
        return ''

    def get_outdated_docs(self) -> Set[str]:
        return self.env.found_docs

    def prepare_writing(self, docnames: Set[str]) -> None:
        return

    def write_doc(self, docname: str, doctree: Node) -> None:
        logger.info('')
        n = 0

        # reference nodes
        for refnode in doctree.traverse(nodes.reference):
            if 'refuri' not in refnode:
                continue
            uri = refnode['refuri']
            lineno = node_line_or_0(refnode)
            uri_info = (CHECK_IMMEDIATELY, uri, docname, lineno)
            self.wqueue.put(uri_info, False)
            n += 1

        # image nodes
        for imgnode in doctree.traverse(nodes.image):
            uri = imgnode['candidates'].get('?')
            if uri and '://' in uri:
                lineno = node_line_or_0(imgnode)
                uri_info = (CHECK_IMMEDIATELY, uri, docname, lineno)
                self.wqueue.put(uri_info, False)
                n += 1

        done = 0
        while done < n:
            self.process_result(self.rqueue.get())
            done += 1

        if self.broken:
            self.app.statuscode = 1

    def write_entry(self, what: str, docname: str, filename: str, line: int,
                    uri: str) -> None:
        with open(path.join(self.outdir, 'output.txt'), 'a') as output:
            output.write("%s:%s: [%s] %s\n" % (filename, line, what, uri))

    def write_linkstat(self, data: dict) -> None:
        with open(path.join(self.outdir, 'output.json'), 'a') as output:
            output.write(json.dumps(data))
            output.write('\n')

    def finish(self) -> None:
        self.wqueue.join()
        # Shutdown threads.
        for worker in self.workers:
            self.wqueue.put((CHECK_IMMEDIATELY, None, None, None), False)
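The builder above relies on a few module-level names that fall outside the snippet; reconstructed from their usage here (exact values are assumptions):

# Reconstructed from usage above -- the exact values are assumptions.
from typing import NamedTuple

CHECK_IMMEDIATELY = 0   # queue priority for links to check right away
QUEUE_POLL_SECS = 1     # seconds to sleep before re-queueing a rate-limited link
DEFAULT_DELAY = 60.0    # initial back-off after a 429 without usable Retry-After

class RateLimit(NamedTuple):
    delay: float        # current back-off delay for a host
    next_check: float   # earliest time.time() at which to retry the host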
Example #37
    def build(self, docnames, summary=None, method='update'):
        # type: (Iterable[unicode], unicode, unicode) -> None
        """Main build method.

        First updates the environment, and then calls :meth:`write`.
        """
        if summary:
            logger.info(bold(__('building [%s]') % self.name) + ': ' + summary)

        # while reading, collect all warnings from docutils
        with logging.pending_warnings():
            updated_docnames = set(self.read())

        doccount = len(updated_docnames)
        logger.info(bold(__('looking for now-outdated files... ')), nonl=True)
        for docname in self.env.check_dependents(self.app, updated_docnames):
            updated_docnames.add(docname)
        outdated = len(updated_docnames) - doccount
        if outdated:
            logger.info(__('%d found'), outdated)
        else:
            logger.info(__('none found'))

        if updated_docnames:
            # save the environment
            from sphinx.application import ENV_PICKLE_FILENAME
            logger.info(bold(__('pickling environment... ')), nonl=True)
            self.env.topickle(path.join(self.doctreedir, ENV_PICKLE_FILENAME))
            logger.info(__('done'))

            # global actions
            self.app.phase = BuildPhase.CONSISTENCY_CHECK
            logger.info(bold(__('checking consistency... ')), nonl=True)
            self.env.check_consistency()
            logger.info(__('done'))
        else:
            if method == 'update' and not docnames:
                logger.info(bold(__('no targets are out of date.')))
                return

        self.app.phase = BuildPhase.RESOLVING

        # filter "docnames" (list of outdated files) by the updated
        # found_docs of the environment; this will remove docs that
        # have since been removed
        if docnames and docnames != ['__all__']:
            docnames = set(docnames) & self.env.found_docs

        # determine if we can write in parallel
        if parallel_available and self.app.parallel > 1 and self.allow_parallel:
            self.parallel_ok = self.app.is_parallel_allowed('write')
        else:
            self.parallel_ok = False

        #  create a task executor to use for misc. "finish-up" tasks
        # if self.parallel_ok:
        #     self.finish_tasks = ParallelTasks(self.app.parallel)
        # else:
        # for now, just execute them serially
        self.finish_tasks = SerialTasks()

        # write all "normal" documents (or everything for some builders)
        self.write(docnames, list(updated_docnames), method)

        # finish (write static files etc.)
        self.finish()

        # wait for all tasks
        self.finish_tasks.join()
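The method argument selects between the usual entry points that funnel into build(); a hedged sketch of that mapping (method names match sphinx's Builder, bodies are approximate):

# Hedged sketch -- approximate wrappers around build() above.
def build_all(self):
    """Force-build every source file."""
    self.build(['__all__'], summary='all source files', method='all')

def build_specific(self, filenames):
    """Build only the named source files."""
    docnames = [self.env.path2doc(filename) for filename in filenames]
    self.build(docnames, method='specific')

def build_update(self):
    """Rebuild only what is out of date (the default)."""
    self.build(list(self.get_outdated_docs()),
               summary='targets that are out of date', method='update')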
Example #38
def main(argv: List[str] = sys.argv[1:]) -> int:
    """Parse and check the command line arguments."""
    sphinx.locale.setlocale(locale.LC_ALL, '')
    sphinx.locale.init_console(os.path.join(package_dir, 'locale'), 'sphinx')

    parser = get_parser()
    args = parser.parse_args(argv)

    rootpath = path.abspath(args.module_path)

    # normalize opts

    if args.header is None:
        args.header = rootpath.split(path.sep)[-1]
    if args.suffix.startswith('.'):
        args.suffix = args.suffix[1:]
    if not path.isdir(rootpath):
        print(__('%s is not a directory.') % rootpath, file=sys.stderr)
        sys.exit(1)
    if not args.dryrun:
        ensuredir(args.destdir)
    excludes = [path.abspath(exclude) for exclude in args.exclude_pattern]
    modules = recurse_tree(rootpath, excludes, args, args.templatedir)

    if args.full:
        from sphinx.cmd import quickstart as qs
        modules.sort()
        prev_module = ''
        text = ''
        for module in modules:
            if module.startswith(prev_module + '.'):
                continue
            prev_module = module
            text += '   %s\n' % module
        d = {
            'path': args.destdir,
            'sep': False,
            'dot': '_',
            'project': args.header,
            'author': args.author or 'Author',
            'version': args.version or '',
            'release': args.release or args.version or '',
            'suffix': '.' + args.suffix,
            'master': 'index',
            'epub': True,
            'extensions': ['sphinx.ext.autodoc', 'sphinx.ext.viewcode',
                           'sphinx.ext.todo'],
            'makefile': True,
            'batchfile': True,
            'make_mode': True,
            'mastertocmaxdepth': args.maxdepth,
            'mastertoctree': text,
            'language': 'en',
            'module_path': rootpath,
            'append_syspath': args.append_syspath,
        }
        if args.extensions:
            d['extensions'].extend(args.extensions)
        if args.quiet:
            d['quiet'] = True

        for ext in d['extensions'][:]:
            if ',' in ext:
                d['extensions'].remove(ext)
                d['extensions'].extend(ext.split(','))

        if not args.dryrun:
            qs.generate(d,
                        silent=True,
                        overwrite=args.force,
                        templatedir=args.templatedir)
    elif args.tocfile:
        create_modules_toc_file(modules, args, args.tocfile, args.templatedir)

    return 0
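A hedged sketch of driving this entry point programmatically, mirroring a typical sphinx-apidoc run (package name and output path are illustrative):

# Hedged sketch: equivalent to "sphinx-apidoc --full -o docs/api mypackage".
from sphinx.ext.apidoc import main

main(['--full', '-o', 'docs/api', 'mypackage'])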
Example #39
class CheckExternalLinksBuilder(Builder):
    """
    Checks for broken external links.
    """
    name = 'linkcheck'
    epilog = __('Look for any errors in the above output or in '
                '%(outdir)s/output.txt')

    def init(self):
        # type: () -> None
        self.to_ignore = [re.compile(x) for x in self.app.config.linkcheck_ignore]
        self.anchors_ignore = [re.compile(x)
                               for x in self.app.config.linkcheck_anchors_ignore]
        self.good = set()       # type: Set[str]
        self.broken = {}        # type: Dict[str, str]
        self.redirected = {}    # type: Dict[str, Tuple[str, int]]
        # set a timeout for non-responding servers
        socket.setdefaulttimeout(5.0)
        # create output file
        open(path.join(self.outdir, 'output.txt'), 'w').close()

        # create queues and worker threads
        self.wqueue = queue.Queue()  # type: queue.Queue
        self.rqueue = queue.Queue()  # type: queue.Queue
        self.workers = []  # type: List[threading.Thread]
        for i in range(self.app.config.linkcheck_workers):
            thread = threading.Thread(target=self.check_thread, daemon=True)
            thread.start()
            self.workers.append(thread)

    def check_thread(self):
        # type: () -> None
        kwargs = {
            'allow_redirects': True,
            'headers': {
                'Accept': 'text/html,application/xhtml+xml;q=0.9,*/*;q=0.8',
                'User-Agent': requests.useragent_header[0][1],
            },
        }
        if self.app.config.linkcheck_timeout:
            kwargs['timeout'] = self.app.config.linkcheck_timeout

        def check_uri():
            # type: () -> Tuple[str, str, int]
            # split off anchor
            if '#' in uri:
                req_url, anchor = uri.split('#', 1)
                for rex in self.anchors_ignore:
                    if rex.match(anchor):
                        anchor = None
                        break
            else:
                req_url = uri
                anchor = None

            # handle non-ASCII URIs
            try:
                req_url.encode('ascii')
            except UnicodeError:
                req_url = encode_uri(req_url)

            try:
                if anchor and self.app.config.linkcheck_anchors:
                    # Read the whole document and see if #anchor exists
                    response = requests.get(req_url, stream=True, config=self.app.config,
                                            **kwargs)
                    found = check_anchor(response, unquote(anchor))

                    if not found:
                        raise Exception(__("Anchor '%s' not found") % anchor)
                else:
                    try:
                        # try a HEAD request first, which should be easier on
                        # the server and the network
                        response = requests.head(req_url, config=self.app.config, **kwargs)
                        response.raise_for_status()
                    except HTTPError:
                        # retry with GET request if that fails, some servers
                        # don't like HEAD requests.
                        response = requests.get(req_url, stream=True, config=self.app.config,
                                                **kwargs)
                        response.raise_for_status()
            except HTTPError as err:
                if err.response.status_code == 401:
                    # We'll take "Unauthorized" as working.
                    return 'working', ' - unauthorized', 0
                else:
                    return 'broken', str(err), 0
            except Exception as err:
                if is_ssl_error(err):
                    return 'ignored', str(err), 0
                else:
                    return 'broken', str(err), 0
            if response.url.rstrip('/') == req_url.rstrip('/'):
                return 'working', '', 0
            else:
                new_url = response.url
                if anchor:
                    new_url += '#' + anchor
                # history contains any redirects, get last
                if response.history:
                    code = response.history[-1].status_code
                    return 'redirected', new_url, code
                else:
                    return 'redirected', new_url, 0

        def check():
            # type: () -> Tuple[str, str, int]
            # check for various conditions without bothering the network
            if len(uri) == 0 or uri.startswith(('#', 'mailto:', 'ftp:')):
                return 'unchecked', '', 0
            elif not uri.startswith(('http:', 'https:')):
                return 'local', '', 0
            elif uri in self.good:
                return 'working', 'old', 0
            elif uri in self.broken:
                return 'broken', self.broken[uri], 0
            elif uri in self.redirected:
                return 'redirected', self.redirected[uri][0], self.redirected[uri][1]
            for rex in self.to_ignore:
                if rex.match(uri):
                    return 'ignored', '', 0

            # need to actually check the URI
            for _ in range(self.app.config.linkcheck_retries):
                status, info, code = check_uri()
                if status != "broken":
                    break

            if status == "working":
                self.good.add(uri)
            elif status == "broken":
                self.broken[uri] = info
            elif status == "redirected":
                self.redirected[uri] = (info, code)

            return (status, info, code)

        while True:
            uri, docname, lineno = self.wqueue.get()
            if uri is None:
                break
            status, info, code = check()
            self.rqueue.put((uri, docname, lineno, status, info, code))

    def process_result(self, result):
        # type: (Tuple[str, str, int, str, str, int]) -> None
        uri, docname, lineno, status, info, code = result
        if status == 'unchecked':
            return
        if status == 'working' and info == 'old':
            return
        if lineno:
            logger.info('(line %4d) ', lineno, nonl=True)
        if status == 'ignored':
            if info:
                logger.info(darkgray('-ignored- ') + uri + ': ' + info)
            else:
                logger.info(darkgray('-ignored- ') + uri)
        elif status == 'local':
            logger.info(darkgray('-local-   ') + uri)
            self.write_entry('local', docname, lineno, uri)
        elif status == 'working':
            logger.info(darkgreen('ok        ') + uri + info)
        elif status == 'broken':
            self.write_entry('broken', docname, lineno, uri + ': ' + info)
            if self.app.quiet or self.app.warningiserror:
                logger.warning(__('broken link: %s (%s)'), uri, info,
                               location=(self.env.doc2path(docname), lineno))
            else:
                logger.info(red('broken    ') + uri + red(' - ' + info))
        elif status == 'redirected':
            text, color = {
                301: ('permanently', darkred),
                302: ('with Found', purple),
                303: ('with See Other', purple),
                307: ('temporarily', turquoise),
                0:   ('with unknown code', purple),
            }[code]
            self.write_entry('redirected ' + text, docname, lineno,
                             uri + ' to ' + info)
            logger.info(color('redirect  ') + uri + color(' - ' + text + ' to ' + info))

    def get_target_uri(self, docname, typ=None):
        # type: (str, str) -> str
        return ''

    def get_outdated_docs(self):
        # type: () -> Set[str]
        return self.env.found_docs

    def prepare_writing(self, docnames):
        # type: (Set[str]) -> None
        return

    def write_doc(self, docname, doctree):
        # type: (str, nodes.Node) -> None
        logger.info('')
        n = 0

        # reference nodes
        for refnode in doctree.traverse(nodes.reference):
            if 'refuri' not in refnode:
                continue
            uri = refnode['refuri']
            lineno = get_node_line(refnode)
            self.wqueue.put((uri, docname, lineno), False)
            n += 1

        # image nodes
        for imgnode in doctree.traverse(nodes.image):
            uri = imgnode['candidates'].get('?')
            if uri and '://' in uri:
                lineno = get_node_line(imgnode)
                self.wqueue.put((uri, docname, lineno), False)
                n += 1

        done = 0
        while done < n:
            self.process_result(self.rqueue.get())
            done += 1

        if self.broken:
            self.app.statuscode = 1

    def write_entry(self, what, docname, line, uri):
        # type: (str, str, int, str) -> None
        with open(path.join(self.outdir, 'output.txt'), 'a', encoding='utf-8') as output:
            output.write("%s:%s: [%s] %s\n" % (self.env.doc2path(docname, None),
                                               line, what, uri))

    def finish(self):
        # type: () -> None
        for worker in self.workers:
            self.wqueue.put((None, None, None), False)
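Both linkcheck variants above read the same family of configuration values; a hedged conf.py sketch (option names are taken from the snippets, values are illustrative):

# conf.py -- option names come from the snippets above; values illustrative.
linkcheck_ignore = [r'https://localhost(:\d+)?/']
linkcheck_anchors_ignore = ['^!']
linkcheck_anchors = True              # verify #fragment anchors via GET
linkcheck_timeout = 30                # per-request timeout in seconds
linkcheck_workers = 5                 # number of checker threads
linkcheck_retries = 2                 # attempts before a link counts as broken
linkcheck_rate_limit_timeout = 300.0  # stop backing off after this many seconds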
Example #40
    'file_insertion_enabled': True,
    'smartquotes_locales': [],
}  # type: Dict[str, Any]

# This is increased every time an environment attribute is added
# or changed to properly invalidate pickle files.
ENV_VERSION = 56

# config status
CONFIG_OK = 1
CONFIG_NEW = 2
CONFIG_CHANGED = 3
CONFIG_EXTENSIONS_CHANGED = 4

CONFIG_CHANGED_REASON = {
    CONFIG_NEW: __('new config'),
    CONFIG_CHANGED: __('config changed'),
    CONFIG_EXTENSIONS_CHANGED: __('extensions changed'),
}

versioning_conditions = {
    'none': False,
    'text': is_translatable,
}  # type: Dict[str, Union[bool, Callable]]


class NoUri(Exception):
    """Raised by get_relative_uri if there is no URI available."""
    pass
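
NoUri is raised by builders whose get_relative_uri() cannot produce a link (e.g. latex); a hedged sketch of the usual defensive pattern (safe_relative_uri is a hypothetical helper):

# Hedged sketch -- safe_relative_uri is hypothetical.
def safe_relative_uri(builder, fromdocname, todocname):
    try:
        return builder.get_relative_uri(fromdocname, todocname)
    except NoUri:
        return None  # this builder cannot link to the target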

Example #41
    def build_py_coverage(self) -> None:
        objects = self.env.domaindata['py']['objects']
        modules = self.env.domaindata['py']['modules']

        skip_undoc = self.config.coverage_skip_undoc_in_source

        for mod_name in modules:
            ignore = False
            for exp in self.mod_ignorexps:
                if exp.match(mod_name):
                    ignore = True
                    break
            if ignore or self.ignore_pyobj(mod_name):
                continue

            try:
                mod = __import__(mod_name, fromlist=['foo'])
            except ImportError as err:
                logger.warning(__('module %s could not be imported: %s'), mod_name, err)
                self.py_undoc[mod_name] = {'error': err}
                continue

            funcs = []
            classes = {}  # type: Dict[str, List[str]]

            for name, obj in inspect.getmembers(mod):
                # diverse module attributes are ignored:
                if name[0] == '_':
                    # begins in an underscore
                    continue
                if not hasattr(obj, '__module__'):
                    # cannot be attributed to a module
                    continue
                if obj.__module__ != mod_name:
                    # is not defined in this module
                    continue

                full_name = '%s.%s' % (mod_name, name)
                if self.ignore_pyobj(full_name):
                    continue

                if inspect.isfunction(obj):
                    if full_name not in objects:
                        for exp in self.fun_ignorexps:
                            if exp.match(name):
                                break
                        else:
                            if skip_undoc and not obj.__doc__:
                                continue
                            funcs.append(name)
                elif inspect.isclass(obj):
                    for exp in self.cls_ignorexps:
                        if exp.match(name):
                            break
                    else:
                        if full_name not in objects:
                            if skip_undoc and not obj.__doc__:
                                continue
                            # not documented at all
                            classes[name] = []
                            continue

                        attrs = []  # type: List[str]

                        for attr_name in dir(obj):
                            if attr_name not in obj.__dict__:
                                continue
                            try:
                                attr = safe_getattr(obj, attr_name)
                            except AttributeError:
                                continue
                            if not (inspect.ismethod(attr) or
                                    inspect.isfunction(attr)):
                                continue
                            if attr_name[0] == '_':
                                # starts with an underscore, ignore it
                                continue
                            if skip_undoc and not attr.__doc__:
                                # skip methods without docstring if wished
                                continue
                            full_attr_name = '%s.%s' % (full_name, attr_name)
                            if self.ignore_pyobj(full_attr_name):
                                continue
                            if full_attr_name not in objects:
                                attrs.append(attr_name)
                        if attrs:
                            # some attributes are undocumented
                            classes[name] = attrs

            self.py_undoc[mod_name] = {'funcs': funcs, 'classes': classes}
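The coverage pass above consults several config values; a hedged conf.py sketch (option names come from the snippets, values are illustrative):

# conf.py -- names come from the snippets; values are illustrative.
extensions = ['sphinx.ext.coverage']
coverage_skip_undoc_in_source = True
coverage_ignore_modules = [r'mypackage\.vendor\..*']
coverage_ignore_classes = []
coverage_ignore_functions = ['main']
coverage_ignore_pyobjects = [r'\.Internal']
coverage_write_headline = True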
Example #42
class MessageCatalogBuilder(I18nBuilder):
    """
    Builds gettext-style message catalogs (.pot files).
    """
    name = 'gettext'
    epilog = __('The message catalogs are in %(outdir)s.')

    def init(self) -> None:
        super().init()
        self.create_template_bridge()
        self.templates.init(self)

    def _collect_templates(self) -> Set[str]:
        template_files = set()
        for template_path in self.config.templates_path:
            tmpl_abs_path = path.join(self.app.srcdir, template_path)
            for dirpath, dirs, files in walk(tmpl_abs_path):
                for fn in files:
                    if fn.endswith('.html'):
                        filename = canon_path(path.join(dirpath, fn))
                        template_files.add(filename)
        return template_files

    def _extract_from_template(self) -> None:
        files = list(self._collect_templates())
        files.sort()
        logger.info(bold(__('building [%s]: ') % self.name), nonl=True)
        logger.info(__('targets for %d template files'), len(files))

        extract_translations = self.templates.environment.extract_translations

        for template in status_iterator(files,
                                        __('reading templates... '), "purple",
                                        len(files), self.app.verbosity):
            try:
                with open(template, encoding='utf-8') as f:
                    context = f.read()
                for line, meth, msg in extract_translations(context):
                    origin = MsgOrigin(template, line)
                    self.catalogs['sphinx'].add(msg, origin)
            except Exception as exc:
                raise ThemeError('%s: %r' % (template, exc)) from exc

    def build(self,
              docnames: Iterable[str],
              summary: str = None,
              method: str = 'update') -> None:  # NOQA
        self._extract_from_template()
        super().build(docnames, summary, method)

    def finish(self) -> None:
        super().finish()
        context = {
            'version': self.config.version,
            'copyright': self.config.copyright,
            'project': self.config.project,
            'last_translator': self.config.gettext_last_translator,
            'language_team': self.config.gettext_language_team,
            'ctime': datetime.fromtimestamp(timestamp,
                                            ltz).strftime('%Y-%m-%d %H:%M%z'),
            'display_location': self.config.gettext_location,
            'display_uuid': self.config.gettext_uuid,
        }
        for textdomain, catalog in status_iterator(
                self.catalogs.items(), __("writing message catalogs... "),
                "darkgreen", len(self.catalogs), self.app.verbosity,
                lambda textdomain__: textdomain__[0]):
            # noop if config.gettext_compact is set
            ensuredir(path.join(self.outdir, path.dirname(textdomain)))

            context['messages'] = list(catalog)
            content = GettextRenderer(outdir=self.outdir).render(
                'message.pot_t', context)

            pofn = path.join(self.outdir, textdomain + '.pot')
            if should_write(pofn, content):
                with open(pofn, 'w', encoding='utf-8') as pofile:
                    pofile.write(content)
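A hedged conf.py sketch of the gettext-related values the builder reads (names from the snippet, values illustrative):

# conf.py -- names come from the snippet above; values are illustrative.
gettext_compact = True       # one catalog per directory rather than per document
gettext_location = True      # emit "#: path:line" location comments
gettext_uuid = False         # emit uuid information for each message
gettext_last_translator = 'FULL NAME <EMAIL@ADDRESS>'
gettext_language_team = 'LANGUAGE <LL@li.org>'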
Example #43
def render_dot_html(self: HTMLTranslator,
                    node: graphviz,
                    code: str,
                    options: Dict,
                    prefix: str = 'graphviz',
                    imgcls: str = None,
                    alt: str = None,
                    filename: str = None) -> Tuple[str, str]:
    format = self.builder.config.graphviz_output_format
    try:
        if format not in ('png', 'svg'):
            raise GraphvizError(
                __("graphviz_output_format must be one of 'png', "
                   "'svg', but is %r") % format)
        fname, outfn = render_dot(self, code, options, format, prefix,
                                  filename)
    except GraphvizError as exc:
        logger.warning(__('dot code %r: %s'), code, exc)
        raise nodes.SkipNode from exc

    classes = [imgcls, 'graphviz'] + node.get('classes', [])
    imgcls = ' '.join(filter(None, classes))

    if fname is None:
        self.body.append(self.encode(code))
    else:
        if alt is None:
            alt = node.get('alt', self.encode(code).strip())
        if 'align' in node:
            self.body.append('<div align="%s" class="align-%s">' %
                             (node['align'], node['align']))
        if format == 'svg':
            self.body.append('<div class="graphviz">')
            self.body.append(
                '<object data="%s" type="image/svg+xml" class="%s">\n' %
                (fname, imgcls))
            self.body.append('<p class="warning">%s</p>' % alt)
            self.body.append('</object></div>\n')
        else:
            with open(outfn + '.map', encoding='utf-8') as mapfile:
                imgmap = ClickableMapDefinition(outfn + '.map',
                                                mapfile.read(),
                                                dot=code)
                if imgmap.clickable:
                    # has a map
                    self.body.append('<div class="graphviz">')
                    self.body.append(
                        '<img src="%s" alt="%s" usemap="#%s" class="%s" />' %
                        (fname, alt, imgmap.id, imgcls))
                    self.body.append('</div>\n')
                    self.body.append(imgmap.generate_clickable_map())
                else:
                    # nothing in image map
                    self.body.append('<div class="graphviz">')
                    self.body.append('<img src="%s" alt="%s" class="%s" />' %
                                     (fname, alt, imgcls))
                    self.body.append('</div>\n')
        if 'align' in node:
            self.body.append('</div>\n')

    raise nodes.SkipNode
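render_dot_html() rejects any output format other than 'png' or 'svg'; a minimal conf.py sketch for enabling it:

# conf.py -- graphviz_output_format is validated by render_dot_html() above.
extensions = ['sphinx.ext.graphviz']
graphviz_output_format = 'svg'   # must be 'png' or 'svg'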
Example #44
class CoverageBuilder(Builder):
    """
    Evaluates coverage of code in the documentation.
    """
    name = 'coverage'
    epilog = __('Testing of coverage in the sources finished, look at the '
                'results in %(outdir)s' + path.sep + 'python.txt.')

    def init(self) -> None:
        self.c_sourcefiles = []  # type: List[str]
        for pattern in self.config.coverage_c_path:
            pattern = path.join(self.srcdir, pattern)
            self.c_sourcefiles.extend(glob.glob(pattern))

        self.c_regexes = []  # type: List[Tuple[str, Pattern]]
        for (name, exp) in self.config.coverage_c_regexes.items():
            try:
                self.c_regexes.append((name, re.compile(exp)))
            except Exception:
                logger.warning(__('invalid regex %r in coverage_c_regexes'), exp)

        self.c_ignorexps = {}  # type: Dict[str, List[Pattern]]
        for (name, exps) in self.config.coverage_ignore_c_items.items():
            self.c_ignorexps[name] = compile_regex_list('coverage_ignore_c_items',
                                                        exps)
        self.mod_ignorexps = compile_regex_list('coverage_ignore_modules',
                                                self.config.coverage_ignore_modules)
        self.cls_ignorexps = compile_regex_list('coverage_ignore_classes',
                                                self.config.coverage_ignore_classes)
        self.fun_ignorexps = compile_regex_list('coverage_ignore_functions',
                                                self.config.coverage_ignore_functions)
        self.py_ignorexps = compile_regex_list('coverage_ignore_pyobjects',
                                               self.config.coverage_ignore_pyobjects)

    def get_outdated_docs(self) -> str:
        return 'coverage overview'

    def write(self, *ignored) -> None:
        self.py_undoc = {}  # type: Dict[str, Dict[str, Any]]
        self.build_py_coverage()
        self.write_py_coverage()

        self.c_undoc = {}  # type: Dict[str, Set[Tuple[str, str]]]
        self.build_c_coverage()
        self.write_c_coverage()

    def build_c_coverage(self) -> None:
        # Fetch all the info from the header files
        c_objects = self.env.domaindata['c']['objects']
        for filename in self.c_sourcefiles:
            undoc = set()  # type: Set[Tuple[str, str]]
            with open(filename) as f:
                for line in f:
                    for key, regex in self.c_regexes:
                        match = regex.match(line)
                        if match:
                            name = match.groups()[0]
                            if name not in c_objects:
                                for exp in self.c_ignorexps.get(key, []):
                                    if exp.match(name):
                                        break
                                else:
                                    undoc.add((key, name))
                            continue
            if undoc:
                self.c_undoc[filename] = undoc

    def write_c_coverage(self) -> None:
        output_file = path.join(self.outdir, 'c.txt')
        with open(output_file, 'w') as op:
            if self.config.coverage_write_headline:
                write_header(op, 'Undocumented C API elements', '=')
            op.write('\n')

            for filename, undoc in self.c_undoc.items():
                write_header(op, filename)
                for typ, name in sorted(undoc):
                    op.write(' * %-50s [%9s]\n' % (name, typ))
                op.write('\n')

    def ignore_pyobj(self, full_name: str) -> bool:
        for exp in self.py_ignorexps:
            if exp.search(full_name):
                return True
        return False

    def build_py_coverage(self) -> None:
        objects = self.env.domaindata['py']['objects']
        modules = self.env.domaindata['py']['modules']

        skip_undoc = self.config.coverage_skip_undoc_in_source

        for mod_name in modules:
            ignore = False
            for exp in self.mod_ignorexps:
                if exp.match(mod_name):
                    ignore = True
                    break
            if ignore or self.ignore_pyobj(mod_name):
                continue

            try:
                mod = __import__(mod_name, fromlist=['foo'])
            except ImportError as err:
                logger.warning(__('module %s could not be imported: %s'), mod_name, err)
                self.py_undoc[mod_name] = {'error': err}
                continue

            funcs = []
            classes = {}  # type: Dict[str, List[str]]

            for name, obj in inspect.getmembers(mod):
                # diverse module attributes are ignored:
                if name[0] == '_':
                    # begins in an underscore
                    continue
                if not hasattr(obj, '__module__'):
                    # cannot be attributed to a module
                    continue
                if obj.__module__ != mod_name:
                    # is not defined in this module
                    continue

                full_name = '%s.%s' % (mod_name, name)
                if self.ignore_pyobj(full_name):
                    continue

                if inspect.isfunction(obj):
                    if full_name not in objects:
                        for exp in self.fun_ignorexps:
                            if exp.match(name):
                                break
                        else:
                            if skip_undoc and not obj.__doc__:
                                continue
                            funcs.append(name)
                elif inspect.isclass(obj):
                    for exp in self.cls_ignorexps:
                        if exp.match(name):
                            break
                    else:
                        if full_name not in objects:
                            if skip_undoc and not obj.__doc__:
                                continue
                            # not documented at all
                            classes[name] = []
                            continue

                        attrs = []  # type: List[str]

                        for attr_name in dir(obj):
                            if attr_name not in obj.__dict__:
                                continue
                            try:
                                attr = safe_getattr(obj, attr_name)
                            except AttributeError:
                                continue
                            if not (inspect.ismethod(attr) or
                                    inspect.isfunction(attr)):
                                continue
                            if attr_name[0] == '_':
                                # starts with an underscore, ignore it
                                continue
                            if skip_undoc and not attr.__doc__:
                                # skip methods without docstring if wished
                                continue
                            full_attr_name = '%s.%s' % (full_name, attr_name)
                            if self.ignore_pyobj(full_attr_name):
                                continue
                            if full_attr_name not in objects:
                                attrs.append(attr_name)
                        if attrs:
                            # some attributes are undocumented
                            classes[name] = attrs

            self.py_undoc[mod_name] = {'funcs': funcs, 'classes': classes}

    def write_py_coverage(self) -> None:
        output_file = path.join(self.outdir, 'python.txt')
        failed = []
        with open(output_file, 'w') as op:
            if self.config.coverage_write_headline:
                write_header(op, 'Undocumented Python objects', '=')
            keys = sorted(self.py_undoc.keys())
            for name in keys:
                undoc = self.py_undoc[name]
                if 'error' in undoc:
                    failed.append((name, undoc['error']))
                else:
                    if not undoc['classes'] and not undoc['funcs']:
                        continue

                    write_header(op, name)
                    if undoc['funcs']:
                        op.write('Functions:\n')
                        op.writelines(' * %s\n' % x for x in undoc['funcs'])
                        op.write('\n')
                    if undoc['classes']:
                        op.write('Classes:\n')
                        for name, methods in sorted(
                                undoc['classes'].items()):
                            if not methods:
                                op.write(' * %s\n' % name)
                            else:
                                op.write(' * %s -- missing methods:\n\n' % name)
                                op.writelines('   - %s\n' % x for x in methods)
                        op.write('\n')

            if failed:
                write_header(op, 'Modules that failed to import')
                op.writelines(' * %s -- %s\n' % x for x in failed)

    def finish(self) -> None:
        # dump the coverage data to a pickle file too
        picklepath = path.join(self.outdir, 'undoc.pickle')
        with open(picklepath, 'wb') as dumpfile:
            pickle.dump((self.py_undoc, self.c_undoc), dumpfile)
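finish() dumps the collected results next to the text reports; a hedged sketch of reading them back (the build output path is illustrative):

# Hedged sketch: loading the pickle written by finish() above.
import pickle

with open('_build/coverage/undoc.pickle', 'rb') as f:  # path illustrative
    py_undoc, c_undoc = pickle.load(f)

for mod_name, info in sorted(py_undoc.items()):
    if 'error' not in info:
        print(mod_name, '- undocumented functions:', info['funcs'])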
Example #45
def get_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(
        usage='%(prog)s [OPTIONS] -o <OUTPUT_PATH> <MODULE_PATH> '
        '[EXCLUDE_PATTERN, ...]',
        epilog=__('For more information, visit <http://sphinx-doc.org/>.'),
        description=__("""
Look recursively in <MODULE_PATH> for Python modules and packages and create
one reST file with automodule directives per package in the <OUTPUT_PATH>.

The <EXCLUDE_PATTERN>s can be file and/or directory patterns that will be
excluded from generation.

Note: By default this script will not overwrite already created files."""))

    parser.add_argument('--version',
                        action='version',
                        dest='show_version',
                        version='%%(prog)s %s' % __display_version__)

    parser.add_argument('module_path', help=__('path to module to document'))
    parser.add_argument('exclude_pattern',
                        nargs='*',
                        help=__('fnmatch-style file and/or directory patterns '
                                'to exclude from generation'))

    parser.add_argument('-o',
                        '--output-dir',
                        action='store',
                        dest='destdir',
                        required=True,
                        help=__('directory to place all output'))
    parser.add_argument(
        '-q',
        action='store_true',
        dest='quiet',
        help=__('no output on stdout, just warnings on stderr'))
    parser.add_argument('-d',
                        '--maxdepth',
                        action='store',
                        dest='maxdepth',
                        type=int,
                        default=4,
                        help=__(
                            'maximum depth of submodules to show in the TOC '
                            '(default: 4)'))
    parser.add_argument('-f',
                        '--force',
                        action='store_true',
                        dest='force',
                        help=__('overwrite existing files'))
    parser.add_argument('-l',
                        '--follow-links',
                        action='store_true',
                        dest='followlinks',
                        default=False,
                        help=__(
                            'follow symbolic links. Powerful when combined '
                            'with collective.recipe.omelette.'))
    parser.add_argument('-n',
                        '--dry-run',
                        action='store_true',
                        dest='dryrun',
                        help=__('run the script without creating files'))
    parser.add_argument(
        '-e',
        '--separate',
        action='store_true',
        dest='separatemodules',
        help=__('put documentation for each module on its own page'))
    parser.add_argument('-P',
                        '--private',
                        action='store_true',
                        dest='includeprivate',
                        help=__('include "_private" modules'))
    parser.add_argument(
        '--tocfile',
        action='store',
        dest='tocfile',
        default='modules',
        help=__("filename of table of contents (default: modules)"))
    parser.add_argument('-T',
                        '--no-toc',
                        action='store_false',
                        dest='tocfile',
                        help=__("don't create a table of contents file"))
    parser.add_argument('-E',
                        '--no-headings',
                        action='store_true',
                        dest='noheadings',
                        help=__("don't create headings for the module/package "
                                "packages (e.g. when the docstrings already "
                                "contain them)"))
    parser.add_argument('-M',
                        '--module-first',
                        action='store_true',
                        dest='modulefirst',
                        help=__('put module documentation before submodule '
                                'documentation'))
    parser.add_argument('--implicit-namespaces',
                        action='store_true',
                        dest='implicit_namespaces',
                        help=__('interpret module paths according to PEP-0420 '
                                'implicit namespaces specification'))
    parser.add_argument('-s',
                        '--suffix',
                        action='store',
                        dest='suffix',
                        default='rst',
                        help=__('file suffix (default: rst)'))
    parser.add_argument(
        '-F',
        '--full',
        action='store_true',
        dest='full',
        help=__('generate a full project with sphinx-quickstart'))
    parser.add_argument(
        '-a',
        '--append-syspath',
        action='store_true',
        dest='append_syspath',
        help=__('append module_path to sys.path, used when --full is given'))
    parser.add_argument('-H',
                        '--doc-project',
                        action='store',
                        dest='header',
                        help=__('project name (default: root module name)'))
    parser.add_argument(
        '-A',
        '--doc-author',
        action='store',
        dest='author',
        help=__('project author(s), used when --full is given'))
    parser.add_argument('-V',
                        '--doc-version',
                        action='store',
                        dest='version',
                        help=__('project version, used when --full is given'))
    parser.add_argument('-R',
                        '--doc-release',
                        action='store',
                        dest='release',
                        help=__('project release, used when --full is given, '
                                'defaults to --doc-version'))

    group = parser.add_argument_group(__('extension options'))
    group.add_argument('--extensions',
                       metavar='EXTENSIONS',
                       dest='extensions',
                       action='append',
                       help=__('enable arbitrary extensions'))
    for ext in EXTENSIONS:
        group.add_argument('--ext-%s' % ext,
                           action='append_const',
                           const='sphinx.ext.%s' % ext,
                           dest='extensions',
                           help=__('enable %s extension') % ext)

    group = parser.add_argument_group(__('Project templating'))
    group.add_argument('-t',
                       '--templatedir',
                       metavar='TEMPLATEDIR',
                       dest='templatedir',
                       help=__('template directory for template files'))

    return parser
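
A quick way to exercise this parser (a sketch; it assumes the surrounding function is exposed as get_parser(), as it is in sphinx-apidoc, and the package names are made up):

parser = get_parser()
args = parser.parse_args(['-o', 'docs/api', 'mypackage', 'mypackage/tests'])
print(args.destdir, args.module_path, args.exclude_pattern)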
Example #46
def collect_pages(app: Sphinx) -> Iterator[Tuple[str, Dict[str, Any], str]]:
    env = app.builder.env
    if not hasattr(env, '_viewcode_modules'):
        return
    highlighter = app.builder.highlighter  # type: ignore
    urito = app.builder.get_relative_uri

    modnames = set(env._viewcode_modules)  # type: ignore

    for modname, entry in status_iterator(
            sorted(env._viewcode_modules.items()),  # type: ignore
            __('highlighting module code... '), "blue",
            len(env._viewcode_modules),  # type: ignore
            app.verbosity, lambda x: x[0]):
        if not entry:
            continue
        code, tags, used, refname = entry
        # construct a page name for the highlighted source
        pagename = '_modules/' + modname.replace('.', '/')
        # highlight the source using the builder's highlighter
        if env.config.highlight_language in ('python3', 'default', 'none'):
            lexer = env.config.highlight_language
        else:
            lexer = 'python'
        highlighted = highlighter.highlight_block(code, lexer, linenos=False)
        # split the code into lines
        lines = highlighted.splitlines()
        # split off wrap markup from the first line of the actual code
        before, after = lines[0].split('<pre>')
        lines[0:1] = [before + '<pre>', after]
        # nothing to do for the last line; it always starts with </pre> anyway
        # now that we have code lines (starting at index 1), insert anchors for
        # the collected tags (HACK: this only works if the tag boundaries are
        # properly nested!)
        maxindex = len(lines) - 1
        for name, docname in used.items():
            type, start, end = tags[name]
            backlink = urito(pagename, docname) + '#' + refname + '.' + name
            lines[start] = (
                '<div class="viewcode-block" id="%s"><a class="viewcode-back" '
                'href="%s">%s</a>' % (name, backlink, _('[docs]')) +
                lines[start])
            lines[min(end, maxindex)] += '</div>'
        # try to find parents (for submodules)
        parents = []
        parent = modname
        while '.' in parent:
            parent = parent.rsplit('.', 1)[0]
            if parent in modnames:
                parents.append({
                    'link': urito(pagename, '_modules/' +
                                  parent.replace('.', '/')),
                    'title': parent})
        parents.append({'link': urito(pagename, '_modules/index'),
                        'title': _('Module code')})
        parents.reverse()
        # putting it all together
        context = {
            'parents': parents,
            'title': modname,
            'body': (_('<h1>Source code for %s</h1>') % modname +
                     '\n'.join(lines)),
        }
        yield (pagename, context, 'page.html')

    if not modnames:
        return

    html = ['\n']
    # the stack logic is needed for using nested lists for submodules
    stack = ['']
    for modname in sorted(modnames):
        if modname.startswith(stack[-1]):
            stack.append(modname + '.')
            html.append('<ul>')
        else:
            stack.pop()
            while not modname.startswith(stack[-1]):
                stack.pop()
                html.append('</ul>')
            stack.append(modname + '.')
        html.append('<li><a href="%s">%s</a></li>\n' % (
            urito('_modules/index', '_modules/' + modname.replace('.', '/')),
            modname))
    html.append('</ul>' * (len(stack) - 1))
    context = {
        'title': _('Overview: module code'),
        'body': (_('<h1>All modules for which code is available</h1>') +
                 ''.join(html)),
    }

    yield ('_modules/index', context, 'page.html')
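
For context, sphinx.ext.viewcode wires this generator into the HTML build through the html-collect-pages event; a minimal setup() sketch:

def setup(app):
    app.connect('html-collect-pages', collect_pages)
    return {'parallel_read_safe': True}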
Example #47
        def check_uri():
            # type: () -> Tuple[str, str, int]
            # split off anchor
            if '#' in uri:
                req_url, anchor = uri.split('#', 1)
                for rex in self.anchors_ignore:
                    if rex.match(anchor):
                        anchor = None
                        break
            else:
                req_url = uri
                anchor = None

            # handle non-ASCII URIs
            try:
                req_url.encode('ascii')
            except UnicodeError:
                req_url = encode_uri(req_url)

            try:
                if anchor and self.app.config.linkcheck_anchors:
                    # Read the whole document and see if #anchor exists
                    response = requests.get(req_url, stream=True, config=self.app.config,
                                            **kwargs)
                    found = check_anchor(response, unquote(anchor))

                    if not found:
                        raise Exception(__("Anchor '%s' not found") % anchor)
                else:
                    try:
                        # try a HEAD request first, which should be easier on
                        # the server and the network
                        response = requests.head(req_url, config=self.app.config, **kwargs)
                        response.raise_for_status()
                    except HTTPError:
                        # retry with GET request if that fails, some servers
                        # don't like HEAD requests.
                        response = requests.get(req_url, stream=True, config=self.app.config,
                                                **kwargs)
                        response.raise_for_status()
            except HTTPError as err:
                if err.response.status_code == 401:
                    # We'll take "Unauthorized" as working.
                    return 'working', ' - unauthorized', 0
                else:
                    return 'broken', str(err), 0
            except Exception as err:
                if is_ssl_error(err):
                    return 'ignored', str(err), 0
                else:
                    return 'broken', str(err), 0
            if response.url.rstrip('/') == req_url.rstrip('/'):
                return 'working', '', 0
            else:
                new_url = response.url
                if anchor:
                    new_url += '#' + anchor
                # history contains any redirects, get last
                if response.history:
                    code = response.history[-1].status_code
                    return 'redirected', new_url, code
                else:
                    return 'redirected', new_url, 0
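
The anchors_ignore patterns consulted above are compiled from the linkcheck configuration; a conf.py sketch showing the relevant options with their default values:

linkcheck_anchors = True            # verify that '#fragment' targets exist in the page
linkcheck_anchors_ignore = ['^!']   # regexes; matching anchors are skipped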
Example #48
def load_mappings(app):
    # type: (Sphinx) -> None
    """Load all intersphinx mappings into the environment."""
    now = int(time.time())
    cache_time = now - app.config.intersphinx_cache_limit * 86400
    inventories = InventoryAdapter(app.builder.env)
    update = False
    for key, value in app.config.intersphinx_mapping.items():
        name = None  # type: unicode
        uri = None  # type: unicode
        inv = None  # type: Union[unicode, Tuple[unicode, ...]]

        if isinstance(value, (list, tuple)):
            # new format
            name, (uri, inv) = key, value
            if not isinstance(name, string_types):
                logger.warning(
                    __('intersphinx identifier %r is not string. Ignored'),
                    name)
                continue
        else:
            # old format, no name
            name, uri, inv = None, key, value
        # we can safely assume that the uri<->inv mapping is not changed
        # during partial rebuilds since a changed intersphinx_mapping
        # setting will cause a full environment reread
        if not isinstance(inv, tuple):
            invs = (inv, )
        else:
            invs = inv  # type: ignore

        failures = []
        for inv in invs:
            if not inv:
                inv = posixpath.join(uri, INVENTORY_FILENAME)
            # decide whether the inventory must be read: always read local
            # files; remote ones only if the cache time is expired
            if '://' not in inv or uri not in inventories.cache \
                    or inventories.cache[uri][1] < cache_time:
                safe_inv_url = _get_safe_url(inv)  # type: ignore
                logger.info('loading intersphinx inventory from %s...',
                            safe_inv_url)
                try:
                    invdata = fetch_inventory(app, uri, inv)
                except Exception as err:
                    failures.append(err.args)
                    continue

                if invdata:
                    inventories.cache[uri] = (name, now, invdata)
                    update = True
                    break

        if failures == []:
            pass
        elif len(failures) < len(invs):
            logger.info("encountered some issues with some of the inventories,"
                        " but they had working alternatives:")
            for fail in failures:
                logger.info(*fail)
        else:
            logger.warning(
                __("failed to reach any of the inventories "
                   "with the following issues:"))
            for fail in failures:
                logger.warning(*fail)

    if update:
        inventories.clear()

        # Duplicate values in different inventories will shadow each
        # other; which one will override which can vary between builds
        # since they are specified using an unordered dict.  To make
        # it more consistent, we sort the named inventories and then
        # add the unnamed inventories last.  This means that the
        # unnamed inventories will shadow the named ones but the named
        # ones can still be accessed when the name is specified.
        cached_vals = list(inventories.cache.values())
        named_vals = sorted(v for v in cached_vals if v[0])
        unnamed_vals = [v for v in cached_vals if not v[0]]
        for name, _x, invdata in named_vals + unnamed_vals:
            if name:
                inventories.named_inventory[name] = invdata
            for type, objects in invdata.items():
                inventories.main_inventory.setdefault(type, {}).update(objects)
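
Both mapping formats branched on above correspond to conf.py entries like these (a sketch; the URIs are illustrative):

intersphinx_mapping = {
    # new format: name -> (target URI, inventory file or None for the default)
    'python': ('https://docs.python.org/3', None),
    # old format: target URI -> inventory, with no name to reference it by
    'https://requests.readthedocs.io/en/latest/': None,
}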
Example #49
        def check_uri() -> Tuple[str, str, int]:
            # split off anchor
            if '#' in uri:
                req_url, anchor = uri.split('#', 1)
                for rex in self.anchors_ignore:
                    if rex.match(anchor):
                        anchor = None
                        break
            else:
                req_url = uri
                anchor = None

            # handle non-ASCII URIs
            try:
                req_url.encode('ascii')
            except UnicodeError:
                req_url = encode_uri(req_url)

            # Get auth info, if any
            for pattern, auth_info in self.auth:
                if pattern.match(uri):
                    break
            else:
                auth_info = None

            # update request headers for the URL
            kwargs['headers'] = get_request_headers()

            try:
                if anchor and self.app.config.linkcheck_anchors:
                    # Read the whole document and see if #anchor exists
                    response = requests.get(req_url,
                                            stream=True,
                                            config=self.app.config,
                                            auth=auth_info,
                                            **kwargs)
                    response.raise_for_status()
                    found = check_anchor(response, unquote(anchor))

                    if not found:
                        raise Exception(__("Anchor '%s' not found") % anchor)
                else:
                    try:
                        # try a HEAD request first, which should be easier on
                        # the server and the network
                        response = requests.head(req_url,
                                                 allow_redirects=True,
                                                 config=self.app.config,
                                                 auth=auth_info,
                                                 **kwargs)
                        response.raise_for_status()
                    except (HTTPError, TooManyRedirects) as err:
                        if isinstance(
                                err,
                                HTTPError) and err.response.status_code == 429:
                            raise
                        # retry with GET request if that fails, some servers
                        # don't like HEAD requests.
                        response = requests.get(req_url,
                                                stream=True,
                                                config=self.app.config,
                                                auth=auth_info,
                                                **kwargs)
                        response.raise_for_status()
            except HTTPError as err:
                if err.response.status_code == 401:
                    # We'll take "Unauthorized" as working.
                    return 'working', ' - unauthorized', 0
                elif err.response.status_code == 429:
                    next_check = self.limit_rate(err.response)
                    if next_check is not None:
                        self.wqueue.put((next_check, uri, docname, lineno),
                                        False)
                        return 'rate-limited', '', 0
                    return 'broken', str(err), 0
                elif err.response.status_code == 503:
                    # We'll take "Service Unavailable" as ignored.
                    return 'ignored', str(err), 0
                else:
                    return 'broken', str(err), 0
            except Exception as err:
                return 'broken', str(err), 0
            else:
                netloc = urlparse(req_url).netloc
                try:
                    del self.rate_limits[netloc]
                except KeyError:
                    pass
            if response.url.rstrip('/') == req_url.rstrip('/'):
                return 'working', '', 0
            else:
                new_url = response.url
                if anchor:
                    new_url += '#' + anchor
                # history contains any redirects, get last
                if response.history:
                    code = response.history[-1].status_code
                    return 'redirected', new_url, code
                else:
                    return 'redirected', new_url, 0
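
The self.auth patterns matched against the URI above come from the linkcheck_auth option; a conf.py sketch with a hypothetical host and credentials:

linkcheck_auth = [
    (r'^https://api\.example\.com/', ('someuser', 'sometoken')),
]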
Example #50
def check_confval_types(app, config):
    # type: (Sphinx, Config) -> None
    """check all values for deviation from the default value's type, since
    that can result in TypeErrors all over the place NB.
    """
    for confval in config:
        default, rebuild, annotations = config.values[confval.name]

        if hasattr(default, '__call__'):
            default = default(config)  # evaluate default value
        if default is None and not annotations:
            continue  # neither inferrable nor explicitly annotated types

        if annotations is Any:
            # any type of value is accepted
            pass
        elif isinstance(annotations, ENUM):
            if not annotations.match(confval.value):
                msg = __(
                    "The config value `{name}` has to be one of {candidates}, "
                    "but `{current}` is given.")
                logger.warning(
                    msg.format(name=confval.name,
                               current=confval.value,
                               candidates=annotations.candidates))
        else:
            if type(confval.value) is type(default):
                continue
            if type(confval.value) in annotations:
                continue

            common_bases = (
                set(type(confval.value).__bases__ +
                    (type(confval.value), )) & set(type(default).__bases__))
            common_bases.discard(object)
            if common_bases:
                continue  # at least we share a non-trivial base class

            if annotations:
                msg = __(
                    "The config value `{name}' has type `{current.__name__}'; "
                    "expected {permitted}.")
                wrapped_annotations = [
                    "`{}'".format(c.__name__) for c in annotations
                ]
                if len(wrapped_annotations) > 2:
                    permitted = "{}, or {}".format(
                        ", ".join(wrapped_annotations[:-1]),
                        wrapped_annotations[-1])
                else:
                    permitted = " or ".join(wrapped_annotations)
                logger.warning(
                    msg.format(name=confval.name,
                               current=type(confval.value),
                               permitted=permitted))
            else:
                msg = __(
                    "The config value `{name}' has type `{current.__name__}', "
                    "defaults to `{default.__name__}'.")
                logger.warning(
                    msg.format(name=confval.name,
                               current=type(confval.value),
                               default=type(default)))
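
In Sphinx this validator is run once configuration is loaded, via the config-inited event; a minimal wiring sketch:

def setup(app):
    app.connect('config-inited', check_confval_types)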
Example #51
    def process_result(self, result: Tuple[str, str, int, str, str,
                                           int]) -> None:
        uri, docname, lineno, status, info, code = result

        filename = self.env.doc2path(docname, None)
        linkstat = dict(filename=filename,
                        lineno=lineno,
                        status=status,
                        code=code,
                        uri=uri,
                        info=info)
        if status == 'unchecked':
            self.write_linkstat(linkstat)
            return
        if status == 'working' and info == 'old':
            self.write_linkstat(linkstat)
            return
        if lineno:
            logger.info('(line %4d) ', lineno, nonl=True)
        if status == 'ignored':
            if info:
                logger.info(darkgray('-ignored- ') + uri + ': ' + info)
            else:
                logger.info(darkgray('-ignored- ') + uri)
            self.write_linkstat(linkstat)
        elif status == 'local':
            logger.info(darkgray('-local-   ') + uri)
            self.write_entry('local', docname, filename, lineno, uri)
            self.write_linkstat(linkstat)
        elif status == 'working':
            logger.info(darkgreen('ok        ') + uri + info)
            self.write_linkstat(linkstat)
        elif status == 'broken':
            if self.app.quiet or self.app.warningiserror:
                logger.warning(__('broken link: %s (%s)'),
                               uri,
                               info,
                               location=(filename, lineno))
            else:
                logger.info(red('broken    ') + uri + red(' - ' + info))
            self.write_entry('broken', docname, filename, lineno,
                             uri + ': ' + info)
            self.write_linkstat(linkstat)
        elif status == 'redirected':
            try:
                text, color = {
                    301: ('permanently', purple),
                    302: ('with Found', purple),
                    303: ('with See Other', purple),
                    307: ('temporarily', turquoise),
                    308: ('permanently', purple),
                }[code]
            except KeyError:
                text, color = ('with unknown code', purple)
            linkstat['text'] = text
            logger.info(
                color('redirect  ') + uri +
                color(' - ' + text + ' to ' + info))
            self.write_entry('redirected ' + text, docname, filename, lineno,
                             uri + ' to ' + info)
            self.write_linkstat(linkstat)
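
The incoming tuple follows the order unpacked on the first line; a hypothetical call on a linkcheck builder instance:

result = ('https://example.org/old', 'index', 42,
          'redirected', 'https://example.org/new', 301)
builder.process_result(result)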
Example #52
def get_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(
        usage='%(prog)s [OPTIONS] SOURCEDIR OUTPUTDIR [FILENAMES...]',
        epilog=__('For more information, visit <http://sphinx-doc.org/>.'),
        description=__("""
Generate documentation from source files.

sphinx-build generates documentation from the files in SOURCEDIR and places it
in OUTPUTDIR. It looks for 'conf.py' in SOURCEDIR for the configuration
settings. The 'sphinx-quickstart' tool may be used to generate template files,
including 'conf.py'.

sphinx-build can create documentation in different formats. A format is
selected by specifying the builder name on the command line; it defaults to
HTML. Builders can also perform other tasks related to documentation
processing.

By default, everything that is outdated is built. Output only for selected
files can be built by specifying individual filenames.
"""))

    parser.add_argument('--version',
                        action='version',
                        dest='show_version',
                        version='%%(prog)s %s' % __display_version__)

    parser.add_argument('sourcedir',
                        help=__('path to documentation source files'))
    parser.add_argument('outputdir', help=__('path to output directory'))
    parser.add_argument('filenames',
                        nargs='*',
                        help=__('a list of specific files to rebuild. Ignored '
                                'if -a is specified'))

    group = parser.add_argument_group(__('general options'))
    group.add_argument('-b',
                       metavar='BUILDER',
                       dest='builder',
                       default='html',
                       help=__('builder to use (default: html)'))
    group.add_argument('-a',
                       action='store_true',
                       dest='force_all',
                       help=__('write all files (default: only write new and '
                               'changed files)'))
    group.add_argument('-E',
                       action='store_true',
                       dest='freshenv',
                       help=__('don\'t use a saved environment, always read '
                               'all files'))
    group.add_argument('-d',
                       metavar='PATH',
                       dest='doctreedir',
                       help=__('path for the cached environment and doctree '
                               'files (default: OUTPUTDIR/.doctrees)'))
    group.add_argument(
        '-j',
        metavar='N',
        default=1,
        type=jobs_argument,
        dest='jobs',
        help=__('build in parallel with N processes where '
                'possible (special value "auto" will set N to cpu-count)'))
    group = parser.add_argument_group(__('build configuration options'))
    group.add_argument('-c',
                       metavar='PATH',
                       dest='confdir',
                       help=__('path where configuration file (conf.py) is '
                               'located (default: same as SOURCEDIR)'))
    group.add_argument('-C',
                       action='store_true',
                       dest='noconfig',
                       help=__('use no config file at all, only -D options'))
    group.add_argument('-D',
                       metavar='setting=value',
                       action='append',
                       dest='define',
                       default=[],
                       help=__('override a setting in configuration file'))
    group.add_argument('-A',
                       metavar='name=value',
                       action='append',
                       dest='htmldefine',
                       default=[],
                       help=__('pass a value into HTML templates'))
    group.add_argument('-t',
                       metavar='TAG',
                       action='append',
                       dest='tags',
                       default=[],
                       help=__('define tag: include "only" blocks with TAG'))
    group.add_argument('-n',
                       action='store_true',
                       dest='nitpicky',
                       help=__('nit-picky mode, warn about all missing '
                               'references'))

    group = parser.add_argument_group(__('console output options'))
    group.add_argument('-v',
                       action='count',
                       dest='verbosity',
                       default=0,
                       help=__('increase verbosity (can be repeated)'))
    group.add_argument('-q',
                       action='store_true',
                       dest='quiet',
                       help=__('no output on stdout, just warnings on stderr'))
    group.add_argument('-Q',
                       action='store_true',
                       dest='really_quiet',
                       help=__('no output at all, not even warnings'))
    group.add_argument(
        '--color',
        action='store_const',
        const='yes',
        default='auto',
        help=__('do emit colored output (default: auto-detect)'))
    group.add_argument('-N',
                       '--no-color',
                       dest='color',
                       action='store_const',
                       const='no',
                       help=__('do not emit colored output (default: '
                               'auto-detect)'))
    group.add_argument('-w',
                       metavar='FILE',
                       dest='warnfile',
                       help=__('write warnings (and errors) to given file'))
    group.add_argument('-W',
                       action='store_true',
                       dest='warningiserror',
                       help=__('turn warnings into errors'))
    group.add_argument('--keep-going',
                       action='store_true',
                       dest='keep_going',
                       help=__("with -W, keep going when getting warnings"))
    group.add_argument('-T',
                       action='store_true',
                       dest='traceback',
                       help=__('show full traceback on exception'))
    group.add_argument('-P',
                       action='store_true',
                       dest='pdb',
                       help=__('run Pdb on exception'))

    return parser
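
A minimal sketch of driving this parser, assuming it is importable as shown (the paths are made up):

parser = get_parser()
args = parser.parse_args(['-b', 'html', 'docs', 'docs/_build/html'])
print(args.builder, args.sourcedir, args.outputdir)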
Example #53
class TexinfoBuilder(Builder):
    """
    Builds Texinfo output to create Info documentation.
    """
    name = 'texinfo'
    format = 'texinfo'
    epilog = __('The Texinfo files are in %(outdir)s.')
    if os.name == 'posix':
        epilog += __("\nRun 'make' in that directory to run these through "
                     "makeinfo\n"
                     "(use 'make info' here to do that automatically).")

    supported_image_types = ['image/png', 'image/jpeg',
                             'image/gif']
    default_translator_class = TexinfoTranslator

    def init(self):
        # type: () -> None
        self.docnames = []       # type: Iterable[str]
        self.document_data = []  # type: List[Tuple[str, str, str, str, str, str, str, bool]]

    def get_outdated_docs(self):
        # type: () -> Union[str, List[str]]
        return 'all documents'  # for now

    def get_target_uri(self, docname, typ=None):
        # type: (str, str) -> str
        if docname not in self.docnames:
            raise NoUri
        else:
            return '%' + docname

    def get_relative_uri(self, from_, to, typ=None):
        # type: (str, str, str) -> str
        # ignore source path
        return self.get_target_uri(to, typ)

    def init_document_data(self):
        # type: () -> None
        preliminary_document_data = [list(x) for x in self.config.texinfo_documents]
        if not preliminary_document_data:
            logger.warning(__('no "texinfo_documents" config value found; no documents '
                              'will be written'))
            return
        # assign subdirs to titles
        self.titles = []  # type: List[Tuple[str, str]]
        for entry in preliminary_document_data:
            docname = entry[0]
            if docname not in self.env.all_docs:
                logger.warning(__('"texinfo_documents" config value references unknown '
                                  'document %s'), docname)
                continue
            self.document_data.append(entry)  # type: ignore
            if docname.endswith(SEP + 'index'):
                docname = docname[:-5]
            self.titles.append((docname, entry[2]))

    def write(self, *ignored):
        # type: (Any) -> None
        self.init_document_data()
        for entry in self.document_data:
            docname, targetname, title, author = entry[:4]
            targetname += '.texi'
            direntry = description = category = ''
            if len(entry) > 6:
                direntry, description, category = entry[4:7]
            toctree_only = False
            if len(entry) > 7:
                toctree_only = entry[7]
            destination = FileOutput(
                destination_path=path.join(self.outdir, targetname),
                encoding='utf-8')
            logger.info(__("processing %s..."), targetname, nonl=True)
            doctree = self.assemble_doctree(
                docname, toctree_only,
                appendices=(self.config.texinfo_appendices or []))
            logger.info(__("writing... "), nonl=True)
            self.post_process_images(doctree)
            docwriter = TexinfoWriter(self)
            settings = OptionParser(
                defaults=self.env.settings,
                components=(docwriter,),
                read_config_files=True).get_default_values()  # type: Any
            settings.author = author
            settings.title = title
            settings.texinfo_filename = targetname[:-5] + '.info'
            settings.texinfo_elements = self.config.texinfo_elements
            settings.texinfo_dir_entry = direntry or ''
            settings.texinfo_dir_category = category or ''
            settings.texinfo_dir_description = description or ''
            settings.docname = docname
            doctree.settings = settings
            docwriter.write(doctree, destination)
            logger.info(__("done"))

    def assemble_doctree(self, indexfile, toctree_only, appendices):
        # type: (str, bool, List[str]) -> nodes.document
        self.docnames = set([indexfile] + appendices)
        logger.info(darkgreen(indexfile) + " ", nonl=True)
        tree = self.env.get_doctree(indexfile)
        tree['docname'] = indexfile
        if toctree_only:
            # extract toctree nodes from the tree and put them in a
            # fresh document
            new_tree = new_document('<texinfo output>')
            new_sect = nodes.section()
            new_sect += nodes.title('<Set title in conf.py>',
                                    '<Set title in conf.py>')
            new_tree += new_sect
            for node in tree.traverse(addnodes.toctree):
                new_sect += node
            tree = new_tree
        largetree = inline_all_toctrees(self, self.docnames, indexfile, tree,
                                        darkgreen, [indexfile])
        largetree['docname'] = indexfile
        for docname in appendices:
            appendix = self.env.get_doctree(docname)
            appendix['docname'] = docname
            largetree.append(appendix)
        logger.info('')
        logger.info(__("resolving references..."))
        self.env.resolve_references(largetree, indexfile, self)
        # TODO: add support for external :ref:s
        for pendingnode in largetree.traverse(addnodes.pending_xref):
            docname = pendingnode['refdocname']
            sectname = pendingnode['refsectname']
            newnodes = [nodes.emphasis(sectname, sectname)]  # type: List[nodes.Node]
            for subdir, title in self.titles:
                if docname.startswith(subdir):
                    newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
                    newnodes.append(nodes.emphasis(title, title))
                    newnodes.append(nodes.Text(')', ')'))
                    break
            pendingnode.replace_self(newnodes)
        return largetree

    def finish(self):
        # type: () -> None
        self.copy_image_files()

        logger.info(bold(__('copying Texinfo support files... ')), nonl=True)
        # copy Makefile
        fn = path.join(self.outdir, 'Makefile')
        logger.info(fn, nonl=True)
        try:
            copy_asset_file(os.path.join(template_dir, 'Makefile'), fn)
        except OSError as err:
            logger.warning(__("error writing file %s: %s"), fn, err)
        logger.info(__(' done'))

    def copy_image_files(self):
        # type: () -> None
        if self.images:
            stringify_func = ImageAdapter(self.app.env).get_original_image_uri
            for src in status_iterator(self.images, __('copying images... '), "brown",
                                       len(self.images), self.app.verbosity,
                                       stringify_func=stringify_func):
                dest = self.images[src]
                try:
                    copy_asset_file(path.join(self.srcdir, src),
                                    path.join(self.outdir, dest))
                except Exception as err:
                    logger.warning(__('cannot copy image file %r: %s'),
                                   path.join(self.srcdir, src), err)
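
A builder class like this is typically made available from an extension's setup(); a minimal sketch:

def setup(app):
    app.add_builder(TexinfoBuilder)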
Example #54
    def build_all(self):
        # type: () -> None
        """Build all source files."""
        self.build(None, summary=__('all source files'), method='all')
Example #55
    def add(self, name, default, rebuild, types):
        # type: (unicode, Any, Union[bool, unicode], Any) -> None
        if name in self.values:
            raise ExtensionError(__('Config value %r already present') % name)
        else:
            self.values[name] = (default, rebuild, types)
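
Extensions normally reach this method through the public wrapper on the application object; a sketch (the option name is made up):

def setup(app):
    # forwarded to Config.add() under the hood
    app.add_config_value('my_flag', False, 'env')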
Example #56
def generate_autosummary_docs(sources,
                              output_dir=None,
                              suffix='.rst',
                              warn=_simple_warn,
                              info=_simple_info,
                              base_path=None,
                              builder=None,
                              template_dir=None,
                              imported_members=False,
                              app=None):
    # type: (List[str], str, str, Callable, Callable, str, Builder, str, bool, Any) -> None

    showed_sources = list(sorted(sources))
    if len(showed_sources) > 20:
        showed_sources = showed_sources[:10] + ['...'] + showed_sources[-10:]
    info(
        __('[autosummary] generating autosummary for: %s') %
        ', '.join(showed_sources))

    if output_dir:
        info(__('[autosummary] writing to %s') % output_dir)

    if base_path is not None:
        sources = [os.path.join(base_path, filename) for filename in sources]

    # create our own templating environment
    template_dirs = [
        os.path.join(package_dir, 'ext', 'autosummary', 'templates')
    ]  # type: List[str]

    template_loader = None  # type: Union[BuiltinTemplateLoader, FileSystemLoader]
    if builder is not None:
        # allow the user to override the templates
        template_loader = BuiltinTemplateLoader()
        template_loader.init(builder, dirs=template_dirs)
    else:
        if template_dir:
            template_dirs.insert(0, template_dir)
        template_loader = FileSystemLoader(template_dirs)
    template_env = SandboxedEnvironment(loader=template_loader)
    template_env.filters['underline'] = _underline

    # replace the builtin html filters
    template_env.filters['escape'] = rst_escape
    template_env.filters['e'] = rst_escape

    # read
    items = find_autosummary_in_files(sources)

    # keep track of new files
    new_files = []

    # write
    for name, path, template_name in sorted(set(items), key=str):
        if path is None:
            # The corresponding autosummary:: directive did not have
            # a :toctree: option
            continue

        path = output_dir or os.path.abspath(path)
        ensuredir(path)

        try:
            name, obj, parent, mod_name = import_by_name(name)
        except ImportError as e:
            warn('[autosummary] failed to import %r: %s' % (name, e))
            continue

        fn = os.path.join(path, name + suffix)

        # skip it if it exists
        if os.path.isfile(fn):
            continue

        new_files.append(fn)

        with open(fn, 'w') as f:
            doc = get_documenter(app, obj, parent)

            if template_name is not None:
                template = template_env.get_template(template_name)
            else:
                try:
                    template = template_env.get_template('autosummary/%s.rst' %
                                                         doc.objtype)
                except TemplateNotFound:
                    template = template_env.get_template(
                        'autosummary/base.rst')

            def get_members(obj, typ, include_public=[], imported=True):
                # type: (Any, str, List[str], bool) -> Tuple[List[str], List[str]]
                items = []  # type: List[str]
                for name in dir(obj):
                    try:
                        value = safe_getattr(obj, name)
                    except AttributeError:
                        continue
                    documenter = get_documenter(app, value, obj)
                    if documenter.objtype == typ:
                        if imported or getattr(value, '__module__',
                                               None) == obj.__name__:
                            # skip imported members if expected
                            items.append(name)
                public = [
                    x for x in items
                    if x in include_public or not x.startswith('_')
                ]
                return public, items

            ns = {}  # type: Dict[str, Any]

            if doc.objtype == 'module':
                ns['members'] = dir(obj)
                ns['functions'], ns['all_functions'] = \
                    get_members(obj, 'function', imported=imported_members)
                ns['classes'], ns['all_classes'] = \
                    get_members(obj, 'class', imported=imported_members)
                ns['exceptions'], ns['all_exceptions'] = \
                    get_members(obj, 'exception', imported=imported_members)
            elif doc.objtype == 'class':
                ns['members'] = dir(obj)
                ns['inherited_members'] = \
                    set(dir(obj)) - set(obj.__dict__.keys())
                ns['methods'], ns['all_methods'] = \
                    get_members(obj, 'method', ['__init__'])
                ns['attributes'], ns['all_attributes'] = \
                    get_members(obj, 'attribute')

            parts = name.split('.')
            if doc.objtype in ('method', 'attribute'):
                mod_name = '.'.join(parts[:-2])
                cls_name = parts[-2]
                obj_name = '.'.join(parts[-2:])
                ns['class'] = cls_name
            else:
                mod_name, obj_name = '.'.join(parts[:-1]), parts[-1]

            ns['fullname'] = name
            ns['module'] = mod_name
            ns['objname'] = obj_name
            ns['name'] = parts[-1]

            ns['objtype'] = doc.objtype
            ns['underline'] = len(name) * '='

            rendered = template.render(**ns)
            f.write(rendered)

    # descend recursively to new files
    if new_files:
        generate_autosummary_docs(new_files,
                                  output_dir=output_dir,
                                  suffix=suffix,
                                  warn=warn,
                                  info=info,
                                  base_path=base_path,
                                  builder=builder,
                                  template_dir=template_dir,
                                  app=app)
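
A direct invocation sketch using only parameters from the signature above (the paths are illustrative):

generate_autosummary_docs(['docs/api.rst'],
                          output_dir='docs/generated',
                          suffix='.rst')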
Example #57
    def get_codeblock_node(self, code, language):
        """this is copied from sphinx.directives.code.CodeBlock.run

        it has been changed to accept code and language as an arguments instead
        of reading from self

        """

        document = self.state.document
        location = self.state_machine.get_source_and_line(self.lineno)

        linespec = self.options.get("emphasize-lines")
        if linespec:
            try:
                nlines = len(code.split("\n"))
                hl_lines = parselinenos(linespec, nlines)
                if any(i >= nlines for i in hl_lines):
                    emph_lines = self.options["emphasize-lines"]
                    log.warning(__('line number spec is out of range(1-%d): %r') %
                                (nlines, emph_lines),
                                location=location)

                hl_lines = [x + 1 for x in hl_lines if x < nlines]
            except ValueError as err:
                return [document.reporter.warning(str(err), line=self.lineno)]
        else:
            hl_lines = None

        if "dedent" in self.options:
            location = self.state_machine.get_source_and_line(self.lineno)
            lines = code.split("\n")
            lines = dedent_lines(lines,
                                 self.options["dedent"],
                                 location=location)
            code = "\n".join(lines)

        literal = nodes.literal_block(code, code)
        literal["language"] = language
        literal["linenos"] = ("linenos" in self.options or
                              "lineno-start" in self.options)
        literal["classes"] += self.options.get("class", [])
        extra_args = literal["highlight_args"] = {}
        if hl_lines is not None:
            extra_args["hl_lines"] = hl_lines
        if "lineno-start" in self.options:
            extra_args["linenostart"] = self.options["lineno-start"]
        set_source_info(self, literal)

        caption = self.options.get("caption")
        if caption:
            try:
                literal = container_wrapper(self, literal, caption)
            except ValueError as exc:
                return [document.reporter.warning(str(exc), line=self.lineno)]

        # literal will be note_implicit_target that is linked from caption and numref.
        # when options['name'] is provided, it should be primary ID.
        self.add_name(literal)

        return [literal]
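
A sketch of calling this helper from a custom directive's run() method (the code string is made up):

def run(self):
    code = "print('hello, world')"
    return self.get_codeblock_node(code, 'python')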
Example #58
    def get_items(self, names: List[str]) -> List[Tuple[str, str, str, str]]:
        """Try to import the given names, and return a list of
        ``[(name, signature, summary_string, real_name), ...]``.
        """
        prefixes = get_import_prefixes_from_env(self.env)

        items = []  # type: List[Tuple[str, str, str, str]]

        max_item_chars = 50

        for name in names:
            display_name = name
            if name.startswith('~'):
                name = name[1:]
                display_name = name.split('.')[-1]

            try:
                real_name, obj, parent, modname = self.import_by_name(
                    name, prefixes=prefixes)
            except ImportError:
                logger.warning(__('autosummary: failed to import %s'),
                               name,
                               location=self.get_source_info())
                continue

            self.bridge.result = StringList()  # initialize for each documenter
            full_name = real_name
            if not isinstance(obj, ModuleType):
                # give explicitly separated module name, so that members
                # of inner classes can be documented
                full_name = modname + '::' + full_name[len(modname) + 1:]
            # NB. using full_name here is important, since Documenters
            #     handle module prefixes slightly differently
            documenter = self.create_documenter(self.env.app, obj, parent,
                                                full_name)
            if not documenter.parse_name():
                logger.warning(__('failed to parse name %s'),
                               real_name,
                               location=self.get_source_info())
                items.append((display_name, '', '', real_name))
                continue
            if not documenter.import_object():
                logger.warning(__('failed to import object %s'),
                               real_name,
                               location=self.get_source_info())
                items.append((display_name, '', '', real_name))
                continue
            if documenter.options.members and not documenter.check_module():
                continue

            # try to also get a source code analyzer for attribute docs
            try:
                documenter.analyzer = ModuleAnalyzer.for_module(
                    documenter.get_real_modname())
                # parse right now, to get PycodeErrors on parsing (results will
                # be cached anyway)
                documenter.analyzer.find_attr_docs()
            except PycodeError as err:
                logger.debug('[autodoc] module analyzer failed: %s', err)
                # no source file -- e.g. for builtin and C modules
                documenter.analyzer = None

            # -- Grab the signature

            try:
                sig = documenter.format_signature(show_annotation=False)
            except TypeError:
                # the documenter does not support ``show_annotation`` option
                sig = documenter.format_signature()

            if not sig:
                sig = ''
            else:
                max_chars = max(10, max_item_chars - len(display_name))
                sig = mangle_signature(sig, max_chars=max_chars)

            # -- Grab the summary

            documenter.add_content(None)
            summary = extract_summary(self.bridge.result.data[:],
                                      self.state.document)

            items.append((display_name, sig, summary, real_name))

        return items
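
Within the autosummary directive these items usually feed straight into table rendering; a sketch of the typical consumer:

items = self.get_items(names)
table_nodes = self.get_table(items)  # one row per (name, signature, summary, real_name)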
Example #59
def render_dot(self: SphinxTranslator,
               code: str,
               options: Dict,
               format: str,
               prefix: str = 'graphviz',
               filename: str = None) -> Tuple[str, str]:
    """Render graphviz code into a PNG or PDF output file."""
    graphviz_dot = options.get('graphviz_dot',
                               self.builder.config.graphviz_dot)
    hashkey = (code + str(options) + str(graphviz_dot) +
               str(self.builder.config.graphviz_dot_args)).encode()

    fname = '%s-%s.%s' % (prefix, sha1(hashkey).hexdigest(), format)
    relfn = posixpath.join(self.builder.imgpath, fname)
    outfn = path.join(self.builder.outdir, self.builder.imagedir, fname)

    if path.isfile(outfn):
        return relfn, outfn

    if (hasattr(self.builder, '_graphviz_warned_dot') and
            self.builder._graphviz_warned_dot.get(graphviz_dot)):  # type: ignore  # NOQA
        return None, None

    ensuredir(path.dirname(outfn))

    dot_args = [graphviz_dot]
    dot_args.extend(self.builder.config.graphviz_dot_args)
    dot_args.extend(['-T' + format, '-o' + outfn])

    docname = options.get('docname', 'index')
    if filename:
        cwd = path.dirname(path.join(self.builder.srcdir, filename))
    else:
        cwd = path.dirname(path.join(self.builder.srcdir, docname))

    if format == 'png':
        dot_args.extend(['-Tcmapx', '-o%s.map' % outfn])

    try:
        ret = subprocess.run(dot_args,
                             input=code.encode(),
                             stdout=PIPE,
                             stderr=PIPE,
                             cwd=cwd,
                             check=True)
        if not path.isfile(outfn):
            raise GraphvizError(
                __('dot did not produce an output file:\n[stderr]\n%r\n'
                   '[stdout]\n%r') % (ret.stderr, ret.stdout))
        return relfn, outfn
    except OSError:
        logger.warning(
            __('dot command %r cannot be run (needed for graphviz '
               'output), check the graphviz_dot setting'), graphviz_dot)
        if not hasattr(self.builder, '_graphviz_warned_dot'):
            self.builder._graphviz_warned_dot = {}  # type: ignore
        self.builder._graphviz_warned_dot[graphviz_dot] = True  # type: ignore
        return None, None
    except CalledProcessError as exc:
        raise GraphvizError(
            __('dot exited with error:\n[stderr]\n%r\n'
               '[stdout]\n%r') % (exc.stderr, exc.stdout)) from exc
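
A calling sketch, assuming self is a translator exposing a .builder attribute as the signature implies (the DOT source is made up):

relfn, outfn = render_dot(self, 'digraph G { a -> b }', {}, 'png')
if relfn is not None:
    print('image available at', outfn)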
Example #60
class LaTeXBuilder(Builder):
    """
    Builds LaTeX output to create PDF.
    """
    name = 'latex'
    format = 'latex'
    epilog = __('The LaTeX files are in %(outdir)s.')
    if os.name == 'posix':
        epilog += __("\nRun 'make' in that directory to run these through "
                     "(pdf)latex\n"
                     "(use `make latexpdf' here to do that automatically).")

    supported_image_types = ['application/pdf', 'image/png', 'image/jpeg']
    supported_remote_images = False
    default_translator_class = LaTeXTranslator

    def init(self):
        # type: () -> None
        self.babel = None  # type: ExtBabel
        self.context = {}  # type: Dict[str, Any]
        self.docnames = []  # type: Iterable[str]
        self.document_data = []  # type: List[Tuple[str, str, str, str, str, bool]]
        self.usepackages = self.app.registry.latex_packages
        texescape.init()

        self.init_context()
        self.init_babel()

    def get_outdated_docs(self):
        # type: () -> Union[str, List[str]]
        return 'all documents'  # for now

    def get_target_uri(self, docname, typ=None):
        # type: (str, str) -> str
        if docname not in self.docnames:
            raise NoUri
        else:
            return '%' + docname

    def get_relative_uri(self, from_, to, typ=None):
        # type: (str, str, str) -> str
        # ignore source path
        return self.get_target_uri(to, typ)

    def init_document_data(self):
        # type: () -> None
        preliminary_document_data = [
            list(x) for x in self.config.latex_documents
        ]
        if not preliminary_document_data:
            logger.warning(
                __('no "latex_documents" config value found; no documents '
                   'will be written'))
            return
        # assign subdirs to titles
        self.titles = []  # type: List[Tuple[str, str]]
        for entry in preliminary_document_data:
            docname = entry[0]
            if docname not in self.env.all_docs:
                logger.warning(
                    __('"latex_documents" config value references unknown '
                       'document %s'), docname)
                continue
            self.document_data.append(entry)  # type: ignore
            if docname.endswith(SEP + 'index'):
                docname = docname[:-5]
            self.titles.append((docname, entry[2]))

    def init_context(self):
        # type: () -> None
        self.context = DEFAULT_SETTINGS.copy()

        # Add special settings for latex_engine
        self.context.update(
            ADDITIONAL_SETTINGS.get(self.config.latex_engine, {}))

        # for xelatex+French, don't use polyglossia by default
        if self.config.latex_engine == 'xelatex':
            if self.config.language:
                if self.config.language[:2] == 'fr':
                    self.context['polyglossia'] = ''
                    self.context['babel'] = r'\usepackage{babel}'

        # Apply extension settings to context
        self.context['packages'] = self.usepackages

        # Apply user settings to context
        self.context.update(self.config.latex_elements)
        self.context['release'] = self.config.release
        self.context['use_xindy'] = self.config.latex_use_xindy

        if self.config.today:
            self.context['date'] = self.config.today
        else:
            self.context['date'] = format_date(self.config.today_fmt
                                               or _('%b %d, %Y'),
                                               language=self.config.language)

        if self.config.latex_logo:
            self.context['logofilename'] = path.basename(
                self.config.latex_logo)

        # for compatibilities
        self.context['indexname'] = _('Index')
        if self.config.release:
            # Show the release label only if release value exists
            self.context['releasename'] = _('Release')

    def init_babel(self):
        # type: () -> None
        self.babel = ExtBabel(self.config.language, not self.context['babel'])
        if self.config.language and not self.babel.is_supported_language():
            # emit warning if specified language is invalid
            # (only emitting, nothing changed to processing)
            logger.warning(__('no Babel option known for language %r'),
                           self.config.language)

    def write_stylesheet(self):
        # type: () -> None
        highlighter = highlighting.PygmentsBridge('latex',
                                                  self.config.pygments_style)
        stylesheet = path.join(self.outdir, 'sphinxhighlight.sty')
        with open(stylesheet, 'w') as f:
            f.write('\\NeedsTeXFormat{LaTeX2e}[1995/12/01]\n')
            f.write(
                '\\ProvidesPackage{sphinxhighlight}'
                '[2016/05/29 stylesheet for highlighting with pygments]\n\n')
            f.write(highlighter.get_stylesheet())

    def write(self, *ignored):
        # type: (Any) -> None
        docwriter = LaTeXWriter(self)
        docsettings = OptionParser(
            defaults=self.env.settings,
            components=(docwriter, ),
            read_config_files=True).get_default_values()  # type: Any

        self.init_document_data()
        self.write_stylesheet()

        for entry in self.document_data:
            docname, targetname, title, author, docclass = entry[:5]
            toctree_only = False
            if len(entry) > 5:
                toctree_only = entry[5]
            destination = SphinxFileOutput(
                destination_path=path.join(self.outdir, targetname),
                encoding='utf-8', overwrite_if_changed=True)
            with progress_message(__("processing %s") % targetname):
                toctrees = self.env.get_doctree(docname).traverse(
                    addnodes.toctree)
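                # honor the :maxdepth: option of the document's first
                # toctree, if it specifies a positive depth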
                if toctrees:
                    if toctrees[0].get('maxdepth') > 0:
                        tocdepth = toctrees[0].get('maxdepth')
                    else:
                        tocdepth = None
                else:
                    tocdepth = None
                doctree = self.assemble_doctree(
                    docname,
                    toctree_only,
                    appendices=((docclass != 'howto')
                                and self.config.latex_appendices or []))
                doctree['tocdepth'] = tocdepth
                self.apply_transforms(doctree)
                self.post_process_images(doctree)
                self.update_doc_context(title, author)

            with progress_message(__("writing")):
                docsettings.author = author
                docsettings.title = title
                docsettings.contentsname = self.get_contentsname(docname)
                docsettings.docname = docname
                docsettings.docclass = docclass

                doctree.settings = docsettings
                docwriter.write(doctree, destination)

    def get_contentsname(self, indexfile):
        # type: (str) -> str
        tree = self.env.get_doctree(indexfile)
        contentsname = None
        for toctree in tree.traverse(addnodes.toctree):
            if 'caption' in toctree:
                contentsname = toctree['caption']
                break
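        # contentsname remains None if no toctree defines a caption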

        return contentsname

    def update_doc_context(self, title, author):
        # type: (str, str) -> None
        self.context['title'] = title
        self.context['author'] = author

    def assemble_doctree(self, indexfile, toctree_only, appendices):
        # type: (str, bool, List[str]) -> nodes.document
        from docutils import nodes  # NOQA
        self.docnames = set([indexfile] + appendices)
        logger.info(darkgreen(indexfile) + " ", nonl=True)
        tree = self.env.get_doctree(indexfile)
        tree['docname'] = indexfile
        if toctree_only:
            # extract toctree nodes from the tree and put them in a
            # fresh document
            new_tree = new_document('<latex output>')
            new_sect = nodes.section()
            new_sect += nodes.title('<Set title in conf.py>',
                                    '<Set title in conf.py>')
            new_tree += new_sect
            for node in tree.traverse(addnodes.toctree):
                new_sect += node
            tree = new_tree
        largetree = inline_all_toctrees(self, self.docnames, indexfile, tree,
                                        darkgreen, [indexfile])
        largetree['docname'] = indexfile
        for docname in appendices:
            appendix = self.env.get_doctree(docname)
            appendix['docname'] = docname
            largetree.append(appendix)
        logger.info('')
        logger.info(__("resolving references..."))
        self.env.resolve_references(largetree, indexfile, self)
        # resolve :ref:s to distant tex files -- we can't add a cross-reference,
        # but append the document name
        for pendingnode in largetree.traverse(addnodes.pending_xref):
            docname = pendingnode['refdocname']
            sectname = pendingnode['refsectname']
            newnodes = [nodes.emphasis(sectname,
                                       sectname)]  # type: List[nodes.Node]
            for subdir, title in self.titles:
                if docname.startswith(subdir):
                    newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
                    newnodes.append(nodes.emphasis(title, title))
                    newnodes.append(nodes.Text(')', ')'))
                    break
            pendingnode.replace_self(newnodes)
        return largetree

    def apply_transforms(self, doctree):
        # type: (nodes.document) -> None
        transformer = SphinxTransformer(doctree)
        transformer.set_environment(self.env)
        transformer.add_transforms([
            BibliographyTransform, ShowUrlsTransform, LaTeXFootnoteTransform,
            LiteralBlockTransform, DocumentTargetTransform
        ])
        transformer.apply_transforms()

    def finish(self):
        # type: () -> None
        self.copy_image_files()
        self.write_message_catalog()
        self.copy_support_files()

        if self.config.latex_additional_files:
            self.copy_latex_additional_files()

    @progress_message(__('copying TeX support files'))
    def copy_support_files(self):
        # type: () -> None
        """copy TeX support files from texinputs."""
        # configure usage of xindy (impacts Makefile and latexmkrc)
        # FIXME: convert this to a confval with a language-dependent default?
        #        (that would require extra documentation, though)
        if self.config.language:
            xindy_lang_option = XINDY_LANG_OPTIONS.get(
                self.config.language[:2], '-L general -C utf8 ')
            xindy_cyrillic = self.config.language[:2] in XINDY_CYRILLIC_SCRIPTS
        else:
            xindy_lang_option = '-L english -C utf8 '
            xindy_cyrillic = False
        context = {
            'latex_engine': self.config.latex_engine,
            'xindy_use': self.config.latex_use_xindy,
            'xindy_lang_option': xindy_lang_option,
            'xindy_cyrillic': xindy_cyrillic,
        }
        logger.info(bold(__('copying TeX support files...')))
        staticdirname = path.join(package_dir, 'texinputs')
        for filename in os.listdir(staticdirname):
            if not filename.startswith('.'):
                copy_asset_file(path.join(staticdirname, filename),
                                self.outdir,
                                context=context)

        # use pre-1.6.x Makefile for make latexpdf on Windows
        if os.name == 'nt':
            staticdirname = path.join(package_dir, 'texinputs_win')
            copy_asset_file(path.join(staticdirname, 'Makefile_t'),
                            self.outdir,
                            context=context)

        # the logo is handled differently
        if self.config.latex_logo:
            if not path.isfile(path.join(self.confdir,
                                         self.config.latex_logo)):
                raise SphinxError(
                    __('logo file %r does not exist') % self.config.latex_logo)
            else:
                copy_asset_file(
                    path.join(self.confdir, self.config.latex_logo),
                    self.outdir)

    @progress_message(__('copying additional files'))
    def copy_latex_additional_files(self):
        # type: () -> None
        for filename in self.config.latex_additional_files:
            logger.info(' ' + filename, nonl=True)
            copy_asset_file(path.join(self.confdir, filename), self.outdir)

    def copy_image_files(self):
        # type: () -> None
        if self.images:
            stringify_func = ImageAdapter(self.app.env).get_original_image_uri
            for src in status_iterator(self.images,
                                       __('copying images... '),
                                       "brown",
                                       len(self.images),
                                       self.app.verbosity,
                                       stringify_func=stringify_func):
                dest = self.images[src]
                try:
                    copy_asset_file(path.join(self.srcdir, src),
                                    path.join(self.outdir, dest))
                except Exception as err:
                    logger.warning(__('cannot copy image file %r: %s'),
                                   path.join(self.srcdir, src), err)

    def write_message_catalog(self):
        # type: () -> None
        formats = self.config.numfig_format
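        # each numfig_format value (e.g. 'Fig. %s') is split around the
        # '%s' placeholder into a (prefix, suffix) pair for the template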
        context = {
            'addtocaptions': r'\@iden',
            'figurename': formats.get('figure', '').split('%s', 1),
            'tablename': formats.get('table', '').split('%s', 1),
            'literalblockname': formats.get('code-block', '').split('%s', 1)
        }

        if self.context['babel'] or self.context['polyglossia']:
            context['addtocaptions'] = (r'\addto\captions%s'
                                        % self.babel.get_language())

        filename = path.join(package_dir, 'templates', 'latex',
                             'sphinxmessages.sty_t')
        copy_asset_file(filename,
                        self.outdir,
                        context=context,
                        renderer=LaTeXRenderer())
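
        # Typical invocation of this builder, for reference:
        #
        #   sphinx-build -b latex docs/ build/latex
        #   make -C build/latex    # runs pdflatex/latexmk on the output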