def prepare_writing(self, docnames):
    # type: (Set[unicode]) -> None
    """A place where you can add logic before :meth:`write_doc` is run"""
    self.writer = HTMLWriter(self)
    self.settings = OptionParser(
        defaults=self.env.settings,
        components=(self.writer,),
        read_config_files=True).get_default_values()
    self.settings.compact_lists = bool(self.config.html_compact_lists)
    # disable splitting field list table rows with too long field names,
    # fixing https://gitlab.com/mbukatov/pylatest/issues/44
    self.settings.field_name_limit = 0
def __init__(self):
    """constructor"""
    self.app = _CustomSphinx(srcdir=None, confdir=None, outdir=None,
                             doctreedir=None,
                             buildername='SerializingHTMLBuilder')
    builder = self.app.builder
    builder.fignumbers = {}
    HTMLWriter.__init__(self, builder)
    self.translator_class = HTMLTranslatorWithCustomDirectives
    self.builder.translator_class = self.translator_class
    self.builder.secnumbers = {}
    self.builder._function_node = []
    self.builder.current_docname = None
def render_partial(self, node):
    # type: (nodes.Node) -> Dict[str, str]
    """Utility: Render a lone doctree node."""
    if node is None:
        return {'fragment': ''}
    doc = new_document('<partial node>')
    doc.append(node)

    if self._publisher is None:
        self._publisher = Publisher(
            source_class=DocTreeInput,
            destination_class=StringOutput)
        self._publisher.set_components('standalone',
                                       'restructuredtext', 'pseudoxml')

    pub = self._publisher

    pub.reader = DoctreeReader()
    pub.writer = HTMLWriter(self)
    pub.process_programmatic_settings(
        None, {'output_encoding': 'unicode'}, None)
    pub.set_source(doc, None)
    pub.set_destination(None, None)
    pub.publish()
    return pub.writer.parts
def get_sphinx():
    sphinx = getattr(local_data, 'sphinx', None)
    if sphinx is None:
        sphinx = Sphinx(tempdir, tempdir, tempdir, tempdir, 'json',
                        status=None, warning=None)
        sphinx.builder.translator_class = CustomHTMLTranslator
        sphinx.env.patch_lookup_functions()
        sphinx.env.temp_data['docname'] = 'text'
        sphinx.env.temp_data['default_domain'] = 'py'

        pub = Publisher(reader=None, parser=None,
                        writer=HTMLWriter(sphinx.builder),
                        source_class=io.StringInput,
                        destination_class=io.NullOutput)
        pub.set_components('standalone', 'restructuredtext', None)
        pub.process_programmatic_settings(None, sphinx.env.settings, None)
        pub.set_destination(None, None)
        sphinx.publisher = pub

        local_data.sphinx = sphinx
    return sphinx, sphinx.publisher
def render_partial(self, node): """Utility: Render a lone doctree node.""" doc = new_document('<partial node>') doc.append(node) return publish_parts(doc, source_class=DocTreeInput, reader=DoctreeReader(), writer=HTMLWriter(self), settings_overrides={'output_encoding': 'unicode'})
def render_partial(self, node: Node) -> Dict[str, str]:
    """Utility: Render a lone doctree node."""
    if node is None:
        return {'fragment': ''}
    doc = new_document('<partial node>')
    doc.append(node)

    writer = HTMLWriter(self)
    return publish_parts(reader_name='doctree',
                         writer=writer,
                         source_class=DocTreeInput,
                         settings_overrides={'output_encoding': 'unicode'},
                         source=doc)
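All of the render_partial variants above do the same thing: wrap a single doctree node in a fresh docutils document and push it through an HTML writer via the "doctree" reader, then return the writer's parts dict. A minimal standalone sketch of that round trip is shown below; it deliberately uses the plain docutils html4css1 Writer instead of Sphinx's HTMLWriter (which needs a live builder), and the sample node and names are illustrative only.

from docutils import nodes
from docutils.core import publish_parts
from docutils.frontend import OptionParser
from docutils.io import DocTreeInput
from docutils.parsers.rst import Parser
from docutils.utils import new_document
from docutils.writers.html4css1 import Writer

# A tiny hand-built doctree node: a title with plain text (illustrative).
node = nodes.title('', 'Hello, partial rendering')

# new_document() needs a settings object; include the writer in the
# components so the HTML translator finds all the settings it expects.
settings = OptionParser(components=(Parser, Writer)).get_default_values()
doc = new_document('<partial node>', settings)
doc.append(node)

# Re-read the wrapped node through the "doctree" reader, as the
# render_partial() variants above do, and pick out the rendered part.
parts = publish_parts(source=doc,
                      source_class=DocTreeInput,
                      reader_name='doctree',
                      writer=Writer(),
                      settings_overrides={'output_encoding': 'unicode'})
print(parts['title'])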
def _write_html(document) -> str:
    builder = _get_builder()
    destination = StringOutput(encoding="utf-8")
    docwriter = HTMLWriter(builder)
    docsettings = OptionParser(defaults=builder.env.settings,
                               components=(docwriter,),
                               read_config_files=True).get_default_values()
    docsettings.compact_lists = True
    document.settings = docsettings
    docwriter.write(document, destination)
    docwriter.assemble_parts()
    return docwriter.parts["body"]
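_write_html() expects an already-parsed doctree. A possible way to produce one, sketched below with plain docutils; _write_html() and _get_builder() are the helpers from the snippet above and are assumed to be importable from the surrounding project, which must have a configured Sphinx builder behind them.

from docutils.frontend import OptionParser
from docutils.parsers.rst import Parser
from docutils.utils import new_document

# Parse a small reStructuredText snippet into a doctree, then render it.
rst_source = "A *very* small document.\n"

parser = Parser()
settings = OptionParser(components=(Parser,)).get_default_values()
document = new_document('<snippet>', settings)
parser.parse(rst_source, document)

# _write_html() swaps in builder-derived settings before writing.
body_html = _write_html(document)
print(body_html)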
class StandaloneHTMLBuilder(Builder): """ Builds standalone HTML docs. """ name = 'html' format = 'html' copysource = True allow_parallel = True out_suffix = '.html' link_suffix = '.html' # defaults to matching out_suffix indexer_format = js_index indexer_dumps_unicode = True supported_image_types = ['image/svg+xml', 'image/png', 'image/gif', 'image/jpeg'] searchindex_filename = 'searchindex.js' add_permalinks = True embedded = False # for things like HTML help or Qt help: suppresses sidebar search = True # for things like HTML help and Apple help: suppress search # This is a class attribute because it is mutated by Sphinx.add_javascript. script_files = ['_static/jquery.js', '_static/underscore.js', '_static/doctools.js'] # Dito for this one. css_files = [] default_sidebars = ['localtoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html'] # cached publisher object for snippets _publisher = None def init(self): # a hash of all config values that, if changed, cause a full rebuild self.config_hash = '' self.tags_hash = '' # basename of images directory self.imagedir = '_images' # section numbers for headings in the currently visited document self.secnumbers = {} # currently written docname self.current_docname = None self.init_templates() self.init_highlighter() self.init_translator_class() if self.config.html_file_suffix is not None: self.out_suffix = self.config.html_file_suffix if self.config.html_link_suffix is not None: self.link_suffix = self.config.html_link_suffix else: self.link_suffix = self.out_suffix if self.config.language is not None: if self._get_translations_js(): self.script_files.append('_static/translations.js') def _get_translations_js(self): candidates = [path.join(package_dir, 'locale', self.config.language, 'LC_MESSAGES', 'sphinx.js'), path.join(sys.prefix, 'share/sphinx/locale', self.config.language, 'sphinx.js')] + \ [path.join(dir, self.config.language, 'LC_MESSAGES', 'sphinx.js') for dir in self.config.locale_dirs] for jsfile in candidates: if path.isfile(jsfile): return jsfile return None def get_theme_config(self): return self.config.html_theme, self.config.html_theme_options def init_templates(self): Theme.init_themes(self.confdir, self.config.html_theme_path, warn=self.warn) themename, themeoptions = self.get_theme_config() self.theme = Theme(themename, warn=self.warn) self.theme_options = themeoptions.copy() self.create_template_bridge() self.templates.init(self, self.theme) def init_highlighter(self): # determine Pygments style and create the highlighter if self.config.pygments_style is not None: style = self.config.pygments_style elif self.theme: style = self.theme.get_confstr('theme', 'pygments_style', 'none') else: style = 'sphinx' self.highlighter = PygmentsBridge('html', style, self.config.trim_doctest_flags) def init_translator_class(self): if self.translator_class is not None: pass elif self.config.html_translator_class: self.translator_class = self.app.import_object( self.config.html_translator_class, 'html_translator_class setting') elif self.config.html_use_smartypants: self.translator_class = SmartyPantsHTMLTranslator else: self.translator_class = HTMLTranslator def get_outdated_docs(self): cfgdict = dict((name, self.config[name]) for (name, desc) in iteritems(self.config.values) if desc[1] == 'html') self.config_hash = get_stable_hash(cfgdict) self.tags_hash = get_stable_hash(sorted(self.tags)) old_config_hash = old_tags_hash = '' try: fp = open(path.join(self.outdir, '.buildinfo')) try: version = fp.readline() if version.rstrip() != '# 
Sphinx build info version 1': raise ValueError fp.readline() # skip commentary cfg, old_config_hash = fp.readline().strip().split(': ') if cfg != 'config': raise ValueError tag, old_tags_hash = fp.readline().strip().split(': ') if tag != 'tags': raise ValueError finally: fp.close() except ValueError: self.warn('unsupported build info format in %r, building all' % path.join(self.outdir, '.buildinfo')) except Exception: pass if old_config_hash != self.config_hash or \ old_tags_hash != self.tags_hash: for docname in self.env.found_docs: yield docname return if self.templates: template_mtime = self.templates.newest_template_mtime() else: template_mtime = 0 for docname in self.env.found_docs: if docname not in self.env.all_docs: yield docname continue targetname = self.get_outfilename(docname) try: targetmtime = path.getmtime(targetname) except Exception: targetmtime = 0 try: srcmtime = max(path.getmtime(self.env.doc2path(docname)), template_mtime) if srcmtime > targetmtime: yield docname except EnvironmentError: # source doesn't exist anymore pass def render_partial(self, node): """Utility: Render a lone doctree node.""" if node is None: return {'fragment': ''} doc = new_document(b'<partial node>') doc.append(node) if self._publisher is None: self._publisher = Publisher( source_class = DocTreeInput, destination_class=StringOutput) self._publisher.set_components('standalone', 'restructuredtext', 'pseudoxml') pub = self._publisher pub.reader = DoctreeReader() pub.writer = HTMLWriter(self) pub.process_programmatic_settings( None, {'output_encoding': 'unicode'}, None) pub.set_source(doc, None) pub.set_destination(None, None) pub.publish() return pub.writer.parts def prepare_writing(self, docnames): # create the search indexer self.indexer = None if self.search: from sphinx.search import IndexBuilder, languages lang = self.config.html_search_language or self.config.language if not lang or lang not in languages: lang = 'en' self.indexer = IndexBuilder(self.env, lang, self.config.html_search_options, self.config.html_search_scorer) self.load_indexer(docnames) self.docwriter = HTMLWriter(self) self.docsettings = OptionParser( defaults=self.env.settings, components=(self.docwriter,), read_config_files=True).get_default_values() self.docsettings.compact_lists = bool(self.config.html_compact_lists) # determine the additional indices to include self.domain_indices = [] # html_domain_indices can be False/True or a list of index names indices_config = self.config.html_domain_indices if indices_config: for domain_name in sorted(self.env.domains): domain = self.env.domains[domain_name] for indexcls in domain.indices: indexname = '%s-%s' % (domain.name, indexcls.name) if isinstance(indices_config, list): if indexname not in indices_config: continue # deprecated config value if indexname == 'py-modindex' and \ not self.config.html_use_modindex: continue content, collapse = indexcls(domain).generate() if content: self.domain_indices.append( (indexname, indexcls, content, collapse)) # format the "last updated on" string, only once is enough since it # typically doesn't include the time of day lufmt = self.config.html_last_updated_fmt if lufmt is not None: self.last_updated = format_date(lufmt or _('MMM dd, YYYY'), language=self.config.language) else: self.last_updated = None logo = self.config.html_logo and \ path.basename(self.config.html_logo) or '' favicon = self.config.html_favicon and \ path.basename(self.config.html_favicon) or '' if favicon and os.path.splitext(favicon)[1] != '.ico': 
self.warn('html_favicon is not an .ico file') if not isinstance(self.config.html_use_opensearch, string_types): self.warn('html_use_opensearch config value must now be a string') self.relations = self.env.collect_relations() rellinks = [] if self.get_builder_config('use_index', 'html'): rellinks.append(('genindex', _('General Index'), 'I', _('index'))) for indexname, indexcls, content, collapse in self.domain_indices: # if it has a short name if indexcls.shortname: rellinks.append((indexname, indexcls.localname, '', indexcls.shortname)) if self.config.html_style is not None: stylename = self.config.html_style elif self.theme: stylename = self.theme.get_confstr('theme', 'stylesheet') else: stylename = 'default.css' self.globalcontext = dict( embedded = self.embedded, project = self.config.project, release = self.config.release, version = self.config.version, last_updated = self.last_updated, copyright = self.config.copyright, master_doc = self.config.master_doc, use_opensearch = self.config.html_use_opensearch, docstitle = self.config.html_title, shorttitle = self.config.html_short_title, show_copyright = self.config.html_show_copyright, show_sphinx = self.config.html_show_sphinx, has_source = self.config.html_copy_source, show_source = self.config.html_show_sourcelink, file_suffix = self.out_suffix, script_files = self.script_files, language = self.config.language, css_files = self.css_files, sphinx_version = __display_version__, style = stylename, rellinks = rellinks, builder = self.name, parents = [], logo = logo, favicon = favicon, ) if self.theme: self.globalcontext.update( ('theme_' + key, val) for (key, val) in iteritems(self.theme.get_options(self.theme_options))) self.globalcontext.update(self.config.html_context) def get_doc_context(self, docname, body, metatags): """Collect items for the template context of a page.""" # find out relations prev = next = None parents = [] rellinks = self.globalcontext['rellinks'][:] related = self.relations.get(docname) titles = self.env.titles if related and related[2]: try: next = { 'link': self.get_relative_uri(docname, related[2]), 'title': self.render_partial(titles[related[2]])['title'] } rellinks.append((related[2], next['title'], 'N', _('next'))) except KeyError: next = None if related and related[1]: try: prev = { 'link': self.get_relative_uri(docname, related[1]), 'title': self.render_partial(titles[related[1]])['title'] } rellinks.append((related[1], prev['title'], 'P', _('previous'))) except KeyError: # the relation is (somehow) not in the TOC tree, handle # that gracefully prev = None while related and related[0]: try: parents.append( {'link': self.get_relative_uri(docname, related[0]), 'title': self.render_partial(titles[related[0]])['title']}) except KeyError: pass related = self.relations.get(related[0]) if parents: # remove link to the master file; we have a generic # "back to index" link already parents.pop() parents.reverse() # title rendered as HTML title = self.env.longtitles.get(docname) title = title and self.render_partial(title)['title'] or '' # the name for the copied source sourcename = self.config.html_copy_source and docname + '.txt' or '' # metadata for the document meta = self.env.metadata.get(docname) # Suffix for the document source_suffix = '.' 
+ self.env.doc2path(docname).split('.')[-1] # local TOC and global TOC tree self_toc = self.env.get_toc_for(docname, self) toc = self.render_partial(self_toc)['fragment'] return dict( parents = parents, prev = prev, next = next, title = title, meta = meta, body = body, metatags = metatags, rellinks = rellinks, sourcename = sourcename, toc = toc, # only display a TOC if there's more than one item to show display_toc = (self.env.toc_num_entries[docname] > 1), page_source_suffix = source_suffix, ) def write_doc(self, docname, doctree): destination = StringOutput(encoding='utf-8') doctree.settings = self.docsettings self.secnumbers = self.env.toc_secnumbers.get(docname, {}) self.fignumbers = self.env.toc_fignumbers.get(docname, {}) self.imgpath = relative_uri(self.get_target_uri(docname), '_images') self.dlpath = relative_uri(self.get_target_uri(docname), '_downloads') self.current_docname = docname self.docwriter.write(doctree, destination) self.docwriter.assemble_parts() body = self.docwriter.parts['fragment'] metatags = self.docwriter.clean_meta ctx = self.get_doc_context(docname, body, metatags) self.handle_page(docname, ctx, event_arg=doctree) def write_doc_serialized(self, docname, doctree): self.imgpath = relative_uri(self.get_target_uri(docname), self.imagedir) self.post_process_images(doctree) title = self.env.longtitles.get(docname) title = title and self.render_partial(title)['title'] or '' self.index_page(docname, doctree, title) def finish(self): self.finish_tasks.add_task(self.gen_indices) self.finish_tasks.add_task(self.gen_additional_pages) self.finish_tasks.add_task(self.copy_image_files) self.finish_tasks.add_task(self.copy_download_files) self.finish_tasks.add_task(self.copy_static_files) self.finish_tasks.add_task(self.copy_extra_files) self.finish_tasks.add_task(self.write_buildinfo) # dump the search index self.handle_finish() def gen_indices(self): self.info(bold('generating indices...'), nonl=1) # the global general index if self.get_builder_config('use_index', 'html'): self.write_genindex() # the global domain-specific indices self.write_domain_indices() self.info() def gen_additional_pages(self): # pages from extensions for pagelist in self.app.emit('html-collect-pages'): for pagename, context, template in pagelist: self.handle_page(pagename, context, template) self.info(bold('writing additional pages...'), nonl=1) # additional pages from conf.py for pagename, template in self.config.html_additional_pages.items(): self.info(' '+pagename, nonl=1) self.handle_page(pagename, {}, template) # the search page if self.search: self.info(' search', nonl=1) self.handle_page('search', {}, 'search.html') # the opensearch xml file if self.config.html_use_opensearch and self.search: self.info(' opensearch', nonl=1) fn = path.join(self.outdir, '_static', 'opensearch.xml') self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn) self.info() def write_genindex(self): # the total count of lines for each index letter, used to distribute # the entries into two columns genindex = self.env.create_index(self) indexcounts = [] for _k, entries in genindex: indexcounts.append(sum(1 + len(subitems) for _, (_, subitems, _) in entries)) genindexcontext = dict( genindexentries = genindex, genindexcounts = indexcounts, split_index = self.config.html_split_index, ) self.info(' genindex', nonl=1) if self.config.html_split_index: self.handle_page('genindex', genindexcontext, 'genindex-split.html') self.handle_page('genindex-all', genindexcontext, 'genindex.html') for (key, entries), count 
in zip(genindex, indexcounts): ctx = {'key': key, 'entries': entries, 'count': count, 'genindexentries': genindex} self.handle_page('genindex-' + key, ctx, 'genindex-single.html') else: self.handle_page('genindex', genindexcontext, 'genindex.html') def write_domain_indices(self): for indexname, indexcls, content, collapse in self.domain_indices: indexcontext = dict( indextitle = indexcls.localname, content = content, collapse_index = collapse, ) self.info(' ' + indexname, nonl=1) self.handle_page(indexname, indexcontext, 'domainindex.html') def copy_image_files(self): # copy image files if self.images: ensuredir(path.join(self.outdir, self.imagedir)) for src in self.app.status_iterator(self.images, 'copying images... ', brown, len(self.images)): dest = self.images[src] try: copyfile(path.join(self.srcdir, src), path.join(self.outdir, self.imagedir, dest)) except Exception as err: self.warn('cannot copy image file %r: %s' % (path.join(self.srcdir, src), err)) def copy_download_files(self): def to_relpath(f): return relative_path(self.srcdir, f) # copy downloadable files if self.env.dlfiles: ensuredir(path.join(self.outdir, '_downloads')) for src in self.app.status_iterator(self.env.dlfiles, 'copying downloadable files... ', brown, len(self.env.dlfiles), stringify_func=to_relpath): dest = self.env.dlfiles[src][1] try: copyfile(path.join(self.srcdir, src), path.join(self.outdir, '_downloads', dest)) except Exception as err: self.warn('cannot copy downloadable file %r: %s' % (path.join(self.srcdir, src), err)) def copy_static_files(self): # copy static files self.info(bold('copying static files... '), nonl=True) ensuredir(path.join(self.outdir, '_static')) # first, create pygments style file f = open(path.join(self.outdir, '_static', 'pygments.css'), 'w') f.write(self.highlighter.get_stylesheet()) f.close() # then, copy translations JavaScript file if self.config.language is not None: jsfile = self._get_translations_js() if jsfile: copyfile(jsfile, path.join(self.outdir, '_static', 'translations.js')) # copy non-minified stemmer JavaScript file if self.indexer is not None: jsfile = self.indexer.get_js_stemmer_rawcode() if jsfile: copyfile(jsfile, path.join(self.outdir, '_static', '_stemmer.js')) ctx = self.globalcontext.copy() # add context items for search function used in searchtools.js_t if self.indexer is not None: ctx.update(self.indexer.context_for_searchtool()) # then, copy over theme-supplied static files if self.theme: themeentries = [path.join(themepath, 'static') for themepath in self.theme.get_dirchain()[::-1]] for entry in themeentries: copy_static_entry(entry, path.join(self.outdir, '_static'), self, ctx) # then, copy over all user-supplied static files staticentries = [path.join(self.confdir, spath) for spath in self.config.html_static_path] matchers = compile_matchers(self.config.exclude_patterns) for entry in staticentries: if not path.exists(entry): self.warn('html_static_path entry %r does not exist' % entry) continue copy_static_entry(entry, path.join(self.outdir, '_static'), self, ctx, exclude_matchers=matchers) # copy logo and favicon files if not already in static path if self.config.html_logo: logobase = path.basename(self.config.html_logo) logotarget = path.join(self.outdir, '_static', logobase) if not path.isfile(path.join(self.confdir, self.config.html_logo)): self.warn('logo file %r does not exist' % self.config.html_logo) elif not path.isfile(logotarget): copyfile(path.join(self.confdir, self.config.html_logo), logotarget) if self.config.html_favicon: iconbase = 
path.basename(self.config.html_favicon) icontarget = path.join(self.outdir, '_static', iconbase) if not path.isfile(path.join(self.confdir, self.config.html_favicon)): self.warn('favicon file %r does not exist' % self.config.html_favicon) elif not path.isfile(icontarget): copyfile(path.join(self.confdir, self.config.html_favicon), icontarget) self.info('done') def copy_extra_files(self): # copy html_extra_path files self.info(bold('copying extra files... '), nonl=True) extraentries = [path.join(self.confdir, epath) for epath in self.config.html_extra_path] matchers = compile_matchers(self.config.exclude_patterns) for entry in extraentries: if not path.exists(entry): self.warn('html_extra_path entry %r does not exist' % entry) continue copy_extra_entry(entry, self.outdir, matchers) self.info('done') def write_buildinfo(self): # write build info file fp = open(path.join(self.outdir, '.buildinfo'), 'w') try: fp.write('# Sphinx build info version 1\n' '# This file hashes the configuration used when building' ' these files. When it is not found, a full rebuild will' ' be done.\nconfig: %s\ntags: %s\n' % (self.config_hash, self.tags_hash)) finally: fp.close() def cleanup(self): # clean up theme stuff if self.theme: self.theme.cleanup() def post_process_images(self, doctree): """Pick the best candidate for an image and link down-scaled images to their high res version. """ Builder.post_process_images(self, doctree) if self.config.html_scaled_image_link: for node in doctree.traverse(nodes.image): scale_keys = ('scale', 'width', 'height') if not any((key in node) for key in scale_keys) or \ isinstance(node.parent, nodes.reference): # docutils does unfortunately not preserve the # ``target`` attribute on images, so we need to check # the parent node here. continue uri = node['uri'] reference = nodes.reference('', '', internal=True) if uri in self.images: reference['refuri'] = posixpath.join(self.imgpath, self.images[uri]) else: reference['refuri'] = uri node.replace_self(reference) reference.append(node) def load_indexer(self, docnames): keep = set(self.env.all_docs) - set(docnames) try: searchindexfn = path.join(self.outdir, self.searchindex_filename) if self.indexer_dumps_unicode: f = codecs.open(searchindexfn, 'r', encoding='utf-8') else: f = open(searchindexfn, 'rb') try: self.indexer.load(f, self.indexer_format) finally: f.close() except (IOError, OSError, ValueError): if keep: self.warn('search index couldn\'t be loaded, but not all ' 'documents will be built: the index will be ' 'incomplete.') # delete all entries for files that will be rebuilt self.indexer.prune(keep) def index_page(self, pagename, doctree, title): # only index pages with title if self.indexer is not None and title: self.indexer.feed(pagename, title, doctree) def _get_local_toctree(self, docname, collapse=True, **kwds): if 'includehidden' not in kwds: kwds['includehidden'] = False return self.render_partial(self.env.get_toctree_for( docname, self, collapse, **kwds))['fragment'] def get_outfilename(self, pagename): return path.join(self.outdir, os_path(pagename) + self.out_suffix) def add_sidebars(self, pagename, ctx): def has_wildcard(pattern): return any(char in pattern for char in '*?[') sidebars = None matched = None customsidebar = None for pattern, patsidebars in iteritems(self.config.html_sidebars): if patmatch(pagename, pattern): if matched: if has_wildcard(pattern): # warn if both patterns contain wildcards if has_wildcard(matched): self.warn('page %s matches two patterns in ' 'html_sidebars: %r and %r' % (pagename, 
matched, pattern)) # else the already matched pattern is more specific # than the present one, because it contains no wildcard continue matched = pattern sidebars = patsidebars if sidebars is None: # keep defaults pass elif isinstance(sidebars, string_types): # 0.x compatible mode: insert custom sidebar before searchbox customsidebar = sidebars sidebars = None ctx['sidebars'] = sidebars ctx['customsidebar'] = customsidebar # --------- these are overwritten by the serialization builder def get_target_uri(self, docname, typ=None): return docname + self.link_suffix def handle_page(self, pagename, addctx, templatename='page.html', outfilename=None, event_arg=None): ctx = self.globalcontext.copy() # current_page_name is backwards compatibility ctx['pagename'] = ctx['current_page_name'] = pagename default_baseuri = self.get_target_uri(pagename) # in the singlehtml builder, default_baseuri still contains an #anchor # part, which relative_uri doesn't really like... default_baseuri = default_baseuri.rsplit('#', 1)[0] def pathto(otheruri, resource=False, baseuri=default_baseuri): if resource and '://' in otheruri: # allow non-local resources given by scheme return otheruri elif not resource: otheruri = self.get_target_uri(otheruri) uri = relative_uri(baseuri, otheruri) or '#' return uri ctx['pathto'] = pathto ctx['hasdoc'] = lambda name: name in self.env.all_docs if self.name != 'htmlhelp': ctx['encoding'] = encoding = self.config.html_output_encoding else: ctx['encoding'] = encoding = self.encoding ctx['toctree'] = lambda **kw: self._get_local_toctree(pagename, **kw) self.add_sidebars(pagename, ctx) ctx.update(addctx) newtmpl = self.app.emit_firstresult('html-page-context', pagename, templatename, ctx, event_arg) if newtmpl: templatename = newtmpl try: output = self.templates.render(templatename, ctx) except UnicodeError: self.warn("a Unicode error occurred when rendering the page %s. " "Please make sure all config values that contain " "non-ASCII content are Unicode strings." % pagename) return if not outfilename: outfilename = self.get_outfilename(pagename) # outfilename's path is in general different from self.outdir ensuredir(path.dirname(outfilename)) try: f = codecs.open(outfilename, 'w', encoding, 'xmlcharrefreplace') try: f.write(output) finally: f.close() except (IOError, OSError) as err: self.warn("error writing file %s: %s" % (outfilename, err)) if self.copysource and ctx.get('sourcename'): # copy the source file for the "show source" link source_name = path.join(self.outdir, '_sources', os_path(ctx['sourcename'])) ensuredir(path.dirname(source_name)) copyfile(self.env.doc2path(pagename), source_name) def handle_finish(self): if self.indexer: self.finish_tasks.add_task(self.dump_search_index) self.finish_tasks.add_task(self.dump_inventory) def dump_inventory(self): self.info(bold('dumping object inventory... 
'), nonl=True) f = open(path.join(self.outdir, INVENTORY_FILENAME), 'wb') try: f.write((u'# Sphinx inventory version 2\n' u'# Project: %s\n' u'# Version: %s\n' u'# The remainder of this file is compressed using zlib.\n' % (self.config.project, self.config.version)).encode('utf-8')) compressor = zlib.compressobj(9) for domainname, domain in sorted(self.env.domains.items()): for name, dispname, type, docname, anchor, prio in \ sorted(domain.get_objects()): if anchor.endswith(name): # this can shorten the inventory by as much as 25% anchor = anchor[:-len(name)] + '$' uri = self.get_target_uri(docname) if anchor: uri += '#' + anchor if dispname == name: dispname = u'-' f.write(compressor.compress( (u'%s %s:%s %s %s %s\n' % (name, domainname, type, prio, uri, dispname)).encode('utf-8'))) f.write(compressor.flush()) finally: f.close() self.info('done') def dump_search_index(self): self.info( bold('dumping search index in %s ... ' % self.indexer.label()), nonl=True) self.indexer.prune(self.env.all_docs) searchindexfn = path.join(self.outdir, self.searchindex_filename) # first write to a temporary file, so that if dumping fails, # the existing index won't be overwritten if self.indexer_dumps_unicode: f = codecs.open(searchindexfn + '.tmp', 'w', encoding='utf-8') else: f = open(searchindexfn + '.tmp', 'wb') try: self.indexer.dump(f, self.indexer_format) finally: f.close() movefile(searchindexfn + '.tmp', searchindexfn) self.info('done')
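The .buildinfo round trip in the class above (write_buildinfo() producing the file, get_outdated_docs() parsing it to choose between an incremental and a full rebuild) amounts to a four-line text file. The hash values below are placeholders, not real output:

# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
config: <stable hash of all html config values>
tags: <stable hash of the sorted tags>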
def prepare_writing(self, docnames): # create the search indexer from sphinx.search import IndexBuilder, languages lang = self.config.html_search_language or self.config.language if not lang or lang not in languages: lang = 'en' self.indexer = IndexBuilder(self.env, lang, self.config.html_search_options, self.config.html_search_scorer) self.load_indexer(docnames) self.docwriter = HTMLWriter(self) self.docsettings = OptionParser( defaults=self.env.settings, components=(self.docwriter, )).get_default_values() self.docsettings.compact_lists = bool(self.config.html_compact_lists) # determine the additional indices to include self.domain_indices = [] # html_domain_indices can be False/True or a list of index names indices_config = self.config.html_domain_indices if indices_config: for domain in self.env.domains.itervalues(): for indexcls in domain.indices: indexname = '%s-%s' % (domain.name, indexcls.name) if isinstance(indices_config, list): if indexname not in indices_config: continue # deprecated config value if indexname == 'py-modindex' and \ not self.config.html_use_modindex: continue content, collapse = indexcls(domain).generate() if content: self.domain_indices.append( (indexname, indexcls, content, collapse)) # format the "last updated on" string, only once is enough since it # typically doesn't include the time of day lufmt = self.config.html_last_updated_fmt if lufmt is not None: self.last_updated = ustrftime(lufmt or _('%b %d, %Y')) else: self.last_updated = None logo = self.config.html_logo and \ path.basename(self.config.html_logo) or '' favicon = self.config.html_favicon and \ path.basename(self.config.html_favicon) or '' if favicon and os.path.splitext(favicon)[1] != '.ico': self.warn('html_favicon is not an .ico file') if not isinstance(self.config.html_use_opensearch, basestring): self.warn('html_use_opensearch config value must now be a string') self.relations = self.env.collect_relations() rellinks = [] if self.get_builder_config('use_index', 'html'): rellinks.append(('genindex', _('General Index'), 'I', _('index'))) for indexname, indexcls, content, collapse in self.domain_indices: # if it has a short name if indexcls.shortname: rellinks.append( (indexname, indexcls.localname, '', indexcls.shortname)) if self.config.html_style is not None: stylename = self.config.html_style elif self.theme: stylename = self.theme.get_confstr('theme', 'stylesheet') else: stylename = 'default.css' self.globalcontext = dict( embedded=self.embedded, project=self.config.project, release=self.config.release, version=self.config.version, last_updated=self.last_updated, copyright=self.config.copyright, master_doc=self.config.master_doc, use_opensearch=self.config.html_use_opensearch, docstitle=self.config.html_title, shorttitle=self.config.html_short_title, show_copyright=self.config.html_show_copyright, show_sphinx=self.config.html_show_sphinx, has_source=self.config.html_copy_source, show_source=self.config.html_show_sourcelink, file_suffix=self.out_suffix, script_files=self.script_files, css_files=self.css_files, sphinx_version=__version__, style=stylename, rellinks=rellinks, builder=self.name, parents=[], logo=logo, favicon=favicon, ) if self.theme: self.globalcontext.update(('theme_' + key, val) for ( key, val) in self.theme.get_options(self.theme_options).iteritems()) self.globalcontext.update(self.config.html_context)
class StandaloneHTMLBuilder(Builder): """ Builds standalone HTML docs. """ name = 'html' format = 'html' copysource = True allow_parallel = True out_suffix = '.html' link_suffix = '.html' # defaults to matching out_suffix indexer_format = js_index indexer_dumps_unicode = True supported_image_types = ['image/svg+xml', 'image/png', 'image/gif', 'image/jpeg'] searchindex_filename = 'searchindex.js' add_permalinks = True embedded = False # for things like HTML help or Qt help: suppresses sidebar # This is a class attribute because it is mutated by Sphinx.add_javascript. script_files = ['_static/jquery.js', '_static/underscore.js', '_static/doctools.js'] # Dito for this one. css_files = [] default_sidebars = ['localtoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html'] # cached publisher object for snippets _publisher = None def init(self): # a hash of all config values that, if changed, cause a full rebuild self.config_hash = '' self.tags_hash = '' # section numbers for headings in the currently visited document self.secnumbers = {} # currently written docname self.current_docname = None self.init_templates() self.init_highlighter() self.init_translator_class() if self.config.html_file_suffix is not None: self.out_suffix = self.config.html_file_suffix if self.config.html_link_suffix is not None: self.link_suffix = self.config.html_link_suffix else: self.link_suffix = self.out_suffix if self.config.language is not None: if self._get_translations_js(): self.script_files.append('_static/translations.js') def _get_translations_js(self): candidates = [path.join(package_dir, 'locale', self.config.language, 'LC_MESSAGES', 'sphinx.js'), path.join(sys.prefix, 'share/sphinx/locale', self.config.language, 'sphinx.js')] + \ [path.join(dir, self.config.language, 'LC_MESSAGES', 'sphinx.js') for dir in self.config.locale_dirs] for jsfile in candidates: if path.isfile(jsfile): return jsfile return None def get_theme_config(self): return self.config.html_theme, self.config.html_theme_options def init_templates(self): Theme.init_themes(self.confdir, self.config.html_theme_path, warn=self.warn) themename, themeoptions = self.get_theme_config() self.theme = Theme(themename) self.theme_options = themeoptions.copy() self.create_template_bridge() self.templates.init(self, self.theme) def init_highlighter(self): # determine Pygments style and create the highlighter if self.config.pygments_style is not None: style = self.config.pygments_style elif self.theme: style = self.theme.get_confstr('theme', 'pygments_style', 'none') else: style = 'sphinx' self.highlighter = PygmentsBridge('html', style, self.config.trim_doctest_flags) def init_translator_class(self): if self.config.html_translator_class: self.translator_class = self.app.import_object( self.config.html_translator_class, 'html_translator_class setting') elif self.config.html_use_smartypants: self.translator_class = SmartyPantsHTMLTranslator else: self.translator_class = HTMLTranslator def get_outdated_docs(self): cfgdict = dict((name, self.config[name]) for (name, desc) in self.config.values.iteritems() if desc[1] == 'html') self.config_hash = get_stable_hash(cfgdict) self.tags_hash = get_stable_hash(sorted(self.tags)) old_config_hash = old_tags_hash = '' try: fp = open(path.join(self.outdir, '.buildinfo')) try: version = fp.readline() if version.rstrip() != '# Sphinx build info version 1': raise ValueError fp.readline() # skip commentary cfg, old_config_hash = fp.readline().strip().split(': ') if cfg != 'config': raise ValueError tag, old_tags_hash = 
fp.readline().strip().split(': ') if tag != 'tags': raise ValueError finally: fp.close() except ValueError: self.warn('unsupported build info format in %r, building all' % path.join(self.outdir, '.buildinfo')) except Exception: pass if old_config_hash != self.config_hash or \ old_tags_hash != self.tags_hash: for docname in self.env.found_docs: yield docname return if self.templates: template_mtime = self.templates.newest_template_mtime() else: template_mtime = 0 for docname in self.env.found_docs: if docname not in self.env.all_docs: yield docname continue targetname = self.get_outfilename(docname) try: targetmtime = path.getmtime(targetname) except Exception: targetmtime = 0 try: srcmtime = max(path.getmtime(self.env.doc2path(docname)), template_mtime) if srcmtime > targetmtime: yield docname except EnvironmentError: # source doesn't exist anymore pass def render_partial(self, node): """Utility: Render a lone doctree node.""" if node is None: return {'fragment': ''} doc = new_document(b('<partial node>')) doc.append(node) if self._publisher is None: self._publisher = Publisher( source_class = DocTreeInput, destination_class=StringOutput) self._publisher.set_components('standalone', 'restructuredtext', 'pseudoxml') pub = self._publisher pub.reader = DoctreeReader() pub.writer = HTMLWriter(self) pub.process_programmatic_settings( None, {'output_encoding': 'unicode'}, None) pub.set_source(doc, None) pub.set_destination(None, None) pub.publish() return pub.writer.parts def prepare_writing(self, docnames): # create the search indexer from sphinx.search import IndexBuilder, languages lang = self.config.html_search_language or self.config.language if not lang or lang not in languages: lang = 'en' self.indexer = IndexBuilder(self.env, lang, self.config.html_search_options, self.config.html_search_scorer) self.load_indexer(docnames) self.docwriter = HTMLWriter(self) self.docsettings = OptionParser( defaults=self.env.settings, components=(self.docwriter,), read_config_files=True).get_default_values() self.docsettings.compact_lists = bool(self.config.html_compact_lists) # determine the additional indices to include self.domain_indices = [] # html_domain_indices can be False/True or a list of index names indices_config = self.config.html_domain_indices if indices_config: for domain in self.env.domains.itervalues(): for indexcls in domain.indices: indexname = '%s-%s' % (domain.name, indexcls.name) if isinstance(indices_config, list): if indexname not in indices_config: continue # deprecated config value if indexname == 'py-modindex' and \ not self.config.html_use_modindex: continue content, collapse = indexcls(domain).generate() if content: self.domain_indices.append( (indexname, indexcls, content, collapse)) # format the "last updated on" string, only once is enough since it # typically doesn't include the time of day lufmt = self.config.html_last_updated_fmt if lufmt is not None: self.last_updated = ustrftime(lufmt or _('%b %d, %Y')) else: self.last_updated = None logo = self.config.html_logo and \ path.basename(self.config.html_logo) or '' favicon = self.config.html_favicon and \ path.basename(self.config.html_favicon) or '' if favicon and os.path.splitext(favicon)[1] != '.ico': self.warn('html_favicon is not an .ico file') if not isinstance(self.config.html_use_opensearch, basestring): self.warn('html_use_opensearch config value must now be a string') self.relations = self.env.collect_relations() rellinks = [] if self.get_builder_config('use_index', 'html'): rellinks.append(('genindex', _('General 
Index'), 'I', _('index'))) for indexname, indexcls, content, collapse in self.domain_indices: # if it has a short name if indexcls.shortname: rellinks.append((indexname, indexcls.localname, '', indexcls.shortname)) if self.config.html_style is not None: stylename = self.config.html_style elif self.theme: stylename = self.theme.get_confstr('theme', 'stylesheet') else: stylename = 'default.css' self.globalcontext = dict( embedded = self.embedded, project = self.config.project, release = self.config.release, version = self.config.version, last_updated = self.last_updated, copyright = self.config.copyright, master_doc = self.config.master_doc, use_opensearch = self.config.html_use_opensearch, docstitle = self.config.html_title, shorttitle = self.config.html_short_title, show_copyright = self.config.html_show_copyright, show_sphinx = self.config.html_show_sphinx, has_source = self.config.html_copy_source, show_source = self.config.html_show_sourcelink, file_suffix = self.out_suffix, script_files = self.script_files, css_files = self.css_files, sphinx_version = __version__, style = stylename, rellinks = rellinks, builder = self.name, parents = [], logo = logo, favicon = favicon, ) if self.theme: self.globalcontext.update( ('theme_' + key, val) for (key, val) in self.theme.get_options(self.theme_options).iteritems()) self.globalcontext.update(self.config.html_context) def get_doc_context(self, docname, body, metatags): """Collect items for the template context of a page.""" # find out relations prev = next = None parents = [] rellinks = self.globalcontext['rellinks'][:] related = self.relations.get(docname) titles = self.env.titles if related and related[2]: try: next = { 'link': self.get_relative_uri(docname, related[2]), 'title': self.render_partial(titles[related[2]])['title'] } rellinks.append((related[2], next['title'], 'N', _('next'))) except KeyError: next = None if related and related[1]: try: prev = { 'link': self.get_relative_uri(docname, related[1]), 'title': self.render_partial(titles[related[1]])['title'] } rellinks.append((related[1], prev['title'], 'P', _('previous'))) except KeyError: # the relation is (somehow) not in the TOC tree, handle # that gracefully prev = None while related and related[0]: try: parents.append( {'link': self.get_relative_uri(docname, related[0]), 'title': self.render_partial(titles[related[0]])['title']}) except KeyError: pass related = self.relations.get(related[0]) if parents: parents.pop() # remove link to the master file; we have a generic # "back to index" link already parents.reverse() # title rendered as HTML title = self.env.longtitles.get(docname) title = title and self.render_partial(title)['title'] or '' # the name for the copied source sourcename = self.config.html_copy_source and docname + '.txt' or '' # metadata for the document meta = self.env.metadata.get(docname) # local TOC and global TOC tree self_toc = self.env.get_toc_for(docname, self) toc = self.render_partial(self_toc)['fragment'] return dict( parents = parents, prev = prev, next = next, title = title, meta = meta, body = body, metatags = metatags, rellinks = rellinks, sourcename = sourcename, toc = toc, # only display a TOC if there's more than one item to show display_toc = (self.env.toc_num_entries[docname] > 1), ) def write_doc(self, docname, doctree): destination = StringOutput(encoding='utf-8') doctree.settings = self.docsettings self.secnumbers = self.env.toc_secnumbers.get(docname, {}) self.imgpath = relative_uri(self.get_target_uri(docname), '_images') self.dlpath = 
relative_uri(self.get_target_uri(docname), '_downloads') self.current_docname = docname self.docwriter.write(doctree, destination) self.docwriter.assemble_parts() body = self.docwriter.parts['fragment'] metatags = self.docwriter.clean_meta ctx = self.get_doc_context(docname, body, metatags) self.handle_page(docname, ctx, event_arg=doctree) def write_doc_serialized(self, docname, doctree): self.imgpath = relative_uri(self.get_target_uri(docname), '_images') self.post_process_images(doctree) title = self.env.longtitles.get(docname) title = title and self.render_partial(title)['title'] or '' self.index_page(docname, doctree, title) def finish(self): self.info(bold('writing additional files...'), nonl=1) # pages from extensions for pagelist in self.app.emit('html-collect-pages'): for pagename, context, template in pagelist: self.handle_page(pagename, context, template) # the global general index if self.get_builder_config('use_index', 'html'): self.write_genindex() # the global domain-specific indices self.write_domain_indices() # the search page if self.name != 'htmlhelp': self.info(' search', nonl=1) self.handle_page('search', {}, 'search.html') # additional pages from conf.py for pagename, template in self.config.html_additional_pages.items(): self.info(' '+pagename, nonl=1) self.handle_page(pagename, {}, template) if self.config.html_use_opensearch and self.name != 'htmlhelp': self.info(' opensearch', nonl=1) fn = path.join(self.outdir, '_static', 'opensearch.xml') self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn) self.info() self.copy_image_files() self.copy_download_files() self.copy_static_files() self.copy_extra_files() self.write_buildinfo() # dump the search index self.handle_finish() def write_genindex(self): # the total count of lines for each index letter, used to distribute # the entries into two columns genindex = self.env.create_index(self) indexcounts = [] for _, entries in genindex: indexcounts.append(sum(1 + len(subitems) for _, (_, subitems) in entries)) genindexcontext = dict( genindexentries = genindex, genindexcounts = indexcounts, split_index = self.config.html_split_index, ) self.info(' genindex', nonl=1) if self.config.html_split_index: self.handle_page('genindex', genindexcontext, 'genindex-split.html') self.handle_page('genindex-all', genindexcontext, 'genindex.html') for (key, entries), count in zip(genindex, indexcounts): ctx = {'key': key, 'entries': entries, 'count': count, 'genindexentries': genindex} self.handle_page('genindex-' + key, ctx, 'genindex-single.html') else: self.handle_page('genindex', genindexcontext, 'genindex.html') def write_domain_indices(self): for indexname, indexcls, content, collapse in self.domain_indices: indexcontext = dict( indextitle = indexcls.localname, content = content, collapse_index = collapse, ) self.info(' ' + indexname, nonl=1) self.handle_page(indexname, indexcontext, 'domainindex.html') def copy_image_files(self): # copy image files if self.images: ensuredir(path.join(self.outdir, '_images')) for src in self.status_iterator(self.images, 'copying images... ', brown, len(self.images)): dest = self.images[src] try: copyfile(path.join(self.srcdir, src), path.join(self.outdir, '_images', dest)) except Exception, err: self.warn('cannot copy image file %r: %s' % (path.join(self.srcdir, src), err))
def prepare_writing(self, docnames): # create the search indexer self.indexer = None if self.search: from sphinx.search import IndexBuilder, languages lang = self.config.html_search_language or self.config.language if not lang or lang not in languages: lang = 'en' self.indexer = IndexBuilder(self.env, lang, self.config.html_search_options, self.config.html_search_scorer) self.load_indexer(docnames) self.docwriter = HTMLWriter(self) self.docsettings = OptionParser( defaults=self.env.settings, components=(self.docwriter,), read_config_files=True).get_default_values() self.docsettings.compact_lists = bool(self.config.html_compact_lists) # determine the additional indices to include self.domain_indices = [] # html_domain_indices can be False/True or a list of index names indices_config = self.config.html_domain_indices if indices_config: for domain_name in sorted(self.env.domains): domain = self.env.domains[domain_name] for indexcls in domain.indices: indexname = '%s-%s' % (domain.name, indexcls.name) if isinstance(indices_config, list): if indexname not in indices_config: continue # deprecated config value if indexname == 'py-modindex' and \ not self.config.html_use_modindex: continue content, collapse = indexcls(domain).generate() if content: self.domain_indices.append( (indexname, indexcls, content, collapse)) # format the "last updated on" string, only once is enough since it # typically doesn't include the time of day lufmt = self.config.html_last_updated_fmt if lufmt is not None: self.last_updated = format_date(lufmt or _('MMM dd, YYYY'), language=self.config.language) else: self.last_updated = None logo = self.config.html_logo and \ path.basename(self.config.html_logo) or '' favicon = self.config.html_favicon and \ path.basename(self.config.html_favicon) or '' if favicon and os.path.splitext(favicon)[1] != '.ico': self.warn('html_favicon is not an .ico file') if not isinstance(self.config.html_use_opensearch, string_types): self.warn('html_use_opensearch config value must now be a string') self.relations = self.env.collect_relations() rellinks = [] if self.get_builder_config('use_index', 'html'): rellinks.append(('genindex', _('General Index'), 'I', _('index'))) for indexname, indexcls, content, collapse in self.domain_indices: # if it has a short name if indexcls.shortname: rellinks.append((indexname, indexcls.localname, '', indexcls.shortname)) if self.config.html_style is not None: stylename = self.config.html_style elif self.theme: stylename = self.theme.get_confstr('theme', 'stylesheet') else: stylename = 'default.css' self.globalcontext = dict( embedded = self.embedded, project = self.config.project, release = self.config.release, version = self.config.version, last_updated = self.last_updated, copyright = self.config.copyright, master_doc = self.config.master_doc, use_opensearch = self.config.html_use_opensearch, docstitle = self.config.html_title, shorttitle = self.config.html_short_title, show_copyright = self.config.html_show_copyright, show_sphinx = self.config.html_show_sphinx, has_source = self.config.html_copy_source, show_source = self.config.html_show_sourcelink, file_suffix = self.out_suffix, script_files = self.script_files, language = self.config.language, css_files = self.css_files, sphinx_version = __display_version__, style = stylename, rellinks = rellinks, builder = self.name, parents = [], logo = logo, favicon = favicon, ) if self.theme: self.globalcontext.update( ('theme_' + key, val) for (key, val) in iteritems(self.theme.get_options(self.theme_options))) 
self.globalcontext.update(self.config.html_context)
def prepare_writing(self, docnames): from sphinx.search import IndexBuilder self.indexer = IndexBuilder(self.env) self.load_indexer(docnames) self.docwriter = HTMLWriter(self) self.docsettings = OptionParser( defaults=self.env.settings, components=(self.docwriter, )).get_default_values() # format the "last updated on" string, only once is enough since it # typically doesn't include the time of day lufmt = self.config.html_last_updated_fmt if lufmt is not None: self.last_updated = ustrftime(lufmt or _('%b %d, %Y')) else: self.last_updated = None logo = self.config.html_logo and \ path.basename(self.config.html_logo) or '' favicon = self.config.html_favicon and \ path.basename(self.config.html_favicon) or '' if favicon and os.path.splitext(favicon)[1] != '.ico': self.warn('html_favicon is not an .ico file') if not isinstance(self.config.html_use_opensearch, basestring): self.warn('html_use_opensearch config value must now be a string') self.relations = self.env.collect_relations() rellinks = [] if self.config.html_use_index: rellinks.append(('genindex', _('General Index'), 'I', _('index'))) if self.config.html_use_modindex and self.env.modules: rellinks.append( ('modindex', _('Global Module Index'), 'M', _('modules'))) if self.config.html_style is not None: stylename = self.config.html_style elif self.theme: stylename = self.theme.get_confstr('theme', 'stylesheet') else: stylename = 'default.css' self.globalcontext = dict( embedded=self.embedded, project=self.config.project, release=self.config.release, version=self.config.version, last_updated=self.last_updated, copyright=self.config.copyright, master_doc=self.config.master_doc, use_opensearch=self.config.html_use_opensearch, docstitle=self.config.html_title, shorttitle=self.config.html_short_title, show_copyright=self.config.html_show_copyright, show_sphinx=self.config.html_show_sphinx, has_source=self.config.html_copy_source, show_source=self.config.html_show_sourcelink, file_suffix=self.out_suffix, script_files=self.script_files, css_files=self.css_files, sphinx_version=__version__, style=stylename, rellinks=rellinks, builder=self.name, parents=[], logo=logo, favicon=favicon, ) if self.theme: self.globalcontext.update( ('theme_' + key, val) for (key, val) in self.theme.get_options( self.config.html_theme_options).iteritems()) self.globalcontext.update(self.config.html_context)
class XmlExportBuilder(Builder): """ Builds XML export file with html content. The builder extends base Builder class with as minimal extra attributes as possible to use HTMLWriter (sphinx html writer). I originally wanted to extend StandaloneHTMLBuilder instead, but I would need to disable most of it's functionality anyway. """ # the builder's name, for the -b command line option name = 'xmlexport' # the builder's output format, or '' if no document output is produced, # value used for self.tags (instance of sphinx.util.tags.Tags) format = 'html' # allow parallel write_doc() calls allow_parallel = False # from StandaloneHTMLBuilder, not directly mentioned in Builder out_suffix = '.xml' link_suffix = '.xml' supported_image_types = [] add_permalinks = False # docutils translator default_translator_class = HTMLTranslator def init(self): # writer object is initialized in prepare_writing method self.writer = None # section numbers for headings in the currently visited document self.secnumbers = {} # figure numbers self.fignumbers = {} # currently written docname self.current_docname = None # type: unicode # sphinx highlighter, from StandaloneHTMLBuilder.init_highlighter() self.highlighter = PygmentsBridge( 'html', 'sphinx', self.config.trim_doctest_flags) # TODO: proper implementation def get_target_uri(self, docname, typ=None): # type: (unicode, unicode) -> unicode """Return the target URI for a document name. *typ* can be used to qualify the link characteristic for individual builders. """ return docname + self.link_suffix # TODO: proper implementation def get_outdated_docs(self): # type: () -> Iterator[unicode] """Return an iterable of output files that are outdated, or a string describing what an update build will build. If the builder does not output individual files corresponding to source files, return a string here. If it does, return an iterable of those files that need to be written. 
""" for docname in self.env.found_docs: yield docname def prepare_writing(self, docnames): # type: (Set[unicode]) -> None """A place where you can add logic before :meth:`write_doc` is run""" self.writer = HTMLWriter(self) self.settings = OptionParser( defaults=self.env.settings, components=(self.writer,), read_config_files=True).get_default_values() self.settings.compact_lists = bool(self.config.html_compact_lists) # disable splitting field list table rows with too long field names, # fixing https://gitlab.com/mbukatov/pylatest/issues/44 self.settings.field_name_limit = 0 def write_doc(self, docname, doctree): # type: (unicode, nodes.Node) -> None """Where you actually write something to the filesystem.""" # hack: check if the document is a test case is_testcase_doc = False for node in doctree.traverse(test_action_node): is_testcase_doc = True break # we will produce xml export output for test cases only if not is_testcase_doc: return # initialize dict with properties for xml export file properties = {} # set test case id based on selected lookup method if self.app.config.pylatest_export_lookup_method == "custom": testcase_id = "/" + docname properties['lookup-method'] = 'custom' elif self.app.config.pylatest_export_lookup_method == "id": # get test case id from a field list # if the id can't be found there, testcase id attribute is omitted testcase_id = get_testcase_id(doctree) properties['lookup-method'] = 'id' elif self.app.config.pylatest_export_lookup_method == "id,custom": # custom lookup method is used, unless explicit id is specified # in the rst file testcase_id = get_testcase_id(doctree) properties['lookup-method'] = 'id' if testcase_id is None: testcase_id = "/" + docname properties['lookup-method'] = 'custom' else: # TODO: report the error in a better way? msg = "pylatest_export_lookup_method value is invalid" raise Exception(msg) # set test case id based on selected lookup method if self.app.config.pylatest_export_dry_run: properties['dry-run'] = 'true' # generate html output from the doctree destination = StringOutput(encoding='utf-8') # TODO: what is this? 
doctree.settings = self.settings self.current_docname = docname self.writer.write(doctree, destination) # generate content of target xml file based on html output tc_doc = build_xml_testcase_doc( html_source=self.writer.output, content_type=self.app.config.pylatest_export_content_type, testcase_id=testcase_id, ) # validate and drop invalid metadata if needed if len(self.app.config.pylatest_valid_export_metadata) > 0: for name in list(tc_doc.metadata.keys()): if name not in self.app.config.pylatest_valid_export_metadata: del tc_doc.metadata[name] # create xml export document with single test case export_doc = build_xml_export_doc( project_id=self.app.config.pylatest_project_id, testcases=[tc_doc.build_element_tree()], properties=properties, response_properties= # noqa self.app.config.pylatest_export_response_properties, # noqa ) content_b = etree.tostring( export_doc, xml_declaration=True, encoding='utf-8', pretty_print=self.app.config.pylatest_export_pretty_print) content = content_b.decode('utf-8') # write content into file outfilename = path.join( self.outdir, os_path(docname) + self.out_suffix) ensuredir(path.dirname(outfilename)) try: with codecs.open(outfilename, 'w', 'utf-8') as f: # type: ignore f.write(content) except (IOError, OSError) as err: logger.warning("error writing file %s: %s", outfilename, err) def finish(self): # type: () -> None pass @property def math_renderer_name(self): """ This method needs to be there since sphinx 1.8.0, but XmlExportBuilder doesn't care about math rendering at all. Moreover XmlExportBuilder actually can't implement any math rendering, as it tries to embed pieces of html into xml file. No javascript rendered formulas, images or anything like that is possible in such environment. """ # Because we can't just return None without crashing the build process, # the only safe option which doesn't break the build is returned. Yes, # I have actually no idea what I'm doing here. return "mathjax" def add_js_file(self, *args, **kwargs): """ This method needs to be there since sphinx 1.8.0, but XmlExportBuilder doesn't care about js files at all. XmlExportBuilder tries to embed pieces of html into xml and there is no place nor purpose for any javascript files in xml file we are producing here. """ pass
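A builder like XmlExportBuilder is normally registered through an extension's setup() hook. The sketch below is an assumption, not the project's real entry point: the config value names mirror those referenced by the class above, but their defaults and rebuild flags are guessed here for illustration.

def setup(app):
    # Hypothetical wiring; pylatest's actual setup() may differ.
    app.add_builder(XmlExportBuilder)
    app.add_config_value('pylatest_export_lookup_method', 'id,custom', 'env')
    app.add_config_value('pylatest_export_dry_run', False, 'env')
    app.add_config_value('pylatest_export_content_type', None, 'env')
    app.add_config_value('pylatest_valid_export_metadata', [], 'env')
    app.add_config_value('pylatest_project_id', None, 'env')
    app.add_config_value('pylatest_export_response_properties', None, 'env')
    app.add_config_value('pylatest_export_pretty_print', True, 'env')
    return {'parallel_read_safe': True, 'parallel_write_safe': False}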
class StandaloneHTMLBuilder(Builder): """ Builds standalone HTML docs. """ name = "html" format = "html" copysource = True out_suffix = ".html" link_suffix = ".html" # defaults to matching out_suffix indexer_format = js_index supported_image_types = ["image/svg+xml", "image/png", "image/gif", "image/jpeg"] searchindex_filename = "searchindex.js" add_permalinks = True embedded = False # for things like HTML help or Qt help: suppresses sidebar # This is a class attribute because it is mutated by Sphinx.add_javascript. script_files = ["_static/jquery.js", "_static/underscore.js", "_static/doctools.js"] # Dito for this one. css_files = [] default_sidebars = ["localtoc.html", "relations.html", "sourcelink.html", "searchbox.html"] # cached publisher object for snippets _publisher = None def init(self): # a hash of all config values that, if changed, cause a full rebuild self.config_hash = "" self.tags_hash = "" # section numbers for headings in the currently visited document self.secnumbers = {} self.init_templates() self.init_highlighter() self.init_translator_class() if self.config.html_file_suffix is not None: self.out_suffix = self.config.html_file_suffix if self.config.html_link_suffix is not None: self.link_suffix = self.config.html_link_suffix else: self.link_suffix = self.out_suffix if self.config.language is not None: jsfile_list = [ path.join(package_dir, "locale", self.config.language, "LC_MESSAGES", "sphinx.js"), path.join(sys.prefix, "share/sphinx/locale", self.config.language, "sphinx.js"), ] for jsfile in jsfile_list: if path.isfile(jsfile): self.script_files.append("_static/translations.js") break def get_theme_config(self): return self.config.html_theme, self.config.html_theme_options def init_templates(self): Theme.init_themes(self) themename, themeoptions = self.get_theme_config() self.theme = Theme(themename) self.theme_options = themeoptions.copy() self.create_template_bridge() self.templates.init(self, self.theme) def init_highlighter(self): # determine Pygments style and create the highlighter if self.config.pygments_style is not None: style = self.config.pygments_style elif self.theme: style = self.theme.get_confstr("theme", "pygments_style", "none") else: style = "sphinx" self.highlighter = PygmentsBridge("html", style, self.config.trim_doctest_flags) def init_translator_class(self): if self.config.html_translator_class: self.translator_class = self.app.import_object( self.config.html_translator_class, "html_translator_class setting" ) elif self.config.html_use_smartypants: self.translator_class = SmartyPantsHTMLTranslator else: self.translator_class = HTMLTranslator def get_outdated_docs(self): cfgdict = dict( (name, self.config[name]) for (name, desc) in self.config.values.iteritems() if desc[1] == "html" ) self.config_hash = md5(str(cfgdict)).hexdigest() self.tags_hash = md5(str(sorted(self.tags))).hexdigest() old_config_hash = old_tags_hash = "" try: fp = open(path.join(self.outdir, ".buildinfo")) version = fp.readline() if version.rstrip() != "# Sphinx build info version 1": raise ValueError fp.readline() # skip commentary cfg, old_config_hash = fp.readline().strip().split(": ") if cfg != "config": raise ValueError tag, old_tags_hash = fp.readline().strip().split(": ") if tag != "tags": raise ValueError fp.close() except ValueError: self.warn("unsupported build info format in %r, building all" % path.join(self.outdir, ".buildinfo")) except Exception: pass if old_config_hash != self.config_hash or old_tags_hash != self.tags_hash: for docname in self.env.found_docs: 
yield docname return if self.templates: template_mtime = self.templates.newest_template_mtime() else: template_mtime = 0 for docname in self.env.found_docs: if docname not in self.env.all_docs: yield docname continue targetname = self.get_outfilename(docname) try: targetmtime = path.getmtime(targetname) except Exception: targetmtime = 0 try: srcmtime = max(path.getmtime(self.env.doc2path(docname)), template_mtime) if srcmtime > targetmtime: yield docname except EnvironmentError: # source doesn't exist anymore pass def render_partial(self, node): """Utility: Render a lone doctree node.""" if node is None: return {"fragment": ""} doc = new_document("<partial node>") doc.append(node) if self._publisher is None: self._publisher = Publisher(source_class=DocTreeInput, destination_class=StringOutput) self._publisher.set_components("standalone", "restructuredtext", "pseudoxml") pub = self._publisher pub.reader = DoctreeReader() pub.writer = HTMLWriter(self) pub.process_programmatic_settings(None, {"output_encoding": "unicode"}, None) pub.set_source(doc, None) pub.set_destination(None, None) pub.publish() return pub.writer.parts def prepare_writing(self, docnames): from sphinx.search import IndexBuilder self.indexer = IndexBuilder(self.env) self.load_indexer(docnames) self.docwriter = HTMLWriter(self) self.docsettings = OptionParser(defaults=self.env.settings, components=(self.docwriter,)).get_default_values() self.docsettings.compact_lists = bool(self.config.html_compact_lists) # determine the additional indices to include self.domain_indices = [] # html_domain_indices can be False/True or a list of index names indices_config = self.config.html_domain_indices if indices_config: for domain in self.env.domains.itervalues(): for indexcls in domain.indices: indexname = "%s-%s" % (domain.name, indexcls.name) if isinstance(indices_config, list): if indexname not in indices_config: continue # deprecated config value if indexname == "py-modindex" and not self.config.html_use_modindex: continue content, collapse = indexcls(domain).generate() if content: self.domain_indices.append((indexname, indexcls, content, collapse)) # format the "last updated on" string, only once is enough since it # typically doesn't include the time of day lufmt = self.config.html_last_updated_fmt if lufmt is not None: self.last_updated = ustrftime(lufmt or _("%b %d, %Y")) else: self.last_updated = None logo = self.config.html_logo and path.basename(self.config.html_logo) or "" favicon = self.config.html_favicon and path.basename(self.config.html_favicon) or "" if favicon and os.path.splitext(favicon)[1] != ".ico": self.warn("html_favicon is not an .ico file") if not isinstance(self.config.html_use_opensearch, basestring): self.warn("html_use_opensearch config value must now be a string") self.relations = self.env.collect_relations() rellinks = [] if self.config.html_use_index: rellinks.append(("genindex", _("General Index"), "I", _("index"))) for indexname, indexcls, content, collapse in self.domain_indices: # if it has a short name if indexcls.shortname: rellinks.append((indexname, indexcls.localname, "", indexcls.shortname)) if self.config.html_style is not None: stylename = self.config.html_style elif self.theme: stylename = self.theme.get_confstr("theme", "stylesheet") else: stylename = "default.css" self.globalcontext = dict( embedded=self.embedded, project=self.config.project, release=self.config.release, version=self.config.version, last_updated=self.last_updated, copyright=self.config.copyright, 
master_doc=self.config.master_doc, use_opensearch=self.config.html_use_opensearch, docstitle=self.config.html_title, shorttitle=self.config.html_short_title, show_copyright=self.config.html_show_copyright, show_sphinx=self.config.html_show_sphinx, has_source=self.config.html_copy_source, show_source=self.config.html_show_sourcelink, file_suffix=self.out_suffix, script_files=self.script_files, css_files=self.css_files, sphinx_version=__version__, style=stylename, rellinks=rellinks, builder=self.name, parents=[], logo=logo, favicon=favicon, ) if self.theme: self.globalcontext.update( ("theme_" + key, val) for (key, val) in self.theme.get_options(self.theme_options).iteritems() ) self.globalcontext.update(self.config.html_context) def get_doc_context(self, docname, body, metatags): """Collect items for the template context of a page.""" # find out relations prev = next = None parents = [] rellinks = self.globalcontext["rellinks"][:] related = self.relations.get(docname) titles = self.env.titles if related and related[2]: try: next = { "link": self.get_relative_uri(docname, related[2]), "title": self.render_partial(titles[related[2]])["title"], } rellinks.append((related[2], next["title"], "N", _("next"))) except KeyError: next = None if related and related[1]: try: prev = { "link": self.get_relative_uri(docname, related[1]), "title": self.render_partial(titles[related[1]])["title"], } rellinks.append((related[1], prev["title"], "P", _("previous"))) except KeyError: # the relation is (somehow) not in the TOC tree, handle # that gracefully prev = None while related and related[0]: try: parents.append( { "link": self.get_relative_uri(docname, related[0]), "title": self.render_partial(titles[related[0]])["title"], } ) except KeyError: pass related = self.relations.get(related[0]) if parents: parents.pop() # remove link to the master file; we have a generic # "back to index" link already parents.reverse() # title rendered as HTML title = self.env.longtitles.get(docname) title = title and self.render_partial(title)["title"] or "" # the name for the copied source sourcename = self.config.html_copy_source and docname + ".txt" or "" # metadata for the document meta = self.env.metadata.get(docname) # local TOC and global TOC tree toc = self.render_partial(self.env.get_toc_for(docname))["fragment"] return dict( parents=parents, prev=prev, next=next, title=title, meta=meta, body=body, metatags=metatags, rellinks=rellinks, sourcename=sourcename, toc=toc, # only display a TOC if there's more than one item to show display_toc=(self.env.toc_num_entries[docname] > 1), ) def write_doc(self, docname, doctree): destination = StringOutput(encoding="utf-8") doctree.settings = self.docsettings self.secnumbers = self.env.toc_secnumbers.get(docname, {}) self.imgpath = relative_uri(self.get_target_uri(docname), "_images") self.post_process_images(doctree) self.dlpath = relative_uri(self.get_target_uri(docname), "_downloads") self.docwriter.write(doctree, destination) self.docwriter.assemble_parts() body = self.docwriter.parts["fragment"] metatags = self.docwriter.clean_meta ctx = self.get_doc_context(docname, body, metatags) self.index_page(docname, doctree, ctx.get("title", "")) self.handle_page(docname, ctx, event_arg=doctree) def finish(self): self.info(bold("writing additional files..."), nonl=1) # pages from extensions for pagelist in self.app.emit("html-collect-pages"): for pagename, context, template in pagelist: self.handle_page(pagename, context, template) # the global general index if 
self.config.html_use_index: self.write_genindex() # the global domain-specific indices self.write_domain_indices() # the search page if self.name != "htmlhelp": self.info(" search", nonl=1) self.handle_page("search", {}, "search.html") # additional pages from conf.py for pagename, template in self.config.html_additional_pages.items(): self.info(" " + pagename, nonl=1) self.handle_page(pagename, {}, template) if self.config.html_use_opensearch and self.name != "htmlhelp": self.info(" opensearch", nonl=1) fn = path.join(self.outdir, "_static", "opensearch.xml") self.handle_page("opensearch", {}, "opensearch.xml", outfilename=fn) self.info() self.copy_image_files() self.copy_download_files() self.copy_static_files() self.write_buildinfo() # dump the search index self.handle_finish() def write_genindex(self): # the total count of lines for each index letter, used to distribute # the entries into two columns genindex = self.env.create_index(self) indexcounts = [] for _, entries in genindex: indexcounts.append(sum(1 + len(subitems) for _, (_, subitems) in entries)) genindexcontext = dict( genindexentries=genindex, genindexcounts=indexcounts, split_index=self.config.html_split_index ) self.info(" genindex", nonl=1) if self.config.html_split_index: self.handle_page("genindex", genindexcontext, "genindex-split.html") self.handle_page("genindex-all", genindexcontext, "genindex.html") for (key, entries), count in zip(genindex, indexcounts): ctx = {"key": key, "entries": entries, "count": count, "genindexentries": genindex} self.handle_page("genindex-" + key, ctx, "genindex-single.html") else: self.handle_page("genindex", genindexcontext, "genindex.html") def write_domain_indices(self): for indexname, indexcls, content, collapse in self.domain_indices: indexcontext = dict(indextitle=indexcls.localname, content=content, collapse_index=collapse) self.info(" " + indexname, nonl=1) self.handle_page(indexname, indexcontext, "domainindex.html") def copy_image_files(self): # copy image files if self.images: ensuredir(path.join(self.outdir, "_images")) for src in self.status_iterator(self.images, "copying images... ", brown, len(self.images)): dest = self.images[src] try: copyfile(path.join(self.srcdir, src), path.join(self.outdir, "_images", dest)) except Exception, err: self.warn("cannot copy image file %r: %s" % (path.join(self.srcdir, src), err))
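The get_outdated_docs() logic in the builder above hinges on a tiny four-line ".buildinfo" file: if either stored hash differs from the freshly computed one, every document is treated as outdated. A hedged sketch of that round trip follows; the helper names and the commentary line are illustrative, and the md5 input is encoded for Python 3 even though the builder shown is Python 2 code.

from hashlib import md5
from os import path

def write_buildinfo(outdir, config_hash, tags_hash):
    # layout matches what the reader in get_outdated_docs() expects
    with open(path.join(outdir, '.buildinfo'), 'w') as fp:
        fp.write('# Sphinx build info version 1\n')
        fp.write('# (commentary line, skipped by the reader)\n')
        fp.write('config: %s\n' % config_hash)
        fp.write('tags: %s\n' % tags_hash)

def read_buildinfo(outdir):
    """Return (config_hash, tags_hash), or ('', '') when unreadable."""
    try:
        with open(path.join(outdir, '.buildinfo')) as fp:
            if fp.readline().rstrip() != '# Sphinx build info version 1':
                raise ValueError('unsupported build info format')
            fp.readline()                                # skip commentary
            _, config_hash = fp.readline().strip().split(': ')
            _, tags_hash = fp.readline().strip().split(': ')
            return config_hash, tags_hash
    except Exception:
        return '', ''

# the hashes themselves are plain md5 over a stringified config dict / tag
# list, as in the Python 2 era builder shown above
config_hash = md5(str({'html_theme': 'default'}).encode()).hexdigest()

A full rebuild is triggered whenever read_buildinfo(outdir) does not match the freshly computed (config_hash, tags_hash) pair.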
class StandaloneHTMLBuilder(Builder): """ Builds standalone HTML docs. """ name = 'html' format = 'html' copysource = True out_suffix = '.html' link_suffix = '.html' # defaults to matching out_suffix indexer_format = js_index supported_image_types = [ 'image/svg+xml', 'image/png', 'image/gif', 'image/jpeg' ] searchindex_filename = 'searchindex.js' add_permalinks = True embedded = False # for things like HTML help or Qt help: suppresses sidebar # This is a class attribute because it is mutated by Sphinx.add_javascript. script_files = ['_static/jquery.js', '_static/doctools.js'] # Dito for this one. css_files = [] # cached publisher object for snippets _publisher = None def init(self): # a hash of all config values that, if changed, cause a full rebuild self.config_hash = '' self.tags_hash = '' # section numbers for headings in the currently visited document self.secnumbers = {} self.init_templates() self.init_highlighter() self.init_translator_class() if self.config.html_file_suffix: self.out_suffix = self.config.html_file_suffix if self.config.html_link_suffix is not None: self.link_suffix = self.config.html_link_suffix else: self.link_suffix = self.out_suffix if self.config.language is not None: jsfile = path.join(package_dir, 'locale', self.config.language, 'LC_MESSAGES', 'sphinx.js') if path.isfile(jsfile): self.script_files.append('_static/translations.js') def init_templates(self): Theme.init_themes(self) self.theme = Theme(self.config.html_theme) self.create_template_bridge() self.templates.init(self, self.theme) def init_highlighter(self): # determine Pygments style and create the highlighter if self.config.pygments_style is not None: style = self.config.pygments_style elif self.theme: style = self.theme.get_confstr('theme', 'pygments_style', 'none') else: style = 'sphinx' self.highlighter = PygmentsBridge('html', style, self.config.trim_doctest_flags) def init_translator_class(self): if self.config.html_translator_class: self.translator_class = self.app.import_object( self.config.html_translator_class, 'html_translator_class setting') elif self.config.html_use_smartypants: self.translator_class = SmartyPantsHTMLTranslator else: self.translator_class = HTMLTranslator def get_outdated_docs(self): cfgdict = dict((name, self.config[name]) for (name, desc) in self.config.values.iteritems() if desc[1] == 'html') self.config_hash = md5(str(cfgdict)).hexdigest() self.tags_hash = md5(str(sorted(self.tags))).hexdigest() old_config_hash = old_tags_hash = '' try: fp = open(path.join(self.outdir, '.buildinfo')) version = fp.readline() if version.rstrip() != '# Sphinx build info version 1': raise ValueError fp.readline() # skip commentary cfg, old_config_hash = fp.readline().strip().split(': ') if cfg != 'config': raise ValueError tag, old_tags_hash = fp.readline().strip().split(': ') if tag != 'tags': raise ValueError fp.close() except ValueError: self.warn('unsupported build info format in %r, building all' % path.join(self.outdir, '.buildinfo')) except Exception: pass if old_config_hash != self.config_hash or \ old_tags_hash != self.tags_hash: for docname in self.env.found_docs: yield docname return if self.templates: template_mtime = self.templates.newest_template_mtime() else: template_mtime = 0 for docname in self.env.found_docs: if docname not in self.env.all_docs: yield docname continue targetname = self.env.doc2path(docname, self.outdir, self.out_suffix) try: targetmtime = path.getmtime(targetname) except Exception: targetmtime = 0 try: srcmtime = 
max(path.getmtime(self.env.doc2path(docname)), template_mtime) if srcmtime > targetmtime: yield docname except EnvironmentError: # source doesn't exist anymore pass def render_partial(self, node): """Utility: Render a lone doctree node.""" doc = new_document('<partial node>') doc.append(node) if self._publisher is None: self._publisher = Publisher(source_class=DocTreeInput, destination_class=StringOutput) self._publisher.set_components('standalone', 'restructuredtext', 'pseudoxml') pub = self._publisher pub.reader = DoctreeReader() pub.writer = HTMLWriter(self) pub.process_programmatic_settings(None, {'output_encoding': 'unicode'}, None) pub.set_source(doc, None) pub.set_destination(None, None) pub.publish() return pub.writer.parts def prepare_writing(self, docnames): from sphinx.search import IndexBuilder self.indexer = IndexBuilder(self.env) self.load_indexer(docnames) self.docwriter = HTMLWriter(self) self.docsettings = OptionParser( defaults=self.env.settings, components=(self.docwriter, )).get_default_values() # format the "last updated on" string, only once is enough since it # typically doesn't include the time of day lufmt = self.config.html_last_updated_fmt if lufmt is not None: self.last_updated = ustrftime(lufmt or _('%b %d, %Y')) else: self.last_updated = None logo = self.config.html_logo and \ path.basename(self.config.html_logo) or '' favicon = self.config.html_favicon and \ path.basename(self.config.html_favicon) or '' if favicon and os.path.splitext(favicon)[1] != '.ico': self.warn('html_favicon is not an .ico file') if not isinstance(self.config.html_use_opensearch, basestring): self.warn('html_use_opensearch config value must now be a string') self.relations = self.env.collect_relations() rellinks = [] if self.config.html_use_index: rellinks.append(('genindex', _('General Index'), 'I', _('index'))) if self.config.html_use_modindex and self.env.modules: rellinks.append( ('modindex', _('Global Module Index'), 'M', _('modules'))) if self.config.html_style is not None: stylename = self.config.html_style elif self.theme: stylename = self.theme.get_confstr('theme', 'stylesheet') else: stylename = 'default.css' self.globalcontext = dict( embedded=self.embedded, project=self.config.project, release=self.config.release, version=self.config.version, last_updated=self.last_updated, copyright=self.config.copyright, master_doc=self.config.master_doc, use_opensearch=self.config.html_use_opensearch, docstitle=self.config.html_title, shorttitle=self.config.html_short_title, show_copyright=self.config.html_show_copyright, show_sphinx=self.config.html_show_sphinx, has_source=self.config.html_copy_source, show_source=self.config.html_show_sourcelink, file_suffix=self.out_suffix, script_files=self.script_files, css_files=self.css_files, sphinx_version=__version__, style=stylename, rellinks=rellinks, builder=self.name, parents=[], logo=logo, favicon=favicon, ) if self.theme: self.globalcontext.update( ('theme_' + key, val) for (key, val) in self.theme.get_options( self.config.html_theme_options).iteritems()) self.globalcontext.update(self.config.html_context) def get_doc_context(self, docname, body, metatags): """Collect items for the template context of a page.""" # find out relations prev = next = None parents = [] rellinks = self.globalcontext['rellinks'][:] related = self.relations.get(docname) titles = self.env.titles if related and related[2]: try: next = { 'link': self.get_relative_uri(docname, related[2]), 'title': self.render_partial(titles[related[2]])['title'] } 
rellinks.append((related[2], next['title'], 'N', _('next'))) except KeyError: next = None if related and related[1]: try: prev = { 'link': self.get_relative_uri(docname, related[1]), 'title': self.render_partial(titles[related[1]])['title'] } rellinks.append( (related[1], prev['title'], 'P', _('previous'))) except KeyError: # the relation is (somehow) not in the TOC tree, handle # that gracefully prev = None while related and related[0]: try: parents.append({ 'link': self.get_relative_uri(docname, related[0]), 'title': self.render_partial(titles[related[0]])['title'] }) except KeyError: pass related = self.relations.get(related[0]) if parents: parents.pop() # remove link to the master file; we have a generic # "back to index" link already parents.reverse() # title rendered as HTML title = self.env.longtitles.get(docname) title = title and self.render_partial(title)['title'] or '' # the name for the copied source sourcename = self.config.html_copy_source and docname + '.txt' or '' # metadata for the document meta = self.env.metadata.get(docname) # local TOC and global TOC tree toc = self.render_partial(self.env.get_toc_for(docname))['fragment'] return dict( parents=parents, prev=prev, next=next, title=title, meta=meta, body=body, metatags=metatags, rellinks=rellinks, sourcename=sourcename, toc=toc, # only display a TOC if there's more than one item to show display_toc=(self.env.toc_num_entries[docname] > 1), ) def write_doc(self, docname, doctree): destination = StringOutput(encoding='utf-8') doctree.settings = self.docsettings self.secnumbers = self.env.toc_secnumbers.get(docname, {}) self.imgpath = relative_uri(self.get_target_uri(docname), '_images') self.post_process_images(doctree) self.dlpath = relative_uri(self.get_target_uri(docname), '_downloads') self.docwriter.write(doctree, destination) self.docwriter.assemble_parts() body = self.docwriter.parts['fragment'] metatags = self.docwriter.clean_meta ctx = self.get_doc_context(docname, body, metatags) self.index_page(docname, doctree, ctx.get('title', '')) self.handle_page(docname, ctx, event_arg=doctree) def finish(self): self.info(bold('writing additional files...'), nonl=1) # the global general index if self.config.html_use_index: # the total count of lines for each index letter, used to distribute # the entries into two columns genindex = self.env.create_index(self) indexcounts = [] for _, entries in genindex: indexcounts.append( sum(1 + len(subitems) for _, (_, subitems) in entries)) genindexcontext = dict( genindexentries=genindex, genindexcounts=indexcounts, split_index=self.config.html_split_index, ) self.info(' genindex', nonl=1) if self.config.html_split_index: self.handle_page('genindex', genindexcontext, 'genindex-split.html') self.handle_page('genindex-all', genindexcontext, 'genindex.html') for (key, entries), count in zip(genindex, indexcounts): ctx = { 'key': key, 'entries': entries, 'count': count, 'genindexentries': genindex } self.handle_page('genindex-' + key, ctx, 'genindex-single.html') else: self.handle_page('genindex', genindexcontext, 'genindex.html') # the global module index if self.config.html_use_modindex and self.env.modules: # the sorted list of all modules, for the global module index modules = sorted( ((mn, (self.get_relative_uri('modindex', fn) + '#module-' + mn, sy, pl, dep)) for (mn, (fn, sy, pl, dep)) in self.env.modules.iteritems()), key=lambda x: x[0].lower()) # collect all platforms platforms = set() # sort out collapsable modules modindexentries = [] letters = [] pmn = '' num_toplevels = 0 
num_collapsables = 0 cg = 0 # collapse group fl = '' # first letter for mn, (fn, sy, pl, dep) in modules: pl = pl and pl.split(', ') or [] platforms.update(pl) ignore = self.env.config['modindex_common_prefix'] ignore = sorted(ignore, key=len, reverse=True) for i in ignore: if mn.startswith(i): mn = mn[len(i):] stripped = i break else: stripped = '' if fl != mn[0].lower() and mn[0] != '_': # heading letter = mn[0].upper() if letter not in letters: modindexentries.append( ['', False, 0, False, letter, '', [], False, '']) letters.append(letter) tn = mn.split('.')[0] if tn != mn: # submodule if pmn == tn: # first submodule - make parent collapsable modindexentries[-1][1] = True num_collapsables += 1 elif not pmn.startswith(tn): # submodule without parent in list, add dummy entry cg += 1 modindexentries.append( [tn, True, cg, False, '', '', [], False, stripped]) else: num_toplevels += 1 cg += 1 modindexentries.append( [mn, False, cg, (tn != mn), fn, sy, pl, dep, stripped]) pmn = mn fl = mn[0].lower() platforms = sorted(platforms) # apply heuristics when to collapse modindex at page load: # only collapse if number of toplevel modules is larger than # number of submodules collapse = len(modules) - num_toplevels < num_toplevels # As some parts of the module names may have been stripped, those # names have changed, thus it is necessary to sort the entries. if ignore: def sorthelper(entry): name = entry[0] if name == '': # heading name = entry[4] return name.lower() modindexentries.sort(key=sorthelper) letters.sort() modindexcontext = dict( modindexentries=modindexentries, platforms=platforms, letters=letters, collapse_modindex=collapse, ) self.info(' modindex', nonl=1) self.handle_page('modindex', modindexcontext, 'modindex.html') # the search page if self.name != 'htmlhelp': self.info(' search', nonl=1) self.handle_page('search', {}, 'search.html') # additional pages from conf.py for pagename, template in self.config.html_additional_pages.items(): self.info(' ' + pagename, nonl=1) self.handle_page(pagename, {}, template) if self.config.html_use_opensearch and self.name != 'htmlhelp': self.info(' opensearch', nonl=1) fn = path.join(self.outdir, '_static', 'opensearch.xml') self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn) self.info() # copy image files if self.images: self.info(bold('copying images...'), nonl=True) ensuredir(path.join(self.outdir, '_images')) for src, dest in self.images.iteritems(): self.info(' ' + src, nonl=1) try: copyfile(path.join(self.srcdir, src), path.join(self.outdir, '_images', dest)) except Exception, err: self.warn('cannot copy image file %r: %s' % (path.join(self.srcdir, src), err)) self.info() # copy downloadable files if self.env.dlfiles: self.info(bold('copying downloadable files...'), nonl=True) ensuredir(path.join(self.outdir, '_downloads')) for src, (_, dest) in self.env.dlfiles.iteritems(): self.info(' ' + src, nonl=1) try: copyfile(path.join(self.srcdir, src), path.join(self.outdir, '_downloads', dest)) except Exception, err: self.warn('cannot copy downloadable file %r: %s' % (path.join(self.srcdir, src), err)) self.info()
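Both builder variants finish get_outdated_docs() with the same mtime comparison once the hashes match. Pulled out of the class it reduces to the generator below; the callable parameters stand in for env.doc2path and the builder's output-filename helper, so those names are assumptions.

from os import path

def outdated_docs(found_docs, known_docs, doc2path, get_outfilename,
                  template_mtime=0):
    """Yield docnames whose source (or a template) is newer than the output."""
    for docname in found_docs:
        if docname not in known_docs:
            # never built before
            yield docname
            continue
        try:
            targetmtime = path.getmtime(get_outfilename(docname))
        except Exception:
            targetmtime = 0
        try:
            srcmtime = max(path.getmtime(doc2path(docname)), template_mtime)
        except OSError:
            # source doesn't exist anymore
            continue
        if srcmtime > targetmtime:
            yield docname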
def prepare_writing(self, docnames): from sphinx.search import IndexBuilder self.indexer = IndexBuilder(self.env) self.load_indexer(docnames) self.docwriter = HTMLWriter(self) self.docsettings = OptionParser( defaults=self.env.settings, components=(self.docwriter,)).get_default_values() # format the "last updated on" string, only once is enough since it # typically doesn't include the time of day lufmt = self.config.html_last_updated_fmt if lufmt is not None: self.last_updated = ustrftime(lufmt or _('%b %d, %Y')) else: self.last_updated = None logo = self.config.html_logo and \ path.basename(self.config.html_logo) or '' favicon = self.config.html_favicon and \ path.basename(self.config.html_favicon) or '' if favicon and os.path.splitext(favicon)[1] != '.ico': self.warn('html_favicon is not an .ico file') if not isinstance(self.config.html_use_opensearch, basestring): self.warn('html_use_opensearch config value must now be a string') self.relations = self.env.collect_relations() rellinks = [] if self.config.html_use_index: rellinks.append(('genindex', _('General Index'), 'I', _('index'))) if self.config.html_use_modindex and self.env.modules: rellinks.append(('modindex', _('Global Module Index'), 'M', _('modules'))) if self.config.html_style is not None: stylename = self.config.html_style elif self.theme: stylename = self.theme.get_confstr('theme', 'stylesheet') else: stylename = 'default.css' self.globalcontext = dict( embedded = self.embedded, project = self.config.project, release = self.config.release, version = self.config.version, last_updated = self.last_updated, copyright = self.config.copyright, master_doc = self.config.master_doc, use_opensearch = self.config.html_use_opensearch, docstitle = self.config.html_title, shorttitle = self.config.html_short_title, show_sphinx = self.config.html_show_sphinx, has_source = self.config.html_copy_source, show_source = self.config.html_show_sourcelink, file_suffix = self.out_suffix, script_files = self.script_files, sphinx_version = __version__, style = stylename, rellinks = rellinks, builder = self.name, parents = [], logo = logo, favicon = favicon, ) if self.theme: self.globalcontext.update( ('theme_' + key, val) for (key, val) in self.theme.get_options( self.config.html_theme_options).iteritems()) self.globalcontext.update(self.config.html_context)
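Every prepare_writing()/write_doc() pair in this file repeats the same writer setup: build the docutils settings once from the environment, then reuse the HTMLWriter per document. Collapsed into one function it is roughly the sketch below; builder is assumed to be an already initialised Sphinx HTML builder, and this is not a drop-in replacement for the methods above.

from docutils.frontend import OptionParser
from docutils.io import StringOutput
from sphinx.writers.html import HTMLWriter

def render_doctree(builder, doctree):
    # settings are derived from the environment plus the writer's own
    # settings_spec, exactly as in the prepare_writing() variants above
    writer = HTMLWriter(builder)
    settings = OptionParser(
        defaults=builder.env.settings,
        components=(writer,),
        read_config_files=True).get_default_values()
    settings.compact_lists = bool(builder.config.html_compact_lists)
    doctree.settings = settings
    # write to an in-memory destination and split the result into parts
    writer.write(doctree, StringOutput(encoding='utf-8'))
    writer.assemble_parts()
    # assemble_parts() fills writer.parts; the snippets above read either
    # parts['fragment'] or parts['body'] depending on how much markup they want
    return writer.parts['fragment']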
class StandaloneHTMLBuilder(Builder): """ Builds standalone HTML docs. """ name = 'html' format = 'html' copysource = True allow_parallel = True out_suffix = '.html' link_suffix = '.html' # defaults to matching out_suffix indexer_format = js_index indexer_dumps_unicode = True supported_image_types = [ 'image/svg+xml', 'image/png', 'image/gif', 'image/jpeg' ] searchindex_filename = 'searchindex.js' add_permalinks = True embedded = False # for things like HTML help or Qt help: suppresses sidebar # This is a class attribute because it is mutated by Sphinx.add_javascript. script_files = [ '_static/jquery.js', '_static/underscore.js', '_static/doctools.js' ] # Dito for this one. css_files = [] default_sidebars = [ 'localtoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html' ] # cached publisher object for snippets _publisher = None def init(self): # a hash of all config values that, if changed, cause a full rebuild self.config_hash = '' self.tags_hash = '' # section numbers for headings in the currently visited document self.secnumbers = {} # currently written docname self.current_docname = None self.init_templates() self.init_highlighter() self.init_translator_class() if self.config.html_file_suffix is not None: self.out_suffix = self.config.html_file_suffix if self.config.html_link_suffix is not None: self.link_suffix = self.config.html_link_suffix else: self.link_suffix = self.out_suffix if self.config.language is not None: if self._get_translations_js(): self.script_files.append('_static/translations.js') def _get_translations_js(self): candidates = [path.join(package_dir, 'locale', self.config.language, 'LC_MESSAGES', 'sphinx.js'), path.join(sys.prefix, 'share/sphinx/locale', self.config.language, 'sphinx.js')] + \ [path.join(dir, self.config.language, 'LC_MESSAGES', 'sphinx.js') for dir in self.config.locale_dirs] for jsfile in candidates: if path.isfile(jsfile): return jsfile return None def get_theme_config(self): return self.config.html_theme, self.config.html_theme_options def init_templates(self): Theme.init_themes(self.confdir, self.config.html_theme_path, warn=self.warn) themename, themeoptions = self.get_theme_config() self.theme = Theme(themename) self.theme_options = themeoptions.copy() self.create_template_bridge() self.templates.init(self, self.theme) def init_highlighter(self): # determine Pygments style and create the highlighter if self.config.pygments_style is not None: style = self.config.pygments_style elif self.theme: style = self.theme.get_confstr('theme', 'pygments_style', 'none') else: style = 'sphinx' self.highlighter = PygmentsBridge('html', style, self.config.trim_doctest_flags) def init_translator_class(self): if self.config.html_translator_class: self.translator_class = self.app.import_object( self.config.html_translator_class, 'html_translator_class setting') elif self.config.html_use_smartypants: self.translator_class = SmartyPantsHTMLTranslator else: self.translator_class = HTMLTranslator def get_outdated_docs(self): cfgdict = dict((name, self.config[name]) for (name, desc) in self.config.values.iteritems() if desc[1] == 'html') self.config_hash = get_stable_hash(cfgdict) self.tags_hash = get_stable_hash(sorted(self.tags)) old_config_hash = old_tags_hash = '' try: fp = open(path.join(self.outdir, '.buildinfo')) try: version = fp.readline() if version.rstrip() != '# Sphinx build info version 1': raise ValueError fp.readline() # skip commentary cfg, old_config_hash = fp.readline().strip().split(': ') if cfg != 'config': raise ValueError tag, 
old_tags_hash = fp.readline().strip().split(': ') if tag != 'tags': raise ValueError finally: fp.close() except ValueError: self.warn('unsupported build info format in %r, building all' % path.join(self.outdir, '.buildinfo')) except Exception: pass if old_config_hash != self.config_hash or \ old_tags_hash != self.tags_hash: for docname in self.env.found_docs: yield docname return if self.templates: template_mtime = self.templates.newest_template_mtime() else: template_mtime = 0 for docname in self.env.found_docs: if docname not in self.env.all_docs: yield docname continue targetname = self.get_outfilename(docname) try: targetmtime = path.getmtime(targetname) except Exception: targetmtime = 0 try: srcmtime = max(path.getmtime(self.env.doc2path(docname)), template_mtime) if srcmtime > targetmtime: yield docname except EnvironmentError: # source doesn't exist anymore pass def render_partial(self, node): """Utility: Render a lone doctree node.""" if node is None: return {'fragment': ''} doc = new_document(b('<partial node>')) doc.append(node) if self._publisher is None: self._publisher = Publisher(source_class=DocTreeInput, destination_class=StringOutput) self._publisher.set_components('standalone', 'restructuredtext', 'pseudoxml') pub = self._publisher pub.reader = DoctreeReader() pub.writer = HTMLWriter(self) pub.process_programmatic_settings(None, {'output_encoding': 'unicode'}, None) pub.set_source(doc, None) pub.set_destination(None, None) pub.publish() return pub.writer.parts def prepare_writing(self, docnames): # create the search indexer from sphinx.search import IndexBuilder, languages lang = self.config.html_search_language or self.config.language if not lang or lang not in languages: lang = 'en' self.indexer = IndexBuilder(self.env, lang, self.config.html_search_options, self.config.html_search_scorer) self.load_indexer(docnames) self.docwriter = HTMLWriter(self) self.docsettings = OptionParser( defaults=self.env.settings, components=(self.docwriter, )).get_default_values() self.docsettings.compact_lists = bool(self.config.html_compact_lists) # determine the additional indices to include self.domain_indices = [] # html_domain_indices can be False/True or a list of index names indices_config = self.config.html_domain_indices if indices_config: for domain in self.env.domains.itervalues(): for indexcls in domain.indices: indexname = '%s-%s' % (domain.name, indexcls.name) if isinstance(indices_config, list): if indexname not in indices_config: continue # deprecated config value if indexname == 'py-modindex' and \ not self.config.html_use_modindex: continue content, collapse = indexcls(domain).generate() if content: self.domain_indices.append( (indexname, indexcls, content, collapse)) # format the "last updated on" string, only once is enough since it # typically doesn't include the time of day lufmt = self.config.html_last_updated_fmt if lufmt is not None: self.last_updated = ustrftime(lufmt or _('%b %d, %Y')) else: self.last_updated = None logo = self.config.html_logo and \ path.basename(self.config.html_logo) or '' favicon = self.config.html_favicon and \ path.basename(self.config.html_favicon) or '' if favicon and os.path.splitext(favicon)[1] != '.ico': self.warn('html_favicon is not an .ico file') if not isinstance(self.config.html_use_opensearch, basestring): self.warn('html_use_opensearch config value must now be a string') self.relations = self.env.collect_relations() rellinks = [] if self.get_builder_config('use_index', 'html'): rellinks.append(('genindex', _('General Index'), 
'I', _('index'))) for indexname, indexcls, content, collapse in self.domain_indices: # if it has a short name if indexcls.shortname: rellinks.append( (indexname, indexcls.localname, '', indexcls.shortname)) if self.config.html_style is not None: stylename = self.config.html_style elif self.theme: stylename = self.theme.get_confstr('theme', 'stylesheet') else: stylename = 'default.css' self.globalcontext = dict( embedded=self.embedded, project=self.config.project, release=self.config.release, version=self.config.version, last_updated=self.last_updated, copyright=self.config.copyright, master_doc=self.config.master_doc, use_opensearch=self.config.html_use_opensearch, docstitle=self.config.html_title, shorttitle=self.config.html_short_title, show_copyright=self.config.html_show_copyright, show_sphinx=self.config.html_show_sphinx, has_source=self.config.html_copy_source, show_source=self.config.html_show_sourcelink, file_suffix=self.out_suffix, script_files=self.script_files, css_files=self.css_files, sphinx_version=__version__, style=stylename, rellinks=rellinks, builder=self.name, parents=[], logo=logo, favicon=favicon, ) if self.theme: self.globalcontext.update(('theme_' + key, val) for ( key, val) in self.theme.get_options(self.theme_options).iteritems()) self.globalcontext.update(self.config.html_context) def get_doc_context(self, docname, body, metatags): """Collect items for the template context of a page.""" # find out relations prev = next = None parents = [] rellinks = self.globalcontext['rellinks'][:] related = self.relations.get(docname) titles = self.env.titles if related and related[2]: try: next = { 'link': self.get_relative_uri(docname, related[2]), 'title': self.render_partial(titles[related[2]])['title'] } rellinks.append((related[2], next['title'], 'N', _('next'))) except KeyError: next = None if related and related[1]: try: prev = { 'link': self.get_relative_uri(docname, related[1]), 'title': self.render_partial(titles[related[1]])['title'] } rellinks.append( (related[1], prev['title'], 'P', _('previous'))) except KeyError: # the relation is (somehow) not in the TOC tree, handle # that gracefully prev = None while related and related[0]: try: parents.append({ 'link': self.get_relative_uri(docname, related[0]), 'title': self.render_partial(titles[related[0]])['title'] }) except KeyError: pass related = self.relations.get(related[0]) if parents: parents.pop() # remove link to the master file; we have a generic # "back to index" link already parents.reverse() # title rendered as HTML title = self.env.longtitles.get(docname) title = title and self.render_partial(title)['title'] or '' # the name for the copied source sourcename = self.config.html_copy_source and docname + '.txt' or '' # metadata for the document meta = self.env.metadata.get(docname) # local TOC and global TOC tree self_toc = self.env.get_toc_for(docname, self) toc = self.render_partial(self_toc)['fragment'] return dict( parents=parents, prev=prev, next=next, title=title, meta=meta, body=body, metatags=metatags, rellinks=rellinks, sourcename=sourcename, toc=toc, # only display a TOC if there's more than one item to show display_toc=(self.env.toc_num_entries[docname] > 1), ) def write_doc(self, docname, doctree): destination = StringOutput(encoding='utf-8') doctree.settings = self.docsettings self.secnumbers = self.env.toc_secnumbers.get(docname, {}) self.imgpath = relative_uri(self.get_target_uri(docname), '_images') self.dlpath = relative_uri(self.get_target_uri(docname), '_downloads') self.current_docname = 
docname self.docwriter.write(doctree, destination) self.docwriter.assemble_parts() body = self.docwriter.parts['fragment'] metatags = self.docwriter.clean_meta ctx = self.get_doc_context(docname, body, metatags) self.handle_page(docname, ctx, event_arg=doctree) def write_doc_serialized(self, docname, doctree): self.imgpath = relative_uri(self.get_target_uri(docname), '_images') self.post_process_images(doctree) title = self.env.longtitles.get(docname) title = title and self.render_partial(title)['title'] or '' self.index_page(docname, doctree, title) def finish(self): self.info(bold('writing additional files...'), nonl=1) # pages from extensions for pagelist in self.app.emit('html-collect-pages'): for pagename, context, template in pagelist: self.handle_page(pagename, context, template) # the global general index if self.get_builder_config('use_index', 'html'): self.write_genindex() # the global domain-specific indices self.write_domain_indices() # the search page if self.name != 'htmlhelp': self.info(' search', nonl=1) self.handle_page('search', {}, 'search.html') # additional pages from conf.py for pagename, template in self.config.html_additional_pages.items(): self.info(' ' + pagename, nonl=1) self.handle_page(pagename, {}, template) if self.config.html_use_opensearch and self.name != 'htmlhelp': self.info(' opensearch', nonl=1) fn = path.join(self.outdir, '_static', 'opensearch.xml') self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn) self.info() self.copy_image_files() self.copy_download_files() self.copy_static_files() self.write_buildinfo() # dump the search index self.handle_finish() def write_genindex(self): # the total count of lines for each index letter, used to distribute # the entries into two columns genindex = self.env.create_index(self) indexcounts = [] for _, entries in genindex: indexcounts.append( sum(1 + len(subitems) for _, (_, subitems) in entries)) genindexcontext = dict( genindexentries=genindex, genindexcounts=indexcounts, split_index=self.config.html_split_index, ) self.info(' genindex', nonl=1) if self.config.html_split_index: self.handle_page('genindex', genindexcontext, 'genindex-split.html') self.handle_page('genindex-all', genindexcontext, 'genindex.html') for (key, entries), count in zip(genindex, indexcounts): ctx = { 'key': key, 'entries': entries, 'count': count, 'genindexentries': genindex } self.handle_page('genindex-' + key, ctx, 'genindex-single.html') else: self.handle_page('genindex', genindexcontext, 'genindex.html') def write_domain_indices(self): for indexname, indexcls, content, collapse in self.domain_indices: indexcontext = dict( indextitle=indexcls.localname, content=content, collapse_index=collapse, ) self.info(' ' + indexname, nonl=1) self.handle_page(indexname, indexcontext, 'domainindex.html') def copy_image_files(self): # copy image files if self.images: ensuredir(path.join(self.outdir, '_images')) for src in self.status_iterator(self.images, 'copying images... ', brown, len(self.images)): dest = self.images[src] try: copyfile(path.join(self.srcdir, src), path.join(self.outdir, '_images', dest)) except Exception, err: self.warn('cannot copy image file %r: %s' % (path.join(self.srcdir, src), err))
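The indexcounts computed in write_genindex() above are just the number of rows each letter section will occupy (one per entry plus one per sub-entry), which the template uses to split the index into two columns. A toy example with made-up data, matching the two-tuple entry layout this older genindex uses:

# hypothetical genindex data: (letter, entries) with entries as
# (name, (links, subitems)) pairs
genindex = [
    ('B', [('builder', ('', [('html', ''), ('latex', '')]))]),
    ('W', [('write_doc', ('', []))]),
]
indexcounts = [
    sum(1 + len(subitems) for _, (_, subitems) in entries)
    for _, entries in genindex
]
# -> [3, 1]: 'B' needs three rows (one entry plus two subitems), 'W' needs one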
def prepare_writing(self, docnames: Set[str]) -> None: # create the search indexer self.indexer = None if self.search: from sphinx.search import IndexBuilder lang = self.config.html_search_language or self.config.language if not lang: lang = 'en' self.indexer = IndexBuilder(self.env, lang, self.config.html_search_options, self.config.html_search_scorer) self.load_indexer(docnames) self.docwriter = HTMLWriter(self) self.docsettings = OptionParser( defaults=self.env.settings, components=(self.docwriter,), read_config_files=True).get_default_values() # type: Any self.docsettings.compact_lists = bool(self.config.html_compact_lists) # determine the additional indices to include self.domain_indices = [] # html_domain_indices can be False/True or a list of index names indices_config = self.config.html_domain_indices if indices_config: for domain_name in sorted(self.env.domains): domain = None # type: Domain domain = self.env.domains[domain_name] for indexcls in domain.indices: indexname = '%s-%s' % (domain.name, indexcls.name) if isinstance(indices_config, list): if indexname not in indices_config: continue content, collapse = indexcls(domain).generate() if content: self.domain_indices.append( (indexname, indexcls, content, collapse)) # format the "last updated on" string, only once is enough since it # typically doesn't include the time of day lufmt = self.config.html_last_updated_fmt if lufmt is not None: self.last_updated = format_date(lufmt or _('%b %d, %Y'), language=self.config.language) else: self.last_updated = None logo = self.config.html_logo and \ path.basename(self.config.html_logo) or '' favicon = self.config.html_favicon and \ path.basename(self.config.html_favicon) or '' if not isinstance(self.config.html_use_opensearch, str): logger.warning(__('html_use_opensearch config value must now be a string')) self.relations = self.env.collect_relations() rellinks = [] # type: List[Tuple[str, str, str, str]] if self.use_index: rellinks.append(('genindex', _('General Index'), 'I', _('index'))) for indexname, indexcls, content, collapse in self.domain_indices: # if it has a short name if indexcls.shortname: rellinks.append((indexname, indexcls.localname, '', indexcls.shortname)) if self.config.html_style is not None: stylename = self.config.html_style elif self.theme: stylename = self.theme.get_config('theme', 'stylesheet') else: stylename = 'default.css' self.globalcontext = { 'embedded': self.embedded, 'project': self.config.project, 'release': return_codes_re.sub('', self.config.release), 'version': self.config.version, 'last_updated': self.last_updated, 'copyright': self.config.copyright, 'master_doc': self.config.master_doc, 'use_opensearch': self.config.html_use_opensearch, 'docstitle': self.config.html_title, 'shorttitle': self.config.html_short_title, 'show_copyright': self.config.html_show_copyright, 'show_sphinx': self.config.html_show_sphinx, 'has_source': self.config.html_copy_source, 'show_source': self.config.html_show_sourcelink, 'sourcelink_suffix': self.config.html_sourcelink_suffix, 'file_suffix': self.out_suffix, 'script_files': self.script_files, 'language': self.config.language, 'css_files': self.css_files, 'sphinx_version': __display_version__, 'style': stylename, 'rellinks': rellinks, 'builder': self.name, 'parents': [], 'logo': logo, 'favicon': favicon, 'html5_doctype': html5_ready and not self.config.html4_writer } if self.theme: self.globalcontext.update( ('theme_' + key, val) for (key, val) in self.theme.get_options(self.theme_options).items()) 
self.globalcontext.update(self.config.html_context)
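The indices_config handling in the prepare_writing() variant above accepts three shapes for html_domain_indices: False disables all domain indices, True enables all of them, and a list enables only the named ones. Boiled down to the selection rule alone; the function and the example index names are illustrative.

def wanted_domain_indices(indices_config, available):
    """available: iterable of 'domain-index' names, e.g. 'py-modindex'."""
    if not indices_config:
        return []
    if isinstance(indices_config, list):
        return [name for name in available if name in indices_config]
    return list(available)

wanted_domain_indices(True, ['py-modindex'])                     # ['py-modindex']
wanted_domain_indices(['py-modindex'], ['py-modindex', 'std-glossary'])
# -> ['py-modindex']
wanted_domain_indices(False, ['py-modindex'])                    # []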
def run(self): env = self.state.document.settings.env baseurl = env.config.rss_baseurl assert baseurl, 'rss_baseurl must be defined in your config.py' source = self.state_machine.input_lines.source( self.lineno - self.state_machine.input_offset - 1) rss_doc = utils.new_document('<rss>', self.state.document.settings) Parser().parse('\n'.join(self.content), rss_doc) path = os.path.relpath(source, env.srcdir) suffixes = env.config.source_suffix # retain backwards compatibility with sphinx < 1.3 if isinstance(suffixes, basestring): suffixes = [suffixes] for suffix in suffixes: if path.endswith(suffix): path = '%s.html' % path[:-len(suffix)] break builder = env.app.builder docwriter = HTMLWriter(self) docsettings = OptionParser( defaults=env.settings, components=(docwriter, )).get_default_values() docsettings.compact_lists = bool(env.config.html_compact_lists) dest = os.path.join(env.app.outdir, os_path(env.docname) + '.rss') pageurl = '%s/%s' % (baseurl, path) with open(dest, 'w') as rss: title = self.options.get('title', '') description = self.options.get('description', None) rss.write('<?xml version="1.0" encoding="ISO-8859-1" ?>\n') rss.write('<rss version="2.0">\n') rss.write('<channel>\n') rss.write('<title>%s</title>\n' % cgi.escape(title)) rss.write('<link>%s</link>\n' % pageurl) if description: rss.write('<description>%s</description>\n' % cgi.escape(description)) for child in rss_doc.children: if not isinstance(child, nodes.section): continue title_index = child.first_child_matching_class(nodes.title) if title_index is None: continue node = nodes.paragraph() node.extend(child.children[title_index + 1:]) sec_doc = utils.new_document('<rss-section>', docsettings) sec_doc.append(node) visitor = RssTranslator(builder, sec_doc) sec_doc.walkabout(visitor) title = child.children[title_index].astext() sectionurl = '%s#%s' % (pageurl, child.get('ids')[0]) description = ''.join(visitor.body) rss.write('<item>\n') rss.write('<title>%s</title>\n' % cgi.escape(title)) rss.write('<link>%s</link>\n' % sectionurl) rss.write('<description><![CDATA[%s]]></description>\n' % description) rss.write('</item>\n') rss.write('</channel>\n') rss.write('</rss>\n') return []
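Two details in the RSS directive above tend to trip up ports to current Sphinx/Python: source_suffix was a single string before Sphinx 1.3 and a list afterwards, and cgi.escape() no longer exists, with xml.sax.saxutils.escape() being the usual stand-in. A hedged sketch of both; page_url and the example URL are made up.

from xml.sax.saxutils import escape

def page_url(baseurl, relpath, suffixes):
    # accept both the pre-1.3 string form and the later list form
    if isinstance(suffixes, str):
        suffixes = [suffixes]
    for suffix in suffixes:
        if relpath.endswith(suffix):
            relpath = '%s.html' % relpath[:-len(suffix)]
            break
    return '%s/%s' % (baseurl, relpath)

page_url('https://example.org/docs', 'blog/post.rst', ['.rst', '.txt'])
# -> 'https://example.org/docs/blog/post.html'
title_xml = escape('Ampersands & <brackets> need escaping')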
class StandaloneHTMLBuilder(Builder): """ Builds standalone HTML docs. """ name = 'html' format = 'html' epilog = __('The HTML pages are in %(outdir)s.') copysource = True allow_parallel = True out_suffix = '.html' link_suffix = '.html' # defaults to matching out_suffix indexer_format = js_index # type: Any indexer_dumps_unicode = True # create links to original images from images [True/False] html_scaled_image_link = True supported_image_types = ['image/svg+xml', 'image/png', 'image/gif', 'image/jpeg'] supported_remote_images = True supported_data_uri_images = True searchindex_filename = 'searchindex.js' add_permalinks = True allow_sharp_as_current_path = True embedded = False # for things like HTML help or Qt help: suppresses sidebar search = True # for things like HTML help and Apple help: suppress search use_index = False download_support = True # enable download role imgpath = None # type: str domain_indices = [] # type: List[Tuple[str, Type[Index], List[Tuple[str, List[IndexEntry]]], bool]] # NOQA def __init__(self, app: Sphinx) -> None: super().__init__(app) # CSS files self.css_files = [] # type: List[Dict[str, str]] # JS files self.script_files = [] # type: List[JavaScript] def init(self) -> None: self.build_info = self.create_build_info() # basename of images directory self.imagedir = '_images' # section numbers for headings in the currently visited document self.secnumbers = {} # type: Dict[str, Tuple[int, ...]] # currently written docname self.current_docname = None # type: str self.init_templates() self.init_highlighter() self.init_css_files() self.init_js_files() html_file_suffix = self.get_builder_config('file_suffix', 'html') if html_file_suffix is not None: self.out_suffix = html_file_suffix html_link_suffix = self.get_builder_config('link_suffix', 'html') if html_link_suffix is not None: self.link_suffix = html_link_suffix else: self.link_suffix = self.out_suffix self.use_index = self.get_builder_config('use_index', 'html') def create_build_info(self) -> BuildInfo: return BuildInfo(self.config, self.tags, ['html']) def _get_translations_js(self) -> str: candidates = [path.join(dir, self.config.language, 'LC_MESSAGES', 'sphinx.js') for dir in self.config.locale_dirs] + \ [path.join(package_dir, 'locale', self.config.language, 'LC_MESSAGES', 'sphinx.js'), path.join(sys.prefix, 'share/sphinx/locale', self.config.language, 'sphinx.js')] for jsfile in candidates: if path.isfile(jsfile): return jsfile return None def get_theme_config(self) -> Tuple[str, Dict]: return self.config.html_theme, self.config.html_theme_options def init_templates(self) -> None: theme_factory = HTMLThemeFactory(self.app) themename, themeoptions = self.get_theme_config() self.theme = theme_factory.create(themename) self.theme_options = themeoptions.copy() self.create_template_bridge() self.templates.init(self, self.theme) def init_highlighter(self) -> None: # determine Pygments style and create the highlighter if self.config.pygments_style is not None: style = self.config.pygments_style elif self.theme: style = self.theme.get_config('theme', 'pygments_style', 'none') else: style = 'sphinx' self.highlighter = PygmentsBridge('html', style) def init_css_files(self) -> None: for filename, attrs in self.app.registry.css_files: self.add_css_file(filename, **attrs) for filename, attrs in self.get_builder_config('css_files', 'html'): self.add_css_file(filename, **attrs) def add_css_file(self, filename: str, **kwargs: str) -> None: if '://' not in filename: filename = posixpath.join('_static', filename) 
self.css_files.append(Stylesheet(filename, **kwargs)) # type: ignore def init_js_files(self) -> None: self.add_js_file('jquery.js') self.add_js_file('underscore.js') self.add_js_file('doctools.js') self.add_js_file('language_data.js') for filename, attrs in self.app.registry.js_files: self.add_js_file(filename, **attrs) for filename, attrs in self.get_builder_config('js_files', 'html'): self.add_js_file(filename, **attrs) if self.config.language and self._get_translations_js(): self.add_js_file('translations.js') def add_js_file(self, filename: str, **kwargs: str) -> None: if filename and '://' not in filename: filename = posixpath.join('_static', filename) self.script_files.append(JavaScript(filename, **kwargs)) @property def default_translator_class(self) -> Type[nodes.NodeVisitor]: # type: ignore if not html5_ready or self.config.html4_writer: return HTMLTranslator else: return HTML5Translator @property def math_renderer_name(self) -> str: name = self.get_builder_config('math_renderer', 'html') if name is not None: # use given name return name else: # not given: choose a math_renderer from registered ones as possible renderers = list(self.app.registry.html_inline_math_renderers) if len(renderers) == 1: # only default math_renderer (mathjax) is registered return renderers[0] elif len(renderers) == 2: # default and another math_renderer are registered; prior the another renderers.remove('mathjax') return renderers[0] else: # many math_renderers are registered. can't choose automatically! return None def get_outdated_docs(self) -> Iterator[str]: try: with open(path.join(self.outdir, '.buildinfo')) as fp: buildinfo = BuildInfo.load(fp) if self.build_info != buildinfo: yield from self.env.found_docs return except ValueError as exc: logger.warning(__('Failed to read build info file: %r'), exc) except OSError: # ignore errors on reading pass if self.templates: template_mtime = self.templates.newest_template_mtime() else: template_mtime = 0 for docname in self.env.found_docs: if docname not in self.env.all_docs: yield docname continue targetname = self.get_outfilename(docname) try: targetmtime = path.getmtime(targetname) except Exception: targetmtime = 0 try: srcmtime = max(path.getmtime(self.env.doc2path(docname)), template_mtime) if srcmtime > targetmtime: yield docname except OSError: # source doesn't exist anymore pass def get_asset_paths(self) -> List[str]: return self.config.html_extra_path + self.config.html_static_path def render_partial(self, node: Node) -> Dict[str, str]: """Utility: Render a lone doctree node.""" if node is None: return {'fragment': ''} doc = new_document('<partial node>') doc.append(node) writer = HTMLWriter(self) return publish_parts(reader_name='doctree', writer=writer, source_class=DocTreeInput, settings_overrides={'output_encoding': 'unicode'}, source=doc) def prepare_writing(self, docnames: Set[str]) -> None: # create the search indexer self.indexer = None if self.search: from sphinx.search import IndexBuilder lang = self.config.html_search_language or self.config.language if not lang: lang = 'en' self.indexer = IndexBuilder(self.env, lang, self.config.html_search_options, self.config.html_search_scorer) self.load_indexer(docnames) self.docwriter = HTMLWriter(self) self.docsettings = OptionParser( defaults=self.env.settings, components=(self.docwriter,), read_config_files=True).get_default_values() # type: Any self.docsettings.compact_lists = bool(self.config.html_compact_lists) # determine the additional indices to include self.domain_indices = [] # 
html_domain_indices can be False/True or a list of index names indices_config = self.config.html_domain_indices if indices_config: for domain_name in sorted(self.env.domains): domain = None # type: Domain domain = self.env.domains[domain_name] for indexcls in domain.indices: indexname = '%s-%s' % (domain.name, indexcls.name) if isinstance(indices_config, list): if indexname not in indices_config: continue content, collapse = indexcls(domain).generate() if content: self.domain_indices.append( (indexname, indexcls, content, collapse)) # format the "last updated on" string, only once is enough since it # typically doesn't include the time of day lufmt = self.config.html_last_updated_fmt if lufmt is not None: self.last_updated = format_date(lufmt or _('%b %d, %Y'), language=self.config.language) else: self.last_updated = None logo = self.config.html_logo and \ path.basename(self.config.html_logo) or '' favicon = self.config.html_favicon and \ path.basename(self.config.html_favicon) or '' if not isinstance(self.config.html_use_opensearch, str): logger.warning(__('html_use_opensearch config value must now be a string')) self.relations = self.env.collect_relations() rellinks = [] # type: List[Tuple[str, str, str, str]] if self.use_index: rellinks.append(('genindex', _('General Index'), 'I', _('index'))) for indexname, indexcls, content, collapse in self.domain_indices: # if it has a short name if indexcls.shortname: rellinks.append((indexname, indexcls.localname, '', indexcls.shortname)) if self.config.html_style is not None: stylename = self.config.html_style elif self.theme: stylename = self.theme.get_config('theme', 'stylesheet') else: stylename = 'default.css' self.globalcontext = { 'embedded': self.embedded, 'project': self.config.project, 'release': return_codes_re.sub('', self.config.release), 'version': self.config.version, 'last_updated': self.last_updated, 'copyright': self.config.copyright, 'master_doc': self.config.master_doc, 'use_opensearch': self.config.html_use_opensearch, 'docstitle': self.config.html_title, 'shorttitle': self.config.html_short_title, 'show_copyright': self.config.html_show_copyright, 'show_sphinx': self.config.html_show_sphinx, 'has_source': self.config.html_copy_source, 'show_source': self.config.html_show_sourcelink, 'sourcelink_suffix': self.config.html_sourcelink_suffix, 'file_suffix': self.out_suffix, 'script_files': self.script_files, 'language': self.config.language, 'css_files': self.css_files, 'sphinx_version': __display_version__, 'style': stylename, 'rellinks': rellinks, 'builder': self.name, 'parents': [], 'logo': logo, 'favicon': favicon, 'html5_doctype': html5_ready and not self.config.html4_writer } if self.theme: self.globalcontext.update( ('theme_' + key, val) for (key, val) in self.theme.get_options(self.theme_options).items()) self.globalcontext.update(self.config.html_context) def get_doc_context(self, docname: str, body: str, metatags: str) -> Dict[str, Any]: """Collect items for the template context of a page.""" # find out relations prev = next = None parents = [] rellinks = self.globalcontext['rellinks'][:] related = self.relations.get(docname) titles = self.env.titles if related and related[2]: try: next = { 'link': self.get_relative_uri(docname, related[2]), 'title': self.render_partial(titles[related[2]])['title'] } rellinks.append((related[2], next['title'], 'N', _('next'))) except KeyError: next = None if related and related[1]: try: prev = { 'link': self.get_relative_uri(docname, related[1]), 'title': 
self.render_partial(titles[related[1]])['title'] } rellinks.append((related[1], prev['title'], 'P', _('previous'))) except KeyError: # the relation is (somehow) not in the TOC tree, handle # that gracefully prev = None while related and related[0]: try: parents.append( {'link': self.get_relative_uri(docname, related[0]), 'title': self.render_partial(titles[related[0]])['title']}) except KeyError: pass related = self.relations.get(related[0]) if parents: # remove link to the master file; we have a generic # "back to index" link already parents.pop() parents.reverse() # title rendered as HTML title_node = self.env.longtitles.get(docname) title = title_node and self.render_partial(title_node)['title'] or '' # Suffix for the document source_suffix = path.splitext(self.env.doc2path(docname))[1] # the name for the copied source if self.config.html_copy_source: sourcename = docname + source_suffix if source_suffix != self.config.html_sourcelink_suffix: sourcename += self.config.html_sourcelink_suffix else: sourcename = '' # metadata for the document meta = self.env.metadata.get(docname) # local TOC and global TOC tree self_toc = TocTree(self.env).get_toc_for(docname, self) toc = self.render_partial(self_toc)['fragment'] return { 'parents': parents, 'prev': prev, 'next': next, 'title': title, 'meta': meta, 'body': body, 'metatags': metatags, 'rellinks': rellinks, 'sourcename': sourcename, 'toc': toc, # only display a TOC if there's more than one item to show 'display_toc': (self.env.toc_num_entries[docname] > 1), 'page_source_suffix': source_suffix, } def write_doc(self, docname: str, doctree: nodes.document) -> None: destination = StringOutput(encoding='utf-8') doctree.settings = self.docsettings self.secnumbers = self.env.toc_secnumbers.get(docname, {}) self.fignumbers = self.env.toc_fignumbers.get(docname, {}) self.imgpath = relative_uri(self.get_target_uri(docname), '_images') self.dlpath = relative_uri(self.get_target_uri(docname), '_downloads') self.current_docname = docname self.docwriter.write(doctree, destination) self.docwriter.assemble_parts() body = self.docwriter.parts['fragment'] metatags = self.docwriter.clean_meta ctx = self.get_doc_context(docname, body, metatags) self.handle_page(docname, ctx, event_arg=doctree) def write_doc_serialized(self, docname: str, doctree: nodes.document) -> None: self.imgpath = relative_uri(self.get_target_uri(docname), self.imagedir) self.post_process_images(doctree) title_node = self.env.longtitles.get(docname) title = title_node and self.render_partial(title_node)['title'] or '' self.index_page(docname, doctree, title) def finish(self) -> None: self.finish_tasks.add_task(self.gen_indices) self.finish_tasks.add_task(self.gen_additional_pages) self.finish_tasks.add_task(self.copy_image_files) self.finish_tasks.add_task(self.copy_download_files) self.finish_tasks.add_task(self.copy_static_files) self.finish_tasks.add_task(self.copy_extra_files) self.finish_tasks.add_task(self.write_buildinfo) # dump the search index self.handle_finish() def gen_indices(self) -> None: logger.info(bold(__('generating indices...')), nonl=True) # the global general index if self.use_index: self.write_genindex() # the global domain-specific indices self.write_domain_indices() logger.info('') def gen_additional_pages(self) -> None: # pages from extensions for pagelist in self.events.emit('html-collect-pages'): for pagename, context, template in pagelist: self.handle_page(pagename, context, template) logger.info(bold(__('writing additional pages...')), nonl=True) # additional 
pages from conf.py for pagename, template in self.config.html_additional_pages.items(): logger.info(' ' + pagename, nonl=True) self.handle_page(pagename, {}, template) # the search page if self.search: logger.info(' search', nonl=True) self.handle_page('search', {}, 'search.html') # the opensearch xml file if self.config.html_use_opensearch and self.search: logger.info(' opensearch', nonl=True) fn = path.join(self.outdir, '_static', 'opensearch.xml') self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn) logger.info('') def write_genindex(self) -> None: # the total count of lines for each index letter, used to distribute # the entries into two columns genindex = IndexEntries(self.env).create_index(self) indexcounts = [] for _k, entries in genindex: indexcounts.append(sum(1 + len(subitems) for _, (_, subitems, _) in entries)) genindexcontext = { 'genindexentries': genindex, 'genindexcounts': indexcounts, 'split_index': self.config.html_split_index, } logger.info(' genindex', nonl=True) if self.config.html_split_index: self.handle_page('genindex', genindexcontext, 'genindex-split.html') self.handle_page('genindex-all', genindexcontext, 'genindex.html') for (key, entries), count in zip(genindex, indexcounts): ctx = {'key': key, 'entries': entries, 'count': count, 'genindexentries': genindex} self.handle_page('genindex-' + key, ctx, 'genindex-single.html') else: self.handle_page('genindex', genindexcontext, 'genindex.html') def write_domain_indices(self) -> None: for indexname, indexcls, content, collapse in self.domain_indices: indexcontext = { 'indextitle': indexcls.localname, 'content': content, 'collapse_index': collapse, } logger.info(' ' + indexname, nonl=True) self.handle_page(indexname, indexcontext, 'domainindex.html') def copy_image_files(self) -> None: if self.images: stringify_func = ImageAdapter(self.app.env).get_original_image_uri ensuredir(path.join(self.outdir, self.imagedir)) for src in status_iterator(self.images, __('copying images... '), "brown", len(self.images), self.app.verbosity, stringify_func=stringify_func): dest = self.images[src] try: copyfile(path.join(self.srcdir, src), path.join(self.outdir, self.imagedir, dest)) except Exception as err: logger.warning(__('cannot copy image file %r: %s'), path.join(self.srcdir, src), err) def copy_download_files(self) -> None: def to_relpath(f: str) -> str: return relative_path(self.srcdir, f) # copy downloadable files if self.env.dlfiles: ensuredir(path.join(self.outdir, '_downloads')) for src in status_iterator(self.env.dlfiles, __('copying downloadable files... '), "brown", len(self.env.dlfiles), self.app.verbosity, stringify_func=to_relpath): try: dest = path.join(self.outdir, '_downloads', self.env.dlfiles[src][1]) ensuredir(path.dirname(dest)) copyfile(path.join(self.srcdir, src), dest) except OSError as err: logger.warning(__('cannot copy downloadable file %r: %s'), path.join(self.srcdir, src), err) def copy_static_files(self) -> None: try: # copy static files logger.info(bold(__('copying static files... 
')), nonl=True) ensuredir(path.join(self.outdir, '_static')) # first, create pygments style file with open(path.join(self.outdir, '_static', 'pygments.css'), 'w') as f: f.write(self.highlighter.get_stylesheet()) # then, copy translations JavaScript file if self.config.language is not None: jsfile = self._get_translations_js() if jsfile: copyfile(jsfile, path.join(self.outdir, '_static', 'translations.js')) # copy non-minified stemmer JavaScript file if self.indexer is not None: jsfile = self.indexer.get_js_stemmer_rawcode() if jsfile: copyfile(jsfile, path.join(self.outdir, '_static', '_stemmer.js')) ctx = self.globalcontext.copy() # add context items for search function used in searchtools.js_t if self.indexer is not None: ctx.update(self.indexer.context_for_searchtool()) # then, copy over theme-supplied static files if self.theme: for theme_path in self.theme.get_theme_dirs()[::-1]: entry = path.join(theme_path, 'static') copy_asset(entry, path.join(self.outdir, '_static'), excluded=DOTFILES, context=ctx, renderer=self.templates) # then, copy over all user-supplied static files excluded = Matcher(self.config.exclude_patterns + ["**/.*"]) for static_path in self.config.html_static_path: entry = path.join(self.confdir, static_path) copy_asset(entry, path.join(self.outdir, '_static'), excluded, context=ctx, renderer=self.templates) # copy logo and favicon files if not already in static path if self.config.html_logo: entry = path.join(self.confdir, self.config.html_logo) copy_asset(entry, path.join(self.outdir, '_static')) if self.config.html_favicon: entry = path.join(self.confdir, self.config.html_favicon) copy_asset(entry, path.join(self.outdir, '_static')) logger.info(__('done')) except OSError as err: logger.warning(__('cannot copy static file %r'), err) def copy_extra_files(self) -> None: try: # copy html_extra_path files logger.info(bold(__('copying extra files... ')), nonl=True) excluded = Matcher(self.config.exclude_patterns) for extra_path in self.config.html_extra_path: entry = path.join(self.confdir, extra_path) copy_asset(entry, self.outdir, excluded) logger.info(__('done')) except OSError as err: logger.warning(__('cannot copy extra file %r'), err) def write_buildinfo(self) -> None: try: with open(path.join(self.outdir, '.buildinfo'), 'w') as fp: self.build_info.dump(fp) except OSError as exc: logger.warning(__('Failed to write build info file: %r'), exc) def cleanup(self) -> None: # clean up theme stuff if self.theme: self.theme.cleanup() def post_process_images(self, doctree: Node) -> None: """Pick the best candidate for an image and link down-scaled images to their high res version. """ Builder.post_process_images(self, doctree) if self.config.html_scaled_image_link and self.html_scaled_image_link: for node in doctree.traverse(nodes.image): scale_keys = ('scale', 'width', 'height') if not any((key in node) for key in scale_keys) or \ isinstance(node.parent, nodes.reference): # docutils does unfortunately not preserve the # ``target`` attribute on images, so we need to check # the parent node here. 
continue uri = node['uri'] reference = nodes.reference('', '', internal=True) if uri in self.images: reference['refuri'] = posixpath.join(self.imgpath, self.images[uri]) else: reference['refuri'] = uri node.replace_self(reference) reference.append(node) def load_indexer(self, docnames: Iterable[str]) -> None: keep = set(self.env.all_docs) - set(docnames) try: searchindexfn = path.join(self.outdir, self.searchindex_filename) if self.indexer_dumps_unicode: with open(searchindexfn, encoding='utf-8') as ft: self.indexer.load(ft, self.indexer_format) else: with open(searchindexfn, 'rb') as fb: self.indexer.load(fb, self.indexer_format) except (OSError, ValueError): if keep: logger.warning(__('search index couldn\'t be loaded, but not all ' 'documents will be built: the index will be ' 'incomplete.')) # delete all entries for files that will be rebuilt self.indexer.prune(keep) def index_page(self, pagename: str, doctree: nodes.document, title: str) -> None: # only index pages with title if self.indexer is not None and title: filename = self.env.doc2path(pagename, base=None) try: self.indexer.feed(pagename, filename, title, doctree) except TypeError: # fallback for old search-adapters self.indexer.feed(pagename, title, doctree) # type: ignore indexer_name = self.indexer.__class__.__name__ warnings.warn( 'The %s.feed() method signature is deprecated. Update to ' '%s.feed(docname, filename, title, doctree).' % ( indexer_name, indexer_name), RemovedInSphinx40Warning) def _get_local_toctree(self, docname: str, collapse: bool = True, **kwds) -> str: if 'includehidden' not in kwds: kwds['includehidden'] = False return self.render_partial(TocTree(self.env).get_toctree_for( docname, self, collapse, **kwds))['fragment'] def get_outfilename(self, pagename: str) -> str: return path.join(self.outdir, os_path(pagename) + self.out_suffix) def add_sidebars(self, pagename: str, ctx: Dict) -> None: def has_wildcard(pattern: str) -> bool: return any(char in pattern for char in '*?[') sidebars = None matched = None customsidebar = None # default sidebars settings for selected theme if self.theme.name == 'alabaster': # provide default settings for alabaster (for compatibility) # Note: this will be removed before Sphinx-2.0 try: # get default sidebars settings from alabaster (if defined) theme_default_sidebars = self.theme.config.get('theme', 'sidebars') if theme_default_sidebars: sidebars = [name.strip() for name in theme_default_sidebars.split(',')] except Exception: # fallback to better default settings sidebars = ['about.html', 'navigation.html', 'relations.html', 'searchbox.html', 'donate.html'] else: theme_default_sidebars = self.theme.get_config('theme', 'sidebars', None) if theme_default_sidebars: sidebars = [name.strip() for name in theme_default_sidebars.split(',')] # user sidebar settings html_sidebars = self.get_builder_config('sidebars', 'html') for pattern, patsidebars in html_sidebars.items(): if patmatch(pagename, pattern): if matched: if has_wildcard(pattern): # warn if both patterns contain wildcards if has_wildcard(matched): logger.warning(__('page %s matches two patterns in ' 'html_sidebars: %r and %r'), pagename, matched, pattern) # else the already matched pattern is more specific # than the present one, because it contains no wildcard continue matched = pattern sidebars = patsidebars if sidebars is None: # keep defaults pass ctx['sidebars'] = sidebars ctx['customsidebar'] = customsidebar # --------- these are overwritten by the serialization builder def get_target_uri(self, docname: str, typ: 
str = None) -> str: return docname + self.link_suffix def handle_page(self, pagename: str, addctx: Dict, templatename: str = 'page.html', outfilename: str = None, event_arg: Any = None) -> None: ctx = self.globalcontext.copy() # current_page_name is backwards compatibility ctx['pagename'] = ctx['current_page_name'] = pagename ctx['encoding'] = self.config.html_output_encoding default_baseuri = self.get_target_uri(pagename) # in the singlehtml builder, default_baseuri still contains an #anchor # part, which relative_uri doesn't really like... default_baseuri = default_baseuri.rsplit('#', 1)[0] if self.config.html_baseurl: ctx['pageurl'] = posixpath.join(self.config.html_baseurl, pagename + self.out_suffix) else: ctx['pageurl'] = None def pathto(otheruri: str, resource: bool = False, baseuri: str = default_baseuri) -> str: # NOQA if resource and '://' in otheruri: # allow non-local resources given by scheme return otheruri elif not resource: otheruri = self.get_target_uri(otheruri) uri = relative_uri(baseuri, otheruri) or '#' if uri == '#' and not self.allow_sharp_as_current_path: uri = baseuri return uri ctx['pathto'] = pathto def css_tag(css: Stylesheet) -> str: attrs = [] for key in sorted(css.attributes): value = css.attributes[key] if value is not None: attrs.append('%s="%s"' % (key, html.escape(value, True))) attrs.append('href="%s"' % pathto(css.filename, resource=True)) return '<link %s />' % ' '.join(attrs) ctx['css_tag'] = css_tag def hasdoc(name: str) -> bool: if name in self.env.all_docs: return True elif name == 'search' and self.search: return True elif name == 'genindex' and self.get_builder_config('use_index', 'html'): return True return False ctx['hasdoc'] = hasdoc ctx['toctree'] = lambda **kw: self._get_local_toctree(pagename, **kw) self.add_sidebars(pagename, ctx) ctx.update(addctx) self.update_page_context(pagename, templatename, ctx, event_arg) newtmpl = self.app.emit_firstresult('html-page-context', pagename, templatename, ctx, event_arg) if newtmpl: templatename = newtmpl try: output = self.templates.render(templatename, ctx) except UnicodeError: logger.warning(__("a Unicode error occurred when rendering the page %s. " "Please make sure all config values that contain " "non-ASCII content are Unicode strings."), pagename) return except Exception as exc: raise ThemeError(__("An error happened in rendering the page %s.\nReason: %r") % (pagename, exc)) if not outfilename: outfilename = self.get_outfilename(pagename) # outfilename's path is in general different from self.outdir ensuredir(path.dirname(outfilename)) try: with open(outfilename, 'w', encoding=ctx['encoding'], errors='xmlcharrefreplace') as f: f.write(output) except OSError as err: logger.warning(__("error writing file %s: %s"), outfilename, err) if self.copysource and ctx.get('sourcename'): # copy the source file for the "show source" link source_name = path.join(self.outdir, '_sources', os_path(ctx['sourcename'])) ensuredir(path.dirname(source_name)) copyfile(self.env.doc2path(pagename), source_name) def update_page_context(self, pagename: str, templatename: str, ctx: Dict, event_arg: Any) -> None: pass def handle_finish(self) -> None: if self.indexer: self.finish_tasks.add_task(self.dump_search_index) self.finish_tasks.add_task(self.dump_inventory) def dump_inventory(self) -> None: logger.info(bold(__('dumping object inventory... 
')), nonl=True) InventoryFile.dump(path.join(self.outdir, INVENTORY_FILENAME), self.env, self) logger.info(__('done')) def dump_search_index(self) -> None: logger.info( bold(__('dumping search index in %s ... ') % self.indexer.label()), nonl=True) self.indexer.prune(self.env.all_docs) searchindexfn = path.join(self.outdir, self.searchindex_filename) # first write to a temporary file, so that if dumping fails, # the existing index won't be overwritten if self.indexer_dumps_unicode: with open(searchindexfn + '.tmp', 'w', encoding='utf-8') as ft: self.indexer.dump(ft, self.indexer_format) else: with open(searchindexfn + '.tmp', 'wb') as fb: self.indexer.dump(fb, self.indexer_format) movefile(searchindexfn + '.tmp', searchindexfn) logger.info(__('done'))
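The builders above are normally consumed through Sphinx's extension API rather than instantiated by hand. A minimal sketch, with a hypothetical extension module and class name, of registering a StandaloneHTMLBuilder subclass through the real Sphinx.add_builder() hook and the update_page_context() override point shown in the class above:

from sphinx.application import Sphinx
from sphinx.builders.html import StandaloneHTMLBuilder


class CustomHTMLBuilder(StandaloneHTMLBuilder):
    # hypothetical subclass; only the builder name and one hook are overridden
    name = 'customhtml'

    def update_page_context(self, pagename, templatename, ctx, event_arg):
        # inject an extra template variable into every rendered page
        ctx['custom_flag'] = True


def setup(app: Sphinx):
    app.add_builder(CustomHTMLBuilder)
    return {'version': '0.1', 'parallel_read_safe': True}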
def prepare_writing(self, _doc_names: set[str]) -> None:
    self.docwriter = HTMLWriter(self)
    _opt_parser = OptionParser(components=(self.docwriter,),
                               defaults=self.env.settings,
                               read_config_files=True)
    self.docsettings = _opt_parser.get_default_values()
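A minimal sketch of how the docwriter and docsettings built by this stripped-down prepare_writing() are typically consumed, mirroring the write_doc() pattern used by the full builders in this document; the method name here is illustrative:

from docutils.io import StringOutput

def write_fragment(self, docname, doctree):
    # attach the docutils settings prepared in prepare_writing, then
    # serialize the doctree and pull the rendered body out of the parts
    destination = StringOutput(encoding='utf-8')
    doctree.settings = self.docsettings
    self.docwriter.write(doctree, destination)
    self.docwriter.assemble_parts()
    return self.docwriter.parts['fragment']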
def prepare_writing(self, docnames): # create the search indexer from sphinx.search import IndexBuilder, languages lang = self.config.html_search_language or self.config.language if not lang or lang not in languages: lang = "en" self.indexer = IndexBuilder(self.env, lang, self.config.html_search_options, self.config.html_search_scorer) self.load_indexer(docnames) self.docwriter = HTMLWriter(self) self.docsettings = OptionParser( defaults=self.env.settings, components=(self.docwriter,), read_config_files=True ).get_default_values() self.docsettings.compact_lists = bool(self.config.html_compact_lists) # determine the additional indices to include self.domain_indices = [] # html_domain_indices can be False/True or a list of index names indices_config = self.config.html_domain_indices if indices_config: for domain in self.env.domains.itervalues(): for indexcls in domain.indices: indexname = "%s-%s" % (domain.name, indexcls.name) if isinstance(indices_config, list): if indexname not in indices_config: continue # deprecated config value if indexname == "py-modindex" and not self.config.html_use_modindex: continue content, collapse = indexcls(domain).generate() if content: self.domain_indices.append((indexname, indexcls, content, collapse)) # format the "last updated on" string, only once is enough since it # typically doesn't include the time of day lufmt = self.config.html_last_updated_fmt if lufmt is not None: self.last_updated = ustrftime(lufmt or _("%b %d, %Y")) else: self.last_updated = None logo = self.config.html_logo and path.basename(self.config.html_logo) or "" favicon = self.config.html_favicon and path.basename(self.config.html_favicon) or "" if favicon and os.path.splitext(favicon)[1] != ".ico": self.warn("html_favicon is not an .ico file") if not isinstance(self.config.html_use_opensearch, basestring): self.warn("html_use_opensearch config value must now be a string") self.relations = self.env.collect_relations() rellinks = [] if self.get_builder_config("use_index", "html"): rellinks.append(("genindex", _("General Index"), "I", _("index"))) for indexname, indexcls, content, collapse in self.domain_indices: # if it has a short name if indexcls.shortname: rellinks.append((indexname, indexcls.localname, "", indexcls.shortname)) if self.config.html_style is not None: stylename = self.config.html_style elif self.theme: stylename = self.theme.get_confstr("theme", "stylesheet") else: stylename = "default.css" self.globalcontext = dict( embedded=self.embedded, project=self.config.project, release=self.config.release, version=self.config.version, last_updated=self.last_updated, copyright=self.config.copyright, master_doc=self.config.master_doc, use_opensearch=self.config.html_use_opensearch, docstitle=self.config.html_title, shorttitle=self.config.html_short_title, show_copyright=self.config.html_show_copyright, show_sphinx=self.config.html_show_sphinx, has_source=self.config.html_copy_source, show_source=self.config.html_show_sourcelink, file_suffix=self.out_suffix, script_files=self.script_files, css_files=self.css_files, sphinx_version=__version__, style=stylename, rellinks=rellinks, builder=self.name, parents=[], logo=logo, favicon=favicon, ) if self.theme: self.globalcontext.update( ("theme_" + key, val) for (key, val) in self.theme.get_options(self.theme_options).iteritems() ) self.globalcontext.update(self.config.html_context)
class StandaloneHTMLBuilder(Builder): """ Builds standalone HTML docs. """ name = 'html' format = 'html' copysource = True out_suffix = '.html' link_suffix = '.html' # defaults to matching out_suffix indexer_format = js_index supported_image_types = ['image/svg+xml', 'image/png', 'image/gif', 'image/jpeg'] searchindex_filename = 'searchindex.js' add_permalinks = True embedded = False # for things like HTML help or Qt help: suppresses sidebar # This is a class attribute because it is mutated by Sphinx.add_javascript. script_files = ['_static/jquery.js', '_static/doctools.js'] def init(self): # a hash of all config values that, if changed, cause a full rebuild self.config_hash = '' self.tags_hash = '' # section numbers for headings in the currently visited document self.secnumbers = {} self.init_templates() self.init_highlighter() self.init_translator_class() if self.config.html_file_suffix: self.out_suffix = self.config.html_file_suffix if self.config.html_link_suffix is not None: self.link_suffix = self.config.html_link_suffix else: self.link_suffix = self.out_suffix if self.config.language is not None: jsfile = path.join(package_dir, 'locale', self.config.language, 'LC_MESSAGES', 'sphinx.js') if path.isfile(jsfile): self.script_files.append('_static/translations.js') def init_templates(self): Theme.init_themes(self) self.theme = Theme(self.config.html_theme) self.create_template_bridge() self.templates.init(self, self.theme) def init_highlighter(self): # determine Pygments style and create the highlighter if self.config.pygments_style is not None: style = self.config.pygments_style elif self.theme: style = self.theme.get_confstr('theme', 'pygments_style', 'none') else: style = 'sphinx' self.highlighter = PygmentsBridge('html', style) def init_translator_class(self): if self.config.html_translator_class: self.translator_class = self.app.import_object( self.config.html_translator_class, 'html_translator_class setting') elif self.config.html_use_smartypants: self.translator_class = SmartyPantsHTMLTranslator else: self.translator_class = HTMLTranslator def get_outdated_docs(self): cfgdict = dict((name, self.config[name]) for (name, desc) in self.config.values.iteritems() if desc[1] == 'html') self.config_hash = md5(str(cfgdict)).hexdigest() self.tags_hash = md5(str(sorted(self.tags))).hexdigest() old_config_hash = old_tags_hash = '' try: fp = open(path.join(self.outdir, '.buildinfo')) version = fp.readline() if version.rstrip() != '# Sphinx build info version 1': raise ValueError fp.readline() # skip commentary cfg, old_config_hash = fp.readline().strip().split(': ') if cfg != 'config': raise ValueError tag, old_tags_hash = fp.readline().strip().split(': ') if tag != 'tags': raise ValueError fp.close() except ValueError: self.warn('unsupported build info format in %r, building all' % path.join(self.outdir, '.buildinfo')) except Exception: pass if old_config_hash != self.config_hash or \ old_tags_hash != self.tags_hash: for docname in self.env.found_docs: yield docname return if self.templates: template_mtime = self.templates.newest_template_mtime() else: template_mtime = 0 for docname in self.env.found_docs: if docname not in self.env.all_docs: yield docname continue targetname = self.env.doc2path(docname, self.outdir, self.out_suffix) try: targetmtime = path.getmtime(targetname) except Exception: targetmtime = 0 try: srcmtime = max(path.getmtime(self.env.doc2path(docname)), template_mtime) if srcmtime > targetmtime: yield docname except EnvironmentError: # source doesn't exist anymore pass 
def render_partial(self, node): """Utility: Render a lone doctree node.""" doc = new_document('<partial node>') doc.append(node) return publish_parts( doc, source_class=DocTreeInput, reader=DoctreeReader(), writer=HTMLWriter(self), settings_overrides={'output_encoding': 'unicode'} ) def prepare_writing(self, docnames): from sphinx.search import IndexBuilder self.indexer = IndexBuilder(self.env) self.load_indexer(docnames) self.docwriter = HTMLWriter(self) self.docsettings = OptionParser( defaults=self.env.settings, components=(self.docwriter,)).get_default_values() # format the "last updated on" string, only once is enough since it # typically doesn't include the time of day lufmt = self.config.html_last_updated_fmt if lufmt is not None: self.last_updated = ustrftime(lufmt or _('%b %d, %Y')) else: self.last_updated = None logo = self.config.html_logo and \ path.basename(self.config.html_logo) or '' favicon = self.config.html_favicon and \ path.basename(self.config.html_favicon) or '' if favicon and os.path.splitext(favicon)[1] != '.ico': self.warn('html_favicon is not an .ico file') if not isinstance(self.config.html_use_opensearch, basestring): self.warn('html_use_opensearch config value must now be a string') self.relations = self.env.collect_relations() rellinks = [] if self.config.html_use_index: rellinks.append(('genindex', _('General Index'), 'I', _('index'))) if self.config.html_use_modindex and self.env.modules: rellinks.append(('modindex', _('Global Module Index'), 'M', _('modules'))) if self.config.html_style is not None: stylename = self.config.html_style elif self.theme: stylename = self.theme.get_confstr('theme', 'stylesheet') else: stylename = 'default.css' self.globalcontext = dict( embedded = self.embedded, project = self.config.project, release = self.config.release, version = self.config.version, last_updated = self.last_updated, copyright = self.config.copyright, master_doc = self.config.master_doc, use_opensearch = self.config.html_use_opensearch, docstitle = self.config.html_title, shorttitle = self.config.html_short_title, show_sphinx = self.config.html_show_sphinx, has_source = self.config.html_copy_source, show_source = self.config.html_show_sourcelink, file_suffix = self.out_suffix, script_files = self.script_files, sphinx_version = __version__, style = stylename, rellinks = rellinks, builder = self.name, parents = [], logo = logo, favicon = favicon, ) if self.theme: self.globalcontext.update( ('theme_' + key, val) for (key, val) in self.theme.get_options( self.config.html_theme_options).iteritems()) self.globalcontext.update(self.config.html_context) def get_doc_context(self, docname, body, metatags): """Collect items for the template context of a page.""" # find out relations prev = next = None parents = [] rellinks = self.globalcontext['rellinks'][:] related = self.relations.get(docname) titles = self.env.titles if related and related[2]: try: next = { 'link': self.get_relative_uri(docname, related[2]), 'title': self.render_partial(titles[related[2]])['title'] } rellinks.append((related[2], next['title'], 'N', _('next'))) except KeyError: next = None if related and related[1]: try: prev = { 'link': self.get_relative_uri(docname, related[1]), 'title': self.render_partial(titles[related[1]])['title'] } rellinks.append((related[1], prev['title'], 'P', _('previous'))) except KeyError: # the relation is (somehow) not in the TOC tree, handle # that gracefully prev = None while related and related[0]: try: parents.append( {'link': self.get_relative_uri(docname, 
related[0]), 'title': self.render_partial(titles[related[0]])['title']}) except KeyError: pass related = self.relations.get(related[0]) if parents: parents.pop() # remove link to the master file; we have a generic # "back to index" link already parents.reverse() # title rendered as HTML title = self.env.longtitles.get(docname) title = title and self.render_partial(title)['title'] or '' # the name for the copied source sourcename = self.config.html_copy_source and docname + '.txt' or '' # metadata for the document meta = self.env.metadata.get(docname) # local TOC and global TOC tree toc = self.render_partial(self.env.get_toc_for(docname))['fragment'] return dict( parents = parents, prev = prev, next = next, title = title, meta = meta, body = body, metatags = metatags, rellinks = rellinks, sourcename = sourcename, toc = toc, # only display a TOC if there's more than one item to show display_toc = (self.env.toc_num_entries[docname] > 1), ) def write_doc(self, docname, doctree): destination = StringOutput(encoding='utf-8') doctree.settings = self.docsettings self.secnumbers = self.env.toc_secnumbers.get(docname, {}) self.imgpath = relative_uri(self.get_target_uri(docname), '_images') self.post_process_images(doctree) self.dlpath = relative_uri(self.get_target_uri(docname), '_downloads') self.docwriter.write(doctree, destination) self.docwriter.assemble_parts() body = self.docwriter.parts['fragment'] metatags = self.docwriter.clean_meta ctx = self.get_doc_context(docname, body, metatags) self.index_page(docname, doctree, ctx.get('title', '')) self.handle_page(docname, ctx, event_arg=doctree) def finish(self): self.info(bold('writing additional files...'), nonl=1) # the global general index if self.config.html_use_index: # the total count of lines for each index letter, used to distribute # the entries into two columns genindex = self.env.create_index(self) indexcounts = [] for _, entries in genindex: indexcounts.append(sum(1 + len(subitems) for _, (_, subitems) in entries)) genindexcontext = dict( genindexentries = genindex, genindexcounts = indexcounts, split_index = self.config.html_split_index, ) self.info(' genindex', nonl=1) if self.config.html_split_index: self.handle_page('genindex', genindexcontext, 'genindex-split.html') self.handle_page('genindex-all', genindexcontext, 'genindex.html') for (key, entries), count in zip(genindex, indexcounts): ctx = {'key': key, 'entries': entries, 'count': count, 'genindexentries': genindex} self.handle_page('genindex-' + key, ctx, 'genindex-single.html') else: self.handle_page('genindex', genindexcontext, 'genindex.html') # the global module index if self.config.html_use_modindex and self.env.modules: # the sorted list of all modules, for the global module index modules = sorted(((mn, (self.get_relative_uri('modindex', fn) + '#module-' + mn, sy, pl, dep)) for (mn, (fn, sy, pl, dep)) in self.env.modules.iteritems()), key=lambda x: x[0].lower()) # collect all platforms platforms = set() # sort out collapsable modules modindexentries = [] letters = [] pmn = '' num_toplevels = 0 num_collapsables = 0 cg = 0 # collapse group fl = '' # first letter for mn, (fn, sy, pl, dep) in modules: pl = pl and pl.split(', ') or [] platforms.update(pl) ignore = self.env.config['modindex_common_prefix'] ignore = sorted(ignore, key=len, reverse=True) for i in ignore: if mn.startswith(i): mn = mn[len(i):] stripped = i break else: stripped = '' if fl != mn[0].lower() and mn[0] != '_': # heading letter = mn[0].upper() if letter not in letters: modindexentries.append(['', 
False, 0, False, letter, '', [], False, '']) letters.append(letter) tn = mn.split('.')[0] if tn != mn: # submodule if pmn == tn: # first submodule - make parent collapsable modindexentries[-1][1] = True num_collapsables += 1 elif not pmn.startswith(tn): # submodule without parent in list, add dummy entry cg += 1 modindexentries.append([tn, True, cg, False, '', '', [], False, stripped]) else: num_toplevels += 1 cg += 1 modindexentries.append([mn, False, cg, (tn != mn), fn, sy, pl, dep, stripped]) pmn = mn fl = mn[0].lower() platforms = sorted(platforms) # apply heuristics when to collapse modindex at page load: # only collapse if number of toplevel modules is larger than # number of submodules collapse = len(modules) - num_toplevels < num_toplevels # As some parts of the module names may have been stripped, those # names have changed, thus it is necessary to sort the entries. if ignore: def sorthelper(entry): name = entry[0] if name == '': # heading name = entry[4] return name.lower() modindexentries.sort(key=sorthelper) letters.sort() modindexcontext = dict( modindexentries = modindexentries, platforms = platforms, letters = letters, collapse_modindex = collapse, ) self.info(' modindex', nonl=1) self.handle_page('modindex', modindexcontext, 'modindex.html') # the search page if self.name != 'htmlhelp': self.info(' search', nonl=1) self.handle_page('search', {}, 'search.html') # additional pages from conf.py for pagename, template in self.config.html_additional_pages.items(): self.info(' '+pagename, nonl=1) self.handle_page(pagename, {}, template) if self.config.html_use_opensearch and self.name != 'htmlhelp': self.info(' opensearch', nonl=1) fn = path.join(self.outdir, '_static', 'opensearch.xml') self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn) self.info() # copy image files if self.images: self.info(bold('copying images...'), nonl=True) ensuredir(path.join(self.outdir, '_images')) for src, dest in self.images.iteritems(): self.info(' '+src, nonl=1) try: copyfile(path.join(self.srcdir, src), path.join(self.outdir, '_images', dest)) except Exception, err: self.warn('cannot copy image file %r: %s' % (path.join(self.srcdir, src), err)) self.info() # copy downloadable files if self.env.dlfiles: self.info(bold('copying downloadable files...'), nonl=True) ensuredir(path.join(self.outdir, '_downloads')) for src, (_, dest) in self.env.dlfiles.iteritems(): self.info(' '+src, nonl=1) try: copyfile(path.join(self.srcdir, src), path.join(self.outdir, '_downloads', dest)) except Exception, err: self.warn('cannot copy downloadable file %r: %s' % (path.join(self.srcdir, src), err)) self.info()
class StandaloneHTMLBuilder(Builder): """ Builds standalone HTML docs. """ name = 'html' format = 'html' copysource = True allow_parallel = True out_suffix = '.html' link_suffix = '.html' # defaults to matching out_suffix indexer_format = js_index indexer_dumps_unicode = True supported_image_types = [ 'image/svg+xml', 'image/png', 'image/gif', 'image/jpeg' ] searchindex_filename = 'searchindex.js' add_permalinks = True embedded = False # for things like HTML help or Qt help: suppresses sidebar # This is a class attribute because it is mutated by Sphinx.add_javascript. script_files = [ '_static/jquery.js', '_static/underscore.js', '_static/doctools.js' ] # Dito for this one. css_files = [] default_sidebars = [ 'localtoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html' ] # cached publisher object for snippets _publisher = None def init(self): # a hash of all config values that, if changed, cause a full rebuild self.config_hash = '' self.tags_hash = '' # section numbers for headings in the currently visited document self.secnumbers = {} # currently written docname self.current_docname = None self.init_templates() self.init_highlighter() self.init_translator_class() if self.config.html_file_suffix is not None: self.out_suffix = self.config.html_file_suffix if self.config.html_link_suffix is not None: self.link_suffix = self.config.html_link_suffix else: self.link_suffix = self.out_suffix if self.config.language is not None: if self._get_translations_js(): self.script_files.append('_static/translations.js') def _get_translations_js(self): candidates = [path.join(package_dir, 'locale', self.config.language, 'LC_MESSAGES', 'sphinx.js'), path.join(sys.prefix, 'share/sphinx/locale', self.config.language, 'sphinx.js')] + \ [path.join(dir, self.config.language, 'LC_MESSAGES', 'sphinx.js') for dir in self.config.locale_dirs] for jsfile in candidates: if path.isfile(jsfile): return jsfile return None def get_theme_config(self): return self.config.html_theme, self.config.html_theme_options def init_templates(self): Theme.init_themes(self.confdir, self.config.html_theme_path, warn=self.warn) themename, themeoptions = self.get_theme_config() self.theme = Theme(themename) self.theme_options = themeoptions.copy() self.create_template_bridge() self.templates.init(self, self.theme) def init_highlighter(self): # determine Pygments style and create the highlighter if self.config.pygments_style is not None: style = self.config.pygments_style elif self.theme: style = self.theme.get_confstr('theme', 'pygments_style', 'none') else: style = 'sphinx' self.highlighter = PygmentsBridge('html', style, self.config.trim_doctest_flags) def init_translator_class(self): if self.config.html_translator_class: self.translator_class = self.app.import_object( self.config.html_translator_class, 'html_translator_class setting') elif self.config.html_use_smartypants: self.translator_class = SmartyPantsHTMLTranslator else: self.translator_class = HTMLTranslator def get_outdated_docs(self): cfgdict = dict((name, self.config[name]) for (name, desc) in self.config.values.items() if desc[1] == 'html') self.config_hash = get_stable_hash(cfgdict) self.tags_hash = get_stable_hash(sorted(self.tags)) old_config_hash = old_tags_hash = '' try: fp = open(path.join(self.outdir, '.buildinfo')) try: version = fp.readline() if version.rstrip() != '# Sphinx build info version 1': raise ValueError fp.readline() # skip commentary cfg, old_config_hash = fp.readline().strip().split(': ') if cfg != 'config': raise ValueError tag, old_tags_hash 
= fp.readline().strip().split(': ') if tag != 'tags': raise ValueError finally: fp.close() except ValueError: self.warn('unsupported build info format in %r, building all' % path.join(self.outdir, '.buildinfo')) except Exception: pass if old_config_hash != self.config_hash or \ old_tags_hash != self.tags_hash: for docname in self.env.found_docs: yield docname return if self.templates: template_mtime = self.templates.newest_template_mtime() else: template_mtime = 0 for docname in self.env.found_docs: if docname not in self.env.all_docs: yield docname continue targetname = self.get_outfilename(docname) try: targetmtime = path.getmtime(targetname) except Exception: targetmtime = 0 try: srcmtime = max(path.getmtime(self.env.doc2path(docname)), template_mtime) if srcmtime > targetmtime: yield docname except EnvironmentError: # source doesn't exist anymore pass def render_partial(self, node): """Utility: Render a lone doctree node.""" if node is None: return {'fragment': ''} doc = new_document(b('<partial node>')) doc.append(node) if self._publisher is None: self._publisher = Publisher(source_class=DocTreeInput, destination_class=StringOutput) self._publisher.set_components('standalone', 'restructuredtext', 'pseudoxml') pub = self._publisher pub.reader = DoctreeReader() pub.writer = HTMLWriter(self) pub.process_programmatic_settings(None, {'output_encoding': 'unicode'}, None) pub.set_source(doc, None) pub.set_destination(None, None) pub.publish() return pub.writer.parts def prepare_writing(self, docnames): # create the search indexer from sphinx.search import IndexBuilder, languages lang = self.config.html_search_language or self.config.language if not lang or lang not in languages: lang = 'en' self.indexer = IndexBuilder(self.env, lang, self.config.html_search_options, self.config.html_search_scorer) self.load_indexer(docnames) self.docwriter = HTMLWriter(self) self.docsettings = OptionParser( defaults=self.env.settings, components=(self.docwriter, ), read_config_files=True).get_default_values() self.docsettings.compact_lists = bool(self.config.html_compact_lists) # determine the additional indices to include self.domain_indices = [] # html_domain_indices can be False/True or a list of index names indices_config = self.config.html_domain_indices if indices_config: for domain in self.env.domains.values(): for indexcls in domain.indices: indexname = '%s-%s' % (domain.name, indexcls.name) if isinstance(indices_config, list): if indexname not in indices_config: continue # deprecated config value if indexname == 'py-modindex' and \ not self.config.html_use_modindex: continue content, collapse = indexcls(domain).generate() if content: self.domain_indices.append( (indexname, indexcls, content, collapse)) # format the "last updated on" string, only once is enough since it # typically doesn't include the time of day lufmt = self.config.html_last_updated_fmt if lufmt is not None: self.last_updated = ustrftime(lufmt or _('%b %d, %Y')) else: self.last_updated = None logo = self.config.html_logo and \ path.basename(self.config.html_logo) or '' favicon = self.config.html_favicon and \ path.basename(self.config.html_favicon) or '' if favicon and os.path.splitext(favicon)[1] != '.ico': self.warn('html_favicon is not an .ico file') if not isinstance(self.config.html_use_opensearch, str): self.warn('html_use_opensearch config value must now be a string') self.relations = self.env.collect_relations() rellinks = [] if self.get_builder_config('use_index', 'html'): rellinks.append(('genindex', _('General Index'), 
'I', _('index'))) for indexname, indexcls, content, collapse in self.domain_indices: # if it has a short name if indexcls.shortname: rellinks.append( (indexname, indexcls.localname, '', indexcls.shortname)) if self.config.html_style is not None: stylename = self.config.html_style elif self.theme: stylename = self.theme.get_confstr('theme', 'stylesheet') else: stylename = 'default.css' self.globalcontext = dict( embedded=self.embedded, project=self.config.project, release=self.config.release, version=self.config.version, last_updated=self.last_updated, copyright=self.config.copyright, master_doc=self.config.master_doc, use_opensearch=self.config.html_use_opensearch, docstitle=self.config.html_title, shorttitle=self.config.html_short_title, show_copyright=self.config.html_show_copyright, show_sphinx=self.config.html_show_sphinx, has_source=self.config.html_copy_source, show_source=self.config.html_show_sourcelink, file_suffix=self.out_suffix, script_files=self.script_files, css_files=self.css_files, sphinx_version=__version__, style=stylename, rellinks=rellinks, builder=self.name, parents=[], logo=logo, favicon=favicon, ) if self.theme: self.globalcontext.update(('theme_' + key, val) for ( key, val) in self.theme.get_options(self.theme_options).items()) self.globalcontext.update(self.config.html_context) def get_doc_context(self, docname, body, metatags): """Collect items for the template context of a page.""" # find out relations prev = next = None parents = [] rellinks = self.globalcontext['rellinks'][:] related = self.relations.get(docname) titles = self.env.titles if related and related[2]: try: next = { 'link': self.get_relative_uri(docname, related[2]), 'title': self.render_partial(titles[related[2]])['title'] } rellinks.append((related[2], next['title'], 'N', _('next'))) except KeyError: next = None if related and related[1]: try: prev = { 'link': self.get_relative_uri(docname, related[1]), 'title': self.render_partial(titles[related[1]])['title'] } rellinks.append( (related[1], prev['title'], 'P', _('previous'))) except KeyError: # the relation is (somehow) not in the TOC tree, handle # that gracefully prev = None while related and related[0]: try: parents.append({ 'link': self.get_relative_uri(docname, related[0]), 'title': self.render_partial(titles[related[0]])['title'] }) except KeyError: pass related = self.relations.get(related[0]) if parents: parents.pop() # remove link to the master file; we have a generic # "back to index" link already parents.reverse() # title rendered as HTML title = self.env.longtitles.get(docname) title = title and self.render_partial(title)['title'] or '' # the name for the copied source sourcename = self.config.html_copy_source and docname + '.txt' or '' # metadata for the document meta = self.env.metadata.get(docname) # local TOC and global TOC tree self_toc = self.env.get_toc_for(docname, self) toc = self.render_partial(self_toc)['fragment'] return dict( parents=parents, prev=prev, next=next, title=title, meta=meta, body=body, metatags=metatags, rellinks=rellinks, sourcename=sourcename, toc=toc, # only display a TOC if there's more than one item to show display_toc=(self.env.toc_num_entries[docname] > 1), ) def write_doc(self, docname, doctree): destination = StringOutput(encoding='utf-8') doctree.settings = self.docsettings self.secnumbers = self.env.toc_secnumbers.get(docname, {}) self.imgpath = relative_uri(self.get_target_uri(docname), '_images') self.dlpath = relative_uri(self.get_target_uri(docname), '_downloads') self.current_docname = docname 
self.docwriter.write(doctree, destination) self.docwriter.assemble_parts() body = self.docwriter.parts['fragment'] metatags = self.docwriter.clean_meta ctx = self.get_doc_context(docname, body, metatags) self.handle_page(docname, ctx, event_arg=doctree) def write_doc_serialized(self, docname, doctree): self.imgpath = relative_uri(self.get_target_uri(docname), '_images') self.post_process_images(doctree) title = self.env.longtitles.get(docname) title = title and self.render_partial(title)['title'] or '' self.index_page(docname, doctree, title) def finish(self): self.info(bold('writing additional files...'), nonl=1) # pages from extensions for pagelist in self.app.emit('html-collect-pages'): for pagename, context, template in pagelist: self.handle_page(pagename, context, template) # the global general index if self.get_builder_config('use_index', 'html'): self.write_genindex() # the global domain-specific indices self.write_domain_indices() # the search page if self.name != 'htmlhelp': self.info(' search', nonl=1) self.handle_page('search', {}, 'search.html') # additional pages from conf.py for pagename, template in list( self.config.html_additional_pages.items()): self.info(' ' + pagename, nonl=1) self.handle_page(pagename, {}, template) if self.config.html_use_opensearch and self.name != 'htmlhelp': self.info(' opensearch', nonl=1) fn = path.join(self.outdir, '_static', 'opensearch.xml') self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn) self.info() self.copy_image_files() self.copy_download_files() self.copy_static_files() self.copy_extra_files() self.write_buildinfo() # dump the search index self.handle_finish() def write_genindex(self): # the total count of lines for each index letter, used to distribute # the entries into two columns genindex = self.env.create_index(self) indexcounts = [] for _, entries in genindex: indexcounts.append( sum(1 + len(subitems) for _, (_, subitems) in entries)) genindexcontext = dict( genindexentries=genindex, genindexcounts=indexcounts, split_index=self.config.html_split_index, ) self.info(' genindex', nonl=1) if self.config.html_split_index: self.handle_page('genindex', genindexcontext, 'genindex-split.html') self.handle_page('genindex-all', genindexcontext, 'genindex.html') for (key, entries), count in zip(genindex, indexcounts): ctx = { 'key': key, 'entries': entries, 'count': count, 'genindexentries': genindex } self.handle_page('genindex-' + key, ctx, 'genindex-single.html') else: self.handle_page('genindex', genindexcontext, 'genindex.html') def write_domain_indices(self): for indexname, indexcls, content, collapse in self.domain_indices: indexcontext = dict( indextitle=indexcls.localname, content=content, collapse_index=collapse, ) self.info(' ' + indexname, nonl=1) self.handle_page(indexname, indexcontext, 'domainindex.html') def copy_image_files(self): # copy image files if self.images: ensuredir(path.join(self.outdir, '_images')) for src in self.status_iterator(self.images, 'copying images... ', brown, len(self.images)): dest = self.images[src] try: copyfile(path.join(self.srcdir, src), path.join(self.outdir, '_images', dest)) except Exception as err: self.warn('cannot copy image file %r: %s' % (path.join(self.srcdir, src), err)) def copy_download_files(self): # copy downloadable files if self.env.dlfiles: ensuredir(path.join(self.outdir, '_downloads')) for src in self.status_iterator(self.env.dlfiles, 'copying downloadable files... 
', brown, len(self.env.dlfiles)): dest = self.env.dlfiles[src][1] try: copyfile(path.join(self.srcdir, src), path.join(self.outdir, '_downloads', dest)) except Exception as err: self.warn('cannot copy downloadable file %r: %s' % (path.join(self.srcdir, src), err)) def copy_static_files(self): # copy static files self.info(bold('copying static files... '), nonl=True) ensuredir(path.join(self.outdir, '_static')) # first, create pygments style file f = open(path.join(self.outdir, '_static', 'pygments.css'), 'w') f.write(self.highlighter.get_stylesheet()) f.close() # then, copy translations JavaScript file if self.config.language is not None: jsfile = self._get_translations_js() if jsfile: copyfile(jsfile, path.join(self.outdir, '_static', 'translations.js')) # add context items for search function used in searchtools.js_t ctx = self.globalcontext.copy() ctx.update(self.indexer.context_for_searchtool()) # then, copy over theme-supplied static files if self.theme: themeentries = [ path.join(themepath, 'static') for themepath in self.theme.get_dirchain()[::-1] ] for entry in themeentries: copy_static_entry(entry, path.join(self.outdir, '_static'), self, ctx) # then, copy over all user-supplied static files staticentries = [ path.join(self.confdir, spath) for spath in self.config.html_static_path ] matchers = compile_matchers( self.config.exclude_patterns + ['**/' + d for d in self.config.exclude_dirnames]) for entry in staticentries: if not path.exists(entry): self.warn('html_static_path entry %r does not exist' % entry) continue copy_static_entry(entry, path.join(self.outdir, '_static'), self, ctx, exclude_matchers=matchers) # copy logo and favicon files if not already in static path if self.config.html_logo: logobase = path.basename(self.config.html_logo) logotarget = path.join(self.outdir, '_static', logobase) if not path.isfile(logotarget): copyfile(path.join(self.confdir, self.config.html_logo), logotarget) if self.config.html_favicon: iconbase = path.basename(self.config.html_favicon) icontarget = path.join(self.outdir, '_static', iconbase) if not path.isfile(icontarget): copyfile(path.join(self.confdir, self.config.html_favicon), icontarget) self.info('done') def copy_extra_files(self): # copy html_extra_path files self.info(bold('copying extra files... '), nonl=True) extraentries = [ path.join(self.confdir, epath) for epath in self.config.html_extra_path ] for entry in extraentries: if not path.exists(entry): self.warn('html_extra_path entry %r does not exist' % entry) continue copy_static_entry(entry, self.outdir, self) def write_buildinfo(self): # write build info file fp = open(path.join(self.outdir, '.buildinfo'), 'w') try: fp.write('# Sphinx build info version 1\n' '# This file hashes the configuration used when building' ' these files. When it is not found, a full rebuild will' ' be done.\nconfig: %s\ntags: %s\n' % (self.config_hash, self.tags_hash)) finally: fp.close() def cleanup(self): # clean up theme stuff if self.theme: self.theme.cleanup() def post_process_images(self, doctree): """Pick the best candidate for an image and link down-scaled images to their high res version. """ Builder.post_process_images(self, doctree) for node in doctree.traverse(nodes.image): scale_keys = ('scale', 'width', 'height') if not any((key in node) for key in scale_keys) or \ isinstance(node.parent, nodes.reference): # docutils does unfortunately not preserve the # ``target`` attribute on images, so we need to check # the parent node here. 
continue uri = node['uri'] reference = nodes.reference('', '', internal=True) if uri in self.images: reference['refuri'] = posixpath.join(self.imgpath, self.images[uri]) else: reference['refuri'] = uri node.replace_self(reference) reference.append(node) def load_indexer(self, docnames): keep = set(self.env.all_docs) - set(docnames) try: searchindexfn = path.join(self.outdir, self.searchindex_filename) if self.indexer_dumps_unicode: f = codecs.open(searchindexfn, 'r', encoding='utf-8') else: f = open(searchindexfn, 'rb') try: self.indexer.load(f, self.indexer_format) finally: f.close() except (IOError, OSError, ValueError): if keep: self.warn('search index couldn\'t be loaded, but not all ' 'documents will be built: the index will be ' 'incomplete.') # delete all entries for files that will be rebuilt self.indexer.prune(keep) def index_page(self, pagename, doctree, title): # only index pages with title if self.indexer is not None and title: self.indexer.feed(pagename, title, doctree) def _get_local_toctree(self, docname, collapse=True, **kwds): if 'includehidden' not in kwds: kwds['includehidden'] = False return self.render_partial( self.env.get_toctree_for(docname, self, collapse, **kwds))['fragment'] def get_outfilename(self, pagename): return path.join(self.outdir, os_path(pagename) + self.out_suffix) def add_sidebars(self, pagename, ctx): def has_wildcard(pattern): return any(char in pattern for char in '*?[') sidebars = None matched = None customsidebar = None for pattern, patsidebars in self.config.html_sidebars.items(): if patmatch(pagename, pattern): if matched: if has_wildcard(pattern): # warn if both patterns contain wildcards if has_wildcard(matched): self.warn('page %s matches two patterns in ' 'html_sidebars: %r and %r' % (pagename, matched, pattern)) # else the already matched pattern is more specific # than the present one, because it contains no wildcard continue matched = pattern sidebars = patsidebars if sidebars is None: # keep defaults pass elif isinstance(sidebars, str): # 0.x compatible mode: insert custom sidebar before searchbox customsidebar = sidebars sidebars = None ctx['sidebars'] = sidebars ctx['customsidebar'] = customsidebar # --------- these are overwritten by the serialization builder def get_target_uri(self, docname, typ=None): return docname + self.link_suffix def handle_page(self, pagename, addctx, templatename='page.html', outfilename=None, event_arg=None): ctx = self.globalcontext.copy() # current_page_name is backwards compatibility ctx['pagename'] = ctx['current_page_name'] = pagename default_baseuri = self.get_target_uri(pagename) # in the singlehtml builder, default_baseuri still contains an #anchor # part, which relative_uri doesn't really like... 
default_baseuri = default_baseuri.rsplit('#', 1)[0] def pathto(otheruri, resource=False, baseuri=default_baseuri): if resource and '://' in otheruri: # allow non-local resources given by scheme return otheruri elif not resource: otheruri = self.get_target_uri(otheruri) uri = relative_uri(baseuri, otheruri) or '#' return uri ctx['pathto'] = pathto ctx['hasdoc'] = lambda name: name in self.env.all_docs if self.name != 'htmlhelp': ctx['encoding'] = encoding = self.config.html_output_encoding else: ctx['encoding'] = encoding = self.encoding ctx['toctree'] = lambda **kw: self._get_local_toctree(pagename, **kw) self.add_sidebars(pagename, ctx) ctx.update(addctx) self.app.emit('html-page-context', pagename, templatename, ctx, event_arg) try: output = self.templates.render(templatename, ctx) except UnicodeError: self.warn("a Unicode error occurred when rendering the page %s. " "Please make sure all config values that contain " "non-ASCII content are Unicode strings." % pagename) return if not outfilename: outfilename = self.get_outfilename(pagename) # outfilename's path is in general different from self.outdir ensuredir(path.dirname(outfilename)) try: f = codecs.open(outfilename, 'w', encoding, 'xmlcharrefreplace') try: f.write(output) finally: f.close() except (IOError, OSError) as err: self.warn("error writing file %s: %s" % (outfilename, err)) if self.copysource and ctx.get('sourcename'): # copy the source file for the "show source" link source_name = path.join(self.outdir, '_sources', os_path(ctx['sourcename'])) ensuredir(path.dirname(source_name)) copyfile(self.env.doc2path(pagename), source_name) def handle_finish(self): self.dump_search_index() self.dump_inventory() def dump_inventory(self): self.info(bold('dumping object inventory... '), nonl=True) f = open(path.join(self.outdir, INVENTORY_FILENAME), 'wb') try: f.write( ('# Sphinx inventory version 2\n' '# Project: %s\n' '# Version: %s\n' '# The remainder of this file is compressed using zlib.\n' % (self.config.project, self.config.version)).encode('utf-8')) compressor = zlib.compressobj(9) for domainname, domain in self.env.domains.items(): for name, dispname, type, docname, anchor, prio in \ domain.get_objects(): if anchor.endswith(name): # this can shorten the inventory by as much as 25% anchor = anchor[:-len(name)] + '$' uri = self.get_target_uri(docname) + '#' + anchor if dispname == name: dispname = '-' f.write( compressor.compress(('%s %s:%s %s %s %s\n' % (name, domainname, type, prio, uri, dispname)).encode('utf-8'))) f.write(compressor.flush()) finally: f.close() self.info('done') def dump_search_index(self): self.info(bold('dumping search index... '), nonl=True) self.indexer.prune(self.env.all_docs) searchindexfn = path.join(self.outdir, self.searchindex_filename) # first write to a temporary file, so that if dumping fails, # the existing index won't be overwritten if self.indexer_dumps_unicode: f = codecs.open(searchindexfn + '.tmp', 'w', encoding='utf-8') else: f = open(searchindexfn + '.tmp', 'wb') try: self.indexer.dump(f, self.indexer_format) finally: f.close() movefile(searchindexfn + '.tmp', searchindexfn) self.info('done')