def handle_page(self, pagename, ctx, templatename='page.html', outfilename=None, event_arg=None):
    """Dump the rendering context for *pagename* to *outfilename* and copy
    the document source for the "show source" link."""
    ctx['current_page_name'] = pagename
    custom_sidebar = self.config.html_sidebars.get(pagename)
    if custom_sidebar:
        ctx['customsidebar'] = custom_sidebar
    if not outfilename:
        outfilename = path.join(self.outdir, os_path(pagename) + self.out_suffix)
    # give extensions a chance to mutate the context before it is dumped
    self.app.emit('html-page-context', pagename, templatename, ctx, event_arg)
    ensuredir(path.dirname(outfilename))
    with open(outfilename, 'wb') as fp:
        self.implementation.dump(ctx, fp, 2)
    # mirror the reST source into _sources when the page has one, so the
    # "show source" link can serve it
    if ctx.get('sourcename'):
        source_name = path.join(self.outdir, '_sources', os_path(ctx['sourcename']))
        ensuredir(path.dirname(source_name))
        copyfile(self.env.doc2path(pagename), source_name)
def build_finished(app, exception):
    """Copy every generated bokeh-plot script next to the document that owns it.

    Connected to the Sphinx *build-finished* event; *app* is the running
    application and *exception* the error the build ended with, if any.
    """
    files = set()

    # Sphinx 3.0.0: entries may have 4 or 5 fields depending on the Sphinx
    # version that produced the pickled environment.
    for docpath, element in app.env.bokeh_plot_files.items():
        if len(element) == 4:
            script, js, js_path, source = element
        elif len(element) == 5:
            script, js, js_path, source, docpath = element
        else:
            # BUG FIX: the original ended this raise with "from e", but no
            # exception "e" is in scope here, so it raised a NameError
            # instead of the intended ValueError.
            raise ValueError("\n".join([
                str(type(element)),
                str(len(element)),
                str(element),
            ]))
        files.add((js_path, docpath))

    files_iter = status_iterator(sorted(files),
                                 'copying bokeh-plot files... ',
                                 'brown',
                                 len(files),
                                 app.verbosity,
                                 stringify_func=lambda x: basename(x[0]))

    for (file, docpath) in files_iter:
        target = join(app.builder.outdir, docpath, basename(file))
        ensuredir(dirname(target))
        try:
            copyfile(file, target)
        except OSError as e:
            raise SphinxError('cannot copy local file %r, reason: %s' % (file, e))
def run(self):
    """Copy gallery example scripts next to the spec file, (re)read their
    generated docs, and render the gallery overview page.

    Returns the parsed nodes of the rendered GALLERY_PAGE template.
    """
    env = self.state.document.settings.env
    app = env.app

    # workaround (used below) for https://github.com/sphinx-doc/sphinx/issues/3924
    current_docname = env.docname

    docdir = dirname(env.doc2path(env.docname))
    specpath = join(docdir, self.arguments[0])

    dest_dir = join(dirname(specpath), "gallery")
    ensuredir(dest_dir)

    env.note_dependency(specpath)
    # BUG FIX: the original used json.load(open(specpath)), which leaked the
    # file handle; a context manager guarantees the spec file is closed.
    with open(specpath) as f:
        spec = json.load(f)
    details = spec['details']

    details_iter = status_iterator(
        details,
        'copying gallery files... ',
        'brown',
        len(details),
        stringify_func=lambda x: x['name'] + ".py")

    env.gallery_updated = []
    for detail in details_iter:
        src_path = abspath(join("..", detail['path']))
        dest_path = join(dest_dir, detail['name'] + ".py")
        # sphinx pickled env works only with forward slash
        docname = join(env.app.config.bokeh_gallery_dir, detail['name']).replace("\\", "/")
        try:
            copyfile(src_path, dest_path)
        except OSError as e:
            raise SphinxError('cannot copy gallery file %r, reason: %s' % (src_path, e))
        try:
            env.clear_doc(docname)
            env.read_doc(docname, app=app)
            env.gallery_updated.append(docname)
        except Exception as e:
            raise SphinxError('failed to read gallery doc %r, reason: %s' % (docname, e))

    names = [detail['name'] for detail in details]
    rst_text = GALLERY_PAGE.render(names=names)

    # workaround for https://github.com/sphinx-doc/sphinx/issues/3924
    env.temp_data['docname'] = current_docname

    return self._parse(rst_text, "<bokeh-gallery>")
def run(self):
    """Copy gallery example scripts next to the spec file, (re)read their
    generated docs, and render the gallery overview page.

    Returns the parsed nodes of the rendered GALLERY_PAGE template.
    """
    env = self.state.document.settings.env
    app = env.app

    # workaround (used below) for https://github.com/sphinx-doc/sphinx/issues/3924
    current_docname = env.docname

    docdir = dirname(env.doc2path(env.docname))
    specpath = join(docdir, self.arguments[0])

    dest_dir = join(dirname(specpath), "gallery")
    ensuredir(dest_dir)

    env.note_dependency(specpath)
    # BUG FIX: json.load(open(specpath)) leaked the open file handle; use a
    # context manager so the spec file is always closed.
    with open(specpath) as f:
        spec = json.load(f)
    details = spec['details']

    details_iter = status_iterator(details,
                                   'copying gallery files... ',
                                   'brown',
                                   len(details),
                                   stringify_func=lambda x: x['name'] + ".py")

    env.gallery_updated = []
    for detail in details_iter:
        src_path = abspath(join("..", detail['path']))
        dest_path = join(dest_dir, detail['name'] + ".py")
        # sphinx pickled env works only with forward slash
        docname = join(env.app.config.bokeh_gallery_dir, detail['name']).replace("\\", "/")
        try:
            copyfile(src_path, dest_path)
        except OSError as e:
            raise SphinxError('cannot copy gallery file %r, reason: %s' % (src_path, e))
        try:
            env.clear_doc(docname)
            env.read_doc(docname, app=app)
            env.gallery_updated.append(docname)
        except Exception as e:
            raise SphinxError('failed to read gallery doc %r, reason: %s' % (docname, e))

    names = [detail['name'] for detail in details]
    rst_text = GALLERY_PAGE.render(names=names)

    # workaround for https://github.com/sphinx-doc/sphinx/issues/3924
    env.temp_data['docname'] = current_docname

    return self._parse(rst_text, "<bokeh-gallery>")
def finish(self):
    """Final LaTeX build step: copy referenced images, user-supplied extra
    files, the logo, and the bundled TeX support files into the output
    directory."""
    # copy image files
    if self.images:
        self.info(bold('copying images...'), nonl=1)
        for src, dest in self.images.iteritems():
            self.info(' ' + src, nonl=1)
            copyfile(path.join(self.srcdir, src),
                     path.join(self.outdir, dest))
        self.info()
    # copy additional files
    if self.config.latex_additional_files:
        self.info(bold('copying additional files...'), nonl=1)
        for filename in self.config.latex_additional_files:
            self.info(' ' + filename, nonl=1)
            # additional files are resolved relative to the conf.py directory
            copyfile(path.join(self.confdir, filename),
                     path.join(self.outdir, path.basename(filename)))
        self.info()
    # the logo is handled differently
    if self.config.latex_logo:
        logobase = path.basename(self.config.latex_logo)
        copyfile(path.join(self.confdir, self.config.latex_logo),
                 path.join(self.outdir, logobase))
    self.info(bold('copying TeX support files... '), nonl=True)
    # TeX support files ship with the package itself; skip hidden files
    staticdirname = path.join(package_dir, 'texinputs')
    for filename in os.listdir(staticdirname):
        if not filename.startswith('.'):
            copyfile(path.join(staticdirname, filename),
                     path.join(self.outdir, filename))
    self.info('done')
def finish(self):
    """Final LaTeX build step: copy images, extra files, the logo and the
    bundled TeX support files into the output directory."""
    # Images referenced by the documents.
    if self.images:
        self.info(bold('copying images...'), nonl=1)
        for image_src, image_dest in self.images.iteritems():
            self.info(' ' + image_src, nonl=1)
            copyfile(path.join(self.srcdir, image_src),
                     path.join(self.outdir, image_dest))
        self.info()
    # Extra files requested via latex_additional_files (relative to confdir).
    if self.config.latex_additional_files:
        self.info(bold('copying additional files...'), nonl=1)
        for extra in self.config.latex_additional_files:
            self.info(' ' + extra, nonl=1)
            copyfile(path.join(self.confdir, extra),
                     path.join(self.outdir, path.basename(extra)))
        self.info()
    # The logo is handled separately from the other images.
    if self.config.latex_logo:
        logobase = path.basename(self.config.latex_logo)
        copyfile(path.join(self.confdir, self.config.latex_logo),
                 path.join(self.outdir, logobase))
    # TeX support files ship with the package itself; skip hidden entries.
    self.info(bold('copying TeX support files... '), nonl=True)
    texinputs_dir = path.join(package_dir, 'texinputs')
    for entry in os.listdir(texinputs_dir):
        if entry.startswith('.'):
            continue
        copyfile(path.join(texinputs_dir, entry),
                 path.join(self.outdir, entry))
    self.info('done')
def build_finished(app, exception):
    """Copy each generated bokeh-plot JS file next to the document that owns it."""
    to_copy = {(js_path, docpath)
               for (script, js, js_path, source, docpath)
               in app.env.bokeh_plot_files.values()}

    progress = status_iterator(sorted(to_copy),
                               "copying bokeh-plot files... ",
                               "brown",
                               len(to_copy),
                               app.verbosity,
                               stringify_func=lambda item: basename(item[0]))

    for (src, docpath) in progress:
        dest = join(app.builder.outdir, docpath, basename(src))
        ensuredir(dirname(dest))
        try:
            copyfile(src, dest)
        except OSError as e:
            raise SphinxError(f"cannot copy local file {src!r}, reason: {e}")
def run(self):
    """Copy gallery example scripts into a "gallery" subdirectory of the
    current document's directory, (re)read their generated docs, and render
    the gallery overview page.

    Returns the parsed nodes of the rendered GALLERY_PAGE template.
    """
    env = self.state.document.settings.env
    app = env.app

    docdir = dirname(env.doc2path(env.docname))

    dest_dir = join(docdir, "gallery")
    ensuredir(dest_dir)

    specpath = join(docdir, self.arguments[0])
    env.note_dependency(specpath)
    # BUG FIX: json.load(open(specpath)) leaked the file handle; a context
    # manager guarantees the spec file is closed.
    with open(specpath) as f:
        spec = json.load(f)
    details = spec['details']

    details_iter = app.status_iterator(details,
                                       'copying gallery files... ',
                                       console.brown,
                                       len(details),
                                       lambda x: x['name'] + ".py")

    env.gallery_updated = []
    for detail in details_iter:
        src_path = abspath(join("..", detail['path']))
        dest_path = join(dest_dir, detail['name'] + ".py")
        # sphinx pickled env works only with forward slash
        docname = join(env.app.config.bokeh_gallery_dir, detail['name']).replace("\\", "/")
        try:
            copyfile(src_path, dest_path)
        except OSError as e:
            raise SphinxError('cannot copy gallery file %r, reason: %s' % (src_path, e))
        try:
            env.clear_doc(docname)
            env.read_doc(docname, app=app)
            env.gallery_updated.append(docname)
        except Exception as e:
            raise SphinxError('failed to read gallery doc %r, reason: %s' % (docname, e))

    names = [detail['name'] for detail in details]
    rst_text = GALLERY_PAGE.render(names=names)

    return self._parse(rst_text, "<bokeh-gallery>")
def build_finished(app, exception):
    """Copy every generated bokeh-plot script into the "scripts" output dir."""
    script_paths = {js_path
                    for (script, js, js_path, source)
                    in app.env.bokeh_plot_files.values()}

    progress = status_iterator(sorted(script_paths),
                               'copying bokeh-plot files... ',
                               console.brown,
                               len(script_paths),
                               lambda p: basename(p))

    for src in progress:
        dest = join(app.builder.outdir, "scripts", basename(src))
        ensuredir(dirname(dest))
        try:
            copyfile(src, dest)
        except OSError as e:
            raise SphinxError('cannot copy local file %r, reason: %s' % (src, e))
def build_finished(app, exception):
    """Copy the bundled diff2html static resources into _static/diff2html.

    Connected to the Sphinx *build-finished* event; does nothing when the
    build failed (*exception* is not None).
    """
    if exception is not None:
        return
    builder = app.builder
    # Output _static/diff2html folder.
    target_dir = os.path.join(builder.outdir, '_static', 'diff2html')
    # BUG FIX: the original used an exists() check plus os.mkdir, which is
    # racy and fails when the parent _static directory does not exist yet;
    # makedirs(exist_ok=True) handles both cases atomically.
    os.makedirs(target_dir, exist_ok=True)
    # Copy every bundled resource into the target folder.
    static_dir = os.path.join(os.path.dirname(__file__), 'static')
    for resource in os.listdir(static_dir):
        copyfile(os.path.join(static_dir, resource),
                 os.path.join(target_dir, resource))
def copy_download_files(app, exc):
    """Copy all files mentioned with role *downloadlink* into the output tree.

    Connected to the Sphinx *build-finished* event: *app* is the running
    application and *exc* the exception the build ended with (None on
    success). On failure only a warning is emitted and nothing is copied.
    """
    if exc:
        builder = app.builder
        logger = logging.getLogger("downloadlink")
        mes = "Builder format '{0}'-'{1}', unable to copy file due to {2}".format(
            builder.format, builder.__class__.__name__, exc)
        logger.warning(mes)
        return

    def to_relpath(f):
        # display helper for status_iterator: path relative to the source dir
        return relative_path(app.srcdir, f)

    # copy downloadable files
    builder = app.builder
    if builder.env.dllinkfiles:
        logger = logging.getLogger("downloadlink")
        logger.info("[downloadlink] copy_download_files")
        for src in status_iterator(builder.env.dllinkfiles,
                                   __('copying downloadable(link) files... '),
                                   "brown", len(builder.env.dllinkfiles),
                                   builder.app.verbosity,
                                   stringify_func=to_relpath):
            docname, _ = builder.env.dllinkfiles[src]
            # copy once per distinct directory of the documents that link it
            for rel in set(os.path.dirname(dn) for dn in docname):
                dest_dir = os.path.join(builder.outdir, rel)
                # BUG FIX: the original called ensuredir on dirname(dest_dir),
                # leaving the directory actually copied into possibly missing;
                # ensure the target directory itself (parents included).
                ensuredir(dest_dir)
                shortname = os.path.split(src)[-1]
                dest = os.path.join(dest_dir, shortname)
                name = os.path.join(builder.srcdir, src)
                try:
                    copyfile(name, dest)
                    logger.info("[downloadlink] copy '{0}' to '{1}'".format(
                        name, dest))
                except FileNotFoundError:
                    # BUG FIX: the original built a message string here that
                    # was never used; just log the warning.
                    logger.warning(
                        "[downloadlink] cannot copy '{0}' to '{1}'".format(
                            name, dest))
def build_finished(app, exception):
    """Copy the bundled diff2html static resources into _static/diff2html.

    Connected to the Sphinx *build-finished* event; does nothing when the
    build failed (*exception* is not None).
    """
    if exception is not None:
        return
    builder = app.builder
    # Output _static/diff2html folder.
    target_dir = os.path.join(builder.outdir, '_static', 'diff2html')
    # BUG FIX: replaced the exists()/os.mkdir pair, which is racy and fails
    # when the parent _static directory is missing, with a single
    # makedirs(exist_ok=True) call.
    os.makedirs(target_dir, exist_ok=True)
    # copy resources to _static folder
    static_dir = os.path.join(os.path.dirname(__file__), 'static')
    for html_resource in os.listdir(static_dir):
        copyfile(
            os.path.join(static_dir, html_resource),
            os.path.join(target_dir, html_resource))
def build_finished(app, exception):
    """Collect all generated bokeh-plot scripts and copy them to outdir/scripts."""
    script_paths = set()
    for (script, js, js_path, source) in app.env.bokeh_plot_files.values():
        script_paths.add(js_path)

    for src in app.status_iterator(sorted(script_paths),
                                   'copying bokeh-plot files... ',
                                   console.brown,
                                   len(script_paths),
                                   lambda p: basename(p)):
        dest = join(app.builder.outdir, "scripts", basename(src))
        ensuredir(dirname(dest))
        try:
            copyfile(src, dest)
        except OSError as e:
            raise SphinxError('cannot copy local file %r, reason: %s' % (src, e))
def run(self):
    """Copy gallery example scripts into a "gallery" subdirectory of the
    current document's directory, (re)read their generated docs, and render
    the gallery overview page.

    Returns the parsed nodes of the rendered GALLERY_PAGE template.
    """
    env = self.state.document.settings.env
    app = env.app

    docdir = dirname(env.doc2path(env.docname))

    dest_dir = join(docdir, "gallery")
    ensuredir(dest_dir)

    specpath = join(docdir, self.arguments[0])
    env.note_dependency(specpath)
    # BUG FIX: json.load(open(specpath)) leaked the file handle; use a
    # context manager so the spec file is always closed.
    with open(specpath) as f:
        spec = json.load(f)
    details = spec['details']

    details_iter = app.status_iterator(details,
                                       'copying gallery files... ',
                                       console.brown,
                                       len(details),
                                       lambda x: x['name'] + ".py")

    env.gallery_updated = []
    for detail in details_iter:
        src_path = abspath(join("..", detail['path']))
        dest_path = join(dest_dir, detail['name'] + ".py")
        docname = join("docs", "gallery", detail['name'])
        try:
            copyfile(src_path, dest_path)
        except OSError as e:
            raise SphinxError('cannot copy gallery file %r, reason: %s' % (src_path, e))
        try:
            env.clear_doc(docname)
            env.read_doc(docname, app=app)
            env.gallery_updated.append(docname)
        except Exception as e:
            raise SphinxError('failed to read gallery doc %r, reason: %s' % (docname, e))

    names = [detail['name'] for detail in details]
    rst_text = GALLERY_PAGE.render(names=names)

    return self._parse(rst_text, "<bokeh-gallery>")
def handle_finish(self):
    """Dump the global context, run the standard HTML finish step, then
    copy the pickled environment and touch the last-build marker used by
    the web application."""
    # Serialize the global template context into the output directory.
    global_path = path.join(self.outdir, self.globalcontext_filename)
    fp = open(global_path, 'wb')
    try:
        self.implementation.dump(self.globalcontext, fp, 2)
    finally:
        fp.close()
    # Super call dumps the search index.
    StandaloneHTMLBuilder.handle_finish(self)
    # The web app needs the pickled environment next to the output.
    copyfile(path.join(self.doctreedir, ENV_PICKLE_FILENAME),
             path.join(self.outdir, ENV_PICKLE_FILENAME))
    # Touch the "last build" marker; the web application uses it to decide
    # when to reload its environment and clear the cache.
    open(path.join(self.outdir, LAST_BUILD_FILENAME), 'w').close()
def build_finished(app, exception):
    """Copy generated bokeh-plot files into each owning document's output dir."""
    pairs = set()
    for (script, js, js_path, source, docpath) in app.env.bokeh_plot_files.values():
        pairs.add((js_path, docpath))

    iterator = status_iterator(sorted(pairs),
                               'copying bokeh-plot files... ',
                               'brown',
                               len(pairs),
                               app.verbosity,
                               stringify_func=lambda pair: basename(pair[0]))

    for js_file, docpath in iterator:
        destination = join(app.builder.outdir, docpath, basename(js_file))
        ensuredir(dirname(destination))
        try:
            copyfile(js_file, destination)
        except OSError as e:
            raise SphinxError('cannot copy local file %r, reason: %s' % (js_file, e))
(path.join(self.srcdir, src), err)) self.info() # copy static files self.info(bold('copying static files... '), nonl=True) ensuredir(path.join(self.outdir, '_static')) # first, create pygments style file f = open(path.join(self.outdir, '_static', 'pygments.css'), 'w') f.write(self.highlighter.get_stylesheet()) f.close() # then, copy translations JavaScript file if self.config.language is not None: jsfile = path.join(package_dir, 'locale', self.config.language, 'LC_MESSAGES', 'sphinx.js') if path.isfile(jsfile): copyfile(jsfile, path.join(self.outdir, '_static', 'translations.js')) # then, copy over all user-supplied static files if self.theme: staticdirnames = [ path.join(themepath, 'static') for themepath in self.theme.get_dirchain()[::-1] ] else: staticdirnames = [] staticdirnames += [ path.join(self.confdir, spath) for spath in self.config.html_static_path ] for staticdirname in staticdirnames: if not path.isdir(staticdirname): self.warn('static directory %r does not exist' % staticdirname)
def finish(self):
    """Write the additional non-document pages (general index, module
    index, search page, opensearch description, conf.py extras) and copy
    the referenced image files into the output directory."""
    self.info(bold('writing additional files...'), nonl=1)
    # the global general index
    if self.config.html_use_index:
        # the total count of lines for each index letter, used to distribute
        # the entries into two columns
        genindex = self.env.create_index(self)
        indexcounts = []
        for _, entries in genindex:
            indexcounts.append(sum(1 + len(subitems)
                                   for _, (_, subitems) in entries))
        genindexcontext = dict(
            genindexentries = genindex,
            genindexcounts = indexcounts,
            split_index = self.config.html_split_index,
        )
        self.info(' genindex', nonl=1)
        if self.config.html_split_index:
            # one page per index letter, plus a combined "all" page
            self.handle_page('genindex', genindexcontext,
                             'genindex-split.html')
            self.handle_page('genindex-all', genindexcontext,
                             'genindex.html')
            for (key, entries), count in zip(genindex, indexcounts):
                ctx = {'key': key, 'entries': entries, 'count': count,
                       'genindexentries': genindex}
                self.handle_page('genindex-' + key, ctx,
                                 'genindex-single.html')
        else:
            self.handle_page('genindex', genindexcontext, 'genindex.html')
    # the global module index
    if self.config.html_use_modindex and self.env.modules:
        # the sorted list of all modules, for the global module index
        modules = sorted(((mn, (self.get_relative_uri('modindex', fn) +
                                '#module-' + mn, sy, pl, dep))
                          for (mn, (fn, sy, pl, dep)) in
                          self.env.modules.iteritems()),
                         key=lambda x: x[0].lower())
        # collect all platforms
        platforms = set()
        # sort out collapsable modules
        modindexentries = []
        letters = []
        pmn = ''
        num_toplevels = 0
        num_collapsables = 0
        cg = 0  # collapse group
        fl = ''  # first letter
        for mn, (fn, sy, pl, dep) in modules:
            pl = pl and pl.split(', ') or []
            platforms.update(pl)
            # strip configured common prefixes (longest match first)
            ignore = self.env.config['modindex_common_prefix']
            ignore = sorted(ignore, key=len, reverse=True)
            for i in ignore:
                if mn.startswith(i):
                    mn = mn[len(i):]
                    stripped = i
                    break
            else:
                stripped = ''
            if fl != mn[0].lower() and mn[0] != '_':
                # heading
                letter = mn[0].upper()
                if letter not in letters:
                    modindexentries.append(['', False, 0, False,
                                            letter, '', [], False, ''])
                    letters.append(letter)
            tn = mn.split('.')[0]
            if tn != mn:
                # submodule
                if pmn == tn:
                    # first submodule - make parent collapsable
                    modindexentries[-1][1] = True
                    num_collapsables += 1
                elif not pmn.startswith(tn):
                    # submodule without parent in list, add dummy entry
                    cg += 1
                    modindexentries.append([tn, True, cg, False, '', '',
                                            [], False, stripped])
            else:
                num_toplevels += 1
                cg += 1
            modindexentries.append([mn, False, cg, (tn != mn), fn, sy,
                                    pl, dep, stripped])
            pmn = mn
            fl = mn[0].lower()
        platforms = sorted(platforms)
        # apply heuristics when to collapse modindex at page load:
        # only collapse if number of toplevel modules is larger than
        # number of submodules
        collapse = len(modules) - num_toplevels < num_toplevels
        # As some parts of the module names may have been stripped, those
        # names have changed, thus it is necessary to sort the entries.
        if ignore:
            def sorthelper(entry):
                name = entry[0]
                if name == '':
                    # heading
                    name = entry[4]
                return name.lower()
            modindexentries.sort(key=sorthelper)
            letters.sort()
        modindexcontext = dict(
            modindexentries = modindexentries,
            platforms = platforms,
            letters = letters,
            collapse_modindex = collapse,
        )
        self.info(' modindex', nonl=1)
        self.handle_page('modindex', modindexcontext, 'modindex.html')
    # the search page
    if self.name != 'htmlhelp':
        self.info(' search', nonl=1)
        self.handle_page('search', {}, 'search.html')
    # additional pages from conf.py
    for pagename, template in self.config.html_additional_pages.items():
        self.info(' '+pagename, nonl=1)
        self.handle_page(pagename, {}, template)
    if self.config.html_use_opensearch and self.name != 'htmlhelp':
        self.info(' opensearch', nonl=1)
        fn = path.join(self.outdir, '_static', 'opensearch.xml')
        self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn)
    self.info()
    # copy image files
    if self.images:
        self.info(bold('copying images...'), nonl=True)
        ensuredir(path.join(self.outdir, '_images'))
        for src, dest in self.images.iteritems():
            self.info(' '+src, nonl=1)
            try:
                copyfile(path.join(self.srcdir, src),
                         path.join(self.outdir, '_images', dest))
            except Exception, err:
                self.warn('cannot copy image file %r: %s' %
                          (path.join(self.srcdir, src), err))
        self.info()
(path.join(self.srcdir, src), err)) self.info() # copy static files self.info(bold('copying static files... '), nonl=True) ensuredir(path.join(self.outdir, '_static')) # first, create pygments style file f = open(path.join(self.outdir, '_static', 'pygments.css'), 'w') f.write(self.highlighter.get_stylesheet()) f.close() # then, copy translations JavaScript file if self.config.language is not None: jsfile = path.join(package_dir, 'locale', self.config.language, 'LC_MESSAGES', 'sphinx.js') if path.isfile(jsfile): copyfile(jsfile, path.join(self.outdir, '_static', 'translations.js')) # then, copy over all user-supplied static files if self.theme: staticdirnames = [path.join(themepath, 'static') for themepath in self.theme.get_dirchain()[::-1]] else: staticdirnames = [] staticdirnames += [path.join(self.confdir, spath) for spath in self.config.html_static_path] for staticdirname in staticdirnames: if not path.isdir(staticdirname): self.warn('static directory %r does not exist' % staticdirname) continue for filename in os.listdir(staticdirname): if filename.startswith('.'): continue
def finish(self):
    """Write the additional non-document pages (general index, module
    index, search page, opensearch description, conf.py extras) and copy
    the referenced image files into the output directory."""
    self.info(bold('writing additional files...'), nonl=1)
    # the global general index
    if self.config.html_use_index:
        # the total count of lines for each index letter, used to distribute
        # the entries into two columns
        genindex = self.env.create_index(self)
        indexcounts = []
        for _, entries in genindex:
            indexcounts.append(
                sum(1 + len(subitems) for _, (_, subitems) in entries))
        genindexcontext = dict(
            genindexentries=genindex,
            genindexcounts=indexcounts,
            split_index=self.config.html_split_index,
        )
        self.info(' genindex', nonl=1)
        if self.config.html_split_index:
            # one page per index letter, plus a combined "all" page
            self.handle_page('genindex', genindexcontext,
                             'genindex-split.html')
            self.handle_page('genindex-all', genindexcontext,
                             'genindex.html')
            for (key, entries), count in zip(genindex, indexcounts):
                ctx = {
                    'key': key,
                    'entries': entries,
                    'count': count,
                    'genindexentries': genindex
                }
                self.handle_page('genindex-' + key, ctx,
                                 'genindex-single.html')
        else:
            self.handle_page('genindex', genindexcontext, 'genindex.html')
    # the global module index
    if self.config.html_use_modindex and self.env.modules:
        # the sorted list of all modules, for the global module index
        modules = sorted(
            ((mn, (self.get_relative_uri('modindex', fn) + '#module-' + mn,
                   sy, pl, dep))
             for (mn, (fn, sy, pl, dep)) in self.env.modules.iteritems()),
            key=lambda x: x[0].lower())
        # collect all platforms
        platforms = set()
        # sort out collapsable modules
        modindexentries = []
        letters = []
        pmn = ''
        num_toplevels = 0
        num_collapsables = 0
        cg = 0  # collapse group
        fl = ''  # first letter
        for mn, (fn, sy, pl, dep) in modules:
            pl = pl and pl.split(', ') or []
            platforms.update(pl)
            # strip configured common prefixes (longest match first)
            ignore = self.env.config['modindex_common_prefix']
            ignore = sorted(ignore, key=len, reverse=True)
            for i in ignore:
                if mn.startswith(i):
                    mn = mn[len(i):]
                    stripped = i
                    break
            else:
                stripped = ''
            if fl != mn[0].lower() and mn[0] != '_':
                # heading
                letter = mn[0].upper()
                if letter not in letters:
                    modindexentries.append(
                        ['', False, 0, False, letter, '', [], False, ''])
                    letters.append(letter)
            tn = mn.split('.')[0]
            if tn != mn:
                # submodule
                if pmn == tn:
                    # first submodule - make parent collapsable
                    modindexentries[-1][1] = True
                    num_collapsables += 1
                elif not pmn.startswith(tn):
                    # submodule without parent in list, add dummy entry
                    cg += 1
                    modindexentries.append(
                        [tn, True, cg, False, '', '', [], False, stripped])
            else:
                num_toplevels += 1
                cg += 1
            modindexentries.append(
                [mn, False, cg, (tn != mn), fn, sy, pl, dep, stripped])
            pmn = mn
            fl = mn[0].lower()
        platforms = sorted(platforms)
        # apply heuristics when to collapse modindex at page load:
        # only collapse if number of toplevel modules is larger than
        # number of submodules
        collapse = len(modules) - num_toplevels < num_toplevels
        # As some parts of the module names may have been stripped, those
        # names have changed, thus it is necessary to sort the entries.
        if ignore:
            def sorthelper(entry):
                name = entry[0]
                if name == '':
                    # heading
                    name = entry[4]
                return name.lower()
            modindexentries.sort(key=sorthelper)
            letters.sort()
        modindexcontext = dict(
            modindexentries=modindexentries,
            platforms=platforms,
            letters=letters,
            collapse_modindex=collapse,
        )
        self.info(' modindex', nonl=1)
        self.handle_page('modindex', modindexcontext, 'modindex.html')
    # the search page
    if self.name != 'htmlhelp':
        self.info(' search', nonl=1)
        self.handle_page('search', {}, 'search.html')
    # additional pages from conf.py
    for pagename, template in self.config.html_additional_pages.items():
        self.info(' ' + pagename, nonl=1)
        self.handle_page(pagename, {}, template)
    if self.config.html_use_opensearch and self.name != 'htmlhelp':
        self.info(' opensearch', nonl=1)
        fn = path.join(self.outdir, '_static', 'opensearch.xml')
        self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn)
    self.info()
    # copy image files
    if self.images:
        self.info(bold('copying images...'), nonl=True)
        ensuredir(path.join(self.outdir, '_images'))
        for src, dest in self.images.iteritems():
            self.info(' ' + src, nonl=1)
            try:
                copyfile(path.join(self.srcdir, src),
                         path.join(self.outdir, '_images', dest))
            except Exception, err:
                self.warn('cannot copy image file %r: %s' %
                          (path.join(self.srcdir, src), err))
        self.info()
class StandaloneHTMLBuilder(Builder):
    """
    Builds standalone HTML docs.
    """
    name = 'html'
    format = 'html'
    copysource = True
    out_suffix = '.html'
    link_suffix = '.html'  # defaults to matching out_suffix
    indexer_format = js_index
    supported_image_types = ['image/svg+xml', 'image/png',
                             'image/gif', 'image/jpeg']
    searchindex_filename = 'searchindex.js'
    add_permalinks = True
    embedded = False  # for things like HTML help or Qt help: suppresses sidebar

    # This is a class attribute because it is mutated by Sphinx.add_javascript.
    script_files = ['_static/jquery.js', '_static/doctools.js']
    # Dito for this one.
    css_files = []

    # cached publisher object for snippets (lazily created in render_partial)
    _publisher = None

    def init(self):
        """Initialize per-build state: templates, highlighter, translator,
        and the output/link suffixes derived from the configuration."""
        # a hash of all config values that, if changed, cause a full rebuild
        self.config_hash = ''
        self.tags_hash = ''
        # section numbers for headings in the currently visited document
        self.secnumbers = {}

        self.init_templates()
        self.init_highlighter()
        self.init_translator_class()
        if self.config.html_file_suffix:
            self.out_suffix = self.config.html_file_suffix

        if self.config.html_link_suffix is not None:
            self.link_suffix = self.config.html_link_suffix
        else:
            # link suffix follows the file suffix unless overridden
            self.link_suffix = self.out_suffix

        if self.config.language is not None:
            # add a JS translations file only if one exists for the language
            jsfile = path.join(package_dir, 'locale', self.config.language,
                               'LC_MESSAGES', 'sphinx.js')
            if path.isfile(jsfile):
                self.script_files.append('_static/translations.js')

    def init_templates(self):
        """Load the configured theme and set up the template bridge."""
        Theme.init_themes(self)
        self.theme = Theme(self.config.html_theme)
        self.create_template_bridge()
        self.templates.init(self, self.theme)

    def init_highlighter(self):
        """Create the Pygments bridge used for source highlighting."""
        # determine Pygments style and create the highlighter;
        # explicit config wins over the theme's setting
        if self.config.pygments_style is not None:
            style = self.config.pygments_style
        elif self.theme:
            style = self.theme.get_confstr('theme', 'pygments_style', 'none')
        else:
            style = 'sphinx'
        self.highlighter = PygmentsBridge('html', style,
                                          self.config.trim_doctest_flags)

    def init_translator_class(self):
        """Pick the docutils translator class for HTML output.

        Priority: user-supplied ``html_translator_class`` config value,
        then the SmartyPants variant if enabled, then the default.
        """
        if self.config.html_translator_class:
            self.translator_class = self.app.import_object(
                self.config.html_translator_class,
                'html_translator_class setting')
        elif self.config.html_use_smartypants:
            self.translator_class = SmartyPantsHTMLTranslator
        else:
            self.translator_class = HTMLTranslator

    def get_outdated_docs(self):
        """Yield docnames that need to be rewritten.

        If the hash of HTML-relevant config values or the active tags
        changed since the last build (recorded in ``.buildinfo``), every
        found document is considered outdated.  Otherwise a document is
        outdated when it is new or its source (or the newest template)
        is more recent than the written output file.
        """
        # hash only config values flagged as affecting 'html' output
        cfgdict = dict((name, self.config[name])
                       for (name, desc) in self.config.values.iteritems()
                       if desc[1] == 'html')
        self.config_hash = md5(str(cfgdict)).hexdigest()
        self.tags_hash = md5(str(sorted(self.tags))).hexdigest()
        old_config_hash = old_tags_hash = ''
        try:
            # parse the .buildinfo file written by a previous build;
            # NOTE(review): fp is not closed if a read raises -- relies on
            # refcounting; confirm acceptable for this codebase
            fp = open(path.join(self.outdir, '.buildinfo'))
            version = fp.readline()
            if version.rstrip() != '# Sphinx build info version 1':
                raise ValueError
            fp.readline()  # skip commentary
            cfg, old_config_hash = fp.readline().strip().split(': ')
            if cfg != 'config':
                raise ValueError
            tag, old_tags_hash = fp.readline().strip().split(': ')
            if tag != 'tags':
                raise ValueError
            fp.close()
        except ValueError:
            self.warn('unsupported build info format in %r, building all' %
                      path.join(self.outdir, '.buildinfo'))
        except Exception:
            # missing/unreadable .buildinfo: treat as empty hashes below
            pass
        if old_config_hash != self.config_hash or \
           old_tags_hash != self.tags_hash:
            # full rebuild: config or tags changed
            for docname in self.env.found_docs:
                yield docname
            return

        if self.templates:
            template_mtime = self.templates.newest_template_mtime()
        else:
            template_mtime = 0
        for docname in self.env.found_docs:
            if docname not in self.env.all_docs:
                # not yet read at all -> outdated
                yield docname
                continue
            targetname = self.env.doc2path(docname, self.outdir,
                                           self.out_suffix)
            try:
                targetmtime = path.getmtime(targetname)
            except Exception:
                # no output file yet -> force rebuild via mtime 0
                targetmtime = 0
            try:
                srcmtime = max(path.getmtime(self.env.doc2path(docname)),
                               template_mtime)
                if srcmtime > targetmtime:
                    yield docname
            except EnvironmentError:
                # source doesn't exist anymore
                pass

    def render_partial(self, node):
        """Utility: Render a lone doctree node."""
        doc = new_document('<partial node>')
        doc.append(node)

        # build the docutils publisher once and cache it on the class
        if self._publisher is None:
            self._publisher = Publisher(source_class=DocTreeInput,
                                        destination_class=StringOutput)
            self._publisher.set_components('standalone',
                                           'restructuredtext', 'pseudoxml')

        pub = self._publisher

        pub.reader = DoctreeReader()
        pub.writer = HTMLWriter(self)
        pub.process_programmatic_settings(None,
                                          {'output_encoding': 'unicode'},
                                          None)
        pub.set_source(doc, None)
        pub.set_destination(None, None)
        pub.publish()
        return pub.writer.parts

    def prepare_writing(self, docnames):
        """Set up writer, search indexer and the global template context
        before any documents are written."""
        from sphinx.search import IndexBuilder
        self.indexer = IndexBuilder(self.env)
        self.load_indexer(docnames)
        self.docwriter = HTMLWriter(self)
        self.docsettings = OptionParser(
            defaults=self.env.settings,
            components=(self.docwriter,)).get_default_values()

        # format the "last updated on" string, only once is enough since it
        # typically doesn't include the time of day
        lufmt = self.config.html_last_updated_fmt
        if lufmt is not None:
            # empty string means "use the default format"
            self.last_updated = ustrftime(lufmt or _('%b %d, %Y'))
        else:
            self.last_updated = None

        logo = self.config.html_logo and \
               path.basename(self.config.html_logo) or ''

        favicon = self.config.html_favicon and \
                  path.basename(self.config.html_favicon) or ''
        if favicon and os.path.splitext(favicon)[1] != '.ico':
            self.warn('html_favicon is not an .ico file')

        if not isinstance(self.config.html_use_opensearch, basestring):
            self.warn('html_use_opensearch config value must now be a string')

        self.relations = self.env.collect_relations()

        # relation links shown in the document header/footer
        rellinks = []
        if self.config.html_use_index:
            rellinks.append(('genindex', _('General Index'),
                             'I', _('index')))
        if self.config.html_use_modindex and self.env.modules:
            rellinks.append(('modindex', _('Global Module Index'),
                             'M', _('modules')))

        # stylesheet: explicit config wins over the theme's setting
        if self.config.html_style is not None:
            stylename = self.config.html_style
        elif self.theme:
            stylename = self.theme.get_confstr('theme', 'stylesheet')
        else:
            stylename = 'default.css'

        # values available to every rendered template
        self.globalcontext = dict(
            embedded=self.embedded,
            project=self.config.project,
            release=self.config.release,
            version=self.config.version,
            last_updated=self.last_updated,
            copyright=self.config.copyright,
            master_doc=self.config.master_doc,
            use_opensearch=self.config.html_use_opensearch,
            docstitle=self.config.html_title,
            shorttitle=self.config.html_short_title,
            show_copyright=self.config.html_show_copyright,
            show_sphinx=self.config.html_show_sphinx,
            has_source=self.config.html_copy_source,
            show_source=self.config.html_show_sourcelink,
            file_suffix=self.out_suffix,
            script_files=self.script_files,
            css_files=self.css_files,
            sphinx_version=__version__,
            style=stylename,
            rellinks=rellinks,
            builder=self.name,
            parents=[],
            logo=logo,
            favicon=favicon,
        )
        if self.theme:
            # theme options are exposed to templates as theme_<name>
            self.globalcontext.update(
                ('theme_' + key, val) for (key, val) in
                self.theme.get_options(
                    self.config.html_theme_options).iteritems())
        self.globalcontext.update(self.config.html_context)

    def get_doc_context(self, docname, body, metatags):
        """Collect items for the template context of a page."""
        # find out relations
        prev = next = None
        parents = []
        rellinks = self.globalcontext['rellinks'][:]
        related = self.relations.get(docname)
        titles = self.env.titles
        # related is presumably (parent, prev, next) docnames -- TODO confirm
        if related and related[2]:
            try:
                next = {'link': self.get_relative_uri(docname, related[2]),
                        'title': self.render_partial(
                            titles[related[2]])['title']}
                rellinks.append((related[2], next['title'], 'N', _('next')))
            except KeyError:
                next = None
        if related and related[1]:
            try:
                prev = {'link': self.get_relative_uri(docname, related[1]),
                        'title': self.render_partial(
                            titles[related[1]])['title']}
                rellinks.append((related[1], prev['title'],
                                 'P', _('previous')))
            except KeyError:
                # the relation is (somehow) not in the TOC tree, handle
                # that gracefully
                prev = None
        # walk up the parent chain to build the breadcrumb list
        while related and related[0]:
            try:
                parents.append(
                    {'link': self.get_relative_uri(docname, related[0]),
                     'title': self.render_partial(
                         titles[related[0]])['title']})
            except KeyError:
                pass
            related = self.relations.get(related[0])
        if parents:
            parents.pop()  # remove link to the master file; we have a generic
                           # "back to index" link already
        parents.reverse()

        # title rendered as HTML
        title = self.env.longtitles.get(docname)
        title = title and self.render_partial(title)['title'] or ''
        # the name for the copied source
        sourcename = self.config.html_copy_source and docname + '.txt' or ''

        # metadata for the document
        meta = self.env.metadata.get(docname)

        # local TOC and global TOC tree
        toc = self.render_partial(self.env.get_toc_for(docname))['fragment']

        return dict(
            parents=parents,
            prev=prev,
            next=next,
            title=title,
            meta=meta,
            body=body,
            metatags=metatags,
            rellinks=rellinks,
            sourcename=sourcename,
            toc=toc,
            # only display a TOC if there's more than one item to show
            display_toc=(self.env.toc_num_entries[docname] > 1),
        )

    def write_doc(self, docname, doctree):
        """Render one doctree to HTML, index it for search, and emit the
        page through handle_page."""
        destination = StringOutput(encoding='utf-8')
        doctree.settings = self.docsettings

        # per-document state consumed by the translator
        self.secnumbers = self.env.toc_secnumbers.get(docname, {})
        self.imgpath = relative_uri(self.get_target_uri(docname), '_images')
        self.post_process_images(doctree)
        self.dlpath = relative_uri(self.get_target_uri(docname),
                                   '_downloads')
        self.docwriter.write(doctree, destination)
        self.docwriter.assemble_parts()
        body = self.docwriter.parts['fragment']
        metatags = self.docwriter.clean_meta

        ctx = self.get_doc_context(docname, body, metatags)
        self.index_page(docname, doctree, ctx.get('title', ''))
        self.handle_page(docname, ctx, event_arg=doctree)

    def finish(self):
        """Write the additional pages (indices, search, opensearch,
        conf.py extras) and copy images and downloadable files."""
        self.info(bold('writing additional files...'), nonl=1)

        # the global general index
        if self.config.html_use_index:
            # the total count of lines for each index letter, used to
            # distribute the entries into two columns
            genindex = self.env.create_index(self)
            indexcounts = []
            for _, entries in genindex:
                indexcounts.append(sum(1 + len(subitems)
                                       for _, (_, subitems) in entries))

            genindexcontext = dict(
                genindexentries=genindex,
                genindexcounts=indexcounts,
                split_index=self.config.html_split_index,
            )
            self.info(' genindex', nonl=1)

            if self.config.html_split_index:
                # one summary page plus one page per index letter
                self.handle_page('genindex', genindexcontext,
                                 'genindex-split.html')
                self.handle_page('genindex-all', genindexcontext,
                                 'genindex.html')
                for (key, entries), count in zip(genindex, indexcounts):
                    ctx = {'key': key, 'entries': entries, 'count': count,
                           'genindexentries': genindex}
                    self.handle_page('genindex-' + key, ctx,
                                     'genindex-single.html')
            else:
                self.handle_page('genindex', genindexcontext,
                                 'genindex.html')

        # the global module index
        if self.config.html_use_modindex and self.env.modules:
            # the sorted list of all modules, for the global module index
            modules = sorted(((mn, (self.get_relative_uri('modindex', fn) +
                                    '#module-' + mn, sy, pl, dep))
                              for (mn, (fn, sy, pl, dep)) in
                              self.env.modules.iteritems()),
                             key=lambda x: x[0].lower())

            # collect all platforms
            platforms = set()

            # sort out collapsable modules
            # each entry: [name, collapsable, collapse-group, is-submodule,
            #              link, synopsis, platforms, deprecated, stripped]
            modindexentries = []
            letters = []
            pmn = ''           # previous module name
            num_toplevels = 0
            num_collapsables = 0
            cg = 0             # collapse group
            fl = ''            # first letter
            for mn, (fn, sy, pl, dep) in modules:
                pl = pl and pl.split(', ') or []
                platforms.update(pl)

                # strip the longest matching common prefix, if configured
                ignore = self.env.config['modindex_common_prefix']
                ignore = sorted(ignore, key=len, reverse=True)
                for i in ignore:
                    if mn.startswith(i):
                        mn = mn[len(i):]
                        stripped = i
                        break
                else:
                    stripped = ''

                if fl != mn[0].lower() and mn[0] != '_':
                    # heading
                    letter = mn[0].upper()
                    if letter not in letters:
                        modindexentries.append(['', False, 0, False,
                                                letter, '', [], False, ''])
                        letters.append(letter)
                tn = mn.split('.')[0]
                if tn != mn:
                    # submodule
                    if pmn == tn:
                        # first submodule - make parent collapsable
                        modindexentries[-1][1] = True
                        num_collapsables += 1
                    elif not pmn.startswith(tn):
                        # submodule without parent in list, add dummy entry
                        cg += 1
                        modindexentries.append([tn, True, cg, False, '', '',
                                                [], False, stripped])
                else:
                    num_toplevels += 1
                    cg += 1
                modindexentries.append([mn, False, cg, (tn != mn),
                                        fn, sy, pl, dep, stripped])
                pmn = mn
                fl = mn[0].lower()
            platforms = sorted(platforms)

            # apply heuristics when to collapse modindex at page load:
            # only collapse if number of toplevel modules is larger than
            # number of submodules
            collapse = len(modules) - num_toplevels < num_toplevels

            # As some parts of the module names may have been stripped,
            # those names have changed, thus it is necessary to sort the
            # entries.
            if ignore:
                def sorthelper(entry):
                    # headings have an empty name; sort them by their letter
                    name = entry[0]
                    if name == '':
                        # heading
                        name = entry[4]
                    return name.lower()

                modindexentries.sort(key=sorthelper)
                letters.sort()

            modindexcontext = dict(
                modindexentries=modindexentries,
                platforms=platforms,
                letters=letters,
                collapse_modindex=collapse,
            )
            self.info(' modindex', nonl=1)
            self.handle_page('modindex', modindexcontext, 'modindex.html')

        # the search page
        if self.name != 'htmlhelp':
            self.info(' search', nonl=1)
            self.handle_page('search', {}, 'search.html')

        # additional pages from conf.py
        for pagename, template in self.config.html_additional_pages.items():
            self.info(' ' + pagename, nonl=1)
            self.handle_page(pagename, {}, template)

        if self.config.html_use_opensearch and self.name != 'htmlhelp':
            self.info(' opensearch', nonl=1)
            fn = path.join(self.outdir, '_static', 'opensearch.xml')
            self.handle_page('opensearch', {}, 'opensearch.xml',
                             outfilename=fn)

        self.info()

        # copy image files
        if self.images:
            self.info(bold('copying images...'), nonl=True)
            ensuredir(path.join(self.outdir, '_images'))
            for src, dest in self.images.iteritems():
                self.info(' ' + src, nonl=1)
                try:
                    copyfile(path.join(self.srcdir, src),
                             path.join(self.outdir, '_images', dest))
                except Exception, err:
                    # best-effort: warn and continue with remaining images
                    self.warn('cannot copy image file %r: %s' %
                              (path.join(self.srcdir, src), err))
            self.info()

        # copy downloadable files
        if self.env.dlfiles:
            self.info(bold('copying downloadable files...'), nonl=True)
            ensuredir(path.join(self.outdir, '_downloads'))
            for src, (_, dest) in self.env.dlfiles.iteritems():
                self.info(' ' + src, nonl=1)
                try:
                    copyfile(path.join(self.srcdir, src),
                             path.join(self.outdir, '_downloads', dest))
                except Exception, err:
                    # best-effort: warn and continue with remaining files
                    self.warn('cannot copy downloadable file %r: %s' %
                              (path.join(self.srcdir, src), err))
            self.info()