def run(self):
    """Directive body: highlight the bot script, render a snapshot image with sbot,
    and return the docutils nodes (image first, highlighted source second).
    """
    self.assert_has_content()
    text = '\n'.join(self.content)
    parsed = highlight(text, PythonLexer(), HtmlFormatter())
    result = [nodes.raw('', parsed, format='html')]
    if True: # If we want a snapshot - this should check the 'snapshot argument'#
        # filename is derived from a hash of the script so repeated builds reuse it
        fn = '{}.png'.format(sha(text).hexdigest())
        env = self.state.document.settings.env
        rel_filename, filename = env.relfn2path(fn)
        outfn = os.path.join(env.app.builder.outdir, '_static', rel_filename)
        ensuredir(os.path.dirname(outfn))
        script_to_render = BOT_HEADER + text
        try:
            # hand the full script text to the sbot CLI to render the image
            subprocess.call(['sbot', '-o', '%s' % outfn, script_to_render])
        except Exception, e:  # Python 2 except syntax
            raise ShoebotError(str(e))
        # TODO - Support other output formats
        image_node = nodes.raw('', html_img_tag(rel_filename), format='html')
        result.insert(0,image_node)
    # NOTE(review): no ``return result`` is visible here -- the view appears
    # truncated (compare the later revision of this directive); confirm.
def handle_page(self, pagename, ctx, templatename='page.html',
                outfilename=None, event_arg=None):
    """Serialize the page context for *pagename* to disk and, when a source
    name is present in the context, copy the reST source for the
    "show source" link."""
    ctx['current_page_name'] = pagename
    custom_sidebar = self.config.html_sidebars.get(pagename)
    if custom_sidebar:
        ctx['customsidebar'] = custom_sidebar
    if not outfilename:
        outfilename = path.join(self.outdir,
                                os_path(pagename) + self.out_suffix)
    # let extensions mutate the context before it is dumped
    self.app.emit('html-page-context', pagename, templatename, ctx, event_arg)
    ensuredir(path.dirname(outfilename))
    fp = open(outfilename, 'wb')
    try:
        self.implementation.dump(ctx, fp, 2)
    finally:
        fp.close()
    # if there is a source file, copy the source file for the
    # "show source" link
    if ctx.get('sourcename'):
        src_target = path.join(self.outdir, '_sources',
                               os_path(ctx['sourcename']))
        ensuredir(path.dirname(src_target))
        copyfile(self.env.doc2path(pagename), src_target)
def handle_page(self, pagename, addctx, templatename='page.html', outfilename=None, event_arg=None): ctx = self.globalcontext.copy() # current_page_name is backwards compatibility ctx['pagename'] = ctx['current_page_name'] = pagename def pathto(otheruri, resource=False, baseuri=self.get_target_uri(pagename)): if not resource: otheruri = self.get_target_uri(otheruri) return relative_uri(baseuri, otheruri) ctx['pathto'] = pathto ctx['hasdoc'] = lambda name: name in self.env.all_docs ctx['customsidebar'] = self.config.html_sidebars.get(pagename) ctx['toctree'] = lambda **kw: self._get_local_toctree(pagename, **kw) ctx.update(addctx) self.app.emit('html-page-context', pagename, templatename, ctx, event_arg) output = self.templates.render(templatename, ctx) if not outfilename: outfilename = self.get_outfilename(pagename) # outfilename's path is in general different from self.outdir ensuredir(path.dirname(outfilename)) try: f = codecs.open(outfilename, 'w', 'utf-8') try: f.write(output) finally: f.close() except (IOError, OSError), err: self.warn("error writing file %s: %s" % (outfilename, err))
def run(self):
    """Highlight the directive body and render it with the sbot CLI into a PNG.

    Returns a list of docutils nodes: the image node first, then the
    highlighted source.

    Raises:
        ShoebotError: if the output file already exists or rendering fails.
    """
    self.assert_has_content()
    text = "\n".join(self.content)
    parsed = highlight(text, PythonLexer(), HtmlFormatter())
    result = [nodes.raw("", parsed, format="html")]
    options_dict = dict(self.options)
    opt_size = options_dict.get("size", (100, 100))
    # BUG FIX: hashlib digests require bytes on Python 3; hashing the script
    # text keeps the generated filename stable across builds.
    fn = options_dict.get("filename") or "{}.png".format(
        sha(text.encode("utf-8")).hexdigest())
    env = self.state.document.settings.env
    rel_filename, filename = env.relfn2path(fn)
    outfn = os.path.join(env.app.builder.outdir, "_static", rel_filename)
    ensuredir(os.path.dirname(outfn))
    script_to_render = BOT_HEADER.format(size=opt_size) + text
    if os.path.isfile(outfn):
        # BUG FIX: the message had a %s placeholder but no argument was applied.
        raise ShoebotError("File %s exists, not overwriting." % outfn)
    # build cmd before the try so the except block can always reference it
    cmd = ["sbot", "-o", "%s" % outfn, script_to_render]
    try:
        subprocess.call(cmd)
    except Exception as e:
        # BUG FIX: "%e" is a float conversion and raised TypeError when
        # applied to an exception object; "%s" is intended.
        print("oops %s" % e)
        print("cmd: ")
        print(" ".join(cmd))
        raise ShoebotError(str(e))
    image_node = nodes.image(uri="_static/{}".format(rel_filename), alt="test")
    result.insert(0, image_node)
    return result
def render_aafigure(app, text, options):
    """
    Render an ASCII art figure into the requested format output file.

    Returns ``(relfn, outfn, _id, extra)``; *extra* carries the SVG size
    attribute string when the requested format is SVG, else ``None``.

    Raises:
        AafigError: if aafigure is missing or the format is unsupported.
    """
    _id = None
    if aafigure is None:
        raise AafigError('aafigure module not installed')
    fname = '%s.%s' % (get_basename(text, options), options['format'])
    if app.builder.format == 'html':
        # HTML: images live under _images, referenced relative to the document
        imgpath = relative_uri(app.builder.env.docname, '_images')
        relfn = posixpath.join(imgpath, fname)
        outfn = path.join(app.builder.outdir, '_images', fname)
    else:
        # Non-HTML builders write next to the output root
        if app.builder.format != 'latex':
            app.builder.warn('aafig: the builder format %s is not officially '
                'supported, aafigure images could not work. Please report '
                'problems and working builder to avoid this warning in '
                'the future' % app.builder.format)
        relfn = fname
        outfn = path.join(app.builder.outdir, fname)
    metadata_fname = '%s.aafig' % outfn

    # Cache hit: reuse the already-rendered image (and its SVG metadata).
    # BUG FIX: the original kept a vestigial ``f = None`` / ``finally:
    # f.close()`` dance around a ``with open`` (double close) and funnelled
    # the cache-miss path through a synthetic AafigError; catch the actual
    # I/O failure instead and fall through to re-render.
    if path.isfile(outfn):
        extra = None
        try:
            if options['format'].lower() == 'svg':
                with open(metadata_fname, 'r') as f:
                    extra = f.read()
            return relfn, outfn, _id, extra
        except (IOError, OSError):
            pass  # missing/unreadable metadata: re-render below

    ensuredir(path.dirname(outfn))
    try:
        (visitor, output) = aafigure.render(text, outfn, options)
        output.close()
    except aafigure.UnsupportedFormatError as e:
        raise AafigError(str(e))
    extra = None
    if options['format'].lower() == 'svg':
        extra = visitor.get_size_attrs()
        with open(metadata_fname, 'w') as f:
            f.write(extra)
    return relfn, outfn, _id, extra
def render_yuml(app, uri, text, options):
    """
    Render yuml into a image file.
    """
    format_map = DEFAULT_FORMATS.copy()
    format_map.update(app.builder.config.yuml_format)
    option_map = DEFAULT_OPTIONS.copy()
    option_map.update(options)
    term = format_map[app.builder.format]
    fname = get_filename(text, options, term)
    if app.builder.format == 'html':
        # HTML
        imgpath = relative_uri(app.builder.env.docname,'_images')
        relfn = posixpath.join(imgpath, fname)
        outfn = path.join(app.builder.outdir, '_images', fname)
    else:
        # Non-HTML
        if app.builder.format != 'latex':
            log_warn(app, 'yuml: the builder format %s is not supported.' % app.builder.format)
        relfn = fname
        outfn = path.join(app.builder.outdir, fname)
    ensuredir(path.dirname(outfn))
    docdir = (path.dirname(app.builder.env.docname))
    try:
        debug(app, '[Yuml] generating diagram in %s' % fname)
        # build the yuml option string: style;scale:N;dir:D
        opts = option_map['style']
        if 'scale' in option_map:
            opts += ';scale:%s' % option_map['scale']
        opts += ';dir:%s' % option_map['direction']
        # Try Python 3 quoting first, fall back to the Python 2 API.
        try:
            data = urllib.parse.quote(text, encoding='utf-8')
        except Exception:
            data = urllib.quote(text.encode('utf-8'))
        url = '%s/%s/%s/%s.%s' % (app.builder.config.yuml_server_url.strip('/'), opts, option_map['type'], data, term)
        debug(app, '[Yuml] with URL %s' % url)
        headers = {
            'User-Agent' : 'sphinxcontrib/yuml v0.1',
            'Content-Type' : 'application/x-www-form-urlencoded; charset=UTF-8'
        }
        # NOTE(review): ``urllib2`` is Python 2 only, yet the quoting above
        # prefers ``urllib.parse`` -- presumably urllib2 is aliased somewhere
        # for Python 3; confirm against the module's imports.
        req = urllib2.Request(url, None, headers)
        rep = urllib2.urlopen(req).read()
        out = open(outfn, 'wb')
        out.write(rep)
        out.close()
    except Exception:
        # re-raise any failure (network, I/O) as a YumlError with the message
        (_t, e, _tb) = sys.exc_info()
        del(_tb)
        raise YumlError(str(e))
    return relfn
def render_sdx(self, code, options, format, prefix='sdedit'):
    """
    Render sequence diagram into a PNG or PDF output file.
    """
    # cache key covers the diagram source, directive options and global args
    hashkey = code.encode('utf-8') + str(options) + \
        str(self.builder.config.sdedit_args)
    ofname = '%s-%s.%s' % (prefix, sha(hashkey).hexdigest(), format)
    ifname = '%s-%s.sd' % (prefix, sha(hashkey).hexdigest())
    infn = os.path.join(self.builder.outdir, ifname)
    if hasattr(self.builder, 'imgpath'):
        # HTML
        relfn = posixpath.join(self.builder.imgpath, ofname)
        outfn = os.path.join(self.builder.outdir, '_images', ofname)
    else:
        # LaTeX
        relfn = ofname
        outfn = os.path.join(self.builder.outdir, ofname)
    if os.path.isfile(outfn):
        # cache hit: image already rendered for this key
        return relfn, outfn
    if hasattr(self.builder, '_sdedit_warned'):
        # a previous failure already warned; do not retry every diagram
        return None, None
    ensuredir(os.path.dirname(outfn))
    ensuredir(os.path.dirname(infn))
    inputfile = open(infn, "w")
    if isinstance(code, unicode):  # Python 2 text type
        code = code.encode('utf-8')
    inputfile.write(code)
    inputfile.close()
    # NOTE: this local shadows the imported ``path`` module below this point
    path = self.builder.config.sdedit_path
    if path.endswith(".jar"):
        sdedit_args = [self.builder.config.sdedit_java_path, "-jar", path]
    else:
        sdedit_args = [path]
    sdedit_args.extend(self.builder.config.sdedit_args)
    sdedit_args.extend(['-t', format, '-o', outfn, infn])
    if options.get("linewrap"):
        sdedit_args.extend(['--lineWrap', 'true'])
    if options.get("threadnumber"):
        sdedit_args.extend(['--threadNumbersVisible', 'true'])
    try:
        p = Popen(sdedit_args, stdout=PIPE, stdin=None, stderr=PIPE)
    except OSError, err:  # Python 2 except syntax
        if err.errno != 2:   # No such file or directory
            raise
        self.builder.warn('sdedit command %r cannot be run (needed for '
                          'sequence diagram output), check the sdedit_path '
                          ' setting' % self.builder.config.sdedit_path)
        self.builder._sdedit_warned = True
        return None, None
    # NOTE(review): the view appears truncated here -- ``p`` is unused and the
    # code that waits on the subprocess / returns on success is not visible.
def render_gruffy(self, code, options, formattype, prefix='gruffy'):
    """Render a gruffy chart described by *code* into an image file.

    Returns the path of the image relative to the output directory.
    """
    # cache key covers the chart body and the directive options
    hashkey = code.encode('utf-8') + str(options)
    fname = "%s-%s.%s" % (prefix, sha(hashkey).hexdigest(), formattype)
    relfn = posixpath.join('_images', fname)
    outfn = os.path.join(self.builder.outdir, '_images', fname)
    ensuredir(os.path.dirname(outfn))
    # BUG FIX: the original exec'd the string "g = gruffy.%s(%d)", which is
    # fragile and injection-prone; look the graph class up by name instead.
    graph_cls = getattr(gruffy, options['type'])
    g = graph_cls(options['width'])
    g.title = str(options['title'])
    # SECURITY: each body line is still executed as Python against ``g``
    # (inherited behavior -- directive content is trusted documentation
    # source). The call form of exec works on both Python 2 and 3.
    for line in code.splitlines():
        exec("g.%s" % line.strip())
    g.write(outfn)
    return relfn
def render_aafigure(app, text, options):
    """
    Render an ASCII art figure into the requested format output file.
    """
    if aafigure is None:
        raise AafigError("aafigure module not installed")
    # NOTE(review): this first assignment is immediately overwritten below.
    fname = get_basename(text, options)
    fname = "%s.%s" % (get_basename(text, options), options["format"])
    if app.builder.format == "html":
        # HTML
        imgpath = relative_uri(app.builder.env.docname, "_images")
        relfn = posixpath.join(imgpath, fname)
        outfn = path.join(app.builder.outdir, "_images", fname)
    else:
        # Non-HTML
        if app.builder.format != "latex":
            app.builder.warn(
                "aafig: the builder format %s is not officially "
                "supported, aafigure images could not work. Please report "
                "problems and working builder to avoid this warning in "
                "the future" % app.builder.format
            )
        relfn = fname
        outfn = path.join(app.builder.outdir, fname)
    metadata_fname = "%s.aafig" % outfn
    try:
        # Cache hit: reuse the already-rendered image (and SVG metadata).
        if path.isfile(outfn):
            extra = None
            if options["format"].lower() == "svg":
                f = None
                try:
                    try:
                        # ``file`` is the Python 2 builtin open
                        f = file(metadata_fname, "r")
                        extra = f.read()
                    except:
                        # any read failure falls through to re-rendering
                        raise AafigError()
                finally:
                    if f is not None:
                        f.close()
            # NOTE(review): ``id`` here is the *builtin* function; presumably a
            # placeholder id value was intended -- confirm against callers.
            return relfn, outfn, id, extra
    except AafigError:
        pass
    ensuredir(path.dirname(outfn))
    try:
        (visitor, output) = aafigure.render(text, outfn, options)
        output.close()
    except aafigure.UnsupportedFormatError, e:  # Python 2 except syntax
        raise AafigError(str(e))
    # NOTE(review): the view appears truncated -- the success-path return of
    # (relfn, outfn, ...) is not visible here.
def write_doc(self, docname, doctree):
    """Serialize *doctree* and write the result for *docname* as UTF-8."""
    out = StringOutput(encoding='utf-8')
    self.writer.write(doctree, out)
    target = path.join(self.outdir, os_path(docname) + self.out_suffix)
    ensuredir(path.dirname(target))
    try:
        fh = codecs.open(target, 'w', 'utf-8')
        try:
            fh.write(self.writer.output)
        finally:
            fh.close()
    except (IOError, OSError) as err:
        self.warn("error writing file %s: %s" % (target, err))
def setup(app):
    """Register the bibtex node/directive and install the helper javascript
    file into the build's _static directory."""
    app.add_node(bibtex, html=(visit_bibtex_node, depart_bibtex_node))
    app.add_directive('bibtex', Bibtex)
    # Sphinx already comes with jquery, otherwise the following is needed.
    #app.add_javascript('http://ajax.googleapis.com/ajax/libs/jquery/1.4.2/'
    #                   'jquery.min.js')
    bibjs = 'bibtex_js.js'
    app.add_javascript(bibjs)
    here = os.path.dirname(__file__)
    src_path = os.path.abspath(os.path.join(here, bibjs))
    dst_path = os.path.join(app.outdir, '_static', bibjs)
    ensuredir(os.path.dirname(dst_path))
    shutil.copyfile(src_path, dst_path)
def run(self):
    """Copy the gallery example files listed in the spec JSON next to the
    document and render the gallery index page.

    Raises:
        SphinxError: when a file cannot be copied or a gallery doc fails to read.
    """
    env = self.state.document.settings.env
    app = env.app
    # workaround (used below) for https://github.com/sphinx-doc/sphinx/issues/3924
    current_docname = env.docname
    docdir = dirname(env.doc2path(env.docname))
    specpath = join(docdir, self.arguments[0])
    dest_dir = join(dirname(specpath), "gallery")
    ensuredir(dest_dir)
    env.note_dependency(specpath)
    # BUG FIX: json.load(open(...)) leaked the file handle
    with open(specpath) as f:
        spec = json.load(f)
    details = spec['details']
    details_iter = status_iterator(details,
                                   'copying gallery files... ',
                                   'brown',
                                   len(details),
                                   stringify_func=lambda x: x['name'] + ".py")
    env.gallery_updated = []
    for detail in details_iter:
        # NOTE(review): relies on the process CWD being one level below the
        # repo root -- TODO confirm
        src_path = abspath(join("..", detail['path']))
        dest_path = join(dest_dir, detail['name'] + ".py")
        # sphinx pickled env works only with forward slash
        docname = join(env.app.config.bokeh_gallery_dir,
                       detail['name']).replace("\\", "/")
        try:
            copyfile(src_path, dest_path)
        except OSError as e:
            raise SphinxError('cannot copy gallery file %r, reason: %s' % (src_path, e))
        try:
            env.clear_doc(docname)
            env.read_doc(docname, app=app)
            env.gallery_updated.append(docname)
        except Exception as e:
            raise SphinxError('failed to read gallery doc %r, reason: %s' % (docname, e))
    names = [detail['name'] for detail in details]
    rst_text = GALLERY_PAGE.render(names=names)
    # workaround for https://github.com/sphinx-doc/sphinx/issues/3924
    env.temp_data['docname'] = current_docname
    return self._parse(rst_text, "<bokeh-gallery>")
def write_doc(self, docname, doctree):
    """Assemble the full doctree for *docname*, serialize it, and write the
    joined output as UTF-8."""
    doctree = self.assemble_doctree(docname)
    destination = StringOutput(encoding="utf-8")
    self.writer.write(doctree, destination)
    outpath = self.handle_filename(docname) + self.out_suffix
    ensuredir(path.dirname(outpath))
    try:
        stream = codecs.open(outpath, "w", "utf-8")
        try:
            # writer.output is a sequence of fragments here
            stream.write("".join(self.writer.output))
        finally:
            stream.close()
    except (IOError, OSError) as err:
        self.warn("error writing file %s: %s" % (outpath, err))
def render_lily(self, lily):
    """
    Render the Lilypond music expression *lily* using lilypond.
    """
    # output name derived from a hash of the expression -> stable cache key
    shasum = "%s.png" % sha(lily.encode('utf-8')).hexdigest()
    relfn = posixpath.join(self.builder.imgpath, 'lily', shasum)
    outfn = path.join(self.builder.outdir, '_images', 'lily', shasum)
    if path.isfile(outfn):
        # NOTE(review): cache hit returns a bare string while the failure
        # path below returns a 2-tuple -- confirm callers handle both.
        return relfn
    if hasattr(self.builder, '_lilypng_warned'):
        return None, None
    music = DOC_HEAD + self.builder.config.pnglily_preamble + lily
    if isinstance(music, unicode):  # Python 2 text type
        music = music.encode('utf-8')
    # use only one tempdir per build -- the use of a directory is cleaner
    # than using temporary files, since we can clean up everything at once
    # just removing the whole directory (see cleanup_tempdir_lily)
    if not hasattr(self.builder, '_lilypng_tempdir'):
        tempdir = self.builder._lilypng_tempdir = tempfile.mkdtemp()
    else:
        tempdir = self.builder._lilypng_tempdir
    tf = open(path.join(tempdir, 'music.ly'), 'w')
    tf.write(music)
    tf.close()
    ensuredir(path.dirname(outfn))
    # use some standard lilypond arguments
    lilypond_args = [self.builder.config.pnglily_lilypond]
    #lilypond_args += ['-o', tempdir, '--png']
    lilypond_args += ['-dbackend=eps', '-dno-gs-load-fonts',
                      '-dinclude-eps-fonts', '-o', tempdir, '--png']
    # add custom ones from config value
    lilypond_args.extend(self.builder.config.pnglily_lilypond_args)
    # last, the input file name
    lilypond_args.append(path.join(tempdir, 'music.ly'))
    try:
        p = Popen(lilypond_args, stdout=PIPE, stderr=PIPE)
    except OSError, err:  # Python 2 except syntax
        if err.errno != 2:   # No such file or directory
            raise
        self.builder.warn('lilypond command %r cannot be run (needed for music '
                          'display), check the pnglily_lilypond setting' %
                          self.builder.config.pnglily_lilypond)
        self.builder._lilypng_warned = True
        return None, None
    # NOTE(review): the view appears truncated -- ``p`` is unused and the code
    # that waits on lilypond / moves the PNG into place is not visible.
def render_msc(self, code, format, prefix='mscgen'):
    """
    Render mscgen code into a PNG or PDF output file.
    """
    # cache key covers the diagram source and the global mscgen args
    hashkey = str(code.encode('utf-8')) + str(self.builder.config.mscgen_args)
    # NOTE: shadows the ``id`` builtin for the rest of this function
    id = sha(bytes(hashkey, 'utf-8')).hexdigest()
    fname = '%s-%s.%s' % (prefix, id, format)
    if hasattr(self.builder, 'imgpath') and self.builder.imgpath:
        # HTML
        relfn = posixpath.join(self.builder.imgpath, fname)
        outfn = path.join(self.builder.outdir, '_images', fname)
        tmpfn = outfn
        mapfn = outfn + '.map'
    else:
        # LaTeX
        relfn = fname
        outfn = path.join(self.builder.outdir, fname)
        format = 'eps'
        tmpfn = outfn[:-3] + format
    if path.isfile(outfn):
        # cache hit
        return relfn, outfn, id
    if hasattr(self.builder, '_mscgen_warned'):
        return None, None, None
    ensuredir(path.dirname(outfn))
    # mscgen don't support encodings very well. ISO-8859-1 seems to work best,
    # at least for PNG.
    if isinstance(code, str):
        code = code.encode('iso-8859-1')
    mscgen_args = [self.builder.config.mscgen]
    mscgen_args.extend(self.builder.config.mscgen_args)
    mscgen_args.extend(['-T', format, '-o', tmpfn, '-S', 'signalling'])
    if not run_cmd(self.builder, mscgen_args, 'mscgen', 'mscgen', code):
        return None, None, None
    # NOTE(review): with the post-processing below commented out, the LaTeX
    # branch renders EPS into ``tmpfn`` but still returns ``outfn`` (never
    # written); confirm whether eps_to_pdf should be re-enabled.
#    if format == 'png':
#        mscgen_args = mscgen_args[:-4] + ['-T', 'ismap', '-o', mapfn]
#        if not run_cmd(self.builder, mscgen_args, 'mscgen', 'mscgen', code):
#            return None, None, None
#    else: # PDF/EPS
#        if not eps_to_pdf(self.builder, tmpfn, outfn):
#            return None, None, None
    return relfn, outfn, id
def write_doc(self, docname, doctree):
    """Write the rendered text for *docname* under a capitalized file name.
    Mostly copied from TextBuilder."""
    rendered = StringOutput(encoding='utf-8')
    self.writer.write(doctree, rendered)
    # normally different from self.outdir
    target = path.join(self.outdir,
                       os_path(docname.capitalize()) + self.out_suffix)
    ensuredir(path.dirname(target))
    try:
        fp = codecs.open(target, 'w', 'utf-8')
        try:
            fp.write(self.writer.output)
        finally:
            fp.close()
    except (IOError, OSError) as err:
        self.warn("Error writing file %s: %s" % (target, err))
def html_tag_with_thumbnail(self, code, fname, new_width, image, imgcls):
    """Save a width-scaled PNG thumbnail of *image* and append the linked
    <a><img/></a> markup to the HTML body."""
    width, height = image.size
    # int() matches Python 2 floor division and PIL's expected size type
    new_height = int(height * new_width / width)
    # BUG FIX: Image.resize() and Image.filter() return NEW images; the
    # original discarded both results and saved the full-size image.
    thumb = image.resize((new_width, new_height), Image.ANTIALIAS)
    thumb = thumb.filter(ImageFilter.DETAIL)
    ofname = os.path.basename(fname)
    relfn = posixpath.join(self.builder.imgpath, "thumbnail", ofname)
    outfn = os.path.join(self.builder.outdir, '_images', "thumbnail", ofname)
    ensuredir(os.path.dirname(outfn))
    thumb.save(outfn, "PNG")
    imgcss = imgcls and 'class="%s"' % imgcls or ''
    # NOTE(review): the anchor points at the thumbnail (relfn) while the
    # inline <img> src is *fname*; this looks swapped but is kept as-is --
    # confirm against callers before changing.
    self.body.append('<a href="%s">' % relfn)
    self.body.append('<img src="%s" alt="%s" width="%s" height="%s" %s/>\n' %
                     (fname, self.encode(code).strip(), new_width,
                      new_height, imgcss))
    self.body.append('</a>')
def config_inited_handler(app, config):
    """(Re)generate a per-example .rst file for every entry in the gallery
    spec JSON, and delete stale files left in the gallery directory.

    Raises:
        SphinxError: if the spec file is missing or contains duplicate names.
    """
    gallery_dir = join(app.srcdir, config.bokeh_gallery_dir)
    gallery_file = gallery_dir + ".json"
    # BUG FIX: "not exists(f) and isfile(f)" can never be true; the intent is
    # to fail when the spec file is missing or not a regular file.
    if not (exists(gallery_file) and isfile(gallery_file)):
        raise SphinxError("could not find gallery file %r for configured gallery dir %r" % (gallery_file, gallery_dir))
    gallery_file_mtime = getmtime(gallery_file)
    ensuredir(gallery_dir)
    # we will remove each file we process from this set and see if anything is
    # left at the end (and remove it in that case)
    extras = set(os.listdir(gallery_dir))
    # BUG FIX: json.load(open(...)) leaked the file handle
    with open(gallery_file) as f:
        spec = json.load(f)
    details = spec['details']
    names = set(x['name'] for x in details)
    if len(names) < len(details):
        raise SphinxError("gallery file %r has duplicate names" % gallery_file)
    details_iter = status_iterator(details,
                                   'creating gallery file entries... ',
                                   'brown',
                                   len(details),
                                   app.verbosity,
                                   stringify_func=lambda x: x['name'] + ".rst")
    for detail in details_iter:
        detail_file_name = detail['name'] + ".rst"
        detail_file_path = join(gallery_dir, detail_file_name)
        # BUG FIX: ``extras`` holds bare filenames from os.listdir, but the
        # joined path was being checked, so nothing ever matched and every
        # generated file was deleted again at the end of this handler.
        extras.discard(detail_file_name)
        # if the gallery detail file is newer than the gallery file, assume it is up to date
        if exists(detail_file_path) and getmtime(detail_file_path) > gallery_file_mtime:
            continue
        with open(detail_file_path, "w") as f:
            source_path = abspath(join(app.srcdir, "..", "..", detail['path']))
            f.write(GALLERY_DETAIL.render(filename=detail['name'] + '.py',
                                          source_path=source_path))
    # anything left over no longer corresponds to a spec entry
    for extra_file in extras:
        os.remove(join(gallery_dir, extra_file))
def render_gnuplot(app, text, options):
    """
    Render gnuplot text into a image file.
    """
    format_map = DEFAULT_FORMATS.copy()
    format_map.update(app.builder.config.gnuplot_format)
    hashid = get_hashid(text,options)
    term = format_map[app.builder.format]
    if app.builder.format == 'html':
        fname = 'plot-%s.%s' % (hashid, term)
        # HTML
        imgpath = relative_uri(app.builder.env.docname,'_images')
        relfn = posixpath.join(imgpath,fname)
        outfn = path.join(app.builder.outdir, '_images', fname)
    else:
        # Non-HTML
        if app.builder.format != 'latex':
            app.builder.warn('gnuplot: the builder format %s '
                             'is not officially supported.' % app.builder.format)
        fname = 'plot-%s.%s' % (hashid, term)
        relfn = fname
        outfn = path.join(app.builder.outdir, fname)
    if path.isfile(outfn):
        # cache hit: plot already rendered for this hash
        return relfn, outfn, hashid
    ensuredir(path.dirname(outfn))
    docdir = (path.dirname(app.builder.env.docname))
    try:
        # drive gnuplot through stdin; shell=True so the -persist flag in the
        # command string is honored
        plot = Popen('gnuplot -persist', shell=True, bufsize=64, stdin=PIPE)
        if docdir:
            plot.stdin.write('cd "%s"\n' % docdir)
        plot.stdin.write("set terminal %s " % (term,))
        if 'size' in options:
            plot.stdin.write("size %s\n" % options['size'])
        else:
            plot.stdin.write("\n")
        if 'title' in options:
            plot.stdin.write('set title "%s"\n' % options['title'])
        plot.stdin.write("set output '%s'\n" % (outfn,))
        plot.stdin.write("%s\n" % text)
        plot.stdin.write("\nquit\n")
        plot.stdin.flush()
    except Exception, e:  # Python 2 except syntax (str writes to the pipe)
        raise GnuplotError(str(e))
    # NOTE(review): the view appears truncated -- no success-path return of
    # (relfn, outfn, hashid) is visible after the subprocess interaction.
def visit_map_node_latex(self, node):
    """LaTeX visitor: make sure the map PNG for *node* exists on disk, then
    delegate to the standard image visitor.

    ``self`` is the app/translator, although not well documented:
    http://www.sphinx-doc.org/en/stable/_modules/sphinx/application.html
    """
    uuid_str = str(node._data_hash__)
    fname = 'map_plot-%s.png' % uuid_str
    outfn = os.path.join(self.builder.outdir, fname)
    needs_render = _OVERWRITE_IMAGE_ or not os.path.isfile(outfn)
    if needs_render:
        plot_kwargs = node.attributes['__plotmapargs__']
        fig = get_map_from_csv(**plot_kwargs)
        ensuredir(os.path.dirname(outfn))
        fig.savefig(outfn)
    node['uri'] = fname
    self.visit_image(node)
def finish(self):
    """Emit the collected section headings as a single 'Sidebar' page."""
    docname = 'Sidebar'
    # normally different from self.outdir
    target = path.join(self.outdir,
                       os_path(docname.capitalize()) + self.out_suffix)
    ensuredir(path.dirname(target))
    self.info('Writing sidebar')
    entries = ['*%s %s %s\n' % (level, str(node.list_attributes), node.astext())
               for level, node in self.sections]
    sidebar = ''.join(entries)
    try:
        fp = codecs.open(target, 'w', 'utf-8')
        try:
            fp.write(sidebar)
        finally:
            fp.close()
    except (IOError, OSError) as err:
        self.warn("Error writing file %s: %s" % (target, err))
def copy_download_files(app, exc):
    """
    Copies all files mentioned with role *downloadlink*.

    When *exc* is set the build already failed: log a warning and bail out.
    """
    if exc:
        builder = app.builder
        logger = logging.getLogger("downloadlink")
        mes = "Builder format '{0}'-'{1}', unable to copy file due to {2}".format(
            builder.format, builder.__class__.__name__, exc)
        logger.warning(mes)
        return

    def to_relpath(f):
        # progress output shows paths relative to the source dir
        return relative_path(app.srcdir, f)

    # copy downloadable files
    builder = app.builder
    if not builder.env.dllinkfiles:
        return
    logger = logging.getLogger("downloadlink")
    logger.info("[downloadlink] copy_download_files")
    for src in status_iterator(builder.env.dllinkfiles,
                               __('copying downloadable(link) files... '),
                               "brown", len(builder.env.dllinkfiles),
                               builder.app.verbosity,
                               stringify_func=to_relpath):
        docname, dest = builder.env.dllinkfiles[src]
        relpath = set(os.path.dirname(dn) for dn in docname)
        for rel in relpath:
            # BUG FIX: ensure the actual destination directory; the original
            # ensured only its parent before appending the file name.
            dest_dir = os.path.join(builder.outdir, rel)
            ensuredir(dest_dir)
            shortname = os.path.split(src)[-1]
            dest = os.path.join(dest_dir, shortname)
            name = os.path.join(builder.srcdir, src)
            try:
                copyfile(name, dest)
                logger.info("[downloadlink] copy '{0}' to '{1}'".format(
                    name, dest))
            except FileNotFoundError:
                # BUG FIX: the detailed message was built but never logged.
                mes = ("Builder format '{0}'-'{1}', unable to copy file "
                       "'{2}' into '{3}'").format(
                    builder.format, builder.__class__.__name__, name, dest)
                logger.warning(mes)
def render_aafigure(self, text, options):
    """
    Render an ASCII art figure into the requested format output file.
    """
    # NOTE(review): this first assignment is immediately overwritten below.
    fname = get_basename(text, options)
    fname = f"{get_basename(text, options)}.{options['format']}"
    if True: #TODO: hasattr(self.builder, 'imgpath'):
        # HTML
        #TODO relfn = posixpath.join(self.builder.imgpath, fname)
        relfn = '_build/html/_images/' + fname
        #TODO: outfn = path.join(self.builder.outdir, '_images', fname)
        # hard-coded absolute debug path -- this block is clearly in-progress
        outfn = '/home/luca/repos/aafigure/documentation/_build/html/_images/' + fname
    else:
        # LaTeX
        relfn = fname
        outfn = path.join(self.builder.outdir, fname)
    metadata_fname = f'{outfn}.aafig'
    try:
        # Cache hit: reuse the already-rendered image (and SVG metadata).
        if path.isfile(outfn):
            extra = None
            if options['format'].lower() == 'svg':
                f = None
                try:
                    try:
                        # NOTE(review): ``file`` exists only on Python 2, yet
                        # the f-strings above require Python 3 -- confirm the
                        # intended interpreter.
                        f = file(metadata_fname, 'r')
                        extra = f.read()
                    except:
                        # any read failure falls through to re-rendering
                        raise AafigError()
                finally:
                    if f is not None:
                        f.close()
            # NOTE(review): ``id`` here is the *builtin* function.
            return relfn, outfn, id, extra
    except AafigError:
        pass
    ensuredir(path.dirname(outfn))
    try:
        (visitor, output) = aafigure.render(text, outfn, options)
        output.close()
    except aafigure.UnsupportedFormatError, e:  # Python 2 except syntax
        raise AafigError(str(e))
    # NOTE(review): the view appears truncated -- the success-path return is
    # not visible here.
def render_aafigure(self, text, options):
    """
    Render an ASCII art figure into the requested format output file.
    """
    # NOTE(review): this first assignment is immediately overwritten below.
    fname = get_basename(text, options)
    fname = '%s.%s' % (get_basename(text, options), options['format'])
    if True: #TODO: hasattr(self.builder, 'imgpath'):
        # HTML
        #TODO relfn = posixpath.join(self.builder.imgpath, fname)
        relfn = '_build/html/_images/' + fname
        #TODO: outfn = path.join(self.builder.outdir, '_images', fname)
        # hard-coded absolute debug path -- this block is clearly in-progress
        outfn = '/home/luca/repos/aafigure/documentation/_build/html/_images/' + fname
    else:
        # LaTeX
        relfn = fname
        outfn = path.join(self.builder.outdir, fname)
    metadata_fname = '%s.aafig' % outfn
    try:
        # Cache hit: reuse the already-rendered image (and SVG metadata).
        if path.isfile(outfn):
            extra = None
            if options['format'].lower() == 'svg':
                f = None
                try:
                    try:
                        # ``file`` is the Python 2 builtin open
                        f = file(metadata_fname, 'r')
                        extra = f.read()
                    except:
                        # any read failure falls through to re-rendering
                        raise AafigError()
                finally:
                    if f is not None:
                        f.close()
            # NOTE(review): ``id`` here is the *builtin* function.
            return relfn, outfn, id, extra
    except AafigError:
        pass
    ensuredir(path.dirname(outfn))
    try:
        (visitor, output) = aafigure.render(text, outfn, options)
        output.close()
    except aafigure.UnsupportedFormatError, e:  # Python 2 except syntax
        raise AafigError(str(e))
    # NOTE(review): the view appears truncated -- the success-path return is
    # not visible here.
def build_finished(app, exception):
    """Copy every generated bokeh-plot script into <outdir>/scripts."""
    js_paths = {js_path for (script, js, js_path, source)
                in app.env.bokeh_plot_files.values()}
    progress = app.status_iterator(sorted(js_paths),
                                   'copying bokeh-plot files... ',
                                   console.brown,
                                   len(js_paths),
                                   lambda x: basename(x))
    for src in progress:
        dst = join(app.builder.outdir, "scripts", basename(src))
        ensuredir(dirname(dst))
        try:
            copyfile(src, dst)
        except OSError as e:
            raise SphinxError('cannot copy local file %r, reason: %s' % (src, e))
def run(self):
    """Copy the gallery example files listed in the spec JSON next to the
    document and render the gallery index page.

    Raises:
        SphinxError: when a file cannot be copied or a gallery doc fails to read.
    """
    env = self.state.document.settings.env
    app = env.app
    docdir = dirname(env.doc2path(env.docname))
    dest_dir = join(docdir, "gallery")
    ensuredir(dest_dir)
    specpath = join(docdir, self.arguments[0])
    env.note_dependency(specpath)
    # BUG FIX: json.load(open(...)) leaked the file handle
    with open(specpath) as f:
        spec = json.load(f)
    details = spec['details']
    details_iter = app.status_iterator(details,
                                       'copying gallery files... ',
                                       console.brown,
                                       len(details),
                                       lambda x: x['name'] + ".py")
    env.gallery_updated = []
    for detail in details_iter:
        # NOTE(review): relies on the process CWD being one level below the
        # repo root -- TODO confirm
        src_path = abspath(join("..", detail['path']))
        dest_path = join(dest_dir, detail['name'] + ".py")
        docname = join("docs", "gallery", detail['name'])
        try:
            copyfile(src_path, dest_path)
        except OSError as e:
            raise SphinxError('cannot copy gallery file %r, reason: %s' % (src_path, e))
        try:
            env.clear_doc(docname)
            env.read_doc(docname, app=app)
            env.gallery_updated.append(docname)
        except Exception as e:
            raise SphinxError('failed to read gallery doc %r, reason: %s' % (docname, e))
    names = [detail['name'] for detail in details]
    rst_text = GALLERY_PAGE.render(names=names)
    return self._parse(rst_text, "<bokeh-gallery>")
def run(self):
    """Copy the gallery example scripts next to the document and render the
    gallery index page from the spec JSON."""
    env = self.state.document.settings.env
    app = env.app
    docdir = dirname(env.doc2path(env.docname))
    dest_dir = join(docdir, "gallery")
    ensuredir(dest_dir)
    specpath = join(docdir, self.arguments[0])
    env.note_dependency(specpath)
    details = json.load(open(specpath))['details']
    progress = app.status_iterator(details,
                                   'copying gallery files... ',
                                   console.brown,
                                   len(details),
                                   lambda x: x['name'] + ".py")
    env.gallery_updated = []
    for detail in progress:
        src_path = abspath(join("..", detail['path']))
        dest_path = join(dest_dir, detail['name'] + ".py")
        docname = join("docs", "gallery", detail['name'])
        try:
            copyfile(src_path, dest_path)
        except OSError as e:
            raise SphinxError('cannot copy gallery file %r, reason: %s' % (src_path, e))
        try:
            env.clear_doc(docname)
            env.read_doc(docname, app=app)
            env.gallery_updated.append(docname)
        except Exception as e:
            raise SphinxError('failed to read gallery doc %r, reason: %s' % (docname, e))
    rst_text = GALLERY_PAGE.render(names=[d['name'] for d in details])
    return self._parse(rst_text, "<bokeh-gallery>")
def config_inited_handler(app, config):
    """(Re)generate a per-example .rst file for every entry in the gallery
    spec JSON, and delete stale files left in the gallery directory.

    Raises:
        SphinxError: if the spec file is missing or contains duplicate names.
    """
    gallery_dir = join(app.srcdir, config.bokeh_gallery_dir)
    gallery_file = f"{gallery_dir}.json"
    # BUG FIX: "not exists(f) and isfile(f)" can never be true; the intent is
    # to fail when the spec file is missing or not a regular file.
    if not (exists(gallery_file) and isfile(gallery_file)):
        raise SphinxError(f"could not find gallery file {gallery_file!r} for configured gallery dir {gallery_dir!r}")
    gallery_file_mtime = getmtime(gallery_file)
    ensuredir(gallery_dir)
    # we will remove each file we process from this set and see if anything is
    # left at the end (and remove it in that case)
    extras = set(os.listdir(gallery_dir))
    # BUG FIX: json.load(open(...)) leaked the file handle
    with open(gallery_file) as f:
        spec = json.load(f)
    details = spec["details"]
    names = {x["name"] for x in details}
    if len(names) < len(details):
        raise SphinxError(f"gallery file {gallery_file!r} has duplicate names")
    details_iter = status_iterator(details,
                                   "creating gallery file entries... ",
                                   "brown",
                                   len(details),
                                   app.verbosity,
                                   stringify_func=lambda x: x["name"] + ".rst")
    for detail in details_iter:
        detail_file_name = detail["name"] + ".rst"
        detail_file_path = join(gallery_dir, detail_file_name)
        # BUG FIX: ``extras`` holds bare filenames from os.listdir, but the
        # joined path was being checked, so nothing ever matched and every
        # generated file was deleted again at the end of this handler.
        extras.discard(detail_file_name)
        # if the gallery detail file is newer than the gallery file, assume it is up to date
        if exists(detail_file_path) and getmtime(detail_file_path) > gallery_file_mtime:
            continue
        with open(detail_file_path, "w") as f:
            source_path = abspath(join(app.srcdir, "..", "..", detail["path"]))
            f.write(GALLERY_DETAIL.render(filename=detail["name"] + ".py",
                                          source_path=source_path))
    # anything left over no longer corresponds to a spec entry
    for extra_file in extras:
        os.remove(join(gallery_dir, extra_file))
def render_aafigure(self, text, options):
    """
    Render an ASCII art figure into the requested format output file.
    """
    fname = get_basename(text, options)
    # NOTE(review): the format suffix is commented out here, unlike the
    # sibling implementations -- confirm which naming is intended.
    #fname = '%s.%s' % (get_basename(text, options), options['format'])
    if hasattr(self.builder, 'imgpath'):
        # HTML
        relfn = posixpath.join(self.builder.imgpath, fname)
        outfn = path.join(self.builder.outdir, '_images', fname)
    else:
        # LaTeX
        relfn = fname
        outfn = path.join(self.builder.outdir, fname)
    metadata_fname = '%s.aafig' % outfn
    try:
        # Cache hit: reuse the already-rendered image (and SVG metadata).
        if path.isfile(outfn):
            extra = None
            if options['format'].lower() == 'svg':
                f = None
                try:
                    try:
                        # ``file`` is the Python 2 builtin open
                        f = file(metadata_fname, 'r')
                        extra = f.read()
                    except:
                        # any read failure falls through to re-rendering
                        raise AafigError()
                finally:
                    if f is not None:
                        f.close()
            # NOTE(review): ``id`` here is the *builtin* function; presumably
            # a placeholder id value was intended -- confirm.
            return relfn, outfn, id, extra
    except AafigError:
        pass
    ensuredir(path.dirname(outfn))
    try:
        (visitor, output) = aafigure.render(text, outfn, options)
        output.close()
    except aafigure.UnsupportedFormatError, e:  # Python 2 except syntax
        raise AafigError(str(e))
    # NOTE(review): the view appears truncated -- the success-path return is
    # not visible here.
def build_finished(app, exception):
    """Copy each generated bokeh-plot script next to the document that
    references it."""
    pairs = set()
    for (script, js, js_path, source, docpath) in app.env.bokeh_plot_files.values():
        pairs.add((js_path, docpath))
    progress = status_iterator(sorted(pairs),
                               'copying bokeh-plot files... ',
                               'brown',
                               len(pairs),
                               app.verbosity,
                               stringify_func=lambda x: basename(x[0]))
    for src, docpath in progress:
        dst = join(app.builder.outdir, docpath, basename(src))
        ensuredir(dirname(dst))
        try:
            copyfile(src, dst)
        except OSError as e:
            raise SphinxError('cannot copy local file %r, reason: %s' % (src, e))
def render_dot(self, code, options, format, prefix='graphviz'):
    """
    Render graphviz code into a PNG or PDF output file.
    """
    # cache key covers the graph source, directive options and global dot args
    hashkey = code.encode('utf-8') + str(options) + \
        str(self.builder.config.graphviz_dot_args)
    fname = '%s-%s.%s' % (prefix, sha(hashkey).hexdigest(), format)
    if hasattr(self.builder, 'imgpath'):
        # HTML
        relfn = posixpath.join(self.builder.imgpath, fname)
        outfn = path.join(self.builder.outdir, '_images', fname)
    else:
        # LaTeX
        relfn = fname
        outfn = path.join(self.builder.outdir, fname)
    if path.isfile(outfn):
        # cache hit
        return relfn, outfn
    if hasattr(self.builder, '_graphviz_warned_dot') or \
       hasattr(self.builder, '_graphviz_warned_ps2pdf'):
        return None, None
    ensuredir(path.dirname(outfn))
    dot_args = [self.builder.config.graphviz_dot]
    dot_args.extend(self.builder.config.graphviz_dot_args)
    # ``options`` is extended directly into argv here, so at this point it is
    # expected to be a sequence of extra dot flags
    dot_args.extend(options)
    dot_args.extend(['-T' + format, '-o' + outfn])
    if format == 'png':
        # also emit a client-side image map alongside the PNG
        dot_args.extend(['-Tcmapx', '-o%s.map' % outfn])
    try:
        p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE)
    except OSError, err:  # Python 2 except syntax
        if err.errno != 2:   # No such file or directory
            raise
        self.builder.warn('dot command %r cannot be run (needed for graphviz '
                          'output), check the graphviz_dot setting' %
                          self.builder.config.graphviz_dot)
        self.builder._graphviz_warned_dot = True
        return None, None
    # NOTE(review): the view appears truncated -- ``p`` is unused and the code
    # that feeds dot / returns on success is not visible.
def handle_page(self, pagename, addctx, templatename='page.html', outfilename=None, event_arg=None): ctx = self.globalcontext.copy() # current_page_name is backwards compatibility ctx['pagename'] = ctx['current_page_name'] = pagename def pathto(otheruri, resource=False, baseuri=self.get_target_uri(pagename)): if not resource: otheruri = self.get_target_uri(otheruri) return relative_uri(baseuri, otheruri) ctx['pathto'] = pathto ctx['hasdoc'] = lambda name: name in self.env.all_docs ctx['customsidebar'] = self.config.html_sidebars.get(pagename) ctx['encoding'] = encoding = self.config.html_output_encoding ctx['toctree'] = lambda **kw: self._get_local_toctree(pagename, **kw) ctx.update(addctx) self.app.emit('html-page-context', pagename, templatename, ctx, event_arg) output = self.templates.render(templatename, ctx) if not outfilename: outfilename = self.get_outfilename(pagename) # outfilename's path is in general different from self.outdir ensuredir(path.dirname(outfilename)) try: f = codecs.open(outfilename, 'w', encoding) try: f.write(output) finally: f.close() except (IOError, OSError), err: self.warn("error writing file %s: %s" % (outfilename, err))
def test_ensuredir():
    """ensuredir is idempotent, creates nested dirs, and rejects files."""
    with tempfile.TemporaryDirectory() as tmp_path:
        # An already-existing directory must not raise.
        ensuredir(tmp_path)
        nested = os.path.join(tmp_path, 'a', 'b', 'c')
        ensuredir(nested)
        assert os.path.isdir(nested)

    # A path that is an existing *file* must raise OSError.
    with tempfile.NamedTemporaryFile() as tmp:
        with pytest.raises(OSError):
            ensuredir(tmp.name)
def setUp(self):
    """
    Use a known testing file; write it to a temporary location for the test.
    """
    self.tempdir = tempfile.mkdtemp()
    # Trailing os.sep is deliberate: later code concatenates bare
    # filenames onto these paths.
    self.local_path = self.tempdir + os.sep + 'local' + os.sep
    self.remote_path = self.tempdir + os.sep + 'remote' + os.sep
    ensuredir(self.tempdir)
    ensuredir(self.local_path)
    ensuredir(self.remote_path)
    # with-statement closes the file even if writelines raises (the
    # original leaked the handle on error).
    with open(self.remote_path + 'index.rst', 'w') as f:
        f.writelines(['Header\n', '======\n', '\n', 'Paragraph 1\n', '\n',
                      'Paragraph 2\n', '\n', 'Paragraph 3\n'])
    self.local_repo = 'file://' + self.local_path
    self.remote_repo = 'file://' + self.remote_path
    dvcs.local_repo_physical_dir = self.local_path
def setUp(self):
    """
    Use a known testing file; write it to a temporary location for the test.
    """
    self.tempdir = tempfile.mkdtemp()
    # Trailing os.sep is deliberate: later code concatenates bare
    # filenames onto these paths.
    self.local_path = self.tempdir + os.sep + 'local' + os.sep
    self.remote_path = self.tempdir + os.sep + 'remote' + os.sep
    ensuredir(self.tempdir)
    ensuredir(self.local_path)
    ensuredir(self.remote_path)
    # with-statement closes the file even if writelines raises (the
    # original leaked the handle on error).
    with open(self.remote_path + 'index.rst', 'w') as f:
        f.writelines([
            'Header\n',
            '======\n',
            '\n',
            'Paragraph 1\n',
            '\n',
            'Paragraph 2\n',
            '\n',
            'Paragraph 3\n'
        ])
    self.local_repo = 'file://' + self.local_path
    self.remote_repo = 'file://' + self.remote_path
    dvcs.local_repo_physical_dir = self.local_path
def render_aafigure(app, text, options):
    """
    Render an ASCII art figure into the requested format output file.

    Returns ``(relfn, outfn, id, extra)``: image URI relative to the
    current document, absolute output path, and -- for SVG only -- the
    stored size attributes (None otherwise).

    Raises AafigError when the aafigure module is missing or the
    requested format is unsupported.
    """
    if aafigure is None:
        raise AafigError('aafigure module not installed')

    # Compute the basename once (the original called get_basename twice
    # and threw the first result away).
    fname = '%s.%s' % (get_basename(text, options), options['format'])
    if app.builder.format == 'html':
        # HTML: images go under _images/, referenced relative to the doc.
        imgpath = relative_uri(app.builder.env.docname, '_images')
        relfn = posixpath.join(imgpath, fname)
        outfn = path.join(app.builder.outdir, '_images', fname)
    else:
        # Non-HTML
        if app.builder.format != 'latex':
            logger.warn(
                'aafig: the builder format %s is not officially '
                'supported, aafigure images could not work. Please report '
                'problems and working builder to avoid this warning in '
                'the future' % app.builder.format)
        relfn = fname
        outfn = path.join(app.builder.outdir, fname)
    metadata_fname = '%s.aafig' % outfn

    # Cache hit: reuse the rendered image (and stored SVG metadata).
    try:
        if path.isfile(outfn):
            extra = None
            if options['format'].lower() == 'svg':
                # open() replaces the Python-2-only file() builtin; the
                # exception scope is narrowed from a bare except to I/O
                # errors, which are the only "re-render" condition here.
                try:
                    with open(metadata_fname, 'r') as f:
                        extra = f.read()
                except (IOError, OSError):
                    raise AafigError()
            # NOTE(review): ``id`` is the *builtin* function, preserved
            # for backward compatibility with existing callers -- confirm
            # whether a real identifier was intended.
            return relfn, outfn, id, extra
    except AafigError:
        pass

    ensuredir(path.dirname(outfn))

    try:
        (visitor, output) = aafigure.render(text, outfn, options)
        output.close()
    except aafigure.UnsupportedFormatError as e:
        raise AafigError(str(e))

    extra = None
    if options['format'].lower() == 'svg':
        extra = visitor.get_size_attrs()
        # Only SVG has metadata to persist; the original unconditionally
        # wrote ``extra`` and so raised TypeError (f.write(None)) for
        # every non-SVG format.
        with open(metadata_fname, 'w') as f:
            f.write(extra)

    return relfn, outfn, id, extra
def builder_inited(app):
    """Prepare the per-build auxiliary directory for bokeh-plot output."""
    # e.g. sphinx/build/doctrees/bokeh_plot
    auxdir = join(app.env.doctreedir, "bokeh_plot")
    app.env.bokeh_plot_auxdir = auxdir
    ensuredir(auxdir)

    # First build in this environment: start with an empty file registry.
    if not hasattr(app.env, "bokeh_plot_files"):
        app.env.bokeh_plot_files = set()
# Copy files registered via the :download: role into _downloads/.
# (Fragment of a builder finish routine; Python-2-era code.)
if self.env.dlfiles:
    self.info(bold('copying downloadable files...'), nonl=True)
    ensuredir(path.join(self.outdir, '_downloads'))
    for src, (_, dest) in self.env.dlfiles.iteritems():
        self.info(' '+src, nonl=1)
        try:
            copyfile(path.join(self.srcdir, src),
                     path.join(self.outdir, '_downloads', dest))
        except Exception, err:
            # best-effort: a single bad file must not abort the build
            self.warn('cannot copy downloadable file %r: %s' %
                      (path.join(self.srcdir, src), err))
    self.info()

# copy static files
self.info(bold('copying static files... '), nonl=True)
ensuredir(path.join(self.outdir, '_static'))
# first, create pygments style file
f = open(path.join(self.outdir, '_static', 'pygments.css'), 'w')
f.write(self.highlighter.get_stylesheet())
f.close()
# then, copy translations JavaScript file
if self.config.language is not None:
    jsfile = path.join(package_dir, 'locale', self.config.language,
                       'LC_MESSAGES', 'sphinx.js')
    if path.isfile(jsfile):
        copyfile(jsfile, path.join(self.outdir, '_static',
                                   'translations.js'))
# then, copy over all user-supplied static files
if self.theme:
    # reversed dirchain: base theme first so derived themes override it
    staticdirnames = [path.join(themepath, 'static')
                      for themepath in self.theme.get_dirchain()[::-1]]
def render_drawio(self: SphinxTranslator, node: DrawIONode, in_filename: str,
                  default_output_format: str) -> str:
    """Render drawio file into an output image file.

    Returns the image path relative to the builder's image directory.
    Results are cached by a SHA-1 of every rendering-relevant option, so
    an unchanged diagram is exported only once per build.

    Raises DrawIOError when the draw.io binary fails or produces no file.
    """
    page_index = str(node["config"].get("page-index", 0))
    output_format = node["config"].get("format") or default_output_format
    scale = str(node["config"].get("scale", self.config.drawio_default_scale))
    transparent = node["config"].get("transparency",
                                     self.config.drawio_default_transparency)

    # Any directive options which would change the output file would go here
    unique_values = (
        # This ensures that the same file hash is generated no matter the
        # build directory. Mainly useful for pytest, as it creates a new
        # build directory every time
        node["filename"].replace(self.builder.srcdir, ""),
        page_index,
        scale,
        output_format,
        *[str(node["config"].get(option))
          for option in DrawIO.optional_uniques]
    )
    hash_key = "\n".join(unique_values)
    sha_key = sha1(hash_key.encode()).hexdigest()
    # NOTE(review): the file extension uses default_output_format, not
    # output_format -- a per-directive "format" option will not change the
    # cached file's extension. Confirm this is intentional.
    filename = "drawio-{}.{}".format(sha_key, default_output_format)
    file_path = posixpath.join(self.builder.imgpath, filename)
    out_file_path = os.path.join(self.builder.outdir, self.builder.imagedir,
                                 filename)

    # Cache hit: reuse the previously exported image.
    if os.path.isfile(out_file_path):
        return file_path

    ensuredir(os.path.dirname(out_file_path))

    # Locate the draw.io executable: explicit config wins, then the
    # platform-specific default install location.
    if self.builder.config.drawio_binary_path:
        binary_path = self.builder.config.drawio_binary_path
    elif platform.system() == "Windows":
        binary_path = r"C:\Program Files\draw.io\draw.io.exe"
    else:
        binary_path = "/opt/draw.io/drawio"

    # Forward any per-directive optional settings as CLI flags.
    extra_args = []
    for option in DrawIO.optional_uniques:
        if option in node["config"]:
            value = node["config"][option]
            extra_args.append("--{}".format(option))
            extra_args.append(str(value))

    if transparent:
        extra_args.append("--transparent")

    drawio_args = [
        binary_path,
        "--export",
        "--page-index",
        page_index,
        "--scale",
        scale,
        *extra_args,
        "--format",
        output_format,
        "--output",
        out_file_path,
        in_filename,
    ]

    # Run from the document's directory so relative references resolve.
    doc_name = node.get("doc_name", "index")
    cwd = os.path.dirname(os.path.join(self.builder.srcdir, doc_name))

    new_env = os.environ.copy()
    if is_headless(self.config):
        # Point draw.io at the virtual X display when running headless.
        new_env["DISPLAY"] = ":{}".format(X_DISPLAY_NUMBER)

    try:
        ret = subprocess.run(drawio_args, stderr=subprocess.PIPE,
                             stdout=subprocess.PIPE, cwd=cwd, check=True,
                             env=new_env)
        # draw.io can exit 0 and still write nothing -- verify the output.
        if not os.path.isfile(out_file_path):
            raise DrawIOError("draw.io did not produce an output file:"
                              "\n[stderr]\n{}\n[stdout]\n{}".format(
                                  ret.stderr, ret.stdout))
        return file_path
    except OSError as exc:
        # binary missing / not executable
        raise DrawIOError("draw.io ({}) exited with error:\n{}".format(
            " ".join(drawio_args), exc))
    except subprocess.CalledProcessError as exc:
        # non-zero exit from draw.io itself
        raise DrawIOError("draw.io ({}) exited with error:\n[stderr]\n{}"
                          "\n[stdout]\n{}".format(" ".join(drawio_args),
                                                  exc.stderr, exc.stdout))
def render_lily(self, lily_source):
    """Render the Lilypond music expression *lily* using lilypond.

    Returns the image filename relative to the built document on success.

    NOTE(review): the failure path returns the 2-tuple ``(None, None)``
    while success returns a single value -- callers must cope with both;
    confirm whether this asymmetry is intentional.
    """
    if hasattr(self.builder, '_lilypond_warned'):
        # lilypond was already found to be missing -- don't retry.
        return None, None

    # self.builder.warn('Render lilipond')
    # _logger.info('Render lilypond\n' + lily_source)
    print('Render lilypond\n' + lily_source)
    # Content-addressed cache: identical sources map to one image.
    basename = "{}.png".format(sha(lily_source.encode('utf-8')).hexdigest())

    relative_filename = os.path.join(self.builder.imgpath, 'lily', basename)
    absolut_filename = os.path.join(self.builder.outdir,
                                    '_images', 'lily', basename)
    if os.path.isfile(absolut_filename):
        return relative_filename

    lily_source = DOCUMENT_BEGIN + self.builder.config.lilypond_preamble + \
        lily_source

    # use only one tempdir per build -- the use of a directory is cleaner
    # than using temporary files, since we can clean up everything at once
    # just removing the whole directory (see cleanup_lily_tempdir)
    if not hasattr(self.builder, '_lilypond_tempdir'):
        tempdir = self.builder._lilypond_tempdir = tempfile.mkdtemp()
    else:
        tempdir = self.builder._lilypond_tempdir

    lilypond_input_file = os.path.join(tempdir, 'music.ly')
    with open(lilypond_input_file, 'w') as fh:
        fh.write(lily_source)

    ensuredir(os.path.dirname(absolut_filename))
    # use some standard lilypond arguments
    lilypond_args = [self.builder.config.lilypond_command]
    # Cropped SVG
    # http://lilypond.1069038.n5.nabble.com/Cropped-SVG-Output-td182397.html
    # https://github.com/Abjad/abjad/issues/606 Option to ask lilypond to
    # render to SVG and use ipython.display.SVG to show it #606
    # inkscape -S lylipond-test.svg
    # svg78,170.07411,25.448056,223.11942,34.394129
    #       X         Y         W         H
    lilypond_args += [
        '-o', tempdir,
        # '--format=png',
        '-dbackend=eps',  # -dbackend=svg --svg-woff
        # cf. http://lilypond.org/doc/v2.19/Documentation/usage/command_002dline-usage#advanced-command-line-options-for-lilypond
        '-dno-gs-load-fonts',
        '-dinclude-eps-fonts',
        '--png',
        '-dresolution=200',
    ]
    # add custom ones from config value
    lilypond_args.extend(self.builder.config.lilypond_args)

    # last, the input file name
    lilypond_args.append(lilypond_input_file)

    try:
        process = Popen(lilypond_args, stdout=PIPE, stderr=PIPE)
    except OSError as exception:
        if exception.errno != 2:  # No such file or directory
            raise
        # lilypond binary missing: warn once and give up for this build.
        template = 'lilypond command {} cannot be run (needed for music display), check the lilypond_command setting'
        self.builder.warn(template.format(
            self.builder.config.lilypond_command))
        self.builder._lilypond_warned = True
        return None, None

    stdout, stderr = process.communicate()
    if process.returncode != 0:
        # lilypond ran but rejected the source -- that's a hard error.
        template = 'lilypond exited with error:\n[stderr]\n{}\n[stdout]\n{}'
        raise LilyExtError(
            template.format(stderr.decode('utf-8'), stdout.decode('utf-8')))

    # lilypond always writes 'music.png' into the tempdir; move it into
    # the build's image directory.
    shutil.copyfile(os.path.join(tempdir, 'music.png'), absolut_filename)
    # Popen(['mogrify', '-trim', absolut_filename], stdout=PIPE, stderr=PIPE)

    return relative_filename
# NOTE(review): fragment of a pngmath render method -- it resumes inside an
# ``except OSError, err:`` handler whose ``try`` and ``def`` lie outside
# this view, and it is truncated at the end (dangling ``if``).
        if err.errno != 2:  # No such file or directory
            raise
        if not hasattr(self.builder, '_mathpng_warned_latex'):
            # latex binary missing: warn once per build, then bail out
            self.builder.warn(
                'LaTeX command %r cannot be run (needed for math '
                'display), check the pngmath_latex setting' %
                self.builder.config.pngmath_latex)
            self.builder._mathpng_warned_latex = True
        return relfn, None
    stdout, stderr = p.communicate()
    if p.returncode != 0:
        # latex ran but rejected the math source -- hard error
        raise MathExtError(
            'latex exited with error:\n[stderr]\n%s\n[stdout]\n%s' %
            (stderr, stdout))
    ensuredir(path.dirname(outfn))
    # use some standard dvipng arguments
    dvipng_args = shlex.split(self.builder.config.pngmath_dvipng)
    dvipng_args += ['-o', outfn, '-T', 'tight', '-z9']
    # add custom ones from config value
    dvipng_args.extend(self.builder.config.pngmath_dvipng_args)
    if use_preview:
        # --depth reports the baseline offset used for inline alignment
        dvipng_args.append('--depth')
    # last, the input file name
    dvipng_args.append(path.join(tempdir, 'math.dvi'))
    try:
        p = Popen(dvipng_args, stdout=PIPE, stderr=PIPE)
    except OSError, err:
        if err.errno != 2:  # No such file or directory
            raise
        if not hasattr(self.builder, '_mathpng_warned_dvipng'):
            # (fragment truncated here in SOURCE)
class StandaloneHTMLBuilder(Builder):
    """
    Builds standalone HTML docs.

    Python-2-era Sphinx builder: renders each document through docutils'
    HTML writer, templates the result, and writes indices, search page
    and static assets in finish().
    """
    name = 'html'
    format = 'html'
    copysource = True
    out_suffix = '.html'
    link_suffix = '.html'  # defaults to matching out_suffix
    indexer_format = js_index
    supported_image_types = [
        'image/svg+xml', 'image/png', 'image/gif', 'image/jpeg'
    ]
    searchindex_filename = 'searchindex.js'
    add_permalinks = True
    embedded = False  # for things like HTML help or Qt help: suppresses sidebar

    # This is a class attribute because it is mutated by Sphinx.add_javascript.
    script_files = ['_static/jquery.js', '_static/doctools.js']
    # Dito for this one.
    css_files = []

    # cached publisher object for snippets
    _publisher = None

    def init(self):
        """Initialise templates, highlighter, translator and suffixes."""
        # a hash of all config values that, if changed, cause a full rebuild
        self.config_hash = ''
        self.tags_hash = ''
        # section numbers for headings in the currently visited document
        self.secnumbers = {}
        self.init_templates()
        self.init_highlighter()
        self.init_translator_class()
        if self.config.html_file_suffix:
            self.out_suffix = self.config.html_file_suffix
        if self.config.html_link_suffix is not None:
            self.link_suffix = self.config.html_link_suffix
        else:
            self.link_suffix = self.out_suffix
        if self.config.language is not None:
            # only add the translations JS file if it actually exists
            jsfile = path.join(package_dir, 'locale', self.config.language,
                               'LC_MESSAGES', 'sphinx.js')
            if path.isfile(jsfile):
                self.script_files.append('_static/translations.js')

    def init_templates(self):
        """Set up the theme and the template bridge."""
        Theme.init_themes(self)
        self.theme = Theme(self.config.html_theme)
        self.create_template_bridge()
        self.templates.init(self, self.theme)

    def init_highlighter(self):
        # determine Pygments style and create the highlighter
        if self.config.pygments_style is not None:
            style = self.config.pygments_style
        elif self.theme:
            style = self.theme.get_confstr('theme', 'pygments_style', 'none')
        else:
            style = 'sphinx'
        self.highlighter = PygmentsBridge('html', style,
                                          self.config.trim_doctest_flags)

    def init_translator_class(self):
        """Choose the docutils translator: configured, smartypants or plain."""
        if self.config.html_translator_class:
            self.translator_class = self.app.import_object(
                self.config.html_translator_class,
                'html_translator_class setting')
        elif self.config.html_use_smartypants:
            self.translator_class = SmartyPantsHTMLTranslator
        else:
            self.translator_class = HTMLTranslator

    def get_outdated_docs(self):
        """Yield docnames that need rewriting (config/tag change or mtime)."""
        # hash only the config values marked as affecting HTML output
        cfgdict = dict((name, self.config[name])
                       for (name, desc) in self.config.values.iteritems()
                       if desc[1] == 'html')
        self.config_hash = md5(str(cfgdict)).hexdigest()
        self.tags_hash = md5(str(sorted(self.tags))).hexdigest()
        old_config_hash = old_tags_hash = ''
        try:
            # .buildinfo records the hashes of the previous build
            fp = open(path.join(self.outdir, '.buildinfo'))
            version = fp.readline()
            if version.rstrip() != '# Sphinx build info version 1':
                raise ValueError
            fp.readline()  # skip commentary
            cfg, old_config_hash = fp.readline().strip().split(': ')
            if cfg != 'config':
                raise ValueError
            tag, old_tags_hash = fp.readline().strip().split(': ')
            if tag != 'tags':
                raise ValueError
            fp.close()
        except ValueError:
            self.warn('unsupported build info format in %r, building all' %
                      path.join(self.outdir, '.buildinfo'))
        except Exception:
            # missing/unreadable .buildinfo simply means "rebuild all"
            pass
        if old_config_hash != self.config_hash or \
           old_tags_hash != self.tags_hash:
            # config or tags changed: everything is outdated
            for docname in self.env.found_docs:
                yield docname
            return

        if self.templates:
            template_mtime = self.templates.newest_template_mtime()
        else:
            template_mtime = 0
        for docname in self.env.found_docs:
            if docname not in self.env.all_docs:
                yield docname
                continue
            targetname = self.env.doc2path(docname, self.outdir,
                                           self.out_suffix)
            try:
                targetmtime = path.getmtime(targetname)
            except Exception:
                targetmtime = 0
            try:
                # outdated when source or any template is newer than output
                srcmtime = max(path.getmtime(self.env.doc2path(docname)),
                               template_mtime)
                if srcmtime > targetmtime:
                    yield docname
            except EnvironmentError:
                # source doesn't exist anymore
                pass

    def render_partial(self, node):
        """Utility: Render a lone doctree node."""
        doc = new_document('<partial node>')
        doc.append(node)
        # lazily create and cache one Publisher for all snippets
        if self._publisher is None:
            self._publisher = Publisher(source_class=DocTreeInput,
                                        destination_class=StringOutput)
            self._publisher.set_components('standalone',
                                           'restructuredtext', 'pseudoxml')
        pub = self._publisher
        pub.reader = DoctreeReader()
        pub.writer = HTMLWriter(self)
        pub.process_programmatic_settings(None,
                                          {'output_encoding': 'unicode'},
                                          None)
        pub.set_source(doc, None)
        pub.set_destination(None, None)
        pub.publish()
        return pub.writer.parts

    def prepare_writing(self, docnames):
        """Create writer/indexer and assemble the global template context."""
        from sphinx.search import IndexBuilder
        self.indexer = IndexBuilder(self.env)
        self.load_indexer(docnames)
        self.docwriter = HTMLWriter(self)
        self.docsettings = OptionParser(
            defaults=self.env.settings,
            components=(self.docwriter, )).get_default_values()

        # format the "last updated on" string, only once is enough since it
        # typically doesn't include the time of day
        lufmt = self.config.html_last_updated_fmt
        if lufmt is not None:
            self.last_updated = ustrftime(lufmt or _('%b %d, %Y'))
        else:
            self.last_updated = None

        logo = self.config.html_logo and \
            path.basename(self.config.html_logo) or ''
        favicon = self.config.html_favicon and \
            path.basename(self.config.html_favicon) or ''
        if favicon and os.path.splitext(favicon)[1] != '.ico':
            self.warn('html_favicon is not an .ico file')

        if not isinstance(self.config.html_use_opensearch, basestring):
            self.warn('html_use_opensearch config value must now be a string')

        self.relations = self.env.collect_relations()

        rellinks = []
        if self.config.html_use_index:
            rellinks.append(('genindex', _('General Index'), 'I', _('index')))
        if self.config.html_use_modindex and self.env.modules:
            rellinks.append(
                ('modindex', _('Global Module Index'), 'M', _('modules')))

        if self.config.html_style is not None:
            stylename = self.config.html_style
        elif self.theme:
            stylename = self.theme.get_confstr('theme', 'stylesheet')
        else:
            stylename = 'default.css'

        self.globalcontext = dict(
            embedded=self.embedded,
            project=self.config.project,
            release=self.config.release,
            version=self.config.version,
            last_updated=self.last_updated,
            copyright=self.config.copyright,
            master_doc=self.config.master_doc,
            use_opensearch=self.config.html_use_opensearch,
            docstitle=self.config.html_title,
            shorttitle=self.config.html_short_title,
            show_copyright=self.config.html_show_copyright,
            show_sphinx=self.config.html_show_sphinx,
            has_source=self.config.html_copy_source,
            show_source=self.config.html_show_sourcelink,
            file_suffix=self.out_suffix,
            script_files=self.script_files,
            css_files=self.css_files,
            sphinx_version=__version__,
            style=stylename,
            rellinks=rellinks,
            builder=self.name,
            parents=[],
            logo=logo,
            favicon=favicon,
        )
        if self.theme:
            # expose theme options to templates as theme_<name>
            self.globalcontext.update(
                ('theme_' + key, val) for (key, val) in
                self.theme.get_options(
                    self.config.html_theme_options).iteritems())
        self.globalcontext.update(self.config.html_context)

    def get_doc_context(self, docname, body, metatags):
        """Collect items for the template context of a page."""
        # find out relations
        prev = next = None
        parents = []
        rellinks = self.globalcontext['rellinks'][:]
        related = self.relations.get(docname)
        titles = self.env.titles
        if related and related[2]:
            try:
                next = {
                    'link': self.get_relative_uri(docname, related[2]),
                    'title': self.render_partial(titles[related[2]])['title']
                }
                rellinks.append((related[2], next['title'], 'N', _('next')))
            except KeyError:
                next = None
        if related and related[1]:
            try:
                prev = {
                    'link': self.get_relative_uri(docname, related[1]),
                    'title': self.render_partial(titles[related[1]])['title']
                }
                rellinks.append(
                    (related[1], prev['title'], 'P', _('previous')))
            except KeyError:
                # the relation is (somehow) not in the TOC tree, handle
                # that gracefully
                prev = None
        while related and related[0]:
            try:
                parents.append({
                    'link': self.get_relative_uri(docname, related[0]),
                    'title': self.render_partial(titles[related[0]])['title']
                })
            except KeyError:
                pass
            related = self.relations.get(related[0])
        if parents:
            parents.pop()  # remove link to the master file; we have a
                           # generic "back to index" link already
        parents.reverse()

        # title rendered as HTML
        title = self.env.longtitles.get(docname)
        title = title and self.render_partial(title)['title'] or ''
        # the name for the copied source
        sourcename = self.config.html_copy_source and docname + '.txt' or ''
        # metadata for the document
        meta = self.env.metadata.get(docname)

        # local TOC and global TOC tree
        toc = self.render_partial(self.env.get_toc_for(docname))['fragment']

        return dict(
            parents=parents,
            prev=prev,
            next=next,
            title=title,
            meta=meta,
            body=body,
            metatags=metatags,
            rellinks=rellinks,
            sourcename=sourcename,
            toc=toc,
            # only display a TOC if there's more than one item to show
            display_toc=(self.env.toc_num_entries[docname] > 1),
        )

    def write_doc(self, docname, doctree):
        """Render one doctree to HTML and hand it to handle_page()."""
        destination = StringOutput(encoding='utf-8')
        doctree.settings = self.docsettings

        self.secnumbers = self.env.toc_secnumbers.get(docname, {})
        self.imgpath = relative_uri(self.get_target_uri(docname), '_images')
        self.post_process_images(doctree)
        self.dlpath = relative_uri(self.get_target_uri(docname),
                                   '_downloads')
        self.docwriter.write(doctree, destination)
        self.docwriter.assemble_parts()
        body = self.docwriter.parts['fragment']
        metatags = self.docwriter.clean_meta

        ctx = self.get_doc_context(docname, body, metatags)
        self.index_page(docname, doctree, ctx.get('title', ''))
        self.handle_page(docname, ctx, event_arg=doctree)

    def finish(self):
        """Write indices, search/extra pages and copy images/downloads."""
        self.info(bold('writing additional files...'), nonl=1)

        # the global general index
        if self.config.html_use_index:
            # the total count of lines for each index letter, used to
            # distribute the entries into two columns
            genindex = self.env.create_index(self)
            indexcounts = []
            for _, entries in genindex:
                indexcounts.append(
                    sum(1 + len(subitems) for _, (_, subitems) in entries))

            genindexcontext = dict(
                genindexentries=genindex,
                genindexcounts=indexcounts,
                split_index=self.config.html_split_index,
            )
            self.info(' genindex', nonl=1)

            if self.config.html_split_index:
                self.handle_page('genindex', genindexcontext,
                                 'genindex-split.html')
                self.handle_page('genindex-all', genindexcontext,
                                 'genindex.html')
                for (key, entries), count in zip(genindex, indexcounts):
                    ctx = {
                        'key': key,
                        'entries': entries,
                        'count': count,
                        'genindexentries': genindex
                    }
                    self.handle_page('genindex-' + key, ctx,
                                     'genindex-single.html')
            else:
                self.handle_page('genindex', genindexcontext,
                                 'genindex.html')

        # the global module index
        if self.config.html_use_modindex and self.env.modules:
            # the sorted list of all modules, for the global module index
            modules = sorted(
                ((mn, (self.get_relative_uri('modindex', fn) +
                       '#module-' + mn, sy, pl, dep))
                 for (mn, (fn, sy, pl, dep)) in
                 self.env.modules.iteritems()),
                key=lambda x: x[0].lower())
            # collect all platforms
            platforms = set()
            # sort out collapsable modules
            modindexentries = []
            letters = []
            pmn = ''
            num_toplevels = 0
            num_collapsables = 0
            cg = 0  # collapse group
            fl = ''  # first letter
            for mn, (fn, sy, pl, dep) in modules:
                pl = pl and pl.split(', ') or []
                platforms.update(pl)
                # strip configured common prefixes (longest first)
                ignore = self.env.config['modindex_common_prefix']
                ignore = sorted(ignore, key=len, reverse=True)
                for i in ignore:
                    if mn.startswith(i):
                        mn = mn[len(i):]
                        stripped = i
                        break
                else:
                    stripped = ''
                if fl != mn[0].lower() and mn[0] != '_':
                    # heading
                    letter = mn[0].upper()
                    if letter not in letters:
                        modindexentries.append(
                            ['', False, 0, False, letter, '', [], False, ''])
                        letters.append(letter)
                tn = mn.split('.')[0]
                if tn != mn:
                    # submodule
                    if pmn == tn:
                        # first submodule - make parent collapsable
                        modindexentries[-1][1] = True
                        num_collapsables += 1
                    elif not pmn.startswith(tn):
                        # submodule without parent in list, add dummy entry
                        cg += 1
                        modindexentries.append(
                            [tn, True, cg, False, '', '', [], False,
                             stripped])
                else:
                    num_toplevels += 1
                    cg += 1
                modindexentries.append(
                    [mn, False, cg, (tn != mn), fn, sy, pl, dep, stripped])
                pmn = mn
                fl = mn[0].lower()
            platforms = sorted(platforms)

            # apply heuristics when to collapse modindex at page load:
            # only collapse if number of toplevel modules is larger than
            # number of submodules
            collapse = len(modules) - num_toplevels < num_toplevels

            # As some parts of the module names may have been stripped,
            # those names have changed, thus it is necessary to sort the
            # entries.
            if ignore:
                def sorthelper(entry):
                    name = entry[0]
                    if name == '':
                        # heading
                        name = entry[4]
                    return name.lower()

                modindexentries.sort(key=sorthelper)
                letters.sort()

            modindexcontext = dict(
                modindexentries=modindexentries,
                platforms=platforms,
                letters=letters,
                collapse_modindex=collapse,
            )
            self.info(' modindex', nonl=1)
            self.handle_page('modindex', modindexcontext, 'modindex.html')

        # the search page
        if self.name != 'htmlhelp':
            self.info(' search', nonl=1)
            self.handle_page('search', {}, 'search.html')

        # additional pages from conf.py
        for pagename, template in self.config.html_additional_pages.items():
            self.info(' ' + pagename, nonl=1)
            self.handle_page(pagename, {}, template)

        if self.config.html_use_opensearch and self.name != 'htmlhelp':
            self.info(' opensearch', nonl=1)
            fn = path.join(self.outdir, '_static', 'opensearch.xml')
            self.handle_page('opensearch', {}, 'opensearch.xml',
                             outfilename=fn)

        self.info()

        # copy image files
        if self.images:
            self.info(bold('copying images...'), nonl=True)
            ensuredir(path.join(self.outdir, '_images'))
            for src, dest in self.images.iteritems():
                self.info(' ' + src, nonl=1)
                try:
                    copyfile(path.join(self.srcdir, src),
                             path.join(self.outdir, '_images', dest))
                except Exception, err:
                    # best-effort: one bad image must not abort the build
                    self.warn('cannot copy image file %r: %s' %
                              (path.join(self.srcdir, src), err))
            self.info()

        # copy downloadable files
        if self.env.dlfiles:
            self.info(bold('copying downloadable files...'), nonl=True)
            ensuredir(path.join(self.outdir, '_downloads'))
            for src, (_, dest) in self.env.dlfiles.iteritems():
                self.info(' ' + src, nonl=1)
                try:
                    copyfile(path.join(self.srcdir, src),
                             path.join(self.outdir, '_downloads', dest))
                except Exception, err:
                    self.warn('cannot copy downloadable file %r: %s' %
                              (path.join(self.srcdir, src), err))
            self.info()
def build_finished(app, exception):
    """
    Output YAML on the file system.

    Walks the module/class/function YAML collected during the build,
    merges docstring field data into it, writes one ``<uid>.yml`` per
    object plus ``toc.yml`` and ``index.yml`` under the API root.
    """

    # Used to get rid of the uidname field for cleaner toc file.
    def sanitize_uidname_field(toc_yaml):
        for module in toc_yaml:
            if 'items' in module:
                sanitize_uidname_field(module['items'])
            module.pop('uidname')

    # Parses the package name and returns package name and module name.
    def find_package_name(package_name):
        for name in package_name:
            if name != "google" and name != "cloud":
                return [name, package_name[-1]]

    # Used to disambiguate names that have same entries.
    def disambiguate_toc_name(toc_yaml):
        names = {}
        for module in toc_yaml:
            names[module['name']] = 1 if module['name'] not in names else 2
            if 'items' in module:
                disambiguate_toc_name(module['items'])
        for module in toc_yaml:
            if names[module['name']] > 1:
                module['name'] = ".".join(
                    find_package_name(module['uidname'].split(".")))

    # Depth-first search for the TOC entry with the given uidname.
    def find_node_in_toc_tree(toc_yaml, to_add_node):
        for module in toc_yaml:
            if module['uidname'] == to_add_node:
                return module
            if 'items' in module:
                items = module['items']
                found_module = find_node_in_toc_tree(items, to_add_node)
                if found_module != None:
                    return found_module
        return None

    # A module whose source is an __init__.py, or that contains child
    # modules, is really a package.
    def convert_module_to_package_if_needed(obj):
        if 'source' in obj and 'path' in obj['source'] and obj['source'][
                'path']:
            if obj['source']['path'].endswith(INITPY):
                obj['type'] = 'package'
                return
        for child_uid in obj['children']:
            if child_uid in app.env.docfx_info_uid_types:
                child_uid_type = app.env.docfx_info_uid_types[child_uid]
                if child_uid_type == MODULE:
                    obj['type'] = 'package'
                    return

    normalized_outdir = os.path.normpath(
        os.path.join(
            app.builder.outdir,  # Output Directory for Builder
            API_ROOT,
        ))
    ensuredir(normalized_outdir)

    toc_yaml = []
    # Used to record filenames dumped to avoid confliction
    # caused by Windows case insensitive file system
    file_name_set = set()

    # Order matters here, we need modules before lower level classes,
    # so that we can make sure to inject the TOC properly
    for data_set in (app.env.docfx_yaml_modules,
                     app.env.docfx_yaml_classes,
                     app.env.docfx_yaml_functions):  # noqa
        for uid, yaml_data in iter(sorted(data_set.items())):
            if not uid:
                # Skip objects without a module
                continue
            references = []

            # Merge module data with class data
            for obj in yaml_data:
                arg_params = obj.get('syntax', {}).get('parameters', [])
                if (len(arg_params) > 0 and 'id' in arg_params[0]
                        and arg_params[0]['id'] == 'self'):
                    # Support having `self` as an arg param, but not
                    # documented
                    arg_params = arg_params[1:]
                    obj['syntax']['parameters'] = arg_params
                if obj['uid'] in app.env.docfx_info_field_data and \
                        obj['type'] == app.env.docfx_info_field_data[
                            obj['uid']]['type']:
                    # Avoid entities with same uid and diff type.
                    del (app.env.docfx_info_field_data[obj['uid']]['type']
                         )  # Delete `type` temporarily
                    if 'syntax' not in obj:
                        obj['syntax'] = {}
                    merged_params = []
                    if 'parameters' in app.env.docfx_info_field_data[
                            obj['uid']]:
                        doc_params = app.env.docfx_info_field_data[
                            obj['uid']].get('parameters', [])
                        if arg_params and doc_params:
                            if len(arg_params) - len(doc_params) > 0:
                                app.warn(
                                    "Documented params don't match size of params:"
                                    " {}".format(obj['uid']))
                            # Zip 2 param lists until the long one is
                            # exhausted
                            for args, docs in zip_longest(arg_params,
                                                          doc_params,
                                                          fillvalue={}):
                                if len(args) == 0:
                                    merged_params.append(docs)
                                else:
                                    args.update(docs)
                                    merged_params.append(args)
                    obj['syntax'].update(
                        app.env.docfx_info_field_data[obj['uid']])
                    if merged_params:
                        obj['syntax']['parameters'] = merged_params

                    if 'parameters' in obj['syntax'] and obj[
                            'type'] == 'method':
                        # Params without a default are treated as required.
                        for args in obj['syntax']['parameters']:
                            if 'isRequired' not in args and 'defaultValue' not in args:
                                args['isRequired'] = True

                    # Raise up summary
                    if 'summary' in obj['syntax'] and obj['syntax']['summary']:
                        obj['summary'] = obj['syntax'].pop('summary').strip(
                            " \n\r\r")

                    # Raise up remarks
                    if 'remarks' in obj['syntax'] and obj['syntax']['remarks']:
                        obj['remarks'] = obj['syntax'].pop('remarks')

                    # Raise up seealso
                    if 'seealso' in obj['syntax'] and obj['syntax']['seealso']:
                        obj['seealsoContent'] = obj['syntax'].pop('seealso')

                    # Raise up example
                    if 'example' in obj['syntax'] and obj['syntax']['example']:
                        obj.setdefault('example', []).append(
                            obj['syntax'].pop('example'))

                    # Raise up exceptions
                    if 'exceptions' in obj['syntax'] and obj['syntax'][
                            'exceptions']:
                        obj['exceptions'] = obj['syntax'].pop('exceptions')

                    # Raise up references
                    if 'references' in obj['syntax'] and obj['syntax'][
                            'references']:
                        obj.setdefault('references', []).extend(
                            obj['syntax'].pop('references'))

                    # add content of temp list 'added_attribute' to children
                    # and yaml_data
                    if 'added_attribute' in obj['syntax'] and obj['syntax'][
                            'added_attribute']:
                        added_attribute = obj['syntax'].pop('added_attribute')
                        for attrData in added_attribute:
                            existed_Data = next(
                                (n for n in yaml_data
                                 if n['uid'] == attrData['uid']), None)
                            if existed_Data:
                                # Update data for already existed one which
                                # has attribute comment in source file
                                existed_Data.update(attrData)
                            else:
                                obj.get('children', []).append(
                                    attrData['uid'])
                                yaml_data.append(attrData)
                                if 'class' in attrData:
                                    # Get parent for attrData of Non enum
                                    # class
                                    parent = attrData['class']
                                else:
                                    # Get parent for attrData of enum class
                                    parent = attrData['parent']
                                obj['references'].append(
                                    _create_reference(attrData, parent))
                    app.env.docfx_info_field_data[obj['uid']]['type'] = obj[
                        'type']  # Revert `type` for other objects to use

                if 'references' in obj:
                    # Ensure that references have no duplicate ref
                    ref_uids = [r['uid'] for r in references]
                    for ref_obj in obj['references']:
                        if ref_obj['uid'] not in ref_uids:
                            references.append(ref_obj)
                    obj.pop('references')

                if obj['type'] == 'module':
                    convert_module_to_package_if_needed(obj)

                if obj['type'] == 'method':
                    # Update the name to use shorter name to show
                    obj['name'] = obj['source']['id']

                # To distinguish distribution package and import package
                if obj.get('type', '') == 'package' and obj.get(
                        'kind', '') != 'distribution':
                    obj['kind'] = 'import'

                # NOTE(review): remove_inheritance_for_notfound_class is a
                # global that may be undefined -- the NameError guard makes
                # this whole pruning step optional. Confirm this is the
                # intended feature toggle mechanism.
                try:
                    if remove_inheritance_for_notfound_class:
                        if 'inheritance' in obj:
                            python_sdk_name = obj['uid'].split('.')[0]
                            obj['inheritance'] = [
                                n for n in obj['inheritance']
                                if not n['type'].startswith(python_sdk_name)
                                or n['type'] in app.env.docfx_info_uid_types
                            ]
                            if not obj['inheritance']:
                                obj.pop('inheritance')
                except NameError:
                    pass

                if 'source' in obj and (not obj['source']['remote']['repo'] or \
                        obj['source']['remote']['repo'] == 'https://apidrop.visualstudio.com/Content%20CI/_git/ReferenceAutomation'):
                    del (obj['source'])

            # Output file
            if uid.lower() in file_name_set:
                # case-insensitive clash: qualify filename with the type
                filename = uid + "(%s)" % app.env.docfx_info_uid_types[uid]
            else:
                filename = uid
            out_file = os.path.join(normalized_outdir, '%s.yml' % filename)
            ensuredir(os.path.dirname(out_file))
            if app.verbosity >= 1:
                app.info(
                    bold('[docfx_yaml] ') +
                    darkgreen('Outputting %s' % filename))

            with open(out_file, 'w') as out_file_obj:
                out_file_obj.write('### YamlMime:UniversalReference\n')
                try:
                    dump(
                        {
                            'items': yaml_data,
                            'references': references,
                            'api_name': [],  # Hack around docfx YAML
                        },
                        out_file_obj,
                        default_flow_style=False)
                except Exception as e:
                    raise ValueError(
                        "Unable to dump object\n{0}".format(yaml_data)) from e

            file_name_set.add(filename)

            # Parse the name of the object.
            # Some types will need additional parsing to de-duplicate their
            # names and contain a portion of their parent name for better
            # disambiguation. This is done in disambiguate_toc_name
            node_name = obj.get('class').split(".")[-1] if obj.get(
                'class') else obj['name']

            # Build nested TOC
            if uid.count('.') >= 1:
                parent_level = '.'.join(uid.split('.')[:-1])
                found_node = find_node_in_toc_tree(toc_yaml, parent_level)

                if found_node:
                    found_node.pop('uid', 'No uid found')
                    found_node.setdefault('items', [{
                        'name': 'Overview',
                        'uidname': parent_level,
                        'uid': parent_level
                    }]).append({
                        'name': node_name,
                        'uidname': uid,
                        'uid': uid
                    })
                else:
                    toc_yaml.append({
                        'name': node_name,
                        'uidname': uid,
                        'uid': uid
                    })
            else:
                toc_yaml.append({
                    'name': node_name,
                    'uidname': uid,
                    'uid': uid
                })

    if len(toc_yaml) == 0:
        raise RuntimeError("No documentation for this module.")

    # Perform additional disambiguation of the name
    disambiguate_toc_name(toc_yaml)

    # Keeping uidname field carrys over onto the toc.yaml files, we need to
    # be keep using them but don't need them in the actual file
    toc_yaml_with_uid = copy.deepcopy(toc_yaml)

    sanitize_uidname_field(toc_yaml)

    toc_file = os.path.join(normalized_outdir, 'toc.yml')
    with open(toc_file, 'w') as writable:
        writable.write(
            dump(
                [{
                    'name': app.config.project,
                    'items': [{
                        'name': 'Overview',
                        'uid': 'project-' + app.config.project
                    }] + toc_yaml
                }],
                default_flow_style=False,
            ))

    index_file = os.path.join(normalized_outdir, 'index.yml')
    index_children = []
    index_references = []
    for item in toc_yaml_with_uid:
        index_children.append(item.get('uidname', ''))
        index_references.append({
            'uid': item.get('uidname', ''),
            'name': item.get('name', ''),
            'fullname': item.get('uidname', ''),
            'isExternal': False
        })
    with open(index_file, 'w') as index_file_obj:
        index_file_obj.write('### YamlMime:UniversalReference\n')
        dump(
            {
                'items': [{
                    'uid': 'project-' + app.config.project,
                    'name': app.config.project,
                    'fullName': app.config.project,
                    'langs': ['python'],
                    'type': 'package',
                    'kind': 'distribution',
                    'summary': '',
                    'children': index_children
                }],
                'references': index_references
            },
            index_file_obj,
            default_flow_style=False)
def render_math(self, math):
    """
    Render the LaTeX math expression *math* using latex and dvipng.

    Return the filename relative to the built document and the "depth",
    that is, the distance of image bottom and baseline in pixels, if the
    option to use preview_latex is switched on.

    Error handling may seem strange, but follows a pattern: if LaTeX or
    dvipng aren't available, only a warning is generated (since that
    enables people on machines without these programs to at least build
    the rest of the docs successfully).  If the programs are there,
    however, they may not fail since that indicates a problem in the
    math source.
    """
    use_preview = self.builder.config.pngmath_use_preview
    # Content-addressed output name: identical formulas share one image file.
    shasum = "%s.png" % sha(math).hexdigest()
    relfn = posixpath.join(self.builder.imgpath, 'math', shasum)
    outfn = path.join(self.builder.outdir, '_images', 'math', shasum)

    # Assemble the complete .tex document: header + user preamble + body
    # (the preview body adds baseline-depth tracking).
    latex = DOC_HEAD + self.builder.config.pngmath_latex_preamble
    latex += (use_preview and DOC_BODY_PREVIEW or DOC_BODY) % math
    if isinstance(latex, unicode):
        latex = latex.encode('utf-8')

    # use only one tempdir per build -- the use of a directory is cleaner
    # than using temporary files, since we can clean up everything at once
    # just removing the whole directory (see cleanup_tempdir)
    if not hasattr(self.builder, '_mathpng_tempdir'):
        tempdir = self.builder._mathpng_tempdir = tempfile.mkdtemp()
    else:
        tempdir = self.builder._mathpng_tempdir

    tf = open(path.join(tempdir, 'math.tex'), 'w')
    tf.write(latex)
    tf.close()

    # build latex command; old versions of latex don't have the
    # --output-directory option, so we have to manually chdir to the
    # temp dir to run it.
    ltx_args = shlex.split(self.builder.config.pngmath_latex)
    ltx_args += ['--interaction=nonstopmode', 'math.tex']

    curdir = getcwd()
    chdir(tempdir)

    try:
        try:
            p = Popen(ltx_args, stdout=PIPE, stderr=PIPE)
        except OSError, err:
            if err.errno != 2:   # No such file or directory
                raise
            # Missing latex binary: warn once per build, then degrade
            # gracefully (no image, no depth).
            if not hasattr(self.builder, '_mathpng_warned_latex'):
                self.builder.warn('LaTeX command %r cannot be run (needed for math '
                                  'display), check the pngmath_latex setting' %
                                  self.builder.config.pngmath_latex)
                self.builder._mathpng_warned_latex = True
            return relfn, None
    finally:
        # Always restore the working directory, even on spawn failure.
        chdir(curdir)

    stdout, stderr = p.communicate()
    if p.returncode != 0:
        # latex exists but rejected the input -> error in the math source.
        raise MathExtError('latex exited with error:\n[stderr]\n%s\n[stdout]\n%s'
                           % (stderr, stdout))

    ensuredir(path.dirname(outfn))
    # use some standard dvipng arguments
    dvipng_args = shlex.split(self.builder.config.pngmath_dvipng)
    dvipng_args += ['-o', outfn, '-T', 'tight', '-z9']
    # add custom ones from config value
    dvipng_args.extend(self.builder.config.pngmath_dvipng_args)
    if use_preview:
        # Makes dvipng report the baseline depth on stdout.
        dvipng_args.append('--depth')
    # last, the input file name
    dvipng_args.append(path.join(tempdir, 'math.dvi'))
    try:
        p = Popen(dvipng_args, stdout=PIPE, stderr=PIPE)
    except OSError, err:
        if err.errno != 2:   # No such file or directory
            raise
        # Missing dvipng binary: same warn-once-and-degrade pattern as above.
        if not hasattr(self.builder, '_mathpng_warned_dvipng'):
            self.builder.warn('dvipng command %r cannot be run (needed for math '
                              'display), check the pngmath_dvipng setting' %
                              self.builder.config.pngmath_dvipng)
            self.builder._mathpng_warned_dvipng = True
        return relfn, None
def libreoffice_render(app, doctree):
    """
    Render LibreOffice drawing (doctree-read callback).

    For every figure node carrying a ``libreoffice`` attribute, convert the
    referenced drawing into the output format selected for the current
    builder (``DEFAULT_FORMATS`` overridden by the ``libreoffice_format``
    config value) and point the contained image nodes at the result.
    """
    for figure in doctree.traverse(nodes.figure):
        if not hasattr(figure, 'libreoffice'):
            continue

        drawing = figure.libreoffice['drawing']
        options = figure.libreoffice['options']

        # Check for LibreOffice; without it, fall back to a literal block
        # showing the drawing reference so the build can still finish.
        if not app.builder.config.libreoffice_binary:
            app.builder.warn('Unable to find a LibreOffice installation')
            figure.replace_self(nodes.literal_block(drawing, drawing))
            continue

        # Update formats with configuration
        format_map = DEFAULT_FORMATS.copy()
        format_map.update(app.builder.config.libreoffice_format)

        # Setup paths
        inp_fn_abs = app.builder.env.relfn2path(drawing)[1]
        inp_fn_base, _ = os.path.splitext(os.path.basename(drawing))
        out_fext = format_map[app.builder.format]
        out_fn = '%s.%s' % (inp_fn_base, out_fext)
        if app.builder.format == 'html':
            imgpath = relative_uri(app.builder.env.docname, '_images')
            out_fn_rel = posixpath.join(imgpath, out_fn)
            out_dir = os.path.join(app.builder.outdir, '_images')
            out_fn_abs = os.path.join(out_dir, out_fn)
        else:
            if app.builder.format != 'latex':
                app.builder.warn('libreoffice: the builder format %s '
                                 'is not officially supported.'
                                 % app.builder.format)
            out_fn_rel = out_fn
            out_dir = app.builder.outdir
            out_fn_abs = os.path.join(out_dir, out_fn)
        ensuredir(out_dir)

        # Run LibreOffice.
        # A solution to run LibreOffice when another instance is already
        # running is to use a unique UserInstallation folder.  More info at:
        # https://www.libreoffice.org/bugzilla/show_bug.cgi?id=37531
        tmp_folder = None
        if os.name != 'nt':
            tmp_folder = tempfile.mkdtemp()
            tmp_path = 'file://' + tmp_folder
        else:
            tmp_path = '$SYSUSERCONFIG/tmp'
        try:
            call([app.builder.config.libreoffice_binary,
                  '--headless',
                  '-env:UserInstallation=' + tmp_path,
                  '--convert-to', out_fext,
                  '--outdir', out_dir,
                  inp_fn_abs],
                 stdout=PIPE, stderr=PIPE)
        finally:
            # BUGFIX: remove the throwaway profile even when the conversion
            # call raises, so failed builds do not leak temp directories.
            if tmp_folder is not None:
                shutil.rmtree(tmp_folder)

        # Crop white borders (raster images only)
        if options.get('autocrop') and out_fext not in ('pdf', 'svg'):
            im = Image.open(out_fn_abs)
            im.load()
            im_box = ImageOps.invert(im).getbbox()
            im = im.crop(im_box)
            im.save(out_fn_abs)

        # Get (w, h) - required to make :scale: work without indicating
        # (w, h).  Vector/PDF output has no pixel size, so leave it unset.
        out_size = None
        if out_fext not in ('pdf', 'svg'):
            im = Image.open(out_fn_abs)
            im.load()
            out_size = im.size

        # Fill image information
        for image in figure.traverse(nodes.image):
            image['uri'] = out_fn_rel
            # BUGFIX: previously a bare ``except: pass`` silently swallowed
            # the NameError raised when no pixel size was computed (pdf/svg
            # output); set the attributes only when a size is actually known.
            if out_size is not None:
                image['width'] = str(out_size[0])
                image['height'] = str(out_size[1])
def build_finished(app, exception):
    """
    Output YAML on the file system.

    Writes one ``<module>.yml`` per documented module/class file (merging the
    collected docstring field data into the autogenerated API data first) and
    a nested ``toc.yml``, all under ``<outdir>/<API_ROOT>``.
    """
    def find_node_in_toc_tree(toc_yaml, to_add_node):
        # Depth-first search for an entry named `to_add_node` in the nested
        # TOC structure (each node may carry child nodes under 'items').
        for module in toc_yaml:
            if module['name'] == to_add_node:
                return module
            if 'items' in module:
                items = module['items']
                found_module = find_node_in_toc_tree(items, to_add_node)
                if found_module != None:
                    return found_module
        return None

    def convert_module_to_package_if_needed(obj):
        # A module becomes a 'package' when it is backed by an __init__.py
        # or when any of its children is itself a module.
        if 'source' in obj and 'path' in obj['source'] and obj['source']['path']:
            if obj['source']['path'].endswith(INITPY):
                obj['type'] = 'package'
                return
        for child_uid in obj['children']:
            if child_uid in app.env.docfx_info_uid_types:
                child_uid_type = app.env.docfx_info_uid_types[child_uid]
                if child_uid_type == MODULE:
                    obj['type'] = 'package'
                    return

    normalized_outdir = os.path.normpath(os.path.join(
        app.builder.outdir,  # Output Directory for Builder
        API_ROOT,
    ))
    ensuredir(normalized_outdir)

    toc_yaml = []
    # Order matters here, we need modules before lower level classes,
    # so that we can make sure to inject the TOC properly
    for data_set in (app.env.docfx_yaml_modules,
                     app.env.docfx_yaml_classes):  # noqa
        for filename, yaml_data in iter(sorted(data_set.items())):
            if not filename:
                # Skip objects without a module
                continue
            references = []

            # Merge module data with class data
            for obj in yaml_data:
                if obj['uid'] in app.env.docfx_info_field_data:
                    if 'syntax' not in obj:
                        obj['syntax'] = {}
                    merged_params = []
                    if 'parameters' in app.env.docfx_info_field_data[obj['uid']]:
                        # Merge signature-derived params with docstring params.
                        arg_params = obj['syntax'].get('parameters', [])
                        doc_params = app.env.docfx_info_field_data[
                            obj['uid']].get('parameters', [])
                        if arg_params and doc_params:
                            if len(arg_params) - len(doc_params) > 1:
                                app.warn(
                                    "Documented params don't match size of params:"
                                    " {}".format(obj['uid']))
                            if ('id' in arg_params[0]
                                    and arg_params[0]['id'] == 'self'):
                                # Support having `self` as an arg param, but not documented
                                arg_params = arg_params[1:]
                            # Zip 2 param lists until the long one is exhausted
                            for args, docs in zip_longest(arg_params, doc_params,
                                                          fillvalue={}):
                                args.update(docs)
                                merged_params.append(args)
                    obj['syntax'].update(app.env.docfx_info_field_data[obj['uid']])
                    if merged_params:
                        obj['syntax']['parameters'] = merged_params
                    # Raise up summary
                    if 'summary' in obj['syntax'] and obj['syntax']['summary']:
                        obj['summary'] = obj['syntax'].pop('summary')
                    # Raise up seealso
                    if 'seealso' in obj['syntax'] and obj['syntax']['seealso']:
                        obj['seealsoContent'] = obj['syntax'].pop('seealso')
                    # Raise up example
                    if 'example' in obj['syntax'] and obj['syntax']['example']:
                        obj.setdefault('example', []).append(
                            obj['syntax'].pop('example'))
                    # Raise up exceptions
                    if 'exceptions' in obj['syntax'] and obj['syntax']['exceptions']:
                        obj['exceptions'] = obj['syntax'].pop('exceptions')
                    # Raise up references
                    if 'references' in obj['syntax'] and obj['syntax']['references']:
                        obj.setdefault('references', []).extend(
                            obj['syntax'].pop('references'))
                    # add content of temp list 'added_attribute' to children and yaml_data
                    if 'added_attribute' in obj['syntax'] and \
                            obj['syntax']['added_attribute']:
                        added_attribute = obj['syntax'].pop('added_attribute')
                        for attrData in added_attribute:
                            existed_Data = next(
                                (n for n in yaml_data
                                 if n['uid'] == attrData['uid']), None)
                            if existed_Data:
                                # Update data for already existed one which has
                                # attribute comment in source file
                                existed_Data.update(attrData)
                            else:
                                # NOTE(review): if obj has no 'children' key this
                                # appends to a fresh throwaway list — the child uid
                                # is silently dropped; confirm whether 'children'
                                # is always present here.
                                obj.get('children', []).append(attrData['uid'])
                                yaml_data.append(attrData)
                                if 'class' in attrData:
                                    # Get parent for attrData of Non enum class
                                    parent = attrData['class']
                                else:
                                    # Get parent for attrData of enum class
                                    parent = attrData['parent']
                                obj['references'].append(
                                    _create_reference(attrData, parent))
                if 'references' in obj:
                    # Ensure that references have no duplicate ref
                    ref_uids = [r['uid'] for r in references]
                    for ref_obj in obj['references']:
                        if ref_obj['uid'] not in ref_uids:
                            references.append(ref_obj)
                    obj.pop('references')

                if obj['type'] == 'module':
                    convert_module_to_package_if_needed(obj)

                # Optional feature guarded by a possibly-undefined module-level
                # flag; NameError means the flag was never configured.
                try:
                    if remove_inheritance_for_notfound_class:
                        if 'inheritance' in obj:
                            # Drop same-SDK base classes that were not
                            # themselves documented in this build.
                            python_sdk_name = obj['uid'].split('.')[0]
                            obj['inheritance'] = [
                                n for n in obj['inheritance']
                                if not n['type'].startswith(python_sdk_name)
                                or n['type'] in app.env.docfx_info_uid_types
                            ]
                            if not obj['inheritance']:
                                obj.pop('inheritance')
                except NameError:
                    pass

            # Output file
            out_file = os.path.join(normalized_outdir, '%s.yml' % filename)
            ensuredir(os.path.dirname(out_file))
            if app.verbosity >= 1:
                app.info(bold('[docfx_yaml] ') +
                         darkgreen('Outputting %s' % filename))

            with open(out_file, 'w') as out_file_obj:
                out_file_obj.write('### YamlMime:UniversalReference\n')
                dump(
                    {
                        'items': yaml_data,
                        'references': references,
                        'api_name': [],  # Hack around docfx YAML
                    },
                    out_file_obj,
                    default_flow_style=False)

            # Build nested TOC
            if filename.count('.') >= 1:
                parent_level = '.'.join(filename.split('.')[:-1])
                found_node = find_node_in_toc_tree(toc_yaml, parent_level)
                if found_node:
                    found_node.setdefault('items', []).append({
                        'name': filename,
                        'uid': filename
                    })
                else:
                    toc_yaml.append({'name': filename, 'uid': filename})
            else:
                toc_yaml.append({'name': filename, 'uid': filename})

    toc_file = os.path.join(normalized_outdir, 'toc.yml')
    with open(toc_file, 'w') as writable:
        writable.write(dump(
            toc_yaml,
            default_flow_style=False,
        ))
def write(self, *ignored):
    """Build the "what's new" change summary for the configured version.

    Sorts each recorded versionchange into C-API changes, library changes
    (grouped per module) or other changes, renders the frameset/changes
    pages, and copies every source file as highlighted HTML.
    """
    version = self.config.version
    libchanges = {}
    apichanges = []
    otherchanges = {}
    if version not in self.env.versionchanges:
        self.info(bold('no changes in version %s.' % version))
        return
    self.info(bold('writing summary file...'))
    # NOTE: `type` here shadows the builtin for the duration of the loop.
    for type, docname, lineno, module, descname, content in \
            self.env.versionchanges[version]:
        if isinstance(descname, tuple):
            descname = descname[0]
        ttext = self.typemap[type]
        context = content.replace('\n', ' ')
        if descname and docname.startswith('c-api'):
            # NOTE(review): this inner check is unreachable — the enclosing
            # condition already requires a truthy descname.
            if not descname:
                continue
            if context:
                entry = '<b>%s</b>: <i>%s:</i> %s' % (descname, ttext, context)
            else:
                entry = '<b>%s</b>: <i>%s</i>.' % (descname, ttext)
            apichanges.append((entry, docname, lineno))
        elif descname or module:
            if not module:
                module = _('Builtins')
            if not descname:
                descname = _('Module level')
            if context:
                entry = '<b>%s</b>: <i>%s:</i> %s' % (descname, ttext, context)
            else:
                entry = '<b>%s</b>: <i>%s</i>.' % (descname, ttext)
            libchanges.setdefault(module, []).append((entry, docname, lineno))
        else:
            # Neither a description name nor a module: file under "other",
            # keyed by the source document.
            if not context:
                continue
            entry = '<i>%s:</i> %s' % (ttext.capitalize(), context)
            title = self.env.titles[docname].astext()
            otherchanges.setdefault((docname, title), []).append(
                (entry, docname, lineno))

    ctx = {
        'project': self.config.project,
        'version': version,
        'docstitle': self.config.html_title,
        'shorttitle': self.config.html_short_title,
        'libchanges': sorted(libchanges.iteritems()),
        'apichanges': sorted(apichanges),
        'otherchanges': sorted(otherchanges.iteritems()),
        'show_copyright': self.config.html_show_copyright,
        'show_sphinx': self.config.html_show_sphinx,
    }
    f = codecs.open(path.join(self.outdir, 'index.html'), 'w', 'utf8')
    try:
        f.write(self.templates.render('changes/frameset.html', ctx))
    finally:
        f.close()
    f = codecs.open(path.join(self.outdir, 'changes.html'), 'w', 'utf8')
    try:
        f.write(self.templates.render('changes/versionchanges.html', ctx))
    finally:
        f.close()

    # Directive markers that should be highlighted in the copied sources.
    hltext = ['.. versionadded:: %s' % version,
              '.. versionchanged:: %s' % version,
              '.. deprecated:: %s' % version]

    def hl(no, line):
        # Anchor every line; wrap it in a highlight span when it carries one
        # of the version directives above.
        line = '<a name="L%s"> </a>' % no + escape(line)
        for x in hltext:
            if x in line:
                line = '<span class="hl">%s</span>' % line
                break
        return line

    self.info(bold('copying source files...'))
    for docname in self.env.all_docs:
        f = codecs.open(self.env.doc2path(docname), 'r', 'latin1')
        lines = f.readlines()
        targetfn = path.join(self.outdir, 'rst', os_path(docname)) + '.html'
        ensuredir(path.dirname(targetfn))
        f = codecs.open(targetfn, 'w', 'latin1')
        try:
            text = ''.join(hl(i+1, line) for (i, line) in enumerate(lines))
            ctx = {
                'filename': self.env.doc2path(docname, None),
                'text': text
            }
            f.write(self.templates.render('changes/rstsource.html', ctx))
        finally:
            f.close()
    # Render the theme stylesheet template with theme_* options, and copy
    # the static base stylesheet alongside it.
    themectx = dict(('theme_' + key, val) for (key, val) in
                    self.theme.get_options({}).iteritems())
    copy_static_entry(path.join(package_dir, 'themes', 'default', 'static',
                                'default.css_t'),
                      path.join(self.outdir, 'default.css_t'), self, themectx)
    copy_static_entry(path.join(package_dir, 'themes', 'basic', 'static',
                                'basic.css'),
                      path.join(self.outdir, 'basic.css'), self)
default="index", help=__("master document name")) args = parser.parse_args(sys.argv[1:]) rootpath = path.abspath(args.module_path) # normalize opts if args.header is None: args.header = rootpath.split(path.sep)[-1] if args.suffix.startswith("."): args.suffix = args.suffix[1:] if not path.isdir(rootpath): print(__(f"{rootpath} is not a directory."), file=sys.stderr) sys.exit(1) if not args.dryrun: ensuredir(args.destdir) excludes = [path.abspath(exclude) for exclude in args.exclude_pattern] modules = recurse_tree(rootpath, excludes, args, args.templatedir) template_values = { "top_modules": [{ "path": f"api/{module}", "caption": module.split(".")[1].title() } for module in modules if module.count(".") == 1], "maxdepth": args.maxdepth, } write_master_file(templatedir=args.templatedir, master_name=args.master, values=template_values, opts=args)
def builder_inited(app):
    """Prepare the bokeh-plot auxiliary directory and file registry.

    Creates ``<doctreedir>/bokeh_plot`` (e.g.
    sphinx/_build/doctrees/bokeh_plot) and makes sure the environment carries
    a ``bokeh_plot_files`` mapping that survives incremental rebuilds.
    """
    auxdir = join(app.env.doctreedir, 'bokeh_plot')
    ensuredir(auxdir)
    app.env.bokeh_plot_auxdir = auxdir
    # Reuse the existing mapping on incremental builds; start empty otherwise.
    app.env.bokeh_plot_files = getattr(app.env, 'bokeh_plot_files', {})
def generate_autosummary_docs(sources, output_dir=None, suffix='.rst', warn=_simple_warn, info=_simple_info, base_path=None, builder=None, template_dir=None): showed_sources = list(sorted(sources)) if len(showed_sources) > 20: showed_sources = showed_sources[:10] + ['...'] + showed_sources[-10:] info('[autosummary] generating autosummary for: %s' % ', '.join(showed_sources)) if output_dir: info('[autosummary] writing to %s' % output_dir) if base_path is not None: sources = [os.path.join(base_path, filename) for filename in sources] # create our own templating environment template_dirs = [os.path.join(os.path.dirname(__file__), template_dir)] if builder is not None: # allow the user to override the templates template_loader = BuiltinTemplateLoader() template_loader.init(builder, dirs=template_dirs) else: if template_dir: template_dirs.insert(0, template_dir) template_loader = FileSystemLoader(template_dirs) template_env = SandboxedEnvironment(loader=template_loader) # read items = find_autosummary_in_files(sources) # remove possible duplicates items = dict([(item, True) for item in items]).keys() # keep track of new files new_files = [] # write for name, path, template_name in sorted(items): if path is None: # The corresponding autosummary:: directive did not have # a :toctree: option continue path = output_dir or os.path.abspath(path) ensuredir(path) try: obj, name = import_by_name(name) except ImportError, e: warn('[autosummary] failed to import %r: %s' % (name, e)) continue fn = os.path.join(path, name + suffix) # skip it if it exists if os.path.isfile(fn): continue new_files.append(fn) f = open(fn, 'w') try: doc = get_documenter(obj) if template_name is not None: template = template_env.get_template(template_name) else: try: template = template_env.get_template('autosummary/%s.rst' % doc.objtype) except TemplateNotFound: template = template_env.get_template( 'autosummary/base.rst') def get_members(obj, typ, include_public=[]): items = [ # filter by file content 
!!!! name for name in dir(obj) if get_documenter(getattr(obj, name)).objtype == typ \ and _is_from_same_file(getattr(obj, name), obj) ] public = [ x for x in items if x in include_public or not x.startswith('_') ] return public, items ns = {} if doc.objtype == 'module': ns['members'] = dir(obj) ns['functions'], ns['all_functions'] = \ get_members(obj, 'function') ns['classes'], ns['all_classes'] = \ get_members(obj, 'class') ns['exceptions'], ns['all_exceptions'] = \ get_members(obj, 'exception') ns['methods'], ns['all_methods'] = \ get_members(obj, 'method') elif doc.objtype == 'class': ns['members'] = dir(obj) ns['methods'], ns['all_methods'] = \ get_members(obj, 'method', ['__init__']) ns['attributes'], ns['all_attributes'] = \ get_members(obj, 'attribute') parts = name.split('.') if doc.objtype in ('method', 'attribute'): mod_name = '.'.join(parts[:-2]) cls_name = parts[-2] obj_name = '.'.join(parts[-2:]) ns['class'] = cls_name else: mod_name, obj_name = '.'.join(parts[:-1]), parts[-1] ns['fullname'] = name ns['module'] = mod_name ns['objname'] = obj_name ns['name'] = parts[-1] ns['objtype'] = doc.objtype ns['underline'] = len(name) * '=' rendered = template.render(**ns) f.write(rendered) finally: f.close()
def render_drawio(self: SphinxTranslator, node: DrawIONode, in_filename: str,
                  output_format: str) -> str:
    """Render drawio file into an output image file.

    Returns the builder-relative path of the exported image.  The export is
    skipped entirely when an image for the same content hash already exists.
    """
    page_index = str(node.get("page-index", 0))

    # Hash only build-directory-independent inputs so the same drawing always
    # maps to the same output name (useful e.g. under pytest, which creates a
    # fresh build directory every run).  Any directive option that affects the
    # output belongs in this tuple.
    source_key = node["filename"].replace(self.builder.srcdir, "")
    digest = sha1("\n".join((source_key, page_index)).encode()).hexdigest()

    filename = "drawio-{}.{}".format(digest, output_format)
    file_path = posixpath.join(self.builder.imgpath, filename)
    out_file_path = os.path.join(self.builder.outdir, self.builder.imagedir,
                                 filename)
    # Cache hit: identical drawing/page already rendered.
    if os.path.isfile(out_file_path):
        return file_path
    ensuredir(os.path.dirname(out_file_path))

    config = self.builder.config
    if config.drawio_binary_path:
        binary_path = config.drawio_binary_path
    elif platform.system() == "Windows":
        binary_path = r"C:\Program Files\draw.io\draw.io.exe"
    else:
        binary_path = "/opt/draw.io/drawio"

    # In headless mode prefix the command with xvfb-run; the prefix must be
    # separate list items (an empty string argument would break the call).
    prefix = ["xvfb-run", "--auto-servernum"] if config.drawio_headless else []
    drawio_args = prefix + [
        binary_path,
        "--no-sandbox",
        "--export",
        "--page-index", page_index,
        "--format", output_format,
        "--output", out_file_path,
        in_filename,
    ]

    doc_name = node.get("doc_name", "index")
    cwd = os.path.dirname(os.path.join(self.builder.srcdir, doc_name))

    try:
        ret = subprocess.run(drawio_args, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, cwd=cwd, check=True)
        if not os.path.isfile(out_file_path):
            raise DrawIOError("draw.io did not produce an output file:"
                              "\n[stderr]\n{}\n[stdout]\n{}".format(
                                  ret.stderr, ret.stdout))
        return file_path
    except OSError as exc:
        raise DrawIOError("draw.io ({}) exited with error:\n{}".format(
            " ".join(drawio_args), exc))
    except subprocess.CalledProcessError as exc:
        raise DrawIOError("draw.io ({}) exited with error:\n[stderr]\n{}"
                          "\n[stdout]\n{}".format(" ".join(drawio_args),
                                                  exc.stderr, exc.stdout))
def finish(self): self.info(bold('writing additional files...'), nonl=1) # the global general index if self.config.html_use_index: # the total count of lines for each index letter, used to distribute # the entries into two columns genindex = self.env.create_index(self) indexcounts = [] for _, entries in genindex: indexcounts.append(sum(1 + len(subitems) for _, (_, subitems) in entries)) genindexcontext = dict( genindexentries = genindex, genindexcounts = indexcounts, split_index = self.config.html_split_index, ) self.info(' genindex', nonl=1) if self.config.html_split_index: self.handle_page('genindex', genindexcontext, 'genindex-split.html') self.handle_page('genindex-all', genindexcontext, 'genindex.html') for (key, entries), count in zip(genindex, indexcounts): ctx = {'key': key, 'entries': entries, 'count': count, 'genindexentries': genindex} self.handle_page('genindex-' + key, ctx, 'genindex-single.html') else: self.handle_page('genindex', genindexcontext, 'genindex.html') # the global module index if self.config.html_use_modindex and self.env.modules: # the sorted list of all modules, for the global module index modules = sorted(((mn, (self.get_relative_uri('modindex', fn) + '#module-' + mn, sy, pl, dep)) for (mn, (fn, sy, pl, dep)) in self.env.modules.iteritems()), key=lambda x: x[0].lower()) # collect all platforms platforms = set() # sort out collapsable modules modindexentries = [] letters = [] pmn = '' num_toplevels = 0 num_collapsables = 0 cg = 0 # collapse group fl = '' # first letter for mn, (fn, sy, pl, dep) in modules: pl = pl and pl.split(', ') or [] platforms.update(pl) ignore = self.env.config['modindex_common_prefix'] ignore = sorted(ignore, key=len, reverse=True) for i in ignore: if mn.startswith(i): mn = mn[len(i):] stripped = i break else: stripped = '' if fl != mn[0].lower() and mn[0] != '_': # heading letter = mn[0].upper() if letter not in letters: modindexentries.append(['', False, 0, False, letter, '', [], False, '']) 
letters.append(letter) tn = mn.split('.')[0] if tn != mn: # submodule if pmn == tn: # first submodule - make parent collapsable modindexentries[-1][1] = True num_collapsables += 1 elif not pmn.startswith(tn): # submodule without parent in list, add dummy entry cg += 1 modindexentries.append([tn, True, cg, False, '', '', [], False, stripped]) else: num_toplevels += 1 cg += 1 modindexentries.append([mn, False, cg, (tn != mn), fn, sy, pl, dep, stripped]) pmn = mn fl = mn[0].lower() platforms = sorted(platforms) # apply heuristics when to collapse modindex at page load: # only collapse if number of toplevel modules is larger than # number of submodules collapse = len(modules) - num_toplevels < num_toplevels # As some parts of the module names may have been stripped, those # names have changed, thus it is necessary to sort the entries. if ignore: def sorthelper(entry): name = entry[0] if name == '': # heading name = entry[4] return name.lower() modindexentries.sort(key=sorthelper) letters.sort() modindexcontext = dict( modindexentries = modindexentries, platforms = platforms, letters = letters, collapse_modindex = collapse, ) self.info(' modindex', nonl=1) self.handle_page('modindex', modindexcontext, 'modindex.html') # the search page if self.name != 'htmlhelp': self.info(' search', nonl=1) self.handle_page('search', {}, 'search.html') # additional pages from conf.py for pagename, template in self.config.html_additional_pages.items(): self.info(' '+pagename, nonl=1) self.handle_page(pagename, {}, template) if self.config.html_use_opensearch and self.name != 'htmlhelp': self.info(' opensearch', nonl=1) fn = path.join(self.outdir, '_static', 'opensearch.xml') self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn) self.info() # copy image files if self.images: self.info(bold('copying images...'), nonl=True) ensuredir(path.join(self.outdir, '_images')) for src, dest in self.images.iteritems(): self.info(' '+src, nonl=1) try: copyfile(path.join(self.srcdir, 
src), path.join(self.outdir, '_images', dest)) except Exception, err: self.warn('cannot copy image file %r: %s' % (path.join(self.srcdir, src), err)) self.info()
if self.env.dlfiles: self.info(bold('copying downloadable files...'), nonl=True) ensuredir(path.join(self.outdir, '_downloads')) for src, (_, dest) in self.env.dlfiles.iteritems(): self.info(' ' + src, nonl=1) try: copyfile(path.join(self.srcdir, src), path.join(self.outdir, '_downloads', dest)) except Exception, err: self.warn('cannot copy downloadable file %r: %s' % (path.join(self.srcdir, src), err)) self.info() # copy static files self.info(bold('copying static files... '), nonl=True) ensuredir(path.join(self.outdir, '_static')) # first, create pygments style file f = open(path.join(self.outdir, '_static', 'pygments.css'), 'w') f.write(self.highlighter.get_stylesheet()) f.close() # then, copy translations JavaScript file if self.config.language is not None: jsfile = path.join(package_dir, 'locale', self.config.language, 'LC_MESSAGES', 'sphinx.js') if path.isfile(jsfile): copyfile(jsfile, path.join(self.outdir, '_static', 'translations.js')) # then, copy over all user-supplied static files if self.theme: staticdirnames = [ path.join(themepath, 'static')
def finish(self):
    """Write the auxiliary output pages (general index, module index,
    search, extra config pages, opensearch description) and copy images.
    """
    self.info(bold('writing additional files...'), nonl=1)

    # the global general index
    if self.config.html_use_index:
        # the total count of lines for each index letter, used to distribute
        # the entries into two columns
        genindex = self.env.create_index(self)
        indexcounts = []
        for _, entries in genindex:
            indexcounts.append(
                sum(1 + len(subitems) for _, (_, subitems) in entries))

        genindexcontext = dict(
            genindexentries=genindex,
            genindexcounts=indexcounts,
            split_index=self.config.html_split_index,
        )
        self.info(' genindex', nonl=1)

        if self.config.html_split_index:
            # One landing page plus one page per index letter, and an
            # all-in-one fallback page.
            self.handle_page('genindex', genindexcontext,
                             'genindex-split.html')
            self.handle_page('genindex-all', genindexcontext,
                             'genindex.html')
            for (key, entries), count in zip(genindex, indexcounts):
                ctx = {
                    'key': key,
                    'entries': entries,
                    'count': count,
                    'genindexentries': genindex
                }
                self.handle_page('genindex-' + key, ctx,
                                 'genindex-single.html')
        else:
            self.handle_page('genindex', genindexcontext, 'genindex.html')

    # the global module index
    if self.config.html_use_modindex and self.env.modules:
        # the sorted list of all modules, for the global module index
        modules = sorted(
            ((mn, (self.get_relative_uri('modindex', fn) + '#module-' + mn,
                   sy, pl, dep))
             for (mn, (fn, sy, pl, dep)) in self.env.modules.iteritems()),
            key=lambda x: x[0].lower())
        # collect all platforms
        platforms = set()
        # sort out collapsable modules
        modindexentries = []
        letters = []
        pmn = ''
        num_toplevels = 0
        num_collapsables = 0
        cg = 0  # collapse group
        fl = ''  # first letter
        # Entry layout: [name, collapsable, collapse-group, is-submodule,
        #                link, synopsis, platforms, deprecated, stripped-prefix]
        for mn, (fn, sy, pl, dep) in modules:
            pl = pl and pl.split(', ') or []
            platforms.update(pl)

            # Strip the longest matching common prefix, remembering it so the
            # template can show it greyed out.
            ignore = self.env.config['modindex_common_prefix']
            ignore = sorted(ignore, key=len, reverse=True)
            for i in ignore:
                if mn.startswith(i):
                    mn = mn[len(i):]
                    stripped = i
                    break
            else:
                stripped = ''

            if fl != mn[0].lower() and mn[0] != '_':
                # heading
                letter = mn[0].upper()
                if letter not in letters:
                    modindexentries.append(
                        ['', False, 0, False, letter, '', [], False, ''])
                    letters.append(letter)
            tn = mn.split('.')[0]
            if tn != mn:
                # submodule
                if pmn == tn:
                    # first submodule - make parent collapsable
                    modindexentries[-1][1] = True
                    num_collapsables += 1
                elif not pmn.startswith(tn):
                    # submodule without parent in list, add dummy entry
                    cg += 1
                    modindexentries.append(
                        [tn, True, cg, False, '', '', [], False, stripped])
            else:
                num_toplevels += 1
                cg += 1
            modindexentries.append(
                [mn, False, cg, (tn != mn), fn, sy, pl, dep, stripped])
            pmn = mn
            fl = mn[0].lower()
        platforms = sorted(platforms)

        # apply heuristics when to collapse modindex at page load:
        # only collapse if number of toplevel modules is larger than
        # number of submodules
        collapse = len(modules) - num_toplevels < num_toplevels

        # As some parts of the module names may have been stripped, those
        # names have changed, thus it is necessary to sort the entries.
        if ignore:
            def sorthelper(entry):
                name = entry[0]
                if name == '':
                    # heading
                    name = entry[4]
                return name.lower()

            modindexentries.sort(key=sorthelper)
            letters.sort()

        modindexcontext = dict(
            modindexentries=modindexentries,
            platforms=platforms,
            letters=letters,
            collapse_modindex=collapse,
        )
        self.info(' modindex', nonl=1)
        self.handle_page('modindex', modindexcontext, 'modindex.html')

    # the search page
    if self.name != 'htmlhelp':
        self.info(' search', nonl=1)
        self.handle_page('search', {}, 'search.html')

    # additional pages from conf.py
    for pagename, template in self.config.html_additional_pages.items():
        self.info(' ' + pagename, nonl=1)
        self.handle_page(pagename, {}, template)

    if self.config.html_use_opensearch and self.name != 'htmlhelp':
        self.info(' opensearch', nonl=1)
        fn = path.join(self.outdir, '_static', 'opensearch.xml')
        self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn)

    self.info()

    # copy image files
    if self.images:
        self.info(bold('copying images...'), nonl=True)
        ensuredir(path.join(self.outdir, '_images'))
        for src, dest in self.images.iteritems():
            self.info(' ' + src, nonl=1)
            try:
                copyfile(path.join(self.srcdir, src),
                         path.join(self.outdir, '_images', dest))
            except Exception, err:
                # Best effort: a missing/unreadable image must not abort
                # the whole build.
                self.warn('cannot copy image file %r: %s' %
                          (path.join(self.srcdir, src), err))
        self.info()
def generate_autosummary_docs(sources, output_dir=None, suffix=None,
                              warn=_simple_warn, info=_simple_info):
    """Generate stub reST files for autosummary entries found in *sources*.

    For every documented name that carries a ``:toctree:`` option, write a
    ``<name><suffix>`` stub (default ``.rst``) into the toctree directory or
    *output_dir*.  Existing files are left untouched; import failures are
    reported through *warn* and skipped.
    """
    info('generating autosummary for: %s' % ', '.join(sources))
    if output_dir:
        info('writing to %s' % output_dir)
    # read
    names = {}
    for name, loc in get_documented(sources).items():
        for (filename, sec_title, keyword, toctree) in loc:
            if toctree is not None:
                # Toctree paths are relative to the file that contains the
                # autosummary directive.
                path = os.path.join(os.path.dirname(filename), toctree)
                names[name] = os.path.abspath(path)

    # write
    for name, path in sorted(names.items()):
        path = output_dir or path
        ensuredir(path)
        try:
            obj, name = import_by_name(name)
        except ImportError, e:
            warn('failed to import %r: %s' % (name, e))
            continue
        fn = os.path.join(path, name + (suffix or '.rst'))
        # skip it if it exists
        if os.path.isfile(fn):
            continue
        f = open(fn, 'w')
        try:
            if inspect.ismodule(obj):
                # XXX replace this with autodoc's API?
                tmpl = env.get_template('module')
                # Classify the module members into functions, non-exception
                # classes and exceptions for the module template.
                functions = [
                    getattr(obj, item).__name__ for item in dir(obj)
                    if inspect.isfunction(getattr(obj, item))
                ]
                classes = [
                    getattr(obj, item).__name__ for item in dir(obj)
                    if inspect.isclass(getattr(obj, item))
                    and not issubclass(getattr(obj, item), Exception)
                ]
                exceptions = [
                    getattr(obj, item).__name__ for item in dir(obj)
                    if inspect.isclass(getattr(obj, item))
                    and issubclass(getattr(obj, item), Exception)
                ]
                rendered = tmpl.render(name=name,
                                       underline='=' * len(name),
                                       functions=functions,
                                       classes=classes,
                                       exceptions=exceptions,
                                       len_functions=len(functions),
                                       len_classes=len(classes),
                                       len_exceptions=len(exceptions))
                f.write(rendered)
            else:
                # Non-module objects get a title plus the matching
                # auto* directive.
                f.write('%s\n%s\n\n' % (name, '=' * len(name)))
                if inspect.isclass(obj):
                    if issubclass(obj, Exception):
                        f.write(format_modulemember(name, 'autoexception'))
                    else:
                        f.write(format_modulemember(name, 'autoclass'))
                elif inspect.ismethod(obj) or inspect.ismethoddescriptor(obj):
                    f.write(format_classmember(name, 'automethod'))
                elif callable(obj):
                    f.write(format_modulemember(name, 'autofunction'))
                elif hasattr(obj, '__get__'):
                    # Descriptor (e.g. property) -> attribute directive.
                    f.write(format_classmember(name, 'autoattribute'))
                else:
                    f.write(format_modulemember(name, 'autofunction'))
        finally:
            f.close()