def run(self):
    """Directive entry point: write an RSS 2.0 feed for the current page.

    Each top-level section of the directive content becomes one RSS
    <item>; the feed file is written next to the built output as
    ``<docname>.rss``.  Returns an empty node list (the directive itself
    produces no document output).
    """
    env = self.state.document.settings.env
    baseurl = env.config.rss_baseurl
    assert baseurl, "rss_baseurl must be defined in your config.py"
    # Source file of the directive; used to derive the page URL.
    source = self.state_machine.input_lines.source(self.lineno - self.state_machine.input_offset - 1)
    # Parse the directive body into its own throwaway document.
    rss_doc = utils.new_document(b("<rss>"), self.state.document.settings)
    Parser().parse("\n".join(self.content), rss_doc)
    rst_suffix = env.config.source_suffix
    # NOTE(review): str.replace swaps every occurrence of the suffix, not
    # just a trailing one — a path containing ".rst" mid-name would be
    # mangled; confirm acceptable for this project's doc layout.
    path = os.path.relpath(source, env.srcdir).replace(rst_suffix, ".html")
    builder = env.app.builder
    docwriter = HTMLWriter(self)
    docsettings = OptionParser(defaults=env.settings, components=(docwriter,)).get_default_values()
    docsettings.compact_lists = bool(env.config.html_compact_lists)
    dest = os.path.join(env.app.outdir, os_path(env.docname) + ".rss")
    pageurl = "%s/%s" % (baseurl, path)
    # NOTE(review): file opened in text mode with the platform default
    # encoding, yet the XML declaration claims ISO-8859-1 — verify.
    with open(dest, "w") as rss:
        title = self.options.get("title", "")
        description = self.options.get("description", None)
        rss.write('<?xml version="1.0" encoding="ISO-8859-1" ?>\n')
        rss.write('<rss version="2.0">\n')
        rss.write("<channel>\n")
        # NOTE(review): cgi.escape was removed in Python 3.8; html.escape
        # is the modern replacement.
        rss.write("<title>%s</title>\n" % cgi.escape(title))
        rss.write("<link>%s</link>\n" % pageurl)
        if description:
            rss.write("<description>%s</description>\n" % cgi.escape(description))
        # One <item> per top-level section that has a title.
        for child in rss_doc.children:
            if not isinstance(child, nodes.section):
                continue
            title_index = child.first_child_matching_class(nodes.title)
            if title_index is None:
                continue
            # Everything after the title becomes the item description.
            node = nodes.paragraph()
            node.extend(child.children[title_index + 1 :])
            sec_doc = utils.new_document(b("<rss-section>"), docsettings)
            sec_doc.append(node)
            visitor = RssTranslator(builder, sec_doc)
            sec_doc.walkabout(visitor)
            title = child.children[title_index].astext()
            sectionurl = "%s#%s" % (pageurl, child.get("ids")[0])
            # Reuses (shadows) the channel-level `title`/`description`
            # locals from above — intentional, but easy to misread.
            description = "".join(visitor.body)
            rss.write("<item>\n")
            rss.write("<title>%s</title>\n" % cgi.escape(title))
            rss.write("<link>%s</link>\n" % sectionurl)
            # NOTE(review): a description containing "]]>" would break
            # this CDATA section — confirm inputs cannot contain it.
            rss.write("<description><![CDATA[%s]]></description>\n" % description)
            rss.write("</item>\n")
        rss.write("</channel>\n")
        rss.write("</rss>\n")
    return []
def split_lines(iter):
    """Yield UTF-8-decoded lines from an iterable of byte chunks.

    The final chunk must end with a newline; any trailing partial line
    triggers the assertion at the end.
    """
    pending = b('')
    for piece in iter:
        pending += piece
        while True:
            nl = pending.find(b('\n'))
            if nl == -1:
                break
            yield pending[:nl].decode('utf-8')
            pending = pending[nl+1:]
    assert not pending
def split_lines(iter):
    """Decode an iterable of byte chunks into complete text lines.

    Lines are yielded as soon as their terminating newline arrives;
    input is required to end on a newline boundary (asserted).
    """
    buf = b('')
    for chunk in iter:
        buf += chunk
        head, sep, tail = buf.partition(b('\n'))
        while sep:
            yield head.decode('utf-8')
            buf = tail
            head, sep, tail = buf.partition(b('\n'))
    assert not buf
def split_lines(iter):
    """Turn a stream of byte chunks into decoded text lines.

    Splits each accumulated buffer on newlines and keeps the unfinished
    tail for the next chunk; a non-newline-terminated stream fails the
    closing assertion.
    """
    remainder = b("")
    for block in iter:
        remainder += block
        pieces = remainder.split(b("\n"))
        # The element after the last newline is carried over.
        remainder = pieces.pop()
        for line in pieces:
            yield line.decode("utf-8")
    assert not remainder
def test_config_eol(tmpdir):
    """A conf.py written with LF or CRLF newlines must parse identically."""
    configfile = tmpdir / 'conf.py'
    for ending in ('\n', '\r\n'):
        configfile.write_bytes(b('project = "spam"' + ending))
        cfg = Config(tmpdir, 'conf.py', {}, None)
        # Any warning emitted during init raises ZeroDivisionError,
        # failing the test loudly.
        cfg.init_values(lambda warning: 1 / 0)
        assert cfg.project == u'spam'
def test_config_eol(tmpdir):
    """Config parsing accepts both Unix (LF) and Windows (CRLF) endings."""
    configfile = tmpdir / 'conf.py'
    for newline in ('\n', '\r\n'):
        configfile.write_bytes(b('project = "spam"' + newline))
        cfg = Config(tmpdir, 'conf.py', {}, None)
        cfg.init_values()
        assert cfg.project == u'spam'
def test_config_eol(tmpdir):
    # Config files may use either LF or CRLF line endings; both must
    # yield the same parsed values.
    configfile = tmpdir / "conf.py"
    endings = ("\n", "\r\n")
    for eol_marker in endings:
        configfile.write_bytes(b('project = "spam"' + eol_marker))
        cfg = Config(tmpdir, "conf.py", {}, None)
        cfg.init_values()
        assert cfg.project == u"spam"
def test_wordcollector():
    """Feed a parsed document to the index builder and check its word map."""
    doc = utils.new_document(b('test data'), settings)
    doc['file'] = 'dummy'
    parser.parse(FILE_CONTENTS, doc)
    builder = IndexBuilder(None, 'en', {}, None)
    builder.feed('filename', 'title', doc)
    # 'boson' must have been excluded from the mapping, 'fermion' kept.
    assert 'boson' not in builder._mapping
    assert 'fermion' in builder._mapping
def render_map(builder, node):
    """Render *node* as a DITA map via a docutils Publisher.

    A falsy node short-circuits to an empty ``<map></map>`` document.
    """
    if not node:
        # No content: emit the XML header plus an empty map element.
        return XML_HEAD + u"<map></map>"
    doc = new_document(b('<partial node>'))
    doc.append(node)
    publisher = Publisher(source_class=DocTreeInput,
                          destination_class=StringOutput)
    publisher.set_components('standalone', 'restructuredtext', 'pseudoxml')
    publisher.reader = DoctreeReader()
    publisher.writer = DitaMapWriter(builder)
    publisher.process_programmatic_settings(None,
                                            {'output_encoding': 'utf-8'},
                                            None)
    publisher.set_source(doc, None)
    publisher.set_destination(None, None)
    publisher.publish()
    return publisher.writer.output
def verify_re(rst, html_expected, latex_expected):
    """Parse *rst* and regex-match the HTML and/or LaTeX translator output.

    Either expected pattern may be falsy to skip that check.
    """
    document = utils.new_document(b('test data'), settings)
    document['file'] = 'dummy'
    parser.parse(rst, document)
    # Strip level-1 system messages so they don't leak into the output.
    for msg in document.traverse(nodes.system_message):
        if msg['level'] == 1:
            msg.replace_self([])
    if html_expected:
        translator = ForgivingHTMLTranslator(app.builder, document)
        document.walkabout(translator)
        rendered = ''.join(translator.fragment).strip()
        assert re.match(html_expected, rendered), 'from ' + rst
    if latex_expected:
        translator = ForgivingLaTeXTranslator(document, app.builder)
        translator.first_document = -1  # don't write \begin{document}
        document.walkabout(translator)
        rendered = ''.join(translator.body).strip()
        assert re.match(latex_expected, rendered), 'from ' + repr(rst)
def render_partial(self, node):
    """Render a lone doctree *node* and return the HTML writer's parts.

    ``None`` short-circuits to an empty fragment.  The Publisher is
    created on first use and cached on ``self._publisher``.
    """
    if node is None:
        return {"fragment": ""}
    partial_doc = new_document(b("<partial node>"))
    partial_doc.append(node)
    pub = self._publisher
    if pub is None:
        pub = Publisher(source_class=DocTreeInput,
                        destination_class=StringOutput)
        pub.set_components("standalone", "restructuredtext", "pseudoxml")
        self._publisher = pub
    pub.reader = DoctreeReader()
    pub.writer = HTMLWriter(self)
    pub.process_programmatic_settings(None,
                                      {"output_encoding": "unicode"},
                                      None)
    pub.set_source(partial_doc, None)
    pub.set_destination(None, None)
    pub.publish()
    return pub.writer.parts
def render_partial(self, node):
    """Utility: render a single doctree node to HTML writer parts.

    Returns ``{'fragment': ''}`` for a missing node; otherwise wraps the
    node in a fresh document and publishes it through the (lazily
    constructed, cached) Publisher.
    """
    if node is None:
        return {'fragment': ''}
    wrapper = new_document(b('<partial node>'))
    wrapper.append(node)
    # Lazily build the publisher exactly once per builder instance.
    if self._publisher is None:
        publisher = Publisher(source_class=DocTreeInput,
                              destination_class=StringOutput)
        publisher.set_components('standalone', 'restructuredtext',
                                 'pseudoxml')
        self._publisher = publisher
    pub = self._publisher
    pub.reader = DoctreeReader()
    pub.writer = HTMLWriter(self)
    pub.process_programmatic_settings(None,
                                      {'output_encoding': 'unicode'},
                                      None)
    pub.set_source(wrapper, None)
    pub.set_destination(None, None)
    pub.publish()
    return pub.writer.parts
def run(self):
    """Directive entry point: emit an RSS 2.0 feed for the current page.

    Top-level sections of the directive content become RSS <item>s; the
    feed is written as ``<docname>.rss`` in the build output directory.
    Returns an empty node list (no in-document output).
    """
    env = self.state.document.settings.env
    baseurl = env.config.rss_baseurl
    assert baseurl, 'rss_baseurl must be defined in your config.py'
    # Source file of the directive; used to derive the page URL below.
    source = self.state_machine.input_lines.source(
        self.lineno - self.state_machine.input_offset - 1)
    # Parse the directive body into its own throwaway document.
    rss_doc = utils.new_document(b('<rss>'), self.state.document.settings)
    Parser().parse('\n'.join(self.content), rss_doc)
    rst_suffix = env.config.source_suffix
    # NOTE(review): replace() swaps every occurrence of the suffix, not
    # only a trailing one — confirm doc paths never contain it mid-name.
    path = os.path.relpath(source, env.srcdir).replace(rst_suffix, '.html')
    builder = env.app.builder
    docwriter = HTMLWriter(self)
    docsettings = OptionParser(
        defaults=env.settings,
        components=(docwriter, )).get_default_values()
    docsettings.compact_lists = bool(env.config.html_compact_lists)
    dest = os.path.join(env.app.outdir, os_path(env.docname) + '.rss')
    pageurl = '%s/%s' % (baseurl, path)
    # NOTE(review): text-mode open with platform default encoding while
    # the XML declaration claims ISO-8859-1 — verify.
    with open(dest, 'w') as rss:
        title = self.options.get('title', '')
        description = self.options.get('description', None)
        rss.write('<?xml version="1.0" encoding="ISO-8859-1" ?>\n')
        rss.write('<rss version="2.0">\n')
        rss.write('<channel>\n')
        # NOTE(review): cgi.escape was removed in Python 3.8;
        # html.escape is the modern replacement.
        rss.write('<title>%s</title>\n' % cgi.escape(title))
        rss.write('<link>%s</link>\n' % pageurl)
        if description:
            rss.write('<description>%s</description>\n' %
                      cgi.escape(description))
        # One <item> per titled top-level section.
        for child in rss_doc.children:
            if not isinstance(child, nodes.section):
                continue
            title_index = child.first_child_matching_class(nodes.title)
            if title_index is None:
                continue
            # Everything after the title becomes the item description.
            node = nodes.paragraph()
            node.extend(child.children[title_index + 1:])
            sec_doc = utils.new_document(b('<rss-section>'), docsettings)
            sec_doc.append(node)
            visitor = RssTranslator(builder, sec_doc)
            sec_doc.walkabout(visitor)
            title = child.children[title_index].astext()
            sectionurl = '%s#%s' % (pageurl, child.get('ids')[0])
            # Reuses (shadows) the channel-level title/description
            # locals — intentional, but easy to misread.
            description = ''.join(visitor.body)
            rss.write('<item>\n')
            rss.write('<title>%s</title>\n' % cgi.escape(title))
            rss.write('<link>%s</link>\n' % sectionurl)
            # NOTE(review): "]]>" inside description would terminate the
            # CDATA section early — confirm inputs cannot contain it.
            rss.write('<description><![CDATA[%s]]></description>\n' %
                      description)
            rss.write('</item>\n')
        rss.write('</channel>\n')
        rss.write('</rss>\n')
    return []
# LaTeX document skeletons used to wrap a math expression for rendering.
DOC_BODY = r'''
\begin{document}
%s
\end{document}
'''

# Variant used when the preview_latex package should report baselines.
DOC_BODY_PREVIEW = r'''
\usepackage[active]{preview}
\begin{document}
\begin{preview}
%s
\end{preview}
\end{document}
'''

# Matches the "[N depth=D]" marker that preview_latex-enabled dvipng
# prints; group 1 captures the (possibly negative) depth in pixels.
depth_re = re.compile(b(r'\[\d+ depth=(-?\d+)\]'))

def render_math(self, math):
    """Render the LaTeX math expression *math* using latex and dvipng.

    Return the filename relative to the built document and the "depth",
    that is, the distance of image bottom and baseline in pixels, if the
    option to use preview_latex is switched on.

    Error handling may seem strange, but follows a pattern: if LaTeX or
    dvipng aren't available, only a warning is generated (since that
    enables people on machines without these programs to at least build
    the rest of the docs successfully).  If the programs are there,
    however, they may not fail since that indicates a problem in the
    math source.
    """
    # NOTE(review): chunk is truncated here — the function body is not
    # visible in this view.
PNG image manipulation helpers.

:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""

import struct
import binascii

from sphinx.util.pycompat import b

# Byte lengths of the trailing IEND chunk and of the custom depth chunk
# (presumably length + type + 10-byte payload + CRC — TODO confirm).
LEN_IEND = 12
LEN_DEPTH = 22

# Big-endian length field (10) followed by the tEXt chunk type and the
# "Depth" keyword with its NUL separator.
DEPTH_CHUNK_LEN = struct.pack('!i', 10)
DEPTH_CHUNK_START = b('tEXtDepth\x00')
# Canonical PNG end-of-image chunk, including its CRC.
IEND_CHUNK = b('\x00\x00\x00\x00IEND\xAE\x42\x60\x82')

def read_png_depth(filename):
    """Read the special tEXt chunk indicating the depth from a PNG file."""
    result = None
    f = open(filename, 'rb')
    try:
        # Seek backwards past IEND to where the depth chunk would sit.
        f.seek(-(LEN_IEND + LEN_DEPTH), 2)
        depthchunk = f.read(LEN_DEPTH)
        if not depthchunk.startswith(DEPTH_CHUNK_LEN + DEPTH_CHUNK_START):
            # either not a PNG file or not containing the depth chunk
            return None
        # Depth payload is a big-endian int at offset 14 of the chunk.
        result = struct.unpack('!i', depthchunk[14:18])[0]
    finally:
        # NOTE(review): chunk truncated here — the finally body
        # (presumably f.close()) is not visible in this view.
PNG image manipulation helpers.

:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""

import struct
import binascii

from sphinx.util.pycompat import b

# Byte lengths of the trailing IEND chunk and of the custom depth chunk
# (presumably length + type + 10-byte payload + CRC — TODO confirm).
LEN_IEND = 12
LEN_DEPTH = 22

# Big-endian length field (10) plus the tEXt chunk type and "Depth"
# keyword with its NUL separator.
DEPTH_CHUNK_LEN = struct.pack("!i", 10)
DEPTH_CHUNK_START = b("tEXtDepth\x00")
# Canonical PNG end-of-image chunk, including its CRC.
IEND_CHUNK = b("\x00\x00\x00\x00IEND\xAE\x42\x60\x82")

def read_png_depth(filename):
    """Read the special tEXt chunk indicating the depth from a PNG file."""
    result = None
    f = open(filename, "rb")
    try:
        # Seek backwards past IEND to where the depth chunk would sit.
        f.seek(-(LEN_IEND + LEN_DEPTH), 2)
        depthchunk = f.read(LEN_DEPTH)
        if not depthchunk.startswith(DEPTH_CHUNK_LEN + DEPTH_CHUNK_START):
            # either not a PNG file or not containing the depth chunk
            return None
        # Depth payload is a big-endian int at offset 14 of the chunk.
        result = struct.unpack("!i", depthchunk[14:18])[0]
    finally:
        # NOTE(review): chunk truncated here — the finally body
        # (presumably f.close()) is not visible in this view.
def read_chunks():
    """Lazily yield the decompressed file contents, chunk by chunk.

    Reads ``bufsize`` bytes at a time from the enclosing scope's file
    object ``f`` and flushes the decompressor once the stream ends.
    """
    decomp = zlib.decompressobj()
    chunk = f.read(bufsize)
    while chunk != b(''):
        yield decomp.decompress(chunk)
        chunk = f.read(bufsize)
    # Emit whatever remains buffered inside the decompressor.
    yield decomp.flush()
PNG image manipulation helpers.

:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""

import struct
import binascii

from sphinx.util.pycompat import b

# Byte lengths of the trailing IEND chunk and of the custom depth chunk
# (presumably length + type + 10-byte payload + CRC — TODO confirm).
LEN_IEND = 12
LEN_DEPTH = 22

# Big-endian length field (10) plus the tEXt chunk type and "Depth"
# keyword with its NUL separator.
DEPTH_CHUNK_LEN = struct.pack('!i', 10)
DEPTH_CHUNK_START = b('tEXtDepth\x00')
# Canonical PNG end-of-image chunk, including its CRC.
IEND_CHUNK = b('\x00\x00\x00\x00IEND\xAE\x42\x60\x82')

def read_png_depth(filename):
    """Read the special tEXt chunk indicating the depth from a PNG file."""
    result = None
    f = open(filename, 'rb')
    try:
        # Seek backwards past IEND to where the depth chunk would sit.
        f.seek(- (LEN_IEND + LEN_DEPTH), 2)
        depthchunk = f.read(LEN_DEPTH)
        if not depthchunk.startswith(DEPTH_CHUNK_LEN + DEPTH_CHUNK_START):
            # either not a PNG file or not containing the depth chunk
            return None
        # Depth payload is a big-endian int at offset 14 of the chunk.
        result = struct.unpack('!i', depthchunk[14:18])[0]
    finally:
        # NOTE(review): chunk truncated here — the finally body
        # (presumably f.close()) is not visible in this view.
def env_updated(app, env):
    """Build the "find anything" fuzzy-search index and write it to
    ``_static/fuzzyindex.js`` in the output directory.

    NOTE(review): uses Python-2-only imports (cStringIO, cElementTree)
    and writes str to a binary-mode file — confirm target interpreter.
    """
    config = app.builder.config
    doctree = env.get_doctree(config.master_doc)
    from sphinx import addnodes
    # Resolve every toctree of the master document and merge them into
    # a single node tree.
    toctrees = []
    for toctreenode in doctree.traverse(addnodes.toctree):
        toctree = env.resolve_toctree(config.master_doc, app.builder,
                                      toctreenode, prune = False,
                                      includehidden = True,
                                      maxdepth = 0, collapse = False)
        toctrees.append(toctree)
    if not toctrees:
        toc = None
    else:
        toc = toctrees[0]
        for toctree in toctrees[1:]:
            toc.extend(toctree.children)
    # toc = env.get_toctree_for(config.master_doc, app.builder, False)
    node = toc
    # Publish the merged toctree as pseudoxml so it can be re-parsed
    # with ElementTree below.
    doc = new_document(b('<partial node>'))
    doc.append(node)
    pub = Publisher(source_class = DocTreeInput,
                    destination_class=StringOutput)
    pub.set_components('standalone', 'restructuredtext', 'pseudoxml')
    pub.reader = DoctreeReader()
    pub.writer = Writer()
    pub.writer.format = 'pseudoxml'
    pub.process_programmatic_settings(
        None, {'output_encoding': 'unicode'}, None)
    pub.set_source(doc, None)
    pub.set_destination(None, None)
    pub.publish()
    import xml.etree.cElementTree as ET
    from cStringIO import StringIO  # NOTE(review): imported but unused
    #out = re.sub(r'^<!DOCTYPE[^>]*>\s*', '<?xml version="1.0"?>', pub.writer.output)
    # Patch the encoding declaration so ET.fromstring accepts the bytes.
    out = pub.writer.output.encode('utf-8').replace(
        ' encoding="unicode"', ' encoding="utf-8"')
    #pprint.pprint(out)
    doctree = ET.fromstring(out)
    #pprint.pprint(doctree)
    #pprint.pprint(dir(doctree))
    if hasattr(doctree, 'getroot'):
        doctree = doctree.getroot()
    #root = doctree.getroot()
    fuzzy_find_entries = []
    # Maps refuri -> link text, filled while walking references below.
    docs = {}

    def indexentries(entry, links, cap = ''):
        # Append fuzzy-find records for one general-index entry.
        # Returns '' when the links were consumed, else the bare entry
        # text (used as the caption prefix for sub-entries).
        # NOTE(review): reads the `char` loop variable from the
        # genindex loop below via closure — only valid when called
        # inside that loop.
        if links:
            fuzzy_find_entries.append(dict(
                href = links[0][1],
                name = entry,
                path = "index/%s/%s" % (char, cap),
                info = "INDEX",
                detail = '',
            ))
            # Additional occurrences get the document name (or ordinal
            # fallback) appended for disambiguation.
            for i, (ismain, link) in enumerate(links[1:], start=1):
                doclink = link.split('#', 1)[0]
                docname = docs.get(doclink, i)
                fuzzy_find_entries.append(dict(
                    href = link,
                    name = "%s (%s)" % (entry, docname),
                    path = "index/%s/%s" % (char, cap),
                    info = "INDEX",
                    detail = '',
                ))
            return ''
        else:
            return entry

    if app.config.html_findanything_add_topics:
        # ElementTree API changed: iter() replaced getiterator().
        if hasattr(doctree, 'iter'):
            references = doctree.iter('reference')
        else:
            references = doctree.getiterator('reference')
        # fuzzy_find_entries.append(dict(
        #     href = link,
        #     name = entry,
        #     path = "index/%s/" % char,
        #     info = "INDEX",
        #     detail = '',
        # ))
        refset = set()
        for ref in references:
            refuri = ref.attrib['refuri']
            docs[refuri] = ref.text
            path = "/"
            if "/" in refuri:
                path = refuri.rsplit('/', 1)[0]+"/"
            if '#' in refuri:
                # A fragment means this points into a page: a section.
                docname = docs.get(refuri.split('#', 1)[0])
                if docname:
                    path += docname + "/"
                info = 'SECTION'
            else:
                info = 'PAGE'
            e = dict(
                href = ref.attrib['refuri'],
                name = ref.text,
                info = info,
                path = path,
                detail = '',
            )
            # De-duplicate on the full identity of the entry.
            refid = "%(href)s^%(path)s^%(name)s^%(info)s" % e
            if refid in refset:
                continue
            refset.add(refid)
            fuzzy_find_entries.append(e)
    if app.config.html_findanything_add_indexentries:
        genindex = env.create_index(app.builder)
        for char,char_list in genindex:
            for entry, (links, subitems) in char_list:
                cap = indexentries(entry, links)
                if subitems:
                    if cap:
                        cap += '/'
                    for subname, sublinks in subitems:
                        indexentries(subname, sublinks, cap=cap)
    s = json.dumps(fuzzy_find_entries)
    static_dir = os.path.join(app.builder.outdir, '_static')
    if not os.path.exists(static_dir):
        os.makedirs(static_dir)
    with open(os.path.join(static_dir, 'fuzzyindex.js'), 'wb') as f:
        f.write("DOCUMENTATION_OPTIONS.FIND_ANYTHING_ENTRIES = %s;" % s);
        if app.config.html_findanything_use_cached_hits:
            f.write("DOCUMENTATION_OPTIONS.FIND_ANYTHING_USE_CACHED_HITS = true;")
        f.write("DOCUMENTATION_OPTIONS.FIND_ANYTHING_WIDTH = '%s';" % app.config.html_findanything_width);
:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""

import os
import re
import sys
from os import path

from sphinx.errors import ConfigError
from sphinx.locale import l_
from sphinx.util.osutil import make_filename
from sphinx.util.pycompat import bytes, b, convert_with_2to3

# Matches any non-ASCII byte — presumably used to detect config files
# that need an explicit coding declaration; TODO confirm at use site.
nonascii_re = re.compile(b(r'[\x80-\xff]'))

CONFIG_SYNTAX_ERROR = "There is a syntax error in your configuration file: %s"
if sys.version_info >= (3, 0):
    CONFIG_SYNTAX_ERROR += "\nDid you change the syntax from 2.x to 3.x?"

# NOTE(review): class body continues beyond this chunk.
class Config(object):
    """
    Configuration file abstraction.
    """

    # the values are: (default, what needs to be rebuilt if changed)

    # If you add a value here, don't forget to include it in the
    # quickstart.py file template as well as in the docs!
# LaTeX document skeletons used to wrap a math expression for rendering.
DOC_BODY = r'''
\begin{document}
%s
\end{document}
'''

# Variant used when the preview_latex package should report baselines.
DOC_BODY_PREVIEW = r'''
\usepackage[active]{preview}
\begin{document}
\begin{preview}
%s
\end{preview}
\end{document}
'''

# Matches the "[N depth=D]" marker that preview_latex-enabled dvipng
# prints; group 1 captures the (possibly negative) depth in pixels.
depth_re = re.compile(b(r'\[\d+ depth=(-?\d+)\]'))

def render_math(self, math):
    """Render the LaTeX math expression *math* using latex and dvipng.

    Return the filename relative to the built document and the "depth",
    that is, the distance of image bottom and baseline in pixels, if the
    option to use preview_latex is switched on.

    Error handling may seem strange, but follows a pattern: if LaTeX or
    dvipng aren't available, only a warning is generated (since that
    enables people on machines without these programs to at least build
    the rest of the docs successfully).  If the programs are there,
    however, they may not fail since that indicates a problem in the
    math source.
    """
    use_preview = self.builder.config.pngmath_use_preview
    # NOTE(review): chunk truncated here — the rest of the function
    # body is not visible in this view.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""

import os
import re
import sys
from os import path

from sphinx.errors import ConfigError
from sphinx.locale import l_
from sphinx.util.osutil import make_filename
from sphinx.util.pycompat import bytes, b, execfile_

# Matches any non-ASCII byte — presumably used to detect config files
# that need an explicit coding declaration; TODO confirm at use site.
nonascii_re = re.compile(b(r"[\x80-\xff]"))

CONFIG_SYNTAX_ERROR = "There is a syntax error in your configuration file: %s"
if sys.version_info >= (3, 0):
    CONFIG_SYNTAX_ERROR += "\nDid you change the syntax from 2.x to 3.x?"

# NOTE(review): class body continues beyond this chunk.
class Config(object):
    """
    Configuration file abstraction.
    """

    # the values are: (default, what needs to be rebuilt if changed)

    # If you add a value here, don't forget to include it in the
    # quickstart.py file template as well as in the docs!