def run(self):
    env = self.state.document.settings.env
    baseurl = env.config.rss_baseurl
    assert baseurl, "rss_baseurl must be defined in your config.py"
    source = self.state_machine.input_lines.source(
        self.lineno - self.state_machine.input_offset - 1)
    rss_doc = utils.new_document(b("<rss>"), self.state.document.settings)
    Parser().parse("\n".join(self.content), rss_doc)
    rst_suffix = env.config.source_suffix
    path = os.path.relpath(source, env.srcdir).replace(rst_suffix, ".html")
    builder = env.app.builder
    docwriter = HTMLWriter(self)
    docsettings = OptionParser(defaults=env.settings,
                               components=(docwriter,)).get_default_values()
    docsettings.compact_lists = bool(env.config.html_compact_lists)
    dest = os.path.join(env.app.outdir, os_path(env.docname) + ".rss")
    pageurl = "%s/%s" % (baseurl, path)
    with open(dest, "w") as rss:
        title = self.options.get("title", "")
        description = self.options.get("description", None)
        rss.write('<?xml version="1.0" encoding="ISO-8859-1" ?>\n')
        rss.write('<rss version="2.0">\n')
        rss.write("<channel>\n")
        rss.write("<title>%s</title>\n" % cgi.escape(title))
        rss.write("<link>%s</link>\n" % pageurl)
        if description:
            rss.write("<description>%s</description>\n" % cgi.escape(description))
        for child in rss_doc.children:
            if not isinstance(child, nodes.section):
                continue
            title_index = child.first_child_matching_class(nodes.title)
            if title_index is None:
                continue
            node = nodes.paragraph()
            node.extend(child.children[title_index + 1:])
            sec_doc = utils.new_document(b("<rss-section>"), docsettings)
            sec_doc.append(node)
            visitor = RssTranslator(builder, sec_doc)
            sec_doc.walkabout(visitor)
            title = child.children[title_index].astext()
            sectionurl = "%s#%s" % (pageurl, child.get("ids")[0])
            description = "".join(visitor.body)
            rss.write("<item>\n")
            rss.write("<title>%s</title>\n" % cgi.escape(title))
            rss.write("<link>%s</link>\n" % sectionurl)
            rss.write("<description><![CDATA[%s]]></description>\n" % description)
            rss.write("</item>\n")
        rss.write("</channel>\n")
        rss.write("</rss>\n")
    return []
def assertParses(self, source, expected, alt=False):  # noqa
    parser = CommonMarkParser()
    parser.parse(dedent(source), new_document('<string>'))
    self.assertMultiLineEqual(
        dedent(expected).lstrip(),
        dedent(parser.document.asdom().toprettyxml(indent=' ')),
    )
def test_meta(self):
    from docutils import utils, nodes
    from docutils.parsers.rst.directives.html import MetaBody
    from docutils.core import publish_from_doctree
    doc = utils.new_document('<program>')
    doc.append(nodes.title('', '', nodes.Text('Title')))
    doc.append(nodes.paragraph('', '', nodes.Text('some text.')))
    doc.append(MetaBody('').meta('', name='title', content='Title'))
    doc.append(MetaBody('').meta('', name='generator',
                                 content='pyramid_describe/0.0.0'))
    doc.append(MetaBody('').meta('', name='location',
                                 content='http://example.com/'))
    doc.append(MetaBody('').meta('', name='one-digit', content='3'))
    chk = '''\
======
Title
======

some text.

.. meta::
    :title: Title
    :generator: pyramid_describe/0.0.0
    :location: http://example.com/
    :one-digit: 3
'''
    out = publish_from_doctree(
        doc, writer=rst.Writer(),
        settings_overrides={'explicit_title': False})
    self.assertMultiLineEqual(out, chk)
def run(self):
    indexnode, node = super(DjangoAdminModel, self).run()
    sig = self.arguments[0]
    lst = []
    if 'noautodoc' not in self.options:
        exclude = [a.strip() for a in self.options.get('exclude', '').split(',')]
        app_label, model_name = sig.split('.')
        for name, opts in model_attributes(app_label, model_name).items():
            if name in exclude:
                continue
            lst.append(".. djangoadmin:attribute:: %s.%s" % (sig, name))
            lst.append('')
            lst.append("    %s" % unicode(opts['description']))
            lst.append('')
    text = '\n'.join(lst)
    new_doc = new_document('temp-string', self.state.document.settings)
    parser = Parser()
    parser.parse(text, new_doc)
    container = nodes.container()
    container.extend(new_doc.children)
    node[1].extend(container)
    return [indexnode, node]
def check_rst_document(source, source_path='<string>', settings=None):
    """Returns a list of objects containing problems in the provided
    reStructuredText document ``source``.

    ``settings`` is the settings object for the docutils document instance.
    If None, the default settings are used.
    """
    alist = []

    def accumulate(x):
        return alist.append(x)

    document = utils.new_document(source_path, settings=settings)
    document.reporter.attach_observer(accumulate)
    if settings is None:
        # Fill in some values to prevent AttributeError
        document.settings.tab_width = 8
        document.settings.pep_references = None
        document.settings.rfc_references = None
        document.settings.smart_quotes = True
        document.settings.file_insertion_enabled = True
    parser = Parser()
    parser.parse(source, document)
    # Now apply transforms to get more warnings
    document.transformer.add_transforms(check_transforms)
    document.transformer.apply_transforms()
    return alist
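# Hypothetical usage sketch for check_rst_document() above; the short title
# underline triggers a parse-time warning that the attached observer collects.
problems = check_rst_document("Title\n==\n\nSome text.\n")
for problem in problems:
    print(problem.astext())  # each problem is a docutils system_message node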
def document(self):
    if self._doc is None:
        # Use the rst parser's document output to do as much of the
        # validation as we can without resorting to custom logic (this
        # parser is what sphinx and others use anyway, so it's hopefully
        # mature).
        parser_cls = docutils_parser.get_parser_class("rst")
        parser = parser_cls()
        defaults = {
            'halt_level': 5,
            'report_level': 5,
            'quiet': True,
            'file_insertion_enabled': False,
            'traceback': True,
            # Development use only.
            'dump_settings': False,
            'dump_internals': False,
            'dump_transforms': False,
        }
        opt = frontend.OptionParser(components=[parser], defaults=defaults)
        doc = utils.new_document(source_path=self.filename,
                                 settings=opt.get_default_values())
        parser.parse(self.contents, doc)
        self._doc = doc
    return self._doc
def test_parser(self):
    if self.run_in_debugger:
        pdb.set_trace()
    document = utils.new_document('test data', self.settings)
    self.parser.parse(self.input, document)
    output = document.pformat()
    self.compare_output(self.input, output, self.expected)
def split(self, page):
    """
    Move this page's top-level sections to sub-pages.

    Calls docutils to parse the text properly.
    Do we need to adjust heading styles?
    """
    d = new_document(
        page.pageName(),
        OptionParser(components=(Parser,)).get_default_values())
    Parser().parse(page.text(), d)
    sections = [s for s in d.traverse() if isinstance(s, section)]
    # assume title is first element and body is the rest;
    # create a sub-page for each section
    for s in sections:
        page.create(
            page=s[0].astext(),
            text=s.child_text_separator.join([p.astext() for p in s[1:]]))
    # leave just the preamble on the parent page
    page.edit(
        text=d.child_text_separator.join(
            [p.astext() for p in d[:d.first_child_matching_class(section)]]))
    if getattr(page, 'REQUEST', None):
        page.REQUEST.RESPONSE.redirect(page.pageUrl())
def merge(self, page):
    """
    Merge sub-pages as sections of this page.

    This merges all offspring, not just immediate children.
    """
    # get a rst parse tree of the current page
    d = new_document(
        page.pageName(),
        OptionParser(components=(Parser,)).get_default_values())
    Parser().parse(page.text(), d)

    # walk the offspring, adding as elements to the tree and deleting
    def walk(p):
        d2 = new_document(
            p.pageName(),
            OptionParser(components=(Parser,)).get_default_values())
        Parser().parse(p.text(), d2)
        d += d2.traverse()
        for c in page.childrenNesting():
            c = p.pageWithName(c)
            walk(c)
            c.delete()
    walk(page)

    # convert the tree back to source text and update this page
    page.edit(text=d.astext())

    # or: walk the offspring, adding as text to this page with
    # appropriate headings, and deleting
    # need to adjust headings?
    # for p in page.offspringNesting():
    #     pass

    if getattr(page, 'REQUEST', None):
        page.REQUEST.RESPONSE.redirect(page.pageUrl())
def parse_text(text):
    parser = rst.Parser()
    settings = frontend.OptionParser(
        components=(rst.Parser,)).get_default_values()
    document = utils.new_document(text, settings)
    parser.parse(text, document)
    return document.children
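# Hypothetical usage sketch for parse_text() above; assumes the module
# imports `frontend` and `utils` from docutils and `rst` from
# docutils.parsers, as the function body suggests.
children = parse_text("Heading\n=======\n\nA paragraph of body text.\n")
for child in children:
    print(child.tagname)  # e.g. "section"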
def new_document(self):
    document = new_document(self.source.source_path, self.settings)
    document.reporter.attach_observer(self.report)
    document.reporter.set_conditions('', 10000, 10000, None)
    self._encoding = document.reporter.encoding
    self._error_handler = document.reporter.error_handler
    return document
def test_ids_generated(self):
    from docutils import utils, nodes
    from docutils.core import publish_from_doctree
    doc = utils.new_document('<program>')
    docsect = nodes.section('')
    docsect['classes'] = ('c1 c2',)
    docsect['ids'] = ('my-test-id',)
    docsect['target-ids'] = ('my-test-id',)
    docsect.append(nodes.title('', '', nodes.Text('Title')))
    docsect.append(nodes.paragraph('', '', nodes.Text('some text.')))
    docsect.append(
        nodes.section(
            '',
            nodes.title('', '', nodes.Text('Sub-Title')),
            nodes.paragraph('', '', nodes.Text('some more text'))))
    doc.append(docsect)
    chk = '''\
.. class:: c1 c2

.. _`my-test-id`:

======
Title
======

some text.

---------
Sub-Title
---------

some more text
'''
    out = publish_from_doctree(doc, writer=rst.Writer())
    self.assertMultiLineEqual(out, chk)
def build_doc(name):
    doc = new_document(name)
    doc.settings.tab_width = 4
    doc.settings.character_level_inline_markup = "\ "
    doc.settings.file_insertion_enabled = True
    doc.settings.pep_references = "http://www.python.org/dev/peps/"
    doc.settings.rfc_references = "http://tools.ietf.org/html/"
    return doc
def test_inputrestrictions(self):
    parser_class = parsers.get_parser_class('rst')
    parser = parser_class()
    document = utils.new_document('test data', frontend.OptionParser(
        components=(parser,)).get_default_values())
    self.assertRaises(UnicodeError,  # UnicodeDecodeError since py2.3
                      parser.parse, 'hol%s' % chr(224), document)
def __init__(self):
    env = sphinx.environment.BuildEnvironment(None, None, None)
    CPPDomain(env)
    env.temp_data['docname'] = 'mock-doc'
    settings = frontend.OptionParser(
        components=(parsers.rst.Parser,)).get_default_values()
    settings.env = env
    self.document = utils.new_document('', settings)
def test_wordcollector():
    doc = utils.new_document(b('test data'), settings)
    doc['file'] = 'dummy'
    parser.parse(FILE_CONTENTS, doc)
    ix = IndexBuilder(None, 'en', {}, None)
    ix.feed('filename', 'title', doc)
    assert 'boson' not in ix._mapping
    assert 'fermion' in ix._mapping
def test_kwargs(self):
    transformer = transforms.Transformer(utils.new_document('test data'))
    transformer.add_transform(TestTransform, foo=42)
    transformer.apply_transforms()
    self.assertEqual(len(transformer.applied), 1)
    self.assertEqual(len(transformer.applied[0]), 4)
    transform_record = transformer.applied[0]
    self.assertEqual(transform_record[1], TestTransform)
    self.assertEqual(transform_record[3], {'foo': 42})
def parse(filehandle):
    """ Parse a document read from the given filehandle into a
    :class:`dmr.data.Document` object.

    The document must contain:

    * A top-level title, the resume owner's name;
    * A :class:`docutils.nodes.line_block` containing contact information
      for the resume, to be parsed with :func:`dmr.data.Contact.parse`; and
    * Any number of subsections that conform to the restrictions of the
      various :class:`dmr.data.Section` subclasses.

    :param filehandle: The file-like object to parse the document from.
    :type filehandle: file
    :returns: :class:`dmr.data.Document`
    """
    parser = Parser()
    settings = OptionParser(components=(Parser,)).get_default_values()
    logger.info("Parsing document from %s" % filehandle.name)
    document = new_document(filehandle.name, settings)
    try:
        parser.parse(filehandle.read(), document)
    except IOError:
        fatal("Could not parse %s: %s" % (filehandle.name, sys.exc_info()[1]))

    top = None
    options = dict()
    for child in document.children:
        if isinstance(child, docutils.nodes.Structural):
            if top:
                fatal("Document must have exactly one top-level heading")
            top = child
        elif isinstance(child, docutils.nodes.comment):
            contents = child_by_class(child, docutils.nodes.Text)
            if contents and contents.startswith("options"):
                opts = contents.splitlines()
                try:
                    # see if this is a format-specific option block
                    ofmt = opts[0].split("=")[1]
                    logger.debug("Found document options for %s: %s" %
                                 (ofmt, opts[1:]))
                except IndexError:
                    ofmt = None
                    logger.debug("Found default document options: %s" %
                                 opts[1:])
                options[ofmt] = opts[1:]
        else:
            logger.info("Skipping unknown node %s" % child)

    for ofmt in [None, config.format]:
        if ofmt in options:
            parse_document_options(options[ofmt])
    doc = Document.parse(top)
    doc.source = document
    return doc
def doctree_from_dict(data):
    if 'document' == data['node']:
        node = utils.new_document('<string>')
    else:
        node = getattr(nodes, data['node'])(rawsource='', **data.get('kwds', {}))
    for child in data.get('children', []):
        node += doctree_from_dict(child)
    return node
def make_document(tree, settings=None):
    """Return a docutils Document tree constructed from this Python tree.

    The tree given must be either a Package or Module tree.
    """
    # @@@ Can it ever be anything other than a package or module?
    # I'd assert not - the module is the basic "smallest unit".
    # Should we test that?
    if isinstance(tree, Package):
        document = new_document("Package %s" % tree.filename, settings)
        section = make_package_section(tree)
    else:
        document = new_document("Module %s" % os.path.splitext(tree.filename)[0],
                                settings)
        section = make_module_section(tree)
    document.append(section)
    return document
def new_document(self):
    document = new_document(self.source.source_path, self.settings)
    # Capture all warning messages.
    document.reporter.attach_observer(self.report)
    # These are used so we know how to encode warning messages:
    self._encoding = document.reporter.encoding
    self._error_handler = document.reporter.error_handler
    # Return the new document.
    return document
def parse_(rst):
    document = utils.new_document(b'test data', settings)
    document['file'] = 'dummy'
    parser = RstParser()
    parser.parse(rst, document)
    for msg in document.traverse(nodes.system_message):
        if msg['level'] == 1:
            msg.replace_self([])
    return document
def buildDocument(oldTree, newTree, settings):
    """Returns a new document for the result of converting `oldTree` to
    `newTree`."""
    if (not isinstance(oldTree, docutils.nodes.document)
            or not isinstance(newTree, docutils.nodes.document)):
        raise TypeError("Roots of trees must be documents")
    return new_document(u"%s => %s" % (
        settings._old_source,
        settings._new_source,
    ), settings)
def make_citation(label, text, settings):
    name = fully_normalize_name(label)
    citation = nodes.citation(text)
    citation += nodes.label('', label)
    new_doc = new_document('temp-string', settings)
    parser = Parser()
    parser.parse(text, new_doc)
    citation['names'].append(name)
    citation += new_doc.children
    return citation
def walk(p):
    d2 = new_document(
        p.pageName(),
        OptionParser(components=(Parser,)).get_default_values())
    Parser().parse(p.text(), d2)
    d += d2.traverse()
    for c in page.childrenNesting():
        c = p.pageWithName(c)
        walk(c)
        c.delete()
def render_partial(self, node):
    """Utility: Render a lone doctree node."""
    doc = new_document('<partial node>')
    doc.append(node)
    return publish_parts(
        doc,
        source_class=DocTreeInput,
        reader=DoctreeReader(),
        writer=HTMLWriter(self),
        settings_overrides={'output_encoding': 'unicode'}
    )
def test_altered_title(self):
    from docutils import utils, nodes
    from docutils.core import publish_from_doctree
    doc = utils.new_document('<program>')
    doc['title'] = 'Altered Title'
    doc.append(nodes.title('', '', nodes.Text('Title')))
    doc.append(nodes.paragraph('', '', nodes.Text('some text.')))
    chk = '.. title:: Altered Title\n\n======\nTitle\n======\n\nsome text.\n'
    out = publish_from_doctree(
        doc, writer=rst.Writer(),
        settings_overrides={'explicit_title': False})
    self.assertMultiLineEqual(out, chk)
def test_transforms(self):
    if self.run_in_debugger:
        pdb.set_trace()
    document = utils.new_document('test data', self.settings)
    self.parser.parse(self.input, document)
    # Don't do a ``populate_from_components()`` because that would
    # enable the Transformer's default transforms.
    document.transformer.add_transforms(self.transforms)
    document.transformer.add_transform(universal.TestMessages)
    document.transformer.components['writer'] = self
    document.transformer.apply_transforms()
    output = document.pformat()
    self.compare_output(self.input, output, self.expected)
def run(self):
    parser = CommonMarkParser()
    document = self.state.document
    filename = self.arguments[0]
    curdir = getattr(document.settings, "_source", None) or os.getcwd()
    filepath = os.path.join(curdir, filename)
    with open(filepath) as rf:
        text = rf.read()
    subdocument = new_document(filepath)
    parser.parse(text, subdocument)
    return subdocument.children
def write(self, *ignored):
    writer = TextWriter(self)
    for label in self.status_iterator(pydoc_topic_labels,
                                      "building topics... ",
                                      length=len(pydoc_topic_labels)):
        if label not in self.env.domaindata["std"]["labels"]:
            self.warn("label %r not in documentation" % label)
            continue
        docname, labelid, sectname = self.env.domaindata["std"]["labels"][label]
        doctree = self.env.get_and_resolve_doctree(docname, self)
        document = new_document("<section node>")
        document.append(doctree.ids[labelid])
        destination = StringOutput(encoding="utf-8")
        writer.write(document, destination)
        self.topics[label] = str(writer.output)
def auto_code_block(self, node):
    """Try to automatically generate nodes for codeblock syntax.

    Parameters
    ----------
    node : nodes.literal_block
        Original codeblock node

    Returns
    -------
    tocnode : docutils node
        The converted toc tree node, None if conversion is not possible.
    """
    assert isinstance(node, nodes.literal_block)
    if 'language' not in node:
        return None
    self.state_machine.reset(self.document, node.parent, self.current_level)
    content = node.rawsource.split('\n')
    language = node['language']
    if language == 'math':
        if self.config['enable_math']:
            return self.state_machine.run_directive('math', content=content)
    elif language == 'eval_rst':
        if self.config['enable_eval_rst']:
            # allow embedding rst that is not at section level
            node = nodes.section()
            self.state_machine.state.nested_parse(
                StringList(content, source=node.rawsource),
                0, node=node, match_titles=False)
            return node.children[:]
    else:
        match = re.search(r'[ ]?[\w_-]+::.*', language)
        if match:
            parser = Parser()
            new_doc = new_document(None, self.document.settings)
            newsource = u'.. ' + match.group(0) + '\n' + node.rawsource
            parser.parse(newsource, new_doc)
            return new_doc.children[:]
        else:
            return self.state_machine.run_directive(
                'code-block', arguments=[language], content=content)
    return None
def parse_doc(dir, file):
    parser = Parser()
    wd = os.getcwd()
    os.chdir(dir)
    with io.open(join(dir, file + '.rst'), encoding='utf-8') as fh:
        doc = new_document(
            file,
            OptionParser(
                components=(docutils.parsers.rst.Parser,)
            ).get_default_values(),
        )
        parser.parse(fh.read(), doc)
    os.chdir(wd)
    return doc
def render_map(builder, node):
    if node:
        doc = new_document(b('<partial node>'))
        doc.append(node)
        publisher = Publisher(source_class=DocTreeInput,
                              destination_class=StringOutput)
        publisher.set_components('standalone', 'restructuredtext', 'pseudoxml')
        publisher.reader = DoctreeReader()
        publisher.writer = DitaMapWriter(builder)
        publisher.process_programmatic_settings(
            None, {'output_encoding': 'utf-8'}, None)
        publisher.set_source(doc, None)
        publisher.set_destination(None, None)
        publisher.publish()
        return publisher.writer.output
    output = XML_HEAD
    output += u"<map></map>"
    return output
def _parse_rst(text: List[str]) -> Document:
    """Parse the given list of text lines in the reStructuredText format.

    Args:
        text: The list of text lines parsed in the reStructuredText format.

    Returns:
        The Docutils document root.
    """
    parser = RSTParser()
    settings = DocOptParser(components=(RSTParser,)).get_default_values()
    document = new_document('<rst-doc>', settings=settings)
    parser.parse('\n'.join(text), document)
    return document
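# Hypothetical usage sketch for _parse_rst() above; RSTParser and
# DocOptParser are assumed to alias docutils.parsers.rst.Parser and
# docutils.frontend.OptionParser in the surrounding module.
doc = _parse_rst([
    "Usage",
    "=====",
    "",
    "Run ``tool --help`` for details.",
])
print(doc.astext())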
def assemble_doctree(self, indexfile, toctree_only, appendices):
    self.docnames = set([indexfile] + appendices)
    self.info(darkgreen(indexfile) + " ", nonl=1)
    tree = self.env.get_doctree(indexfile)
    tree['docname'] = indexfile
    if toctree_only:
        # extract toctree nodes from the tree and put them in a
        # fresh document
        new_tree = new_document('<pythonbook output>')
        new_sect = nodes.section()
        new_sect += nodes.title('<Set title in conf.py>',
                                '<Set title in conf.py>')
        new_tree += new_sect
        for node in tree.traverse(addnodes.toctree):
            new_sect += node
        tree = new_tree
    try:
        largetree = inline_all_toctrees(self, self.docnames, indexfile, tree,
                                        darkgreen)
    except:
        largetree = inline_all_toctrees(self, self.docnames, indexfile, tree,
                                        darkgreen, [indexfile])
    largetree['docname'] = indexfile
    for docname in appendices:
        appendix = self.env.get_doctree(docname)
        appendix['docname'] = docname
        largetree.append(appendix)
    self.info()
    self.info("resolving references...")
    self.env.resolve_references(largetree, indexfile, self)
    # resolve :ref:s to distant tex files -- we can't add a cross-reference,
    # but append the document name
    for pendingnode in largetree.traverse(addnodes.pending_xref):
        docname = pendingnode['refdocname']
        sectname = pendingnode['refsectname']
        newnodes = [nodes.emphasis(sectname, sectname)]
        for subdir, title in self.titles:
            if docname.startswith(subdir):
                newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
                newnodes.append(nodes.emphasis(title, title))
                newnodes.append(nodes.Text(')', ')'))
                break
        else:
            pass
        pendingnode.replace_self(newnodes)
    return largetree
def preload(self, filename, encoding='utf-8', errors='strict'):
    '''Preload a rst file to get its toctree and its title.

    The result will be stored in :attr:`toctrees` with the ``filename``
    as key.
    '''
    with open(filename, 'rb') as fd:
        text = fd.read().decode(encoding, errors)
    # parse the source
    document = utils.new_document('Document', self._settings)
    self._parser.parse(text, document)
    # fill the current document node
    visitor = _ToctreeVisitor(document)
    document.walkabout(visitor)
    self.toctrees[filename] = visitor.toctree
    return text
def test_inputrestrictions(self):
    parser_class = parsers.get_parser_class('rst')
    parser = parser_class()
    document = utils.new_document(
        'test data',
        frontend.OptionParser(components=(parser,)).get_default_values())
    if sys.version_info < (3,):
        # supplying string input is supported, but only if ascii-decodable
        self.assertRaises(
            UnicodeError,  # UnicodeDecodeError since py2.3
            parser.parse, b('hol%s' % chr(224)), document)
    else:
        # input must be unicode at all times
        self.assertRaises(TypeError, parser.parse, b('hol'), document)
class ErrorReportingTests(unittest.TestCase):
    """
    Test cases where error reporting can go wrong.

    Do not test the exact output (as this varies with the locale),
    just ensure that the correct exception is thrown.
    """

    # These tests fail with a 'problematic locale',
    # Docutils revision < 7035, and Python 2:

    parser = parsers.rst.Parser()
    """Parser shared by all ParserTestCases."""

    option_parser = frontend.OptionParser(components=(parsers.rst.Parser,))
    settings = option_parser.get_default_values()
    settings.report_level = 1
    settings.halt_level = 1
    settings.warning_stream = ''
    document = utils.new_document('test data', settings)

    def setUp(self):
        if testlocale:
            locale.setlocale(locale.LC_ALL, testlocale)

    def tearDown(self):
        if testlocale:
            locale.setlocale(locale.LC_ALL, oldlocale)

    def test_include(self):
        source = ('.. include:: bogus.txt')
        self.assertRaises(utils.SystemMessage,
                          self.parser.parse, source, self.document)

    def test_raw_file(self):
        source = ('.. raw:: html\n'
                  '   :file: bogus.html\n')
        self.assertRaises(utils.SystemMessage,
                          self.parser.parse, source, self.document)

    def test_csv_table(self):
        source = ('.. csv-table:: external file\n'
                  '   :file: bogus.csv\n')
        self.assertRaises(utils.SystemMessage,
                          self.parser.parse, source, self.document)
def extract_statuses_table(docstring):
    """Extract statuses table from passed docstring.

    The docstring should be written in rst and should contain a table
    with the title "Result status". The result is the rst code of the
    first table matching this description.
    """
    try:
        doc_tree = core.publish_doctree(docstring)
        table = _find_element(doc_tree, nodes.table)
        title = _find_element(table, nodes.title)
        if title[0] == 'Result status':
            document = utils.new_document('<string>')
            document += table
        else:
            raise LookupError('Statuses table not found')
    except IndexError:
        raise LookupError('Statuses table not found')
    return core.publish_from_doctree(document, writer=Writer()).decode()
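# Hypothetical usage sketch for extract_statuses_table() above; the docstring
# shape is illustrative, assuming _find_element() returns the first matching
# node and that the ``.. table::`` directive supplies the table title.
docstring = """Run the check.

.. table:: Result status

   =======  =================
   Status   Meaning
   =======  =================
   success  All checks passed
   failure  Some check failed
   =======  =================
"""
print(extract_statuses_table(docstring))  # rst source of the table only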
def test_transforms_verbosely(self):
    if self.run_in_debugger:
        pdb.set_trace()
    print '\n', self.id
    print '-' * 70
    print self.input
    settings = self.settings.copy()
    settings.__dict__.update(self.suite_settings)
    document = utils.new_document('test data', settings)
    self.parser.parse(self.input, document)
    print '-' * 70
    print document.pformat()
    for transformClass in self.transforms:
        transformClass(document).apply()
    output = document.pformat()
    print '-' * 70
    print output
    self.compare_output(self.input, output, self.expected)
def strip_rst(line):
    if line.endswith("::"):
        # Drop :: at the end, it would cause "Literal block expected"
        line = line[:-2]
    parser = docutils.parsers.rst.Parser()
    components = (docutils.parsers.rst.Parser,)
    settings = docutils.frontend.OptionParser(
        components=components).get_default_values()
    stderr_stringio = io.StringIO()
    with redirect_stderr(stderr_stringio):
        document = new_document("<rst-doc>", settings=settings)
        parser.parse(line, document)
    stderr = stderr_stringio.getvalue()
    if stderr:
        print(stderr.strip(), "while parsing:", line)
    visitor = NodeToTextVisitor(document)
    document.walk(visitor)
    return str(visitor)
def apply_transforms(self):
    # type: () -> None
    if isinstance(self.document, nodes.document):
        if not hasattr(self.document.settings, 'env') and self.env:
            self.document.settings.env = self.env
        Transformer.apply_transforms(self)
    else:
        # wrap the target node in a document node during transforming
        try:
            document = new_document('')
            if self.env:
                document.settings.env = self.env
            document += self.document
            self.document = document
            Transformer.apply_transforms(self)
        finally:
            self.document = self.document[0]
def aFunction(string):
    source = StringInput(string, encoding='utf-8')
    docname = "fake"
    __app.env.temp_data['docname'] = docname
    settings = frontend.OptionParser((Writer,)).get_default_values()
    settings.tab_width = 8
    settings.pep_references = False
    settings.rfc_references = False
    settings.env = __app.env
    reader = DoctreeReader()
    parser = parsers.get_parser_class("rst")()
    docu = utils.new_document(source.source_path, settings)
    parser.parse(source.read(), docu)
    __app.builder.prepare_writing((docname,))
    return __app.builder.write_doc(docname, docu).destination
def _load_from_text(self, *largs):
    try:
        # clear the current widgets
        self.content.clear_widgets()
        self.anchors_widgets = []
        self.refs_assoc = {}
        # parse the source
        document = utils.new_document('Document', self._settings)
        self._parser.parse(self.text, document)
        # fill the current document node
        visitor = _Visitor(self, document)
        document.walkabout(visitor)
        self.title = visitor.title or 'No title'
    except:
        Logger.exception('Rst: error while loading text')
def basic_test_document(text='', settings_overrides={}):
    reader_name = 'standalone'
    parser_name = 'restructuredtext'
    reader_class = readers.get_reader_class(reader_name)
    reader = reader_class(parser_name=parser_name)
    parser = reader.parser
    # We test some things that generate warnings, but don't want those
    # warnings to pollute the test output.
    settings_overrides['warning_stream'] = sys.stdout
    options = frontend.OptionParser(components=(parser, reader),
                                    defaults=settings_overrides)
    settings = options.get_default_values()
    document = utils.new_document('rst_test_utils', settings)
    parser.parse(text, document)
    # print 'parser.parse(), document=\n%s' % document.asdom().childNodes[0].toprettyxml(' ', '\n')
    return document
def write(self, *ignored):
    try:  # sphinx>=1.6
        from sphinx.util import status_iterator
    except ImportError:  # sphinx<1.6
        status_iterator = self.status_iterator

    writer = TextWriter(self)
    for label in status_iterator(pydoc_topic_labels,
                                 'building topics... ',
                                 length=len(pydoc_topic_labels)):
        if label not in self.env.domaindata['std']['labels']:
            self.warn('label %r not in documentation' % label)
            continue
        docname, labelid, sectname = self.env.domaindata['std']['labels'][label]
        doctree = self.env.get_and_resolve_doctree(docname, self)
        document = new_document('<section node>')
        document.append(doctree.ids[labelid])
        destination = StringOutput(encoding='utf-8')
        writer.write(document, destination)
        self.topics[label] = writer.output
def verify_re(rst, html_expected, latex_expected):
    document = utils.new_document('test data', settings)
    parser.parse(rst, document)
    for msg in document.traverse(nodes.system_message):
        if msg['level'] == 1:
            msg.replace_self([])

    if html_expected:
        html_translator = ForgivingHTMLTranslator(app.builder, document)
        document.walkabout(html_translator)
        html_translated = ''.join(html_translator.fragment).strip()
        assert re.match(html_expected, html_translated), 'from ' + rst

    if latex_expected:
        latex_translator = ForgivingLaTeXTranslator(document, app.builder)
        latex_translator.first_document = -1  # don't write \begin{document}
        document.walkabout(latex_translator)
        latex_translated = ''.join(latex_translator.body).strip()
        assert re.match(latex_expected, latex_translated), 'from ' + repr(rst)
def assemble_doctree(self, indexfile, toctree_only, appendices):
    # type: (unicode, bool, List[unicode]) -> nodes.Node
    self.docnames = set([indexfile] + appendices)
    logger.info(darkgreen(indexfile) + " ", nonl=1)
    tree = self.env.get_doctree(indexfile)
    tree['docname'] = indexfile
    if toctree_only:
        # extract toctree nodes from the tree and put them in a
        # fresh document
        new_tree = new_document('<texinfo output>')
        new_sect = nodes.section()
        new_sect += nodes.title(u'<Set title in conf.py>',
                                u'<Set title in conf.py>')
        new_tree += new_sect
        for node in tree.traverse(addnodes.toctree):
            new_sect += node
        tree = new_tree
    largetree = inline_all_toctrees(self, self.docnames, indexfile, tree,
                                    darkgreen, [indexfile])
    largetree['docname'] = indexfile
    for docname in appendices:
        appendix = self.env.get_doctree(docname)
        appendix['docname'] = docname
        largetree.append(appendix)
    logger.info('')
    logger.info("resolving references...")
    self.env.resolve_references(largetree, indexfile, self)
    # TODO: add support for external :ref:s
    for pendingnode in largetree.traverse(addnodes.pending_xref):
        docname = pendingnode['refdocname']
        sectname = pendingnode['refsectname']
        newnodes = [nodes.emphasis(sectname, sectname)]
        for subdir, title in self.titles:
            if docname.startswith(subdir):
                newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
                newnodes.append(nodes.emphasis(title, title))
                newnodes.append(nodes.Text(')', ')'))
                break
        else:
            pass
        pendingnode.replace_self(newnodes)
    return largetree
def _lint_docutils(source, fpath, Parser, traceback):
    from io import StringIO
    from docutils.utils import new_document
    from docutils.frontend import OptionParser
    from docutils.utils import Reporter
    from .docutils import JsErrorPrinter

    parser = Parser()
    settings = OptionParser(components=(Parser,)).get_default_values()
    settings.traceback = traceback
    observer = JsErrorPrinter(StringIO(), settings)
    document = new_document(fpath, settings)
    document.reporter.report_level = 0  # Report all messages
    document.reporter.halt_level = Reporter.SEVERE_LEVEL + 1  # Do not exit early
    document.reporter.stream = False  # Disable textual reporting
    document.reporter.attach_observer(observer)
    parser.parse(source, document)
    return observer.stream.getvalue()
def preload(self, filename):
    '''Preload a rst file to get its toctree and its title.

    The result will be stored in :data:`toctrees` with the ``filename``
    as key.
    '''
    if filename in self.toctrees:
        return
    if not exists(filename):
        return
    with open(filename) as fd:
        text = fd.read()
    # parse the source
    document = utils.new_document('Document', self._settings)
    self._parser.parse(text, document)
    # fill the current document node
    visitor = _ToctreeVisitor(document)
    document.walkabout(visitor)
    self.toctrees[filename] = visitor.toctree
def convert_element(self, elem):
    """Recursively convert an element from the ElementTree to Docutils
    nodes.
    """
    if elem.tag == 'document':
        node = utils.new_document('*XML*')
    else:
        nodetype = getattr(nodes, elem.tag)
        node = nodetype()
    for attr in elem.keys():
        if attr != '{http://www.w3.org/XML/1998/namespace}space':
            # this is just part of the XML output, not the docutils node
            node[attr] = elem.get(attr)
    if elem.text:
        node.append(nodes.Text(elem.text))
    for child in elem:
        node.append(self.convert_element(child))
        if child.tail:
            node.append(nodes.Text(child.tail))
    return node
def build_row(item):
    """Return nodes.row with property description"""
    prop, propschema, required = item
    row = nodes.row()

    # Property
    row += nodes.entry('', nodes.paragraph(text=prop))

    # Type
    str_type = type_description(propschema)
    par_type = nodes.paragraph()
    is_text = True
    for part in reClassDef.split(str_type):
        if part:
            if is_text:
                par_type += nodes.Text(part)
            else:
                par_type += addnodes.pending_xref(
                    reftarget=part,
                    reftype="class",
                    refdomain=None,  # py:class="None" py:module="altair"
                    refdoc="user_guide/marks",
                    refexplicit=False,
                    refwarn=False)
                par_type += nodes.literal(text=part, classes="xref py py-class")
        is_text = not is_text
    row += nodes.entry('', par_type)

    # Description
    md_parser = CommonMarkParser()
    str_descr = "***Required.*** " if required else ""
    str_descr += propschema.get('description', ' ')
    doc_descr = utils.new_document("schema_description")
    md_parser.parse(str_descr, doc_descr)
    row += nodes.entry('', *doc_descr.children)

    return row
def extract_summary(obj):
    # type: (Any) -> unicode
    """Extract summary from docstring."""
    try:
        doc = inspect.getdoc(obj).split("\n")
    except AttributeError:
        doc = ''

    # Skip blank lines at the top
    while doc and not doc[0].strip():
        doc.pop(0)

    # If there's a blank line, then we can assume the first sentence /
    # paragraph has ended, so anything after shouldn't be part of the
    # summary
    for i, piece in enumerate(doc):
        if not piece.strip():
            doc = doc[:i]
            break

    # Try to find the "first sentence", which may span multiple lines
    sentences = periods_re.split(" ".join(doc))  # type: ignore
    if len(sentences) == 1:
        summary = sentences[0].strip()
    else:
        summary = ''
        state_machine = RSTStateMachine(state_classes, 'Body')
        while sentences:
            summary += sentences.pop(0) + '.'
            node = new_document('')
            node.reporter = NullReporter('', 999, 4)
            node.settings.pep_references = None
            node.settings.rfc_references = None
            state_machine.run([summary], node)
            if not node.traverse(nodes.system_message):
                # no system messages: splitting at this period did not
                # break any inline markup
                break
    return summary
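# Hypothetical usage sketch for extract_summary() above; assumes the
# module-level helpers it relies on (periods_re, RSTStateMachine,
# state_classes, NullReporter) are in scope as in the snippet.
def frobnicate():
    """Frobnicate the widget. This second sentence is not in the summary."""

print(extract_summary(frobnicate))  # -> "Frobnicate the widget."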
def analyze(self, content: str):
    parser = Parser()
    components = (Parser,)
    settings = OptionParser(components=components).get_default_values()
    document = new_document('<rst-doc>', settings=settings)
    parser.parse(content, document)

    result = {}

    def assign_result(value):
        entries = value.split('\n')
        for entry in entries:
            key, value = entry.split('=')
            value = value.strip()
            if ',' in value:
                value = [item.strip() for item in value.split(',')]
            result[key.strip()] = value

    visitor = _RstVisitor(document, callback=assign_result)
    document.walk(visitor)
    return result
def render_partial(self, node):
    """Utility: Render a lone doctree node."""
    doc = new_document('<partial node>')
    doc.append(node)

    if self._publisher is None:
        self._publisher = Publisher(source_class=DocTreeInput,
                                    destination_class=StringOutput)
        self._publisher.set_components('standalone', 'restructuredtext',
                                       'pseudoxml')

    pub = self._publisher
    pub.reader = DoctreeReader()
    pub.writer = HTMLWriter(self)
    pub.process_programmatic_settings(None, {'output_encoding': 'unicode'}, None)
    pub.set_source(doc, None)
    pub.set_destination(None, None)
    pub.publish()
    return pub.writer.parts
def check_docutils_inliner(po, msgstr):
    inliner = Inliner()
    settings = AttrDict({
        'character_level_inline_markup': False,
        'pep_references': None,
        'rfc_references': None,
    })
    inliner.init_customizations(settings)
    document = new_document(None)
    document.settings.syntax_highlight = 'long'
    stream = StringIO()
    reporter = Reporter(po.file,
                        report_level=Reporter.WARNING_LEVEL,
                        halt_level=Reporter.SEVERE_LEVEL,
                        stream=stream)
    memo = Struct(document=document,
                  reporter=reporter,
                  language=None,
                  inliner=inliner)
    inliner.parse(msgstr, po.current_index, memo, None)
    return stream.getvalue()
def parse_rst(rst_string):
    from abjad.tools import abjadbooktools
    parser = Parser()
    directives.register_directive('abjad', abjadbooktools.AbjadDirective)
    directives.register_directive('import', abjadbooktools.ImportDirective)
    directives.register_directive('reveal', abjadbooktools.RevealDirective)
    directives.register_directive('shell', abjadbooktools.ShellDirective)
    settings = OptionParser(components=(Parser,)).get_default_values()
    document = new_document('test', settings)
    parser.parse(rst_string, document)
    document = parser.document
    return document
def create_subdocument(self, chunk):
    """If `chunk` wraps a section node, create and return a new document
    node with the section added, otherwise return the root node.
    """
    if isinstance(chunk.node, nodes.document):
        return chunk.node

    # Create a new document.
    doctree = utils.new_document(self.settings._source,
                                 self.document.settings)

    # Add the decoration (header and footer).
    if self.document.decoration and len(self.document.decoration):
        root_decor = self.document.decoration
        decor = doctree.get_decoration()
        # Note: We can't use root_decor.get_{header,footer}(), because
        # they create the header/footer if it is missing. We don't want
        # that.
        if isinstance(root_decor[0], nodes.header):
            header = decor.get_header()
            for n in root_decor[0]:
                header.append(n.deepcopy())
        if isinstance(root_decor[-1], nodes.footer):
            footer = decor.get_footer()
            for n in root_decor[-1]:
                footer.append(n.deepcopy())

    ## Copy <meta> nodes.
    #for meta, dummy in self.meta_nodes:
    #    doctree.append(meta.deepcopy())

    # Now add the copy of the section, and set the document node's title
    # from the section title.
    doctree += chunk.node.deepcopy()
    if isinstance(doctree[-1][0], nodes.title):
        n = doctree[-1][0]
        # XXX: Skip generated section number? (Should be configurable.)
        doctree['title'] = chunk.get_title()
    return doctree
def apply(self):
    config = self.document.settings.env.config
    settings, source = self.document.settings, self.document['source']

    codes = resource_filename(__name__, 'codes.json')
    replacements = json.load(open(codes, encoding='utf-8'))
    to_handle = (set(replacements.keys())
                 - set(self.document.substitution_defs))

    for ref in self.document.traverse(nodes.substitution_reference):
        refname = ref['refname']
        if refname in to_handle:
            text = replacements[refname]

            doc = new_document(source, settings)
            doc.reporter = LoggingReporter.from_reporter(doc.reporter)
            self.parser.parse(text, doc)

            substitution = doc.next_node()
            # Remove encapsulating paragraph
            if isinstance(substitution, nodes.paragraph):
                substitution = substitution.next_node()
            ref.replace_self(substitution)
def renderList(l, markDownHelp, settings=None):
    """
    Given a list of reStructuredText or MarkDown sections, return a docutils
    node list
    """
    if len(l) == 0:
        return []
    if markDownHelp:
        from sphinxarg.markdown import parseMarkDownBlock
        return parseMarkDownBlock('\n\n'.join(l) + '\n')
    else:
        all_children = []
        for element in l:
            if isinstance(element, str):
                if settings is None:
                    settings = OptionParser(
                        components=(Parser,)).get_default_values()
                document = new_document(None, settings)
                Parser().parse(element + '\n', document)
                all_children += document.children
            elif isinstance(element, nodes.definition):
                all_children += element
        return all_children
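# Hypothetical usage sketch for renderList() above; assumes OptionParser
# (docutils.frontend) and Parser (docutils.parsers.rst) are imported at
# module level, as the function body suggests.
sections = ["First *section*.", "Second section with ``code``."]
for node in renderList(sections, False):
    print(node.astext())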