def _gen_docutils_html(source, fpath, webpage_style,
                       include_banner, include_vernums,
                       html_assets, traceback, Parser, Reader):
    from docutils.core import publish_string
    from .docutils import HtmlTranslator, HtmlWriter

    # The encoding/decoding dance below happens because setting output_encoding
    # to "unicode" causes reST to generate a bad <meta> tag, and setting
    # input_encoding to "unicode" breaks the ‘.. include’ directive.

    html_assets.extend(HtmlTranslator.JS + HtmlTranslator.CSS)

    settings_overrides = {
        'traceback': traceback,
        'embed_stylesheet': False,
        'stylesheet_path': None,
        'stylesheet_dirs': [],
        'alectryon_banner': include_banner,
        'alectryon_vernums': include_vernums,
        'webpage_style': webpage_style,
        'input_encoding': 'utf-8',
        'output_encoding': 'utf-8'
    }

    parser = Parser()
    return publish_string(
        source=source.encode("utf-8"),
        source_path=fpath, destination_path=None,
        reader=Reader(parser), reader_name=None,
        parser=parser, parser_name=None,
        writer=HtmlWriter(), writer_name=None,
        settings=None, settings_spec=None,
        settings_overrides=settings_overrides,
        config_section=None,
        enable_exit_status=True).decode("utf-8")
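# A minimal usage sketch of the function above (hypothetical): the stock
# docutils classes stand in for the project-specific Parser/Reader pair, and
# "centered" is assumed to be an accepted webpage_style value.  This only runs
# from inside the package, since the function imports a sibling module.
from docutils.parsers.rst import Parser as RSTParser
from docutils.readers.standalone import Reader as StandaloneReader

assets = []
html = _gen_docutils_html(
    source="Hello *world*!\n", fpath="example.rst",
    webpage_style="centered", include_banner=True, include_vernums=True,
    html_assets=assets, traceback=False,
    Parser=RSTParser, Reader=StandaloneReader)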
def get_transforms(self):
    # Remove the DocInfo transform, to ensure that :author: fields
    # are correctly handled.
    return [t for t in StandaloneReader.get_transforms(self)
            if t != docutils.transforms.frontmatter.DocInfo]
def get_transforms(self): """ Returns the standard `Reader` transforms with `DocTitle` replaced. """ transforms = list(Reader.get_transforms(self)) transforms[transforms.index(DocTitle)] = DocTitleNoSubtitle return transforms
def process_labels(site, logger, source, post):
    site.processing_labels = True
    pub = Publisher(reader=Reader(), parser=None, writer=None)
    pub.set_components(None, 'restructuredtext', 'html')
    # Reading the file will generate output/errors that we don't care about
    # at this stage.  The report_level = 5 means no output.
    pub.process_programmatic_settings(
        settings_spec=None,
        settings_overrides={'report_level': 5},
        config_section=None,
    )
    pub.set_source(None, source)
    pub.publish()
    document = pub.document
    site.processing_labels = False

    # Code based on Sphinx std domain
    for name, is_explicit in document.nametypes.items():
        if not is_explicit:
            continue
        labelid = document.nameids[name]
        if labelid is None:
            continue
        node = document.ids[labelid]
        if node.tagname == 'target' and 'refid' in node:
            node = document.ids.get(node['refid'])
            labelid = node['names'][0]
        if (node.tagname == 'footnote' or 'refuri' in node
                or node.tagname.startswith('desc_')):
            continue
        if name in site.ref_labels:
            logger.warn(
                'Duplicate label {dup}, other instance in {other}'.format(
                    dup=name, other=site.ref_labels[name][0]))
        site.anon_ref_labels[name] = post.permalink(), labelid

        def clean_astext(node):
            """Like node.astext(), but ignore images.

            Taken from sphinx.util.nodes.
            """
            node = node.deepcopy()
            for img in node.traverse(nodes.image):
                img['alt'] = ''
            for raw in node.traverse(nodes.raw):
                raw.parent.remove(raw)
            return node.astext()

        if node.tagname in ('section', 'rubric'):
            sectname = clean_astext(node[0])
        else:
            continue
        site.ref_labels[name] = post.permalink(), labelid, sectname
def test_notebook_translator(testdata, name):
    """Ensure that the notebook translator converts an rst doctree into the
    expected sequence of notebook cells."""
    rst = testdata(os.path.join("doc", name + ".rst")).decode("utf8")
    json = testdata(os.path.join("doc", name + ".ipynb")).decode("utf8")
    nb = nbf.reads(json)

    # Getting docutils to generate a doctree for us appears to require some
    # gymnastics -- perhaps there is a better way?
    parser = Parser()
    settings = _get_mock_settings()
    reader = Reader()
    doctree = reader.read(StringInput(rst), parser, settings)

    translator = NotebookTranslator(doctree)
    doctree.walkabout(translator)

    actual = nbf.writes(translator.asnotebook())
    expected = nbf.writes(nb)

    assert expected == actual
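# One way the _get_mock_settings helper used above could be written (an
# assumption -- the test suite's real helper is not shown here): ask docutils
# for the default option values of the components involved and silence
# warnings while parsing.
from docutils.frontend import OptionParser
from docutils.parsers.rst import Parser
from docutils.readers.standalone import Reader


def _get_mock_settings():
    # Default settings for a standalone reST read, with reporting turned off.
    settings = OptionParser(components=(Parser, Reader)).get_default_values()
    settings.report_level = 5
    return settings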
def _docutils_cmdline(description, Reader, Parser):
    import locale
    locale.setlocale(locale.LC_ALL, '')

    from docutils.core import publish_cmdline, default_description
    from .docutils import setup, HtmlWriter
    setup()

    parser = Parser()
    publish_cmdline(
        reader=Reader(parser), parser=parser,
        writer=HtmlWriter(),
        settings_overrides={'stylesheet_path': None},
        description=(description + default_description))
def process_targets(site, logger, source, permalink):
    """Process the target locations in the reST files."""
    site.processing_targets = True
    reader = Reader()
    reader.l_settings = {"source": source}
    with open(source, "r", encoding="utf8") as in_file:
        data = in_file.read()
    pub = Publisher(
        reader=reader,
        parser=None,
        writer=None,
        settings=None,
        source_class=StringInput,
        destination_class=StringOutput,
    )
    pub.set_components(None, "restructuredtext", "html")
    # Reading the file will generate output/errors that we don't care about
    # at this stage.  The report_level = 5 means no output.
    pub.process_programmatic_settings(settings_spec=None,
                                      settings_overrides={"report_level": 5},
                                      config_section=None)
    pub.set_source(data, None)
    pub.set_destination(None, None)
    pub.publish()
    document = pub.document
    site.processing_targets = False

    # Code based on Sphinx std domain
    for name, is_explicit in document.nametypes.items():
        if not is_explicit:
            continue
        labelid = document.nameids[name]
        if labelid is None:
            continue
        node = document.ids[labelid]
        if node.tagname == "target" and "refid" in node:
            node = document.ids.get(node["refid"])
            labelid = node["names"][0]
        if (node.tagname == "footnote" or "refuri" in node
                or node.tagname.startswith("desc_")):
            continue
        if name in site.ref_targets:
            logger.warn(
                "Duplicate label {dup}, other instance in {other}".format(
                    dup=name, other=site.ref_targets[name][0]))
        site.anon_ref_targets[name] = permalink, labelid

        def clean_astext(node):
            """Like node.astext(), but ignore images.

            Taken from sphinx.util.nodes.
            """
            node = node.deepcopy()
            for img in node.traverse(nodes.image):
                img["alt"] = ""
            for raw in node.traverse(nodes.raw):
                raw.parent.remove(raw)
            return node.astext()

        if node.tagname in ("section", "rubric"):
            sectname = clean_astext(node[0])
        else:
            continue
        site.ref_targets[name] = permalink, labelid, sectname
def get_transforms(self):
    transforms = Reader().get_transforms()
    transforms.remove(Transitions)
    transforms.append(HovercraftTransitions)
    return transforms
def get_transforms(self):
    return (Reader.get_transforms(self)
            + DOCUTILS_TRANSFORMS
            + self.extra_transforms)
def get_transforms(self):
    transforms = [str2object(transform)
                  for transform in WALIKI_RST_TRANSFORMS]
    return Reader.get_transforms(self) + transforms
def __init__(self, parser=None, parser_name=None, extra_transforms=None):
    Reader.__init__(self, parser, parser_name)
    self.extra_transforms = extra_transforms or []
def new_document(self):
    document = _Reader.new_document(self)
    document.reporter.stream = False
    document.reporter.attach_observer(self.pass_to_format_logger)
    return document
def __init__(self, errors):
    self._errors = errors
    StandaloneReader.__init__(self)
def __init__(self, *args, **kwargs):
    if docutils is None:
        raise AssertionError('requires docutils')
    Reader.__init__(self, *args, **kwargs)
def read1(text):
    reader = Reader()
    source = StringInput(text)
    parser = Parser()  # one time
    document = reader.read(source, parser, get_settings())
    return document
def parse(src):
    parser = Parser()
    settings = rst_mock_settings
    reader = Reader()
    return reader.read(StringInput(src), parser, settings)
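# Hypothetical usage of parse: feed it a small reST fragment and inspect the
# resulting doctree (pformat pretty-prints the node tree).  This assumes
# rst_mock_settings carries valid default settings for the standalone reader.
doctree = parse("Title\n=====\n\nSome *emphasized* text.\n")
print(doctree.pformat())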
def read(self, source, parser, settings):
    self.read_configuration(settings, problematic=True)
    return Reader.read(self, source, parser, settings)
def get_transforms(self):
    return StandaloneReader.get_transforms(self) + [
        BlogMetaTransform,
    ]
def __init__(self, errors: List[ParseError]):
    self._errors = errors
    StandaloneReader.__init__(self)
def get_transforms(self):
    transforms = [import_string(transform)
                  for transform in WALIKI_RST_TRANSFORMS]
    return Reader.get_transforms(self) + transforms
def get_transforms(self):
    # AlectryonTransform is not added here because the CoqDirective adds it.
    return Reader.get_transforms(self) + self.extra_transforms