Example #1
def extract_summary(doc, document):
    # type: (List[unicode], Any) -> unicode
    """Extract summary from docstring."""

    # Skip blank lines at the top
    while doc and not doc[0].strip():
        doc.pop(0)

    # If there's a blank line, then we can assume the first sentence /
    # paragraph has ended, so anything after shouldn't be part of the
    # summary
    for i, piece in enumerate(doc):
        if not piece.strip():
            doc = doc[:i]
            break

    # Try to find the "first sentence", which may span multiple lines
    sentences = periods_re.split(" ".join(doc))  # type: ignore
    if len(sentences) == 1:
        summary = sentences[0].strip()
    else:
        summary = ''
        state_machine = RSTStateMachine(state_classes, 'Body')
        while sentences:
            summary += sentences.pop(0) + '.'
            node = new_document('', document.settings)
            node.reporter = NullReporter()
            state_machine.run([summary], node)
            if not node.traverse(nodes.system_message):
                # no system_message: splitting at this period did not break inline markup
                break

    return summary
Example #2
def test_extract_summary(capsys):
    from sphinx.util.docutils import new_document
    from mock import Mock
    settings = Mock(language_code='',
                    id_prefix='',
                    auto_id_prefix='',
                    pep_reference=False,
                    rfc_reference=False)
    document = new_document('', settings)

    # normal case
    doc = ['',
           'This is a first sentence. And second one.',
           '',
           'Second block is here']
    assert extract_summary(doc, document) == 'This is a first sentence.'

    # inliner case
    doc = ['This sentence contains *emphasis text having dots.*,',
           'it does not break sentence.']
    assert extract_summary(doc, document) == ' '.join(doc)

    # abbreviations
    doc = ['Blabla, i.e. bla.']
    assert extract_summary(doc, document) == 'Blabla, i.e.'

    _, err = capsys.readouterr()
    assert err == ''
Example #3
def extract_summary(doc, document):
    # type: (List[str], Any) -> str
    """Extract summary from docstring."""

    # Skip blank lines at the top
    while doc and not doc[0].strip():
        doc.pop(0)

    # If there's a blank line, then we can assume the first sentence /
    # paragraph has ended, so anything after shouldn't be part of the
    # summary
    for i, piece in enumerate(doc):
        if not piece.strip():
            doc = doc[:i]
            break

    if doc == []:
        return ''

    # parse the docstring
    state_machine = RSTStateMachine(state_classes, 'Body')
    node = new_document('', document.settings)
    node.reporter = NullReporter()
    state_machine.run(doc, node)

    if not isinstance(node[0], nodes.paragraph):
        # document starts with non-paragraph: pick up the first line
        summary = doc[0].strip()
    else:
        # Try to find the "first sentence", which may span multiple lines
        sentences = periods_re.split(" ".join(doc))
        if len(sentences) == 1:
            summary = sentences[0].strip()
        else:
            summary = ''
            while sentences:
                summary += sentences.pop(0) + '.'
                node[:] = []
                state_machine.run([summary], node)
                if not node.traverse(nodes.system_message):
                    # no system_message: splitting at this period did not break inline markup
                    break

    # strip the literal-block marker ``::`` from the tail of the summary
    summary = literal_re.sub('.', summary)

    return summary
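
Note that the extract_summary variants in Examples #1 and #3 also depend on two module-level regular expressions, periods_re and literal_re, that none of the snippets define. A minimal sketch of definitions consistent with how they are used here (and with the expected results in the accompanying tests) would be the following; the exact patterns may differ between Sphinx releases:

import re

# Assumed helpers, not shown in the snippets above:
# split on a period that ends a sentence (a period followed by whitespace)
periods_re = re.compile(r'\.(?:\s+)')
# match a trailing "::" literal-block marker so it can be rewritten as "."
literal_re = re.compile(r'::\s*$')
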
Example #4
def test_RSTParser_prolog_epilog(RSTStateMachine, app):
    document = new_document('dummy.rst')
    document.settings = Mock(tab_width=8, language_code='')
    parser = RSTParser()
    parser.set_application(app)

    # normal case
    text = ('hello Sphinx world\n'
            'Sphinx is a document generator')
    parser.parse(text, document)
    (content, _), _ = RSTStateMachine().run.call_args

    assert list(content.xitems()) == [('dummy.rst', 0, 'hello Sphinx world'),
                                      ('dummy.rst', 1, 'Sphinx is a document generator')]

    # with rst_prolog
    app.env.config.rst_prolog = 'this is rst_prolog\nhello reST!'
    parser.parse(text, document)
    (content, _), _ = RSTStateMachine().run.call_args
    assert list(content.xitems()) == [('<rst_prolog>', 0, 'this is rst_prolog'),
                                      ('<rst_prolog>', 1, 'hello reST!'),
                                      ('<generated>', 0, ''),
                                      ('dummy.rst', 0, 'hello Sphinx world'),
                                      ('dummy.rst', 1, 'Sphinx is a document generator')]

    # with rst_epilog
    app.env.config.rst_prolog = None
    app.env.config.rst_epilog = 'this is rst_epilog\ngood-bye reST!'
    parser.parse(text, document)
    (content, _), _ = RSTStateMachine().run.call_args
    assert list(content.xitems()) == [('dummy.rst', 0, 'hello Sphinx world'),
                                      ('dummy.rst', 1, 'Sphinx is a document generator'),
                                      ('<generated>', 0, ''),
                                      ('<rst_epilog>', 0, 'this is rst_epilog'),
                                      ('<rst_epilog>', 1, 'good-bye reST!')]

    # expandtabs / convert whitespaces
    app.env.config.rst_prolog = None
    app.env.config.rst_epilog = None
    text = ('\thello Sphinx world\n'
            '\v\fSphinx is a document generator')
    parser.parse(text, document)
    (content, _), _ = RSTStateMachine().run.call_args
    assert list(content.xitems()) == [('dummy.rst', 0, '        hello Sphinx world'),
                                      ('dummy.rst', 1, '  Sphinx is a document generator')]
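
The RSTStateMachine and app arguments in the test above are injected from outside the snippet: app is presumably the Sphinx test application fixture, and RSTStateMachine a mock patched over the docutils state machine so that its run() call simply records the StringList it receives. A plausible setup, assuming pytest and unittest.mock (the marker and patch target are assumptions, not taken from the snippet):

from unittest.mock import patch

import pytest


@pytest.mark.sphinx(testroot='basic')                    # assumed test root
@patch('docutils.parsers.rst.states.RSTStateMachine')    # assumed patch target
def test_RSTParser_prolog_epilog(RSTStateMachine, app):
    ...  # body as in the example above
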
Example #5
    def apply_transforms(self):
        # type: () -> None
        if isinstance(self.document, nodes.document):
            if not hasattr(self.document.settings, 'env') and self.env:
                self.document.settings.env = self.env

            super().apply_transforms()
        else:
            # wrap the target node in a document node while the transforms run
            try:
                document = new_document('')
                if self.env:
                    document.settings.env = self.env
                document += self.document
                self.document = document
                super().apply_transforms()
            finally:
                self.document = self.document[0]
Example #6
 def assemble_doctree(self, indexfile, toctree_only, appendices):
     # type: (unicode, bool, List[unicode]) -> nodes.Node
     from docutils import nodes  # NOQA
     self.docnames = set([indexfile] + appendices)
     logger.info(darkgreen(indexfile) + " ", nonl=1)
     tree = self.env.get_doctree(indexfile)
     tree['docname'] = indexfile
     if toctree_only:
         # extract toctree nodes from the tree and put them in a
         # fresh document
         new_tree = new_document('<latex output>')
         new_sect = nodes.section()
         new_sect += nodes.title(u'<Set title in conf.py>',
                                 u'<Set title in conf.py>')
         new_tree += new_sect
         for node in tree.traverse(addnodes.toctree):
             new_sect += node
         tree = new_tree
     largetree = inline_all_toctrees(self, self.docnames, indexfile, tree,
                                     darkgreen, [indexfile])
     largetree['docname'] = indexfile
     for docname in appendices:
         appendix = self.env.get_doctree(docname)
         appendix['docname'] = docname
         largetree.append(appendix)
     logger.info('')
     logger.info(__("resolving references..."))
     self.env.resolve_references(largetree, indexfile, self)
     # resolve :ref:s to distant tex files -- we can't add a cross-reference,
     # but append the document name
     for pendingnode in largetree.traverse(addnodes.pending_xref):
         docname = pendingnode['refdocname']
         sectname = pendingnode['refsectname']
         newnodes = [nodes.emphasis(sectname, sectname)]
         for subdir, title in self.titles:
             if docname.startswith(subdir):
                 newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
                 newnodes.append(nodes.emphasis(title, title))
                 newnodes.append(nodes.Text(')', ')'))
                 break
         else:
             pass
         pendingnode.replace_self(newnodes)
     return largetree
Example #7
File: __init__.py Project: zhou-/sphinx
 def assemble_doctree(self, indexfile: str, toctree_only: bool,
                      appendices: List[str]) -> nodes.document:  # NOQA
     self.docnames = set([indexfile] + appendices)
     logger.info(darkgreen(indexfile) + " ", nonl=True)
     tree = self.env.get_doctree(indexfile)
     tree['docname'] = indexfile
     if toctree_only:
         # extract toctree nodes from the tree and put them in a
         # fresh document
         new_tree = new_document('<latex output>')
         new_sect = nodes.section()
         new_sect += nodes.title('<Set title in conf.py>',
                                 '<Set title in conf.py>')
         new_tree += new_sect
         for node in tree.traverse(addnodes.toctree):
             new_sect += node
         tree = new_tree
     largetree = inline_all_toctrees(self, self.docnames, indexfile, tree,
                                     darkgreen, [indexfile])
     largetree['docname'] = indexfile
     for docname in appendices:
         appendix = self.env.get_doctree(docname)
         appendix['docname'] = docname
         largetree.append(appendix)
     logger.info('')
     logger.info(__("resolving references..."))
     self.env.resolve_references(largetree, indexfile, self)
     # resolve :ref:s to distant tex files -- we can't add a cross-reference,
     # but append the document name
     for pendingnode in largetree.traverse(addnodes.pending_xref):
         docname = pendingnode['refdocname']
         sectname = pendingnode['refsectname']
         newnodes = [nodes.emphasis(sectname, sectname)]  # type: List[Node]
         for subdir, title in self.titles:
             if docname.startswith(subdir):
                 newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
                 newnodes.append(nodes.emphasis(title, title))
                 newnodes.append(nodes.Text(')', ')'))
                 break
         else:
             pass
         pendingnode.replace_self(newnodes)
     return largetree
Example #8
File: texinfo.py Project: ahviplc/sphinx
 def assemble_doctree(self, indexfile, toctree_only, appendices):
     # type: (unicode, bool, List[unicode]) -> nodes.document
     self.docnames = set([indexfile] + appendices)
     logger.info(darkgreen(indexfile) + " ", nonl=1)
     tree = self.env.get_doctree(indexfile)
     tree['docname'] = indexfile
     if toctree_only:
         # extract toctree nodes from the tree and put them in a
         # fresh document
         new_tree = new_document('<texinfo output>')
         new_sect = nodes.section()
         new_sect += nodes.title(u'<Set title in conf.py>',
                                 u'<Set title in conf.py>')
         new_tree += new_sect
         for node in tree.traverse(addnodes.toctree):
             new_sect += node
         tree = new_tree
     largetree = inline_all_toctrees(self, self.docnames, indexfile, tree,
                                     darkgreen, [indexfile])
     largetree['docname'] = indexfile
     for docname in appendices:
         appendix = self.env.get_doctree(docname)
         appendix['docname'] = docname
         largetree.append(appendix)
     logger.info('')
     logger.info(__("resolving references..."))
     self.env.resolve_references(largetree, indexfile, self)
     # TODO: add support for external :ref:s
     for pendingnode in largetree.traverse(addnodes.pending_xref):
         docname = pendingnode['refdocname']
         sectname = pendingnode['refsectname']
         newnodes = [nodes.emphasis(sectname,
                                    sectname)]  # type: List[nodes.Node]
         for subdir, title in self.titles:
             if docname.startswith(subdir):
                 newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
                 newnodes.append(nodes.emphasis(title, title))
                 newnodes.append(nodes.Text(')', ')'))
                 break
         else:
             pass
         pendingnode.replace_self(newnodes)
     return largetree
Example #9
 def assemble_doctree(self, indexfile, toctree_only, appendices):
     # type: (str, bool, List[str]) -> nodes.document
     self.docnames = set([indexfile] + appendices)
     logger.info(darkgreen(indexfile) + " ", nonl=True)
     tree = self.env.get_doctree(indexfile)
     tree['docname'] = indexfile
     if toctree_only:
         # extract toctree nodes from the tree and put them in a
         # fresh document
         new_tree = new_document('<texinfo output>')
         new_sect = nodes.section()
         new_sect += nodes.title('<Set title in conf.py>',
                                 '<Set title in conf.py>')
         new_tree += new_sect
         for node in tree.traverse(addnodes.toctree):
             new_sect += node
         tree = new_tree
     largetree = inline_all_toctrees(self, self.docnames, indexfile, tree,
                                     darkgreen, [indexfile])
     largetree['docname'] = indexfile
     for docname in appendices:
         appendix = self.env.get_doctree(docname)
         appendix['docname'] = docname
         largetree.append(appendix)
     logger.info('')
     logger.info(__("resolving references..."))
     self.env.resolve_references(largetree, indexfile, self)
     # TODO: add support for external :ref:s
     for pendingnode in largetree.traverse(addnodes.pending_xref):
         docname = pendingnode['refdocname']
         sectname = pendingnode['refsectname']
         newnodes = [nodes.emphasis(sectname, sectname)]  # type: List[nodes.Node]
         for subdir, title in self.titles:
             if docname.startswith(subdir):
                 newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
                 newnodes.append(nodes.emphasis(title, title))
                 newnodes.append(nodes.Text(')', ')'))
                 break
         else:
             pass
         pendingnode.replace_self(newnodes)
     return largetree
Example #10
def test_extract_summary(capsys):
    settings = Mock(language_code='',
                    id_prefix='',
                    auto_id_prefix='',
                    pep_reference=False,
                    rfc_reference=False)
    document = new_document('', settings)

    # normal case
    doc = ['',
           'This is a first sentence. And second one.',
           '',
           'Second block is here']
    assert extract_summary(doc, document) == 'This is a first sentence.'

    # inliner case
    doc = ['This sentence contains *emphasis text having dots.*,',
           'it does not break sentence.']
    assert extract_summary(doc, document) == ' '.join(doc)

    # abbreviations
    doc = ['Blabla, i.e. bla.']
    assert extract_summary(doc, document) == ' '.join(doc)

    # literal
    doc = ['blah blah::']
    assert extract_summary(doc, document) == 'blah blah.'

    # heading
    doc = ['blah blah',
           '=========']
    assert extract_summary(doc, document) == 'blah blah'

    # hyperlink target
    doc = ['Do `this <https://www.sphinx-doc.org/>`_ and that. '
           'blah blah blah.']
    assert (extract_summary(doc, document) ==
            'Do `this <https://www.sphinx-doc.org/>`_ and that.')

    _, err = capsys.readouterr()
    assert err == ''
Example #11
def test_extract_summary(capsys):
    from sphinx.util.docutils import new_document
    from mock import Mock
    settings = Mock(language_code='',
                    id_prefix='',
                    auto_id_prefix='',
                    pep_reference=False,
                    rfc_reference=False)
    document = new_document('', settings)

    # normal case
    doc = [
        '', 'This is a first sentence. And second one.', '',
        'Second block is here'
    ]
    assert extract_summary(doc, document) == 'This is a first sentence.'

    # inliner case
    doc = [
        'This sentence contains *emphasis text having dots.*,',
        'it does not break sentence.'
    ]
    assert extract_summary(doc, document) == ' '.join(doc)

    # abbreviations
    doc = ['Blabla, i.e. bla.']
    assert extract_summary(doc, document) == 'Blabla, i.e.'

    # literal
    doc = ['blah blah::']
    assert extract_summary(doc, document) == 'blah blah.'

    # heading
    doc = ['blah blah', '=========']
    assert extract_summary(doc, document) == 'blah blah'

    _, err = capsys.readouterr()
    assert err == ''
Example #12
    def __init__(self, app):
        # After much digging through source code, this is how we emulate Sphinx's builtin reST parsing state
        # https://github.com/sphinx-doc/sphinx/blob/68cc0f7e94f360a2c62ebcb761f8096e04ebf07f/sphinx/io.py#L204
        # Here we're bypassing the RST Parser, and just doing the relevant code ops
        parser = app.registry.create_source_parser(app, "restructuredtext")
        # autosummary uses tab width 8; not set by publisher/env for some reason
        settings = dict(app.env.settings)
        if "tab_width" not in settings:
            settings["tab_width"] = 8
        p2 = Publisher()
        p2.process_programmatic_settings(None, settings, None)
        document = new_document('dummy_kissapi_source',
                                p2.settings)  # (source path, settings)
        document.reporter = NullReporter()
        state_machine = RSTStateMachine(
            state_classes, 'Body')  # (state machine classes, initial state)
        # needed to set various self.[attr] values of state_machine
        state_machine.run([""], document)  # (input lines, document)

        # the directive attrs that are needed
        self.state = state_machine.get_state()
        self.env = app.env
        self.lineno = 0
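
Once the emulated state is stored on the object, reST fragments can be parsed through it the same way a directive parses its own content. The helper below is purely illustrative (its name, and the idea of exposing it as a method on the same class, are assumptions rather than part of the snippet above):

    def parse_fragment(self, text):
        """Hypothetical helper: parse a reST fragment via the emulated state."""
        from docutils import nodes
        from docutils.statemachine import StringList

        container = nodes.Element()
        content = StringList(text.splitlines(), source='dummy_kissapi_source')
        # nested_parse() is the same entry point a directive's run() would use.
        self.state.nested_parse(content, 0, container)
        return container.children
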
Example #13
def test_extract_summary():
    from sphinx.util.docutils import new_document
    from mock import Mock
    settings = Mock(language_code='',
                    id_prefix='',
                    auto_id_prefix='',
                    pep_reference=False,
                    rfc_reference=False)
    document = new_document('', settings)

    # normal case
    doc = [
        '', 'This is a first sentence. And second one.', '',
        'Second block is here'
    ]
    assert extract_summary(doc, document) == 'This is a first sentence.'

    # inliner case
    doc = [
        'This sentence contains *emphasis text having dots.*,',
        'it does not break sentence.'
    ]
    assert extract_summary(doc, document) == ' '.join(doc)
Example #14
def extract_summary(doc, document):
    # type: (List[unicode], Any) -> unicode
    """Extract summary from docstring."""

    # Skip blank lines at the top
    while doc and not doc[0].strip():
        doc.pop(0)

    # If there's a blank line, then we can assume the first sentence /
    # paragraph has ended, so anything after shouldn't be part of the
    # summary
    for i, piece in enumerate(doc):
        if not piece.strip():
            doc = doc[:i]
            break

    # Try to find the "first sentence", which may span multiple lines
    sentences = periods_re.split(" ".join(doc))  # type: ignore
    if len(sentences) == 1:
        summary = sentences[0].strip()
    else:
        summary = ''
        state_machine = RSTStateMachine(state_classes, 'Body')
        while sentences:
            summary += sentences.pop(0) + '.'
            node = new_document('', document.settings)
            node.reporter = NullReporter()
            state_machine.run([summary], node)
            if not node.traverse(nodes.system_message):
                # no system_message: splitting at this period did not break inline markup
                break

    # strip the literal-block marker ``::`` from the tail of the summary
    summary = literal_re.sub('.', summary)

    return summary
Example #15
    def process_solutions(self, doctree: nodes.document, src: str) -> None:
        """Handle any solutions contained in the document.

        This ensures that a ``*.py`` file is created in the ``resources`` directory
        containing the actual solution.

        It then also rewrites the given doctree to output a pair of code cells in
        the resulting notebook. The first is a prompt for the user to input their
        solution and the second contains a :magic:`ipython:load` declaration to
        give the user the option to load in the solution if they wish to see it.

        Parameters
        ----------
        doctree:
           The doctree to process
        src:
           The path to the file containing the document being processed
        """

        docpath = pathlib.Path(src)
        logger.debug("[tutorial]: processing solutions for: %s", docpath)
        basename = f"{docpath.stem}-soln"

        for idx, soln in enumerate(doctree.traverse(condition=solution)):

            name = f"{basename}-{idx+1:02d}.py"
            destination = pathlib.Path("resources", docpath.with_suffix(""), name)
            refuri = relative_uri(src, str(destination))

            # Convert the solution to a valid Python document that can be executed.
            document = new_document("<solution>")
            document += soln

            # Rather than go through the trouble of maintaining two document translators,
            # one for notebooks and another for Python files, let's just use the notebook
            # translator and do some post-processing on the result - much easier.
            translator = NotebookTranslator(document)
            document.walkabout(translator)
            notebook = translator.asnotebook()

            blocks = []
            for cell in notebook.cells:
                source = cell.source

                # Comment out the lines containing markdown.
                if cell.cell_type == "markdown":
                    source = textwrap.indent(source, "# ")

                blocks.append(source)

            self.resources[str(destination)] = ("create", "\n".join(blocks))

            # TODO: Expose config options for these
            # TODO: Translations?
            your_soln = nodes.literal_block(
                "", "# Write your solution here...\n", language="python"
            )
            load_soln = nodes.literal_block(
                "",
                f"# Execute this cell to load the example solution\n%load {refuri}\n",
                language="python",
            )

            # Replace the actual solution with the 2 cells defined above.
            soln.children = [your_soln, load_soln]
Example #16
def process_feed(app, doctree, fromdocname):
    env = app.builder.env
    document = new_document('')

    for node in doctree.traverse(feed):
        rss_filename = node['rss']
        rss_title = node['title']
        rss_link = node['link']
        rss_description = node['description']
        rss_date = datetime.datetime.utcnow()
        rss_items = []
        for docname in node['entries']:
            entry = env.get_doctree(docname)

            for field in entry.traverse(nodes.field):
                field_date = [f for f in field.traverse(nodes.field_name)][0]
                pars = []
                for b in field.traverse(nodes.field_body):
                    for p in b.traverse(nodes.paragraph):
                        pars.append(p)

                rss_item_description = nodes.compound()
                for p in pars[1:]:
                    rss_item_description += p.deepcopy()

                document += rss_item_description
                app.env.resolve_references(
                    document,
                    node['link'],
                    app.builder,
                )
                document.remove(rss_item_description)

                if app.builder.format == 'html':
                    rss_item_description = "\n".join(
                        app.builder.render_partial(p)['body']
                        for p in rss_item_description
                    )
                    rss_item_date = dateutil.parser.parse(field_date.astext().strip())
                    rss_item_title = "%s: %s" % (field_date.astext(), pars[0].astext())
                    rss_item = RSSItem(
                        rss_item_title,
                        node['link'],
                        rss_item_description,
                        rss_item_date,
                    )
                    rss_items.append(rss_item)

        node.replace_self([])

        if app.builder.format == 'html':
            rss_feed = RSSFeed(
                rss_title,
                rss_link,
                rss_description,
                rss_date,
                rss_items,
            )
            if rss_filename:
                rss_path = os.path.join(app.builder.outdir, rss_filename)
                with open(rss_path, 'wb') as rss_stream:
                    write_rss(rss_feed, rss_stream)
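
For process_feed to be called at all, the extension would also have to define the feed node and register the handler with Sphinx; that part is not shown in this example. A minimal sketch of such a setup() (the node attributes are normally filled in by a companion directive, which is likewise assumed here):

from docutils import nodes


class feed(nodes.General, nodes.Element):
    """Placeholder node; a companion directive is expected to set the
    'rss', 'title', 'link', 'description' and 'entries' attributes
    that process_feed() reads."""


def setup(app):
    app.add_node(feed)
    # doctree-resolved handlers receive (app, doctree, docname), which
    # matches the process_feed(app, doctree, fromdocname) signature above.
    app.connect('doctree-resolved', process_feed)
    return {'parallel_read_safe': True}
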
Example #17
def process_feed(app, doctree, fromdocname):
    env = app.builder.env
    document = new_document('')

    for node in doctree.traverse(feed):
        rss_filename = node['rss']
        rss_title = node['title']
        rss_link = node['link']
        rss_description = node['description']
        rss_date = datetime.datetime.utcnow()
        rss_items = []
        for docname in node['entries']:
            entry = env.get_doctree(docname)
            title = entry.next_node(nodes.section)
            first_year = title.next_node(nodes.section)

            for year in first_year.traverse(
                nodes.section, descend=False, siblings=True
            ):
                first_subsection = year.next_node(nodes.section)
                for subsection in first_subsection.traverse(
                    nodes.section, descend=False, siblings=True
                ):
                    if app.builder.format == 'html':
                        # date
                        date = subsection.next_node(nodes.title).astext().split(":")[0]
                        rss_item_date = dateutil.parser.parse(date.strip())

                        # link
                        rss_item_link = node["link"] + "#" + subsection['ids'][0]

                        # title
                        title = subsection.next_node(nodes.title)

                        document += title
                        app.env.resolve_references(
                            document,
                            node['link'],
                            app.builder,
                        )
                        document.remove(title)

                        rss_item_title = title.astext()

                        # description
                        pars = []
                        for b in subsection.traverse(nodes.paragraph):
                            for p in b.traverse(nodes.paragraph):
                                pars.append(p)

                        rss_item_description = nodes.compound()
                        for p in pars:
                            rss_item_description += p.deepcopy()

                        document += rss_item_description
                        app.env.resolve_references(
                            document,
                            node['link'],
                            app.builder,
                        )
                        document.remove(rss_item_description)

                        rss_item_description = "\n".join(
                            app.builder.render_partial(p)['body']
                            for p in rss_item_description
                        )
                        if True:
                            print("--------------------------------------------")
                            print("--------------------------------------------")
                            print("--------------------------------------------")
                            print("newsfeed item:")
                            print("    title:", rss_item_title)
                            print("    date:", rss_item_date)
                            print("    link:", rss_item_link)
                            print("    desc:", rss_item_description)
                            print(" ", flush=True)

                        rss_item = RSSItem(
                            rss_item_title,
                            rss_item_link,
                            rss_item_description,
                            rss_item_date,
                        )
                        rss_items.append(rss_item)

        node.replace_self([])

        if app.builder.format == 'html':
            rss_feed = RSSFeed(
                rss_title,
                rss_link,
                rss_description,
                rss_date,
                rss_items,
            )
            if rss_filename:
                rss_path = os.path.join(app.builder.outdir, rss_filename)
                with open(rss_path, 'wb') as rss_stream:
                    write_rss(rss_feed, rss_stream)