    def run(self):
        indexnode, node = super(DjangoAdminModel, self).run()
        sig = self.arguments[0]
        lst = []

        if 'noautodoc' not in self.options:
            exclude = [
                a.strip() for a in self.options.get('exclude', '').split(',')
            ]
            app_label, model_name = sig.split('.')
            for name, opts in model_attributes(app_label, model_name).items():
                if name in exclude:
                    continue
                lst.append(".. djangoadmin:attribute:: %s.%s" % (sig, name))
                lst.append('')
                lst.append("   %s" % unicode(opts['description']))
                lst.append('')
            text = '\n'.join(lst)
            new_doc = new_document('temp-string', self.state.document.settings)
            parser = Parser()
            parser.parse(text, new_doc)
            container = nodes.container()
            container.extend(new_doc.children)
            node[1].extend(container)

        return [indexnode, node]
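The heart of this directive is a generic docutils pattern: parse an rst fragment into a throwaway document, then splice its children into the surrounding node tree. A minimal, self-contained sketch of that pattern (the parsed snippet here is purely illustrative):

from docutils import nodes
from docutils.frontend import OptionParser
from docutils.parsers.rst import Parser
from docutils.utils import new_document

# build default settings for a standalone rst Parser
settings = OptionParser(components=(Parser,)).get_default_values()
temp_doc = new_document('temp-string', settings)
Parser().parse(".. note::\n\n   generated content\n", temp_doc)

# collect the parsed nodes into a container, as the directive above does
container = nodes.container()
container.extend(temp_doc.children)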
Example #2
def check_rst_document(source, source_path='<string>', settings=None):
    """Returns a list of objects containing problems in the
        provided reStructuredText document ``source``.

        ``settings`` is the settings object for the docutils document instance.
        If None, the default settings are used.
        """
    alist = []

    def accumulate(x):
        return alist.append(x)
    document = utils.new_document(source_path, settings=settings)
    document.reporter.attach_observer(accumulate)
    if settings is None:  # Fill in some values to prevent AttributeError
        document.settings.tab_width = 8
        document.settings.pep_references = None
        document.settings.rfc_references = None
        document.settings.smart_quotes = True
        document.settings.file_insertion_enabled = True
    parser = Parser()
    parser.parse(source, document)
    # Now apply transforms to get more warnings
    document.transformer.add_transforms(check_transforms)
    document.transformer.apply_transforms()
    return alist
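A hypothetical call of the helper above; the module-level imports and the check_transforms list it relies on are assumed to be in scope:

# each reported problem arrives as a docutils system_message node
problems = check_rst_document("Title\n=====\n\nSome *unclosed emphasis\n")
for msg in problems:
    print(msg['level'], msg.astext())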
Example #3
    def parse(self, source, document):
        """ Parse ``source``, write results to ``document``.

        """
        # This is lame, but seems to be required for python 2
        source = CODING.sub("", source)

        env = document.settings.env
        filename = env.doc2path(env.docname) # e.g. full path to docs/user_guide/examples/layout_vertical

        # This code splits the source into two parts: the docstring (or None if
        # there is not one), and the remaining source code after
        m = ast.parse(source)
        docstring = ast.get_docstring(m)
        if docstring is not None:
            lines = source.split("\n")
            lineno = m.body[0].lineno # assumes docstring is m.body[0]
            source = "\n".join(lines[lineno:])

        js_name = "bokeh-plot-%s.js" % hashlib.md5(env.docname.encode('utf-8')).hexdigest()

        (script, js, js_path, source) = _process_script(source, filename, env.bokeh_plot_auxdir, js_name)

        env.bokeh_plot_files[env.docname] = (script, js, js_path, source)

        rst = PLOT_PAGE.render(source=source,
                               filename=basename(filename),
                               docstring=docstring,
                               script=script)

        document['bokeh_plot_include_bokehjs'] = True

        # can't use super, Sphinx Parser classes don't inherit object
        Parser.parse(self, rst, document)
Example #4
    def _check_rst_data(self, data):
        """Returns warnings when the provided data doesn't compile."""
        source_path = StringIO()
        parser = Parser()
        settings = frontend.OptionParser().get_default_values()
        settings.tab_width = 4
        settings.pep_references = None
        settings.rfc_references = None
        reporter = SilentReporter(source_path,
                                  settings.report_level,
                                  settings.halt_level,
                                  stream=settings.warning_stream,
                                  debug=settings.debug,
                                  encoding=settings.error_encoding,
                                  error_handler=settings.error_encoding_error_handler)

        document = nodes.document(settings, reporter, source=source_path)
        document.note_source(source_path, -1)
        try:
            parser.parse(data, document)
        except AttributeError:
            reporter.messages.append((-1, 'Could not finish the parsing.',
                                      '', {}))

        return reporter.messages
Example #5
def parse(filehandle):
    """ Parse a document read from the given filehandle into a
    :class:`dmr.data.Document` object.

    The document must contain:

    * A top-level title, the resume owner's name;
    * A :class:`docutils.nodes.line_block` containing contact
      information for the resume, to be parsed with
      :func:`dmr.data.Contact.parse`; and
    * Any number of subsections that conform to the restrictions of
      the various :class:`dmr.data.Section` subclasses.

    :param filehandle: The file-like object to parse the document from.
    :type filehandle: file
    :returns: :class:`dmr.data.Document`
    """
    parser = Parser()
    settings = OptionParser(components=(Parser,)).get_default_values()
    logger.info("Parsing document from %s" % filehandle.name)
    document = new_document(filehandle.name, settings)
    try:
        parser.parse(filehandle.read(), document)
    except IOError:
        fatal("Could not parse %s: %s" % (filehandle.name, sys.exc_info()[1]))

    top = None
    options = dict()
    for child in document.children:
        if isinstance(child, docutils.nodes.Structural):
            if top:
                fatal("Document must have exactly one top-level heading")
            top = child
        elif isinstance(child, docutils.nodes.comment):
            contents = child_by_class(child, docutils.nodes.Text)
            if contents and contents.startswith("options"):
                opts = contents.splitlines()
                try:
                    # see if this is a format-specific option block
                    ofmt = opts[0].split("=")[1]
                    logger.debug("Found document options for %s: %s" %
                                 (ofmt, opts[1:]))
                except IndexError:
                    ofmt = None
                    logger.debug("Found default document options: %s" %
                                 opts[1:])
                options[ofmt] = opts[1:]
        else:
            logger.info("Skipping unknown node %s" % child)

    for ofmt in [None, config.format]:
        if ofmt in options:
            parse_document_options(options[ofmt])

    doc = Document.parse(top)
    doc.source = document
    return doc
Example #6
 def parse_(rst):
     document = utils.new_document(b'test data', settings)
     document['file'] = 'dummy'
     parser = RstParser()
     parser.parse(rst, document)
     for msg in document.traverse(nodes.system_message):
         if msg['level'] == 1:
             msg.replace_self([])
     return document
Example #7
    def run(self, cmd, code):
        """Attempt to parse code as reStructuredText."""
        import docutils
        from docutils.nodes import Element
        from docutils.parsers.rst import Parser

        # Generate a new parser
        parser = Parser()

        settings = docutils.frontend.OptionParser(
            components=(docutils.parsers.rst.Parser,)
        ).get_default_values()

        document = docutils.utils.new_document(None, settings=settings)
        document.reporter.stream = None
        document.reporter.halt_level = 5

        # Collect errors via an observer
        def error_collector(data):
            # Mutate the data since it was just generated
            data.type = data['type']
            data.level = data['level']
            data.message = Element.astext(data.children[0])
            data.full_message = Element.astext(data)

            # Save the error
            errors.append(data)

        errors = []
        document.reporter.attach_observer(error_collector)
        parser.parse(code, document)

        for data in errors:
            message = data.message.replace("\n", " ")

            if 'Unknown directive type' in message:
                continue
            if 'Unknown interpreted text role' in message:
                continue
            if 'Substitution definition contains illegal element' in message:
                # there will be an error message for the contents
                # itself, so ignore it.
                continue

            if data.level >= 3:
                error_type = highlight.ERROR
            else:
                error_type = highlight.WARNING
            
            if data.level <= 1:
                return

            line = data['line'] - 1

            self.highlight.range(line, 0, error_type=error_type)
            self.error(line, 0, message, error_type)
Example #8
def make_citation(label, text, settings):
    name = fully_normalize_name(label)
    citation = nodes.citation(text)
    citation += nodes.label('', label)
    new_doc = new_document('temp-string', settings)
    parser = Parser()
    parser.parse(text, new_doc)
    citation['names'].append(name)
    citation += new_doc.children
    return citation
Example #9
def parse_rst_content(filename, source_content):
    settings = OptionParser(components=(Parser,)).get_default_values()
    parser = Parser()
    document = new_document(filename, settings)
    parser.parse(source_content, document)

    def _walk(node):
        if type(node) is Text:
            yield node
        if not isinstance(node, ignored_node_types):
            for child in node.children:
                for n in _walk(child):
                    yield n
    return ' '.join(n for n in _walk(document))
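A hypothetical call of the helper above, which flattens a document down to its visible text; OptionParser, Parser, new_document, Text and the ignored_node_types tuple are assumed to be imported or defined at module level alongside it:

plain_text = parse_rst_content('example.rst', "Title\n=====\n\nSome body text.\n")
print(plain_text)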
Example #10
def main():
    # process cmdline arguments:
    inputFile, outputFile, outputFormat, optargs = getArgs()
    settings = OptionParser(components=(Parser,)).get_default_values()
    settings.debug = optargs['debug']
    parser = Parser()
    input = inputFile.read()
    document = new_document(inputFile.name, settings)
    parser.parse(input, document)
    output = format(outputFormat, input, document, optargs)
    outputFile.write(output)
    if optargs['attributes']:
        import pprint
        pprint.pprint(document.__dict__)
Example #11
    def run(self):
        rawtext = """
.. image:: /../images/examples/%s.jpg
  :width: %d

**Running the Example**::

  openrave.py --example %s

"""%(self.arguments[0],self.options.get('image-width',640),self.arguments[0])
        parser = Parser()
        document = docutils.utils.new_document("<partial node>")
        document.settings = self.state.document.settings
        parser.parse(rawtext,document)
        return document.children
Example #12
 def parse_rst(rst_string):
     from abjad.tools import abjadbooktools
     parser = Parser()
     directives.register_directive(
         'abjad', abjadbooktools.AbjadDirective,
         )
     directives.register_directive(
         'import', abjadbooktools.ImportDirective,
         )
     directives.register_directive('shell', abjadbooktools.ShellDirective)
     settings = OptionParser(components=(Parser,)).get_default_values()
     document = new_document('test', settings)
     parser.parse(rst_string, document)
     document = parser.document
     return document
Example #13
        def run(self): # check if there are spaces in the notebook name
            md_path = self.arguments[0]
            if ' ' in md_path:
                raise ValueError(
                    "Due to issues with docutils stripping spaces from links, white "
                    "space is not allowed in notebook filenames '{0}'".format(md_path))

            # check if raw html is supported
            if not self.state.document.settings.raw_enabled:
                raise self.warning('"%s" directive disabled.' % self.name)

            # get path to markdown file
            md_filename = self.arguments[0]
            md_dir = os.path.join(setup.confdir, '..')
            md_abs_path = os.path.abspath(os.path.join(md_dir, md_filename))

            with open(md_abs_path) as file:
                source = file.read()

            ptype = self.pandoc_from

            if 'from' in self.options:
                ptype = self.options['from']

            if ptype == '':
                ptype = 'markdown'

            # if ptype != '':
            #     arglist = ['pandoc', '--from=' + ptype, '--to=rst', md_abs_path]
            # else:
            #     arglist = ['pandoc', '--to=rst', md_abs_path]
            #
            # p = subprocess.Popen(arglist,
            #     stdout=subprocess.PIPE,
            #     stderr=subprocess.PIPE
            # )
            #
            # out, err = p.communicate()
            #
            # print(out)

            out = pandoc(source, ptype, 'rst')

            settings = OptionParser(components=(Parser,)).get_default_values()
            parser = Parser()
            document = new_document('DOC', settings)
            parser.parse(out, document)

            return [node for node in document]
Example #14
def rest_document(filename):
    """Return an reST document.
    """

    from docutils.parsers.rst import Parser

    file = open(filename)
    try:
        text = file.read()
    finally:
        file.close()

    parser = Parser()
    # new_document() requires a source path; recent docutils supplies
    # default settings when none are given
    docroot = docutils.utils.new_document(filename)
    parser.parse(text, docroot)
    return docroot
Example #15
    def auto_code_block(self, node):
        """Try to automatically generate nodes for codeblock syntax.

        Parameters
        ----------
        node : nodes.literal_block
            Original codeblock node

        Returns
        -------
        tocnode: docutils node
            The converted toc tree node, None if conversion is not possible.
        """
        assert isinstance(node, nodes.literal_block)
        original_node = node
        if 'language' not in node:
            return None
        self.state_machine.reset(self.document,
                                 node.parent,
                                 self.current_level)
        content = node.rawsource.split('\n')
        language = node['language']
        if language == 'math':
            if self.config['enable_math']:
                return self.state_machine.run_directive(
                    'math', content=content)
        elif language == 'eval_rst':
            if self.config['enable_eval_rst']:
                # allow embed non section level rst
                node = nodes.section()
                self.state_machine.state.nested_parse(
                    StringList(content, source=original_node.source),
                    0, node=node, match_titles=False)
                return node.children[:]
        else:
            match = re.search(r'[ ]?[\w_-]+::.*', language)
            if match:
                parser = Parser()
                new_doc = new_document(None, self.document.settings)
                newsource = u'.. ' + match.group(0) + '\n' + node.rawsource
                parser.parse(newsource, new_doc)
                return new_doc.children[:]
            else:
                return self.state_machine.run_directive(
                    'code-block', arguments=[language],
                    content=content)
        return None
Example #16
    def run(self):
        rawtext = """
Command-line
------------

.. shell-block:: openrave.py --example %s --help

Main Python Code
----------------

.. literalinclude:: /../openravepy/examples/%s.py
  :pyobject: main

Class Definitions
-----------------
"""%(self.arguments[0],self.arguments[0])
        parser = Parser()
        document = docutils.utils.new_document("<partial node>")
        document.settings = self.state.document.settings
        parser.parse(rawtext,document)
        return document.children
Example #17
def parse_entries(input_file):
    global all_months, all_entries

    file = codecs.open(input_file, 'r', 'utf-8')
    try:
        text = file.read()
    finally:
        file.close()

    parser = Parser()
    settings = OptionParser(
        components=(Parser,
                    docutils.writers.html4css1.Writer)).get_default_values()
    docroot = docutils.utils.new_document(file.name, settings)
    parser.parse(text, docroot)

    for i in docroot.traverse(condition=docutils.nodes.section):
        try:
            date_string = re.findall(r'(\d{4}-\d{1,2}-\d{1,2})',
                                     str(i.children[0]))[0]
            logging.debug("Found entry: %s" % date_string)
            date = datetime.strptime(date_string, "%Y-%m-%d")
        except IndexError:
            sys.stderr.write("can not parse section : %s\n" %
                             str(i.children[0]))
            sys.exit(1)

        translator = HTMLTranslator(docroot)
        i.walkabout(translator)
        body = ''.join(translator.body)

        entry = Entry(date, body)

        all_months.add(entry.month)
        all_entries[entry.month].append(entry)

    all_months = sorted(all_months, reverse=True)
Example #18
    def update_changelog(self, changelog):
        """
        Updates changelog in db for wrapped product
        with given `changelog` multiline string

        """
        docsettings = OptionParser(components=(Parser,),
                                   defaults={'report_level': 4}).get_default_values()
        document = new_document(u'Changelog Document Instance', docsettings)
        parser = Parser()
        parser.parse(changelog, document)
        if not len(document.children):
            raise EmptyChangelog()

        releases = {}
        for block in document.children:
            headline = block[0].astext()
            r_date = self._get_date(headline)
            version = self._get_sortable_version(headline)
            text = ''
            if len(block) == 1:
                # must be unreleased
                assert(r_date is None)
            else:
                # TODO: figure out a better way to get rst of doctree
                entries = block[1].astext().split(block.child_text_separator)
                text = '* ' + '\n* '.join([e.replace('\n', ' ') for e in entries])
            releases[version] = {'datev': r_date, 'changelog': text}

        found_versions = sorted(releases.keys())
        last_released = Release.objects.filter(product=self.product, datev__isnull=False)
        last_released = last_released.order_by('-datev')

        try:
            last_released = last_released[0]
            last_released_version = last_released.version
        except IndexError:
            last_released_version = ''
        after_last_released = False

        needs_blame = []
        for version in found_versions:
            # walk versions in new changelog, from oldest to newest
            if after_last_released:
                needs_blame.append(pversion(version))
                # this is unreleased in db, probably totally changed in changelog
                # update 1 on 1, regardless of version
                (unreleased, c) = Release.objects.get_or_create(product=self.product, datev__isnull=True)
                unreleased.version = version
                unreleased.datev = releases[version]['datev']
                unreleased.changelog = releases[version]['changelog']
                unreleased.save()
                after_last_released = False
            else:
                # either already exists in db, or is totally new
                added = Release_update_or_add(self.product, version,
                                              releases[version]['datev'],
                                              releases[version]['changelog'])
                if added:
                    needs_blame.append(pversion(version))
                if version == last_released_version:
                    after_last_released = True

        self.update_commit_info(needs_blame)
Example #19
    from docutils.frontend import OptionParser
    # import pygments
    settings = OptionParser(components=(Parser, )).get_default_values()
    document = new_document('(generated) DESCRIPTION.rst', settings=settings)

    from distutils.command.check import SilentReporter
    reporter = SilentReporter(
        '(generated) DESCRIPTION.rst',
        settings.report_level,
        settings.halt_level,
        stream=settings.warning_stream,
        debug=settings.debug,
        encoding=settings.error_encoding,
        error_handler=settings.error_encoding_error_handler)
    document.reporter = reporter
    parser = Parser()
    parser.parse(LONG_DESCRIPTION, document)
    from warnings import warn
    if len(reporter.messages) > 0:
        # display all errors
        for warning in reporter.messages:
            line = warning[-1].get('line')
            if line is None:
                warning = warning[1]
            else:
                warning = '%s (line %s)' % (warning[1], line)
            warn(warning)
        # dump the created file so that one can have a look
        with open('GENERATED_DESCRIPTION_TO_DELETE.rst', 'wb') as f:
            f.write(LONG_DESCRIPTION.encode('utf-8'))
        print(
Example #20
def parse_entries(input_file):
    """Parse entries from input file

    Go through the input_file and pull out each section; parse the date
    and create an Entry() object.  Entries go into all_entries keyed by
    the month it was created in; each month we see gets an entry in
    all_months

    Returns a dict with three keys:

     - all_entries : dict keyed by month with list of Entry objects
     - all_months : set of all months in reverse date order
             (i.e. latest month first)
     - todo : the todo list section, or None if not available
    """

    # a dict that keeps entries keyed by month.  later, we can walk
    # each key kept in all_months to build the pages
    all_entries = defaultdict(list)

    # set of keys for all_entries.  sorted by parse_entries into
    # reverse date order (i.e. latest month is first)
    all_months = set()

    # the todo list section
    todo = None

    file = codecs.open(input_file, 'r', 'utf-8')
    try:
        text = file.read()
    finally:
        file.close()

    parser = Parser()
    settings = OptionParser(
        components=(Parser,
                    docutils.writers.html4css1.Writer)).get_default_values()
    docroot = docutils.utils.new_document(file.name, settings)
    parser.parse(text, docroot)

    for i in docroot.traverse(condition=docutils.nodes.section):
        try:
            if str(i.children[0]) == "<title>todo</title>":
                logging.debug("Found todo section")
                translator = HTMLTranslator(docroot)
                i.walkabout(translator)
                body = ''.join(translator.body)
                todo = Todo(body)
                continue
        except IndexError:
            pass

        try:
            date_string = re.findall(r'(\d{4}-\d{1,2}-\d{1,2})',
                                     str(i.children[0]))[0]
            logging.debug("Found entry: %s" % date_string)
            date = datetime.strptime(date_string, "%Y-%m-%d")
        except IndexError:
            sys.stderr.write("can not parse section : %s\n" %
                             str(i.children[0]))
            sys.exit(1)

        translator = HTMLTranslator(docroot)
        i.walkabout(translator)
        body = ''.join(translator.body)

        entry = Entry(date, body)

        all_months.add(entry.month)
        all_entries[entry.month].append(entry)

    all_months = sorted(all_months, reverse=True)

    return {'all_months': all_months,
            'all_entries': all_entries,
            'todo': todo}
Example #21
    def parse(self, input_string, document):
        output_string = self.convert(input_string)

        if output_string:
            Parser.parse(self, output_string, document)
Example #22
def suite():
    parser = Parser()
    s = DocutilsTestSupport.TransformTestSuite(
        parser, suite_settings={'line_length_limit': 80})
    s.generateTests(totest)
    return s
Example #23
    def apply(self):
        env = self.document.settings.env
        settings, source = self.document.settings, self.document['source']
        # XXX check if this is reliable
        assert source.startswith(env.srcdir)
        docname = path.splitext(relative_path(env.srcdir, source))[0]
        textdomain = find_catalog(docname,
                                  self.document.settings.gettext_compact)

        # fetch translations
        dirs = [path.join(env.srcdir, directory)
                for directory in env.config.locale_dirs]
        catalog, has_catalog = init_locale(dirs, env.config.language,
                                           textdomain)
        if not has_catalog:
            return

        parser = RSTParser()

        # phase1: replace reference ids with translated names
        for node, msg in extract_messages(self.document):
            msgstr = catalog.gettext(msg)
            # XXX add marker to untranslated parts
            if not msgstr or msgstr == msg or not msgstr.strip():
                # as-of-yet untranslated
                continue

            # Avoid "Literal block expected; none found." warnings.
            # If msgstr ends with '::' then it causes a warning message during
            # parser.parse() processing.
            # The literal-block warning only appears in the above case.
            if msgstr.strip().endswith('::'):
                msgstr += '\n\n   dummy literal'
                # the dummy literal node will be discarded by 'patch = patch[0]'

            patch = new_document(source, settings)
            CustomLocaleReporter(node.source, node.line).set_reporter(patch)
            parser.parse(msgstr, patch)
            patch = patch[0]
            # XXX doctest and other block markup
            if not isinstance(patch, nodes.paragraph):
                continue # skip for now

            processed = False  # skip flag

            # update title(section) target name-id mapping
            if isinstance(node, nodes.title):
                section_node = node.parent
                new_name = nodes.fully_normalize_name(patch.astext())
                old_name = nodes.fully_normalize_name(node.astext())

                if old_name != new_name:
                    # if name would be changed, replace node names and
                    # document nameids mapping with new name.
                    names = section_node.setdefault('names', [])
                    names.append(new_name)
                    if old_name in names:
                        names.remove(old_name)

                    _id = self.document.nameids.get(old_name, None)
                    explicit = self.document.nametypes.get(old_name, None)

                    # * if explicit: _id is a label. The title node needs another id.
                    # * if not explicit:
                    #
                    #   * _id is None:
                    #
                    #     _id is None means _id was duplicated.
                    #     old_name entry still exists in nameids and
                    #     nametypes for another duplicated entry.
                    #
                    #   * _id is provided: proceed as below
                    if not explicit and _id:
                        # _id was not duplicated.
                        # remove old_name entry from document ids database
                        # to reuse original _id.
                        self.document.nameids.pop(old_name, None)
                        self.document.nametypes.pop(old_name, None)
                        self.document.ids.pop(_id, None)

                    # re-entry with new named section node.
                    self.document.note_implicit_target(
                            section_node, section_node)

                    # replace target's refname to new target name
                    def is_named_target(node):
                        return isinstance(node, nodes.target) and  \
                            node.get('refname') == old_name
                    for old_target in self.document.traverse(is_named_target):
                        old_target['refname'] = new_name

                    processed = True

            # glossary terms update refid
            if isinstance(node, nodes.term):
                gloss_entries = env.temp_data.setdefault('gloss_entries', set())
                ids = []
                termnodes = []
                for _id in node['names']:
                    if _id in gloss_entries:
                        gloss_entries.remove(_id)
                    _id, _, new_termnodes = \
                        make_termnodes_from_paragraph_node(env, patch, _id)
                    ids.append(_id)
                    termnodes.extend(new_termnodes)

                if termnodes and ids:
                    patch = make_term_from_paragraph_node(termnodes, ids)
                    node['ids'] = patch['ids']
                    node['names'] = patch['names']
                    processed = True

            # update leaves with processed nodes
            if processed:
                for child in patch.children:
                    child.parent = node
                node.children = patch.children
                node['translated'] = True


        # phase2: translation
        for node, msg in extract_messages(self.document):
            if node.get('translated', False):
                continue

            msgstr = catalog.gettext(msg)
            # XXX add marker to untranslated parts
            if not msgstr or msgstr == msg: # as-of-yet untranslated
                continue

            # Avoid "Literal block expected; none found." warnings.
            # If msgstr ends with '::' then it causes a warning message during
            # parser.parse() processing.
            # The literal-block warning only appears in the above case.
            if msgstr.strip().endswith('::'):
                msgstr += '\n\n   dummy literal'
                # the dummy literal node will be discarded by 'patch = patch[0]'

            patch = new_document(source, settings)
            CustomLocaleReporter(node.source, node.line).set_reporter(patch)
            parser.parse(msgstr, patch)
            patch = patch[0]
            # XXX doctest and other block markup
            if not isinstance(patch, nodes.paragraph):
                continue # skip for now

            # auto-numbered foot note reference should use original 'ids'.
            def is_autonumber_footnote_ref(node):
                return isinstance(node, nodes.footnote_reference) and \
                    node.get('auto') == 1
            def list_replace_or_append(lst, old, new):
                if old in lst:
                    lst[lst.index(old)] = new
                else:
                    lst.append(new)
            old_foot_refs = node.traverse(is_autonumber_footnote_ref)
            new_foot_refs = patch.traverse(is_autonumber_footnote_ref)
            if len(old_foot_refs) != len(new_foot_refs):
                env.warn_node('inconsistent footnote references in '
                              'translated message', node)
            old_foot_namerefs = {}
            for r in old_foot_refs:
                old_foot_namerefs.setdefault(r.get('refname'), []).append(r)
            for new in new_foot_refs:
                refname = new.get('refname')
                refs = old_foot_namerefs.get(refname, [])
                if not refs:
                    continue

                old = refs.pop(0)
                new['ids'] = old['ids']
                for id in new['ids']:
                    self.document.ids[id] = new
                list_replace_or_append(
                        self.document.autofootnote_refs, old, new)
                if refname:
                    list_replace_or_append(
                        self.document.footnote_refs.setdefault(refname, []),
                        old, new)
                    list_replace_or_append(
                        self.document.refnames.setdefault(refname, []),
                        old, new)

            # reference should use new (translated) 'refname'.
            # * reference target ".. _Python: ..." is not translatable.
            # * use translated refname for section refname.
            # * inline reference "`Python <...>`_" has no 'refname'.
            def is_refnamed_ref(node):
                return isinstance(node, nodes.reference) and  \
                    'refname' in node
            old_refs = node.traverse(is_refnamed_ref)
            new_refs = patch.traverse(is_refnamed_ref)
            if len(old_refs) != len(new_refs):
                env.warn_node('inconsistent references in '
                              'translated message', node)
            old_ref_names = [r['refname'] for r in old_refs]
            new_ref_names = [r['refname'] for r in new_refs]
            orphans = list(set(old_ref_names) - set(new_ref_names))
            for new in new_refs:
                if not self.document.has_name(new['refname']):
                    # Maybe refname is translated but target is not translated.
                    # Note: multiple translated refnames break link ordering.
                    if orphans:
                        new['refname'] = orphans.pop(0)
                    else:
                        # the orphan refnames list is already empty!
                        # the reference count is the same in new_refs and old_refs.
                        pass

                self.document.note_refname(new)

            # refnamed footnote and citation should use original 'ids'.
            def is_refnamed_footnote_ref(node):
                footnote_ref_classes = (nodes.footnote_reference,
                                        nodes.citation_reference)
                return isinstance(node, footnote_ref_classes) and \
                    'refname' in node
            old_refs = node.traverse(is_refnamed_footnote_ref)
            new_refs = patch.traverse(is_refnamed_footnote_ref)
            refname_ids_map = {}
            if len(old_refs) != len(new_refs):
                env.warn_node('inconsistent references in '
                              'translated message', node)
            for old in old_refs:
                refname_ids_map[old["refname"]] = old["ids"]
            for new in new_refs:
                refname = new["refname"]
                if refname in refname_ids_map:
                    new["ids"] = refname_ids_map[refname]

            # The original pending_xref['reftarget'] contains the untranslated
            # target name; the new pending_xref must keep the original one.
            # This code prevents ref-targets from being changed in the translation.
            old_refs = node.traverse(addnodes.pending_xref)
            new_refs = patch.traverse(addnodes.pending_xref)
            xref_reftarget_map = {}
            if len(old_refs) != len(new_refs):
                env.warn_node('inconsistent term references in '
                              'translated message', node)
            def get_ref_key(node):
                case = node["refdomain"], node["reftype"]
                if case == ('std', 'term'):
                    return None
                else:
                    return (
                        node["refdomain"],
                        node["reftype"],
                        node['reftarget'],)

            for old in old_refs:
                key = get_ref_key(old)
                if key:
                    xref_reftarget_map[key] = old["reftarget"]
            for new in new_refs:
                key = get_ref_key(new)
                if key in xref_reftarget_map:
                    new['reftarget'] = xref_reftarget_map[key]

            # update leaves
            for child in patch.children:
                child.parent = node
            node.children = patch.children
            node['translated'] = True

        # Extract and translate messages for index entries.
        for node, entries in traverse_translatable_index(self.document):
            new_entries = []
            for type, msg, tid, main in entries:
                msg_parts = split_index_msg(type, msg)
                msgstr_parts = []
                for part in msg_parts:
                    msgstr = catalog.gettext(part)
                    if not msgstr:
                        msgstr = part
                    msgstr_parts.append(msgstr)

                new_entries.append((type, ';'.join(msgstr_parts), tid, main))

            node['raw_entries'] = entries
            node['entries'] = new_entries
Example #24
def rst2nodes(text, settings):
    new_doc = new_document('temp-string', settings)
    parser = Parser()
    parser.parse(text, new_doc)
    return new_doc.children
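A minimal usage sketch for the helper above, building the settings object the same way most examples on this page do:

from docutils.frontend import OptionParser
from docutils.parsers.rst import Parser

settings = OptionParser(components=(Parser,)).get_default_values()
children = rst2nodes("*emphasis* and ``literal``", settings)
print([child.tagname for child in children])  # e.g. ['paragraph']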
Example #25
def suite():
    parser = Parser()
    s = DocutilsTestSupport.TransformTestSuite(parser)
    s.generateTests(totest)
    return s
Example #26
    def apply(self):
        env = self.document.settings.env
        settings, source = self.document.settings, self.document['source']
        # XXX check if this is reliable
        assert source.startswith(env.srcdir)
        docname = path.splitext(relative_path(env.srcdir, source))[0]
        textdomain = find_catalog(docname,
                                  self.document.settings.gettext_compact)

        # fetch translations
        dirs = [path.join(env.srcdir, directory)
                for directory in env.config.locale_dirs]
        catalog, has_catalog = init_locale(dirs, env.config.language,
                                           textdomain)
        if not has_catalog:
            return

        parser = RSTParser()

        for node, msg in extract_messages(self.document):
            msgstr = catalog.gettext(msg)
            # XXX add marker to untranslated parts
            if not msgstr or msgstr == msg: # as-of-yet untranslated
                continue

            # Avoid "Literal block expected; none found." warnings.
            # If msgstr ends with '::' then it causes a warning message during
            # parser.parse() processing.
            # The literal-block warning only appears in the above case.
            if msgstr.strip().endswith('::'):
                msgstr += '\n\n   dummy literal'
                # the dummy literal node will be discarded by 'patch = patch[0]'

            patch = new_document(source, settings)
            CustomLocaleReporter(node.source, node.line).set_reporter(patch)
            parser.parse(msgstr, patch)
            patch = patch[0]
            # XXX doctest and other block markup
            if not isinstance(patch, nodes.paragraph):
                continue # skip for now

            # auto-numbered foot note reference should use original 'ids'.
            def is_autonumber_footnote_ref(node):
                return isinstance(node, nodes.footnote_reference) and \
                    node.get('auto') == 1
            old_foot_refs = node.traverse(is_autonumber_footnote_ref)
            new_foot_refs = patch.traverse(is_autonumber_footnote_ref)
            if len(old_foot_refs) != len(new_foot_refs):
                env.warn_node('inconsistent footnote references in '
                              'translated message', node)
            for old, new in zip(old_foot_refs, new_foot_refs):
                new['ids'] = old['ids']
                for id in new['ids']:
                    self.document.ids[id] = new
                self.document.autofootnote_refs.remove(old)
                self.document.note_autofootnote_ref(new)

            # reference should use original 'refname'.
            # * reference target ".. _Python: ..." is not translatable.
            # * section refname is not translatable.
            # * inline reference "`Python <...>`_" has no 'refname'.
            def is_refnamed_ref(node):
                return isinstance(node, nodes.reference) and  \
                    'refname' in node
            old_refs = node.traverse(is_refnamed_ref)
            new_refs = patch.traverse(is_refnamed_ref)
            applied_refname_map = {}
            if len(old_refs) != len(new_refs):
                env.warn_node('inconsistent references in '
                              'translated message', node)
            for new in new_refs:
                if new['refname'] in applied_refname_map:
                    # 2nd appearance of the reference
                    new['refname'] = applied_refname_map[new['refname']]
                elif old_refs:
                    # 1st appearance of the reference in old_refs
                    old = old_refs.pop(0)
                    refname = old['refname']
                    new['refname'] = refname
                    applied_refname_map[new['refname']] = refname
                else:
                    # the reference is not found in old_refs
                    applied_refname_map[new['refname']] = new['refname']

                self.document.note_refname(new)

            # refnamed footnote and citation should use original 'ids'.
            def is_refnamed_footnote_ref(node):
                footnote_ref_classes = (nodes.footnote_reference,
                                        nodes.citation_reference)
                return isinstance(node, footnote_ref_classes) and \
                    'refname' in node
            old_refs = node.traverse(is_refnamed_footnote_ref)
            new_refs = patch.traverse(is_refnamed_footnote_ref)
            refname_ids_map = {}
            if len(old_refs) != len(new_refs):
                env.warn_node('inconsistent references in '
                              'translated message', node)
            for old in old_refs:
                refname_ids_map[old["refname"]] = old["ids"]
            for new in new_refs:
                refname = new["refname"]
                if refname in refname_ids_map:
                    new["ids"] = refname_ids_map[refname]

            # The original pending_xref['reftarget'] contains the untranslated
            # target name; the new pending_xref must keep the original one.
            # This code prevents ref-targets from being changed in the translation.
            old_refs = node.traverse(addnodes.pending_xref)
            new_refs = patch.traverse(addnodes.pending_xref)
            xref_reftarget_map = {}
            if len(old_refs) != len(new_refs):
                env.warn_node('inconsistent term references in '
                              'translated message', node)
            for old in old_refs:
                key = old["reftype"], old["refdomain"]
                xref_reftarget_map[key] = old["reftarget"]
            for new in new_refs:
                key = new["reftype"], new["refdomain"]
                if key in xref_reftarget_map:
                    new['reftarget'] = xref_reftarget_map[key]

            # update leaves
            for child in patch.children:
                child.parent = node
            node.children = patch.children

        # Extract and translate messages for index entries.
        for node, entries in traverse_translatable_index(self.document):
            new_entries = []
            for type, msg, tid, main in entries:
                msg_parts = split_index_msg(type, msg)
                msgstr_parts = []
                for part in msg_parts:
                    msgstr = catalog.gettext(part)
                    if not msgstr:
                        msgstr = part
                    msgstr_parts.append(msgstr)

                new_entries.append((type, ';'.join(msgstr_parts), tid, main))

            node['raw_entries'] = entries
            node['entries'] = new_entries
Example #27
def rst_document(rst_string: str) -> document:
    default_settings = OptionParser(components=(Parser, )).get_default_values()
    document = new_document(rst_string, default_settings)
    parser = Parser()
    parser.parse(rst_string, document)
    return document
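A hypothetical call of the helper above; note that it reuses the rst string itself as the document's source path, so short inputs keep any reported warnings readable:

doc = rst_document("Heading\n=======\n\nBody paragraph.\n")
print(doc.pformat())  # pretty-printed pseudo-XML of the doctree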
Example #28
def suite():
    parser = Parser()
    s = DocutilsTestSupport.TransformTestSuite(
        parser, suite_settings={'strip_comments': 1})
    s.generateTests(totest)
    return s
Example #29
def convert_function_info(function_name, doc_string, defaults=[]):
    """ Given a documentation string, writes html documentation. First tries
        to identify the standard docstring format for has_units objects. If
        successful, outputs nicely formatted html. Otherwise, converts to html
        with no other changes.

        Code borrowed from numerical_modelling's has_unit.py
    """
    system = platform.system()

    stripped_lines = [line.strip() for line in doc_string.splitlines()]

    # XXX: hack to avoid expensive formatting. Remove later.
    if len(stripped_lines) > 100:
        return pre_formatted_html(doc_string)

    try:
        # Parse the lines using docutil parser to get a document-tree
        settings = OptionParser(components=(Parser, )).get_default_values()
        document = new_document("Docstring", settings)
        Parser().parse("\n".join(stripped_lines), document)

        # Filter out children of the root of the document-tree which are tagged
        # as "sections". Usually section has "title" and "paragraph" as
        # children. The inputs and outputs we are looking for are in the
        # "paragraph" section of the "section".

        sections = [
            child for child in document.children
            if child.tagname.lower() == "section"
        ]

        # Inputs are in the section with title "Parameters" and outputs are in
        # the section with title "Returns".
        inputtext = [
            section.children[1].children[0].data for section in sections
            if 'parameters' in section['names']
        ]
        outputtext = [
            section.children[1].children[0].data for section in sections
            if 'returns' in section['names']
        ]

        # If things aren't looking right at this point, the docstring isn't
        # properly formatted, so we're done.
        if len(sections) == 0 or len(inputtext) == 0 or len(outputtext) == 0:
            html = _html_header()
            # For some reason, the font size is huge when the native wx widget is
            # used. Reduce its size:
            if system != 'Windows' and wx.VERSION[1] < 8:
                html += '<font size="-2">\n'
            html += convert_string_fragment(doc_string)
            if system != 'Windows' and wx.VERSION[1] < 8:
                html += '</font>\n'
            html += "</body>\n</html>"

        # Continue building the 'proper' html
        else:
            # Data in a paragraph consists of variables on separate lines.
            # However, each line for a variable is a combination of multiple lines,
            # and we are interested in retrieving only the first line for each
            # variable (in both inputs and outputs). Hence we join the separated
            # lines and then split all of them.

            inputlines = "\n".join(inputtext).splitlines()
            outputlines = "\n".join(outputtext).splitlines()

            # Split into lines which give description and line which give
            # variable data with units

            inputdesc = [
                line for line in inputlines
                if " :" not in line and _count_indent(line) == 0
            ]
            outputdesc = [
                line for line in outputlines
                if " :" not in line and _count_indent(line) == 0
            ]
            inputlines = [
                line.split(" :") for line in inputlines
                if " :" in line or _count_indent(line) > 0
            ]
            outputlines = [
                line.split(" :") for line in outputlines
                if " :" in line or _count_indent(line) > 0
            ]

            # Create first line, listing function parameters and return vals.
            html = _html_header()
            if system != 'Windows' and wx.VERSION[1] < 8:
                html += '<font size="-2">\n'
            html += "<p>"
            check_returns = False
            for i, var in enumerate(outputlines):
                check_returns = True
                html += var[0]
                if i < len(outputlines) - 1:
                    html += ", "
                else:
                    html += " "
            if check_returns:
                html += "="
            html += " <b>" + function_name + "</b>("
            for i, var in enumerate(inputlines):
                html += var[0]
                if len(defaults) != 0:
                    index = len(defaults) - len(inputlines) + i
                    if index >= 0:
                        html += "=" + str(defaults[index])
                if i < len(inputlines) - 1:
                    html += ", "
            html += ")\n"

            # Add a brief description. Should be the first line of docstring.
            html += "<br>" + stripped_lines[0] + "</p>\n"

            # Add a list of inputs
            html += "<p><u>Inputs</u>\n<table>"
            for var, desc in map(None, inputlines, inputdesc):
                # fixme: Failing for "marine_environment" function.
                html += "\n<tr><td></td><td><b>" + var[0] + "</b>"
                # The format for the units section is ' units=x', so we slice
                # off the first seven characters. fixme: support for spacing
                # between equal sign, despite the incorrectness of this syntax.
                if (len(var) == 3) and (var[2][7:] != 'dimensionless'):
                    html += " [" + var[2][7:] + "]"
                if desc is None:
                    html += "</td>"
                else:
                    html += ":</td><td>" + desc + "</td>"
                html += "</tr>"
            html += "\n</table></p>"

            # Add a list of outputs
            html += "\n<p><u>Outputs</u>\n<table>"
            for var, desc in map(None, outputlines, outputdesc):
                html += "\n<tr><td>"
                if var is not None:
                    html += "</td><td><b>" + var[0] + "</b>"
                    if (len(var) == 3) and (var[2][7:] != 'dimensionless'):
                        html += " [" + var[2][7:] + "]"
                    if desc is None:
                        html += "</td>"
                    else:
                        html += ":</td><td>" + desc + "</td>"
                html += "</tr>"
            html += "\n</table></p>"

            # Give a more detailed description, if available
            # Get the description text directly from the string. The parser will not
            # produce useful output for a description with blank lines in the section
            # (these are required for certain reST structures)
            try:
                desc_html = convert_string_fragment(doc_string)
                if system == 'Windows':
                    desc_html = desc_html.replace(
                        '<p>', '<p style="margin: 0px; padding:0px">')
                startSearch = 'ion</a></h5>'
                start = desc_html.rindex(startSearch)
                end = desc_html.rindex(r'</div>', start)
                html += '\n<p style="margin: 0px; padding:0px">'
                html += '<u>Description</u></p>\n<table><tr><td></td><td>'
                if system != 'Windows' and wx.VERSION[1] < 8:
                    html += '<font size="-2">'
                html += desc_html[start + len(startSearch):end]
                if system != 'Windows' and wx.VERSION[1] < 8:
                    html += '</font>'
                html += "\n</td></tr></table>\n"
            except ValueError:
                pass

            if system != 'Windows' and wx.VERSION[1] < 8:
                html += '</font>\n'
            html += "</body>\n</html>"
    except Exception as e:
        logger.warning('Could not parse docstring; %s: %s' %
                       (e.__class__.__name__, e))
        html = pre_formatted_html(doc_string)
    return html
Example #30
def suite():
    parser = Parser()
    s = DocutilsTestSupport.TransformTestSuite(
        parser, suite_settings={'expose_internals': ['rawsource', 'source']})
    s.generateTests(totest)
    return s
Example #31
    def run(self):
        env = self.state.document.settings.env
        baseurl = env.config.rss_baseurl
        assert baseurl, 'rss_baseurl must be defined in your config.py'

        source = self.state_machine.input_lines.source(
            self.lineno - self.state_machine.input_offset - 1)

        rss_doc = utils.new_document(b('<rss>'), self.state.document.settings)
        Parser().parse('\n'.join(self.content), rss_doc)

        rst_suffix = env.config.source_suffix
        path = os.path.relpath(source, env.srcdir).replace(rst_suffix, '.html')

        builder = env.app.builder
        docwriter = HTMLWriter(self)
        docsettings = OptionParser(
            defaults=env.settings,
            components=(docwriter, )).get_default_values()
        docsettings.compact_lists = bool(env.config.html_compact_lists)

        dest = os.path.join(env.app.outdir, os_path(env.docname) + '.rss')
        pageurl = '%s/%s' % (baseurl, path)
        with open(dest, 'w') as rss:
            title = self.options.get('title', '')
            description = self.options.get('description', None)
            rss.write('<?xml version="1.0" encoding="ISO-8859-1" ?>\n')
            rss.write('<rss version="2.0">\n')
            rss.write('<channel>\n')
            rss.write('<title>%s</title>\n' % cgi.escape(title))
            rss.write('<link>%s</link>\n' % pageurl)
            if description:
                rss.write('<description>%s</description>\n' %
                          cgi.escape(description))

            for child in rss_doc.children:
                if not isinstance(child, nodes.section):
                    continue

                title_index = child.first_child_matching_class(nodes.title)
                if title_index is None:
                    continue

                node = nodes.paragraph()
                node.extend(child.children[title_index + 1:])

                sec_doc = utils.new_document(b('<rss-section>'), docsettings)
                sec_doc.append(node)
                visitor = RssTranslator(builder, sec_doc)
                sec_doc.walkabout(visitor)

                title = child.children[title_index].astext()
                sectionurl = '%s#%s' % (pageurl, child.get('ids')[0])
                description = ''.join(visitor.body)

                rss.write('<item>\n')
                rss.write('<title>%s</title>\n' % cgi.escape(title))
                rss.write('<link>%s</link>\n' % sectionurl)
                rss.write('<description><![CDATA[%s]]></description>\n' %
                          description)
                rss.write('</item>\n')
            rss.write('</channel>\n')
            rss.write('</rss>\n')

        return []
Example #32
    def parse(self, content):
        settings = OptionParser(components=(Parser, Writer)) \
                   .get_default_values()
        doc = new_document('doc', settings)
        parser = Parser()
        parser.parse(content, doc)

        stories = []
        for node in doc:
            if isinstance(node, docutils.nodes.section):
                # Each section is a story
                if isinstance(node[0], docutils.nodes.title):
                    story_title = node.pop(0).astext()
                else:
                    warnings.warn('Found a story without a title: %r' % node)
                    continue

                tasks = []
                points = None
                if isinstance(node[-1], docutils.nodes.bullet_list):
                    # Tasks are listed in a bullet list at the end of the story
                    tasklist = node.pop()
                    for line in tasklist:
                        line = line.astext()
                        # The task estimate is given in parentheses at the very
                        # end; the word "days" is optional.
                        match = re.search(ur'^.+\((\d+)[^\)]{0,5}\)$', line,
                                          re.UNICODE | re.DOTALL)
                        if match:
                            points = int(match.group(1))
                            line = re.sub(ur'^(.+?)\(\d+[^\)]{0,5}\)$', r'\1',
                                          line)
                        else:
                            points = 0

                        # The assignee is given before the task and separated
                        # by a colon.
                        match = re.search(ur'^\+?([\w]+):\s*(.+)$', line,
                                          re.UNICODE | re.DOTALL)
                        if match:
                            person = match.group(1)
                            task_title = match.group(2)
                            state = Task.WORK
                        else:
                            task_title = line
                            person = None
                            state = Task.NEW

                        if line.startswith('+'):
                            state = Task.DONE

                        task = Task(task_title,
                                    state,
                                    person=person,
                                    points=points)
                        tasks.append(task)

                # Everything else in the story is its description.
                writer = Writer()
                pseudo_doc = new_document(story_title, settings)
                pseudo_doc.children = [node]
                writer.document = pseudo_doc
                writer.translate()
                description = ''.join(writer.body)

                stories.append(Story(story_title, description, tasks))

        return stories
Example #33
from docutils.core import publish_from_doctree
from docutils.parsers.rst import Parser
from docutils.utils import new_document


def build_doc(name):
    doc = new_document(name)
    doc.settings.tab_width = 4
    doc.settings.character_level_inline_markup = "\ "
    doc.settings.file_insertion_enabled = True
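    # file_insertion_enabled lets the parser honor the ``.. include::`` used below;
    # the included sub.rst must actually exist (resolved relative to the current
    # working directory here, since the document has no real source path).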
    doc.settings.pep_references = "http://www.python.org/dev/peps/"
    doc.settings.rfc_references = "http://tools.ietf.org/html/"
    return doc


text = """
hello
========================================

this is content

subsection
----------------------------------------

- foo
- bar
- boo

.. include:: sub.rst
"""

p = Parser()
doc = build_doc("<nosource>")
p.parse(text, doc)
print(publish_from_doctree(doc, writer_name='pseudoxml').decode("utf-8"))
Example #34
def parsePartial(rawtext, settings):
    parser = Parser()
    document = utils.new_document("<partial node>")
    document.settings = settings
    parser.parse(rawtext, document)
    return document.children
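
# A minimal usage sketch, not part of the original example: default settings
# are built here the same way several other examples in this collection do it.
if __name__ == '__main__':
    from docutils import utils
    from docutils.frontend import OptionParser
    from docutils.parsers.rst import Parser

    settings = OptionParser(components=(Parser,)).get_default_values()
    for child in parsePartial("A *small* fragment of reST.", settings):
        print(child.pformat())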
Example #35
    def apply(self):
        env = self.document.settings.env
        settings, source = self.document.settings, self.document['source']
        # XXX check if this is reliable
        assert source.startswith(env.srcdir)
        docname = path.splitext(relative_path(path.join(env.srcdir, 'dummy'),
                                              source))[0]
        textdomain = find_catalog(docname,
                                  self.document.settings.gettext_compact)

        # fetch translations
        dirs = [path.join(env.srcdir, directory)
                for directory in env.config.locale_dirs]
        catalog, has_catalog = init_locale(dirs, env.config.language,
                                           textdomain,
                                           charset=env.config.source_encoding)
        if not has_catalog:
            return

        parser = RSTParser()

        # phase1: replace reference ids with translated names
        for node, msg in extract_messages(self.document):
            msgstr = catalog.gettext(msg)
            # XXX add marker to untranslated parts
            if not msgstr or msgstr == msg or not msgstr.strip():
                # as-of-yet untranslated
                continue

            # Avoid "Literal block expected; none found." warnings.
            # If msgstr ends with '::' then it cause warning message at
            # parser.parse() processing.
            # literal-block-warning is only appear in avobe case.
            if msgstr.strip().endswith('::'):
                msgstr += '\n\n   dummy literal'
                # the dummy literal node is discarded later by 'patch = patch[0]'

            # Literal blocks need literal block notation so that they do not
            # turn into paragraphs.
            if isinstance(node, LITERAL_TYPE_NODES):
                msgstr = '::\n\n' + indent(msgstr, ' '*3)

            patch = new_document(source, settings)
            CustomLocaleReporter(node.source, node.line).set_reporter(patch)
            parser.parse(msgstr, patch)
            try:
                patch = patch[0]
            except IndexError:  # empty node
                pass
            # XXX doctest and other block markup
            if not isinstance(patch, nodes.paragraph):
                continue  # skip for now

            processed = False  # skip flag

            # update title(section) target name-id mapping
            if isinstance(node, nodes.title):
                section_node = node.parent
                new_name = nodes.fully_normalize_name(patch.astext())
                old_name = nodes.fully_normalize_name(node.astext())

                if old_name != new_name:
                    # if name would be changed, replace node names and
                    # document nameids mapping with new name.
                    names = section_node.setdefault('names', [])
                    names.append(new_name)
                    # The original section name (reference target name) is kept
                    # so that other nodes that are not yet translated, or that use
                    # an explicit target name like
                    # "`text to display <explicit target name_>`_", can still
                    # refer to it. So `old_name` remains in `names`.

                    _id = self.document.nameids.get(old_name, None)
                    explicit = self.document.nametypes.get(old_name, None)

                    # * if explicit: _id is a label; the title node needs another id.
                    # * if not explicit:
                    #
                    #   * if _id is None:
                    #
                    #     _id is None means:
                    #
                    #     1. _id was not provided yet.
                    #
                    #     2. _id was duplicated.
                    #
                    #        old_name entry still exists in nameids and
                    #        nametypes for another duplicated entry.
                    #
                    #   * if _id is provided: proceed as below
                    if _id:
                        if not explicit:
                            # _id was not duplicated.
                            # remove old_name entry from document ids database
                            # to reuse original _id.
                            self.document.nameids.pop(old_name, None)
                            self.document.nametypes.pop(old_name, None)
                            self.document.ids.pop(_id, None)

                        # re-entry with new named section node.
                        #
                        # Note: msgnode that is a second parameter of the
                        # `note_implicit_target` is not necessary here because
                        # section_node has been noted previously on rst parsing by
                        # `docutils.parsers.rst.states.RSTState.new_subsection()`
                        # and already has `system_message` if needed.
                        self.document.note_implicit_target(section_node)

                    # replace target's refname to new target name
                    def is_named_target(node):
                        return isinstance(node, nodes.target) and  \
                            node.get('refname') == old_name
                    for old_target in self.document.traverse(is_named_target):
                        old_target['refname'] = new_name

                    processed = True

            # glossary terms update refid
            if isinstance(node, nodes.term):
                gloss_entries = env.temp_data.setdefault('gloss_entries', set())
                ids = []
                termnodes = []
                for _id in node['names']:
                    if _id in gloss_entries:
                        gloss_entries.remove(_id)
                    _id, _, new_termnodes = \
                        make_termnodes_from_paragraph_node(env, patch, _id)
                    ids.append(_id)
                    termnodes.extend(new_termnodes)

                if termnodes and ids:
                    patch = make_term_from_paragraph_node(termnodes, ids)
                    node['ids'] = patch['ids']
                    node['names'] = patch['names']
                    processed = True

            # update leaves with processed nodes
            if processed:
                for child in patch.children:
                    child.parent = node
                node.children = patch.children
                node['translated'] = True

        # phase2: translation
        for node, msg in extract_messages(self.document):
            if node.get('translated', False):
                continue

            msgstr = catalog.gettext(msg)
            # XXX add marker to untranslated parts
            if not msgstr or msgstr == msg:  # as-of-yet untranslated
                continue

            # Avoid "Literal block expected; none found." warnings.
            # If msgstr ends with '::' then it cause warning message at
            # parser.parse() processing.
            # literal-block-warning is only appear in avobe case.
            if msgstr.strip().endswith('::'):
                msgstr += '\n\n   dummy literal'
                # the dummy literal node is discarded later by 'patch = patch[0]'

            # Literal blocks need literal block notation so that they do not
            # turn into paragraphs.
            if isinstance(node, LITERAL_TYPE_NODES):
                msgstr = '::\n\n' + indent(msgstr, ' '*3)

            patch = new_document(source, settings)
            CustomLocaleReporter(node.source, node.line).set_reporter(patch)
            parser.parse(msgstr, patch)
            try:
                patch = patch[0]
            except IndexError:  # empty node
                pass
            # XXX doctest and other block markup
            if not isinstance(
                    patch,
                    (nodes.paragraph,) + LITERAL_TYPE_NODES + IMAGE_TYPE_NODES):
                continue  # skip for now

            # auto-numbered foot note reference should use original 'ids'.
            def is_autonumber_footnote_ref(node):
                return isinstance(node, nodes.footnote_reference) and \
                    node.get('auto') == 1

            def list_replace_or_append(lst, old, new):
                if old in lst:
                    lst[lst.index(old)] = new
                else:
                    lst.append(new)
            old_foot_refs = node.traverse(is_autonumber_footnote_ref)
            new_foot_refs = patch.traverse(is_autonumber_footnote_ref)
            if len(old_foot_refs) != len(new_foot_refs):
                env.warn_node('inconsistent footnote references in '
                              'translated message', node)
            old_foot_namerefs = {}
            for r in old_foot_refs:
                old_foot_namerefs.setdefault(r.get('refname'), []).append(r)
            for new in new_foot_refs:
                refname = new.get('refname')
                refs = old_foot_namerefs.get(refname, [])
                if not refs:
                    continue

                old = refs.pop(0)
                new['ids'] = old['ids']
                for id in new['ids']:
                    self.document.ids[id] = new
                list_replace_or_append(
                    self.document.autofootnote_refs, old, new)
                if refname:
                    list_replace_or_append(
                        self.document.footnote_refs.setdefault(refname, []),
                        old, new)
                    list_replace_or_append(
                        self.document.refnames.setdefault(refname, []),
                        old, new)

            # reference should use new (translated) 'refname'.
            # * reference target ".. _Python: ..." is not translatable.
            # * use translated refname for section refname.
            # * inline reference "`Python <...>`_" has no 'refname'.
            def is_refnamed_ref(node):
                return isinstance(node, nodes.reference) and  \
                    'refname' in node
            old_refs = node.traverse(is_refnamed_ref)
            new_refs = patch.traverse(is_refnamed_ref)
            if len(old_refs) != len(new_refs):
                env.warn_node('inconsistent references in '
                              'translated message', node)
            old_ref_names = [r['refname'] for r in old_refs]
            new_ref_names = [r['refname'] for r in new_refs]
            orphans = list(set(old_ref_names) - set(new_ref_names))
            for new in new_refs:
                if not self.document.has_name(new['refname']):
                    # Maybe refname is translated but target is not translated.
                    # Note: multiple translated refnames break link ordering.
                    if orphans:
                        new['refname'] = orphans.pop(0)
                    else:
                        # The orphan refname list is already empty; the number
                        # of references is the same in new_refs and old_refs.
                        pass

                self.document.note_refname(new)

            # refnamed footnote and citation should use original 'ids'.
            def is_refnamed_footnote_ref(node):
                footnote_ref_classes = (nodes.footnote_reference,
                                        nodes.citation_reference)
                return isinstance(node, footnote_ref_classes) and \
                    'refname' in node
            old_refs = node.traverse(is_refnamed_footnote_ref)
            new_refs = patch.traverse(is_refnamed_footnote_ref)
            refname_ids_map = {}
            if len(old_refs) != len(new_refs):
                env.warn_node('inconsistent references in '
                              'translated message', node)
            for old in old_refs:
                refname_ids_map[old["refname"]] = old["ids"]
            for new in new_refs:
                refname = new["refname"]
                if refname in refname_ids_map:
                    new["ids"] = refname_ids_map[refname]

            # The original pending_xref['reftarget'] contains the untranslated
            # target name, so the new pending_xref must use the original one.
            # This prevents the translation from changing ref-targets.
            old_refs = node.traverse(addnodes.pending_xref)
            new_refs = patch.traverse(addnodes.pending_xref)
            xref_reftarget_map = {}
            if len(old_refs) != len(new_refs):
                env.warn_node('inconsistent term references in '
                              'translated message', node)

            def get_ref_key(node):
                case = node["refdomain"], node["reftype"]
                if case == ('std', 'term'):
                    return None
                else:
                    return (
                        node["refdomain"],
                        node["reftype"],
                        node['reftarget'],)

            for old in old_refs:
                key = get_ref_key(old)
                if key:
                    xref_reftarget_map[key] = old.attributes
            for new in new_refs:
                key = get_ref_key(new)
                # Copy attributes to keep original node behavior. Especially
                # copying 'reftarget', 'py:module', 'py:class' are needed.
                for k, v in xref_reftarget_map.get(key, {}).items():
                    # Note: This implementation overwrite all attributes.
                    # if some attributes `k` should not be overwritten,
                    # you should provide exclude list as:
                    # `if k not in EXCLUDE_LIST: new[k] = v`
                    new[k] = v

            # update leaves
            for child in patch.children:
                child.parent = node
            node.children = patch.children

            # for highlighting, which expects .rawsource and .astext() to match.
            if isinstance(node, LITERAL_TYPE_NODES):
                node.rawsource = node.astext()

            if isinstance(node, IMAGE_TYPE_NODES):
                node.update_all_atts(patch)

            node['translated'] = True

        if 'index' in env.config.gettext_additional_targets:
            # Extract and translate messages for index entries.
            for node, entries in traverse_translatable_index(self.document):
                new_entries = []
                for type, msg, tid, main in entries:
                    msg_parts = split_index_msg(type, msg)
                    msgstr_parts = []
                    for part in msg_parts:
                        msgstr = catalog.gettext(part)
                        if not msgstr:
                            msgstr = part
                        msgstr_parts.append(msgstr)

                    new_entries.append((type, ';'.join(msgstr_parts), tid, main))

                node['raw_entries'] = entries
                node['entries'] = new_entries
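
# A hedged sketch, not taken from the source: a translation transform with an
# apply() like the one above is normally registered in a Sphinx extension via
# app.add_transform(). 'Locale' is a placeholder for the transform class whose
# method is excerpted here; its real name is not visible in this excerpt.
def setup(app):
    app.add_transform(Locale)
    return {'parallel_read_safe': True}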
Example #36
#!/usr/bin/python

# $Id: rst2odt.py 5839 2009-01-07 19:09:28Z dkuhlman $
# Author: Dave Kuhlman <*****@*****.**>
# Copyright: This module has been placed in the public domain.
"""
A front end to the Docutils Publisher, producing OpenOffice documents.
"""

try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass

from docutils.core import default_description, default_usage
from docutils.writers.odf_odt import Writer, Reader, ODFTranslator, SubElement
from docutils.parsers.rst import Parser
from zoteroODFScan import zotero_odf_scan_publish_cmdline_to_binary

writer = Writer()
reader = Reader()
parser = Parser()

zotero_odf_scan_publish_cmdline_to_binary(reader=reader,
                                          parser=parser,
                                          writer=writer)
Example #37
    def parse(src):
        parser = Parser()
        settings = rst_mock_settings

        reader = Reader()
        return reader.read(StringInput(src), parser, settings)
Example #38
    def parse(self, content):
        settings = OptionParser(components=(Parser, Writer)) \
                   .get_default_values()
        doc = new_document('doc', settings)
        parser = Parser()
        parser.parse(content, doc)

        stories = []
        for node in doc:
            if isinstance(node, docutils.nodes.section):
                # Each section is a story
                if isinstance(node[0], docutils.nodes.title):
                    story_title = node.pop(0).astext()
                else:
                    warnings.warn('Story without a title found: %r' % node)
                    continue

                tasks = []
                points = None
                if isinstance(node[-1], docutils.nodes.bullet_list):
                    # Tasks are listed in a bullet list at the end of the story
                    tasklist = node.pop()
                    for line in tasklist:
                        line = line.astext()
                        # The task estimate is given in parentheses at the very
                        # end; the word "дней" ("days") is optional.
                        match = re.search(ur'^.+\((\d+)[^\)]{0,5}\)$', line,
                                          re.UNICODE | re.DOTALL)
                        if match:
                            points = int(match.group(1))
                            line = re.sub(ur'^(.+?)\(\d+[^\)]{0,5}\)$', r'\1',
                                          line)
                        else:
                            points = 0

                        # The assignee is named before the task title and is
                        # separated from it by a colon.
                        match = re.search(ur'^\+?([\w]+):\s*(.+)$', line,
                                          re.UNICODE | re.DOTALL)
                        if match:
                            person = match.group(1)
                            task_title = match.group(2)
                            state = Task.WORK
                        else:
                            task_title = line
                            person = None
                            state = Task.NEW

                        if line.startswith('+'):
                            state = Task.DONE

                        task = Task(task_title, state, person=person, points=points)
                        tasks.append(task)

                # Everything else in the story is its description.
                writer = Writer()
                pseudo_doc = new_document(story_title, settings)
                pseudo_doc.children = [node]
                writer.document = pseudo_doc
                writer.translate()
                description = ''.join(writer.body)

                stories.append(Story(story_title, description, tasks))

        return stories
Example #39
    def run(self):
        """
        Implements the directive
        """
        # Get content and options
        file_path = self.arguments[0]
        use_title = 'show-title' in self.options
        use_header = 'show-header' in self.options
        main_key = self.options.get('key', None)
        show_key = 'show-key' in self.options
        if not file_path:
            return [self._report('file_path -option missing')]

        # Transform the path suitable for processing
        file_path = self._get_directive_path(file_path)

        parset = ParameterSet(file_path)
        if main_key:
            parset = parset[main_key]

        title, messages = self.make_title()

        if not parset:
            return [nodes.paragraph(text='')]

        table_data = []
        docparser = Parser()
        # Iterates rows: put the given data in rst elements
        for key in parset.keys():
            the_val = encode(parset[key])
            the_doc = parset.get_doc(key) or ''
            if main_key and show_key:
                key = ".".join([main_key.split(".")[-1],key])
            node1 = nodes.strong(text=key)
            node2 = nodes.literal(text=the_val)
            subdoc = utils.new_document('<>', self.state.document.settings)
            docparser.parse(the_doc, subdoc)
            node3 = subdoc.children
            table_data.append([node1, node2, node3])


        col_widths = self.get_column_widths(3)
        self.check_table_dimensions(table_data, 0, 0)
        header_rows = 0
        if use_header:
            header_rows = 1
            table_data.insert(0, [nodes.strong(text="Key"),
                                  nodes.strong(text="Default"),
                                  nodes.strong(text="Description"),
                                  ])

        # Generate the table node from the given list of elements
        table_node = self.build_table_from_list(table_data, col_widths, 
                                                header_rows, 0)

        # Optional class parameter
        table_node['classes'] += self.options.get('class', [])

        if use_title and title:
            if main_key:
                ttxt = title.astext()
                title = nodes.title(text="".join([ttxt,' (',main_key,')']))
            table_node.insert(0, title)

        return [table_node] + messages
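
# A hedged sketch, not taken from the source: a directive whose run() builds a
# parameter table like the one above is typically registered in a Sphinx
# extension; 'parameterset' and 'ParameterSetTableDirective' are placeholder
# names, not taken from the original code.
def setup(app):
    app.add_directive('parameterset', ParameterSetTableDirective)
    return {'parallel_read_safe': True}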
Example #40
    def run(self):
        gallerytype = self.arguments[0]
        includedocstring = True
        maxwidth = self.options.get('image-width',630 if gallerytype == 'databases' else 200)
        maxheight = self.options.get('image-height',150)
        maxcolumns = self.options.get('columns',1 if gallerytype == 'databases' else 3)

        #self.state.document.settings.env.images
        #builder=self.state.document.settings.env.app.builder
        buildertype = self.state.document.settings.env.app.builder.name
        outdir = self.state.document.settings.env.app.builder.outdir
        #docname=self.state.document.settings.env.docname
        imagewritedir = os.path.join(os.path.join(outdir,'_images'),gallerytype)
        imagereaddir = os.path.join(self.state.document.settings.env.srcdir,'..','images',gallerytype)
        imagelinkdir = '_images'
        linkdir = 'openravepy'
        imageext = 'jpg'

        try:
            os.makedirs(imagewritedir)
        except OSError:
            pass
        parentmodule = __import__('openravepy.'+gallerytype,fromlist=['openravepy'])
        modulenames = []
        for name in dir(parentmodule):
            if not name.startswith('__'):
                try:
                    modulename = 'openravepy.'+gallerytype+'.'+name
                    m=__import__(modulename,fromlist=['openravepy'])
                    if type(m) is ModuleType:
                        docstring = ''
                        if m.__doc__ is not None and includedocstring:
                            endindex = m.__doc__.find('\n')
                            docstring = m.__doc__[:endindex] if endindex > 0 else ''
                        modulenames.append([modulename,name,docstring])
                except ImportError:
                    pass

        # copy the images
        link_templates = {'html':'<td><p><b>%s</b></p><a href="%s.html"><img src="%s" border="0" class="thumbimage" alt="%s"/></a>%s</td>\n', 'json':'<td><p><b>%s</b></p><a href="../%s/"><img src="../%s" border="0" class="thumbimage" alt="../%s"/></a>%s</td>\n'}
        link_template = link_templates.get(buildertype,link_templates['html'])
        rows = []
        for modulename, name, docstring in modulenames:
            imthumbname = name+'_thumb.'+imageext
            try:
                im = Image.open(os.path.join(imagereaddir,name+'_thumb.'+imageext))
            except IOError:
                try:
                    im = Image.open(os.path.join(imagereaddir,name+'.'+imageext))
                except IOError:
                    im = None
            if im is not None:
                if im.size[0]*maxheight/im.size[1] > maxwidth:
                    newsize = [maxwidth,im.size[1]*maxwidth/im.size[0]]
                else:
                    newsize = [im.size[0]*maxheight/im.size[1],maxheight]
                imthumb = im.resize(newsize, Image.ANTIALIAS)
                with open(os.path.join(imagewritedir,imthumbname),'wb') as f:
                    imthumb.save(f)
                if len(docstring) > 0:
                    docstring = '<p>%s</p>'%docstring
                rows.append(link_template%(name,linkdir+'/'+gallerytype+'.'+name, imagelinkdir+'/'+gallerytype+'/'+imthumbname, name,docstring))
                    
        # have to have different links for different builders
        # for buildertype, link_template in link_templates.iteritems():
        #     for modulename, name, docstring in modulenames:
        #         imthumbname = name+'_thumb.'+imageext

        # Only write out the file if the contents have actually changed.
        # Otherwise, this triggers a full rebuild of the docs.
        rowstext = '<table>'
        for irow,row in enumerate(rows):
            if irow%maxcolumns == 0:
                rowstext += '<tr>'
            rowstext += row
            if irow%maxcolumns == maxcolumns-1:
                rowstext += '</tr>\n'
        rowstext += '</table>'
        # indent every line so the generated html stays inside the raw:: html directive
        content = '.. raw:: html\n\n  '+rowstext.replace('\n','\n    ')+'\n\n'

        parser = Parser()
        document = docutils.utils.new_document("<partial node>")
        document.settings = self.state.document.settings
        parser.parse(content,document)
        return document.children
Example #41
def rst2nodes(text, settings):
    new_doc = new_document('temp-string', settings)
    parser = Parser()
    parser.parse(text, new_doc)
    return new_doc.children
Example #42
def parse(data):
    parser = Parser()
    settings = OptionParser(components=(Parser,)).get_default_values()
    document = new_document("/tmp/fake", settings)
    parser.parse(data, document)
    return document
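
# A minimal usage sketch, not part of the original example; the excerpt's own
# imports are assumed, so they are repeated here to keep the sketch runnable.
if __name__ == "__main__":
    from docutils.frontend import OptionParser
    from docutils.parsers.rst import Parser
    from docutils.utils import new_document

    document = parse("Title\n=====\n\nSome *emphasized* text.\n")
    print(document.pformat())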
Example #43
    def apply(self):
        env = self.document.settings.env
        settings, source = self.document.settings, self.document['source']
        # XXX check if this is reliable
        assert source.startswith(env.srcdir)
        docname = path.splitext(relative_path(env.srcdir, source))[0]
        textdomain = find_catalog(docname,
                                  self.document.settings.gettext_compact)

        # fetch translations
        dirs = [
            path.join(env.srcdir, directory)
            for directory in env.config.locale_dirs
        ]
        catalog, has_catalog = init_locale(dirs, env.config.language,
                                           textdomain)
        if not has_catalog:
            return

        parser = RSTParser()

        #phase1: replace reference ids with translated names
        for node, msg in extract_messages(self.document):
            msgstr = catalog.gettext(msg)
            # XXX add marker to untranslated parts
            if not msgstr or msgstr == msg or not msgstr.strip():
                # as-of-yet untranslated
                continue

            # Avoid "Literal block expected; none found." warnings.
            # If msgstr ends with '::' then it cause warning message at
            # parser.parse() processing.
            # literal-block-warning is only appear in avobe case.
            if msgstr.strip().endswith('::'):
                msgstr += '\n\n   dummy literal'
                # the dummy literal node is discarded later by 'patch = patch[0]'

            patch = new_document(source, settings)
            CustomLocaleReporter(node.source, node.line).set_reporter(patch)
            parser.parse(msgstr, patch)
            try:
                patch = patch[0]
            except IndexError:  # empty node
                pass
            # XXX doctest and other block markup
            if not isinstance(patch, nodes.paragraph):
                continue  # skip for now

            processed = False  # skip flag

            # update title(section) target name-id mapping
            if isinstance(node, nodes.title):
                section_node = node.parent
                new_name = nodes.fully_normalize_name(patch.astext())
                old_name = nodes.fully_normalize_name(node.astext())

                if old_name != new_name:
                    # if name would be changed, replace node names and
                    # document nameids mapping with new name.
                    names = section_node.setdefault('names', [])
                    names.append(new_name)
                    if old_name in names:
                        names.remove(old_name)

                    _id = self.document.nameids.get(old_name, None)
                    explicit = self.document.nametypes.get(old_name, None)

                    # * if explicit: _id is a label; the title node needs another id.
                    # * if not explicit:
                    #
                    #   * _id is None:
                    #
                    #     _id is None means _id was duplicated.
                    #     old_name entry still exists in nameids and
                    #     nametypes for another duplicated entry.
                    #
                    #   * _id is provided: proceed as below
                    if not explicit and _id:
                        # _id was not duplicated.
                        # remove old_name entry from document ids database
                        # to reuse original _id.
                        self.document.nameids.pop(old_name, None)
                        self.document.nametypes.pop(old_name, None)
                        self.document.ids.pop(_id, None)

                    # re-entry with new named section node.
                    self.document.note_implicit_target(section_node)

                    # replace target's refname to new target name
                    def is_named_target(node):
                        return isinstance(node, nodes.target) and  \
                            node.get('refname') == old_name

                    for old_target in self.document.traverse(is_named_target):
                        old_target['refname'] = new_name

                    processed = True

            # glossary terms update refid
            if isinstance(node, nodes.term):
                gloss_entries = env.temp_data.setdefault(
                    'gloss_entries', set())
                ids = []
                termnodes = []
                for _id in node['names']:
                    if _id in gloss_entries:
                        gloss_entries.remove(_id)
                    _id, _, new_termnodes = \
                        make_termnodes_from_paragraph_node(env, patch, _id)
                    ids.append(_id)
                    termnodes.extend(new_termnodes)

                if termnodes and ids:
                    patch = make_term_from_paragraph_node(termnodes, ids)
                    node['ids'] = patch['ids']
                    node['names'] = patch['names']
                    processed = True

            # update leaves with processed nodes
            if processed:
                for child in patch.children:
                    child.parent = node
                node.children = patch.children
                node['translated'] = True

        #phase2: translation
        for node, msg in extract_messages(self.document):
            if node.get('translated', False):
                continue

            msgstr = catalog.gettext(msg)
            # XXX add marker to untranslated parts
            if not msgstr or msgstr == msg:  # as-of-yet untranslated
                continue

            # Avoid "Literal block expected; none found." warnings.
            # If msgstr ends with '::' then it cause warning message at
            # parser.parse() processing.
            # literal-block-warning is only appear in avobe case.
            if msgstr.strip().endswith('::'):
                msgstr += '\n\n   dummy literal'
                # the dummy literal node is discarded later by 'patch = patch[0]'

            patch = new_document(source, settings)
            CustomLocaleReporter(node.source, node.line).set_reporter(patch)
            parser.parse(msgstr, patch)
            try:
                patch = patch[0]
            except IndexError:  # empty node
                pass
            # XXX doctest and other block markup
            if not isinstance(patch, nodes.paragraph):
                continue  # skip for now

            # auto-numbered foot note reference should use original 'ids'.
            def is_autonumber_footnote_ref(node):
                return isinstance(node, nodes.footnote_reference) and \
                    node.get('auto') == 1

            def list_replace_or_append(lst, old, new):
                if old in lst:
                    lst[lst.index(old)] = new
                else:
                    lst.append(new)

            old_foot_refs = node.traverse(is_autonumber_footnote_ref)
            new_foot_refs = patch.traverse(is_autonumber_footnote_ref)
            if len(old_foot_refs) != len(new_foot_refs):
                env.warn_node(
                    'inconsistent footnote references in '
                    'translated message', node)
            old_foot_namerefs = {}
            for r in old_foot_refs:
                old_foot_namerefs.setdefault(r.get('refname'), []).append(r)
            for new in new_foot_refs:
                refname = new.get('refname')
                refs = old_foot_namerefs.get(refname, [])
                if not refs:
                    continue

                old = refs.pop(0)
                new['ids'] = old['ids']
                for id in new['ids']:
                    self.document.ids[id] = new
                list_replace_or_append(self.document.autofootnote_refs, old,
                                       new)
                if refname:
                    list_replace_or_append(
                        self.document.footnote_refs.setdefault(refname, []),
                        old, new)
                    list_replace_or_append(
                        self.document.refnames.setdefault(refname, []), old,
                        new)

            # reference should use new (translated) 'refname'.
            # * reference target ".. _Python: ..." is not translatable.
            # * use translated refname for section refname.
            # * inline reference "`Python <...>`_" has no 'refname'.
            def is_refnamed_ref(node):
                return isinstance(node, nodes.reference) and  \
                    'refname' in node

            old_refs = node.traverse(is_refnamed_ref)
            new_refs = patch.traverse(is_refnamed_ref)
            if len(old_refs) != len(new_refs):
                env.warn_node(
                    'inconsistent references in '
                    'translated message', node)
            old_ref_names = [r['refname'] for r in old_refs]
            new_ref_names = [r['refname'] for r in new_refs]
            orphans = list(set(old_ref_names) - set(new_ref_names))
            for new in new_refs:
                if not self.document.has_name(new['refname']):
                    # Maybe refname is translated but target is not translated.
                    # Note: multiple translated refnames break link ordering.
                    if orphans:
                        new['refname'] = orphans.pop(0)
                    else:
                        # The orphan refname list is already empty; the number
                        # of references is the same in new_refs and old_refs.
                        pass

                self.document.note_refname(new)

            # refnamed footnote and citation should use original 'ids'.
            def is_refnamed_footnote_ref(node):
                footnote_ref_classes = (nodes.footnote_reference,
                                        nodes.citation_reference)
                return isinstance(node, footnote_ref_classes) and \
                    'refname' in node

            old_refs = node.traverse(is_refnamed_footnote_ref)
            new_refs = patch.traverse(is_refnamed_footnote_ref)
            refname_ids_map = {}
            if len(old_refs) != len(new_refs):
                env.warn_node(
                    'inconsistent references in '
                    'translated message', node)
            for old in old_refs:
                refname_ids_map[old["refname"]] = old["ids"]
            for new in new_refs:
                refname = new["refname"]
                if refname in refname_ids_map:
                    new["ids"] = refname_ids_map[refname]

            # The original pending_xref['reftarget'] contains the untranslated
            # target name, so the new pending_xref must use the original one.
            # This prevents the translation from changing ref-targets.
            old_refs = node.traverse(addnodes.pending_xref)
            new_refs = patch.traverse(addnodes.pending_xref)
            xref_reftarget_map = {}
            if len(old_refs) != len(new_refs):
                env.warn_node(
                    'inconsistent term references in '
                    'translated message', node)

            def get_ref_key(node):
                case = node["refdomain"], node["reftype"]
                if case == ('std', 'term'):
                    return None
                else:
                    return (
                        node["refdomain"],
                        node["reftype"],
                        node['reftarget'],
                    )
Example #45
def read2(text):
  parser = Parser()
  source_path = "<string>"
  document = new_document(source_path, get_settings())
  parser.parse(text, document)
  return document
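
# A minimal usage sketch, not part of the original example: get_settings() is
# assumed to be defined elsewhere in that module; a stand-in built the same way
# as in the other examples is provided here purely for illustration.
if __name__ == "__main__":
  from docutils.frontend import OptionParser
  from docutils.parsers.rst import Parser
  from docutils.utils import new_document

  def get_settings():  # hypothetical stand-in, not from the source
    return OptionParser(components=(Parser,)).get_default_values()

  document = read2("Some *reStructuredText* input.")
  print(document.pformat())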