Example 1
 def assertParses(self, source, expected, alt=False):  # noqa
     parser = CommonMarkParser()
     parser.parse(dedent(source), new_document('<string>'))
     self.assertMultiLineEqual(
         dedent(expected).lstrip(),
         dedent(parser.document.asdom().toprettyxml(indent='  ')),
     )
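For reference, the XML that a helper like this compares against can be inspected by parsing a snippet directly and pretty-printing the doctree. A minimal sketch (the exact output varies with the recommonmark and docutils versions):

from docutils.utils import new_document
from recommonmark.parser import CommonMarkParser

parser = CommonMarkParser()
parser.parse('# Header\n\nSome *emphasis*.\n', new_document('<string>'))
# The parser keeps a reference to the document it just filled in.
print(parser.document.asdom().toprettyxml(indent='  '))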
Example 3
    def run(self):
        parser = CommonMarkParser()

        document = self.state.document
        filename = self.arguments[0]
        curdir = getattr(document.settings, "_source", None) or os.getcwd()
        filepath = os.path.join(curdir, filename)

        with open(filepath) as rf:
            text = rf.read()
            subdocument = new_document(filepath)
            parser.parse(text, subdocument)
        return subdocument.children
Example 4
    def cell(self, text, morecols=0, source=None):
        entry = nodes.entry(morecols=morecols)
        if not isinstance(text, string_types):
            text = str(text)

        parser = CommonMarkParser()
        new_doc = new_document(None)
        parser.parse(text, new_doc)

        for child in new_doc.children[:]:
            child.source = source
            entry += child
        return entry
Example 5
    def _parse_md_to(self, node, content):
        # HACK: make the CommonMarkParser think that `node` is actually the full
        # document
        assert not hasattr(node, "reporter")
        assert not hasattr(node, "note_parse_message")

        node.reporter = self.state.document.reporter
        node.note_parse_message = self.state.document.note_parse_message

        md_parser = CommonMarkParser()
        md_parser.parse(content, node)

        del node.reporter
        del node.note_parse_message
Example 6
    def run(self):
        from recommonmark.parser import CommonMarkParser
        parser = CommonMarkParser()

        document = self.state.document
        filename = self.arguments[0]
        filepath = os.path.join(os.path.dirname(document.settings._source), filename)

        with open(filepath) as rf:
            text = rf.read()
            from docutils.utils import new_document
            subdocument = new_document(filepath)
            # passing subdocument (not document)
            parser.parse(text, subdocument)
        return subdocument.children
Example 8
 def iter_properties(self, cls):
     """Iterate over (property, type, description)"""
     schema = cls.resolve_references(cls._schema)
     properties = schema.get('properties', {})
     mdparser = CommonMarkParser()
     for prop, propschema in properties.items():
         doc = utils.new_document("md_doc")
         mdparser.parse(propschema.get('description', ' '), doc)
         descr = nodes.paragraph()
         descr.document = self.state.document
         nested_parse_with_titles(
             self.state,
             nodes.paragraph(text=self.type_description(propschema)),
             descr)

         yield ([nodes.paragraph(text=prop)],
                # [nodes.paragraph(text=self.type_description(propschema))],  # ?class
                descr.children,
                doc.children)
Example 9
    def test_basic_parser(self):
        source = '# Header'

        ret = publish_parts(source=source,
                            writer_name='html',
                            parser=CommonMarkParser())
        self.assertEqual(ret['title'], 'Header')
Example 10
def test_basic_parser():
    source = '# Header'

    ret = publish_parts(source=source,
                        writer_name='html',
                        parser=CommonMarkParser())

    assert ret['title'] == 'Header'
Example 11
def build_row(item):
    """Return nodes.row with property description"""

    prop, propschema, required = item
    row = nodes.row()

    # Property 
    row += nodes.entry('', nodes.paragraph(text=prop))

    # Type
    str_type = type_description(propschema) 
    par_type = nodes.paragraph()

    is_text = True
    for part in reClassDef.split(str_type):
        if part:
            if is_text:
                par_type += nodes.Text(part)
            else:
                par_type += addnodes.pending_xref(
                    reftarget=part,
                    reftype="class",
                    refdomain=None,  # py:class="None" py:module="altair" refdoc="user_guide/marks"
                    refexplicit=False,
                    refwarn=False
                )
                par_type += nodes.literal(text=part, classes=["xref", "py", "py-class"])
        is_text = not is_text

    row += nodes.entry('', par_type)

    # Description
    md_parser = CommonMarkParser()
    str_descr = "***Required.*** " if required else ""
    str_descr += propschema.get('description', ' ')
    doc_descr = utils.new_document("schema_description")
    md_parser.parse(str_descr, doc_descr)

    row += nodes.entry('', *doc_descr.children)

    return row
Example 13
def build_row(item):
    """Return nodes.row with property description"""

    prop, propschema, required = item
    row = nodes.row()

    # Property

    row += nodes.entry('', nodes.paragraph(text=prop), classes=["vl-prop"])

    # Type
    str_type = type_description(propschema)
    par_type = nodes.paragraph()

    is_text = True
    for part in reClassDef.split(str_type):
        if part:
            if is_text:
                add_text(par_type, part)
            else:
                add_class_def(par_type, part)
        is_text = not is_text

    # row += nodes.entry('')
    row += nodes.entry('', par_type)  #, classes=["vl-type-def"]

    # Description
    md_parser = CommonMarkParser()
    #str_descr = "***Required.*** " if required else ""
    str_descr = ""
    str_descr += propschema.get('description', ' ')
    doc_descr = utils.new_document("schema_description")
    md_parser.parse(str_descr, doc_descr)

    # row += nodes.entry('', *doc_descr.children, classes="vl-decsr")
    row += nodes.entry('', *doc_descr.children, classes=["vl-decsr"])

    return row
Example 14
    def run(self):
        """Include a file as part of the content of this reST file."""

        # copied from docutils.parsers.rst.directives.misc.Include
        if not self.state.document.settings.file_insertion_enabled:
            raise self.warning('"%s" directive disabled.' % self.name)
        source = self.state_machine.input_lines.source(
            self.lineno - self.state_machine.input_offset - 1)
        source_dir = os.path.dirname(os.path.abspath(source))
        path = directives.path(self.arguments[0])
        if path.startswith('<') and path.endswith('>'):
            path = os.path.join(self.standard_include_path, path[1:-1])
        path = os.path.normpath(os.path.join(source_dir, path))
        path = utils.relative_path(None, path)
        path = nodes.reprunicode(path)
        encoding = self.options.get(
            'encoding', self.state.document.settings.input_encoding)
        e_handler = self.state.document.settings.input_encoding_error_handler
        try:
            self.state.document.settings.record_dependencies.add(path)
            include_file = io.FileInput(source_path=path,
                                        encoding=encoding,
                                        error_handler=e_handler)
        except UnicodeEncodeError as error:
            raise self.severe(u'Problems with "%s" directive path:\n'
                              'Cannot encode input file path "%s" '
                              '(wrong locale?).' %
                              (self.name, SafeString(path)))
        except IOError as error:
            raise self.severe(u'Problems with "%s" directive path:\n%s.' %
                              (self.name, ErrorString(error)))
        startline = self.options.get('start-line', None)
        endline = self.options.get('end-line', None)
        try:
            if startline or (endline is not None):
                lines = include_file.readlines()
                rawtext = ''.join(lines[startline:endline])
            else:
                rawtext = include_file.read()
        except UnicodeError as error:
            raise self.severe(u'Problem with "%s" directive:\n%s' %
                              (self.name, ErrorString(error)))
        # start-after/end-before: no restrictions on newlines in match-text,
        # and no restrictions on matching inside lines vs. line boundaries
        after_text = self.options.get('start-after', None)
        if after_text:
            # skip content in rawtext before *and incl.* a matching text
            after_index = rawtext.find(after_text)
            if after_index < 0:
                raise self.severe('Problem with "start-after" option of "%s" '
                                  'directive:\nText not found.' % self.name)
            rawtext = rawtext[after_index + len(after_text):]
        before_text = self.options.get('end-before', None)
        if before_text:
            # skip content in rawtext after *and incl.* a matching text
            before_index = rawtext.find(before_text)
            if before_index < 0:
                raise self.severe('Problem with "end-before" option of "%s" '
                                  'directive:\nText not found.' % self.name)
            rawtext = rawtext[:before_index]

        # copied code ends
        parser = CommonMarkParser()
        md_document = utils.new_document(path, self.state.document.settings)
        parser.parse(rawtext, md_document)
        return md_document.children
Example 15
        def parse(self, inputstring, document):
            """Use the upstream parser and clean up afterwards.
            """
            # check for exorbitantly long lines
            for i, line in enumerate(inputstring.split('\n')):
                if len(line) > document.settings.line_length_limit:
                    error = document.reporter.error(
                        'Line %d exceeds the line-length-limit.'%(i+1))
                    document.append(error)
                    return

            # pass to upstream parser
            try:
                CommonMarkParser.parse(self, inputstring, document)
            except Exception as err:
                error = document.reporter.error('Parsing with "recommonmark" '
                                                'returned the error:\n%s'%err)
                document.append(error)

            # Post-Processing
            # ---------------

            # merge adjoining Text nodes:
            for node in document.findall(nodes.TextElement):
                children = node.children
                i = 0
                while i+1 < len(children):
                    if (isinstance(children[i], nodes.Text)
                        and isinstance(children[i+1], nodes.Text)):
                        children[i] = nodes.Text(children[i]+children.pop(i+1))
                        children[i].parent = node
                    else:
                        i += 1

            # add "code" class argument to literal elements (inline and block)
            for node in document.findall(lambda n: isinstance(n,
                                    (nodes.literal, nodes.literal_block))):
                node['classes'].append('code')
            # move "language" argument to classes
            for node in document.findall(nodes.literal_block):
                if 'language' in node.attributes:
                    node['classes'].append(node['language'])
                    del node['language']

            # remove empty target nodes
            for node in list(document.findall(nodes.target)):
                # remove empty name
                node['names'] = [v for v in node['names'] if v]
                if node.children or [v for v in node.attributes.values() if v]:
                    continue
                node.parent.remove(node)

            # replace raw nodes if raw is not allowed
            if not document.settings.raw_enabled:
                for node in document.findall(nodes.raw):
                    warning = document.reporter.warning('Raw content disabled.')
                    node.parent.replace(node, warning)

            # fix section nodes
            for node in document.findall(nodes.section):
                # remove spurious IDs (first may be from duplicate name)
                if len(node['ids']) > 1:
                    node['ids'].pop()
                # fix section levels (recommonmark 0.4.0
                # later versions silently ignore incompatible levels)
                if 'level' in node:
                    section_level = self.get_section_level(node)
                    if node['level'] != section_level:
                        warning = document.reporter.warning(
                            'Title level inconsistent. Changing from %d to %d.'
                            %(node['level'], section_level),
                            nodes.literal_block('', node[0].astext()))
                        node.insert(1, warning)
                        # remove non-standard attribute "level"
                        del node['level']
    
            # drop pending_xref (Sphinx cross reference extension)
            for node in document.findall(addnodes.pending_xref):
                reference = node.children[0]
                if 'name' not in reference:
                    reference['name'] = nodes.fully_normalize_name(
                                                        reference.astext())
                node.parent.replace(node, reference)
Example 16
from docutils.utils import new_document
from recommonmark.parser import CommonMarkParser
# from recommonmark.transform import AutoStructify

parser = CommonMarkParser()
with open("./src/doc.md") as rf:
    # parse() fills a docutils document in place and returns None, so build a
    # target document first and inspect it after parsing.
    document = new_document("./src/doc.md")
    parser.parse(rf.read(), document)
    print(document.pformat())
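When the defaults created by new_document() are not enough, the usual docutils idiom is to build the runtime settings from the parser component explicitly. A minimal sketch, assuming docutils' classic frontend.OptionParser API (deprecated in recent releases but still available):

from docutils.frontend import OptionParser
from docutils.utils import new_document
from recommonmark.parser import CommonMarkParser

parser = CommonMarkParser()
# Collect default runtime settings for this parser component.
settings = OptionParser(components=(CommonMarkParser,)).get_default_values()
document = new_document("./src/doc.md", settings)
with open("./src/doc.md") as rf:
    parser.parse(rf.read(), document)
print(document.pformat())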
Example 17
def cm2xetex():
    description = ('Generate xetex document from markdown sources. ' +
                   default_description)
    publish_cmdline(writer_name='latex',
                    parser=CommonMarkParser(),
                    description=description)
Example 18
class WPTLintRules(Directive):
    """A docutils directive to generate documentation for the
    web-platform-test-test's linting tool from its source code. Requires a
    single argument: a Python module specifier for a file which declares
    linting rules."""
    has_content = True
    required_arguments = 1
    optional_arguments = 0
    _md_parser = CommonMarkParser()

    @staticmethod
    def _parse_markdown(markdown):
        WPTLintRules._md_parser.parse(markdown, new_document("<string>"))
        return WPTLintRules._md_parser.document.children[0]

    @property
    def module_specifier(self):
        return self.arguments[0]

    def _get_rules(self):
        try:
            module = importlib.import_module(self.module_specifier)
        except ImportError:
            raise ImportError(
                """wpt-lint-rules: unable to resolve the module at "{}".""".
                format(self.module_specifier))

        for binding_name, value in iteritems(module.__dict__):
            if hasattr(value, "__abstractmethods__") and len(
                    value.__abstractmethods__):
                continue

            description = getattr(value, "description", None)
            name = getattr(value, "name", None)
            to_fix = getattr(value, "to_fix", None)

            if description is None:
                continue

            if to_fix is not None:
                to_fix = textwrap.dedent(to_fix)

            yield {
                "name": name,
                "description": textwrap.dedent(description),
                "to_fix": to_fix
            }

    def run(self):
        definition_list = nodes.definition_list()

        for rule in sorted(self._get_rules(), key=lambda rule: rule['name']):
            item = nodes.definition_list_item()
            definition = nodes.definition()
            term = nodes.term()
            item += term
            item += definition
            definition_list += item

            term += nodes.literal(text=rule["name"])
            definition += WPTLintRules._parse_markdown(rule["description"])

            if rule["to_fix"]:
                definition += nodes.strong(text="To fix:")
                definition += WPTLintRules._parse_markdown(rule["to_fix"])

        if len(definition_list.children) == 0:
            raise Exception(
                """wpt-lint-rules: no linting rules found at "{}".""".format(
                    self.module_specifier))

        return [definition_list]
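For context, a directive like this is normally registered from a Sphinx extension's setup() hook. A minimal sketch; the directive name used here is an assumption, not taken from the source above:

def setup(app):
    # "wpt-lint-rules" is a guessed directive name, for illustration only.
    app.add_directive("wpt-lint-rules", WPTLintRules)
    return {"parallel_read_safe": True, "parallel_write_safe": True}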
Example 19
 def parse(self, inputstring, document):
     content = preprocess_markdown(inputstring)
     CommonMarkParser.parse(self, content, document)
Example 20
 def get_transforms(self):
     return CommonMarkParser.get_transforms(self) + [StringStructify]
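Examples 19 and 20 show two overrides that would typically live on one CommonMarkParser subclass. A minimal sketch of how they fit together, assuming preprocess_markdown and StringStructify are defined elsewhere in the same project (they are not standard recommonmark names):

from recommonmark.parser import CommonMarkParser

class PreprocessingMarkdownParser(CommonMarkParser):
    def parse(self, inputstring, document):
        # preprocess_markdown() is assumed to come from the surrounding project.
        content = preprocess_markdown(inputstring)
        CommonMarkParser.parse(self, content, document)

    def get_transforms(self):
        # StringStructify is assumed to be a docutils Transform defined elsewhere.
        return CommonMarkParser.get_transforms(self) + [StringStructify]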
Example 21
#!/usr/bin/env python
"""
Confluence Wiki output generator for the Docutils Publisher.
"""

try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass

from docutils.core import publish_cmdline, default_description

from rst2confluence import confluence
from recommonmark.parser import CommonMarkParser

description = ('Generates documents in Confluence Wiki format from standalone '
               'markdown sources.  ' + default_description)

publish_cmdline(writer=confluence.Writer(),
                description=description,
                parser=CommonMarkParser())
Example 22
def cm2man():
    description = ('Generate a manpage from markdown sources. ' +
                   default_description)
    publish_cmdline(writer_name='manpage',
                    parser=CommonMarkParser(),
                    description=description)
Example 23
def cm2pseudoxml():
    description = ('Generate pseudo-XML document from markdown sources. ' +
                   default_description)
    publish_cmdline(writer_name='pseudoxml',
                    parser=CommonMarkParser(),
                    description=description)
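The same publish_cmdline pattern extends to the other docutils writers; for instance, an HTML front end would presumably look like this (a sketch following the examples above, not code taken from them):

def cm2html():
    description = ('Generate an HTML document from markdown sources. ' +
                   default_description)
    publish_cmdline(writer_name='html',
                    parser=CommonMarkParser(),
                    description=description)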