def assertParses(self, source, expected, alt=False):  # noqa
    parser = CommonMarkParser()
    parser.parse(dedent(source), new_document('<string>'))
    self.assertMultiLineEqual(
        dedent(expected).lstrip(),
        dedent(parser.document.asdom().toprettyxml(indent=' ')),
    )
def cell(self, text, morecols=0, source=None):
    entry = nodes.entry(morecols=morecols)
    if not isinstance(text, string_types):
        text = str(text)
    parser = CommonMarkParser()
    new_doc = new_document(None)
    parser.parse(text, new_doc)
    for child in new_doc.children[:]:
        child.source = source
        entry += child
    return entry
def run(self):
    parser = CommonMarkParser()
    document = self.state.document
    filename = self.arguments[0]
    # resolve the path relative to the directory of the including document
    source = getattr(document.settings, "_source", None)
    curdir = os.path.dirname(source) if source else os.getcwd()
    filepath = os.path.join(curdir, filename)
    with open(filepath) as rf:
        text = rf.read()
    subdocument = new_document(filepath)
    parser.parse(text, subdocument)
    return subdocument.children
def _parse_md_to(self, node, content):
    # HACK: make the CommonMarkParser think that `node` is actually the full
    # document
    assert not hasattr(node, "reporter")
    assert not hasattr(node, "note_parse_message")
    node.reporter = self.state.document.reporter
    node.note_parse_message = self.state.document.note_parse_message

    md_parser = CommonMarkParser()
    md_parser.parse(content, node)

    del node.reporter
    del node.note_parse_message
def run(self):
    from recommonmark.parser import CommonMarkParser
    parser = CommonMarkParser()
    document = self.state.document
    filename = self.arguments[0]
    filepath = os.path.join(os.path.dirname(document.settings._source), filename)
    with open(filepath) as rf:
        text = rf.read()
    from docutils.utils import new_document
    subdocument = new_document(filepath)  # passing subdocument (not document)
    parser.parse(text, subdocument)
    return subdocument.children
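Both run() methods above follow the same include-a-Markdown-file pattern: read the file, parse it into a fresh document, and return that document's children so they are spliced into the host document. For context, here is a minimal sketch of how such a method is usually hosted in a docutils Directive and registered with Sphinx. The class name MarkdownInclude, the directive name "mdinclude", and the setup() hook are assumptions for illustration, not taken from either snippet.

import os

from docutils.parsers.rst import Directive
from docutils.utils import new_document
from recommonmark.parser import CommonMarkParser


class MarkdownInclude(Directive):  # hypothetical wrapper class
    """Splice a parsed Markdown file into the current document."""

    required_arguments = 1

    def run(self):
        document = self.state.document
        source = getattr(document.settings, "_source", None)
        curdir = os.path.dirname(source) if source else os.getcwd()
        filepath = os.path.join(curdir, self.arguments[0])
        with open(filepath) as rf:
            text = rf.read()
        # parse into a throwaway document that borrows the host settings,
        # then return its children so they are inserted in place
        subdocument = new_document(filepath, document.settings)
        CommonMarkParser().parse(text, subdocument)
        return subdocument.children


def setup(app):
    # the directive name "mdinclude" is an assumption, not from the snippets
    app.add_directive("mdinclude", MarkdownInclude)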
def iter_properties(self, cls):
    """Iterate over (property, type, description)"""
    schema = cls.resolve_references(cls._schema)
    properties = schema.get('properties', {})
    mdparser = CommonMarkParser()
    for prop, propschema in properties.items():
        doc = utils.new_document("md_doc")
        mdparser.parse(propschema.get('description', ' '), doc)
        descr = nodes.paragraph()
        descr.document = self.state.document
        nested_parse_with_titles(
            self.state,
            nodes.paragraph(text=self.type_description(propschema)),
            descr)
        yield (
            [nodes.paragraph(text=prop)],
            # [nodes.paragraph(text=self.type_description(propschema))],  # ?class
            descr.children,
            doc.children)
def build_row(item):
    """Return nodes.row with property description"""
    prop, propschema, required = item
    row = nodes.row()

    # Property
    row += nodes.entry('', nodes.paragraph(text=prop))

    # Type
    str_type = type_description(propschema)
    par_type = nodes.paragraph()
    is_text = True
    for part in reClassDef.split(str_type):
        if part:
            if is_text:
                par_type += nodes.Text(part)
            else:
                par_type += addnodes.pending_xref(
                    reftarget=part,
                    reftype="class",
                    refdomain=None,  # py:class="None" py:module="altair"
                    refdoc="user_guide/marks",
                    refexplicit=False,
                    refwarn=False)
                par_type += nodes.literal(text=part, classes="xref py py-class")
        is_text = not is_text
    row += nodes.entry('', par_type)

    # Description
    md_parser = CommonMarkParser()
    str_descr = "***Required.*** " if required else ""
    str_descr += propschema.get('description', ' ')
    doc_descr = utils.new_document("schema_description")
    md_parser.parse(str_descr, doc_descr)
    row += nodes.entry('', *doc_descr.children)

    return row
def build_row(item):
    """Return nodes.row with property description"""
    prop, propschema, required = item
    row = nodes.row()

    # Property
    row += nodes.entry('', nodes.paragraph(text=prop), classes=["vl-prop"])

    # Type
    str_type = type_description(propschema)
    par_type = nodes.paragraph()
    is_text = True
    for part in reClassDef.split(str_type):
        if part:
            if is_text:
                add_text(par_type, part)
            else:
                add_class_def(par_type, part)
        is_text = not is_text
    # row += nodes.entry('')
    row += nodes.entry('', par_type)  # , classes=["vl-type-def"]

    # Description
    md_parser = CommonMarkParser()
    # str_descr = "***Required.*** " if required else ""
    str_descr = ""
    str_descr += propschema.get('description', ' ')
    doc_descr = utils.new_document("schema_description")
    md_parser.parse(str_descr, doc_descr)
    # row += nodes.entry('', *doc_descr.children, classes="vl-decsr")
    row += nodes.entry('', *doc_descr.children, classes=["vl-decsr"])

    return row
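The two build_row() variants above only produce individual rows. To show where they fit, the following is a hedged sketch of how such rows are typically assembled into a complete docutils table node; the helper name build_schema_table and the column titles are my own, and the real extension may assemble its table differently.

from docutils import nodes


def build_schema_table(items):
    """Assemble build_row() results into a three-column docutils table."""
    table = nodes.table()
    tgroup = nodes.tgroup(cols=3)
    table += tgroup
    for _ in range(3):
        tgroup += nodes.colspec(colwidth=1)

    # header row: property / type / description
    thead = nodes.thead()
    header = nodes.row()
    for title in ("Property", "Type", "Description"):
        header += nodes.entry('', nodes.paragraph(text=title))
    thead += header
    tgroup += thead

    # one body row per (prop, propschema, required) item
    tbody = nodes.tbody()
    for item in items:
        tbody += build_row(item)
    tgroup += tbody
    return table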
def run(self): """Include a file as part of the content of this reST file.""" # copied from docutils.parsers.rst.directives.misc.Include if not self.state.document.settings.file_insertion_enabled: raise self.warning('"%s" directive disabled.' % self.name) source = self.state_machine.input_lines.source( self.lineno - self.state_machine.input_offset - 1) source_dir = os.path.dirname(os.path.abspath(source)) path = directives.path(self.arguments[0]) if path.startswith('<') and path.endswith('>'): path = os.path.join(self.standard_include_path, path[1:-1]) path = os.path.normpath(os.path.join(source_dir, path)) path = utils.relative_path(None, path) path = nodes.reprunicode(path) encoding = self.options.get( 'encoding', self.state.document.settings.input_encoding) e_handler = self.state.document.settings.input_encoding_error_handler try: self.state.document.settings.record_dependencies.add(path) include_file = io.FileInput(source_path=path, encoding=encoding, error_handler=e_handler) except UnicodeEncodeError as error: raise self.severe(u'Problems with "%s" directive path:\n' 'Cannot encode input file path "%s" ' '(wrong locale?).' % (self.name, SafeString(path))) except IOError as error: raise self.severe(u'Problems with "%s" directive path:\n%s.' % (self.name, ErrorString(error))) startline = self.options.get('start-line', None) endline = self.options.get('end-line', None) try: if startline or (endline is not None): lines = include_file.readlines() rawtext = ''.join(lines[startline:endline]) else: rawtext = include_file.read() except UnicodeError as error: raise self.severe(u'Problem with "%s" directive:\n%s' % (self.name, ErrorString(error))) # start-after/end-before: no restrictions on newlines in match-text, # and no restrictions on matching inside lines vs. line boundaries after_text = self.options.get('start-after', None) if after_text: # skip content in rawtext before *and incl.* a matching text after_index = rawtext.find(after_text) if after_index < 0: raise self.severe('Problem with "start-after" option of "%s" ' 'directive:\nText not found.' % self.name) rawtext = rawtext[after_index + len(after_text):] before_text = self.options.get('end-before', None) if before_text: # skip content in rawtext after *and incl.* a matching text before_index = rawtext.find(before_text) if before_index < 0: raise self.severe('Problem with "end-before" option of "%s" ' 'directive:\nText not found.' % self.name) rawtext = rawtext[:before_index] # copied code ends parser = CommonMarkParser() md_document = utils.new_document(path, self.state.document.settings) parser.parse(rawtext, md_document) return md_document.children
def parse(self, inputstring, document): """Use the upstream parser and clean up afterwards. """ # check for exorbitantly long lines for i, line in enumerate(inputstring.split('\n')): if len(line) > document.settings.line_length_limit: error = document.reporter.error( 'Line %d exceeds the line-length-limit.'%(i+1)) document.append(error) return # pass to upstream parser try: CommonMarkParser.parse(self, inputstring, document) except Exception as err: error = document.reporter.error('Parsing with "recommonmark" ' 'returned the error:\n%s'%err) document.append(error) # Post-Processing # --------------- # merge adjoining Text nodes: for node in document.findall(nodes.TextElement): children = node.children i = 0 while i+1 < len(children): if (isinstance(children[i], nodes.Text) and isinstance(children[i+1], nodes.Text)): children[i] = nodes.Text(children[i]+children.pop(i+1)) children[i].parent = node else: i += 1 # add "code" class argument to literal elements (inline and block) for node in document.findall(lambda n: isinstance(n, (nodes.literal, nodes.literal_block))): node['classes'].append('code') # move "language" argument to classes for node in document.findall(nodes.literal_block): if 'language' in node.attributes: node['classes'].append(node['language']) del node['language'] # remove empty target nodes for node in list(document.findall(nodes.target)): # remove empty name node['names'] = [v for v in node['names'] if v] if node.children or [v for v in node.attributes.values() if v]: continue node.parent.remove(node) # replace raw nodes if raw is not allowed if not document.settings.raw_enabled: for node in document.findall(nodes.raw): warning = document.reporter.warning('Raw content disabled.') node.parent.replace(node, warning) # fix section nodes for node in document.findall(nodes.section): # remove spurious IDs (first may be from duplicate name) if len(node['ids']) > 1: node['ids'].pop() # fix section levels (recommonmark 0.4.0 # later versions silently ignore incompatible levels) if 'level' in node: section_level = self.get_section_level(node) if node['level'] != section_level: warning = document.reporter.warning( 'Title level inconsistent. Changing from %d to %d.' %(node['level'], section_level), nodes.literal_block('', node[0].astext())) node.insert(1, warning) # remove non-standard attribute "level" del node['level'] # drop pending_xref (Sphinx cross reference extension) for node in document.findall(addnodes.pending_xref): reference = node.children[0] if 'name' not in reference: reference['name'] = nodes.fully_normalize_name( reference.astext()) node.parent.replace(node, reference)
from docutils.frontend import OptionParser
from docutils.utils import new_document
from recommonmark.parser import CommonMarkParser
# from recommonmark.transform import AutoStructify

parser = CommonMarkParser()
# CommonMarkParser.parse() fills in a docutils document rather than
# returning a tree, so create one first
settings = OptionParser(components=(CommonMarkParser,)).get_default_values()
document = new_document("./src/doc.md", settings)
with open("./src/doc.md") as rf:
    parser.parse(rf.read(), document)
print(document.pformat())
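The commented-out AutoStructify import in the script above hints at recommonmark's extended syntax; AutoStructify is normally installed as a Sphinx transform rather than run by hand. A typical conf.py wiring looks roughly like the sketch below; the specific config values are illustrative, not required.

# conf.py
from recommonmark.transform import AutoStructify

extensions = ['recommonmark']   # recommonmark >= 0.5 registers the .md parser
source_suffix = ['.rst', '.md']


def setup(app):
    app.add_config_value('recommonmark_config', {
        'enable_auto_toc_tree': True,
        'enable_eval_rst': True,
    }, True)
    app.add_transform(AutoStructify)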
def parse(self, inputstring, document):
    content = preprocess_markdown(inputstring)
    CommonMarkParser.parse(self, content, document)
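This last parse() presumably lives in a CommonMarkParser subclass, with preprocess_markdown defined elsewhere in that project. Below is a hedged sketch of how such a preprocessing parser might be packaged and registered as a Sphinx (1.8+) source parser; the class name, the placeholder preprocess_markdown body, and the setup() hook are assumptions.

from recommonmark.parser import CommonMarkParser


def preprocess_markdown(text):
    # placeholder: the real project presumably rewrites the Markdown here
    return text


class PreprocessingMarkdownParser(CommonMarkParser):
    supported = ('markdown', 'md')

    def parse(self, inputstring, document):
        content = preprocess_markdown(inputstring)
        CommonMarkParser.parse(self, content, document)


def setup(app):
    app.add_source_suffix('.md', 'markdown')
    app.add_source_parser(PreprocessingMarkdownParser)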