def test_nest_tokens():
    """Check that ``nest_tokens`` pairs open/close tokens into ``NestedTokens``."""
    source = [
        Token("start", "", 0),
        Token("open", "", 1),
        Token("open_inner", "", 1),
        Token("inner", "", 0),
        Token("close_inner", "", -1),
        Token("close", "", -1),
        Token("end", "", 0),
    ]
    result = nest_tokens(source)

    # top level collapses each open/close pair into a single nested token
    assert [t.type for t in result] == ["start", "open", "end"]
    assert isinstance(result[0], Token)
    assert isinstance(result[1], NestedTokens)
    assert isinstance(result[2], Token)

    outer = result[1]
    assert outer.opening.type == "open"
    assert outer.closing.type == "close"
    assert len(outer.children) == 1
    assert outer.children[0].type == "open_inner"

    inner = outer.children[0]
    assert inner.opening.type == "open_inner"
    assert inner.closing.type == "close_inner"
    assert len(inner.children) == 1
    assert inner.children[0].type == "inner"
def nested_render_text(self, text: str, lineno: int):
    """Render unparsed text."""
    tokens = self.md.parse(text + "\n", self.env)
    if tokens and tokens[0].type == "front_matter":
        tokens.pop(0)

    # shift token line maps so they are relative to the enclosing document
    for token in tokens:
        if token.map:
            token.map = [token.map[0] + lineno, token.map[1] + lineno]
        for child in token.children or []:
            child.map = token.map

    # move footnote definitions to env, keeping everything else for rendering
    foot_refs = self.env.setdefault("foot_refs", {})
    remaining = []
    for nest_token in nest_tokens(tokens):
        if nest_token.type == "footnote_reference_open":
            foot_refs.setdefault(nest_token.meta["label"], []).append(nest_token)
        else:
            remaining.append(nest_token)

    # render
    for nest_token in remaining:
        # skip hidden?
        method_name = f"render_{nest_token.type}"
        if method_name in self.rules:
            self.rules[method_name](nest_token)
        else:
            self.create_warning(
                f"No render method for: {nest_token.type}",
                line=token_line(nest_token, default=0),
                subtype="render",
                append_to=self.current_node,
            )
def render(self, tokens: List[Token], options, env: AttrDict):
    """Run the render on a token stream.

    :param tokens: list on block tokens to render
    :param options: params of parser instance
    :param env: the environment sandbox associated with the tokens,
        containing additional metadata like reference info
    """
    self.setup_render(options, env)

    # propagate line number down to inline elements
    for token in tokens:
        if token.map:
            # For docutils we want 1 based line numbers (not 0)
            token.map = [token.map[0] + 1, token.map[1] + 1]
        for child in token.children or []:
            child.map = token.map

    # nest tokens
    tokens = nest_tokens(tokens)

    # move footnote definitions to env
    self.env.setdefault("foot_refs", {})
    new_tokens = []
    for token in tokens:
        if token.type == "footnote_reference_open":
            label = token.meta["label"]
            self.env["foot_refs"].setdefault(label, []).append(token)
        else:
            new_tokens.append(token)
    tokens = new_tokens

    # render
    for token in tokens:
        # skip hidden?
        if f"render_{token.type}" in self.rules:
            self.rules[f"render_{token.type}"](token)
        else:
            self.current_node.append(
                self.reporter.warning(
                    f"No render method for: {token.type}",
                    # token.map can be None (hidden/synthetic tokens);
                    # fall back to 0, matching nested_render_text's default
                    line=token.map[0] if token.map else 0,
                )
            )

    # log warnings for duplicate reference definitions
    # "duplicate_refs": [{"href": "ijk", "label": "B", "map": [4, 5], "title": ""}],
    for dup_ref in self.env.get("duplicate_refs", []):
        self.document.append(
            self.reporter.warning(
                f"Duplicate reference definition: {dup_ref['label']}",
                line=dup_ref["map"][0] + 1,
            )
        )

    if not self.config.get("output_footnotes", True):
        return self.document

    # we don't use the foot_references stored in the env
    # since references within directives/roles will have been added after
    # those from the initial markdown parse
    # instead we gather them from a walk of the created document
    foot_refs = OrderedDict()
    for refnode in self.document.traverse(nodes.footnote_reference):
        if refnode["refname"] not in foot_refs:
            foot_refs[refnode["refname"]] = True

    # TODO log warning for duplicate footnote definitions

    if foot_refs:
        self.current_node.append(nodes.transition())
    for footref in foot_refs:
        self.render_footnote_reference_open(self.env["foot_refs"][footref][0])

    return self.document