Example #1
0
def ordered_list_close(text: str, tokens: List[Token], idx: int, options: dict,
                       env: dict) -> str:
    """Render an ordered list container block as Markdown text.

    Replaces the placeholder markers accumulated while rendering the
    list items with concrete numbered markers and indentation.
    """
    closing_tkn = tokens[idx - 1]
    marker_char = closing_tkn.markup

    text = removesuffix(text, MARKERS.BLOCK_SEPARATOR)
    item_separator = "\n" if is_tight_list(tokens, idx) else "\n\n"
    text = text.replace(MARKERS.BLOCK_SEPARATOR, item_separator)

    # The first MARKERS.LIST_ITEM becomes the list's starting number.
    # Every later MARKERS.LIST_ITEM becomes the number one, left-padded
    # with zeros so all markers have the same width as the first one.
    # E.g.
    #   5321. This is the first list item
    #   0001. Second item
    #   0001. Third item
    start = find_opening_token(tokens, idx).attrGet("start")
    if start is None:
        start = 1
    first_marker = f"{start}{marker_char} "
    following_marker = "1".zfill(len(str(start))) + marker_char + " "
    text = text.replace(MARKERS.LIST_ITEM, first_marker, 1)
    text = text.replace(MARKERS.LIST_ITEM, following_marker)
    text = text.replace(MARKERS.INDENTATION, " " * len(first_marker))

    return text + MARKERS.BLOCK_SEPARATOR
Example #2
0
def blockquote_close(text: str, tokens: List[Token], idx: int, options: dict,
                     env: dict) -> str:
    """Render a blockquote container block, prefixing each line with "> ".

    Empty lines inside the quote become a bare ">" so the quote is not
    broken into separate blocks.
    """
    text = removesuffix(text, MARKERS.BLOCK_SEPARATOR)
    text = text.replace(MARKERS.BLOCK_SEPARATOR, "\n\n")
    lines = text.splitlines()
    if not lines:
        # An empty blockquote still needs a marker line.
        return ">" + MARKERS.BLOCK_SEPARATOR
    quoted = "\n".join("> " + line if line else ">" for line in lines)
    return quoted + MARKERS.BLOCK_SEPARATOR
Example #3
0
def bullet_list_close(text: str, tokens: List[Token], idx: int, options: dict,
                      env: dict) -> str:
    """Render a bullet list container block as Markdown text.

    Replaces the placeholder markers accumulated while rendering the
    list items with the list's bullet character and its indentation.
    """
    closing_tkn = tokens[idx - 1]

    text = removesuffix(text, MARKERS.BLOCK_SEPARATOR)
    item_separator = "\n" if is_tight_list(tokens, idx) else "\n\n"
    text = text.replace(MARKERS.BLOCK_SEPARATOR, item_separator)

    # Continuation lines are indented to align under the bullet marker.
    marker = closing_tkn.markup + " "
    text = text.replace(MARKERS.LIST_ITEM, marker)
    text = text.replace(MARKERS.INDENTATION, " " * len(marker))
    return text + MARKERS.BLOCK_SEPARATOR
Example #4
0
def list_item_close(text: str, tokens: List[Token], idx: int, options: dict,
                    env: dict) -> str:
    """Return one list item as string.

    The string contains MARKERS.LIST_ITEMs and MARKERS.INDENTATIONs
    which have to be replaced in later processing.
    """
    text = removesuffix(text, MARKERS.BLOCK_SEPARATOR)
    separator = "\n" if is_tight_list_item(tokens, idx) else "\n\n"
    text = text.replace(MARKERS.BLOCK_SEPARATOR, separator)

    lines = text.splitlines()
    if not lines:
        # Empty item: emit just the item marker placeholder.
        return MARKERS.LIST_ITEM + MARKERS.BLOCK_SEPARATOR
    head, *rest = lines
    # The first line carries the item marker; continuation lines are
    # indented, except empty ones which stay empty.
    pieces = [MARKERS.LIST_ITEM + head]
    pieces.extend(MARKERS.INDENTATION + line if line else line
                  for line in rest)
    return "\n".join(pieces) + MARKERS.BLOCK_SEPARATOR
Example #5
0
    def render(
        self,
        tokens: List[Token],
        options: dict,
        env: dict,
        *,
        _recursion_level: int = 0,
    ) -> str:
        """Takes token stream and generates Markdown.

        Args:
            tokens: A list of block tokens to render
            options: Params of parser instance
            env: Additional data from parsed input
        """
        assert _recursion_level in {
            0,
            1,
        }, "There should be no more than one level of recursion in tokens"
        # Stack of partially-rendered text, one item per open container
        # block; the bottom item is the document itself.
        stack = [""]

        for index, tkn in enumerate(tokens):

            # Produce the text of the current token: inline tokens recurse
            # into their children, everything else dispatches to a renderer
            # looked up by token type (falling back to the default one).
            if tkn.type == "inline":
                rendered = self.render(tkn.children,
                                       options,
                                       env,
                                       _recursion_level=_recursion_level + 1)
            else:
                renderer = getattr(token_renderers, tkn.type,
                                   token_renderers.default)
                rendered = renderer(tokens, index, options, env)

            if tkn.nesting == 1:
                # Opening a container block: start a fresh stack item.
                stack.append(rendered)
            elif tkn.nesting == 0:
                # No nesting change: accumulate into the current item.
                stack[-1] += rendered
            else:  # tkn.nesting == -1
                # Closing a container block: pop its accumulated text,
                # post-process it with the container renderer, and merge
                # the result into the parent's stack item.
                block_text = stack.pop() + rendered
                finalizer = getattr(container_renderers, tkn.type,
                                    container_renderers.default)
                block_text = finalizer(block_text, tokens, index, options,
                                       env)
                stack[-1] += block_text

        content = stack.pop()
        assert not stack, "Text stack should be empty before returning"

        if not _recursion_level:
            # Top-level call: resolve the remaining separator placeholders
            # and terminate the document with a single newline.
            content = removesuffix(content, MARKERS.BLOCK_SEPARATOR)
            content = content.replace(MARKERS.BLOCK_SEPARATOR, "\n\n")
            content += "\n"
        return content