def parse_indented_code_block(
    parser_state: ParserState,
    position_marker: PositionMarker,
    extracted_whitespace: Optional[str],
    removed_chars_at_start: Optional[int],
    last_block_quote_index: int,
    last_list_start_index: int,
) -> List[MarkdownToken]:
    """
    Handle the parsing of an indented code block.

    Emits a text token for the current line when the line is indented far
    enough (>= 4 effective whitespace characters) and the open container is
    not a paragraph, opening a new indented code block first if one is not
    already on the token stack.
    """
    assert extracted_whitespace is not None
    assert removed_chars_at_start is not None

    code_block_tokens: List[MarkdownToken] = []

    # A line starts/continues an indented code block only if it carries at
    # least 4 columns of leading whitespace and we are not inside a paragraph
    # (paragraph continuation lines absorb the indent instead).
    has_enough_indent = ParserHelper.is_length_greater_than_or_equal_to(
        extracted_whitespace, 4, start_index=removed_chars_at_start
    )
    if has_enough_indent and not parser_state.token_stack[-1].is_paragraph:
        if not parser_state.token_stack[-1].is_indented_code_block:
            # Open the code block; the helper may rewrite the whitespace and
            # block-quote index to account for enclosing containers.
            (
                last_block_quote_index,
                extracted_whitespace,
            ) = LeafBlockProcessor.__create_indented_block(
                parser_state,
                last_list_start_index,
                last_block_quote_index,
                position_marker,
                extracted_whitespace,
                code_block_tokens,
            )
        assert extracted_whitespace is not None
        code_block_tokens.append(
            TextMarkdownToken(
                position_marker.text_to_parse[position_marker.index_number :],
                extracted_whitespace,
                position_marker=position_marker,
            )
        )
    return code_block_tokens
def parse_indented_code_block(
    parser_state,
    position_marker,
    extracted_whitespace,
    removed_chars_at_start,
    original_line_to_parse,
    last_block_quote_index,
    last_list_start_index,
):
    """
    Handle the parsing of an indented code block.

    When the current line has at least 4 effective columns of leading
    whitespace and the innermost open block is not a paragraph, this either
    continues the open indented code block or opens a new one (computing the
    block's starting line/column from the container context), then emits a
    text token for the remainder of the line.
    """
    new_tokens = []
    if (
        ParserHelper.is_length_greater_than_or_equal_to(
            extracted_whitespace, 4, start_index=removed_chars_at_start
        )
        and not parser_state.token_stack[-1].is_paragraph
    ):
        if not parser_state.token_stack[-1].is_indented_code_block:
            # Opening a brand-new indented code block: push its stack token,
            # then reconcile the indent against any enclosing list and/or
            # block quote before computing the token's column.
            parser_state.token_stack.append(IndentedCodeBlockStackToken())
            LOGGER.debug(">>__adjust_for_list_start")
            (
                did_process,
                offset_index,
                last_block_quote_index,
            ) = LeafBlockProcessor.__adjust_for_list_start(
                original_line_to_parse,
                last_list_start_index,
                last_block_quote_index,
            )
            LOGGER.debug("<<__adjust_for_list_start<<%s", str(did_process))

            # If the list adjustment did not apply but we are directly inside
            # a list, force the block-quote adjustment and compensate by one
            # column later (self-described "kludge").
            force_me = False
            kludge_adjust = 0
            if not did_process:
                LOGGER.debug(">>>>%s", str(parser_state.token_stack[-2]))
                if parser_state.token_stack[-2].is_list:
                    LOGGER.debug(
                        ">>indent>>%s",
                        parser_state.token_stack[-2].indent_level,
                    )
                    last_block_quote_index = 0
                    kludge_adjust = 1
                    force_me = True

            LOGGER.debug(">>__adjust_for_block_quote_start")
            # NOTE: rebinds did_process; the list-start result is consumed
            # above and only offset_index survives from it.
            (
                did_process,
                special_parse_start_index,
                whitespace_to_parse,
                block_quote_adjust_delta,
            ) = LeafBlockProcessor.__adjust_for_block_quote_start(
                force_me,
                original_line_to_parse,
                last_block_quote_index,
                position_marker,
                extracted_whitespace,
            )
            LOGGER.debug("<<__adjust_for_block_quote_start<<%s", str(did_process))

            LOGGER.debug(
                "__recalculate_whitespace>>%s>>%s",
                whitespace_to_parse,
                str(offset_index),
            )
            # adj_ws becomes the code block token's whitespace; left_ws is the
            # leftover indent carried into the text token below.
            (
                accumulated_whitespace_count,
                actual_whitespace_index,
                adj_ws,
                left_ws,
            ) = LeafBlockProcessor.__recalculate_whitespace(
                special_parse_start_index, whitespace_to_parse, offset_index
            )

            # TODO revisit with tabs
            line_number = position_marker.line_number
            # Default column: position within the line, 1-based, backed up
            # over the whitespace that was already extracted.
            column_number = (
                position_marker.index_number
                + position_marker.index_indent
                - len(extracted_whitespace)
                + 1
            )
            if special_parse_start_index:
                # Inside a block quote: recompute the column from the
                # post-quote-marker parse position instead.
                column_number = (
                    actual_whitespace_index
                    + special_parse_start_index
                    + block_quote_adjust_delta
                )
                LOGGER.debug(
                    "column_number(%s)=actual_whitespace_index(%s)+special_parse_start_index(%s)+block_quote_adjust_delta(%s)",
                    str(column_number),
                    str(actual_whitespace_index),
                    str(special_parse_start_index),
                    str(block_quote_adjust_delta),
                )
                # Whitespace beyond the 4 columns the code block consumes is
                # kept as literal content indentation.
                excess_whitespace_count = (
                    accumulated_whitespace_count - 4 - offset_index
                )
                LOGGER.debug(
                    "excess_whitespace_count(%s)=accumulated_whitespace_count(%s)-4-offset_index(%s)",
                    str(excess_whitespace_count),
                    str(accumulated_whitespace_count),
                    str(offset_index),
                )
                LOGGER.debug("before>>%s>>", left_ws.replace("\t", "\\t"))
                if excess_whitespace_count:
                    excess_whitespace_count -= kludge_adjust
                    # NOTE(review): " ".rjust(n) yields n spaces for n >= 1,
                    # but still yields one space for n <= 0 — if kludge_adjust
                    # drops the count to 0 here, a stray space is prepended.
                    # Looks intentional as written; confirm against tests.
                    left_ws = " ".rjust(excess_whitespace_count) + left_ws
                LOGGER.debug("after>>%s>>", left_ws.replace("\t", "\\t"))
            else:
                column_number += actual_whitespace_index
            LOGGER.debug("column_number>>%s", str(column_number))
            new_tokens.append(
                IndentedCodeBlockMarkdownToken(adj_ws, line_number, column_number)
            )
            # The leftover indent becomes the text token's whitespace prefix.
            extracted_whitespace = left_ws
            LOGGER.debug("left_ws>>%s<<", extracted_whitespace.replace("\t", "\\t"))
        new_tokens.append(
            TextMarkdownToken(
                position_marker.text_to_parse[position_marker.index_number:],
                extracted_whitespace,
            )
        )
    return new_tokens