def __init__(self, string, transformers=None):
    """Prepare the parser state for parsing *string*.

    In practice the high level `parse()` function wraps this class, so
    there is normally no need to instantiate the parser yourself.  The
    unittests however have to enable and disable the `transformers` by
    hand, which is why they may be passed explicitly.  When no
    transformers are supplied, the default implementations registered
    on the application context are used.
    """
    self.string = string
    self.lexer = Lexer()
    self.stack_depth = 0
    # look up the registered default transformers if none were given
    if transformers is None:
        transformers = ctx.get_implementations(ITransformer, instances=True)
    self.transformers = transformers
    #: node dispatchers -- maps a lexer token type to the name of the
    #: parse method responsible for it; resolved to bound methods below
    dispatch_names = {
        'text': 'parse_text',
        'raw': 'parse_raw',
        'nl': 'parse_nl',
        'highlighted_begin': 'parse_highlighted',
        'conflict_begin': 'parse_conflict_left',
        'conflict_switch': 'parse_conflict_middle',
        'conflict_end': 'parse_conflict_end',
        'metadata_begin': 'parse_metadata',
        'headline_begin': 'parse_headline',
        'strong_begin': 'parse_strong',
        'emphasized_begin': 'parse_emphasized',
        'escaped_code_begin': 'parse_escaped_code',
        'code_begin': 'parse_code',
        'underline_begin': 'parse_underline',
        'stroke_begin': 'parse_stroke',
        'small_begin': 'parse_small',
        'big_begin': 'parse_big',
        'sub_begin': 'parse_sub',
        'sup_begin': 'parse_sup',
        'footnote_begin': 'parse_footnote',
        'color_begin': 'parse_color',
        'size_begin': 'parse_size',
        'font_begin': 'parse_font',
        'quote_begin': 'parse_quote',
        'list_item_begin': 'parse_list',
        'definition_begin': 'parse_definition',
        'external_link_begin': 'parse_external_link',
        'free_link': 'parse_free_link',
        'ruler': 'parse_ruler',
        'pre_begin': 'parse_pre_block',
        'table_row_begin': 'parse_table',
        'box_begin': 'parse_box',
        'sourcelink': 'parse_source_link',
    }
    self._handlers = {}
    for token_type, method_name in dispatch_names.items():
        self._handlers[token_type] = getattr(self, method_name)
    #: runtime information
    self.is_dirty = False
# -*- coding: utf-8 -*- """ test_markup_lexer ~~~~~~~~~~~~~~~~~ This unittest tests various features of the wiki lexer. Just the lexer, not the parser. :copyright: 2010-2011 by the Project Name Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from inyoka.core.markup.lexer import Lexer lexer = Lexer() def test_inline_markup(): expect = lexer.tokenize( u"''foo''" u"'''foo'''" u"__foo__" u',,(foo),,' u'^^(foo)^^' u'--(foo)--' u'`foo`' u'``foo``' u'~-(foo)-~' u'~+(foo)+~' u'((foo))' u'[mark]foo[/mark]'