def header(self, m):
    """Tokenize a header match.

    A '!' anywhere in the match marks the header as escaped: it is
    emitted as plain text with every '!' replaced by '.'.  Otherwise a
    'header' token carrying group 1 (the header text) is appended.
    """
    # 'whole' instead of 'all' — the original shadowed the builtin all().
    whole = m.group(0)
    if '!' in whole:
        token = Token(type='text', value=whole.replace('!', '.'))
    else:
        token = Token(type='header', value=m.group(1))
    self.ts.append(token)
def plugin(self, m):
    """Tokenize a plugin tag.

    '[!name]' is an escape for a literal '[name]'; '[/name]' closes a
    plugin; a name accepted by self.is_plugin opens one.  Anything else
    is re-emitted as literal brackets with its inside lexed normally.
    """
    tag_name = m.group('p_name2')
    full_tag = m.group('all_tag')
    if tag_name.startswith('!'):
        # Escaped tag: drop the '!' and keep the rest as plain text.
        token = Token(type='text', value=full_tag.replace('[!', '[', 1))
    elif tag_name.startswith('/'):
        token = Token(type='plugin_end', value='[%s]' % tag_name)
    elif self.is_plugin.match(tag_name):
        token = Token(type='plugin_start',
                      value={'inner_tag': tag_name, 'all_tag': full_tag})
    else:
        # Not a plugin after all.
        token = None

    Lexer.tokenize(self, m.group('l_data1'))
    if token:
        self.ts.append(token)
    else:
        # Unknown tag: emit the brackets literally, lex what was inside.
        self.ts.append(Token(type='text', value='['))
        Lexer.tokenize(self, tag_name)
        self.ts.append(Token(type='text', value=']'))
    Lexer.tokenize(self, m.group('r_data1'))
def escapestyle(self, m):
    """Emit the 'inner' group verbatim as text, lexing what surrounds it."""
    Lexer.tokenize(self, m.group('l_data2'))
    literal = m.group('inner')
    self.ts.append(Token(type='text', value=literal))
    Lexer.tokenize(self, m.group('r_data2'))
def images(self, m):
    """Emit an 'image' token built from the alt text and link groups."""
    Lexer.tokenize(self, m.group('l_data3'))
    details = {
        'alt': m.group('alt1'),
        'link': m.group('i_link1'),
        'all_tag': m.group('all_tag1'),
    }
    self.ts.append(Token(type='image', value=details))
    Lexer.tokenize(self, m.group('r_data3'))
def links(self, m):
    """Emit a 'link' token built from the link text and target groups."""
    Lexer.tokenize(self, m.group('l_data2'))
    details = {
        'text': m.group('l_text'),
        'link': m.group('l_link'),
        'all_tag': m.group('all_tag3'),
    }
    self.ts.append(Token(type='link', value=details))
    Lexer.tokenize(self, m.group('r_data2'))
def link_images(self, m):
    """Emit an 'imagelink' token: an image that links to another page."""
    Lexer.tokenize(self, m.group('l_data4'))
    details = {
        'alt': m.group('alt2'),
        'img_link': m.group('i_link2'),
        'page_link': m.group('page_link'),
        'all_tag': m.group('all_tag2'),
    }
    self.ts.append(Token(type='imagelink', value=details))
    Lexer.tokenize(self, m.group('r_data4'))
def text_style(self, m):
    """Tokenize a styled text span.

    The 'format' group selects the style: '(b|i|u|s)' maps to a plain
    HTML tag, '(word)' to a span class, '{k:v,...}' to an inline span
    style; anything else is emitted as plain text.  The inner text is
    lexed recursively (styles may nest), then the matching end token is
    appended.
    """
    Lexer.tokenize(self, m.group('l_data1'))
    # 'fmt' instead of 'format' — the original shadowed the builtin.
    fmt = m.group('format')
    text = m.group('text')
    # Raw strings for the regex patterns (patterns themselves unchanged).
    html_style = re.match(r'\(([bius])\s*\)', fmt)
    span_class = re.match(r'\((\w+)\)', fmt)
    span_style = re.match(r'{(\w+.+)}', fmt)
    token = None
    tag_or_cls = None
    if html_style:
        tag_or_cls = html_style.group(1)
        token = Token(type='html_tag_start', value=tag_or_cls)
    elif span_class:
        tag_or_cls = span_class.group(1)
        token = Token(type='span_class_start', value=tag_or_cls)
    elif span_style:
        span_inner = span_style.group(1)
        try:
            # "k1:v1,k2:v2" -> "k1: v1;k2: v2" for an inline style attr.
            style = []
            for item in span_inner.split(","):
                k, v = item.split(":")
                style.append("%s: %s" % (k, v.strip()))
            token = Token(type='span_style_start', value=';'.join(style))
        except ValueError:
            # Malformed style spec (missing/extra ':'): keep it as text.
            # Narrowed from a bare except, which hid unrelated errors.
            token = Token(type='text', value=span_style.group(0))
    else:
        token = Token(type='text', value=fmt)
    self.ts.append(token)
    # Recursively handle the inner text
    # since we need to support %(b) %(i) Hej%%
    Lexer.tokenize(self, text)
    if html_style:
        token = Token(type='html_tag_end', value=tag_or_cls)
    else:
        token = Token(type='span_end')
    self.ts.append(token)
    Lexer.tokenize(self, m.group('r_data1'))
def escape(self, m):
    """Bracket the lexed escaped region (group 1) in escape tokens."""
    # Removed leftover debug statement `print 'her!!'` — it wrote noise
    # to stdout on every escape match (and is Python-2-only syntax).
    self.ts.append(Token(type='escape_start'))
    Lexer.tokenize(self, m.group(1))
    self.ts.append(Token(type='escape_end'))
def line_break(self, m):
    """Emit a newline-valued 'line_break' token for the matched break."""
    self.ts.append(Token(type='line_break', value='\n'))
def list_item(self, m):
    """Bracket the lexed item body (group 1) in list-item tokens."""
    inner = m.group(1)
    self.ts.append(Token(type='list_item_start'))
    Lexer.tokenize(self, inner)
    self.ts.append(Token(type='list_item_end'))
def appendData(self, data):
    """Append *data* as a 'data' token; falsy (empty/None) data is dropped."""
    if not data:
        return
    self.ts.append(Token(type='data', value=data))
def code(self, m):
    """Emit the captured code body (group 1) as a single 'code' token."""
    body = m.group(1)
    self.ts.append(Token(type='code', value=body))
def text(self, m):
    """Emit the entire match as a plain 'text' token."""
    whole = m.group(0)
    self.ts.append(Token(type='text', value=whole))