def tokenize_code(self, code, tab_size, tokenizer1, tokenizer2, wide):
  # Tokenize source text line by line, switching between the plain tokenizer
  # (mode 1) and the comment tokenizer (mode 2) when block-comment start/end
  # tokens are encountered.
  lines = code.split('\n')
  tokens = []
  mode = 1

  for line in lines:
    # strip trailing carriage return and whitespace, then expand tabs
    line = line.rstrip('\r')
    line = line.rstrip()
    line = Examiner.tabs_to_spaces(line, tab_size)

    if mode == 1:
      line_tokens = self.tokenize_line(line, tokenizer1, wide)
    else:
      line_tokens = self.tokenize_line(line, tokenizer2, wide)

    # a comment-end token returns to the plain tokenizer;
    # a comment-start token switches to the comment tokenizer
    for token in line_tokens:
      if token.group == 'comment-end':
        mode = 1
      if token.group == 'comment-start':
        mode = 2

    tokens += line_tokens

  return tokens
def tokenize_code(self, code, tab_size, tokenizer, wide):
  # Tokenize source text line by line with a single tokenizer.
  lines = code.split('\n')
  tokens = []

  for line in lines:
    # strip trailing carriage return and whitespace, then expand tabs
    line = line.rstrip('\r')
    line = line.rstrip()
    line = Examiner.tabs_to_spaces(line, tab_size)

    line_tokens = self.tokenize_line(line, tokenizer, wide)
    tokens += line_tokens

  return tokens
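

# Illustrative sketch (not part of the class above): a minimal, self-contained
# demonstration of the two-tokenizer mode switch used by the first method.
# Token, PlainTokenizer, and CommentTokenizer are hypothetical stand-ins for
# this example only; they are not the project's real tokenizer classes.

class Token:
  def __init__(self, text, group):
    self.text = text
    self.group = group


class PlainTokenizer:
  # tokenizes ordinary code; flags the start of a block comment
  def tokenize(self, line):
    if line.strip().startswith('/*'):
      return [Token(line, 'comment-start')]
    return [Token(word, 'word') for word in line.split()]


class CommentTokenizer:
  # tokenizes lines inside a block comment; flags its end
  def tokenize(self, line):
    if line.strip().endswith('*/'):
      return [Token(line, 'comment-end')]
    return [Token(line, 'comment')]


def demo(code):
  # same control flow as the two-tokenizer method: mode 1 uses the plain
  # tokenizer, mode 2 uses the comment tokenizer
  tokenizer1 = PlainTokenizer()
  tokenizer2 = CommentTokenizer()
  tokens = []
  mode = 1

  for line in code.split('\n'):
    line_tokens = (tokenizer1 if mode == 1 else tokenizer2).tokenize(line)

    for token in line_tokens:
      if token.group == 'comment-end':
        mode = 1
      if token.group == 'comment-start':
        mode = 2

    tokens += line_tokens

  return tokens


if __name__ == '__main__':
  sample = 'a = 1\n/* start\nstill comment\nend */\nb = 2'
  for token in demo(sample):
    print(token.group, repr(token.text))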