def __init__(self):
    # Wire up logging and the helper components, then authenticate and
    # finish initialization.
    self.initLogger()
    self.examiner = Examiner()
    self.sec = Secretary()  # secretary helper
    self.clr = Cleaner()    # cleaner helper
    self.login()
    self.init()
def cmdExamine(invoker):
    # Debugger command: evaluate an expression and return the properties of
    # the resulting object as strings.  Options:
    #   '*'  skip preprocessing of the expression
    #   'p'  expand prototypes (optionally limited by a leading count)
    #   'x'  expand objects (same optional count)
    #   '+'/'-'  include/exclude properties named in a leading '{a,b,c}' word
    # NOTE: Python 2 syntax ('raise Error, msg').
    preprocess = not invoker.hasOption("*")
    expandPrototypes = invoker.hasOption("p")
    expandObjects = invoker.hasOption("x")
    filterIn = invoker.hasOption("+")
    filterOut = invoker.hasOption("-")
    invoker.checkUnsupportedOptions()
    if not invoker.getArguments():
        raise CommandError, "%s: missing argument" % invoker.getName()
    if filterIn and filterOut:
        raise CommandError, "%s: options '+' and '-' are mutually exclusive" % invoker.getName()
    arguments = invoker.getArguments()
    limit = 0xffff  # default: effectively unlimited expansion
    if expandObjects or expandPrototypes:
        # an optional leading integer limits how many entries are expanded
        words = arguments.split(' ', 1)
        if len(words) > 1:
            try:
                limit = int(words[0])
                arguments = words[1]
            except ValueError:
                pass  # no numeric prefix; whole string is the expression
    if filterIn or filterOut:
        # the filter word has the form '{name1,name2,...}'
        words = arguments.split(' ', 1)
        if len(words) != 2:
            raise CommandError, "%s: missing filter argument" % invoker.getName()
        filterArgument = words[0]
        arguments = words[1]
        if filterArgument[0] != '{' or filterArgument[-1] != '}':
            raise CommandError, "%s: invalid filter argument: %s" % (invoker.getName(), filterArgument)
        names = filterArgument[1:-1].split(',')
        if len(names) == 0:
            raise CommandError, "%s: invalid filter argument: %s" % (invoker.getName(), filterArgument)
    else:
        names = []
    try:
        result = invoker.getRuntime().eval(arguments, True, preprocess)
    except KeyboardInterrupt:
        result = None  # user aborted the evaluation
    if result:
        if not result.isObject():
            raise CommandError, "%s: not an object: %s" % (invoker.getName(), arguments)
        examiner = Examiner(invoker.getRuntime(), result.getValue(),
                            expandPrototypes and limit or 0,
                            expandObjects and limit or 0,
                            filterIn, filterOut, names, False)
        examiner.update()
        return examiner.getObjectPropertiesAsStrings(result.getValue())
def __init__(self):
    # Wire up logging, helper components, and the question database, then
    # authenticate and finish initialization.
    self.initLogger()
    self.examiner = Examiner()
    self.sec = Secretary()  # secretary helper
    self.clr = Cleaner()    # cleaner helper
    # question bank backed by the 'XFQuestionsLib.db' file
    self.questionsDb = QuestionsDb('XFQuestionsLib.db')
    self.login()
    self.init()
def tokenize_code(self, code, tab_size, tokenizer1, tokenizer2, wide):
    """Tokenize code line by line, switching to the second tokenizer
    while inside a block comment (between 'comment-start' and
    'comment-end' tokens) and back afterwards."""
    tokens = []
    in_comment = False
    for raw_line in code.split('\n'):
        text = Examiner.tabs_to_spaces(raw_line.rstrip('\r').rstrip(), tab_size)
        active = tokenizer2 if in_comment else tokenizer1
        line_tokens = self.tokenize_line(text, active, wide)
        # update comment state for the NEXT line
        for token in line_tokens:
            if token.group == 'comment-end':
                in_comment = False
            if token.group == 'comment-start':
                in_comment = True
        tokens += line_tokens
    return tokens
def calc_line_format_confidence_ii(self):
    """Confidence that every nonblank line begins with a keyword."""
    # strip tokens the check ignores, then reassemble logical lines
    tokens = Examiner.drop_tokens(self.tokens, ['whitespace', 'comment', 'EOF'])
    tokens = self.join_continued_lines(tokens)
    lines = self.split_tokens_into_lines(tokens)

    num_lines = len(lines)
    num_correct = 0
    for line in lines:
        if not line or line[0].group == 'keyword':
            # blank lines count as correct
            num_correct += 1
        else:
            self.errors.append({
                'TYPE': 'LINE FORMAT',
                'FIRST': line[0].group,
                'SECOND': line[0].text
            })

    confidence = num_correct / num_lines if num_lines > 0 else 1.0
    self.confidences['line format'] = confidence
    return tokens
def calc_confidences(self, operand_types, group_starts, group_mids, group_ends, indents):
    # Run the standard battery of confidence calculations over the token
    # stream: token validity, operator usage, grouping, operand adjacency,
    # and (optionally) indentation.
    tokens = self.source_tokens()
    tokens = Examiner.join_all_lines(tokens)
    self.calc_token_confidence()
    self.calc_token_2_confidence()
    num_operators = self.count_my_tokens(['operator', 'invalid operator'])
    if num_operators > 0:
        self.calc_operator_confidence(num_operators)
        allow_pairs = []
        self.calc_operator_2_confidence(tokens, num_operators, allow_pairs)
        self.calc_operator_3_confidence(tokens, num_operators, group_ends, allow_pairs)
        self.calc_operator_4_confidence(tokens, num_operators, group_starts, allow_pairs)
    self.calc_group_confidence(tokens, group_mids)
    # 2-in-a-row check only for numbers; 4-in-a-row for all operand types
    operand_types_2 = ['number']
    self.calc_operand_n_confidence(tokens, operand_types_2, 2)
    self.calc_operand_n_confidence(tokens, operand_types, 4)
    # self.calc_keyword_confidence()
    if indents is not None:
        self.calc_indent_confidence(indents)
def calc_line_format_confidence(self):
    """Confidence that open braces are not placed alone at the start of a
    line (K&R-style placement expected)."""
    tokens = Examiner.drop_tokens(
        self.tokens, ['whitespace', 'comment', 'line continuation'])

    misplaced = 0
    total_braces = 0
    prev2 = Token('\n', 'newline', False)
    prev = Token('\n', 'newline', False)
    for token in tokens:
        if token.group == 'group' and token.text == '{':
            total_braces += 1
            starts_line = prev.group == 'newline'
            follows_brace = prev2.group == 'group' and prev2.text == '{'
            if starts_line and not follows_brace:
                misplaced += 1
                self.errors.append({
                    'TYPE': 'LINE FORMAT',
                    'TOKEN': token.text
                })
        prev2 = prev
        prev = token

    confidence = 1.0
    if total_braces > 0:
        confidence = 1.0 - (misplaced / total_braces)
    self.confidences['line format'] = confidence
def check_expected_keywords(self):
    """Check that each of the four COBOL divisions appears exactly once.

    Returns a confidence value that starts at 1.00 and is reduced by 0.01
    for each division not seen exactly once; one error record is appended
    per missing/duplicated division.
    """
    counts = {
        'IDENTIFICATION': 0,
        'ENVIRONMENT': 0,
        'DATA': 0,
        'PROCEDURE': 0
    }
    drop_types = ['newline', 'whitespace', 'comment', 'line continuation']
    tokens = Examiner.drop_tokens(self.tokens, drop_types)
    prev_text = ''
    for token in tokens:
        text = token.text
        # a division header is '<NAME> DIVISION' ('ID' abbreviates
        # 'IDENTIFICATION')
        if text == 'DIVISION' and prev_text in ['IDENTIFICATION', 'ID']:
            counts['IDENTIFICATION'] += 1
        if text == 'DIVISION' and prev_text == 'ENVIRONMENT':
            counts['ENVIRONMENT'] += 1
        if text == 'DIVISION' and prev_text == 'DATA':
            counts['DATA'] += 1
        if text == 'DIVISION' and prev_text == 'PROCEDURE':
            counts['PROCEDURE'] += 1
        prev_text = text
    expected_keyword_confidence = 1.00
    if counts['IDENTIFICATION'] != 1:
        expected_keyword_confidence -= 0.01
        self.errors.append({
            'TYPE': 'EXPECTED KEYWORD',
            'MISSING': 'IDENTIFICATION or ID DIVISION'
        })
    if counts['ENVIRONMENT'] != 1:
        # fix: was 'expected_keyword_confidence != 0.01', a no-op
        # comparison; the penalty must be subtracted as in the other
        # three branches
        expected_keyword_confidence -= 0.01
        self.errors.append({
            'TYPE': 'EXPECTED KEYWORD',
            'MISSING': 'ENVIRONMENT DIVISION'
        })
    if counts['DATA'] != 1:
        expected_keyword_confidence -= 0.01
        self.errors.append({
            'TYPE': 'EXPECTED KEYWORD',
            'MISSING': 'DATA DIVISION'
        })
    if counts['PROCEDURE'] != 1:
        expected_keyword_confidence -= 0.01
        self.errors.append({
            'TYPE': 'EXPECTED KEYWORD',
            'MISSING': 'PROCEDURE DIVISION'
        })
    return expected_keyword_confidence
def cmdExamine(invoker): preprocess = not invoker.hasOption("*") expandPrototypes = invoker.hasOption("p") expandObjects = invoker.hasOption("x") filterIn = invoker.hasOption("+") filterOut = invoker.hasOption("-") invoker.checkUnsupportedOptions() if not invoker.getArguments(): raise CommandError, "%s: missing argument" % invoker.getName() if filterIn and filterOut: raise CommandError, "%s: options '+' and '-' are mutually exclusive" % invoker.getName() arguments = invoker.getArguments() limit = 0xffff if expandObjects or expandPrototypes: words = arguments.split(' ', 1) if len(words) > 1: try: limit = int(words[0]) arguments = words[1] except ValueError: pass if filterIn or filterOut: words = arguments.split(' ', 1) if len(words) != 2: raise CommandError, "%s: missing filter argument" % invoker.getName() filterArgument = words[0] arguments = words[1] if filterArgument[0] != '{' or filterArgument[-1] != '}': raise CommandError, "%s: invalid filter argument: %s" % (invoker.getName(), filterArgument) names = filterArgument[1:-1].split(',') if len(names) == 0: raise CommandError, "%s: invalid filter argument: %s" % (invoker.getName(), filterArgument) else: names = [] try: result = invoker.getRuntime().eval(arguments, True, preprocess) except KeyboardInterrupt: result = None if result: if not result.isObject(): raise CommandError, "%s: not an object: %s" % (invoker.getName(), arguments) examiner = Examiner(invoker.getRuntime(), result.getValue(), expandPrototypes and limit or 0, expandObjects and limit or 0, filterIn, filterOut, names, False) examiner.update() return examiner.getObjectPropertiesAsStrings(result.getValue())
def tokenize_code(self, code, tab_size, tokenizer, wide):
    """Tokenize code one line at a time after stripping trailing
    whitespace and expanding tabs."""
    tokens = []
    for raw_line in code.split('\n'):
        text = Examiner.tabs_to_spaces(raw_line.rstrip('\r').rstrip(), tab_size)
        tokens += self.tokenize_line(text, tokenizer, wide)
    return tokens
def __init__(self, data):
    # Build the batched LSTM-CRF wrapper: a CRF model plus an Examiner
    # used for sample selection.  (Python 2 print statement.)
    print "build batched lstmcrf..."
    self.label_alphabet = data.label_alphabet
    self.word_alphabet = data.word_alphabet
    # L-BFGS training with L1/L2 regularization — CRF presumably from
    # sklearn-crfsuite; verify against the file's imports
    self.crf = CRF(algorithm='lbfgs', c1=0.1, c2=0.1, max_iterations=100,
                   all_possible_transitions=False)
    self.examiner = Examiner(data)
    self.useExaminer = False
    self.loss_function = nn.NLLLoss()
    self.topk = 5  # number of top candidates considered
    # accumulated training data and bookkeeping
    self.X_train = []
    self.Y_train = []
    self.pos_mask_list = []
    self.instances = []
    self.scores_refs = []
    self.pos_mask = None
    self.tag_size = data.label_alphabet_size
def calc_line_format_confidence(self):
    """Confidence that block-introducing keyword lines end with a colon."""
    tokens = self.unwrap_code_lines(self.tokens)
    # drop tokens not used by the interpreter
    tokens = Examiner.drop_tokens(tokens, ['whitespace', 'comment'])
    lines = self.split_tokens_to_lines(tokens)

    colon_keywords = ['class', 'def', 'for', 'while', 'if', 'else', 'elif']
    checked = 0
    correct = 0
    for line in lines:
        if len(line) <= 1:
            continue
        head = line[0]
        tail = line[-1]
        if head.group != 'keyword' or head.text not in colon_keywords:
            continue
        checked += 1
        if tail.group == 'operator' and tail.text == ':':
            correct += 1
        else:
            self.errors.append({
                'TYPE': 'LINE FORMAT',
                'FIRST': head.text,
                'SECOND': "END '" + tail.text + "' NOT ':'"
            })

    confidence = correct / checked if checked > 0 else 1.0
    self.confidences['line format'] = confidence
def calc_line_format_confidence(self):
    """Check PIC/PICTURE keyword and picture-element pairing.

    Each PIC/PICTURE keyword must be followed by a picture element and
    each picture element must be preceded by a PIC/PICTURE keyword.
    Sets the 'line format' confidence to one minus the fraction of
    tokens involved in pairing errors.
    """
    drop_types = [
        'newline', 'whitespace', 'comment', 'line description',
        'line continuation'
    ]
    tokens = Examiner.drop_tokens(self.tokens, drop_types)
    errors = 0
    prev_token = Token('\n', 'newline', False)
    for token in tokens:
        if prev_token.group == 'keyword' and prev_token.text in ['PIC', 'PICTURE']:
            if token.group != 'picture':
                errors += 1
                self.errors.append({
                    'TYPE': 'PICTURE',
                    'FIRST': prev_token.text,
                    'SECOND': token.text
                })
        if token.group == 'picture':
            if prev_token.group != 'keyword' or prev_token.text not in ['PIC', 'PICTURE']:
                errors += 1
                self.errors.append({
                    'TYPE': 'PICTURE',
                    'FIRST': prev_token.text,
                    'SECOND': token.text
                })
        # fix: advance the previous-token tracker; without this the
        # keyword check never fires and every picture token is flagged
        prev_token = token
    picture_confidence = 1.0
    if len(self.tokens) > 0:
        # fix: confidence is 1 minus the error ratio; the original
        # assigned the ratio itself, giving 0.0 for error-free source
        picture_confidence = 1.0 - errors / len(self.tokens)
    self.confidences['line format'] = picture_confidence
def check_paired_tokens(self, tokens, open_tokens, close_tokens):
    # Verify that block-opening and block-closing keywords are balanced.
    # Returns (ok, num_open, num_close) where ok means the nesting level
    # ended at zero and never went negative.
    level = 0
    min_level = 0
    num_open = 0
    num_close = 0
    prev_token_lower = ''
    prev_token = Token('\n', 'newline', False)
    # a conditional keyword only opens a block when it starts a statement,
    # i.e. follows a newline, ';' or '=' (modifier forms like
    # 'x = 1 if y' would otherwise be miscounted)
    prev_reqs = [';', '=']
    conditional_openers = ['if', 'case', 'while', 'until', 'unless']
    drop_types = ['whitespace', 'comment', 'line continuation']
    tokens = Examiner.drop_tokens(tokens, drop_types)
    openers_stack = []
    for token in tokens:
        token_lower = token.text.lower()
        if token.group == 'keyword':
            # note precedence: unconditional openers OR
            # (conditional opener AND at statement start)
            if token_lower in open_tokens or\
               token_lower in conditional_openers and\
               (prev_token.group == 'newline' or prev_token_lower in prev_reqs):
                num_open += 1
                level += 1
                openers_stack.append(token_lower)
            if token_lower in close_tokens:
                num_close += 1
                level -= 1
                if level < min_level:
                    min_level = level
                # NOTE(review): the closer pops unconditionally; it is not
                # matched against the specific opener on top of the stack
                if len(openers_stack) > 0:
                    openers_stack = openers_stack[:-1]
        prev_token_lower = token_lower
        prev_token = token
    ok = level == 0 and min_level == 0
    return ok, num_open, num_close
def calc_line_format_confidence(self):
    """Confidence that lines starting with 'if' or 'endif' end in 'then'.

    NOTE(review): the original inline comment said "'if' or 'elseif'"
    while the code tests 'endif'; behavior is preserved here exactly —
    confirm which was intended.
    """
    tokens = Examiner.drop_tokens(self.tokens, ['whitespace', 'comment', 'EOF'])
    tokens = self.join_continued_lines(tokens)
    lines = self.split_tokens_into_lines(tokens)

    num_lines = len(lines)
    num_correct = 0
    for line in lines:
        needs_then = len(line) > 0 and line[0].text.lower() in ['if', 'endif']
        if not needs_then:
            # blank lines and other statements count as correct
            num_correct += 1
        elif line[-1].text.lower() == 'then':
            num_correct += 1
        else:
            self.errors.append({
                'TYPE': 'LINE FORMAT',
                'FIRST': line[0].text,
                'SECOND': line[-1].text
            })

    confidence = num_correct / num_lines if num_lines > 0 else 1.0
    self.confidences['line format'] = confidence
    return tokens
def __init__(self, code):
    """Tokenize source text (keyword/type sets match TypeScript) and
    compute token, operator, group, operand, keyword, brace-pairing,
    and line-length confidences."""
    super().__init__()
    operand_types = []

    # builders for whitespace and literals
    whitespace_tb = WhitespaceTokenBuilder()
    newline_tb = NewlineTokenBuilder()
    integer_tb = IntegerTokenBuilder(None)
    integer_exponent_tb = IntegerExponentTokenBuilder(None)
    real_tb = RealTokenBuilder(False, False, None)
    real_exponent_tb = RealExponentTokenBuilder(False, False, 'E', None)
    hex_constant_tb = PrefixedIntegerTokenBuilder(
        '0H', False, '0123456789ABCDEFabcdef')
    octal_constant_tb = PrefixedIntegerTokenBuilder(
        '0O', False, '01234567')
    binary_constant_tb = PrefixedIntegerTokenBuilder('0B', False, '01')
    operand_types.append('number')

    # identifiers may start with or contain '_' and '$'
    leads = '_$'
    extras = '_$'
    identifier_tb = IdentifierTokenBuilder(leads, extras)
    operand_types.append('identifier')

    quotes = ['"', "'", "’"]
    string_tb = EscapedStringTokenBuilder(quotes, 0)
    operand_types.append('string')

    slash_slash_comment_tb = SlashSlashCommentTokenBuilder()
    slash_star_comment_tb = SlashStarCommentTokenBuilder()

    terminators_tb = CaseInsensitiveListTokenBuilder(
        [';'], 'statement terminator', False)

    known_operators = [
        '+', '-', '*', '/', '%', '=', '==', '!=', '===', '!==', '>', '>=',
        '<', '<=', '+=', '-=', '*=', '/=', '%=', '&=', '|=', '^=', '<<=',
        '>>=', '!', '&', '|', '~', '<<', '>>', '=>', '^', '.', ':', '++',
        '--', '&&', '||', '?', '$', '?.', 'new', 'delete'
    ]
    known_operator_tb = CaseSensitiveListTokenBuilder(
        known_operators, 'operator', False)

    self.unary_operators = [
        '+', '-', '!', '~', '++', '--', ':', '$', 'new', 'delete'
    ]
    self.postfix_operators = ['++', '--', ':']

    groupers = ['(', ')', ',', '[', ']', '{', '}']
    group_starts = ['(', '[', ',', '{']
    group_mids = [',']
    group_ends = [')', ']', '}']
    groupers_tb = CaseSensitiveListTokenBuilder(groupers, 'group', False)

    regex_tb = RegexTokenBuilder()

    keywords = [
        'break', 'case', 'catch', 'class', 'const', 'continue', 'debugger',
        'default', 'do', 'else', 'enum', 'export', 'extends', 'finally',
        'for', 'function', 'if', 'import', 'in', 'instanceof', 'return',
        'switch', 'throw', 'try', 'typeof', 'while', 'with', 'as',
        'implements', 'interface', 'let', 'package', 'private', 'protected',
        'public', 'static', 'yield', 'constructor', 'declare', 'get',
        'module', 'require', 'set', 'type', 'from', 'of'
    ]
    keyword_tb = CaseSensitiveListTokenBuilder(keywords, 'keyword', False)

    types = [
        'any', 'boolean', 'byte', 'char', 'number', 'string', 'symbol',
        'void', 'never', 'object'
    ]
    types_tb = CaseSensitiveListTokenBuilder(types, 'type', True)
    operand_types.append('type')

    values = ['this', 'super', 'null', 'true', 'false', 'undefined']
    values_tb = CaseSensitiveListTokenBuilder(values, 'value', True)
    operand_types.append('value')

    invalid_token_builder = InvalidTokenBuilder()

    # builder order presumably sets matching priority — TODO confirm
    tokenbuilders = [
        newline_tb, whitespace_tb, terminators_tb, integer_tb,
        integer_exponent_tb, real_tb, real_exponent_tb, hex_constant_tb,
        octal_constant_tb, binary_constant_tb, keyword_tb, types_tb,
        values_tb, known_operator_tb, groupers_tb, regex_tb,
        identifier_tb, string_tb, slash_slash_comment_tb,
        slash_star_comment_tb, self.unknown_operator_tb,
        invalid_token_builder
    ]

    tokenizer = Tokenizer(tokenbuilders)
    tokens = tokenizer.tokenize(code)
    # merge runs of invalid tokens into single tokens
    tokens = Examiner.combine_adjacent_identical_tokens(
        tokens, 'invalid operator')
    self.tokens = Examiner.combine_adjacent_identical_tokens(
        tokens, 'invalid')
    # keywords after '.' are member names, not keywords
    self.convert_keywords_to_identifiers(['.'])

    self.calc_statistics()

    # confidence calculations operate on a joined copy of the tokens
    tokens = self.source_tokens()
    tokens = Examiner.join_all_lines(tokens)

    self.calc_token_confidence()
    self.calc_token_2_confidence()

    num_operators = self.count_my_tokens(['operator', 'invalid operator'])
    if num_operators > 0:
        self.calc_operator_confidence(num_operators)
        allow_pairs = []
        self.calc_operator_2_confidence(tokens, num_operators, allow_pairs)
        self.calc_operator_3_confidence(tokens, num_operators, group_ends,
                                        allow_pairs)
        self.calc_operator_4_confidence(tokens, num_operators, group_starts,
                                        allow_pairs)

    self.calc_group_confidence(tokens, group_mids)

    operand_types_2 = ['number', 'string', 'symbol']
    self.calc_operand_n_confidence(tokens, operand_types_2, 2)
    self.calc_operand_n_confidence(tokens, operand_types, 4)

    self.calc_keyword_confidence()
    self.calc_paired_blockers_confidence(['{'], ['}'])
    self.calc_line_length_confidence(code, self.max_expected_line)
def __init__(self, code):
    """Tokenize Swift source and compute token, operator, group,
    operand, keyword, and line-length confidences."""
    super().__init__()
    operand_types = []

    # builders for whitespace, separators, and literals
    whitespace_tb = WhitespaceTokenBuilder()
    newline_tb = NewlineTokenBuilder()
    stmt_separator_tb = SingleCharacterTokenBuilder(
        ';', 'statement separator', False)
    integer_tb = IntegerTokenBuilder('_')
    integer_exponent_tb = IntegerExponentTokenBuilder('_')
    real_tb = RealTokenBuilder(True, True, '_')
    real_exponent_tb = RealExponentTokenBuilder(True, True, 'E', '_')
    operand_types.append('number')

    argument_tb = SwiftArgumentTokenBuilder()

    # identifiers may end with '?' (optional chaining suffix)
    leads = '_'
    extras = '_'
    suffixes = '?'
    identifier_tb = SuffixedIdentifierTokenBuilder(leads, extras, suffixes)
    operand_types.append('identifier')

    attribute_tb = PrefixedIdentifierTokenBuilder('@', 'attribute', False)
    symbol_tb = SwiftSymbolTokenBuilder('.', 'symbol', True)
    operand_types.append('symbol')

    quotes = ['"', "'", "’"]
    string_tb = EscapedStringTokenBuilder(quotes, 10)
    triple_quote_comment_tb = TripleQuoteStringTokenBuilder(quotes)
    slash_slash_comment_tb = SlashSlashCommentTokenBuilder()
    slash_star_comment_tb = SlashStarCommentTokenBuilder()
    operand_types.append('string')

    known_operators = [
        '+', '-', '*', '/', '%', '==', '!=', '>', '<', '>=', '<=', '&&',
        '||', '!', '&', '|', '^', '~', '<<', '>>', '===', '=', '+=', '-=',
        '*=', '/=', '%=', '<<=', '>>=', '&=', '^=', '|=', '...', '..<',
        '?', ':', '.', '++', '--', '->', '??', '\\.', '&+', '&-', '&*'
    ]
    known_operator_tb = CaseSensitiveListTokenBuilder(
        known_operators, 'operator', False)

    self.unary_operators = ['+', '-', '!', '~', '&', '++', '--', ':', '?']
    self.postfix_operators = ['++', '--', ':', '!', '?']

    groupers = ['(', ')', ',', '[', ']', '{', '}']
    group_starts = ['(', '[', ',', '{']
    group_mids = [',']
    group_ends = [')', ']', '}']
    groupers_tb = CaseInsensitiveListTokenBuilder(groupers, 'group', False)

    keywords = [
        'associatedtype', 'class', 'deinit', 'enum', 'extension',
        'fileprivate', 'func', 'import', 'init', 'inout', 'internal',
        'let', 'open', 'operator', 'private', 'protocol', 'public',
        'static', 'struct', 'subscript', 'typealias', 'var', 'break',
        'case', 'continue', 'default', 'defer', 'do', 'else',
        'fallthrough', 'for', 'guard', 'if', 'in', 'repeat', 'return',
        'switch', 'where', 'while', 'as', 'Any', 'catch', 'is',
        'rethrows', 'super', 'throw', 'throws', 'try', 'try?', 'try!',
        '#available', '#colorLiteral', '#column', '#else', '#elseif',
        '#endif', '#file', '#fileLiteral', '#function', '#if',
        '#imageLiteral', '#line', '#selector', '#sourceLocation',
        'associativity', 'convenience', 'dynamic', 'didSet', 'final',
        'get', 'infix', 'indirect', 'lazy', 'left', 'mutating', 'none',
        'nonmutating', 'optional', 'override', 'postfix', 'precedence',
        'prefix', 'Protocol', 'required', 'right', 'set', 'Type',
        'unowned', 'weak', 'willSet'
    ]
    keyword_tb = CaseSensitiveListTokenBuilder(keywords, 'keyword', False)

    types = [
        'char', 'double', 'float', 'int', 'long', 'short',
    ]
    types_tb = CaseSensitiveListTokenBuilder(types, 'type', True)
    operand_types.append('type')

    values = ['nil', 'Self', 'false', 'true']
    values_tb = CaseSensitiveListTokenBuilder(values, 'value', True)
    operand_types.append('value')

    invalid_token_builder = InvalidTokenBuilder()

    # builder order presumably sets matching priority — TODO confirm
    tokenbuilders = [
        newline_tb, whitespace_tb, stmt_separator_tb, integer_tb,
        integer_exponent_tb, real_tb, real_exponent_tb, argument_tb,
        keyword_tb, types_tb, values_tb, known_operator_tb, groupers_tb,
        identifier_tb, attribute_tb, symbol_tb, string_tb,
        slash_slash_comment_tb, slash_star_comment_tb,
        triple_quote_comment_tb, self.unknown_operator_tb,
        invalid_token_builder
    ]

    tokenizer = Tokenizer(tokenbuilders)
    tokens = tokenizer.tokenize(code)
    # merge runs of invalid tokens into single tokens
    tokens = Examiner.combine_adjacent_identical_tokens(
        tokens, 'invalid operator')
    self.tokens = Examiner.combine_adjacent_identical_tokens(
        tokens, 'invalid')
    # keywords after '.' are member names, not keywords
    self.convert_keywords_to_identifiers(['.'])

    self.calc_statistics()

    # confidence calculations operate on a joined copy of the tokens
    tokens = self.source_tokens()
    tokens = Examiner.join_all_lines(tokens)

    self.calc_token_confidence()
    self.calc_token_2_confidence()

    num_operators = self.count_my_tokens(['operator', 'invalid operator'])
    if num_operators > 0:
        self.calc_operator_confidence(num_operators)
        allow_pairs = []
        self.calc_operator_2_confidence(tokens, num_operators, allow_pairs)
        self.calc_operator_3_confidence(tokens, num_operators, group_ends,
                                        allow_pairs)
        self.calc_operator_4_confidence(tokens, num_operators, group_starts,
                                        allow_pairs)

    self.calc_group_confidence(tokens, group_mids)

    # NOTE(review): this rebinds operand_types, discarding the list
    # accumulated above; both operand checks then use these three types
    operand_types = ['number', 'string', 'symbol']
    self.calc_operand_n_confidence(tokens, operand_types, 2)
    self.calc_operand_n_confidence(tokens, operand_types, 4)

    self.calc_keyword_confidence()
    self.calc_line_length_confidence(code, self.max_expected_line)
def __init__(self, code):
    """Tokenize Ruby source and compute token, operator, group, operand,
    keyword, begin/end-pairing, and line-length confidences."""
    super().__init__()
    # newlines matter except inside parentheses
    self.newlines_important = 'parens'
    operand_types = []

    # builders for whitespace, separators, and literals
    whitespace_tb = WhitespaceTokenBuilder()
    newline_tb = NewlineTokenBuilder()
    stmt_separator_tb = SingleCharacterTokenBuilder(
        ';', 'statement separator', False)
    integer_tb = IntegerTokenBuilder('_')
    integer_exponent_tb = IntegerExponentTokenBuilder('_')
    real_tb = RealTokenBuilder(True, True, '_')
    real_exponent_tb = RealExponentTokenBuilder(True, True, 'E', '_')
    operand_types.append('number')

    identifier_tb = RubyIdentifierTokenBuilder()
    operand_types.append('identifier')

    symbol_tb = PrefixedIdentifierTokenBuilder(':', 'symbol', True)
    operand_types.append('symbol')

    quotes = ['"', "'", "’"]
    string_tb = EscapedStringTokenBuilder(quotes, 10)
    operand_types.append('string')

    regex_tb = RegexTokenBuilder()
    operand_types.append('regex')

    heredoc_tb = HereDocTokenBuilder('<<-')
    hash_comment_tb = LeadToEndOfLineTokenBuilder('#', False, 'comment')

    known_operators = [
        '!', '~', '**', '*', '/', '%', '+', '-', '<<', '>>', '&', '|',
        '^', '<', '<=', '>', '>=', '==', '===', '!=', '=~', '!~', '<=>',
        '&&', '||', '..', '...', '?', ':', '=', '**=', '*=', '/=', '%=',
        '+=', '-=', '<<=', '>>=', '&&=', '&=', '||=', '|=', '^=', 'not',
        'and', 'or', 'in', '.', '.:', '=>', '::', '<<-'
    ]
    known_operator_tb = CaseSensitiveListTokenBuilder(
        known_operators, 'operator', False)

    self.unary_operators = ['+', '-', '!', '~', '&', '*', '**', '<<-']
    self.postfix_operators = ['++', '--']

    groupers = ['(', ')', ',', '[', ']', '{', '}']
    group_starts = ['(', '[', ',', '{']
    group_mids = [',']
    group_ends = [')', ']', '}']
    groupers_tb = CaseInsensitiveListTokenBuilder(groupers, 'group', False)

    keywords = [
        'BEGIN', 'END', 'alias', 'begin', 'break', 'case', 'class', 'def',
        'defined?', 'do', 'else', 'elsif', 'end', 'ensure', 'for', 'if',
        'module', 'next', 'redo', 'rescue', 'retry', 'return', 'then',
        'undef', 'unless', 'until', 'when', 'while', 'yield'
    ]
    keyword_tb = CaseSensitiveListTokenBuilder(keywords, 'keyword', False)

    values = ['nil', 'self', 'true', 'false', 'super']
    values_tb = CaseSensitiveListTokenBuilder(values, 'value', True)
    operand_types.append('value')

    # percent-literal markers such as %w(...) are treated as identifiers
    array_markers = ['%w', '%q', '%Q', '%i', '%s', '%x']
    array_marker_tb = CaseSensitiveListTokenBuilder(
        array_markers, 'identifier', True)

    invalid_token_builder = InvalidTokenBuilder()

    # builder order presumably sets matching priority — TODO confirm
    tokenbuilders = [
        newline_tb, whitespace_tb, stmt_separator_tb, integer_tb,
        integer_exponent_tb, real_tb, real_exponent_tb, keyword_tb,
        values_tb, symbol_tb, known_operator_tb, groupers_tb, regex_tb,
        identifier_tb, array_marker_tb, string_tb, heredoc_tb,
        hash_comment_tb, self.unknown_operator_tb, invalid_token_builder
    ]

    tokenizer = Tokenizer(tokenbuilders)
    tokens = tokenizer.tokenize(code)
    # merge runs of invalid tokens into single tokens
    tokens = Examiner.combine_adjacent_identical_tokens(
        tokens, 'invalid operator')
    self.tokens = Examiner.combine_adjacent_identical_tokens(
        tokens, 'invalid')
    # '|param|' block parameter bars become groupers; keywords and
    # operators after '.' are method names
    self.convert_bars_to_groups()
    self.convert_keywords_to_identifiers(['.'])
    self.convert_operators_to_identifiers()

    self.calc_statistics()

    # confidence calculations operate on a joined copy of the tokens;
    # Ruby continues lines inside parens and after trailing operators
    tokens = self.source_tokens()
    tokens = Examiner.join_parens_continued_lines(tokens)
    tokens = Examiner.join_operator_continued_lines(
        tokens, self.postfix_operators)

    self.calc_token_confidence()
    self.calc_token_2_confidence()

    num_operators = self.count_my_tokens(['operator', 'invalid operator'])
    if num_operators > 0:
        self.calc_operator_confidence(num_operators)
        allow_pairs = []
        self.calc_operator_2_confidence(tokens, num_operators, allow_pairs)
        self.calc_operator_3_confidence(tokens, num_operators, group_ends,
                                        allow_pairs)
        self.calc_operator_4_confidence(tokens, num_operators, group_starts,
                                        allow_pairs)

    self.calc_group_confidence(tokens, group_mids)

    operand_types_2 = ['number', 'string', 'symbol']
    self.calc_operand_n_confidence(tokens, operand_types_2, 2)
    # self.calc_operand_n_confidence(tokens, operand_types, 4)

    self.calc_keyword_confidence()

    openers = ['begin', 'def', 'do', 'class', 'module']
    closers = ['end']
    self.calc_paired_blockers_confidence(openers, closers)
    self.calc_line_length_confidence(code, self.max_expected_line)
def __init__(self, code):
    """Tokenize Java source and compute token, operator, group, operand,
    keyword, brace-pairing, and line-length confidences."""
    super().__init__()
    operand_types = []

    # builders for whitespace and literals
    whitespace_tb = WhitespaceTokenBuilder()
    newline_tb = NewlineTokenBuilder()
    integer_tb = IntegerTokenBuilder('_')
    integer_exponent_tb = IntegerExponentTokenBuilder('_')
    real_tb = RealTokenBuilder(False, False, '_')
    real_exponent_tb = RealExponentTokenBuilder(False, False, 'E', '_')
    operand_types.append('number')

    leads = '_'
    extras = '_'
    identifier_tb = IdentifierTokenBuilder(leads, extras)
    operand_types.append('identifier')

    # '@Annotation' names
    decorator_tb = PrefixedIdentifierTokenBuilder('@', 'decorator', False)

    quotes = ['"', "'", "’"]
    string_tb = EscapedStringTokenBuilder(quotes, 0)
    operand_types.append('string')

    class_type_tb = ClassTypeTokenBuilder()
    operand_types.append('class')

    slash_slash_comment_tb = SlashSlashCommentTokenBuilder()
    slash_star_comment_tb = SlashStarCommentTokenBuilder()

    terminators_tb = SingleCharacterTokenBuilder(';', 'statement terminator',
                                                 False)

    known_operators = [
        '+', '-', '*', '/', '%', '=', '==', '!=', '>', '>=', '<', '<=',
        '+=', '-=', '*=', '/=', '%=', '&=', '|=', '^=', '<<=', '>>=',
        '!', '&', '|', '~', '<<', '>>', '>>>', '>>>=', '^', '.', '::',
        '++', '--', '&&', '||', '?', '->', 'new'
    ]
    known_operator_tb = CaseSensitiveListTokenBuilder(
        known_operators, 'operator', False)

    self.unary_operators = ['+', '-', '!', '~', '++', '--', 'new']
    self.postfix_operators = ['++', '--']

    # ':' is treated as a grouper (enhanced-for, labels, ternary)
    groupers = ['(', ')', ',', '[', ']', '{', '}', ':']
    group_starts = ['(', '[', ',', '{']
    group_ends = [')', ']', '}']
    group_mids = [',', ':']
    groupers_tb = CaseInsensitiveListTokenBuilder(groupers, 'group', False)

    keywords = [
        'abstract', 'assert', 'break', 'case', 'catch', 'class', 'const',
        'continue', 'default', 'do', 'else', 'enum', 'extends', 'final',
        'finally', 'for', 'goto', 'if', 'implements', 'import',
        'instanceof', 'interface', 'native', 'package', 'private',
        'protected', 'public', 'return', 'static', 'strictfp', 'super',
        'switch', 'synchronized', 'throw', 'throws', 'transient', 'try',
        'volatile', 'while'
    ]
    keyword_tb = CaseSensitiveListTokenBuilder(keywords, 'keyword', False)

    # primitive types plus a few common JDK class names
    types = [
        'boolean', 'byte', 'char', 'double', 'float', 'int', 'long',
        'short', 'string', 'void', 'Integer', 'String', 'StringBuilder',
        'File', 'Exception', 'IOException'
    ]
    types_tb = CaseSensitiveListTokenBuilder(types, 'type', True)
    operand_types.append('type')

    values = ['false', 'null', 'this', 'true']
    values_tb = CaseSensitiveListTokenBuilder(values, 'value', True)
    operand_types.append('value')

    invalid_token_builder = InvalidTokenBuilder()

    # builder order presumably sets matching priority — TODO confirm
    tokenbuilders = [
        newline_tb, whitespace_tb, terminators_tb, integer_tb,
        integer_exponent_tb, real_tb, real_exponent_tb, keyword_tb,
        types_tb, values_tb, known_operator_tb, groupers_tb,
        identifier_tb, class_type_tb, decorator_tb, string_tb,
        slash_slash_comment_tb, slash_star_comment_tb,
        self.unknown_operator_tb, invalid_token_builder
    ]

    tokenizer = Tokenizer(tokenbuilders)
    tokens = tokenizer.tokenize(code)
    # merge runs of invalid tokens, then fold 'identifier:' label forms
    tokens = Examiner.combine_adjacent_identical_tokens(
        tokens, 'invalid operator')
    tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid')
    tokens = Examiner.combine_identifier_colon(
        tokens, ['statement terminator', 'newline'], ['{'],
        ['whitespace', 'comment'])
    self.tokens = tokens
    self.convert_identifiers_to_labels()
    # keywords/operators after '::' or '.' are member names
    self.convert_keywords_to_identifiers(['::', '.'])
    self.convert_operators_to_identifiers(['::', '.'])

    self.calc_statistics()

    # confidence calculations operate on a joined copy of the tokens
    tokens = self.source_tokens()
    tokens = Examiner.join_all_lines(tokens)

    self.calc_token_confidence()
    self.calc_token_2_confidence()

    num_operators = self.count_my_tokens(['operator', 'invalid operator'])
    if num_operators > 0:
        self.calc_operator_confidence(num_operators)
        allow_pairs = []
        self.calc_operator_2_confidence(tokens, num_operators, allow_pairs)
        self.calc_operator_3_confidence(tokens, num_operators, group_ends,
                                        allow_pairs)
        self.calc_operator_4_confidence(tokens, num_operators, group_starts,
                                        allow_pairs)

    self.calc_group_confidence(tokens, group_mids)

    operand_types_2 = ['number', 'string', 'symbol']
    self.calc_operand_n_confidence(tokens, operand_types_2, 2)
    self.calc_operand_n_confidence(tokens, operand_types, 4)

    self.calc_keyword_confidence()
    self.calc_paired_blockers_confidence(['{'], ['}'])
    self.calc_line_length_confidence(code, self.max_expected_line)
def __init__(self, code, version):
    """Tokenize dBASE source and compute language-confidence metrics.

    code: the source text to examine.
    version: dBASE dialect selector; the visible branches handle 'ii' and 'iii'.

    NOTE(review): enclosing class is presumably an Examiner subclass that
    provides TrimCtrlZText, unknown_operator_tb, the calc_* methods and
    max_expected_line — class header not visible here, confirm.
    """
    super().__init__()
    # Strip trailing Ctrl-Z (DOS end-of-file marker) before tokenizing.
    ctrlz_char = ''
    code = self.TrimCtrlZText(code, ctrlz_char)
    # dBASE is line-oriented: newlines terminate statements.
    self.newlines_important = 'always'
    operand_types = []

    whitespace_tb = WhitespaceTokenBuilder()
    newline_tb = NewlineTokenBuilder()
    # Apostrophe is accepted as a digit-group separator in numbers.
    integer_tb = IntegerTokenBuilder("'")
    integer_exponent_tb = IntegerExponentTokenBuilder("'")
    real_tb = RealTokenBuilder(False, False, "'")
    real_exponent_tb = RealExponentTokenBuilder(False, False, 'E', "'")
    operand_types.append('number')

    leads = '_'
    extras = '_'
    if version == 'ii':
        # dBASE II identifiers may also contain colon, quote and hyphen.
        extras = ":_'-"
    identifier_tb = IdentifierTokenBuilder(leads, extras)
    wild_card_identifier_tb = WildCardIdentifierTokenBuilder('*?', '*?:')
    operand_types.append('identifier')

    quotes = ['"', "'", "’"]
    string_tb = EscapedStringTokenBuilder(quotes, 0)
    bracket_string_tb = BracketedStringTokenBuilder()
    text_string_tb = TextBlockTokenBuilder('TEXT', 'ENDTEXT')
    operand_types.append('string')

    line_continuation_tb = SingleCharacterTokenBuilder(';', 'line continuation', False)
    comment_tb = AsteriskCommentTokenBuilder()
    line_comment_tb = LeadToEndOfLineTokenBuilder('&&', True, 'comment')

    # NOTE(review): both branches build the identical operator list, and
    # '$' and '#' each appear twice in it — looks unintentional; confirm.
    if version == 'ii':
        known_operators = [
            '+', '-', '*', '/', '**', '^', '=', '<>', '#',
            '>', '>=', '<', '<=', '$',
            '.NOT.', '.AND.', '.OR.', '&', '$', '#', '!'
        ]
    if version == 'iii':
        known_operators = [
            '+', '-', '*', '/', '**', '^', '=', '<>', '#',
            '>', '>=', '<', '<=', '$',
            '.NOT.', '.AND.', '.OR.', '&', '$', '#', '!'
        ]
    known_operator_tb = CaseInsensitiveListTokenBuilder(known_operators, 'operator', False)

    if version == 'ii':
        self.unary_operators = ['+', '-', '.NOT.', '&', '$', '#', '!']
    if version == 'iii':
        self.unary_operators = ['+', '-', '.NOT.', '&']
    self.postfix_operators = []

    special_chars = []
    if version == 'ii':
        special_chars = ['*', '#']
    # Tokens that may legitimately precede a special-character function.
    # NOTE(review): '.or' lacks the trailing dot that '.and.'/'.not.' have
    # — possible typo; confirm against DbaseSpecialFunctionTokenBuilder.
    previous = ['if', 'case', 'while', 'store', '(', '.and.', '.or', '.not.']
    special_function_tb = DbaseSpecialFunctionTokenBuilder(special_chars, previous)

    groupers = ['(', ')', ',']
    group_starts = ['(', ',']
    group_mids = [',']
    group_ends = [')']
    groupers_tb = CaseSensitiveListTokenBuilder(groupers, 'group', False)

    # Keywords include 4-letter abbreviations (e.g. 'ACCE' for 'ACCEPT'),
    # which dBASE accepts.
    if version == 'ii':
        keywords = [
            'ACCEPT', 'ACCE', 'APPEND', 'APPE', 'CASE', 'CLEAR', 'CLEA',
            'COPY', 'COUNT', 'CREATE', 'CREA', 'DELETE', 'DELE',
            'DISPLAY', 'DISP', 'DO', 'EJECT', 'EJEC', 'ELSE',
            'ENDCASE', 'ENDC', 'ENDDO', 'ENDD', 'ENDIF', 'ENDI',
            'ENDWHILE', 'ENDW', 'ERASE', 'ERAS', 'FIND', 'FOR',
            'FORMAT', 'FORM', 'GET', 'GO', 'GOTO', 'IF', 'INDEX', 'LIKE',
            'LOCATE', 'LOCA', 'LOOP', 'OTHERWISE', 'OTHE', 'PACK',
            'PICTURE', 'PICT', 'READ', 'RECALL', 'RECA', 'RELEASE', 'RELE',
            'REPLACE', 'REPL', 'REPORT', 'REPO', 'RESTORE', 'REST',
            'RETURN', 'RETU', 'SAVE', 'SAY', 'SELECT', 'SELE', 'SET',
            'SKIP', 'SORT', 'STORE', 'STOR', 'SUM', 'TO', 'USE',
            'USING', 'USIN', 'WAIT', 'WHILE', 'WHIL', 'WITH',
            '@', '?', '??',
            'ALTERNATE', 'BELL', 'COLON', 'COLOR', 'CONSOLE', 'CONS',
            'DELIMITERS', 'INTENSITY', 'PRINT', 'TALK'
        ]
    if version == 'iii':
        keywords = [
            'ACCEPT', 'APPEND', 'ASSIST', 'AVERAGE', 'BROWSE', 'CALL',
            'CANCEL', 'CASE', 'CHANGE', 'CLEAR', 'CLOSE', 'CONTINUE',
            'COPY', 'COUNT', 'CREATE', 'DELETE', 'DIR', 'DISPLAY', 'DISP',
            'DO', 'EDIT', 'ELSE', 'ENDCASE', 'ENDDO', 'ENDIF', 'ENDWHILE',
            'ERASE', 'EXIT', 'EXPORT', 'FIND', 'FOR', 'FROM', 'GET', 'GO',
            'GOTO', 'HELP', 'IF', 'IMPORT', 'INDEX', 'INPUT', 'INSERT',
            'JOIN', 'LABEL', 'LIKE', 'LIST', 'LOAD', 'LOCATE', 'LOOP',
            'MODIFY', 'OTHERWISE', 'OTHE', 'PACK', 'PARAMETERS',
            'PICTURE', 'PRIVATE', 'PROCEDURE', 'PUBLIC', 'QUIT', 'READ',
            'RECALL', 'RELEASE', 'REPLACE', 'REPORT', 'RESTORE', 'RESUME',
            'RETURN', 'RETRY', 'RUN', 'SAVE', 'SAY', 'SELECT', 'SELE',
            'SEEK', 'SET', 'SKIP', 'SORT', 'STORE', 'SUM', 'SUSPEND',
            'TO', 'TOTAL', 'TYPE', 'UPDATE', 'USE', 'WHILE', 'WITH', 'ZAP',
            '@', '?', '??',
            'ALTERNATE', 'BELL', 'CARRY', 'CATALOG', 'CENTURY', 'COLOR',
            'CONFIRM', 'CONSOLE', 'DATE', 'AMERICAN', 'ANSI', 'BRITISH',
            'ITALIAN', 'FRENCH', 'GERMAN', 'DATABASES', 'DEBUG',
            'DECIMALS', 'DEFAULT', 'DELETED', 'DELIMITERS', 'DEVICE',
            'DOHISTORY', 'ECHO', 'ESCAPE', 'EXACT', 'FIELDS', 'FILTER',
            'FIXED', 'FORMAT', 'FUNCTION', 'HEADING', 'HELP', 'HISTORY',
            'INTENSITY', 'MARGIN', 'MEMO', 'WIDTH', 'MENUS', 'MESSAGE',
            'ODOMETER', 'ORDER', 'PATH', 'PRINTER', 'RELATION', 'SAFETY',
            'STATUS', 'STEP', 'TALK', 'TITLE', 'TYPEAHEAD', 'UNIQUE',
            'VIEW', 'STRUCTURE', 'MEMORY', 'LABEL', 'QUERY', 'REPORT',
            'GETS', 'LOCK', 'FREEZE', 'NOFOLLOW', 'NOMENU'
        ]
    keyword_tb = CaseInsensitiveListTokenBuilder(keywords, 'keyword', False)

    # Keywords whose trailing text is treated as a comment (dBASE II only).
    keyword_comments = []
    if version == 'ii':
        keyword_comments = [
            'ELSE', 'ENDCASE', 'ENDC', 'ENDDO', 'ENDD', 'ENDIF', 'ENDI',
            'ENDWHILE', 'ENDW', 'NOTE', 'REMARK', 'REMA'
        ]
    keyword_comment_tb = KeywordCommentTokenBuilder(keyword_comments, False)
    keyword_comment2_tb = KeywordComment2TokenBuilder(['DO', 'CASE'], False)

    if version == 'ii':
        values = [
            'ALL', 'BLANK', 'BLAN', 'BOTTOM', 'BOTT', 'EOF', 'OFF', 'ON',
            'TOP', 'PRIMARY', 'PRIM', 'SECONDARY', 'SECO', '.T.', '.F.'
        ]
    if version == 'iii':
        values = [
            'ALL', 'BLANK', 'BLAN', 'BOTTOM', 'BOTT', 'EOF', 'OFF', 'ON',
            'TOP', '.T.', '.F.'
        ]
    values_tb = CaseInsensitiveListTokenBuilder(values, 'value', True)
    operand_types.append('value')

    if version == 'ii':
        functions = [
            'ALLTRIM', 'CHR', 'CTOD', 'DATE', 'DATETIME', 'DAY', 'DELETED',
            'DESCEND', 'DESC', 'DTOC', 'DTOS', 'IIF', 'LEFT', 'LTRIM',
            'MONTH', 'PAGENO', 'RECCOUNT', 'RECNO', 'RIGHT', 'STOD', 'STR',
            'SUBSTR', 'TIME', 'TRIM', 'UPPER', 'VAL', 'YEAR'
        ]
    if version == 'iii':
        functions = [
            'ABS', 'ASC', 'AT', 'BOF', 'CDOW', 'CHR', 'CMONTH', 'COL',
            'CTOD', 'DATE', 'DAY', 'DBF', 'DELETED', 'DISKSPACE', 'DOW',
            'DTOC', 'EOF', 'ERROR', 'EXP', 'FILE', 'FKMAX', 'FKLABEL',
            'FIELD', 'FOUND', 'GETENV', 'IIF', 'INKEY', 'INT', 'ISALPHA',
            'ISCOLOR', 'ISLOWER', 'ISUPPER', 'LEFT', 'LEN', 'LOG', 'LOWER',
            'LTRIM', 'LUPDATE', 'MAX', 'MESSAGE', 'MIN', 'MOD', 'MONTH',
            'NDX', 'OS', 'PCOL', 'PROW', 'READKEY', 'RECCOUNT', 'RECNO',
            'RECSIZE', 'REPLICATE', 'RIGHT', 'RTRIM', 'ROUND', 'ROW',
            'TIME', 'TYPE', 'SPACE', 'STUFF', 'SQRT', 'STR', 'SUBSTR',
            'TRANSFORM', 'TRIM', 'UPPER', 'VAL', 'VERSION', 'YEAR'
        ]
    function_tb = CaseInsensitiveListTokenBuilder(functions, 'function', True)
    operand_types.append('function')

    filename_tb = DbaseFilenameTokenBuilder()
    invalid_token_builder = InvalidTokenBuilder()

    # Builder order matters: earlier builders win ties.
    tokenbuilders1 = [
        newline_tb,
        whitespace_tb,
        line_continuation_tb,
        integer_tb,
        integer_exponent_tb,
        real_tb,
        real_exponent_tb,
        keyword_tb,
        keyword_comment_tb,
        keyword_comment2_tb,
        values_tb,
        groupers_tb,
        special_function_tb,
        comment_tb,  # before operators, to catch single asterisk on line
        known_operator_tb,
        function_tb,
        filename_tb,  # before identifier
        identifier_tb,
        string_tb,
        text_string_tb
    ]
    # Dialect-specific builders.
    tokenbuilders_ii = [
        bracket_string_tb
    ]
    tokenbuilders_iii = [
        line_comment_tb
    ]
    # Catch-alls go last.
    tokenbuilders2 = [
        wild_card_identifier_tb,
        self.unknown_operator_tb,
        invalid_token_builder
    ]

    tokenbuilders = tokenbuilders1
    if version == 'ii':
        tokenbuilders += tokenbuilders_ii
    if version == 'iii':
        tokenbuilders += tokenbuilders_iii
    tokenbuilders += tokenbuilders2

    tokenizer = Tokenizer(tokenbuilders)
    tokens = tokenizer.tokenize(code)
    # Merge runs of unrecognized tokens into single tokens.
    tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid operator')
    self.tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid')

    self.calc_statistics()
    tokens = self.source_tokens()

    # Confidence calculations: each contributes to the language score.
    self.calc_token_confidence()
    self.calc_token_2_confidence()

    num_operators = self.count_my_tokens(['operator', 'invalid operator'])
    if num_operators > 0:
        self.calc_operator_confidence(num_operators)
        allow_pairs = []
        self.calc_operator_2_confidence(tokens, num_operators, allow_pairs)
        self.calc_operator_3_confidence(tokens, num_operators, group_ends, allow_pairs)
        self.calc_operator_4_confidence(tokens, num_operators, group_starts, allow_pairs)

    self.calc_group_confidence(tokens, group_mids)

    operand_types_2 = ['number', 'number', 'function', 'value', 'string', 'filename']
    self.calc_operand_n_confidence(tokens, operand_types_2, 2)
    self.calc_operand_n_confidence(tokens, operand_types, 4)

    self.calc_keyword_confidence()
    # dBASE II has its own line-format rules.
    if version == 'ii':
        self.calc_line_format_confidence_ii()
    else:
        self.calc_line_format_confidence()
    self.calc_line_length_confidence(code, self.max_expected_line)
def __init__(self, code):
    """Tokenize Visual Basic source and compute language-confidence metrics.

    code: the source text to examine.

    NOTE(review): enclosing class is presumably an Examiner subclass that
    provides unknown_operator_tb, the convert_*/calc_* methods and
    max_expected_line — class header not visible here, confirm.
    """
    super().__init__()
    operand_types = []

    whitespace_tb = WhitespaceTokenBuilder()
    newline_tb = NewlineTokenBuilder()
    # VB continues a statement on the next line with a trailing underscore.
    line_continuation_tb = SingleCharacterTokenBuilder(['_'], 'line continuation', False)
    integer_tb = IntegerTokenBuilder(None)
    integer_exponent_tb = IntegerExponentTokenBuilder(None)
    real_tb = RealTokenBuilder(False, False, None)
    real_exponent_tb = RealExponentTokenBuilder(False, False, 'E', None)
    operand_types.append('number')

    # '$%#!' are VB type-declaration suffix characters.
    variable_tb = VisualBasicVariableTokenBuilder('$%#!')
    operand_types.append('variable')

    leads = '_'
    extras = '_'
    suffixes = '$%#!'
    identifier_tb = SuffixedIdentifierTokenBuilder(leads, extras, suffixes)
    operand_types.append('identifier')

    quotes = ['"']
    string_tb = EscapedStringTokenBuilder(quotes, 0)
    operand_types.append('string')

    remark_tb = RemarkTokenBuilder()
    comment_tb = LeadToEndOfLineTokenBuilder("'", True, 'comment')
    # Also accept the typographic right single quote as a comment lead.
    comment2_tb = LeadToEndOfLineTokenBuilder("’", True, 'comment')

    known_operators = [
        '+', '-', '*', '/', '\\', 'Mod', '^', '&',
        '>', '>=', '<', '<=', '<>', '=',
        'And', 'Or', 'Eqv', 'Is', 'Imp', 'Like', 'Not', 'Xor',
        '.'
    ]

    self.unary_operators = [
        '+', '-', 'Not'
    ]

    groupers = ['(', ')', ',', '[', ']']
    group_starts = ['(', '[', ',']
    group_mids = [',']
    group_ends = [')', ']']

    groupers_tb = CaseInsensitiveListTokenBuilder(groupers, 'group', False)
    known_operator_tb = CaseSensitiveListTokenBuilder(known_operators, 'operator', False)

    keywords = [
        'Access', 'Alias', 'Any', 'AppActivate', 'Append', 'AppendChunk',
        'Arrange', 'As', 'Beep', 'BeginTrans', 'ByVal', 'Call', 'Case',
        'Circle', 'Clear', 'Close', 'Cls', 'CommitTrans', 'Compare',
        'Const', 'Controls', 'CreateDynaset', 'Data', 'DateSerial',
        'DateValue', 'Declare', 'DefCur', 'DefDbl', 'DefInt', 'DefLng',
        'DefSng', 'DefStr', 'DefVar', 'Delete', 'Dim', 'Do', 'DoEvents',
        'Drag', 'Edit', 'Else', 'ElseIf', 'End', 'EndDoc', 'EndIf',
        'Erase', 'ExecuteSQL', 'Exit', 'Explicit', 'FieldSize', 'FileAttr',
        'FileCopy', 'FileDateTime', 'Fix', 'For', 'Form', 'Format',
        'Format$', 'Forms', 'Function', 'Get', 'GetAttr', 'GetChunk',
        'GetData', 'GetFormat', 'GetText', 'Global', 'GoSub', 'GoTo',
        'Hide', 'If', 'Input', 'Input$', 'InputBox', 'InputBox$', 'Kill',
        'Let', 'Lib', 'Line', 'LinkExecute', 'LinkPoke', 'LinkRequest',
        'LinkSend', 'Load', 'LoadPicture', 'Loc', 'Local', 'Lock', 'LOF',
        'Loop', 'LSet', 'MkDir', 'Move', 'MoveFirst', 'MoveLast',
        'MoveNext', 'MovePrevious', 'MoveRelative', 'MsgBox', 'Name',
        'New', 'NewPage', 'Next', 'NextBlock', 'On', 'Open',
        'OpenDataBase', 'Option', 'Output', 'Point', 'Preserve', 'Print',
        'PrintForm', 'Private', 'PSet', 'Put', 'QBColor', 'Random',
        'Randomize', 'Read', 'ReDim', 'Refresh', 'RegisterDataBase',
        'Rem', 'RemoveItem', 'Reset', 'Restore', 'Resume', 'Return',
        'RmDir', 'Rollback', 'RSet', 'SavePicture', 'Scale', 'Seek',
        'Select', 'SendKeys', 'Set', 'SetAttr', 'SetData', 'SetFocus',
        'SetText', 'Shared', 'Shell', 'Show', 'Static', 'Step', 'Stop',
        'Sub', 'System', 'Text', 'TextHeight', 'TextWidth', 'Then',
        'Timer', 'TimeSerial', 'TimeValue', 'To', 'Type', 'TypeOf',
        'Unload', 'Unlock', 'Until', 'Update', 'Using', 'VarType',
        'Weekday', 'Wend', 'While', 'Width', 'Write', 'ZOrder'
    ]

    keyword_tb = CaseSensitiveListTokenBuilder(keywords, 'keyword', False)

    functions = [
        'Abs', 'AddItem', 'AddNew', 'Asc', 'Atn', 'CCur', 'CDbl', 'ChDir',
        'ChDrive', 'Chr', 'Chr$', 'CInt', 'CLng', 'Command', 'Command$',
        'Cos', 'CSng', 'CStr', 'CurDir$', 'CVar', 'CVDate', 'Date',
        'Date$', 'Day', 'Dir', 'Dir$', 'Environ$', 'EOF', 'Error',
        'Error$', 'Exp', 'FileLen', 'FreeFile', 'Hex', 'Hex$', 'Hour',
        'InStr', 'Int', 'InStrRev', 'IsDate', 'IsEmpty', 'IsNull',
        'IsNumeric', 'Join', 'LBound', 'LCase', 'LCase$', 'Left', 'Left$',
        'Len', 'Log', 'LTrim', 'LTrim$', 'Mid', 'Mid$', 'Minute', 'Mod',
        'Month', 'Now', 'Oct', 'Oct$', 'RGB', 'Right', 'Right$', 'Rnd',
        'RTrim', 'RTrim$', 'Second', 'Sgn', 'Sin', 'Space', 'Space$',
        'Spc', 'Split', 'Sqr', 'Str', 'Str$', 'StrComp', 'String$', 'Tab',
        'Tan', 'Time', 'Time$', 'Trim', 'Trim$', 'UBound', 'UCase',
        'UCase$', 'Val', 'Year'
    ]

    function_tb = CaseSensitiveListTokenBuilder(functions, 'function', True)
    operand_types.append('function')

    types = [
        'Binary', 'Control', 'Currency', 'Double', 'Dynaset', 'Integer',
        'Long', 'Single', 'String', 'Variant'
    ]

    types_tb = CaseSensitiveListTokenBuilder(types, 'type', True)
    operand_types.append('type')

    values = [
        'False', 'True', 'App', 'Base', 'Clipboard', 'Debug', 'Erl',
        'Err', 'Printer', 'Me', 'Nothing', 'Null'
    ]

    values_tb = CaseSensitiveListTokenBuilder(values, 'value', True)
    operand_types.append('value')

    invalid_token_builder = InvalidTokenBuilder()

    # Builder order matters: earlier builders win ties; catch-alls go last.
    tokenbuilders = [
        newline_tb,
        whitespace_tb,
        line_continuation_tb,
        integer_tb,
        integer_exponent_tb,
        real_tb,
        real_exponent_tb,
        keyword_tb,
        groupers_tb,
        known_operator_tb,
        types_tb,
        values_tb,
        function_tb,
        variable_tb,
        identifier_tb,
        string_tb,
        remark_tb,
        comment_tb,
        comment2_tb,
        self.unknown_operator_tb,
        invalid_token_builder
    ]

    tokenizer = Tokenizer(tokenbuilders)
    tokens = tokenizer.tokenize(code)
    # Merge runs of unrecognized tokens into single tokens.
    tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid operator')
    tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid')
    # Re-classify "identifier:" at line start as a label.
    tokens = Examiner.combine_identifier_colon(tokens, ['newline'], [], ['whitespace', 'comment'])
    self.tokens = tokens
    self.convert_identifiers_to_labels()
    # A keyword directly after '.' is a member name, not a keyword.
    self.convert_keywords_to_identifiers(['.'])

    self.calc_statistics()

    tokens = self.source_tokens()
    tokens = Examiner.join_all_lines(tokens)

    # Confidence calculations: each contributes to the language score.
    self.calc_token_confidence()
    self.calc_token_2_confidence()

    num_operators = self.count_my_tokens(['operator', 'invalid operator'])
    if num_operators > 0:
        self.calc_operator_confidence(num_operators)
        allow_pairs = []
        self.calc_operator_2_confidence(tokens, num_operators, allow_pairs)
        self.calc_operator_3_confidence(tokens, num_operators, group_ends, allow_pairs)
        self.calc_operator_4_confidence(tokens, num_operators, group_starts, allow_pairs)

    self.calc_group_confidence(tokens, group_mids)

    operand_types_2 = ['number', 'string', 'symbol']
    self.calc_operand_n_confidence(tokens, operand_types_2, 2)
    self.calc_operand_n_confidence(tokens, operand_types, 4)

    self.calc_keyword_confidence()
    self.calc_line_format_confidence()
    self.calc_line_length_confidence(code, self.max_expected_line)
def __init__(self, code):
    """Tokenize Objective-C source and compute language-confidence metrics.

    code: the source text to examine.

    NOTE(review): enclosing class is presumably an Examiner subclass that
    provides unknown_operator_tb, the convert_*/calc_* methods and
    max_expected_line — class header not visible here, confirm.
    """
    super().__init__()
    operand_types = []

    whitespace_tb = WhitespaceTokenBuilder()
    newline_tb = NewlineTokenBuilder()
    integer_tb = IntegerTokenBuilder(None)
    integer_exponent_tb = IntegerExponentTokenBuilder(None)
    real_tb = RealTokenBuilder(False, False, None)
    real_exponent_tb = RealExponentTokenBuilder(False, False, 'E', None)
    operand_types.append('number')

    leads = '_'
    extras = '_'
    identifier_tb = IdentifierTokenBuilder(leads, extras)
    operand_types.append('identifier')

    directive_tb = DirectiveTokenBuilder()

    quotes = ['"', "'", "’"]
    string_tb = EscapedStringTokenBuilder(quotes, 10)
    # Objective-C NSString literals: @"..."
    prefixed_string_tb = PrefixedStringTokenBuilder('@', False, quotes)
    operand_types.append('string')

    class_type_tb = ClassTypeTokenBuilder()
    operand_types.append('class')

    slash_slash_comment_tb = SlashSlashCommentTokenBuilder()
    slash_star_comment_tb = SlashStarCommentTokenBuilder()

    directives = [
        '#define', '#undef', '#ifdef', '#ifndef', '#if', '#endif',
        '#else', '#elif', '#import', '#line', '#include'
    ]

    line_continuation_tb = SingleCharacterTokenBuilder(
        '\\', 'line continuation', False)
    c_preprocessor_tb = CaseSensitiveListTokenBuilder(
        directives, 'preprocessor', False)
    # These directives consume the rest of the line.
    c_warning_tb = LeadToEndOfLineTokenBuilder('#warning', True, 'preprocessor')
    c_error_tb = LeadToEndOfLineTokenBuilder('#error', True, 'preprocessor')
    c_pragma_tb = LeadToEndOfLineTokenBuilder('#pragma', True, 'preprocessor')
    terminators_tb = SingleCharacterTokenBuilder(';', 'statement terminator', False)

    known_operators = [
        '+', '-', '*', '/', '%',
        '=', '==', '!=', '>', '>=', '<', '<=',
        '+=', '-=', '*=', '/=', '%=', '&=', '|=', '^=', '<<=', '>>=',
        '!', '&', '|', '<<', '>>', '~',
        '.', '->', '++', '--', '&&', '||', '^', '?', '##'
    ]

    self.unary_operators = [
        '+', '-', '*', '!', '&', '^', '~', '++', '--', '##'
    ]

    self.postfix_operators = ['++', '--', '&', '->', '*', '^']

    groupers = ['(', ')', ',', '[', ']', '{', '}', ':']
    group_starts = ['(', '[', ',', '{']
    group_ends = [')', ']', '}']
    group_mids = [',', ':']

    groupers_tb = CaseInsensitiveListTokenBuilder(groupers, 'group', False)
    known_operator_tb = CaseSensitiveListTokenBuilder(
        known_operators, 'operator', False)

    keywords = [
        'atomic', 'break', 'bycopy', 'byref', 'case', 'continue',
        'default', 'do', 'else', 'for', 'goto', 'if', 'IMP', 'in',
        'inline', 'inout', 'nonatomic', 'oneway', 'out', 'Protocol',
        'restrict', 'retain', 'return', 'SEL', 'sizeof', 'switch',
        'typedef', 'while',
        '@interface', '@end', '@implementation', '@protocol', '@class',
        '@public', '@protected', '@private', '@property', '@try',
        '@throw', '@catch()', '@finally', '@synthesize', '@dynamic',
        '@selector'
    ]

    keyword_tb = CaseSensitiveListTokenBuilder(keywords, 'keyword', False)

    types = [
        'auto', 'char', 'const', 'double', 'enum', 'extern', 'float',
        'id', 'int', 'long', 'register', 'short', 'signed', 'static',
        'struct', 'union', 'unsigned', 'void', 'volatile',
        '_Bool', '_Complex', '_Imaginary', 'BOOL', 'Class'
    ]

    types_tb = CaseSensitiveListTokenBuilder(types, 'type', True)
    operand_types.append('type')

    values = ['self', 'super', 'nil', 'YES', 'NO', 'NULL', '...']

    values_tb = CaseSensitiveListTokenBuilder(values, 'value', True)
    operand_types.append('value')

    invalid_token_builder = InvalidTokenBuilder()

    # Builder order matters: earlier builders win ties; catch-alls go last.
    tokenbuilders = [
        newline_tb,
        whitespace_tb,
        line_continuation_tb,
        terminators_tb,
        integer_tb,
        integer_exponent_tb,
        real_tb,
        real_exponent_tb,
        keyword_tb,
        types_tb,
        values_tb,
        groupers_tb,
        known_operator_tb,
        directive_tb,
        identifier_tb,
        class_type_tb,
        string_tb,
        prefixed_string_tb,
        slash_slash_comment_tb,
        slash_star_comment_tb,
        c_preprocessor_tb,
        c_warning_tb,
        c_error_tb,
        c_pragma_tb,
        self.unknown_operator_tb,
        invalid_token_builder
    ]

    tokenizer = Tokenizer(tokenbuilders)
    tokens = tokenizer.tokenize(code)
    # Merge runs of unrecognized tokens into single tokens.
    tokens = Examiner.combine_adjacent_identical_tokens(
        tokens, 'invalid operator')
    tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid')
    # Label detection is disabled for Objective-C (colon is heavily used
    # in message sends, so the identifier-colon heuristic misfires).
    # tokens = Examiner.combine_identifier_colon(tokens, ['statement terminator', 'newline'], ['{'], ['whitespace', 'comment', 'line description'])
    self.tokens = tokens
    self.convert_identifiers_to_labels()
    self.convert_values_to_operators()

    self.calc_statistics()

    tokens = self.source_tokens()
    tokens = Examiner.join_all_lines(tokens)

    # Confidence calculations: each contributes to the language score.
    self.calc_token_confidence()
    self.calc_token_2_confidence(['*', ';'])

    num_operators = self.count_my_tokens(['operator', 'invalid operator'])
    if num_operators > 0:
        self.calc_operator_confidence(num_operators)
        allow_pairs = []
        self.calc_operator_2_confidence(tokens, num_operators, allow_pairs)
        self.calc_operator_3_confidence(tokens, num_operators, group_ends, allow_pairs)
        self.calc_operator_4_confidence(tokens, num_operators, group_starts, allow_pairs)

    self.calc_group_confidence(tokens, group_mids)

    operand_types_2 = ['number', 'string', 'symbol']
    self.calc_operand_n_confidence(tokens, operand_types_2, 2)
    self.calc_operand_n_confidence(tokens, operand_types, 4)

    self.calc_keyword_confidence()
    self.calc_paired_blockers_confidence(['{'], ['}'])
    self.calc_line_length_confidence(code, self.max_expected_line)
def __init__(self, code):
    """Tokenize Modula-2 source and compute language-confidence metrics.

    code: the source text to examine.

    NOTE(review): enclosing class is presumably an Examiner subclass that
    provides TrimCtrlZText, unknown_operator_tb, the calc_* methods and
    max_expected_line — class header not visible here, confirm.
    """
    super().__init__()
    # Strip trailing Ctrl-Z (DOS end-of-file marker) before tokenizing.
    ctrlz_char = ''
    code = self.TrimCtrlZText(code, ctrlz_char)
    operand_types = []

    whitespace_tb = WhitespaceTokenBuilder()
    newline_tb = NewlineTokenBuilder()
    stmt_separator_tb = SingleCharacterTokenBuilder(
        ';', 'statement separator', False)
    integer_tb = IntegerTokenBuilder(None)
    integer_exponent_tb = IntegerExponentTokenBuilder(None)
    real_tb = RealTokenBuilder(True, True, None)
    real_exponent_tb = RealExponentTokenBuilder(True, True, 'E', None)
    # Modula-2 suffixed literals: 0FFH (hex), 17C (octal), 0101B (binary).
    hex_constant_tb = SuffixedIntegerTokenBuilder(
        'H', True, '0123456789ABCDEFabcdef')
    octal_constant_tb = SuffixedIntegerTokenBuilder('C', True, '01234567')
    binary_constant_tb = SuffixedIntegerTokenBuilder('B', True, '01')
    operand_types.append('number')

    leads = '_'
    extras = '_'
    identifier_tb = IdentifierTokenBuilder(leads, extras)
    operand_types.append('identifier')

    quotes = ["'", '"']
    string_tb = StringTokenBuilder(quotes, 0)
    operand_types.append('string')

    paren_star_comment_tb = BlockTokenBuilder('(*', '*)', 'comment')

    known_operators = [
        ':=', '=', '>', '>=', '<', '<=', '#', '<>',
        '+', '-', '*', '/', 'DIV', 'MOD', 'AND', 'OR', 'NOT',
        '^', '.', '..', 'IN', '&'
    ]

    known_operator_tb = CaseSensitiveListTokenBuilder(
        known_operators, 'operator', False)

    # NOTE(review): '@' is listed as a unary operator but is not in
    # known_operators above — confirm whether it is tokenized elsewhere.
    self.unary_operators = ['+', '-', 'NOT', '@', '^', '.']

    self.postfix_operators = ['^']

    groupers = ['(', ')', ',', '[', ']', '{', '}', ':', '|']
    group_starts = ['(', '[', ',', '{']
    group_mids = [',', ':', '|']
    group_ends = [')', ']', '}']

    groupers_tb = CaseSensitiveListTokenBuilder(groupers, 'group', False)

    keywords = [
        'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DO', 'ELSE',
        'ELSIF', 'END', 'EXCEPT', 'EXIT', 'EXPORT', 'FINALLY', 'FOR',
        'FROM', 'IF', 'IMPLEMENTATION', 'IMPORT', 'LOOP', 'MODULE', 'OF',
        'PROCEDURE', 'QUALIFIED', 'REPEAT', 'THEN', 'TO', 'TYPE', 'VAR',
        'WITH', 'WHILE'
    ]

    keyword_tb = CaseSensitiveListTokenBuilder(keywords, 'keyword', False)

    types = [
        'ARRAY', 'BOOLEAN', 'CARDINAL', 'CHAR', 'INTEGER', 'POINTER',
        'REAL', 'RECORD', 'SET'
    ]

    types_tb = CaseSensitiveListTokenBuilder(types, 'type', True)
    operand_types.append('type')

    values = ['FALSE', 'NIL', 'TRUE']

    values_tb = CaseSensitiveListTokenBuilder(values, 'value', True)
    operand_types.append('value')

    invalid_token_builder = InvalidTokenBuilder()

    # Builder order matters: earlier builders win ties; catch-alls go last.
    tokenbuilders = [
        newline_tb,
        whitespace_tb,
        stmt_separator_tb,
        integer_tb,
        integer_exponent_tb,
        real_tb,
        real_exponent_tb,
        hex_constant_tb,
        octal_constant_tb,
        binary_constant_tb,
        keyword_tb,
        types_tb,
        values_tb,
        known_operator_tb,
        groupers_tb,
        identifier_tb,
        string_tb,
        paren_star_comment_tb,
        self.unknown_operator_tb,
        invalid_token_builder
    ]

    tokenizer = Tokenizer(tokenbuilders)
    tokens = tokenizer.tokenize(code)
    # Merge runs of unrecognized tokens into single tokens.
    tokens = Examiner.combine_adjacent_identical_tokens(
        tokens, 'invalid operator')
    tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid')
    self.tokens = tokens

    self.calc_statistics()

    tokens = self.source_tokens()
    tokens = Examiner.join_all_lines(tokens)

    # Confidence calculations: each contributes to the language score.
    self.calc_token_confidence()
    self.calc_token_2_confidence()

    num_operators = self.count_my_tokens(['operator', 'invalid operator'])
    if num_operators > 0:
        self.calc_operator_confidence(num_operators)
        allow_pairs = []
        self.calc_operator_2_confidence(tokens, num_operators, allow_pairs)
        self.calc_operator_3_confidence(tokens, num_operators, group_ends, allow_pairs)
        self.calc_operator_4_confidence(tokens, num_operators, group_starts, allow_pairs)

    self.calc_group_confidence(tokens, group_mids)

    operand_types_2 = ['number', 'string', 'identifier', 'variable']
    self.calc_operand_n_confidence(tokens, operand_types_2, 2)
    self.calc_operand_n_confidence(tokens, operand_types, 4)

    self.calc_keyword_confidence()
    # Block structure: BEGIN/RECORD/CASE/DO/IF/WHILE close with END;
    # REPEAT closes with UNTIL.
    self.calc_paired_blockers_confidence(
        ['BEGIN', 'RECORD', 'CASE', 'DO', 'IF', 'WHILE'], ['END'])
    self.calc_paired_blockers_confidence(['REPEAT'], ['UNTIL'])
    self.calc_line_length_confidence(code, self.max_expected_line)
def __init__(self, code, extension):
    """Tokenize SQL source and compute language-confidence metrics.

    code: the source text to examine.
    extension: dialect selector; 'microsoft'/'t-sql' and 'oracle'/'pl-sql'
        add dialect-specific keywords, values, and types.

    Fixes applied in review:
    - Added missing commas in the keywords list. Implicit string-literal
      concatenation had fused 'TABLESAMPLE'+'TAN', 'TANH'+'TIMEZONE_MINUTE',
      'VERSIONING'+'WHEN', and 'WITHOUT'+'WIDTH_BUCKET'+'YEAR' into bogus
      keywords, silently dropping the real ones.
    - operand_types now records 'type' (not a second 'value') after the
      type token builder, matching the sibling examiners.

    NOTE(review): enclosing class is presumably an Examiner subclass that
    provides unknown_operator_tb, the convert_*/calc_* methods and
    max_expected_line — class header not visible here, confirm.
    """
    super().__init__()
    operand_types = []

    whitespace_tb = WhitespaceTokenBuilder()
    newline_tb = NewlineTokenBuilder()
    integer_tb = IntegerTokenBuilder(None)
    integer_exponent_tb = IntegerExponentTokenBuilder(None)
    real_tb = RealTokenBuilder(True, True, None)
    real_exponent_tb = RealExponentTokenBuilder(True, True, 'E', None)
    operand_types.append('number')

    quotes = ["'", '"']
    # SQL escapes a quote by doubling ("stuffing") it.
    string_tb = StuffedQuoteStringTokenBuilder(quotes, False)
    operand_types.append('string')

    leads = '_'
    extras = '_'
    identifier_tb = IdentifierTokenBuilder(leads, extras)
    # Only the Microsoft dialects accept [bracketed identifiers].
    bracketed_identifier_tb = NullTokenBuilder()
    if extension in ['microsoft', 't-sql']:
        bracketed_identifier_tb = SqlBracketedIdentifierTokenBuilder()
    operand_types.append('identifier')

    terminators_tb = SingleCharacterTokenBuilder(';', 'statement terminator', False)
    comment_tb = LeadToEndOfLineTokenBuilder('--', True, 'comment')

    known_operators = [
        '=', '>', '>=', '<', '<=', '<>', '!=',
        'AND', 'OR', 'NOT', 'IN', 'EXISTS', 'LIKE', 'BETWEEN',
        'ANY', 'ALL', '.'
    ]

    known_operator_tb = CaseSensitiveListTokenBuilder(
        known_operators, 'operator', False)

    self.unary_operators = ['NOT', 'EXISTS', 'ANY', 'ALL']

    groupers = ['(', ')', ',']
    group_starts = ['(', ',']
    group_mids = [',']
    group_ends = [')']

    groupers_tb = CaseInsensitiveListTokenBuilder(groupers, 'group', False)

    keywords = [
        'ACOS', 'ASIN', 'ATAN', 'ABSOLUTE', 'ACTION', 'ADD', 'ALL',
        'ALLOCATE', 'ALTER', 'ARE', 'ABS', 'ARRAY_AGG', 'AVG', 'AS',
        'ASC', 'ASSERTION', 'AT', 'AUTHORIZATION', 'AFTER', 'ARRAY',
        'ASENSITIVE', 'ASYMMETRIC', 'ATOMIC', 'ARRAY_MAX_CARDINALITY',
        'BEFORE', 'BEGIN', 'BETWEEN', 'BIT_LENGTH', 'BOTH', 'BY',
        'BEGIN_FRAME', 'BEGIN_PARTITION', 'BINARY', 'BOOLEAN', 'BREADTH',
        'CALL', 'CASCADE', 'CASCADED', 'CASE', 'CAST', 'CATALOG',
        'CALLED', 'CHAR_LENGTH', 'CHARACTER_LENGTH', 'CHECK', 'COALESCE',
        'COLLATE', 'COLLATION', 'COLUMN', 'COMMIT', 'CONDITION',
        'CONNECT', 'CONNECTION', 'CONSTRAINT', 'CONSTRAINTS', 'CONTAINS',
        'CONTINUE', 'CONVERT', 'CORRESPONDING', 'COUNT', 'CREATE',
        'CROSS', 'CURRENT', 'CURRENT_DATE', 'CURRENT_PATH',
        'CURRENT_TIME', 'CURRENT_TIMESTAMP', 'CURRENT_USER', 'CURSOR',
        'CLOSE', 'CONSTRUCTOR', 'CUBE', 'CURRENT_DEFAULT_TRANSFORM_GROUP',
        'CURRENT_ROLE', 'CURRENT_TRANSFORM_GROUP_FOR_TYPE', 'CYCLE',
        'CARDINALITY', 'CEIL', 'CEILING', 'CONVERT', 'CORR', 'COVAR_POP',
        'COVAR_SAMPLE', 'CUME_DIST', 'CURRENT_CATALOG', 'CURRENT_SCHEMA',
        'CLASSIFIER', 'COS', 'COSH',
        'DAY', 'DEALLOCATE', 'DEC', 'DECLARE', 'DEFAULT', 'DECFLOAT',
        'DEFINE', 'DEFERRABLE', 'DEFERRED', 'DELETE', 'DEPTH', 'DESC',
        'DESCRIBE', 'DENSE_RANK', 'DESCRIPTOR', 'DETERMINISTIC',
        'DIAGNOSTICS', 'DISCONNECT', 'DISTINCT', 'DO', 'DOMAIN', 'DROP',
        'DYNAMIC',
        'ELSE', 'END', 'ESCAPE', 'EXCEPT', 'EXCEPTION', 'ELEMENT',
        'EXEC', 'EXECUTE', 'EXISTS', 'EXIT', 'EXTERNAL', 'EXTRACT',
        'EACH', 'ELSEIF', 'EQUALS', 'END_EXEC', 'EVERY', 'EXP', 'EMPTY',
        'EQUALS',
        'FETCH', 'FIRST', 'FOR', 'FOREIGN', 'FOUND', 'FROM', 'FULL',
        'FUNCTION', 'FUSION', 'FILTER', 'FREE', 'FIRST_VALUE',
        'FRAME_ROW',
        'GENERAL', 'GET', 'GLOBAL', 'GO', 'GOTO', 'GRANT', 'GROUP',
        'GROUPING', 'GROUPS',
        'HANDLER', 'HAVING', 'HOUR', 'HOLD',
        'IDENTITY', 'IF', 'IMMEDIATE', 'IN', 'INDICATOR', 'INITIALLY',
        'INNER', 'INOUT', 'INPUT', 'INSENSITIVE', 'INSERT', 'INT',
        'INTERSECT', 'INITIAL', 'INTERVAL', 'INTO', 'IS', 'ISOLATION',
        'INTERSECTION', 'ITERATE',
        'JOIN', 'JSON_ARRY', 'JSON_ARRAYAGG', 'JSON_EXISTS',
        'JSON_OBJECT', 'JSON_OBJECTAGG', 'JSON_QUERY', 'JSON_TABLE',
        'JSON_TABLE_PRIMITIVE', 'JSON_VALUE',
        'KEY',
        'LANGUAGE', 'LAST', 'LEADING', 'LEFT', 'LEVEL', 'LIKE', 'LOCAL',
        'LARGE', 'LATERAL', 'LEAVE', 'LOCALTIME', 'LOCALTIMESTAMP',
        'LOCATOR', 'LOOP', 'LAG', 'LISTAGG', 'LOG', 'LOG10',
        'LIKE_REGEX', 'LN', 'LOWER', 'LAST_VALUE', 'LEAD',
        'MATCH', 'MAX', 'MIN', 'MINUTE', 'MODULE', 'MONTH', 'MAP',
        'METHOD', 'MODIFIES', 'MATCH_NUMBER', 'MATCH_RECOGNIZE',
        'MATCHES', 'MEMBER', 'MERGE', 'MULTISET', 'MOD',
        'NAMES', 'NATIONAL', 'NATURAL', 'NEXT', 'NO', 'NOT', 'NULLIF',
        'NUMERIC', 'NTH_VALUE', 'NTILE', 'NEW', 'NORMALIZE',
        'OCTET_LENGTH', 'OF', 'ONLY', 'OPEN', 'OPTION', 'ORDER',
        'OUTPUT', 'OVERLAPS', 'OBJECT', 'OLD', 'ORDINALITY', 'OUT',
        'OUTER', 'OCTET_LENGTH', 'OFFSET', 'OMIT', 'OCCURRENCES_REGEX',
        'ONE', 'OVER', 'OVERLAY',
        'PAD', 'PARAMETER', 'PARTIAL', 'PRECISION', 'PREPARE',
        'PRESERVE', 'PRIMARY', 'PRIOR', 'PRIVILEGES', 'PROCEDURE',
        'PUBLIC', 'PATTERN', 'PER', 'PTF', 'PARTITION', 'PERCENT_RANK',
        'PERCENTILE_CONT', 'PERCENTILE_DISC', 'POSITION', 'PERCENT',
        'PERIOD', 'PORTION', 'PRECEDES', 'POSITION_REGEX', 'POWER',
        'RANGE', 'READ', 'REFERENCES', 'RELATIVE', 'RESTRICT', 'RETURN',
        'RETURNS', 'REVOKE', 'RIGHT', 'ROLLBACK', 'ROLLUP', 'READS',
        'ROWS', 'RECURSIVE', 'REF', 'REFERENCING', 'RELEASE', 'REPEAT',
        'REGIONAL', 'RESULT', 'ROW', 'RANK', 'REGR_AVGX', 'REGR_AVGY',
        'REGR_COUNT', 'REGR_INTERCEPT', 'REGR_R2', 'REGR_SLOPE',
        'REGR_SXX', 'REGR_SXY', 'REGR_SYY', 'ROW_NUMBER', 'RUNNING',
        'SCHEMA', 'SCROLL', 'SECOND', 'SECTION', 'SELECT', 'SESSION',
        'SESSION_USER', 'SET', 'SIZE', 'SOME', 'SPACE', 'SPECIFIC',
        'SQL', 'SQLCODE', 'SQLERROR', 'SQLEXCEPTION', 'SQLSTATE',
        'SQLWARNING', 'SUBSTRING', 'SUM', 'SQRT', 'STDDEV_POP',
        'STDDEV_SAMP', 'SUBSTRING_REGEX', 'SUM', 'SEEK', 'SHOW', 'SIN',
        'SINH', 'SUBSET', 'SUBMULTISET', 'SYSTEM_USER', 'SAVEPOINT',
        'SCOPE', 'SEARCH', 'SENSITIVE', 'SETS', 'SIGNAL', 'SIMILAR',
        'SPECIFICTYPE', 'START', 'STATE', 'STATIC', 'SYMMETRIC',
        'SYSTEM',
        'TABLE', 'TEMPORARY', 'THEN', 'TIME', 'TIMESTAMP',
        'TIMEZONE_HOUR', 'TABLESAMPLE', 'TAN', 'TANH',
        'TIMEZONE_MINUTE', 'TO', 'TRAILING', 'TRANSACTION', 'TRANSLATE',
        'TRANSLATION', 'TRIM', 'TRANSLATE', 'TRANSLATE_REGEX',
        'TRUNCATE', 'TREAT', 'TRIGGER', 'TRIM_ARRAY',
        'UNDO', 'UNION', 'UNIQUE', 'UNKNOWN', 'UPDATE', 'UPPER',
        'USAGE', 'USER', 'USING', 'UNDER', 'UNNEST', 'UNTIL', 'UESCAPE',
        'UPPER',
        'VALUE', 'VALUES', 'VARYING', 'VIEW', 'VAR_POP', 'VAR_SAMP',
        'VALUE_OF', 'VERSIONING',
        'WHEN', 'WHENEVER', 'WHERE', 'WITH', 'WORK', 'WRITE', 'WHILE',
        'WINDOW', 'WITHIN', 'WITHOUT', 'WIDTH_BUCKET',
        'YEAR',
        'ZONE'
    ]

    keywords_tsql = [
        'INSTEAD', 'CASE', 'UPDLOCK', 'DATEADD', 'GETDATE',
        'TEXTIMAGE_ON', 'CLUSTERED', 'GENERATED', 'DECLARE', 'SET',
        'BEGIN', 'END', 'BREAK', 'CONTINUE', 'GOTO', 'ELSE', 'RETURN',
        'WAITFOR', 'BULK', 'TRY', 'CATCH'
    ]

    keywords_plsql = [
        '%TYPE', 'BEFORE', 'DECODE', 'DESCRIBE', 'DUAL', 'INTERSECT',
        'MINUS', 'SYSDATE', 'USER'
    ]

    if extension in ['microsoft', 't-sql']:
        keywords += keywords_tsql

    if extension in ['oracle', 'pl-sql']:
        keywords += keywords_plsql

    keyword_tb = CaseInsensitiveListTokenBuilder(keywords, 'keyword', False)

    values = ['TRUE', 'FALSE', 'NULL', 'OFF', 'ON', 'NONE']

    values_tsql = [
        'ALLOW_ROW_LOCKS', 'ALLOW_PAGE_LOCKS', 'ALWAYS',
        'IGNORE_DUP_KEY', 'FILLFACTOR', 'HISTORY_TABLE', 'PAD_INDEX',
        'STATISTICS_NORECOMPUTE', 'SUSER_SNAME', 'SYSTEM_VERSIONING',
        'SYSTEM_TIME'
    ]

    if extension in ['microsoft', 't-sql']:
        values += values_tsql

    values_tb = CaseInsensitiveListTokenBuilder(values, 'value', True)
    operand_types.append('value')

    types = [
        'BIGINT', 'BIT', 'BLOB', 'CHAR', 'CHARACTER', 'CLOB', 'DATE',
        'DECIMAL', 'DOUBLE', 'FLOAT', 'INTEGER', 'NCHAR', 'NCLOB',
        'REAL', 'SMALLINT', 'VARCHAR'
    ]

    types_tsql = [
        'nvarchar', 'bigint', 'datetime', 'datetime2', 'geography'
    ]

    if extension in ['microsoft', 't-sql']:
        types += types_tsql

    type_tb = CaseInsensitiveListTokenBuilder(types, 'type', True)
    # was append('value'): record the type group, matching other examiners
    operand_types.append('type')

    invalid_token_builder = InvalidTokenBuilder()

    # Builder order matters: earlier builders win ties; catch-alls go last.
    tokenbuilders = [
        newline_tb,
        whitespace_tb,
        integer_tb,
        integer_exponent_tb,
        real_tb,
        real_exponent_tb,
        string_tb,
        known_operator_tb,
        terminators_tb,
        groupers_tb,
        keyword_tb,
        values_tb,
        identifier_tb,
        type_tb,
        bracketed_identifier_tb,
        comment_tb,
        self.unknown_operator_tb,
        invalid_token_builder
    ]

    tokenizer = Tokenizer(tokenbuilders)
    tokens = tokenizer.tokenize(code)
    # Merge runs of unrecognized tokens into single tokens.
    tokens = Examiner.combine_adjacent_identical_tokens(
        tokens, 'invalid operator')
    tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid')
    # Re-classify "identifier:" at statement start as a label.
    tokens = Examiner.combine_identifier_colon(
        tokens, ['statement terminator', 'newline'], [],
        ['whitespace', 'comment'])
    self.tokens = tokens
    self.convert_identifiers_to_labels()

    self.calc_statistics()

    tokens = self.source_tokens()
    tokens = Examiner.join_all_lines(tokens)

    # Confidence calculations: each contributes to the language score.
    self.calc_token_confidence()
    self.calc_token_2_confidence()

    num_operators = self.count_my_tokens(['operator', 'invalid operator'])
    if num_operators > 0:
        self.calc_operator_confidence(num_operators)
        allow_pairs = []
        self.calc_operator_2_confidence(tokens, num_operators, allow_pairs)
        self.calc_operator_3_confidence(tokens, num_operators, group_ends, allow_pairs)
        self.calc_operator_4_confidence(tokens, num_operators, group_starts, allow_pairs)

    self.calc_group_confidence(tokens, group_mids)

    # Operand-pair checks are disabled for SQL (kept for reference).
    # operand_types_2 = ['number', 'string', 'symbol']
    # self.calc_operand_n_confidence(tokens, operand_types_2, 2)
    # self.calc_operand_n_confidence(tokens, operand_types, 4)

    self.calc_keyword_confidence()
    self.calc_line_length_confidence(code, self.max_expected_line)
def __init__(self, code, extension):
    """Tokenize *code* as AWK source and compute confidence metrics.

    Builds a prioritized list of token builders (numbers, $-field
    variables, regexes, identifiers, strings, '#' comments, operators),
    tokenizes, then runs the inherited confidence calculations.

    code      -- program text to examine
    extension -- dialect selector; 'gnu' adds gawk built-in variables
    """
    super().__init__()
    # Token groups that may appear as operands (used for confidence calcs).
    operand_types = []

    whitespace_tb = WhitespaceTokenBuilder()
    newline_tb = NewlineTokenBuilder()

    # Numeric literals; None means no digit-separator character.
    integer_tb = IntegerTokenBuilder(None)
    integer_exponent_tb = IntegerExponentTokenBuilder(None)
    real_tb = RealTokenBuilder(False, False, None)
    real_exponent_tb = RealExponentTokenBuilder(False, False, 'E', None)
    operand_types.append('number')

    # AWK positional fields: $ followed by digits (e.g. $1, $2).
    num_variable_tb = PrefixedIntegerTokenBuilder('$', False, '0123456789')
    operand_types.append('variable')

    # Built-in variables; GNU awk adds more when extension == 'gnu'.
    known_variables = [
        'ARGC', 'ARGV', 'ENVIRON', 'FILENAME', 'FS', 'NF', 'NR', 'FNR',
        'OFMT', 'OFS', 'ORS', 'RLENGTH', 'RS', 'RSTART', 'SUBSEP',
    ]
    known_variables_gnu = [
        'ARGIND', 'BINMODE', 'ERRNO', 'FIELDWIDTHS', 'IGNORECASE', 'LINT',
        'PROCINFO', 'TEXTDOMAIN'
    ]
    if extension == 'gnu':
        known_variables += known_variables_gnu
    variable_tb = CaseSensitiveListTokenBuilder(known_variables, 'variable', True)

    regex_tb = RegexTokenBuilder()
    operand_types.append('regex')

    # Identifiers may start with and contain underscores.
    leads = '_'
    extras = '_'
    identifier_tb = IdentifierTokenBuilder(leads, extras)
    operand_types.append('identifier')

    # Note: includes a typographic apostrophe as a quote character.
    quotes = ['"', "'", "’"]
    string_tb = EscapedStringTokenBuilder(quotes, 0)
    operand_types.append('string')

    hash_comment_tb = LeadToEndOfLineTokenBuilder('#', False, 'comment')
    line_continuation_tb = SingleCharacterTokenBuilder('\\', 'line continuation', False)
    terminators_tb = SingleCharacterTokenBuilder(';', 'statement terminator', False)

    known_operators = [
        '=', '+', '-', '*', '/', '%', '^', '++', '--', '==', '+=', '-=',
        '*=', '/=', '%=', '^=', '!=', '>', '>=', '<', '<=', '&&', '||',
        '|', '!', '?', ':', '~', '!~'
    ]
    self.unary_operators = ['+', '-', '!', '~', '++', '--']
    self.postfix_operators = [
        '++', '--',
    ]

    groupers = ['(', ')', ',', '[', ']', '{', '}']
    group_starts = ['(', '[', ',', '{']
    group_mids = [',']
    group_ends = [')', ']', '}']
    groupers_tb = CaseInsensitiveListTokenBuilder(groupers, 'group', False)
    known_operator_tb = CaseSensitiveListTokenBuilder(known_operators, 'operator', False)

    keywords = [
        'BEGIN', 'END', 'if', 'else', 'while', 'do', 'for', 'break',
        'continue', 'delete', 'next', 'nextfile', 'function', 'func', 'exit'
    ]
    keyword_tb = CaseSensitiveListTokenBuilder(keywords, 'keyword', False)

    invalid_token_builder = InvalidTokenBuilder()

    # Order matters: earlier builders take precedence during tokenizing
    # (presumably — determined by Tokenizer, defined elsewhere).
    tokenbuilders = [
        newline_tb, whitespace_tb, line_continuation_tb, terminators_tb,
        integer_tb, integer_exponent_tb, variable_tb, num_variable_tb,
        real_tb, real_exponent_tb, keyword_tb, known_operator_tb,
        groupers_tb, regex_tb, identifier_tb, string_tb, hash_comment_tb,
        self.unknown_operator_tb, invalid_token_builder
    ]

    tokenizer = Tokenizer(tokenbuilders)
    tokens = tokenizer.tokenize(code)
    tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid operator')
    self.tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid')
    self.calc_statistics()

    # Confidence pass works on source tokens with continued lines joined.
    tokens = self.source_tokens()
    tokens = Examiner.join_parens_continued_lines(tokens)
    tokens = Examiner.join_operator_continued_lines(tokens, self.postfix_operators)
    self.calc_token_confidence()
    self.calc_token_2_confidence()

    num_operators = self.count_my_tokens(['operator', 'invalid operator'])
    if num_operators > 0:
        self.calc_operator_confidence(num_operators)
        allow_pairs = []
        self.calc_operator_2_confidence(tokens, num_operators, allow_pairs)
        self.calc_operator_3_confidence(tokens, num_operators, group_ends, allow_pairs)
        self.calc_operator_4_confidence(tokens, num_operators, group_starts, allow_pairs)

    self.calc_group_confidence(tokens, group_mids)

    operand_types_2 = ['number', 'variable', 'regex']
    self.calc_operand_n_confidence(tokens, operand_types_2, 2)
    # self.calc_operand_n_confidence(tokens, operand_types, 4)

    self.calc_keyword_confidence()
    self.calc_paired_blockers_confidence(['{'], ['}'])
    self.calc_line_length_confidence(code, self.max_expected_line)
def __init__(self, code):
    """Tokenize *code* as Perl source and compute confidence metrics.

    Registers Perl-specific builders (sigil identifiers, special
    variables, q/m/s/y/tr quoting and regex forms, prototypes) on top of
    the common number/string/operator builders, then runs the inherited
    confidence calculations.
    """
    super().__init__()
    # Token groups that may appear as operands (used for confidence calcs).
    operand_types = []

    whitespace_tb = WhitespaceTokenBuilder()
    newline_tb = NewlineTokenBuilder()

    # Numeric literals: '_' separates digit groups in integers,
    # "'" in reals (per the builder arguments below).
    integer_tb = IntegerTokenBuilder('_')
    integer_exponent_tb = IntegerExponentTokenBuilder('_')
    hex_integer_tb = PrefixedIntegerTokenBuilder('0x', False, '0123456789abcdefABCDEF')
    real_tb = RealTokenBuilder(False, False, "'")
    real_exponent_tb = RealExponentTokenBuilder(False, False, 'E', "'")
    operand_types.append('number')

    leads = '_'
    extras = '_'
    identifier_tb = IdentifierTokenBuilder(leads, extras)
    # NOTE(review): local name 'perl_identfier_tb' is misspelled but
    # consistent with its use below; left unchanged here.
    perl_identfier_tb = PerlIdentifierTokenBuilder()
    operand_types.append('identifier')

    # Perl punctuation ("special") variables, classified as identifiers.
    specials = [
        '$_', '@_', '$$', '$"', '$(', '$)', '$>', '$<', '$;', '$]', '$[',
        '$&', '$`', "$'", '$+', '@+', '%+', '@-', '%-', '$,', '$.', '$/',
        '$\\', '$|', '$%', '$-', '$:', '$=', '$^', '$~', '$!', '$?', '$@',
        '$#', '$*'
    ]
    specials_tb = CaseInsensitiveListTokenBuilder(specials, 'identifier', True)
    dollar_carat_tb = PerlDollarCaretIdentifierTokenBuilder()
    sigilbrace_tb = PerlSigilBraceTokenBuilder()

    quotes = ['"', "'", "’"]
    string_tb = EscapedStringTokenBuilder(quotes, 0)
    operand_types.append('string')
    q_string_tb = PerlQStringTokenBuilder()

    # Regex literal forms: plain //, m//, s///, y///, tr///.
    regex_tb = RegexTokenBuilder()
    m_regex_tb = MRegexTokenBuilder()
    s_regex_tb = SRegexTokenBuilder()
    y_regex_tb = YRegexTokenBuilder()
    tr_regex_tb = TrRegexTokenBuilder()
    operand_types.append('regex')

    prototype_tb = PerlPrototypeTokenBuilder()
    comment_tb = LeadToEndOfLineTokenBuilder('#', False, 'comment')
    line_continuation_tb = SingleCharacterTokenBuilder('\\', 'line continuation', False)

    directives = ['#line']
    preprocessor_tb = CaseSensitiveListTokenBuilder(directives, 'preprocessor', False)
    terminators_tb = SingleCharacterTokenBuilder(';', 'statement terminator', False)

    known_operators = [
        '+', '-', '*', '**', '/', '%', '=', '==', '!=', '>', '>=', '<',
        '<=', '**=', '+=', '*=', '&=', '&.=', '<<=', '&&=', '-=', '/=',
        '|=', '|.=', '>>=', '||=', '.=', '%=', '^=', '^.=', '//=', 'x=',
        'ne', 'gt', 'ge', 'le', 'lt', 'eq', '!', '&', '|', '~', '<<',
        '>>', '^', '.', '..', '...', '++', '--', '->', '=>', '&&', '||',
        '?', '<->', '<=>', 'and', 'cmp', 'or', 'xor'
    ]
    self.unary_operators = ['+', '-', '*', '!', '&', '~', '++', '--']
    self.postfix_operators = ['++', '--']

    groupers = ['(', ')', ',', '[', ']', '{', '}', ':', '::']
    group_starts = ['(', '[', ',', '{']
    group_mids = [',', ':', '::']
    group_ends = [')', ']', '}']
    groupers_tb = CaseInsensitiveListTokenBuilder(groupers, 'group', False)
    known_operator_tb = CaseSensitiveListTokenBuilder(known_operators, 'operator', False)

    keywords = [
        'bless', 'break', 'continue', 'die', 'do', 'else', 'elsif', 'eval',
        'exit', 'exp', 'for', 'foreach', 'if', 'last', 'lock', 'my',
        'next', 'no', 'our', 'package', 'redo', 'return', 'say', 'sub',
        'taint', 'undef', 'unless', 'until', 'use', 'wantarray', 'while'
    ]
    keyword_tb = CaseSensitiveListTokenBuilder(keywords, 'keyword', True)

    values = ['NULL']
    values_tb = CaseSensitiveListTokenBuilder(values, 'value', True)
    operand_types.append('value')

    invalid_token_builder = InvalidTokenBuilder()

    tokenbuilders = [
        newline_tb, whitespace_tb, line_continuation_tb, terminators_tb,
        integer_tb, integer_exponent_tb, hex_integer_tb, real_tb,
        real_exponent_tb, keyword_tb, values_tb, groupers_tb,
        known_operator_tb, prototype_tb, identifier_tb, perl_identfier_tb,
        specials_tb, dollar_carat_tb, sigilbrace_tb, string_tb,
        q_string_tb, regex_tb, m_regex_tb, s_regex_tb, y_regex_tb,
        tr_regex_tb, preprocessor_tb, comment_tb,
        self.unknown_operator_tb, invalid_token_builder
    ]

    tokenizer = Tokenizer(tokenbuilders)
    # '__END__' presumably marks end-of-code for the tokenizer — defined elsewhere.
    tokens = tokenizer.tokenize(code, ['__END__'])
    tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid operator')
    tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid')
    # Recognize "identifier:" label forms (e.g. loop labels).
    tokens = Examiner.combine_identifier_colon(
        tokens, ['statement terminator', 'newline'], ['{'],
        ['whitespace', 'comment', 'line description'])
    self.tokens = tokens
    self.convert_identifiers_to_labels()
    self.calc_statistics()

    tokens = self.source_tokens()
    tokens = Examiner.join_all_lines(tokens)
    self.calc_token_confidence()
    self.calc_token_2_confidence(['*', ';'])

    num_operators = self.count_my_tokens(['operator', 'invalid operator'])
    if num_operators > 0:
        self.calc_operator_confidence(num_operators)
        allow_pairs = []
        self.calc_operator_2_confidence(tokens, num_operators, allow_pairs)
        self.calc_operator_3_confidence(tokens, num_operators, group_ends, allow_pairs)
        self.calc_operator_4_confidence(tokens, num_operators, group_starts, allow_pairs)

    self.calc_group_confidence(tokens, group_mids)

    operand_types_2 = ['number']
    self.calc_operand_n_confidence(tokens, operand_types_2, 2)
    self.calc_operand_n_confidence(tokens, operand_types, 4)

    self.calc_keyword_confidence()
    self.calc_paired_blockers_confidence(['{'], ['}'])
    self.calc_line_length_confidence(code, self.max_expected_line)
def __init__(self, code):
    """Tokenize *code* as Erlang source and compute confidence metrics.

    Recognizes Erlang '-module'/'-define' directives, '?MACRO' values,
    bit-syntax type specifiers, and both ';' and '.' as statement
    terminators, then runs the inherited confidence calculations.
    """
    super().__init__()
    # Token groups that may appear as operands (used for confidence calcs).
    operand_types = []

    whitespace_tb = WhitespaceTokenBuilder()
    newline_tb = NewlineTokenBuilder()

    # Numeric literals with "'" as the digit-group separator.
    integer_tb = IntegerTokenBuilder("'")
    integer_exponent_tb = IntegerExponentTokenBuilder("'")
    real_tb = RealTokenBuilder(False, False, "'")
    real_exponent_tb = RealExponentTokenBuilder(False, False, 'E', "'")
    operand_types.append('number')

    # Identifiers may start with/contain '_' and '@'.
    leads = '_@'
    extras = '_@'
    identifier_tb = IdentifierTokenBuilder(leads, extras)
    operand_types.append('identifier')

    quotes = ['"', "'", "’"]
    string_tb = EscapedStringTokenBuilder(quotes, 0)
    operand_types.append('string')

    # Erlang comments run from '%' to end of line.
    comment_tb = LeadToEndOfLineTokenBuilder('%', False, 'comment')

    directives = [
        '-include', '-define', '-error', '-warning', '-module', '-compile'
    ]
    c_preprocessor_tb = CaseSensitiveListTokenBuilder(directives, 'preprocessor', False)

    # Both ';' and '.' terminate statements.
    terminators_tb = SingleCharacterTokenBuilder([';', '.'], 'statement terminator', False)

    known_operators = [
        '+', '-', '*', '/', '!', 'and', 'andalso', 'band', 'bnot', 'bor',
        'bsl', 'bsr', 'bxor', 'div', 'not', 'of', 'or', 'orelse', 'xor',
        '++', '--', '->', '=>', '#', ':=', '=', '==', '/=', '=<', '<',
        '>=', '>', '=:=', '=/='
    ]
    self.unary_operators = [
        '+', '-', 'not', '#', '!'
    ]
    self.postfix_operators = []

    # '<<' / '>>' are binary (bit-string) delimiters, treated as groupers.
    groupers = ['(', ')', ',', '[', ']', '{', '}', ':', '<', '>', '<<', '>>', '|', '||']
    # group_starts = ['(', '[', ',', '{', '<', '<<']
    group_ends = [')', ']', '}', '>', '>>']
    group_mids = [',', ':', '|', '||']
    groupers_tb = CaseInsensitiveListTokenBuilder(groupers, 'group', False)
    known_operator_tb = CaseSensitiveListTokenBuilder(known_operators, 'operator', False)

    keywords = [
        'after', 'begin', 'case', 'catch', 'cond', 'end', 'fun', 'if',
        'let', 'receive', 'rem', 'try', 'when', 'ignore'
    ]
    keyword_tb = CaseSensitiveListTokenBuilder(keywords, 'keyword', False)

    # Bit-syntax type specifiers (e.g. <<X:8/integer-unsigned-big>>).
    types = [
        'integer', 'float', 'binary', 'bytes', 'bitstring', 'bits',
        'utf8', 'utf16', 'utf32', 'signed', 'unsigned', 'big', 'little',
        'native'
    ]
    types_tb = CaseSensitiveListTokenBuilder(types, 'type', False)
    operand_types.append('type')

    # Booleans plus predefined '?'-macros.
    values = [
        'true', 'false', '?MODULE', '?MODULE_STRING', '?FILE', '?LINE',
        '?MACHINE', '?FUNCTION_NAME', '?FUNCTION_ARITY', '?OTP_RELEASE'
    ]
    values_tb = CaseSensitiveListTokenBuilder(values, 'value', True)
    operand_types.append('value')

    invalid_token_builder = InvalidTokenBuilder()

    tokenbuilders = [
        newline_tb, whitespace_tb, terminators_tb, integer_tb,
        integer_exponent_tb, real_tb, real_exponent_tb, keyword_tb,
        types_tb, values_tb, groupers_tb, known_operator_tb,
        identifier_tb, string_tb, comment_tb, c_preprocessor_tb,
        self.unknown_operator_tb, invalid_token_builder
    ]

    tokenizer = Tokenizer(tokenbuilders)
    tokens = tokenizer.tokenize(code)
    tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid operator')
    tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid')
    self.tokens = tokens
    self.convert_keywords_to_identifiers()
    self.calc_statistics()

    tokens = self.source_tokens()
    tokens = Examiner.join_all_lines(tokens)
    self.calc_token_confidence()
    self.calc_token_2_confidence(['*', ';'])

    num_operators = self.count_my_tokens(['operator', 'invalid operator'])
    if num_operators > 0:
        self.calc_operator_confidence(num_operators)
        allow_pairs = []
        self.calc_operator_2_confidence(tokens, num_operators, allow_pairs)
        self.calc_operator_3_confidence(tokens, num_operators, group_ends, allow_pairs)
        # self.calc_operator_4_confidence(tokens, num_operators, group_starts, allow_pairs)

    self.calc_group_confidence(tokens, group_mids)

    operand_types_2 = ['number']
    self.calc_operand_n_confidence(tokens, operand_types_2, 2)
    self.calc_operand_n_confidence(tokens, operand_types, 4)

    self.calc_keyword_confidence()
    self.calc_paired_blockers_confidence(['{'], ['}'])
    self.calc_line_length_confidence(code, self.max_expected_line)
def __init__(self, code, tab_size, wide):
    """Tokenize *code* as PL/I source and compute confidence metrics.

    Runs two complete passes — free-format and fixed-format (the latter
    via ``tokenize_code`` with column-aware comment builders) — computes
    statistics/confidences/errors for each, and keeps whichever pass
    produced the higher overall confidence product.

    code     -- program text to examine
    tab_size -- tab expansion width for the fixed-format pass
    wide     -- passed through to ``tokenize_code`` (fixed-format pass)
    """
    super().__init__()
    # Builders are kept on self so both passes can share them.
    self.operand_types = []

    self.whitespace_tb = WhitespaceTokenBuilder()
    self.newline_tb = NewlineTokenBuilder()
    self.integer_tb = IntegerTokenBuilder(None)
    self.integer_exponent_tb = IntegerExponentTokenBuilder(None)
    # PL/I binary literals use a 'B' suffix.
    self.binary_integer_tb = SuffixedIntegerTokenBuilder(['B'], False, None)
    self.real_tb = RealTokenBuilder(False, False, None)
    self.real_exponent_tb = RealExponentTokenBuilder(False, False, 'E', None)
    self.binary_real_tb = SuffixedRealTokenBuilder(True, True, ['B'], False, None)
    self.operand_types.append('number')

    leads = '_'
    extras = '_'
    self.identifier_tb = IdentifierTokenBuilder(leads, extras)
    self.operand_types.append('identifier')

    quotes = ['"', "'", "’"]
    self.string_tb = EscapedStringTokenBuilder(quotes, 0)
    self.operand_types.append('string')

    self.label_tb = PL1LabelTokenBuilder()
    self.operand_types.append('label')
    self.slash_star_comment_tb = SlashStarCommentTokenBuilder()
    self.jcl_tb = JCLTokenBuilder()

    # Preprocessor directives (the '%' forms).
    directives = [
        '%ACTIVATE', '%DEACTIVATE', '%DECLARE', '%DCL', '%DICTIONARY',
        '%DO', '%ELSE', '%END', '%FATAL', '%GOTO', '%IF', '%INCLUDE',
        '%LIST', '%NOLIST', '%PAGE', '%PROCEDURE', '%PROC', '%REPLACE',
        '%RETURN', '%THEN'
    ]
    self.line_continuation_tb = SingleCharacterTokenBuilder('\\', 'line continuation', False)
    self.preprocessor_tb = CaseInsensitiveListTokenBuilder(directives, 'preprocessor', False)
    # Directives that consume the rest of the line.
    self.title_tb = LeadToEndOfLineTokenBuilder('%TITLE', True, 'preprocessor')
    self.subtitle_tb = LeadToEndOfLineTokenBuilder('%SBTTL', True, 'preprocessor')
    self.error_tb = LeadToEndOfLineTokenBuilder('%ERROR', True, 'preprocessor')
    self.warn_tb = LeadToEndOfLineTokenBuilder('%WARN', True, 'preprocessor')
    self.inform_tb = LeadToEndOfLineTokenBuilder('%INFORM', True, 'preprocessor')
    self.terminators_tb = SingleCharacterTokenBuilder(';', 'statement terminator', False)

    # PL/I spells "not" as '¬', '^', or '~' depending on the platform.
    known_operators = [
        '+', '-', '*', '/', '**', '>', '<', '=', '>=', '<=', '¬>', '¬<',
        '¬=', '^>', '^<', '^=', '^', '~>', '~<', '~=', '~', '¬', '&',
        '&:', '|', '|:', '||', '!', '!:', '!!', ':'
    ]
    self.unary_operators = ['+', '-', '^', '~', '¬']
    self.postfix_operators = []

    groupers = ['(', ')', ',', '[', ']', '{', '}']
    self.group_starts = ['(', '[', ',', '{']
    self.group_mids = [',']
    self.group_ends = [')', ']', '}']
    self.groupers_tb = CaseInsensitiveListTokenBuilder(groupers, 'group', False)
    self.known_operator_tb = CaseSensitiveListTokenBuilder(known_operators, 'operator', False)

    keywords = [
        'ALLOCATE', 'ALLOC', 'BEGIN', 'CALL', 'CLOSE', 'DECLARE', 'DCL',
        'DO', 'ELSE', 'END', 'FORMAT', 'FREE', 'GET', 'GOTO', 'GO TO',
        'IF', 'LEAVE', 'ON', 'OPEN', 'OTHERWISE', 'OTHER', 'PROCEDURE',
        'PROC', 'PUT', 'READ', 'RETURN', 'REVERT', 'REWRITE', 'SELECT',
        'SIGNAL', 'STOP', 'THEN', 'WHEN', 'WRITE'
    ]
    self.keyword_tb = CaseInsensitiveListTokenBuilder(keywords, 'keyword', False)

    attributes = [
        'ALIGNED', 'ANY', 'AREA', 'BASED', 'BUILTIN', 'CONDITION',
        'COND', 'CONTROLLED', 'CTL', 'DEFINED', 'DEF', 'DIRECT', 'ENTRY',
        'ENVIRONMENT', 'ENV', 'EXTERNAL', 'EXT', 'FILE', 'GLOBALDEF',
        'GLOBALREF', 'INITIAL', 'INIT', 'INPUT', 'INTERNAL',
        # FIX: a missing comma between 'INT' and 'KEYED' made Python
        # concatenate them into the single bogus attribute 'INTKEYED',
        # so neither INT nor KEYED was ever recognized.
        'INT', 'KEYED',
        'LABEL', 'LIKE', 'LIST', 'MEMBER', 'NONVARYING',
        'NONVAR', 'OPTIONAL', 'OPTIONS', 'OUTPUT', 'PARAMETER', 'PARM',
        'PICTURE', 'PIC', 'POSITION', 'POS', 'PRECISION', 'PREC',
        'PRINT', 'READONLY', 'RECORD', 'REFER', 'RETURNS', 'SEQUENTIAL',
        'SEQL', 'STATIC', 'STREAM', 'STRUCTURE', 'TRUNCATE', 'UNALIGNED',
        'UNAL', 'UNION', 'UPDATE', 'VARIABLE', 'VARYING', 'VAR'
    ]
    self.attributes_tb = CaseInsensitiveListTokenBuilder(attributes, 'attribute', False)

    functions = [
        'ABS', 'ACOS', 'ACTUALCOUNT', 'ADD', 'ADDR', 'ADDREL',
        'ALLOCATION', 'ALLOCN', 'ASIN', 'ATAN', 'ATAND', 'ATANH',
        'AUTOMATIC', 'AUTO', 'BINARY', 'BIN', 'BIT', 'BOOL', 'BYTE',
        'BYTESIZE', 'CEIL', 'CHARACTER', 'CHAR', 'COLLATE', 'COPY',
        'COS', 'COSD', 'COSH', 'DATE', 'DATETIME', 'DECIMAL', 'DEC',
        'DECODE', 'DESCRIPTOR', 'DESC', 'DIMENSION', 'DIM', 'DIVIDE',
        'EMPTY', 'ENCODE', 'ERROR', 'EVERY', 'EXP', 'FIXED', 'FLOAT',
        'FLOOR', 'HBOUND', 'HIGH', 'INDEX', 'INFORM', 'INT', 'LBOUND',
        'LENGTH', 'LINE', 'LINENO', 'LOG', 'LOG10', 'LOG2', 'LOW',
        'LTRIM', 'MAX', 'MAXLENGTH', 'MIN', 'MOD', 'MULTIPLY', 'NULL',
        'OFFSET', 'ONARGSLIST', 'ONCHAR', 'ONCODE', 'ONFILE', 'ONKEY',
        'ONSOURCE', 'PAGENO', 'POINTER', 'PTR', 'POSINT', 'PRESENT',
        'PROD', 'RANK', 'REFERENCE', 'REVERSE', 'ROUND', 'RTRIM',
        'SEARCH', 'SIGN', 'SIN', 'SIND', 'SINH', 'SIZE', 'SOME', 'SQRT',
        'STRING', 'SUBSTR', 'SUBTRACT', 'SUM', 'TAN', 'TAND', 'TANH',
        'TIME', 'TRANSLATE', 'TRIM', 'TRUNC', 'UNSPEC', 'VALID',
        'VALUE', 'VAL', 'VARIANT', 'VERIFY', 'WARN'
    ]
    self.function_tb = CaseInsensitiveListTokenBuilder(functions, 'function', True)

    # Format items for GET/PUT EDIT.  NOTE(review): this builder is
    # created but never added to either tokenbuilder list below —
    # presumably intentional; confirm before wiring it in.
    format_items = [
        'A', 'B', 'B1', 'B2', 'B3', 'B4', 'COLUMN', 'COL', 'E', 'F',
        'P', 'R', 'TAB', 'X'
    ]
    self.format_item_tb = CaseSensitiveListTokenBuilder(format_items, 'format', True)
    self.operand_types.append('format')

    options = [
        'APPEND', 'BACKUP_DATE', 'BATCH', 'BLOCK_BOUNDARY_FORMAT',
        'BLOCK_IO', 'BLOCK_SIZE', 'BUCKET_SIZE', 'BY',
        'CANCEL_CONTROL_O', 'CARRIAGE_RETURN_FORMAT', 'CONTIGUOUS',
        'CONTIGUOUS_BEST_TRY', 'CREATION_DATE', 'CURRENT_POSITION',
        'DEFAULT_FILE_NAME', 'DEFERRED_WRITE', 'DELETE', 'EDIT',
        'EXPIRATION_DATE', 'EXTENSION_SIZE', 'FAST_DELETE', 'FILE_ID',
        'FILE_ID_TO', 'FILE_SIZE', 'FIXED_CONTROL_FROM',
        'FIXED_CONTROL_SIZE', 'FIXED_CONTROL_SIZE_TO',
        'FIXED_CONTROL_TO', 'FIXED_LENGTH_RECORDS', 'FROM',
        'GROUP_PROTECTION', 'IDENT', 'IGNORE_LINE_MARKS', 'IN',
        'INDEXED', 'INDEX_NUMBER', 'INITIAL_FILL', 'INTO', 'KEY',
        'KEYFROM', 'KEYTO', 'LINESIZE', 'LOCK_ON_READ', 'LOCK_ON_WRITE',
        'MAIN PROCEDURE', 'MANUAL_UNLOCKING', 'MATCH_GREATER',
        'MATCH_GREATER_EQUAL', 'MATCH_NEXT', 'MATCH_NEXT_EQUAL',
        'MAXIMUM_RECORD_NUMBER', 'MAXIMUM_RECORD_SIZE',
        'MULTIBLOCK_COUNT', 'MULTIBUFFER_COUNT', 'NOLOCK',
        'NONEXISTENT_RECORD', 'NONRECURSIVE', 'NORESCAN', 'NO_ECHO',
        'NO_FILTER', 'NO_SHARE', 'OWNER_GROUP', 'OWNER_ID',
        'OWNER_MEMBER', 'OWNER_PROTECTION', 'PAGE', 'PAGESIZE',
        'PRINTER_FORMAT', 'PROMPT', 'PURGE_TYPE_AHEAD', 'READ_AHEAD',
        'READ_CHECK', 'READ_REGARDLESS', 'RECORD_ID',
        'RECORD_ID_ACCESS', 'RECORD_ID_TO', 'RECURSIVE', 'REPEAT',
        'RESCAN', 'RETRIEVAL_POINTERS', 'REVISION_DATE',
        'REWIND_ON_CLOSE', 'REWIND_ON_OPEN', 'SCALARVARYING',
        'SET READ', 'SHARED_READ', 'SHARED_WRITE', 'SKIP', 'SNAP',
        'SPOOL', 'STATEMENT', 'SUPERSEDE', 'SYSTEM',
        'SYSTEM_PROTECTION', 'TEMPORARY', 'TIMEOUT_PERIOD', 'TITLE',
        'TO', 'UNDERFLOW', 'UFL', 'UNTIL', 'USER_OPEN',
        'WAIT_FOR_RECORD', 'WHILE', 'WORLD_PROTECTION', 'WRITE_BEHIND',
        'WRITE_CHECK'
    ]
    self.options_tb = CaseInsensitiveListTokenBuilder(options, 'option', False)

    conditions = [
        'ANYCONDITION', 'CONVERSION', 'CONV', 'ENDFILE', 'ENDPAGE',
        'FINISH', 'FIXEDOVERFLOW', 'FOFL', 'OVERFLOW', 'OFL', 'STORAGE',
        'STRINGRANGE', 'STRG', 'SUBSCRIPTRANGE', 'SUBRG',
        'UNDEFINEDFILE', 'UNDF', 'VAXCONDITION', 'ZERODIVIDE', 'ZDIV'
    ]
    self.conditions_tb = CaseInsensitiveListTokenBuilder(conditions, 'condition', False)

    subroutines = [
        'DISPLAY', 'EXTEND', 'FLUSH', 'NEXT_VOLUME', 'RELEASE',
        'RESIGNAL', 'REWIND', 'SPACEBLOCK'
    ]
    self.subroutines_tb = CaseInsensitiveListTokenBuilder(subroutines, 'subroutine', False)

    types = [
        'FIXED', 'BINARY', 'FLOAT', 'DECIMAL', 'BIT', 'CHARACTER', 'PICTURE'
    ]
    self.types_tb = CaseInsensitiveListTokenBuilder(types, 'type', True)
    self.operand_types.append('type')

    values = ['SYSIN', 'SYSPRINT']
    self.values_tb = CaseInsensitiveListTokenBuilder(values, 'value', True)
    self.operand_types.append('value')

    invalid_token_builder = InvalidTokenBuilder()

    # ---- pass 1: tokenize as free-format ----
    tokenbuilders_free = [
        self.newline_tb, self.whitespace_tb, self.line_continuation_tb,
        self.terminators_tb, self.integer_tb, self.integer_exponent_tb,
        self.binary_integer_tb, self.real_tb, self.real_exponent_tb,
        self.binary_real_tb, self.keyword_tb, self.function_tb,
        self.attributes_tb, self.options_tb, self.conditions_tb,
        self.subroutines_tb, self.types_tb, self.values_tb,
        self.groupers_tb, self.known_operator_tb, self.identifier_tb,
        self.string_tb, self.label_tb, self.slash_star_comment_tb,
        self.preprocessor_tb, self.title_tb, self.subtitle_tb,
        self.error_tb, self.warn_tb, self.inform_tb, self.jcl_tb,
        self.unknown_operator_tb, invalid_token_builder
    ]
    tokenizer_free = Tokenizer(tokenbuilders_free)
    tokens_free = tokenizer_free.tokenize(code)
    tokens_free = Examiner.combine_adjacent_identical_tokens(tokens_free, 'invalid operator')
    tokens_free = Examiner.combine_adjacent_identical_tokens(tokens_free, 'invalid')
    self.tokens = tokens_free
    self.calc_statistics()
    # Stash the free-format results; the confidence calcs below mutate
    # self.statistics / self.confidences / self.errors in place.
    statistics_free = self.statistics
    self.statistics = {}

    tokens = self.source_tokens()
    tokens = Examiner.join_all_lines(tokens)
    self.calc_token_confidence()
    self.calc_token_2_confidence()
    num_operators = self.count_my_tokens(['operator', 'invalid operator'])
    if num_operators > 0:
        self.calc_operator_confidence(num_operators)
        allow_pairs = []
        self.calc_operator_2_confidence(tokens, num_operators, allow_pairs)
        self.calc_operator_3_confidence(tokens, num_operators, self.group_ends, allow_pairs)
        self.calc_operator_4_confidence(tokens, num_operators, self.group_starts, allow_pairs)
    self.calc_group_confidence(tokens, self.group_mids)
    operand_types_2 = ['number', 'symbol']
    self.calc_operand_n_confidence(tokens, operand_types_2, 2)
    self.calc_operand_n_confidence(tokens, self.operand_types, 4)
    self.calc_keyword_confidence()
    self.calc_paired_blockers_confidence(['{'], ['}'])
    self.calc_line_length_confidence(code, self.max_expected_line)
    confidences_free = self.confidences
    self.confidences = {}
    errors_free = self.errors
    self.errors = []

    # ---- pass 2: tokenize as fixed-format ----
    tokenbuilders_fixed = [
        self.newline_tb, self.whitespace_tb, self.line_continuation_tb,
        self.terminators_tb, self.integer_tb, self.integer_exponent_tb,
        self.binary_integer_tb, self.real_tb, self.real_exponent_tb,
        self.binary_real_tb, self.keyword_tb, self.function_tb,
        self.attributes_tb, self.options_tb, self.conditions_tb,
        self.subroutines_tb, self.types_tb, self.values_tb,
        self.groupers_tb, self.known_operator_tb, self.identifier_tb,
        self.string_tb, self.label_tb, self.slash_star_comment_tb,
        self.preprocessor_tb, self.title_tb, self.subtitle_tb,
        self.error_tb, self.warn_tb, self.inform_tb, self.jcl_tb,
        self.unknown_operator_tb, invalid_token_builder
    ]

    # Column-aware comment builders: tokenizer 1 is used outside block
    # comments, tokenizer 2 inside them (see tokenize_code).
    comment_start_tb = PL1CommentStartTokenBuilder()
    comment_middle_tb = PL1CommentMiddleTokenBuilder()
    comment_end_tb = PL1CommentEndTokenBuilder()

    type1_tokenbuilders = [comment_start_tb]
    tokenbuilders_fixed_1 = tokenbuilders_fixed + type1_tokenbuilders + [invalid_token_builder]
    tokenizer_fixed_1 = Tokenizer(tokenbuilders_fixed_1)

    type2_tokenbuilders = [comment_start_tb, comment_middle_tb, comment_end_tb]
    tokenbuilders_fixed_2 = tokenbuilders_fixed + type2_tokenbuilders + [invalid_token_builder]
    tokenizer_fixed_2 = Tokenizer(tokenbuilders_fixed_2)

    tokens_fixed = self.tokenize_code(code, tab_size, tokenizer_fixed_1, tokenizer_fixed_2, wide)
    tokens_fixed = Examiner.combine_adjacent_identical_tokens(tokens_fixed, 'invalid operator')
    tokens_fixed = Examiner.combine_adjacent_identical_tokens(tokens_fixed, 'invalid')
    tokens_fixed = Examiner.combine_adjacent_identical_tokens(tokens_fixed, 'whitespace')
    tokens_fixed = self.convert_broken_comments_to_comments(tokens_fixed)
    self.tokens = tokens_fixed
    self.calc_statistics()
    statistics_fixed = self.statistics
    self.statistics = {}

    tokens = self.source_tokens()
    tokens = Examiner.join_all_lines(tokens)
    self.calc_token_confidence()
    self.calc_token_2_confidence()
    num_operators = self.count_my_tokens(['operator', 'invalid operator'])
    if num_operators > 0:
        self.calc_operator_confidence(num_operators)
        allow_pairs = []
        self.calc_operator_2_confidence(tokens, num_operators, allow_pairs)
        self.calc_operator_3_confidence(tokens, num_operators, self.group_ends, allow_pairs)
        self.calc_operator_4_confidence(tokens, num_operators, self.group_starts, allow_pairs)
    self.calc_group_confidence(tokens, self.group_mids)
    operand_types_2 = ['number', 'symbol']
    self.calc_operand_n_confidence(tokens, operand_types_2, 2)
    self.calc_operand_n_confidence(tokens, self.operand_types, 4)
    self.calc_keyword_confidence()
    self.calc_paired_blockers_confidence(['{'], ['}'])
    self.calc_line_length_confidence(code, self.max_expected_line)
    confidences_fixed = self.confidences
    self.confidences = {}
    errors_fixed = self.errors
    self.errors = []

    # Overall confidence of each pass is the product of its individual
    # confidence factors; an empty factor set scores zero.
    confidence_free = 1.0
    if len(confidences_free) == 0:
        confidence_free = 0.0
    else:
        for key in confidences_free:
            factor = confidences_free[key]
            confidence_free *= factor

    confidence_fixed = 1.0
    if len(confidences_fixed) == 0:
        confidence_fixed = 0.0
    else:
        for key in confidences_fixed:
            factor = confidences_fixed[key]
            confidence_fixed *= factor

    # Keep the results of whichever interpretation scored higher
    # (ties favor free-format).
    if confidence_fixed > confidence_free:
        self.tokens = tokens_fixed
        self.statistics = statistics_fixed
        self.confidences = confidences_fixed
        self.errors = errors_fixed
    else:
        self.tokens = tokens_free
        self.statistics = statistics_free
        self.confidences = confidences_free
        self.errors = errors_free
def __call__(self, buffer, cursor, showCandidates):
    """Attempt tab-completion of the console input in *buffer* at *cursor*.

    Returns whatever __findCompletion returns on success, or the
    ``(False, buffer, cursor)`` failure triple when no completion
    applies.  Two cases are handled: the text before the cursor is a
    command name (completed against the debugger's command list), or it
    is a script-expression argument, in which case the expression to the
    left of the cursor is parsed *backwards* to find its base object and
    the trailing identifier fragment is completed against that object's
    (and its prototype chain's) property names.
    """
    failure = (False, buffer, cursor)
    if not buffer or cursor == 0:
        return failure
    before = buffer[:cursor]
    # NOTE(review): 'after' is computed but never used below.
    after = buffer[cursor:]
    # Case 1: completing a debugger command name.
    if re_command.match(before):
        return self.__findCompletion(buffer, cursor, before,
                                     self.__debugger.getCommandNames(),
                                     showCandidates, True)
    runtime = self.__debugger.getCurrentRuntime()
    match = re_property.match(before)
    # Case 2: completing a property access inside a script argument.
    if runtime and match and self.__argumentIsScript(buffer):
        # NOTE(review): group numbering of re_property (defined elsewhere):
        # 1 = command, 3 = expression before the identifier, 4 = the
        # identifier fragment being completed — confirm against the regex.
        command = match.group(1)
        expression = match.group(3)
        identifier = match.group(4)
        # inScope True means "complete against the current scope chain";
        # a trailing '.' means we must evaluate the base expression first.
        inScope = True
        base = ""
        parens = []  # stack of expected openers for ')' / ']' seen so far
        if expression and expression.strip()[-1] == '.':
            expression = expression.strip()[:-1]
            inScope = False
        # Scan the expression right-to-left, accumulating the minimal
        # base expression into 'base' until the start is reached or an
        # unparseable character is found.
        while expression:
            match = re_whitespace_at_end.match(expression)
            if match:
                base = match.group(2) + base
                expression = expression[:match.start(2)]
                if not expression:
                    break
            match = re_identifier_at_end.match(expression)
            if match:
                base = match.group(2) + base
                expression = expression[:match.start(2)]
                continue
            ch = expression[-1]
            if ch in (')', ']'):
                # Remember which opener must match this closer.
                parens.append({ ')': '(', ']': '[' }[ch])
                base = ch + base
                expression = expression[:-1]
                continue
            if ch in ('(', '['):
                # Opener without a matching closer on the stack: the
                # cursor sits inside an open group — stop scanning.
                if not parens or parens[-1] != ch:
                    break
                parens.pop(-1)
                base = ch + base
                expression = expression[:-1]
                continue
            if expression[-1] == '.':
                base = '.' + base
                expression = expression[:-1]
                continue
            # Operators only extend the base while inside brackets
            # (outside brackets they delimit the base expression).
            if len(expression) >= 3 and expression[-3:] in operators3:
                if not parens:
                    break
                base = expression[-3:] + base
                expression = expression[:-3]
                continue
            if len(expression) >= 2 and expression[-2:] in operators2:
                if not parens:
                    break
                base = expression[-2:] + base
                expression = expression[:-2]
                continue
            if len(expression) >= 1 and expression[-1:] in operators1:
                if not parens:
                    break
                base = expression[-1:] + base
                expression = expression[:-1]
                continue
            # Unrecognized character: give up.
            return failure
        # Only proceed when every bracket seen was balanced.
        if not parens:
            if inScope:
                object = None
            else:
                # Evaluate the base expression to obtain the object whose
                # properties we complete against.
                try:
                    result = runtime.eval(base, True, True)
                except KeyboardInterrupt:
                    return failure
                if result and result.isObject():
                    object = result.getValue()
                else:
                    return failure
            examiner = Examiner(runtime, object, 0xffff, 0, False, False, [], inScope)
            try:
                examiner.update()
            except KeyboardInterrupt:
                return failure
            if inScope:
                scope = examiner.getScope()
            else:
                scope = [object]
            # Collect property names from each object and its full
            # prototype chain.
            propertyNames = []
            for object in scope:
                while object:
                    propertyNames.extend([name for name, value in examiner.getObjectProperties(object)])
                    object = object.getPrototype()
            return self.__findCompletion(buffer, cursor, identifier,
                                         propertyNames, showCandidates, False)
    return failure
class Assistant(object):
    """WeChat (itchat) chat bot that relays quiz questions.

    Wires together an Examiner (quiz engine), a Secretary (question
    authoring) and a Cleaner (question maintenance), logs in to WeChat,
    and routes commands/messages from a single peer contact ('yq').
    """

    def __init__(self):
        # Logger first: every later step logs through self.logger.
        self.initLogger()
        self.examiner = Examiner()
        self.sec = Secretary()
        self.clr = Cleaner()
        self.login()
        self.init()

    def init(self):
        """Reset to the idle state and re-initialize collaborators."""
        # Current mode: 'ProcNull' (idle), 'ProcExam', or 'ProcCreateQuestions'.
        self.proc = 'ProcNull'
        self.examiner.init()
        self.sec.clear()
        self.sendYq('System init!')

    def login(self):
        """Log in to WeChat and locate the peer contact to talk to."""
        itchat.auto_login()
        #itchat.auto_login(hotReload=True)
        self.yq = itchat.search_friends(name='陈有钱')[0]

    def initLogger(self):
        """Configure INFO-level logging to both log.txt and the console."""
        self.logger = logging.getLogger('Assistant')
        self.logger.setLevel(level=logging.INFO)
        # One Formatter instance can safely serve both handlers
        # (the original built two identical ones).
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(filename)s - %(levelname)s - %(funcName)s - %(message)s'
        )
        handler = logging.FileHandler("log.txt")
        handler.setLevel(logging.INFO)
        handler.setFormatter(formatter)
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        console.setFormatter(formatter)
        self.logger.addHandler(console)
        self.logger.addHandler(handler)

    def sendYq(self, msg):
        """Send *msg* to the peer contact, logging it; empty messages are dropped."""
        if msg != '':
            self.logger.info('sendYq: ' + msg)
            self.yq.send(msg)

    def command(self, cmd):
        """Dispatch one control command received from the peer."""
        self.logger.info('rec cmd: ' + cmd)
        if cmd == 'exam':
            self.proc = 'ProcExam'  # start an exam session
            self.sendYq('Begin exam:')
            self.sendYq(self.examiner.ask())
        elif cmd == 'createQuestions':
            self.sendYq('Begin creating questions')
            self.proc = 'ProcCreateQuestions'  # enter question-editing mode
        elif cmd == 'init':
            self.init()
        elif cmd.startswith('deleteQuestions '):
            # FIX: slice off the prefix instead of str.replace, which
            # would also mangle later occurrences of the prefix inside
            # the argument itself.
            self.clr.deleteAQuestion(cmd[len('deleteQuestions '):])
        elif cmd.startswith('questionRecord '):
            self.sendYq(self.clr.quesRecord(cmd[len('questionRecord '):]))
        else:
            # FIX: corrected the garbled error text ("don`t known" ->
            # "don't know"); same message is logged and sent.
            message = "Sorry, I don't know the meaning of: " + cmd
            self.logger.error(message)
            self.yq.send(message)

    def exam(self):
        """Send the next question if one is available; track exam status."""
        q = self.examiner.ask()
        if 'Examiner: There is NO question to ask!' != q:
            # FIX: send the question already fetched; the original called
            # ask() a second time, skipping/consuming an extra question.
            self.sendYq(q)
            self.status = 'EXAM'
        else:
            self.status = 'NULL'

    def process(self, msg):
        """Route an ordinary (non-command) message to the active mode's handler."""
        if self.proc == 'ProcExam':
            self.sendYq(self.examiner.recMsg(msg))
        elif self.proc == 'ProcCreateQuestions':
            self.sendYq(self.sec.recMsg(msg))
def __init__(self, code):
    """Tokenize Eiffel source `code` and compute the confidence statistics
    this examiner uses for language identification.

    Builds one token builder per lexical category, runs the tokenizer, then
    invokes the inherited calc_*_confidence passes on the resulting stream.
    """
    super().__init__()

    # Token groups that may act as operands; appended to as builders are defined.
    operand_types = []

    whitespace_tb = WhitespaceTokenBuilder()
    newline_tb = NewlineTokenBuilder()

    # Numeric literals — "'" is passed as the digit-separator character.
    integer_tb = IntegerTokenBuilder("'")
    integer_exponent_tb = IntegerExponentTokenBuilder("'")
    hex_integer_tb = PrefixedIntegerTokenBuilder('0x', False, '0123456789abcdefABCDEF_')
    binary_integer_tb = PrefixedIntegerTokenBuilder('0b', False, '01_')
    octal_integer_tb = PrefixedIntegerTokenBuilder('0c', False, '01234567_')
    real_tb = RealTokenBuilder(False, False, "'")
    real_exponent_tb = RealExponentTokenBuilder(False, False, 'E', "'")
    operand_types.append('number')

    # Identifiers may lead with and contain underscores.
    leads = '_'
    extras = '_'
    identifier_tb = IdentifierTokenBuilder(leads, extras)
    operand_types.append('identifier')

    # NOTE(review): the quote list includes a typographic quote '’',
    # presumably to tolerate word-processor-mangled source — confirm intent.
    quotes = ['"', "'", "’"]
    string_tb = EscapedStringTokenBuilder(quotes, 0)
    operand_types.append('string')

    # Eiffel comments run from '--' to end of line.
    comment_tb = LeadToEndOfLineTokenBuilder('--', True, 'comment')

    known_operators = [
        ':=', '=', '/=', '<', '>', '<=', '>=',
        '+', '-', '*', '/', '//', '\\\\', '^',
        '|..|', '..',
        'and', 'or', 'xor', 'not', 'and then', 'or else', 'implies',
        '.', '@', '#', '|', '&'
    ]

    self.unary_operators = ['+', '-', 'not', '@', '#', '|', '&']
    self.postfix_operators = []

    groupers = ['(', ')', ',', '[', ']', '{', '}', ':', ';']
    group_starts = ['(', '[', ',', '{']
    group_mids = [',', ';', ':']
    group_ends = [')', ']', '}']

    groupers_tb = CaseInsensitiveListTokenBuilder(groupers, 'group', False)
    known_operator_tb = CaseSensitiveListTokenBuilder(
        known_operators, 'operator', False)

    keywords = [
        'across', 'agent', 'alias', 'all', 'as', 'assign', 'attribute',
        'check', 'class', 'convert', 'create', 'debug', 'deferred', 'do',
        'else', 'elseif', 'end', 'ensure', 'expanded', 'export', 'external',
        'feature', 'from', 'frozen', 'if', 'implies', 'inherit', 'inspect',
        'invariant', 'like', 'local', 'loop', 'note', 'obsolete', 'old',
        'once', 'only', 'redefine', 'rename', 'require', 'rescue', 'retry',
        'select', 'separate', 'then', 'undefine', 'until', 'variant', 'when'
    ]

    keyword_tb = CaseSensitiveListTokenBuilder(keywords, 'keyword', False)

    types = ['Current', 'Precursor', 'Result', 'Void', 'TUPLE']
    types_tb = CaseSensitiveListTokenBuilder(types, 'type', True)
    operand_types.append('type')

    values = ['False', 'True', '?']
    values_tb = CaseSensitiveListTokenBuilder(values, 'value', True)
    operand_types.append('value')

    invalid_token_builder = InvalidTokenBuilder()

    # Builder list handed to the Tokenizer.  NOTE(review): list order
    # presumably establishes matching priority — confirm in Tokenizer.
    tokenbuilders = [
        newline_tb, whitespace_tb,
        integer_tb, integer_exponent_tb, hex_integer_tb, binary_integer_tb,
        octal_integer_tb, real_tb, real_exponent_tb,
        keyword_tb, types_tb, values_tb,
        groupers_tb, known_operator_tb,
        identifier_tb, string_tb, comment_tb,
        self.unknown_operator_tb, invalid_token_builder
    ]

    tokenizer = Tokenizer(tokenbuilders)
    tokens = tokenizer.tokenize(code)
    # Merge runs of invalid tokens so they count once in the statistics.
    tokens = Examiner.combine_adjacent_identical_tokens(
        tokens, 'invalid operator')
    self.tokens = Examiner.combine_adjacent_identical_tokens(
        tokens, 'invalid')

    self.calc_statistics()

    tokens = self.source_tokens()
    tokens = Examiner.join_all_lines(tokens)

    self.calc_token_confidence()
    self.calc_token_2_confidence()

    num_operators = self.count_my_tokens(['operator', 'invalid operator'])
    if num_operators > 0:
        self.calc_operator_confidence(num_operators)
        allow_pairs = []
        self.calc_operator_2_confidence(tokens, num_operators, allow_pairs)
        self.calc_operator_3_confidence(tokens, num_operators, group_ends,
                                        allow_pairs)
        self.calc_operator_4_confidence(tokens, num_operators, group_starts,
                                        allow_pairs)

    self.calc_group_confidence(tokens, group_mids)

    operand_types_2 = ['number']
    self.calc_operand_n_confidence(tokens, operand_types_2, 2)
    self.calc_operand_n_confidence(tokens, operand_types, 4)

    self.calc_keyword_confidence()
    self.calc_paired_blockers_confidence(['{'], ['}'])
    self.calc_line_length_confidence(code, self.max_expected_line)
def __init__(self, code, block_comment_limit):
    """Tokenize Rust source `code` and compute the confidence statistics
    this examiner uses for language identification.

    `block_comment_limit` bounds nesting of /* */ comments (Rust block
    comments nest) and is passed straight to NestedCommentTokenBuilder.
    """
    super().__init__()

    # Token groups that may act as operands; appended to as builders are defined.
    operand_types = []

    whitespace_tb = WhitespaceTokenBuilder()
    newline_tb = NewlineTokenBuilder()

    line_continuation_tb = SingleCharacterTokenBuilder(
        '\\', 'line continuation', False)

    # Numeric literals — '_' is the digit-separator character.
    integer_tb = IntegerTokenBuilder('_')
    integer_exponent_tb = IntegerExponentTokenBuilder('_')
    real_tb = RealTokenBuilder(False, True, '_')
    real_exponent_tb = RealExponentTokenBuilder(False, True, 'E', '_')
    octal_integer_tb = PrefixedIntegerTokenBuilder('0o', True, '01234567_')
    hex_integer_tb = PrefixedIntegerTokenBuilder(
        '0x', True, '0123456789ABCDEFabcdef_')
    binary_integer_tb = PrefixedIntegerTokenBuilder('0b', True, '01_')
    operand_types.append('number')

    # Identifiers may lead with and contain underscores.
    leads = '_'
    extras = '_'
    identifier_tb = IdentifierTokenBuilder(leads, extras)
    operand_types.append('identifier')

    # Rust lifetimes ('a, 'static) look like identifiers led by an apostrophe.
    lifetime_tb = IdentifierTokenBuilder("'", extras)
    attribute_tb = RustAttributeTokenBuilder()

    # Plain, byte (b"..."), and raw (r"..."/r#"..."#) string forms.
    quotes = ['"']
    string_tb = EscapedStringTokenBuilder(quotes, 10)
    bstring_tb = PrefixedStringTokenBuilder('b', True, quotes)
    rstring_tb = RustRawStringTokenBuilder()
    operand_types.append('string')

    char_tb = SingleCharStringTokenBuilder()

    class_type_tb = ClassTypeTokenBuilder()
    operand_types.append('class')

    slash_slash_comment_tb = SlashSlashCommentTokenBuilder()
    slash_star_comment_tb = NestedCommentTokenBuilder(
        '/*', '*/', block_comment_limit)

    terminators_tb = SingleCharacterTokenBuilder(';', 'statement terminator',
                                                 False)

    # BUG FIX: '|-' replaced with '|=' (Rust's bitwise-or assignment),
    # matching the other compound assignments in this list.
    known_operators = [
        '+', '-', '*', '/', '%', '^', '!', '&', '|', '&&', '||', '<<', '>>',
        '+=', '-=', '*=', '/=', '%=', '^=', '&=', '|=', '<<=', '>>=',
        '=', '==', '!=', '>', '<', '>=', '<=',
        '@', '.', '..', '...', '->', '#', '$', '?', 'in', '&mut'
    ]

    self.unary_operators = ['+', '-', '*', '!', '&', '&mut']
    self.postfix_operators = []

    groupers = ['(', ')', ',', '[', ']', '{', '}', ':', '::', '=>']
    group_starts = ['(', '[', ',', '{']
    group_mids = [',', ':', '::', '=>']
    group_ends = [')', ']', '}', ')|']

    groupers_tb = CaseInsensitiveListTokenBuilder(groupers, 'group', False)
    known_operator_tb = CaseSensitiveListTokenBuilder(
        known_operators, 'operator', False)

    # BUG FIX: the original read "'crate' 'else'" — a missing comma made
    # Python concatenate the two literals into the bogus keyword
    # 'crateelse', so neither 'crate' nor 'else' was ever recognized.
    keywords = [
        'as', 'break', 'const', 'continue', 'crate', 'else', 'enum',
        'extern', 'fn', 'for', 'if', 'impl', 'let', 'loop', 'match', 'mod',
        'move', 'mut', 'pub', 'ref', 'return', 'static', 'struct', 'trait',
        'type', 'unsafe', 'use', 'where', 'while'
    ]

    # NOTE(review): 'static' appears both above and in keywords_2018;
    # harmless duplicate for a membership list, kept as-is.
    keywords_2018 = ['dyn', 'union', 'static']

    keywords_future = [
        'abstract', 'become', 'box', 'do', 'final', 'macro', 'override',
        'priv', 'typeof', 'unsized', 'virtual', 'yield', 'async', 'await',
        'try'
    ]

    keywords += keywords_2018
    keywords += keywords_future

    keyword_tb = CaseSensitiveListTokenBuilder(keywords, 'keyword', False)

    types = [
        'Self', 'u8', 'i8', 'u16', 'i16', 'u32', 'i32', 'u64', 'i64',
        'u128', 'i128', 'usize', 'isize', 'f32', 'f64'
    ]
    types_tb = CaseSensitiveListTokenBuilder(types, 'type', True)
    operand_types.append('type')

    values = ['self', 'true', 'false', 'super', '_']
    values_tb = CaseSensitiveListTokenBuilder(values, 'value', True)
    operand_types.append('value')

    invalid_token_builder = InvalidTokenBuilder()

    # Builder list handed to the Tokenizer.  NOTE(review): list order
    # presumably establishes matching priority — confirm in Tokenizer.
    tokenbuilders = [
        newline_tb, whitespace_tb, line_continuation_tb, terminators_tb,
        integer_tb, integer_exponent_tb, octal_integer_tb, hex_integer_tb,
        binary_integer_tb, real_tb, real_exponent_tb,
        keyword_tb, types_tb, values_tb,
        groupers_tb, known_operator_tb,
        identifier_tb, char_tb, lifetime_tb, class_type_tb, attribute_tb,
        string_tb, bstring_tb, rstring_tb,
        slash_slash_comment_tb, slash_star_comment_tb,
        self.unknown_operator_tb, invalid_token_builder
    ]

    tokenizer = Tokenizer(tokenbuilders)
    tokens = tokenizer.tokenize(code)
    # Merge runs of invalid tokens so they count once in the statistics.
    tokens = Examiner.combine_adjacent_identical_tokens(
        tokens, 'invalid operator')
    tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid')
    self.tokens = self.combine_numbers_and_adjacent_types(tokens)
    self.convert_operators_to_identifiers()
    self.convert_bars_to_groups()

    self.calc_statistics()

    tokens = self.source_tokens()
    tokens = Examiner.join_all_lines(tokens)

    self.calc_token_confidence()
    self.calc_token_2_confidence()

    num_operators = self.count_my_tokens(['operator', 'invalid operator'])
    if num_operators > 0:
        self.calc_operator_confidence(num_operators)
        allow_pairs = []
        self.calc_operator_2_confidence(tokens, num_operators, allow_pairs)
        self.calc_operator_3_confidence(tokens, num_operators, group_ends,
                                        allow_pairs)
        self.calc_operator_4_confidence(tokens, num_operators, group_starts,
                                        allow_pairs)

    self.calc_group_confidence(tokens, group_mids)

    operand_types_2 = ['number', 'symbol']
    self.calc_operand_n_confidence(tokens, operand_types_2, 2)
    self.calc_operand_n_confidence(tokens, operand_types, 4)

    self.calc_keyword_confidence()
    self.calc_paired_blockers_confidence(['{'], ['}'])
    self.calc_line_format_confidence()
    self.calc_line_length_confidence(code, self.max_expected_line)