def test_match_values_false(self):
    self.assertEqual(
        False,
        transpiler_utils.token_check(
            ['TYPE', 'value'],
            values=['missing_value', 'missing_value1']))

def test_match_types_false(self):
    self.assertEqual(
        False,
        transpiler_utils.token_check(
            ['TYPE', 'value'],
            ['MISSING_TYPE1', 'MISSING_TYPE']))

def test_match_value(self):
    self.assertEqual(
        True,
        transpiler_utils.token_check(['TYPE', 'value'], values='value'))

def test_match_none(self):
    self.assertEqual(
        False,
        transpiler_utils.token_check(['TYPE', 'value']))
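
# The tests above pin down the matching semantics this file assumes for
# transpiler_utils.token_check: a token is a [TYPE, value] pair, `types`/`values`
# each accept a single string or a list of candidates, and a call with neither
# filter given matches nothing. A minimal sketch of that contract (not the real
# implementation, which lives in transpiler_utils):
#
#   def token_check(token, types=None, values=None):
#       def matches(item, wanted):
#           return item in wanted if isinstance(wanted, list) else item == wanted
#       if types is None and values is None:
#           return False
#       if types is not None and not matches(token[0], types):
#           return False
#       if values is not None and not matches(token[1], values):
#           return False
#       return True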
def lex(raw: List[str]) -> List[List[str]]:
    tmp = ''
    is_collector = False
    is_str = False
    might_be_special_str = False
    collector_ends = []
    include_collector_end = False
    is_dict = []
    tokens = []

    for line in raw:
        line = lexer_utils.reformat_line(line)

        for char_index, char in enumerate(line):
            # Ignores comments
            if char == '#':
                break

            if is_str:
                if char not in collector_ends:
                    tmp += char
                if char in collector_ends:
                    tmp += char
                    tokens.append(['STR', tmp])

                    is_str = False
                    collector_ends = []
                    tmp = ''
                continue

            # Detects raw & f-strings.
            if char in ['r', 'f']:
                might_be_special_str = char

            if is_collector:
                if char not in collector_ends:
                    tmp += char
                else:
                    tokens[-1][1] = tmp

                    if include_collector_end:
                        tokens.append([
                            'OP' if char not in ['\n', '\t'] else 'FORMAT',
                            char
                        ])

                    is_collector = False
                    include_collector_end = False
                    tmp = ''
            # Function call.
            elif char == '(':
                if tmp:
                    tokens.append(['FUNC', tmp.replace(' ', '')])
                    tmp = ''
            # Variable assignment.
            elif char == '=':
                if tmp:
                    tokens.append(['WORD', tmp])
                    tokens.append(['OP', char])

                    if tmp not in cfg.variables:
                        cfg.variables.append(tmp)

                    tmp = ''
                else:
                    tokens.append(['OP', char])
            # String.
            elif char in ['"', '\'']:
                is_str = True
                collector_ends = [char]

                tmp = ''
                if might_be_special_str == 'r':
                    tmp = r''
                elif might_be_special_str == 'f':
                    tmp = f''

                tmp += char
                might_be_special_str = ''
            # Dict open.
            elif char == '{':
                is_dict.append(True)
                tokens.append(['OP', char])
            # Dict close.
            elif char == '}':
                tokens, tmp, is_collector, collector_ends, include_collector_end = lexer_utils.word_or_special(
                    tokens, tmp)
                is_dict.pop(0)
                tokens.append(['OP', char])
            # Number (on its own).
            elif char.isdigit() and not tmp:
                if tokens and tokens[-1][0] == 'NUM':
                    tokens[-1][1] += char
                    continue
                tokens.append(['NUM', char])
            # Decorator.
            elif char == '&':
                is_collector = True
                collector_ends = ['\n']
                include_collector_end = True
                tmp = ''
                tokens.append(['DEC', ''])
            # Operator (special character).
            elif char in cfg.operators:
                if char == ':' and is_dict and tmp:
                    tokens.append(['KEY', tmp])
                    tmp = ''

                tokens, tmp, is_collector, collector_ends, include_collector_end = lexer_utils.word_or_special(
                    tokens, tmp)
                tokens.append(['OP', char])
            # Formatting.
            elif char in ['\n', '\t']:
                tokens, tmp, is_collector, collector_ends, include_collector_end = lexer_utils.word_or_special(
                    tokens, tmp)
                tokens.append(['FORMAT', char])
            # Character isn't anything specific, meaning it's e.g. a letter. These get collected for later use.
            elif char not in ['\n', '\t', ' ']:
                tmp += char
            else:
                # There might be a word or keyword in tmp.
                tokens, tmp, is_collector, collector_ends, include_collector_end = lexer_utils.word_or_special(
                    tokens, tmp)

                if len(tokens) > 2 and transpiler_utils.token_check(
                        tokens[-2], 'WORD',
                        transpiler_utils.add_underscores_to_elems(['db'])
                        if askfile.get(['rules', 'underscores'], True) else 'db'):
                    # Removes the WORD 'db'/'_db' and the OP '.'.
                    tokens.pop(-1)
                    tokens.pop(-1)

                    is_collector = True
                    collector_ends = ['(', ',', ')']
                    include_collector_end = True
                    tmp = ''
                    tokens.append(['DB_ACTION', ''])
                    cfg.uses_db = True

    return tokens
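
# A rough worked example of the token stream lex() produces, assuming
# lexer_utils.reformat_line keeps the trailing newline and lexer_utils.word_or_special
# flushes the collected characters into a WORD (or keyword) token: an input line such
# as "x = 5\n" would come out roughly as
#   [['WORD', 'x'], ['OP', '='], ['NUM', '5'], ['FORMAT', '\n']]
# i.e. every lexeme becomes a [TYPE, value] pair that translate() below consumes.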
def translate(tokens: List[List[str]]) -> str:
    translated = ''
    is_skip = False
    needs_db_commit = False
    is_decorator = False
    add_tabs_to_inner_group = False
    indention_depth_counter = 0
    decorator = ''
    add_parenthesis_at_end_of_line = False
    basic_decorator_collection_might_end = False
    on_next_run_uses_basic_decorator = False
    past_lines_tokens = []
    ignored_due_to_basic_decorator = []
    is_import = False
    is_include = False
    included_module_name = ''

    for token_index, token in enumerate(tokens):
        if is_skip:
            is_skip = False
            continue

        token_type = token[0]
        token_val = token[1]

        # Used when "postponing" the setting of uses_basic_decorator to true.
        if on_next_run_uses_basic_decorator:
            on_next_run_uses_basic_decorator = False
            cfg.uses_basic_decorator = True

        if cfg.uses_basic_decorator and transpiler_utils.token_check(
                token, 'FORMAT', '\n') and past_lines_tokens:
            if basic_decorator_collection_might_end:
                if past_lines_tokens == [['DEC', 'basic']]:
                    on_next_run_uses_basic_decorator = True
                    cfg.basic_decorator_collector = cfg.previous_basic_decorator_collector

                if not translator_utils.is_db_column_or_model_in_past_line(
                        past_lines_tokens):
                    translated = insert_basic_decorator_code_to_insert(
                        translated, ignored_due_to_basic_decorator)

                    # Re-setting to defaults.
                    basic_decorator_collection_might_end = False
                    cfg.uses_basic_decorator = False
                    cfg.basic_decorator_collector = []
                    ignored_due_to_basic_decorator = []
                    cfg.basic_decorator_has_primary_key = False
            else:
                basic_decorator_collection_might_end = True

        if transpiler_utils.token_check(token, 'FORMAT', '\n'):
            past_lines_tokens = []
        else:
            past_lines_tokens.append(token)

        if add_tabs_to_inner_group and token_type == 'GROUP':
            if token_val == 'end':
                indention_depth_counter -= 1

                if indention_depth_counter == 0:
                    add_tabs_to_inner_group = False
                    translated += '\n\treturn wrapper'
            elif token_val == 'start':
                indention_depth_counter += 1

        if token_type == 'FORMAT':
            if token_val == '\n' and is_include:
                is_include = False
                cfg.included_module_code += include_module(included_module_name)
                included_module_name = ''
                continue

            if token_val == '\n' and add_parenthesis_at_end_of_line:
                translated += ')'
                add_parenthesis_at_end_of_line = False

            translated += token_val

            if transpiler_utils.token_check(token, 'FORMAT',
                                            '\n') and add_tabs_to_inner_group:
                translated += '\t'
        elif token_type == 'NUM':
            translated += f'{translator_utils.space_prefix(translated, token_val)}{token_val}'
        elif token_type == 'OP':
            if is_include:
                included_module_name += token_val
                continue

            translated += f'{translator_utils.space_prefix(translated, token_val)}{token_val}'

            if needs_db_commit and token_val == ')':
                needs_db_commit = False

                tab_level = translator_utils.get_tab_count(translated)
                translated += f'\n{tab_level}db.session.commit()'
        elif token_type == 'STR':
            translated += f'{translator_utils.space_prefix(translated, token_val)}{token_val}'
        elif token_type == 'WORD':
            if is_include:
                included_module_name += token_val
                continue

            if is_import:
                is_import = False

                to_append, _ = translator_utils.might_be_ask_import(token_val)
                if to_append:
                    for line in to_append:
                        translated += f'{translator_utils.space_prefix(translated, token_val)}{line}\n'
                    continue

            if token_val == 'extend':
                is_import = True
                continue
            elif token_val == 'include':
                is_include = True
                included_module_name = ''
                continue

            translated += f'{translator_utils.space_prefix(translated, token_val)}{small_transpilers.transpile_word(token_val, translated)}'
        elif token_type == 'FUNC':
            if token_val[0] == '@':
                new_line = '\n'
                suffix = new_line

                if token_index > 2 and tokens[token_index - 2][0] == 'DEC':
                    suffix = ''

                if token_index + 1 < len(tokens) and tokens[token_index + 1][0] == 'STR':
                    next_token_val = tokens[token_index + 1][1]
                    translated += f'@app.route({next_token_val}, methods=[\'{token_val[1:]}\']){suffix}'
                    cfg.uses_routes = True

                    # Flask-selfdoc decorator
                    translated += f'{new_line if suffix == "" else ""}@auto.doc(\''
                    default_doc_end = f'public\'){suffix}'

                    if is_decorator:
                        # Group type for the auto docs decorator. (private if the route is protected else public)
                        if decorator == '\n@check_for_token':
                            translated += f'private\'){suffix}'
                        else:
                            translated += default_doc_end

                        translated += decorator + '\n'
                    else:
                        translated += default_doc_end

                    translated += ''.join([
                        f'def {token_val[1:]}',
                        f'{translator_utils.uri_to_func_name(next_token_val)}',
                        f'({translator_utils.extract_params_from_uri(next_token_val)}'
                    ])

                    is_skip = True
                    is_decorator = False
            elif token_val in cfg.ask_library_methods:
                prefix = 'AskLibrary.'
                if token_val == 'respond':
                    prefix = f'return {prefix}'

                translated += f'{translator_utils.space_prefix(translated, to_add=token_val)}{prefix}{token_val}('
            else:
                translated += f'{translator_utils.space_prefix(translated, to_add=token_val)}{small_transpilers.transpile_function(token_val)}'

                if token_val == 'status':
                    add_parenthesis_at_end_of_line = True
        elif token_type == 'DB_MODEL':
            translated += f'\nclass {token_val}(db.Model)'
        elif token_type == 'FUNC_DEF':
            translated += f'def {token_val if token_val not in ["init", "_init"] else "__init__"}('
        elif token_type == 'DEC_DEF':
            translated += f'def {token_val}(func):'
            translated += f'\n\tdef wrapper(*args, **kwargs):'
            add_tabs_to_inner_group = True
        elif token_type == 'KEY':
            translated += f'\'{token_val}\''
        elif token_type == 'DEC':
            decorator = small_transpilers.transpile_decorator(token_val)

            if not decorator:
                translated += f'@{token_val}'

            if decorator != '---':
                is_decorator = True
        elif token_type == 'DB_ACTION':
            transpiled_action, needs_commit = small_transpilers.transpile_db_action(
                token_val)

            if cfg.uses_basic_decorator:
                if transpiled_action == 'primary_key=True':
                    cfg.basic_decorator_has_primary_key = True

                if transpiled_action == 'ignored':
                    ignored_due_to_basic_decorator.append(
                        translator_utils.previous_non_keyword_word_tok(
                            past_lines_tokens))

                if transpiled_action == 'db.Column':
                    cfg.basic_decorator_collector.append(
                        translator_utils.previous_non_keyword_word_tok(
                            past_lines_tokens))

                    for ignored in ignored_due_to_basic_decorator:
                        if ignored in cfg.basic_decorator_collector:
                            cfg.basic_decorator_collector.remove(ignored)

            if transpiled_action != 'ignored':
                translated += f'{translator_utils.space_prefix(translated, transpiled_action)}{transpiled_action}'

            if needs_commit:
                needs_db_commit = True

    return translated
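
# Rough end-to-end sketch of how these two passes fit together (cfg holds the shared
# state such as cfg.variables, cfg.uses_db and cfg.uses_routes that both passes
# read and write); the variable names here are illustrative only:
#
#   tokens = lex(source_lines)          # source_lines: List[str] of Ask source
#   python_source = translate(tokens)   # flat Python/Flask source as one string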