def check_types_pipe(result: dict, checker=TypeChecker):
    """
    Build the scope and check that the types are consistent.
    """
    ast = result.get("ast", None)
    context = result.get("context", None)
    if not ast or not context:
        return result

    errors = []
    checker = checker(context, errors)
    scope, operator = checker.visit(ast, Scope())
    result["scope"] = scope
    result["operator"] = operator

    if result.get("verbose", False):
        if errors:
            print_errors("Checking Types Errors", errors)
        print("=========== Checked Types Info =============")
        print("Scope:")
        print(scope)
        print("Operator:")
        print(operator)

    result["errors"].extend(errors)
    return result
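# Convention used by every pipe in this module: allocate a local `errors`
# list, hand it to the visitor (which appends diagnostics in place), print
# diagnostics under the "verbose" flag, and finally merge the local list
# into the accumulated result["errors"]. Each pipe returns the same
# `result` dict so the stages can be chained.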
def remove_comment_tokens_pipe(result: dict):
    """
    Remove all tokens between (* and *) and their related errors, if any.
    """
    tokens = result["text_tokens"]
    errors = result["errors"]

    new_tokens = []
    deep = 0
    start_comment_position, end_comment_position = None, None
    for tok in tokens:
        if tok.token_type == comment_open:
            deep += 1
            if deep == 1:
                # Remember where the outermost comment starts
                start_comment_position = (tok.lex[1], tok.lex[2])
        elif tok.token_type == comment_close:
            deep -= 1
            if deep == 0:
                end_comment_position = (tok.lex[1], tok.lex[2])
                # Drop the errors reported inside the comment span
                errors = [x for x in errors
                          if not (start_comment_position <= (x.row, x.column) <= end_comment_position)]
        elif not deep:
            new_tokens.append(tok)

    if result.get("verbose", False):
        if errors:
            print_errors("Nested Comment Elimination Errors", errors)

    result["errors"] = errors
    result.update({"text_tokens": new_tokens})
    return result
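# A standalone sketch of the depth-counting idea above, on plain "(*"/"*)"
# markers instead of lexer tokens (hypothetical demo helper, not used by
# the pipeline): only items at depth 0 survive, so a nested comment such
# as (* a (* b *) c *) is dropped as a single unit.
def _strip_nested_demo(marks):
    kept, deep = [], 0
    for m in marks:
        if m == "(*":
            deep += 1
        elif m == "*)":
            deep -= 1
        elif deep == 0:
            kept.append(m)
    return kept

# _strip_nested_demo(["x", "(*", "a", "(*", "b", "*)", "c", "*)", "y"])
# -> ["x", "y"]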
def remove_comments_pipe(result: dict, comment_grammar=C, comment_lexer=comment_lexer, comment_parser=comment_parser):
    """
    Remove the commented lines from the text.
    """
    text = result["text"]
    lang = LanguageLR(comment_grammar, comment_lexer, comment_parser)

    errors = []
    parse, tokens = lang(text, errors)
    if not errors:
        text = comment_parser.evaluate(tokens, errors, True)

    if result.get("verbose", False):
        if errors:
            print_errors("Removing Comments Errors", errors)
        if len(text) != len(result["text"]):
            print("=========== Text Comments Removed ===============")
            print(text)
        else:
            print("=========== No Comments Removed ===============")

    result["errors"].extend(errors)
    result["text"] = text
    return result
def run_program_pipe(result: dict, runner=RunVisitor):
    """
    Run the ast and store the result.
    """
    ast = result.get("ast", None)
    context = result.get("context", None)
    scope = result.get("scope", None)
    operator = result.get("operator", None)
    errors = result.get("errors", None)
    if any(x is None for x in [ast, context, scope, operator]) or errors:
        return result

    errors = []
    runner = runner(context, scope, operator, errors)
    value = runner.visit(ast)
    result["value"] = value

    if result.get("verbose", False):
        if errors:
            print_errors("Running Program Errors", errors)
        print('=============== PROGRAM RAN ===============')
        print('Returned Value:')
        print(value)

    result["errors"].extend(errors)
    return result
def parse_text_pipe(result: dict, language_grammar=G, language_lexer=PlyLexer(), language_parser=cool_parser):
    """
    Parse the text.
    """
    text = result['text']
    tokens = result.get('text_tokens')

    errors = []
    if tokens and len(tokens) == 1:
        tokens[0].set_position(0, 0)  # The EOF token must be at (0, 0)

    lang = result.get('language', LanguageLR(language_grammar, language_lexer, language_parser))
    parse, tokens = lang(text, errors, tokens)

    if result.get("verbose", False):
        if errors:
            print_errors("Parsing Text Errors", errors)
        if 'text_tokens' not in result:
            print('================== TOKENS =====================')
            pprint_tokens(tokens)
        print('=================== PARSE =====================')
        print('\n'.join(repr(x) for x in parse))

    result.update({
        "text_parse": parse,
        "language": lang,
        "text_tokens": tokens,
        "parser": language_parser,
    })
    result["errors"].extend(errors)
    return result
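# Design note on the defaults above: `language_lexer=PlyLexer()` is
# evaluated once, when the function is defined, so every call that does not
# pass its own lexer shares a single PlyLexer instance (the usual Python
# mutable-default behavior). If the lexer keeps state between runs, callers
# should pass a fresh instance explicitly.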
def ply_lexer_pipe(result: dict, language_grammar=G, language_lexer=ply_lexer, language_parser=cool_parser):
    """
    Tokenize with ply.
    """
    text = result["text"]
    lang = LanguageLR(language_grammar, language_lexer, language_parser)

    errors = []
    tokens = lang.get_tokens(text, errors)
    # The ply lexer accumulates its own diagnostics; fetch them after tokenizing
    errors = language_lexer.get_errors()

    result.update({
        "parser": language_parser,
        "lexer": language_lexer,
        "language": lang,
        "text_tokens": tokens,
    })

    if result.get("verbose", False):
        if errors:
            print_errors("Lexer Errors", errors)
        print('================== TOKENS =====================')
        pprint_tokens(tokens)

    result["errors"].extend(errors)
    return result
def cil_to_mips_pipe(result: dict, cil_to_mips=CILToMIPSVisitor):
    """
    Convert the CIL ast to a MIPS ast.
    """
    ast = result.get("cil_ast", None)
    if ast is None:
        return result

    converter = cil_to_mips()
    value = converter.visit(ast)
    result["errors"].extend(converter.errors)

    if result.get("verbose", False):
        print("============== CIL to MIPS Result ===============")
        print(value)
        print_errors("============ CIL to MIPS Error =============", converter.errors)

    result['mips_ast'] = value
    return result
def run_cil_pipe(result: dict, runner=CILRunnerVisitor):
    """
    Run the CIL ast and store the result.
    """
    ast = result.get("cil_ast", None)
    if ast is None:
        return result

    runner = runner()
    value = runner.visit(ast)
    result["errors"].extend(runner.errors)

    if result.get("verbose", False):
        print("============== CIL Result ===============")
        print(value)
        print_errors("============ CIL Run Error =============", runner.errors)

    result['cil_value'] = value
    return result
def auto_resolver_pipe(result: dict, auto_resolver=AutoResolver):
    """
    Resolve the automatic types in the ast.
    """
    ast = result.get("ast", None)
    context = result.get("context", None)
    if any(x is None for x in [ast, context]):
        return result

    errors = []
    resolver = auto_resolver(context, errors)
    resolver.visit(ast)

    if result.get("verbose", False):
        if errors:
            print_errors("Auto Resolver Errors", errors)

    result["errors"].extend(errors)
    return result
def build_types_pipe(result: dict, builder=TypeBuilder):
    """
    Build the types in the context.
    """
    ast = result.get("ast", None)
    context = result.get("context", None)
    if not ast or not context:
        return result

    errors = []
    builder = builder(context, errors)
    builder.visit(ast)

    if result.get("verbose", False):
        if errors:
            print_errors("Building Types Errors", errors)
        print('=============== BUILT TYPES ================')
        print(context)

    result["errors"].extend(errors)
    return result
def cool_to_cil_pipe(result: dict, cool_to_cil=COOLToCILVisitor):
    """
    Convert the COOL ast to a CIL ast.
    """
    context = result.get("context", None)
    scope = result.get("scope", None)
    ast = result.get("ast", None)
    if any(x is None for x in [context, ast, scope]):
        return result
    if result.get("errors"):
        # Do not generate code for a program that already has errors
        return result

    errors = []
    cool_to_cil_visitor = cool_to_cil(context, errors)
    cil_ast = cool_to_cil_visitor.visit(ast, scope)
    result['cil_ast'] = cil_ast

    if result.get("verbose", False):
        if errors:
            print_errors("COOL to CIL Errors", errors)

    result["errors"].extend(errors)
    return result
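# Unlike most stages, which keep running and merely accumulate diagnostics,
# cool_to_cil_pipe bails out as soon as an earlier stage has reported
# errors. A minimal sketch of the same guard as a reusable decorator
# (hypothetical helper, not part of the pipeline):
def skip_on_errors(pipe):
    """Turn a pipe into a no-op once result["errors"] is non-empty."""
    def guarded(result, *args, **kwargs):
        if result.get("errors"):
            return result
        return pipe(result, *args, **kwargs)
    return guarded

# Usage: guarded_codegen = skip_on_errors(cool_to_cil_pipe)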
def type_collector_pipe(result: dict, collector=TypeCollector):
    """
    Collects the types in the program.
    """
    ast = result.get("ast", None)
    if not ast:
        return result

    errors = []
    collector = collector(errors, result.get("context", None))
    collector.visit(ast)
    context = collector.context
    result["context"] = context

    if result.get("verbose", False):
        if errors:
            print_errors("Collecting Types Errors", errors)
        print('============== COLLECTED TYPES ===============')
        print(context)

    result["errors"].extend(errors)
    return result
def ast_pipe(result: dict):
    """
    Add the initial ast.
    """
    parser = result["parser"]
    tokens = result["text_tokens"]
    text_parsed = result.get("text_parse", None)

    errors = []
    if text_parsed or tokens:
        ast = parser.evaluate(tokens, errors, True, text_parsed)
        result["ast"] = ast
        if result.get("verbose", False):
            if errors:
                print_errors("Building AST Errors", errors)
            print('==================== AST ======================')
            formatter = FormatVisitor()
            tree = formatter.visit(ast)
            print(tree)

    result["errors"].extend(errors)
    return result
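# A minimal end-to-end sketch of how these pipes compose (hypothetical
# driver; the exact stage selection and order here is an assumption, not a
# contract of this module). Every pipe reads and writes the same `result`
# dict -- "text" feeds the comment stages, "text_tokens" the parser,
# "ast"/"context"/"scope"/"operator" the semantic stages, and "cil_ast" the
# back ends -- so running the compiler is a left fold over the stage list:
def run_pipeline(source, verbose=False):
    result = {"text": source, "errors": [], "verbose": verbose}
    pipeline = (
        remove_comments_pipe,        # strip (* ... *) from the raw text
        ply_lexer_pipe,              # tokenize
        remove_comment_tokens_pipe,  # drop comment tokens and their errors
        parse_text_pipe,             # LR parse
        ast_pipe,                    # build the AST from the parse
        type_collector_pipe,         # semantic analysis ...
        build_types_pipe,
        auto_resolver_pipe,
        check_types_pipe,
        cool_to_cil_pipe,            # code generation
        cil_to_mips_pipe,
    )
    for pipe in pipeline:
        result = pipe(result)
    return result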