def interpret(raw_program_string) -> None:
    """Run the full interpretation pipeline: lex -> parse -> typecheck -> eval."""
    # lex the source twice: a token stream can only be iterated once,
    # so a second copy is produced solely for the length check
    lexer = Lexer()
    tokens = lexer.lex(raw_program_string)
    tokens_for_length_check = lexer.lex(raw_program_string)
    Tools.check_token_length(tokens_for_length_check)
    # build the AST from the token stream
    ast_root = Parser().parse(tokens)
    # typecheck, then evaluate, each starting from a fresh empty context
    ast_root.typecheck(AST.Type_Context.get_empty_context())
    ast_root.eval(AST.Eval_Context.get_empty_context())
def main():
    """Check the number of command line arguments and start the lexical analyzer.

    With exactly one extra argument (the program file), lex the file and print
    every token identified; otherwise print a usage message.
    """
    if len(argv) == 2:  # 2 command line arguments (sys.argv[1]: program file)
        # check suffix of the file supplied by the user
        Tools.check_suffix(argv[1])
        # check whether the file exists in the file system
        if os.path.exists(argv[1]):
            # create lexer and get the raw program string
            lexer = Lexer()
            raw_program_string = Tools.get_raw_program_string(argv[1])
            # lexically analyze the program, breaking it down into a token stream
            token_stream = lexer.lex(raw_program_string)
            # print all tokens identified
            for token in token_stream:
                print(token)
        else:
            # fixed message grammar: "does not exists" -> "does not exist"
            Tools.print_error("file \"" + argv[1] + "\" does not exist in the file system.\n")
    else:
        # any other number of command line arguments: print usage message
        Tools.print_warning("Usage: python main.py <program-to-run>\n")
def __init__(self, program_file = None) -> None:
    """Interpret *program_file* when one is given; otherwise start a REPL session.

    :param program_file: path of the program to run, or None for interactive mode.
    """
    # idiom fix: compare against None with identity (`is not`), not equality
    if program_file is not None:
        self.program_file = program_file
        raw_program_string = Tools.get_raw_program_string(self.program_file)
        self.interpret(raw_program_string)
    # otherwise, create an empty object with no parameters and enter the REPL loop
    else:
        # repl_list accumulates the lines accepted across REPL iterations
        self.repl_list = list()
        self.repl()
def main():
    """Check the number of command line arguments, then lex and parse the program.

    With exactly one extra argument (the program file), lex the file, verify
    the token stream, parse it into an AST and print the AST; otherwise print
    a usage message.
    """
    if len(argv) == 2:  # 2 command line arguments (sys.argv[1]: program file)
        # check whether the file exists in the file system
        if os.path.exists(argv[1]):
            # check suffix of the file
            Tools.check_suffix(argv[1])
            # create lexer and get the raw program string
            lexer = Lexer()
            raw_program_string = Tools.get_raw_program_string(argv[1])
            # lexically analyze the program, breaking it down into a token stream
            token_stream = lexer.lex(raw_program_string)
            # replica of the token stream for checking the length
            # (a token stream can only be looped once, so it is produced twice)
            token_stream_replica = lexer.lex(raw_program_string)
            Tools.check_token_length(token_stream_replica)
            # parse the lexical token stream into an AST (abstract syntax tree)
            parser = Parser()
            parsed_AST = parser.parse(token_stream)
            print(parsed_AST)
        else:
            # fixed message grammar: "does not exists" -> "does not exist"
            Tools.print_error("file \"" + argv[1] + "\" does not exist in the file system.\n")
    else:
        # any other number of command line arguments: print usage message
        Tools.print_warning("Usage: python main.py <program-to-run>\n")
def main() -> None:
    """Entry point called when executing the Python program.

    No extra arguments starts a REPL; one extra argument is treated as the
    program file to interpret; anything else prints a usage message.
    """
    if len(argv) == 1:  # no additional arguments: enter REPL loop
        Interpreter()
    elif len(argv) == 2:  # 2 command line arguments (sys.argv[1]: program file)
        # check suffix of the file supplied by the user
        Tools.check_suffix(argv[1])
        # check whether the file exists in the file system
        if os.path.exists(argv[1]):
            Interpreter(argv[1])
        else:
            # fixed message grammar: "does not exists" -> "does not exist"
            Tools.print_error("file \"" + argv[1] + "\" does not exist in the file system.\n")
    else:
        # any other number of command line arguments: print usage message
        Tools.print_warning("Usage: python main.py <program-to-run>\n")
def repl_interpret(prev_repl_list, input_line) -> bool:  # a True return indicates success, False indicates failure
    """start the REPL interpretation of the program

    The way repl_interpret is implemented is as follows
    1. evaluate the expression prior to the current input line (accumulated throughout the prior REPL session)
    2. evaluate the new line input by the user
    3. redirect/omit the standard output of the evaluation of the previous session program
    4. only display the standard output of the evaluation of the new line

    :param prev_repl_list: lines accepted in earlier REPL iterations, replayed to rebuild state
    :param input_line: the new line typed by the user
    :return: True when the new line parses, typechecks and evaluates cleanly; False otherwise
    """
    # extract raw program strings for the previous repl list and the new input line
    prev_raw_program_string = Tools.extract_raw_program_string("".join(prev_repl_list))
    new_raw_program_string = Tools.extract_raw_program_string(input_line)
    # --- Evaluation of the previous REPL list ---
    # create the Lexer
    lexer = Lexer()
    prev_token_stream = lexer.lex(prev_raw_program_string)
    # replica of token stream for checking the length
    # (a token stream can only be looped once, so it has to be produced twice)
    prev_token_stream_replica = lexer.lex(prev_raw_program_string)
    # create type and evaluation contexts; these same objects are reused below
    # for the new line so that bindings from earlier REPL input stay visible
    prev_type_context = AST.Type_Context.get_empty_context()
    prev_eval_context = AST.Eval_Context.get_empty_context()
    # skip the replay entirely when there was no prior user input
    if Tools.token_length(prev_token_stream_replica) != 0:
        # create parser
        parser = Parser()
        prev_parsed_AST = parser.parse(prev_token_stream)
        # start the type checking process
        prev_parsed_AST.typecheck(prev_type_context)
        # NOTE(review): the stdout redirection for the replayed program is
        # currently disabled (commented out), so its output is printed again —
        # confirm whether step 3 of the docstring is still intended
        # sys.stdout = open(os.devnull, "w")
        # start the evaluation process (this populates prev_eval_context)
        prev_parsed_AST.eval(prev_eval_context)
        # recover the stdout stream
        # sys.stdout = sys.__stdout__
    # --- Evaluation of the new input line ---
    # create the Lexer
    lexer = Lexer()
    new_token_stream = lexer.lex(new_raw_program_string)
    # replica of token stream for checking the length
    # (a token stream can only be looped once, so it has to be produced twice)
    new_token_stream_replica = lexer.lex(new_raw_program_string)
    # create type and evaluation contexts (reuse the ones built while replaying
    # the previous REPL list, so earlier definitions remain in scope)
    new_type_context = prev_type_context
    new_eval_context = prev_eval_context
    # print(new_eval_context)  # debug
    # evaluate only when the user actually typed something
    if Tools.token_length(new_token_stream_replica) != 0:
        # create parser
        parser = Parser()
        try:
            new_parsed_AST = parser.parse(new_token_stream)
            # start the type checking process
            new_parsed_AST.typecheck(new_type_context)
            Tools.print_warning("OUTPUT >>>\n")
            # start the evaluation process
            new_parsed_AST.eval(new_eval_context)
            Tools.print_warning("<<< OUTPUT\n")
            # success: the caller should append this line to its repl_list
            return True
        except Exception as e:
            # debugger: print the full stack trace during REPL
            tb = traceback.format_exc()
            print(tb)
            # raise RuntimeError("Interpreter Error")
            # print(e)
            # return False
    # return False for all the paths not covered by the try block
    # (empty input, or an exception while handling the new line)
    return False
def logical_implies(self, rhs):
    """Material implication: equivalent to (not self) or rhs."""
    implied = (not self.value) or rhs.value
    return Boolean_Val(Tools.bool_to_str(implied))
def logical_not(self):
    """Return a new value holding the logical negation of this one."""
    negated = not self.value
    return Boolean_Val(Tools.bool_to_str(negated))
def logical_or(self, rhs):
    """Return a new value holding the logical OR of this value and rhs."""
    either = self.value or rhs.value
    return Boolean_Val(Tools.bool_to_str(either))
def logical_and(self, rhs):
    """Return a new value holding the logical AND of this value and rhs.

    Python's `and` short-circuits: rhs.value is not inspected when
    self.value is already falsy.
    """
    both = self.value and rhs.value
    return Boolean_Val(Tools.bool_to_str(both))
def to_str(self):
    """Return the textual representation of the stored boolean."""
    text = Tools.bool_to_str(self.value)
    return text
def __init__(self, value, value_type="BOOLEAN"):
    """Wrap a textual boolean, storing it internally as a Python bool."""
    self.value_type = value_type
    # normalize the textual representation into the internal bool type
    self.value = Tools.str_to_bool(value)
def __init__(self, msg):
    """Report *msg* through the shared error printer upon construction."""
    error_text = msg
    Tools.print_error(error_text)