def handle_starttag(self, tag, attrs):
    """Overridden - Called when a start tag is parsed.

    The heart of this function is the state machine. When a <div> tag is
    detected, the attributes are compared with a map of the form
    (name, value) -> state. If a match occurs, the state is pushed on top of
    the stack. Depending on the current state, the start tag is queued for
    output, or not."""
    # Debug
    if wikidot.debug.ENABLE_DEBUG:
        print("<{}> {}".format(tag, attrs), file=sys.stderr)
    # Update the state machine
    state_changed = self.__update_state_machine_start__(tag, attrs)
    if state_changed and self.current_state == "body":
        # We have just entered the body, don't output this <div> tag
        return
    if self.current_state == "body":
        # Handle special tags
        self.__handle_body_tag__(tag, attrs)
        # Add the tag to the output
        MyParser.handle_starttag(self, tag, attrs)
    elif self.current_state == "toc":
        # Handle the content of the TOC
        self.toc += MyParser.format_start_tag(self, tag, attrs)
    elif self.current_state == "breadcrumbs" and tag == 'a':
        # Register the breadcrumbs
        for attr in attrs:
            if attr[0] == 'href':
                self.breadcrumbs.append(attr[1])
                break
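The handle_* callbacks above follow the style of Python's html.parser.HTMLParser interface, so a subclass like this is driven by feeding it markup. A minimal sketch of that driving pattern, assuming an HTMLParser-derived class (the class name and HTML string are illustrative, not the project's own):

# assumes an html.parser.HTMLParser subclass; the HTML below is illustrative
from html.parser import HTMLParser

class DivLogger(HTMLParser):
    def handle_starttag(self, tag, attrs):
        # attrs arrives as a list of (name, value) tuples
        if tag == "div":
            print("entering", dict(attrs))

parser = DivLogger()
parser.feed('<div id="page-content"><p>hello</p></div>')
# prints: entering {'id': 'page-content'}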
def get_details_from_site():
    startTime = datetime.now()
    data = load_records('pickle', 'remainingDetails.pcl')
    detailsFile = open("remainingDetails.txt", 'a')
    # csvFile = open("parsedCsv9.csv", 'w', encoding='utf-8', newline='')
    # csvWriter = csv.writer(csvFile, delimiter=',')
    prs = MyParser()
    prs.init_web()
    pool = mp.Pool(10)
    startRec = 0
    dataSlice = data[startRec:]
    results = pool.imap(prs.request_and_parse, dataSlice, 10)
    pool.close()
    for i in range(len(dataSlice)):
        parsedDetails = next(results)
        json.dump(parsedDetails, detailsFile)
        print("Wrote record #", i + startRec)
        # i += 1
        # print("Record #%d" % i)
    print("Elapsed Time: ", (datetime.now() - startTime).total_seconds())
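The function above fans parsing out over a process pool and consumes the results lazily with imap. A minimal, self-contained sketch of that same pattern; parse_record stands in for MyParser.request_and_parse and the inputs are made up:

# illustrative sketch of the Pool.imap pattern used above
import multiprocessing as mp

def parse_record(record):
    # placeholder worker; the real code calls MyParser.request_and_parse
    return {"id": record, "length": len(str(record))}

if __name__ == "__main__":
    records = list(range(100))
    with mp.Pool(processes=4) as pool:
        # imap yields results lazily, in input order, as chunks of 10 are processed
        for parsed in pool.imap(parse_record, records, chunksize=10):
            print(parsed)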
def handle_decl(self, decl):
    """Overridden - Called when an SGML declaration (<!) is parsed.

    Depending on the current state, the declaration is queued for output, or not."""
    if self.current_state == "body":
        # Add the SGML declaration to the output
        MyParser.handle_decl(self, decl)
def handle_data(self, data):
    """Overridden - Called when some data is parsed.

    Depending on the current state, the data is queued for output, or not."""
    if self.current_state == "title":
        # Register the title
        self.page_title += data.strip()
    elif self.current_state == "body":
        # Add data to the output
        MyParser.handle_data(self, data)
    elif self.current_state == "toc":
        # Add data to the TOC
        self.toc += data
def handle_entityref(self, name):
    """Overridden - Called when an entity reference (&xyz;) is parsed.

    Depending on the current state, the entityref is queued for output, or not."""
    if self.current_state == "title":
        # Add the entityref to the title
        self.page_title += ("&" + name + ";")
    elif self.current_state == "body":
        # Add the entityref to the output
        MyParser.handle_entityref(self, name)
    elif self.current_state == "toc":
        # Add the entityref to the TOC
        self.toc += ("&" + name + ";")
def compile(source, isFile=False):
    text = InputStream(source)
    if isFile:
        text = FileStream(source)
    lexer = DecafeLexer(text)
    stream = CommonTokenStream(lexer)
    parser = MyParser(stream)
    tree = parser.program()
    errors = parser.errMsg
    # print(Trees.toStringTree(tree, None, parser))

    # make symbol table
    tsymbol = MyVistor()
    tsymbol.visit(tree)
    # tsymbol.symTable.Print()  # print table
    symTable = tsymbol.symTable.ToString()  # get json of tables

    # make tree
    treeView, _ = convertor.convertInit(tree)(tree, 0)
    treeView.render('tree.gv', "./web/static/img")

    # intermediate code
    inCode = IntermediateCodeGenerator(tsymbol.symTable)
    inCode.visit(tree)
    iCode = []
    for l in inCode.lines:
        if l.type != "label":
            iCode.append("\t" + str(l))
        else:
            iCode.append(str(l))
    iCode = "\n".join(iCode)

    # compile to NASM
    symT = tsymbol.symTable
    with open('code.asm', 'w') as f:
        for line in translate(symT, inCode.lines):
            f.write("%s\n" % line)
    print(iCode)

    errors.extend(tsymbol.errorMsg)
    errors = list(set(errors))
    return symTable, errors, iCode
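A possible call site for this compile() function, assuming a small Decaf-like program as input; the source string below is illustrative only:

# hypothetical usage of compile(); the Decaf-like snippet is made up
source = 'class Program { void main() { return; } }'
symTable, errors, iCode = compile(source, isFile=False)
if errors:
    print("compilation errors:", errors)
else:
    print(iCode)  # intermediate code listing; code.asm and the rendered tree are side effects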
def __init__(self, code):
    self.tree = MyParser(code).ast
    buildPrunnedTree(self.tree)
    analysis(self.tree)
    verifyNotUsedVariables(self.tree)
    print("")
    printPrunnedTree(self.tree)
def run_weather_bot(serve_forever=True):
    interpreter = MyParser()
    agent = Agent.load('./models/dialogue', interpreter=interpreter)
    if serve_forever:
        agent.handle_channel(ConsoleInputChannel())
    return agent
def handle_endtag(self, tag):
    """Overridden - Called when an end tag is parsed.

    The state machine is updated when a </div> tag is encountered. Depending
    on the current state, the end tag is queued for output, or not."""
    if self.current_state == "toc":
        # Add the tag to the TOC
        self.toc += MyParser.format_end_tag(self, tag)
    # Update the state machine
    state_changed = self.__update_state_machine_end__(tag)
    if state_changed:
        return
    if self.current_state == "body":
        # Add the tag to the output
        MyParser.handle_endtag(self, tag)
def __init__(self):
    """Initialize internal variables"""
    MyParser.__init__(self)
    self.div_level = 0
    self.div_bookmark = [-1]  # List managed as a stack
    self.state = ["none"]     # List managed as a stack
    self.current_state = "none"  # Points to the top of the stack
    # Map for div tag attribute -> state
    # (attribute name, attribute property, state)
    self.div_state_map = [
        ('id', 'page-title', 'title'),
        ('id', 'breadcrumbs', 'breadcrumbs'),
        ('id', 'page-content', 'body'),
        ('id', 'toc-action-bar', 'useless'),
        ('id', 'toc', 'toc'),
        ('style', 'position:absolute', 'useless')]
    self.page_title = ""
    self.toc = ""
    self.links = OrderedSet()
    self.breadcrumbs = list()
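The mapping above drives the state machine described in handle_starttag: each (name, value, state) entry is compared against the attributes of an opening <div>. A standalone sketch of that lookup; the helper name match_div_state is illustrative, not the class's actual method:

# illustrative only: matching a <div>'s attrs against div_state_map
div_state_map = [
    ('id', 'page-title', 'title'),
    ('id', 'page-content', 'body'),
    ('id', 'toc', 'toc'),
]

def match_div_state(attrs):
    for name, value, state in div_state_map:
        for attr_name, attr_value in attrs:
            if attr_name == name and attr_value == value:
                return state
    return None

print(match_div_state([('id', 'page-content'), ('class', 'main')]))  # body
print(match_div_state([('class', 'sidebar')]))                       # None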
from myparser import MyParser

if __name__ == "__main__":
    parser = MyParser()
    while True:
        # Read and parse input until Ctrl+D (EOF) is pressed
        try:
            s = input('Input Exp >>>> ')
        except EOFError:
            break
        if not s:
            continue
        result = parser.parse(s)
        print(f"Result Is -> { {result} }\n")
def main():
    parser = argparse.ArgumentParser(
        description="trivial right-branching parser that ignores any grammar passed in",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    addonoffarg(parser, 'debug', help="debug mode", default=False)
    parser.add_argument("--infile", "-i", nargs='?', type=argparse.FileType('r'),
                        default=sys.stdin, help="input (one sentence per line strings) file")
    parser.add_argument("--grammarfile", "-g", nargs='?', type=argparse.FileType('r'),
                        default=sys.stdin, help="grammar file; ignored")
    parser.add_argument("--outfile", "-o", nargs='?', type=argparse.FileType('w'),
                        default=sys.stdout, help="output (one tree per line) file")
    try:
        args = parser.parse_args()
    except IOError as msg:
        parser.error(str(msg))

    workdir = tempfile.mkdtemp(prefix=os.path.basename(__file__),
                               dir=os.getenv('TMPDIR', '/tmp'))

    def cleanwork():
        shutil.rmtree(workdir, ignore_errors=True)

    if args.debug:
        print(workdir)
    else:
        atexit.register(cleanwork)

    infile = prepfile(args.infile, 'r')
    outfile = prepfile(args.outfile, 'w')
    grammarfile = prepfile(args.grammarfile, 'r')

    # Create an instance of PCFGParser using data/weighted.rule grammar file
    my_grammer = MyParser()  # print rules
    # To use your own grammar file:
    # parser = PCFGParser('grammar.txt')
    tim = []
    lent = []
    for sentence in infile:
        t0 = time.time()
        parse_tree = my_grammer.parse(sentence.strip())
        t1 = time.time()
        tot = round(float(t1 - t0), 5)
        tim.append(tot)
        lent.append(round(float(len(sentence.split())), 5))
        outfile.write('{0}\n'.format(parse_tree))
    outfile.close()
class CFG_Grammar(Grammar):
    """CFG parser based on NLTK."""

    def loadGrammar(self, grammarFilename):
        preprocessed = self.preprocess_grammar(grammarFilename)
        self.neighbours = self.find_neighbours_of_variables()
        # self.parser = nltk.load_parser(grammarFilename, trace=1 if settings.VERBOSE["Parse"] else 0, cache=False)  # no longer parse.[...]
        # self.parser = nltk.parse.FeatureEarleyChartParser(nltk.grammar.FeatureGrammar.fromstring(preprocessed), trace=settings.VERBOSE["Parse"])
        self.parser = MyParser(
            nltk.grammar.FeatureGrammar.fromstring(preprocessed),
            trace=settings.VERBOSE["Parse"])

    def interpret(self, input, IS, DOMAIN, NEXT_MOVES, APICONNECTOR,
                  anyString=False, moves=None):  # overridden again in studip
        """Parse an input string into a dialogue move or a set of moves."""
        try:
            return self.parseString(input, IS, DOMAIN, NEXT_MOVES)
        except:
            pass
        try:
            if not all(i.isnumeric() for i in input):
                return eval(input)
        except:
            pass
        if anyString:
            return Answer(ShortAns(input))
        return set([])

    def partial_parse(self, tokens, IS, DOMAIN, NEXT_MOVES):
        typstringlist = self.parser.partial_parse(
            tokens, self.neighbours)  # TODO the partial parser must get better so that it returns an unambiguous answer
        for i, (typ, string) in enumerate(typstringlist):
            string2 = string.replace("_", " ").replace("?", "")
            # print("STRING", string)
            try:
                converted = self.use_converters(IS, DOMAIN, string2, typ)
                break
            except NotRecognizedException as e:
                if i < len(typstringlist):
                    pass
                else:
                    NEXT_MOVES.push(
                        State("I did not recognize the " + typ + " you queried!"))
                    raise e
        # print("CONVERTED", converted)
        tokens = " ".join(tokens).replace(string, "{" + typ + "}").split(" ")
        # print("TO BE PARSED NEXT:", tokens)
        trees = next(self.parser.parse(tokens))
        root = trees[0].label()
        root = deepcopy(dict(root))
        if root["sem"]["f"] == "given":
            root["sem"]["f"] = converted
            root["sem"]["fulfilltype"] = typ
        return root, converted, tokens

    def parseString(self, input, IS, DOMAIN, NEXT_MOVES):
        tokens = self.preprocess_input(input).split()
        try:
            trees = next(self.parser.parse(tokens))  # http://www.nltk.org/book/ch09.html
            root = trees[0].label()
        except:
            root, converted, tokens = self.partial_parse(tokens, IS, DOMAIN, NEXT_MOVES)
        try:
            return self.sem2move(root['sem'], IS, DOMAIN, NEXT_MOVES)
        except:
            pass
        try:
            return self.type2move(root[list(dict(root).keys())[0]])  # geez.
        except:
            pass
        return ""

    def use_converters(self, IS, DOMAIN, string, answertype):
        try:
            auth_string = IS.shared.com.get("auth_string").content[1].content
            content = DOMAIN.converters[answertype](auth_string, string)
            if not content:
                raise NotRecognizedException
            return content
        except Exception as e:
            # if there is no auth string yet, it simply does not understand this (!)
            raise NotRecognizedException

    def preprocess_input(self, input):
        input = input.lower()
        for tofind, replacewith in sorted(list(self.longstrings.items()),
                                          key=lambda item: len(item[1]),
                                          reverse=True):
            if tofind in input:
                # otherwise spaces inside a speech act are not possible
                input = input.replace(tofind, replacewith)
        return input

    def type2move(self, roottype):
        if roottype == "QUIT":
            return Quit()
        raise Exception

    def sem2move(self, sem, IS, DOMAIN, NEXT_MOVES):
        # sem e.g.: [Ask = 'needvisa'] [subtype = 'YNQ']
        try:
            return Answer(sem['Answer'])
        except:
            pass
        try:
            ans = sem['Answer']
            pred = ans['pred']
            ind = ans['ind']
            # return Answer(Prop((Pred1(pred, Ind(ind), True))))
            return Answer(pred + "(" + ind + ")")
        except:
            pass
        try:
            if settings.VERBOSE["Question"]:
                print("THE QUESTION WAS:\n" + str(sem))
            sem["Ask"]
            if sem["subtype"] == "YNQ":
                return Ask(YNQ(Prop(Pred0(sem["Ask"]))), askedby="USR")
            elif sem["subtype"] == "WHQ":
                return Ask(WhQ(Pred1(sem['Ask'])), askedby="USR")
            elif sem["subtype"] == "SecOrdQ":
                if not sem.get("f") or str(sem["f"]).startswith("?"):
                    return Ask(SecOrdQ(Pred2(sem['Ask'], DOMAIN)), askedby="USR")
                else:
                    # range[1] is the new question, range[0] the answer type
                    range = DOMAIN.preds2[sem['Ask']]
                    if sem.get("fulfilltype"):
                        range = [i for i in range if i[0] == sem["fulfilltype"]]
                    try:
                        # TODO: why am I converting twice?
                        content = self.use_converters(IS, DOMAIN, sem["f"], range[0][0])
                    except:
                        content = sem["f"]
                    return Ask(WhQ(Pred1(range[0][1], content, createdfrom=sem['Ask'])),
                               askedby="USR")
        except:
            pass
        try:
            cmd = sem["Command"]
            if not cmd.startswith("!("):
                cmd = "!(" + cmd + ")"
            return Imperative(Command(cmd))
        except:
            pass
        raise Exception

    ####################################################################################################################
    ############################################# preprocessing stuff ##################################################
    ####################################################################################################################

    def preprocess_grammar(self, grammarFilename):
        preprocessed = ''
        self.longstrings = {}
        self.variables = {}
        self.variablepath = {}
        self.all_sents = set()
        with open(grammarFilename, "r", encoding="utf-8") as f:
            lines = [line for line in f]
        for i in range(len(lines)):
            lines[i] = self.line_ops(lines[i], self.variablepath, self.all_sents)
            lines[i] = self.incorporate_optionals(lines[i])
            lines[i] = self.find_longstrings(lines[i])
            # other line-operations here (on line)
        for i in list(self.all_sents):
            # print(self.preprocess_input(i+" something").split())
            # print(self.partial_parse())
            # TODO - once partial parsing unambiguously returns only the truly possible candidates
            # (once the phrase-structure parts are included), use the right ones here to suggest
            # things like "wann ist {semester}"
            pass
        preprocessed = "\n".join(lines)
        # other overall operations here (on preprocessed)
        for key, val in self.longstrings.items():
            preprocessed = preprocessed.replace("'" + key + "'", "'" + val + "'")
        return preprocessed

    def find_neighbours_of_variables(self):
        tmp = [(v, k) for k, v in self.variablepath.items()]
        # for key, val in tmp:
        #     print(key, "->", val)
        # okay, to follow this you have to print tmp before and after and watch what happens...
        # the point is: we work out which symbols the variables can be adjacent to.
        whattoreplace = list(self.variables.items())
        while len(whattoreplace) > 0:
            innerkey, innerval = whattoreplace[0]
            for key, val in tmp:
                if val == innerkey:
                    tmp.append((key, innerval))
                    whattoreplace.append((key, innerval))
                elif " " in val:
                    reconstr = []
                    for i in val.split(" "):
                        reconstr.append(i if i != innerkey else innerval)
                    if " ".join(reconstr) != val:
                        tmp.append((key, " ".join(reconstr)))
                        whattoreplace.append((key, " ".join(reconstr)))
            del whattoreplace[0]
        # for key, val in tmp:
        #     print(key, "->", val)
        neighbours = set()
        for _, val in tmp:
            if " " in val:
                pos = val.split(" ")
                for i in range(len(pos)):
                    if pos[i] in self.variables.values():
                        if i > 0:
                            neighbours.add((pos[i - 1], "r", pos[i]))
                        if i < len(pos) - 1:
                            neighbours.add((pos[i + 1], "l", pos[i]))
        return list(neighbours)

    def rem_spaces(self, text):
        text = text.replace("\n", "")
        while text.startswith(" "):
            text = text[1:]
        while text.endswith(" "):
            text = text[:-1]
        return text

    def remove_bracks(self, str):
        return re.sub(' +', ' ',
                      re.sub("\[.*?\]", "", str).replace("[", "").replace("]", ""))

    def line_ops(self, line, variablepath, all_sents):
        if not line.startswith('#') and "->" in line and "'" in line:  # terminals
            strings = re.findall("-> ?'(.*?)'", line) + re.findall("\| ?'(.*?)'", line)
            for curr in strings:
                line = line.replace("'" + curr + "'", "'" + curr.lower() + "'")
                tmp = self.find_variables(curr, line, self.variables)
                if tmp:
                    self.variablepath[self.rem_spaces(tmp[1])] = self.rem_spaces(tmp[0])
                # other string-operations here (on strings, an array)
            if any(line.startswith(i) for i in ["CMD", "WHQ", "SecOrdQ", "YNQ"]):
                all_sents.add(self.remove_bracks(strings[0]))
        elif not line.startswith('#') and "->" in line:  # non-terminals
            # print(line)
            l = re.sub("\[.*?\]", "", line).replace("[", "").replace("]", "")
            rightpart = l[l.find("->") + 2:]
            while rightpart.find("|") > 0:
                variablepath[self.rem_spaces(rightpart[:rightpart.find("|") - 1])] = \
                    self.rem_spaces(l[:l.find("->")])
                rightpart = rightpart[rightpart.find("|") + 1:]
            variablepath[self.rem_spaces(rightpart)] = self.rem_spaces(l[:l.find("->")])
        return line

    def find_variables(self, string, line, appendto):
        if "{" in string and "}" in string:
            var = re.findall("\{(.*?)\}", string)[0]
            otherside = re.sub("\[.*?\]", "", line[:line.find("->")])
            newline = otherside, "'{" + var + "}'"
            appendto[self.rem_spaces(otherside)] = "'{" + var + "}'"
            return newline
        return False

    def incorporate_optionals(self, line):
        if not line.startswith('#') and "->" in line and "'" in line:
            strings = re.findall("-> ?'(.*?)'", line) + re.findall("\| ?'(.*?)'", line)
            newstrings = []
            for curr in strings:
                optionals = re.findall(r"\[[a-zA-Z ]+?\]", curr)
                curr = re.sub(r"\[([a-zA-Z ]+?)\]", r"[*]", curr)
                if len(optionals) > 0:
                    i = 0
                    while "[*]" in curr:
                        curr = curr.replace("[*]", "[" + str(i) + "]", 1)
                        i += 1
                    for i in range(2 ** len(optionals)):
                        # for e.g. 3 optionals this is all combinations from 000 -> 111
                        formatstr = "0" * (len(optionals) - len("{0:b}".format(i))) + "{0:b}".format(i)
                        newone = curr
                        for i, kommtvor in enumerate(formatstr):
                            if kommtvor == "1":
                                newone = newone.replace("[" + str(i) + "]", optionals[i][1:-1])
                            else:
                                newone = newone.replace(" [" + str(i) + "]", "")
                        newstrings.append(newone)
                else:
                    newstrings.append(curr)
            newstrings = list(map(lambda x: "'" + x + "'", newstrings))
            newline = line[:line.index("->")] + "-> " + " | ".join(newstrings)
        else:
            newline = line
        return newline

    def find_longstrings(self, line):
        if not line.startswith('#') and "->" in line and "'" in line:
            strings = re.findall("-> ?'(.*?)'", line) + re.findall("\| ?'(.*?)'", line)
            self.longstrings = {
                **self.longstrings,
                **{i: i.replace(" ", "_") for i in strings if " " in i}
            }
            # longerstrings.extend([i for i in strings if " " in i])
        return line
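incorporate_optionals above expands every bracketed word in a grammar rule into present/absent alternatives, so a rule with n optionals yields 2**n variants. A standalone sketch of the same idea; the phrase and the helper name expand_optionals are illustrative, not the grammar's own:

# illustrative reproduction of the optional-word expansion
import itertools

def expand_optionals(phrase):
    tokens = phrase.split()
    optional_idx = [i for i, t in enumerate(tokens) if t.startswith("[") and t.endswith("]")]
    variants = []
    for mask in itertools.product([True, False], repeat=len(optional_idx)):
        keep = dict(zip(optional_idx, mask))
        words = []
        for i, t in enumerate(tokens):
            if i in keep:
                if keep[i]:
                    words.append(t[1:-1])  # keep the optional word, brackets stripped
            else:
                words.append(t)
        variants.append(" ".join(words))
    return variants

print(expand_optionals("show [me] the [full] schedule"))
# ['show me the full schedule', 'show me the schedule',
#  'show the full schedule', 'show the schedule']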
class TestParser(unittest.TestCase):

    def setUp(self):
        self.parser = MyParser()
        Constant.clean_up()
        BinOp.clean_up()
        BinOpReversable.clean_up()

    def run_tests_almost(self, tests):
        [self.check_in_scope_f(k, v) for k, v in tests.items()]

    def run_tests_is(self, tests):
        [self.check_in_scope_i(k, v) for k, v in tests.items()]

    def check_in_scope_f(self, text, value):
        error = 1e-5
        scope = Root()
        scope.code = self.parser.parse(text)
        ret_type, ret_val = scope.eval_in_scope()
        v_type, v_val = value
        self.assertEqual(ret_type, v_type)
        self.assertTrue(abs(ret_val - v_val) < error)

    def check_in_scope_i(self, text, value):
        scope = Root()
        scope.code = self.parser.parse(text)
        evaluated = scope.eval_in_scope()
        if not isinstance(value, list):
            evaluated = evaluated
        self.assertEqual(evaluated, value)

    def test_binop_number(self):
        tests = {
            "1 + 1": ("NUMBER", 2),
            "1 - 1": ("NUMBER", 0),
            "-1 + 1": ("NUMBER", 0),
            "123 + 321": ("NUMBER", 444),
            "0.5 - 0.5": ("NUMBER", 0),
            "1.67 + 4": ("NUMBER", 5.67),
            "10 / 1": ("NUMBER", 10),
            "10 / 10": ("NUMBER", 1),
            "1 / 10": ("NUMBER", 0.1),
            "151 / 2": ("NUMBER", 75.5),
            "3 * 2": ("NUMBER", 6),
            "0 * 5": ("NUMBER", 0),
            "-5 * 0 ": ("NUMBER", 0),
            "1 + 1 + 1": ("NUMBER", 3),
            "1 - 1 + 1": ("NUMBER", 1),
            "2 * 3 + 2": ("NUMBER", 8),
            "2 / 2 + 1": ("NUMBER", 2),
            "2 + 2 * 3": ("NUMBER", 8),
            "-5 * 2": ("NUMBER", -10),
            "-5 * -2": ("NUMBER", 10),
            "-(5 * 2)": ("NUMBER", -10),
            "5 * (-10)": ("NUMBER", -50),
            "13 * -2": ("NUMBER", -26),
            "2 ** 2": ("NUMBER", 4),
            "10 ** -1": ("NUMBER", 0.1),
        }
        self.run_tests_is(tests)

    def test_special_func(self):
        tests = {
            "cos 0": ("NUMBER", 1),
            "sin 0": ("NUMBER", 0),
            "cos 3.141592653589": ("NUMBER", -1),
            "sin 3.141592653589": ("NUMBER", 0),
            "sin 2*3.141592653589": ("NUMBER", 0),
            "cos 2*3.141592653589": ("NUMBER", 1),
            "sin 3.141592653589/6": ("NUMBER", 0.5),
            "sin 3.141592653589/2": ("NUMBER", 1),
            "cos 3.141592653589/3": ("NUMBER", 0.5),
            "cos 3.141592653589/2": ("NUMBER", 0),
        }
        self.run_tests_almost(tests)

    def test_log(self):
        tests = {
            "True && True": ("BOOL", True),
            "True && False": ("BOOL", False),
            "False && False": ("BOOL", False),
            "True || True": ("BOOL", True),
            "True || False": ("BOOL", True),
            "False || False": ("BOOL", False),
        }
        self.run_tests_is(tests)

    def test_rel(self):
        tests = {
            "10 > 10": ("BOOL", False),
            "-10 > -10": ("BOOL", False),
            "10 >= 1": ("BOOL", True),
            "10 <= 1": ("BOOL", False),
            "0.5 < 1": ("BOOL", True),
            "0.5 > 0.25": ("BOOL", True),
        }
        self.run_tests_is(tests)

    def test_conversion(self):
        tests = {
            '"michal" + 12': ("STRING_T", "michal12"),
            '12 + "2"': ("NUMBER", 14),
            "1 + True": ("NUMBER", 2),
            "True && 0": ("BOOL", False),
        }
        self.run_tests_is(tests)

    def test_unary_minus(self):
        tests = {
            "-(-10)": ("NUMBER", 10),
            "-10": ("NUMBER", -10),
            "-0": ("NUMBER", 0),
            "-(10)": ("NUMBER", -10),
            "12 + (-2)": ("NUMBER", 10),
        }
        self.run_tests_is(tests)

    def test_multiple_lines(self):
        tests = {
            "10 + 12; 10 - 1": ("NUMBER", 9),
            "{10 + 12; 10 - 1}": ("NUMBER", 9),
        }
        self.run_tests_is(tests)

    def test_if_else(self):
        tests = {
            "IF (True) { 10 } ELSE { 11 }": ("NUMBER", 10),
            "IF (False) { 10 } ELSE { 11 } ": ("NUMBER", 11),
        }
        self.run_tests_is(tests)

    def test_for(self):
        text = "INT a = 0 ; FOR (INT i = 0 ; i < 4 ; i = i + 1) { a = a + i }"
        scope = Root()
        scope.code = self.parser.parse(text)
        scope.eval_in_scope()
        scope.code = self.parser.parse("a")
        evaluated = scope.eval_in_scope()
        self.assertEqual(evaluated, ("NUMBER", 6))

    def eval_in_scope(self, scope, text, value):
        scope.code = self.parser.parse(text)
        evaluated = scope.eval_in_scope()
        self.assertEqual(evaluated, value)

    def test_variable_init(self):
        text = 'INT a = 0 ; STRING b = "test" ; DOUBLE c = 0.15 ; BOOL d = True'
        scope = Root()
        scope.code = self.parser.parse(text)
        scope.eval_in_scope()
        self.eval_in_scope(scope, "a", ("NUMBER", 0))
        self.eval_in_scope(scope, "b", ("STRING_T", "test"))
        self.eval_in_scope(scope, "c", ("NUMBER", 0.15))
        self.eval_in_scope(scope, "d", ("BOOL", True))

    def test_type_control(self):
        text = 'INT a = 0 ; STRING b = "test" ; DOUBLE c = a + b'
        scope = Root()
        scope.code = self.parser.parse(text)
        self.assertRaises(Exception, scope.eval_in_scope)

    def test_overload_ops(self):
        tests = {
            '"123" + "1"': ("STRING_T", "1231"),
            '"test" + " " + "test1" + " 1231"': ("STRING_T", "test test1 1231"),
        }
        self.run_tests_is(tests)

    def test_variable_assign(self):
        text = 'INT a = 123123 ; a = 123 + 123 ; STRING s = "" ; s = "sweetnight" + " " + "to znaczy slodka noc"'
        scope = Root()
        scope.code = self.parser.parse(text)
        scope.eval_in_scope()
        self.eval_in_scope(scope, "a", ("NUMBER", 246))
        self.eval_in_scope(scope, "s", ("STRING_T", "sweetnight to znaczy slodka noc"))

    def test_global(self):
        text = "INT i = 0 ; FOR (INT i = 0 ; i < 4 ; i = i + 1) { i = i + i }"
        scope = Root(code=self.parser.parse(text))
        scope.eval_in_scope()
        scope.code = self.parser.parse("i")
        evaluated = scope.eval_in_scope()
        self.assertEqual(evaluated, ("NUMBER", 0))
        text = "{ GLOBAL INT x = 2138 }"
        scope = Root(code=self.parser.parse(text))
        scope.eval_in_scope()
        scope.code = self.parser.parse("x")
        evaluated = scope.eval_in_scope()
        self.assertEqual(evaluated, ("NUMBER", 2138))

    def test_fn_definition(self):
        text = "DEF INT fun( INT i ) { i }"
        scope = Root()
        scope.code = self.parser.parse(text)
        scope.eval_in_scope()
        scope.code = self.parser.parse("fun(10)")
        evaluated = scope.eval_in_scope()
        self.assertEqual(evaluated, ("NUMBER", 10))
        text = "DEF STRING fun1( INT i , STRING s ) { s + i }"
        scope = Root()
        scope.code = self.parser.parse(text)
        scope.eval_in_scope()
        scope.code = self.parser.parse('fun1(1 , "halo" )')
        evaluated = scope.eval_in_scope()
        self.assertEqual(evaluated, ("STRING_T", "halo1"))

    def test_unnecessary(self):
        text1 = "{1; 2; 3; RETURN 4; 5}"
        text2 = "{1; 2; 3; RETURN 4}"
        scope1 = Root(code=self.parser.parse(text1))
        scope2 = Root(code=self.parser.parse(text2))
        self.assertEqual(scope1.code, scope2.code)

    def parse_2(self, text_dict):
        for k, v in text_dict.items():
            code1 = self.parser.parse(k)
            code2 = self.parser.parse(v)
            self.assertEqual(code1, code2)

    def test_algebra_opt(self):
        test = {
            "x + 0": "x",
            "0 + x": "x",
            "x ** 2": "x * x",
            "x / 1": "x",
            "x / 2": "x * 0.5",
            "x * 2": "x + x",
        }
        self.parse_2(test)

    def test_reverse_opt(self):
        test = {"a * b": "b * a", "a + b": "b + a"}
        self.parse_2(test)

    def test_constant_opt(self):
        test = {
            "2 * 3": "6",
            "1 + 2 + 3": "6",
            "2 + 3 * 4": "14",
            "(12 - 5) * 2 + 3": "17",
            '"michal " + "dygas"': '"michal dygas"',
        }
        self.parse_2(test)

    def test_nested(self):
        text = "DEF INT fun1(INT i) { i }; DEF INT fun2(INT i ) { 1 + fun1(1)}; DEF INT fun3(INT i) { i - 1 }"
        scope = Root()
        scope.code = self.parser.parse(text)
        scope.eval_in_scope()
        self.eval_in_scope(scope, "fun1(1)", ("NUMBER", 1))
        self.eval_in_scope(scope, "fun2(1)", ("NUMBER", 2))
        self.eval_in_scope(scope, "fun3(fun1(2))", ("NUMBER", 1))

    def test_hoist(self):
        text1 = "INT x = 12; INT z = 0; INT y = 1; FOR(INT i = 0 ; i < 2 ; i = i + 1 ) { z = x + y } "
        text2 = "x + y"
        for_loop = self.parser.parse(text1)
        expr = self.parser.parse(text2)
        self.assertTrue(expr[0] in for_loop[-1].before.values())
        scope = Root(code=for_loop)
        scope.eval_in_scope()
        self.eval_in_scope(scope, "z", ("NUMBER", 13))

    def test_explicit_conversion(self):
        tests = {
            "toStr 1": ("STRING_T", "1"),
            '(toStr 15) + "michal"': ("STRING_T", "15michal"),
            "(toNumb True) + 21": ("NUMBER", 22),
            "toStr 23 * 5 + 2": ("STRING_T", "117"),
        }
        self.run_tests_is(tests)
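Taken together, these tests sketch the small expression language MyParser accepts (arithmetic, logical operators, IF/ELSE, FOR, DEF, toStr/toNumb). A short driver in the same Root/parser style the tests use; the program text is made up and the expected result follows the tests' ("TYPE", value) convention:

# illustrative driver in the style of the tests above
parser = MyParser()
scope = Root()
scope.code = parser.parse("INT a = 2 ; IF (a > 1) { a * 3 } ELSE { 0 }")
print(scope.eval_in_scope())  # expected ("NUMBER", 6) under the tested semantics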
from myparser import MyParser
from model import Model
from cli import Cli
import sys
import os

# DEFAULT PARAMETERS
NB_OF_BITS = 18
MODEL_NAME = None

# GET OPTIONS
dir_path = os.path.dirname(os.path.realpath(__file__))
full_path = os.path.join(dir_path, '../options.py')
exec(open(full_path).read())
get_opts()

model = Model()
model.load(MODEL_NAME)
parser = MyParser()
cli = Cli(parser, model, NB_OF_BITS)
cli.run()