def main():
    """Interactive entry point: run the LL(1) parser on one of two grammars.

    Option "1" parses a sequence file against gr1.txt; option "2" parses a
    program intermediate form (PIF) against grammar-ioana.txt.  Each branch
    prints either an error message, the failing-token report, or the tree.
    """
    print_menu()
    cmd = input(">")
    if cmd == "1":
        parser = Parser("gr1.txt")
        seq = read_seq("seq.txt")
        parser.create_the_nightmare_table()
        parsing = parser.parse(seq, ["eps", "S"])
        if not parsing:
            print("amu ii bai")
        elif isinstance(parsing[0], str):
            print("amu ii bai si incepe la tokenul:")
            # BUG FIX: list.reverse() mutates in place and returns None, so
            # the original print(parsing.reverse()) always printed "None".
            # Reverse first, then print -- matching the cmd == "2" branch.
            parsing.reverse()
            print(parsing)
        else:
            tree = ParseTree(parser.ll1, parsing, parser.productions)
            print(str(tree))
    if cmd == "2":
        parser = Parser("grammar-ioana.txt")
        out = read_pif("PIFU_BUN2.txt")
        # print(out)
        parser.create_the_nightmare_table()
        parsing = parser.parse(out, ["eps", "START"])
        print(parsing)
        if not parsing:
            print("amu ii bai")
        elif isinstance(parsing[0], str):
            print("amu ii bai si incepe la tokenul:")
            parsing.reverse()
            print(parsing)
        else:
            tree = ParseTree(parser.ll1, parsing, parser.productions)
            print(str(tree))
def test_5():
    """A malformed regexp must make the parser raise ParseError."""
    try:
        regexp = "01('=34567)"
        my_scan = Scanner(regexp)
        my_parser = Parser(my_scan)
        my_parser.parse()
    except ParseError:
        print("test 5: Incorrect regexp")
    else:
        # BUG FIX: the test previously passed silently when no error was
        # raised, so it could never detect a regression in error handling.
        raise AssertionError("test 5: expected ParseError for " + regexp)
def test_6():
    """The DFA built from "(z*)" must reject strings of other characters."""
    token_parser = Parser(Scanner("(z*)"))
    token_parser.parse()
    nfa = NFA_Constructor().construct_nfa(token_parser.token_list)
    dfa = DFA_Constructor().construct_dfa(nfa)
    for rejected in ("a", "4", "26546", "Owskemg"):
        assert dfa.walk_dfa(rejected) == False
def test_4():
    """Only full matches of ytut(a|g)*lk are accepted."""
    token_parser = Parser(Scanner("ytut(a|g)*lk"))
    token_parser.parse()
    nfa = NFA_Constructor().construct_nfa(token_parser.token_list)
    dfa = DFA_Constructor().construct_dfa(nfa)
    assert dfa.walk_dfa("a") == False
    assert dfa.walk_dfa("ytu") == False
    assert dfa.walk_dfa("ytutalk") == True
def test_1():
    """a|b* accepts a single 'a' or any run of 'b', nothing else."""
    token_parser = Parser(Scanner("a|b*"))
    token_parser.parse()
    nfa = NFA_Constructor().construct_nfa(token_parser.token_list)
    dfa = DFA_Constructor().construct_dfa(nfa)
    expectations = [
        ("ab", False),
        ("a", True),
        ("aa", False),
        ("ba", False),
        ("bbbb", True),
    ]
    for word, accepted in expectations:
        assert dfa.walk_dfa(word) == accepted
def test_3():
    """(cd*|bha)*jk accepts "jk" (zero repetitions) and rejects fragments."""
    token_parser = Parser(Scanner("(cd*|bha)*jk"))
    token_parser.parse()
    nfa = NFA_Constructor().construct_nfa(token_parser.token_list)
    dfa = DFA_Constructor().construct_dfa(nfa)
    assert dfa.walk_dfa("jk") == True
    for rejected in ("k", "mf", "e", "t"):
        assert dfa.walk_dfa(rejected) == False
class Compiler():
    """Parse source text into an AST and print the generated output.

    NOTE(review): Python 2 code (statement-form print below).
    """

    def __init__(self):
        # A single Parser instance is reused across compile() calls.
        self.parser = Parser()

    def compile(self, text):
        # Parse the text, stream the AST through a Writer, then dump it.
        ast = self.parser.parse(text)
        out = Writer()
        ast.write(out)
        # Presumably `out.str` accumulates everything written -- confirm in Writer.
        print out.str
def run(code):
    """Scan, parse, resolve, and interpret *code*, stopping at the first
    phase that reports an error via the global error flag."""
    tokens = Scanner(code).scan_tokens()
    if error.had_error:
        return
    stmts = Parser(tokens).parse()
    if error.had_error:
        return
    resolver.Resolver(interp).resolve(stmts)
    if error.had_error:
        return
    interp.interpret(stmts)
def setup():
    """Write a synthetic tab-separated log covering every aggregation
    window (minutes/hours/day/months/years), then feed it to the parser."""
    parser = Parser(folder='tests')
    samples = [
        # test minutes
        ('2015-08-01 00:00:00', 'query1', 10),
        ('2015-08-01 00:00:01', 'query2', 4),
        ('2015-08-01 00:00:02', 'query3', 3),
        ('2015-08-01 00:00:03', 'query4', 2),
        ('2015-08-01 00:00:04', 'query5', 1),
        # test hours
        ('2015-08-01 00:01:00', 'query6', 5),
        ('2015-08-01 00:02:00', 'query7', 3),
        ('2015-08-01 00:03:00', 'query8', 2),
        ('2015-08-01 00:04:00', 'query9', 1),
        # test day
        ('2015-08-01 01:00:00', 'query10', 6),
        ('2015-08-01 02:00:00', 'query11', 3),
        ('2015-08-01 03:00:00', 'query12', 2),
        ('2015-08-01 04:00:00', 'query13', 1),
        # test months
        ('2015-08-02 00:00:00', 'query14', 7),
        ('2015-08-03 00:00:00', 'query15', 3),
        ('2015-08-04 00:00:00', 'query16', 2),
        ('2015-08-05 00:00:00', 'query17', 1),
        # test years
        ('2015-09-01 00:00:00', 'query18', 8),
        ('2015-10-01 00:00:00', 'query19', 3),
        ('2015-11-01 00:00:00', 'query20', 2),
        ('2015-12-01 00:00:00', 'query21', 1),
    ]
    blocks = []
    for timestamp, value, number in samples:
        # Each sample appears `number` times, one "timestamp<TAB>value" per line.
        blocks.append('\n'.join(['{}\t{}'.format(timestamp, value)] * number))
    logs = '\n'.join(blocks)
    with open('tests/logs.tsv', 'w+') as fd:
        fd.write(logs)
    parser.parse('tests/logs.tsv')
if __name__ == "__main__": parser = argparse.ArgumentParser(description=DESCRIPTION) parser.add_argument("files", nargs='*', default=[], help="Path to an arrival FILE.", metavar="FILE") args = parser.parse_args() queue = lib.PokemonQueue() app = flask.Flask(__name__) if len(args.files) > 0: from parsing import Parser arrivals = [] for file_ in args.files: arrivals.append(lib.AsyncEnqueue(queue, Parser.parse(file_))) for thread in arrivals: thread.start() else: queue.recieve_batch() api = rest.Api(app) api.add_resource(lib.LastQueue, '/last', resource_class_args=(queue, )) app.run(host=config.HOST, port=str(config.PORT)) exit()
def pikabu():
    """Parse the latest posts and render them into the pikabu template."""
    posts = Parser().parse()
    return render_template('pikabu.html',
                           posts=posts,
                           posts_len=len(posts[1]))
import sys
from parsing import Parser
from state import State

# Python 2 script.  Usage: python <script> <program-file> <state-file>
prog = sys.argv[1]
state = sys.argv[2]

# Load the initial interpreter state from disk.  # e.g. 'test/test.state'
s = State.load(state)
print "Initial state:", s

parser = Parser()
# Parse the While-language program.  # e.g. "test/test.while"
f = parser.parse(prog)
print f, s

# Evaluate the program against the state; the True flag presumably enables
# tracing -- TODO confirm against evaluate()'s definition.
final_state = f.evaluate(s, True)
print "Final state:", final_state
final_state.save("test/result.state")
from api import get_access_token, anaphoric, parse_sentences, parse
from parsing import Parser
from lex import lexical
import sys


def compiler(parsed):
    """Return the newline-joined conversion of every parsed element.

    Each element is expected to provide a ``conv()`` method returning a
    string (one line of generated code).
    """
    # Idiom: generator expression + join instead of append-loop.
    return "\n".join(p.conv() for p in parsed)


if __name__ == '__main__':
    filepath = sys.argv[1]
    with open(filepath, 'r') as f:
        text = f.read()
    access_token = get_access_token()
    data, users = anaphoric(access_token, text)
    string_parsed, expressions = parse_sentences(data)
    r = lexical(parse(access_token, string_parsed))
    parser = Parser(r, expressions, users)
    parsed = parser.parse()
    compiled = compiler(parsed)
    # SECURITY: exec() runs arbitrary generated code derived from the input
    # file -- only run this on trusted input.
    exec(compiled)
# NOTE(review): this chunk begins mid-function -- the enclosing `def` (the
# tail of a read_productions-style reader) precedes this excerpt; the
# indentation of the first four statements below is a best-guess reconstruction.
        line = f.readline().strip()
    for p in productions:
        print(p)
    return productions


def read_pif():
    """Read the program intermediate form from pif.txt, one entry per line,
    stopping at the first blank line."""
    text = ""  # NOTE(review): unused -- kept as-is
    sequence = []
    with open("pif.txt", "r") as f:
        line = f.readline().strip()
        while line != "":
            sequence.append(line)
            line = f.readline().strip()
    return sequence


# Script driver: assemble the grammar, build the LR machinery, parse the PIF,
# and print the resulting parse tree breadth-first.
non_terminals, terminals, initial_state = read_part_grammar()
# print(non_terminals)
# print(terminals)
# print(initial_state)
productions = read_productions()
g = Parser(non_terminals, terminals, productions, initial_state)
g.canonical_collection()
g.build_actions()
sequence = read_pif()
g.parse(sequence, g.parse_table)
g.build_parse_tree()
print('Parse tree:')
g.tree_root.bfs()
# NOTE(review): this chunk begins mid-method -- the class header and the start
# of the tree-building method precede this excerpt; the indentation of the
# first four statements below is a best-guess reconstruction.
            stack.append(prunc)
            self.nodes.append(prunc)
            # Advance past this production's right-hand side in the flat list.
            self.crt += len(list_of_rhs) + 1
            index += 1

    def __str__(self):
        # Render one line per node: index, value, parent index, sibling index.
        string = ""
        for node in self.nodes:
            substring = ""
            substring += str(node.index) + " " + str(node.val) + " " + str(
                node.parent) + " " + str(node.soeur) + "\n"
            string += substring
        return string


if __name__ == "__main__":
    # Smoke test: parse two token sequences with the LL(1) table; the second
    # parse's result is then turned into a ParseTree and printed.
    parser = Parser("grammar-ioana.txt")
    parser.create_the_nightmare_table()
    parsing = parser.parse(['eps', 'intConst', '+', 'intConst'],
                           ['eps', 'START'])
    print(parsing)
    parsing = parser.parse(
        ['eps', '}', ';', 'intConst', '<-', 'id', '{', 'main_body'],
        ['eps', 'START'])
    print(parsing)
    tree = ParseTree(parser.ll1, parsing, parser.productions)
    print(str(tree))
""" if __name__ == "__main__": translator = Translator() parser = Parser(instdict) outASM = [] try: givenpath = sys.argv[1] except IndexError: usage() exit() if os.path.isdir(givenpath): for file in os.listdir(givenpath): if file[-3:] == ".vm": classname = file[:-3] translator.newclass(classname) outASM.append( translator.translate( parser.parse(os.path.join(givenpath, file)))) elif os.path.isfile(givenpath) and givenpath[-3:] == ".vm": outASM.append(translator.translate(parser.parse(givenpath))) else: print("Error: Path supplied is invalid!") exit() print "\n".join(common.flatten(outASM))
# TODO: Add fast exponentiation, modular arithmetic,
# sqrt, gcd

# Interactive calculator shell: reads a line, tokenizes, parses, and
# evaluates it.  "quit" exits; "help" lists supported operators.
print('Type "help" to learn about what options are available.')
while True:
    try:
        text = input('>>> ')
        command = text.strip().lower()
        if command == 'quit':
            sys.stdout.write('\n')
            break
        elif command == 'help':
            print("This shell can handle the following operations:")
            print("+, -, *, /, //, %, **")
        else:
            lexer = Lexer(text)
            tokens = list(lexer.generateTokens())
            parser = Parser(tokens)
            expression = parser.parse()
            interpreter = Interpreter()
            answer = interpreter.evaluate(expression)
            # BUG FIX: `if answer:` suppressed falsy results, so entering an
            # expression that evaluates to 0 printed nothing.
            if answer is not None:
                print(answer)
    except EOFError:
        sys.stdout.write('\n')
        break
    except KeyboardInterrupt:  # ctrl+c
        sys.stdout.write('\nKeyboardInterrupt\n')
        continue
    except Exception as e:
        print(e)
def usage(): print""" python S1VM.py <source> where <source> is a valid vm file or a folder containing vm files. """ if __name__=="__main__": translator=Translator() parser=Parser(instdict) outASM=[] try: givenpath = sys.argv[1] except IndexError: usage() exit() if os.path.isdir(givenpath): for file in os.listdir(givenpath): if file[-3:] == ".vm": outASM.append(translator.translate(parser.parse(os.path.join(givenpath, file)))) elif os.path.isfile(givenpath) and givenpath[-3:] == ".vm": outASM.append(translator.translate(parser.parse(givenpath))) else: print("Error: Path supplied is invalid!") exit() print "\n".join(common.flatten(outASM))
def start(file, resources):
    """Convert an exported WhatsApp chat (.txt) into an HTML transcript
    and open it in the default browser.

    Collects the participant names, asks which one is the local user
    (that side is styled differently), builds chat bubbles via Parser,
    and writes <chat>.html next to the input file.
    """
    content = ""
    with open(file, 'r') as f:
        content = f.readlines()
    # Matches "d/m/y h:m - Name: message" lines.
    search = re.compile("\d+/\d+/\d+ \d+:\d+ - ([^:]+): (.+)")
    names = []
    for line in content:
        match = search.match(line)
        if match:
            name = match.group(1)
            message = match.group(2)
            if message is not None:
                # Skip location shares (Google Maps links).
                match = re.compile("^https://maps.google.com/").match(message)
                if match is None:
                    if name not in names and len(
                            name
                    ) <= 25:  # Whatsapp's name limit is 25 characters
                        names.append(name)
    me = names[0]
    if len(names) == 1:
        # Only one participant found: confirm it is the local user.
        while True:
            _input = input("Are you {}? (y/n): ".format(names[0]))
            if re.compile("y|yes", re.IGNORECASE).match(_input):
                break
            elif re.compile("n|no", re.IGNORECASE).match(_input):
                me = None
                break
            else:
                print("Type yes or no")
    else:
        # Several participants: let the user pick themselves by number.
        print("Which one are you?\n\t{}".format('\n\t'.join([
            '{}. {}'.format(num + 1, name)
            for num, name in enumerate(names)
        ])))
        while True:
            _input = input('Type one of the above numbers: ')
            try:
                selected = int(_input) - 1
                me = names[selected]
                break
            except:
                continue
    parser = Parser(names, me)
    body = []
    current_date = ""
    for date, bubble in parser.parse(content, resources):
        if date is None:
            # Means that the last message contained a linebreak
            formerBubble = body[-1]
            if isinstance(formerBubble, Bubble):
                formerBubble.setMessage(
                    formerBubble.message +
                    bubble)  # In this case, data received is (None, string)
        else:
            # New message: insert a datestamp when the day changes.
            if date != current_date:
                body.append(Datestamp(date))
                current_date = date
            formerBubble = body[-1]
            if isinstance(formerBubble, Bubble) and isinstance(bubble, Bubble):
                if formerBubble.name == bubble.name:
                    # Consecutive bubbles from the same sender are merged
                    # visually: hide the repeated name and the arrow.
                    bubble.doHideName()
                    formerBubble.doHideArrow()
                else:
                    formerBubble.addSeparation()
            body.append(bubble)
    # NOTE(review): internal whitespace of this template was collapsed in the
    # excerpt; layout below is a reconstruction.  css_file is a module global.
    HTML = """<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>{}</title>
<link rel="stylesheet" type="text/css" href="{}">
</head>
<body>
<div class="speech-wrapper">
{}
</div>
</body>
</html>""".format(file[:len(file) - 4], css_file,
                  ''.join([message.inflate() for message in body]))
    filename = file.replace(' ', '_').replace('.txt', '.html')
    with open(filename, 'w') as f:
        f.write(HTML)
    err = open(os.devnull, 'w')  # Redirect to nowhere
    # Open the generated transcript in the default browser, silencing output.
    subprocess.call(["python", "-m", "webbrowser", "-t", filename],
                    stdout=err,
                    stderr=err)