'<': 9, '<=': 9, '==': 9, '!=': 9, '+': 10, '-': 10, '*': 11, '/': 11, 'UMINUS': 15, 'POSITIVE': 15 } assosiation = { # 结合律 # '+':'L', # '-':'L', # '*':'L', # '/':'L', # 'UMINUS':'R' } precs = {'UMINUS': ['E', '-', 'E'], 'POSITIVE': ['E', '+', 'E']} parser = Parser(productions, terminal, nonterminal, precs, precedence, assosiation) parser.generate() parser.htmlparse('test.html') # Parser.printitems(productions, printno=True) # cnt = Counter([p[0] for p in productions]) parser.htmlparse('test.html') print(cnt) print(calls) print(sorted(terminal)) print(len(terminal))
def parser_ripplecounter(names, devices, network, monitors):
    """Build a Parser wired to the ripple-counter definition file.

    Creates a Scanner over the module-level `ripplecounterpath` and
    returns a Parser sharing the given names/devices/network/monitors.
    """
    scanner = Scanner(ripplecounterpath, names)
    parser = Parser(names, devices, network, monitors, scanner)
    return parser
def __init__(self, pattern: str, view: _V):
    """Store the route pattern (normalised to a leading slash) and its view.

    Any pattern other than WILDCARD is prefixed with "/" if it does not
    already start with one; a Parser is built from the stored pattern.
    """
    needs_slash = pattern != WILDCARD and not pattern.startswith("/")
    self._pattern = f"/{pattern}" if needs_slash else pattern
    self._parser = Parser(self._pattern)
    self.view = view
def __init__(self, bot):
    """Keep a reference to the owning bot and create a fresh Parser."""
    self.p = Parser()
    self.bot = bot
def get_issues(url):
    """Analyse *url* and return a dict of phishing issues found.

    Returns {"isPhishing": True, "issues": [...]} where each issue is a
    dict with 'subject' and 'text' keys (HTML tooltip markup included),
    suitable for display to a young end user.
    """
    # checks on URL -- each helper returns a truthy flag (or, for
    # ip_address, a list of issue dicts) for one URL heuristic.
    url_ip = ip_address(url)
    domain = get_domain(url)
    lengthy_url = long_url(url)
    redir = redirection_double_slash(url)
    dash = domain_dash(url)
    https = https_domain(url)
    at = at_url(url)
    # process html text
    p = Parser()
    url_text = p.scrape_text(url)
    # grammar, site category checks
    pc = PhishCategorizer(url_text)
    r = pc.categorize()
    res = pc.check_grammar()
    win = r == PhishCategory.set_win
    bank = r == PhishCategory.set_bank
    personal = r == PhishCategory.set_personal
    impersonate = r == PhishCategory.impersonate
    # grammar score above 20 is treated as "badly written page"
    grammar = res > 20
    data = {"isPhishing":True, "issues": []}
    # ip_address() already yields issue dict(s); append directly
    if len(url_ip) > 0:
        data["issues"].append(url_ip)
    if lengthy_url:
        data["issues"].append({'subject': "This <span class='ui tooltip' data-tooltip='web address'>URL</span> is very long.", 'text': "Be wary of sites that have URLs this long, they're likely phishing sites."})
        data["issues"].append({'subject': "This <span class='ui tooltip' data-tooltip='web address'>URL</span> is very long.", 'text': "Remember, checking the URL is one of the easiest ways to recognize a phishing site!"})
    if redir:
        data["issues"].append({'subject': "This <span class='ui tooltip' data-tooltip='web address'>URL</span> actually consists of two URL separated by a '//'.", 'text': "This means that the site you're actually going to visit is the one that occurs after the '//'."})
        data["issues"].append({'subject': "This <span class='ui tooltip' data-tooltip='web address'>URL</span> actually consists of two URL separated by a '//'.", 'text': "This is one way phishing attackers can try to distract you!"})
    if dash:
        data["issues"].append({'subject': "This domain contains a dash.", 'text': "Most domains that contain a dash are from phishing websites."})
    if https:
        # NOTE(review): "it should never appears" is a grammar typo in this
        # user-facing string; left as-is here (doc-only change).
        data["issues"].append({'subject': "This domain contains 'https'.", 'text': "While the start of the <span class='ui tooltip' data-tooltip='web address'>URL</span> usually contains the pattern 'https', it should never appears in the domain part. If it does, it's most likely a phishing site!"})
    if at:
        data["issues"].append({'subject': "This <span class='ui tooltip' data-tooltip='web address'>URL</span> contains '@'.", 'text': "Anything that is written before the '@' is ignored by your web browser. Phishers use this character to distract you from the suspicious parts of the URL."})
    # category checks are mutually exclusive (elif chain): bank beats
    # personal beats win
    if bank:
        data["issues"].append({'subject': "This phishing site might be asking for your bank information.", 'text': "You probably don't have a bank account yet, but you still shouldn't fill in any forms like this. Always ask your parents if it's ok to submit information to any website."})
        data["issues"].append({'subject': "This phishing site might be asking for your bank information.", 'text': "If you were to submit your information to a site like this, the attackers could get access to your bank account and you could lose all your money!"})
    elif personal:
        data["issues"].append({'subject': "It appears that this page is asking for personal information.", 'text': "Phishing attackers try to trick you to give them your personal information, like your name or your address! When you are submitting any information to any website, ask your parents to check that it's ok."})
        data["issues"].append({'subject': "It appears that this page is asking for personal information.", 'text': "You should never tell strangers where you live. If you submitted information to a page like this, you could be doing just that!"})
    elif win:
        data["issues"].append({'subject': "This site looks like it's falsely promoting a reward to you.", 'text': "Phishing attackers put up such 'free prizes' to try to attract your attention and have you submit information to claim your reward."})
        data["issues"].append({'subject': "This site looks like it's falsely promoting a reward to you.", 'text': "It's not safe to give away your information online. Also, you won't actually be getting anything!"})
    elif impersonate:
        data["issues"].append({'subject': "This site looks like it's trying to impersonate a real company.", 'text': "When logging in to any website that looks familiar, make sure that the URL is what you expect it to be and that the page does not contain any weird artifacts."})
    if grammar:
        data["issues"].append({'subject': "This page looks like it was written by a phisher!", 'text': "Some phishing sites, like this one, are not written well. If you land on a webpage that contains weird language, go back - you're on a phishing site!"})
        data["issues"].append({'subject': "This page looks like it was written by a phisher!", 'text': "Sometimes you might enter pages that are written in a foreign language. You're better off moving away from those as well - you can't know what's on the page."})
        data["issues"].append({'subject': "This page looks like it was written by a phisher!", 'text': "Websites like this are usually for scams, where people try to take your money against fake products. You could be paying for something and not get anything! In addition, you might give someone your payment details and they could use them!"})
    # fallback text when no heuristic fired at all
    if len(data["issues"]) == 0:
        data["issues"].append({'subject': "You're looking at a phishing site that is very hard to distinguish from a normal website.", 'text': "It's difficult to say what makes this a phishing site, as it looks very similar to a normal website. For some phishing sites, that is the case."})
        data["issues"].append({'subject': "You're looking at a phishing site that is very hard to distinguish from a normal website.", 'text': "Besides inspecting the web page, you can also look at the address bar to distinguish phishing websites. Some phishing websites have weird <span class='ui tooltip' data-tooltip='web address'>URLs</span> that differ from normal sites."})
        data["issues"].append({'subject': "You're looking at a phishing site that is very hard to distinguish from a normal website.", 'text': "Sometimes, even that doesn't work as the phishing attacker can be very deceptive. That's why you have assistants like me to protect you!"})
    # closing message -- NOTE(review): placed at function level (always
    # appended); original indentation was ambiguous, verify against history.
    data["issues"].append({'subject': "That's it for this website.", 'text': "You'll be redirected to where you were before. Try to remember what you've learned!"})
    return data
def on_load_button(self, event):
    """Handle the event when the user clicks load button.

    Opens a file dialog, parses the chosen definition file, and either
    shows a paginated error window (on parse errors) or records the new
    file and closes this frame so it can be reloaded.
    """
    with wx.FileDialog(self, _("Open Definition file"),
                       wildcard="Definition files (*.txt)|*.txt",
                       style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as fileDialog:
        if fileDialog.ShowModal() == wx.ID_CANCEL:
            return
        self.pathname = fileDialog.GetPath()
        self.filename = self.path_leaf(self.pathname)
        # Build a fresh simulator stack and parse the chosen file.
        names = Names()
        devices = Devices(names)
        network = Network(names, devices)
        monitors = Monitors(names, devices, network)
        scanner = Scanner(self.pathname, names)
        parser = Parser(names, devices, network, monitors, scanner)
        parser.parse_network()
        error_list = scanner.error_list
        num_errors = len(error_list)
        # Errors are shown four per tab page.
        pages = math.ceil(num_errors / 4)
        if num_errors != 0:
            text_list = []
            tab_labels = []
            # Full pages get "1-4", "5-8", ... labels.
            for i in range(pages - 1):
                tab_labels.append("{}-{}".format(1 + i * 4, 4 + i * 4))
                label = 4 + i * 4
            # Label for the last (possibly partial) page.
            if num_errors == 1:
                tab_labels.append("1")
            elif num_errors <= 4:
                tab_labels.append("1-{}".format(num_errors))
            else:
                if (label + 1) == num_errors:
                    tab_labels.append("{}".format(num_errors))
                else:
                    tab_labels.append("{}-{}".format(
                        label + 1, num_errors))
            if num_errors == 1:
                overview = _("\nDefinition file '{}' contains {} error.")\
                    .format(self.filename, num_errors)
            else:
                overview = _("\nDefinition file '{}' contains {} errors.")\
                    .format(self.filename, num_errors)
            # Assemble the text body of each tab page.
            for i in range(pages):
                if i == 0:
                    text = '\n' + '*' * 76 + '\n'
                else:
                    # NOTE(review): "".format(...) always yields "" -- the
                    # arguments are ignored; presumably a leftover template.
                    text = "".format(self.filename, num_errors)
                for j in range(4):
                    try:
                        text += (error_list[j + i * 4] + "\n")
                    except IndexError:
                        # Pad short pages so every tab has the same height.
                        text += ('\n' * 8)
                text_list.append(text)
            frame = DefinitionErrors(self, title=_("Error!"),
                                     text=text_list, tabs=tab_labels,
                                     overview=overview)
            return
        # No errors: remember the file and close this frame.
        self.current_filename = self.filename
        self.current_pathname = self.pathname
        self.load_new = True
        self.Show(False)
        self.Destroy()
def fake_parser(tokens):
    """Wrap *tokens* in a FakeStream and return a Parser over it."""
    stream = FakeStream(tokens)
    return Parser(stream)
import os

from lexer import Lexer
from parse import Parser
from version import *

# Fix: `import os` appeared twice in the original; the duplicate is removed
# and imports are grouped stdlib-first.

# Clear the terminal ('cls' on Windows, 'clear' elsewhere).
os.system('cls' if os.name == 'nt' else 'clear')
print("equationsolver@{}".format(version))

# Read-eval-print loop: lex the input line, parse it, show the AST.
while True:
    text = input('solve > ')
    lexer = Lexer(text)
    tokens = lexer.generate_tokens()
    ast = Parser(tokens).parse()
    if not ast:
        continue
    print(ast)
# Driver script: scan a Pascal source file, parse it to bytecode, and run
# the bytecode in the emulator. (Python 2 -- uses print statements.)
from Pascal_Helper_Files.pascal_reader import PascalFile
from parse import Parser  # parser
from emulator import Emulator  # emulator

if __name__ == '__main__':
    # NOTE(review): `pprint` and `get_token` are used below but not imported
    # in this chunk -- presumably imported earlier in the file; verify.
    pretty_printer = pprint.PrettyPrinter()
    # UNCOMMENT the below statements one at a time
    # tokens = get_token(PascalFile(input_file_location='array_example.pas', output_location=''))
    # tokens = get_token(PascalFile(input_file_location='assignment_example.pas', output_location=''))
    # tokens = get_token(PascalFile(input_file_location='for_example.pas', output_location=''))
    # tokens = get_token(PascalFile(input_file_location='if_example.pas', output_location=''))
    tokens = get_token(
        PascalFile(input_file_location='while_example.pas',
                   output_location=''))
    # UNCOMMENT THE LINE BELOW TO TEST THE SCANNER --> YOU WILL SEE THE TOKENS
    # pretty_printer.pprint(tokens)
    print '----------------------------------'
    # UNCOMMENT THE LINE BELOW TO TEST THE PARSER
    # parser = Parser(token_list=tokens, verbose=True)
    parser = Parser(token_list=tokens)
    byte_array = parser.parse()
    # This prints the byte array, uncomment to see the bytearray
    # pretty_printer.pprint(byte_array)
    print '----------------------------------'
    emulator = Emulator(byte_array)
    emulator.start()
def load_file(self, fname):
    """Load a saved construction from *fname* into the editor.

    Parses the file into a tool dict, installs its steps/goals/proof into
    the environment, restores per-object hidden flags and label positions,
    and finally restores the viewport zoom. No-op when fname is None.
    """
    self.viewport.reset_tool()
    if fname is None:
        return
    self.update_title(fname)
    parser = Parser(self.imported_tools.tool_dict)
    parser.parse_file(fname, axioms=True)
    # The anonymous tool ('_', ()) holds the whole loaded construction.
    loaded_tool = parser.tool_dict['_', ()]
    steps = loaded_tool.assumptions
    names = [
        loaded_tool.var_to_name_proof[x]
        for x in range(len(loaded_tool.var_to_name_proof))
    ]
    if loaded_tool.implications:
        goals, proof = loaded_tool.implications, loaded_tool.proof
    else:
        goals, proof = None, None
    self.env.set_steps(steps, names=names, goals=goals, proof=proof)
    # hide objects
    visible = set(loaded_tool.result)  # old format
    if visible:
        for gi in range(len(self.vis.gi_to_hidden)):
            self.vis.gi_to_hidden[gi] = gi not in visible
    for gi, name in enumerate(names):  # new format
        # A "hide__<name>" pseudo-tool marks the object as hidden.
        if ("hide__{}".format(name), ()) in parser.tool_dict:
            self.vis.gi_to_hidden[gi] = True
    # set labels
    for gi, name in enumerate(names):  # new format
        # A "label__<name>" pseudo-tool encodes the label position.
        label_pos_tool = parser.tool_dict.get(
            ("label__{}".format(name), ()), None)
        if label_pos_tool is not None:
            self.vis.gi_label_show[gi] = True
            logic = LogicalCore(basic_tools=self.imported_tools)
            pos_l = label_pos_tool.run((), (), logic, 0)
            pos_n = [logic.num_model[x] for x in pos_l]
            t = self.vis.gi_to_type(gi)
            # Decode the stored position per geometric type.
            if t == Point:
                point, = pos_n
                position = point.a
                print(position)
            elif t == Line:
                position = tuple(d.x for d in pos_n)
            elif t == Circle:
                ang, offset = pos_n
                position = ang.data * 2, offset.x
            else:
                print(
                    "Warning: save label: unexpected type {} of {}".format(
                        t, name))
                continue
            self.vis.gi_label_position[gi] = position
    self.vis.refresh()
    # viewport zoom and position
    view_data_tool = parser.tool_dict.get(('view__data', ()), None)
    if view_data_tool is None:
        # Default viewport when the file carries no view data.
        self.viewport.set_zoom((375, 277), 1)
    else:
        logic = LogicalCore(basic_tools=self.imported_tools)
        anchor_l, zoom_l = view_data_tool.run((), (), logic, 0)
        anchor = logic.num_model[anchor_l].a
        zoom = logic.num_model[zoom_l].x
        self.viewport.set_zoom(anchor, zoom)
    self.reset_view()
from parse import Parser

# Parse the test scene description, then render it.
parser = Parser()
scene = parser.parse(r'out/my.test')
scene.render()
def main():
    """Read one line from stdin, lex and parse it, and print the result."""
    text = input()
    lexer = Lexer(text)
    # NOTE(review): despite the name, `ast` is the Parser object itself --
    # presumably `eval` below is a project-level evaluator that consumes it,
    # not the builtin. If it IS the builtin, evaluating data derived from
    # user input is a security risk; verify.
    ast = Parser(lexer)
    print(eval(ast))
def test_empty(self):
    """Parsing an empty token stream should yield no node."""
    result = Parser([]).parse()
    self.assertEqual(result, None)
def extract_airfields(filtericao=lambda x:True,purge=True):
    """Scrape the Swedish AIP PDFs and return (airfields, points).

    Walks the aerodrome directory, then each large airfield's own AIP
    documents, extracting positions, elevations, runways, frequencies,
    airspaces, holding/entry-exit points, and chart links. Also merges in
    extra airfields and the flygkartan.csv database, and writes two
    regression files. Returns a list of airfield dicts and a list of
    point dicts. (Python 2 code: xrange/has_key/print statements/ur"".)

    NOTE(review): indentation below was reconstructed from a collapsed
    source; nesting of a few branches is a best-effort reading -- verify
    against the original file before relying on exact control flow.
    """
    #print getxml("/AIP/AD/AD 1/ES_AD_1_1_en.pdf")
    ads=[]
    p=Parser("/AIP/AD/AD 1/ES_AD_1_1_en.pdf")
    points=dict()
    startpage=None
    # Locate the "Aerodrome directory" page in the AD 1 index document.
    for pagenr in xrange(p.get_num_pages()):
        page=p.parse_page_to_items(pagenr)
        if page.count("Aerodrome directory"):
            startpage=pagenr
            break
    if startpage==None:
        raise Exception("Couldn't find aerodrome directory in file")
    #print "Startpage: %d"%(startpage,)
    #nochartf=open("nochart.txt","w")
    # Scan directory pages: a row is an airfield name line followed by a
    # 4-letter ICAO line; record each row's y coordinate, then slice the
    # page into per-airfield bands and pull name/icao/pos/elev out.
    for pagenr in xrange(startpage,p.get_num_pages()):
        row_y=[]
        page=p.parse_page_to_items(pagenr)
        allines=[x for x in (page.get_lines(page.get_partially_in_rect(0,0,15,100))) if x.strip()]
        for item,next in zip(allines,allines[1:]+[""]):
            #print "item:",item
            m=re.match(ur"^\s*[A-ZÅÄÖ]{3,}(?:/.*)?\b.*",item)
            if m:
                #print "Candidate, next is:",next
                if re.match(r"^\s*[A-Z]{4}\b.*",next):
                    #print "Matched:",item
                    #print "y1:",item.y1
                    row_y.append(item.y1)
        for y1,y2 in zip(row_y,row_y[1:]+[100.0]):
            #print "Extacting from y-range: %f-%f"%(y1,y2)
            items=list(page.get_partially_in_rect(0,y1-0.25,5.0,y2+0.25,ysort=True))
            if len(items)>=2:
                #print "Extract items",items
                ad=dict(name=unicode(items[0].text).strip(),
                        icao=unicode(items[1].text).strip()
                        )
                #print "Icao:",ad['icao']
                assert re.match(r"[A-Z]{4}",ad['icao'])
                if not filtericao(ad): continue
                if len(items)>=3:
                    #print "Coord?:",items[2].text
                    m=re.match(r".*(\d{6}N)\s*(\d{7}E).*",items[2].text)
                    if m:
                        lat,lon=m.groups()
                        ad['pos']=parse_coords(lat,lon)
                        #print "Items3:",items[3:]
                        elev=re.findall(r"(\d{1,5})\s*ft"," ".join(t.text for t in items[3:]))
                        #print "Elev:",elev
                        assert len(elev)==1
                        ad['elev']=int(elev[0])
                ads.append(ad)
    # Airfields with no position in the directory have their own AD 2
    # documents -- call these the "big" airfields.
    big_ad=set()
    for ad in ads:
        if not ad.has_key('pos'):
            big_ad.add(ad['icao'])
    # Pass 1 over big airfields: visual approach charts (VAC) -- extract
    # holding points and entry/exit points.
    for ad in ads:
        icao=ad['icao']
        if icao in big_ad:
            if icao in ['ESIB','ESNY','ESCM','ESPE']:
                continue
            # Chart filename varies: try "_6_1" then fall back to "_6-1".
            try:
                p=Parser("/AIP/AD/AD 2/%s/ES_AD_2_%s_6_1_en.pdf"%(icao,icao))
            except:
                p=Parser("/AIP/AD/AD 2/%s/ES_AD_2_%s_6-1_en.pdf"%(icao,icao))
            ad['aipvacurl']=p.get_url()
            for pagenr in xrange(p.get_num_pages()):
                page=p.parse_page_to_items(pagenr)
                """
                for altline in exitlines:
                    m=re.match(r"(\w+)\s+(\d+N)\s*(\d+E.*)",altline)
                    if not m: continue
                    name,lat,lon=m.groups()
                    try:
                        coord=parse_coords(lat,lon)
                    except Exception:
                        continue
                    points.append(dict(name=name,pos=coord))
                """
                # kind 0 = holding points, kind 1 = entry/exit points;
                # the int is rebound to a descriptive string once matched.
                for kind in xrange(2):
                    if kind==0:
                        hits=page.get_by_regex(r"H[Oo][Ll][Dd][Ii][Nn][Gg]")
                        kind="holding point"
                    if kind==1:
                        hits=page.get_by_regex(r"[Ee]ntry.*[Ee]xit.*point")
                        kind="entry/exit point"
                    if len(hits)==0: continue
                    for holdingheading in hits:
                        items=sorted(page.get_partially_in_rect(holdingheading.x1+2.0,holdingheading.y2+0.1,holdingheading.x1+0.5,100),
                                     key=lambda x:x.y1)
                        items=[x for x in items if not x.text.startswith(" ")]
                        #print "Holding items:",items
                        for idx,item in enumerate(items):
                            print "Holding item",item
                            y1=item.y1
                            if idx==len(items)-1:
                                y2=100
                            else:
                                y2=items[idx+1].y1
                            items2=[x for x in page.get_partially_in_rect(item.x1+1,y1+0.3,item.x1+40,y2-0.1) if x.x1>=item.x1-0.25 and x.y1>=y1-0.05 and x.y1<y2-0.05]
                            s=(" ".join(page.get_lines(items2))).strip()
                            print "Holding lines:",repr(page.get_lines(items2))
                            #if s.startswith("ft Left/3"): #Special case for ESOK
                            #    s,=re.match("ft Left/3.*?([A-Z]{4,}.*)",s).groups()
                            #m=re.match("ft Left/\d+.*?([A-Z]{4,}.*)",s)
                            #if m:
                            #    s,=m.groups()
                            if s.startswith("LjUNG"): #Really strange problem with ESCF
                                s=s[0]+"J"+s[2:]
                            if s.lower().startswith("holding"):
                                sl=s.split(" ",1)
                                if len(sl)>1:
                                    s=sl[1]
                            s=s.strip()
                            if kind=="entry/exit point" and s.startswith("HOLDING"):
                                continue #reached HOLDING-part of VAC
                            #Check for other headings
                            #Fixup strange formatting of points in some holding items: (whitespace between coord and 'E')
                            s=re.sub(ur"(\d+)\s*(N)\s*(\d+)\s*(E)",lambda x:"".join(x.groups()),s)
                            m=re.match(r"([A-Z]{2,}).*?(\d+N)\s*(\d+E).*",s)
                            if not m:
                                # No named point: try coords-only, with
                                # hand-made names for Skavsta (ESKN).
                                m=re.match(r".*?(\d+N)\s*(\d+E).*",s)
                                if not m: continue
                                assert m
                                lat,lon=m.groups()
                                #skavsta
                                if icao=="ESKN":
                                    if s.startswith(u"Hold north of T"):
                                        name="NORTH"
                                    elif s.startswith(u"Hold south of B"):
                                        name="SOUTH"
                                    else:
                                        assert 0 #add more specials here
                                else:
                                    continue
                            else:
                                name,lat,lon=m.groups()
                            try:
                                coord=parse_coords(lat,lon)
                            except Exception:
                                print "Couldn't parse:",lat,lon
                                continue
                            #print name,lat,lon,mapper.format_lfv(*mapper.from_str(coord))
                            if name.count("REMARK") or len(name)<=2:
                                print "Suspicious name: ",name
                                #sys.exit(1)
                                continue
                            points[icao+' '+name]=dict(name=icao+' '+name,icao=icao,pos=coord,kind=kind)
    #for point in points.items():
    #    print point
    #sys.exit(1)
    def fixhex11(s):
        # Replace control characters (except tab/LF/CR) with spaces --
        # load hook to clean broken PDF text streams.
        out=[]
        for c in s:
            i=ord(c)
            if i>=0x20:
                out.append(c)
                continue
            if i in [0x9,0xa,0xd]:
                out.append(c)
                continue
            out.append(' ')
        return "".join(out)
    # Pass 2 over big airfields: the main AD 2 text document -- extract
    # ARP position, elevation, runways, frequencies and airspace.
    for ad in ads:
        icao=ad['icao']
        if icao in big_ad:
            #print "Parsing ",icao
            p=Parser("/AIP/AD/AD 2/%s/ES_AD_2_%s_en.pdf"%(icao,icao),loadhook=fixhex11)
            ad['aiptexturl']=p.get_url()
            firstpage=p.parse_page_to_items(0)
            te="\n".join(firstpage.get_all_lines())
            #print te
            coords=re.findall(r"ARP.*(\d{6}N)\s*(\d{7}E)",te)
            if len(coords)>1:
                raise Exception("First page of airport info (%s) does not contain exactly ONE set of coordinates"%(icao,))
            if len(coords)==0:
                print "Couldn't find coords for ",icao
            #print "Coords:",coords
            ad['pos']=parse_coords(*coords[0])
            elev=re.findall(r"Elevation.*?(\d{1,5})\s*ft",te,re.DOTALL)
            if len(elev)>1:
                raise Exception("First page of airport info (%s) does not contain exactly ONE elevation in ft"%(icao,))
            if len(elev)==0:
                print "Couldn't find elev for ",icao
            ad['elev']=int(elev[0])
            freqs=[]
            found=False
            thrs=[]
            #uprint("-------------------------------------")
            for pagenr in xrange(p.get_num_pages()):
                page=p.parse_page_to_items(pagenr)
                #uprint("Looking on page %d"%(pagenr,))
                if 0: #opening hours are no longer stored in a separate document for any airports. No need to detect which any more (since none are).
                    for item in page.get_by_regex(".*OPERATIONAL HOURS.*"):
                        lines=page.get_lines(page.get_partially_in_rect(0,item.y2+0.1,100,100))
                        for line in lines:
                            things=["ATS","Fuelling","Operating"]
                            if not line.count("AIP SUP"): continue
                            for thing in things:
                                if line.count(thing):
                                    ad['aipsup']=True
                # Runway table: threshold coordinates come as an N line
                # followed by an E line; the runway designator sits in the
                # left margin next to them.
                for item in page.get_by_regex(".*\s*RUNWAY\s*PHYSICAL\s*CHARACTERISTICS\s*.*"):
                    #uprint("Physical char on page")
                    lines=page.get_lines(page.get_partially_in_rect(0,item.y2+0.1,100,100))
                    seen_end_rwy_text=False
                    for line,nextline in izip(lines,lines[1:]+[None]):
                        #uprint("MAtching: <%s>"%(line,))
                        if re.match(ur"AD\s+2.13",line): break
                        if line.count("Slope of"): break
                        if line.lower().count("end rwy:"): seen_end_rwy_text=True
                        if line.lower().count("bgn rwy:"): seen_end_rwy_text=True
                        m=re.match(ur".*(\d{6}\.\d+)[\s\(\)\*]*(N).*",line)
                        if not m:continue
                        m2=re.match(ur".*(\d{6,7}\.\d+)\s*[\s\(\)\*]*(E).*",nextline)
                        if not m2:continue
                        latd,n=m.groups()
                        lond,e=m2.groups()
                        assert n=="N"
                        assert e=="E"
                        lat=latd+n
                        lon=lond+e
                        rwytxts=page.get_lines(page.get_partially_in_rect(0,line.y1+0.05,12,nextline.y2-0.05))
                        uprint("Rwytxts:",rwytxts)
                        rwy=None
                        for rwytxt in rwytxts:
                            #uprint("lat,lon:%s,%s"%(lat,lon))
                            #uprint("rwytext:",rwytxt)
                            m=re.match(ur"\s*(\d{2}[LRCM]?)\b.*",rwytxt)
                            if m:
                                assert rwy==None
                                rwy=m.groups()[0]
                        if rwy==None and seen_end_rwy_text:
                            continue
                        print "Cur airport:",icao
                        already=False
                        assert rwy!=None
                        seen_end_rwy_text=False
                        for thr in thrs:
                            if thr['thr']==rwy:
                                raise Exception("Same runway twice on airfield:"+icao)
                        thrs.append(dict(pos=mapper.parse_coords(lat,lon),thr=rwy))
            assert len(thrs)>=2
            # Frequency table: "<name> ... 123.456 MHz" lines; 121.5 (the
            # emergency frequency) is skipped.
            for pagenr in xrange(0,p.get_num_pages()):
                page=p.parse_page_to_items(pagenr)
                matches=page.get_by_regex(r".*ATS\s+COMMUNICATION\s+FACILITIES.*")
                #print "Matches of ATS COMMUNICATION FACILITIES on page %d: %s"%(pagenr,matches)
                if len(matches)>0:
                    commitem=matches[0]
                    curname=None
                    callsign=page.get_by_regex_in_rect(ur"Call\s*sign",0,commitem.y1,100,commitem.y2+8)[0]
                    for idx,item in enumerate(page.get_lines(page.get_partially_in_rect(callsign.x1-0.5,commitem.y1,100,100),fudge=0.3,order_fudge=15)):
                        if item.strip()=="":
                            curname=None
                        if re.match(".*RADIO\s+NAVIGATION\s+AND\s+LANDING\s+AIDS.*",item):
                            break
                        #print "Matching:",item
                        m=re.match(r"(.*?)\s*(\d{3}\.\d{1,3})\s*MHz.*",item)
                        #print "MHZ-match:",m
                        if not m: continue
                        #print "MHZ-match:",m.groups()
                        who,sfreq=m.groups()
                        freq=float(sfreq)
                        if abs(freq-121.5)<1e-4:
                            if who.strip():
                                curname=who
                            continue #Ignore emergency frequency, it is understood
                        if not who.strip():
                            if curname==None:
                                continue
                        else:
                            curname=who
                        freqs.append((curname.strip().rstrip("/"),freq))
            # Airspace section: lateral limits (coordinate strings keyed
            # by subspace name) and vertical limits (ceil/floor per name).
            for pagenr in xrange(0,p.get_num_pages()):
                page=p.parse_page_to_items(pagenr)
                matches=page.get_by_regex(r".*ATS\s*AIRSPACE.*")
                #print "Matches of ATS_AIRSPACE on page %d: %s"%(pagenr,matches)
                if len(matches)>0:
                    heading=matches[0]
                    desigitem,=page.get_by_regex("Designation and lateral limits")
                    vertitem,=page.get_by_regex("Vertical limits")
                    airspaceclass,=page.get_by_regex("Airspace classification")
                    lastname=None
                    subspacelines=dict()
                    subspacealts=dict()
                    for idx,item in enumerate(page.get_lines(page.get_partially_in_rect(desigitem.x2+1,desigitem.y1,100,vertitem.y1-1))):
                        if item.count("ATS airspace not established"):
                            assert idx==0
                            break
                        if item.strip()=="": continue
                        m=re.match(r"(.*?)(\d{6}N\s+.*)",item)
                        if m:
                            name,coords=m.groups()
                            name=name.strip()
                        else:
                            name=item.strip()
                            coords=None
                        if name:
                            lastname=name
                        if coords:
                            subspacelines.setdefault(lastname,[]).append(coords)
                        assert lastname
                    lastname=None
                    #print "Spaces:",subspacelines
                    #print "ICAO",ad['icao']
                    #altlines=page.get_lines(page.get_partially_in_rect(vertitem.x2+1,vertitem.y1,100,airspaceclass.y1-0.2))
                    #print "Altlines:",altlines
                    subspacealts=dict()
                    subspacekeys=subspacelines.keys()
                    allaltlines=" ".join(page.get_lines(page.get_partially_in_rect(vertitem.x1+0.5,vertitem.y1+0.5,100,airspaceclass.y1-0.2)))
                    single_vertlim=False
                    totalts=list(mapper.parse_all_alts(allaltlines))
                    #print "totalts:",totalts
                    if len(totalts)==2:
                        single_vertlim=True
                    for subspacename in subspacekeys:
                        ceil=None
                        floor=None
                        subnames=[subspacename]
                        if subspacename.split(" ")[-1].strip() in ["TIA","TIZ","CTR","CTR/TIZ"]:
                            subnames.append(subspacename.split(" ")[-1].strip())
                        #print "Parsing alts for ",subspacename,subnames
                        # StopIteration is (ab)used as a multi-level break
                        # once both ceil and floor have been found.
                        try:
                            for nametry in subnames:
                                if single_vertlim:
                                    #there's only one subspace, parse all of vertical limits field for this single one.
                                    items=[vertitem]
                                else:
                                    items=page.get_by_regex_in_rect(nametry,vertitem.x2+1,vertitem.y1,100,airspaceclass.y1-0.2)
                                for item in items:
                                    alts=[]
                                    for line in page.get_lines(page.get_partially_in_rect(item.x1+0.5,item.y1+0.5,100,airspaceclass.y1-0.2)):
                                        #print "Parsing:",line
                                        line=line.replace(nametry,"").lower().strip()
                                        parsed=list(mapper.parse_all_alts(line))
                                        if len(parsed):
                                            alts.append(mapper.altformat(*parsed[0]))
                                        if len(alts)==2:
                                            break
                                    if alts:
                                        #print "alts:",alts
                                        ceil,floor=alts
                                        raise StopIteration
                        except StopIteration:
                            pass
                        assert ceil and floor
                        subspacealts[subspacename]=dict(ceil=ceil,floor=floor)
                    spaces=[]
                    for spacename in subspacelines.keys():
                        altspacename=spacename
                        #print "Altspacename: %s, subspacesalts: %s"%(altspacename,subspacealts)
                        space=dict(
                            name=spacename,
                            ceil=subspacealts[altspacename]['ceil'],
                            floor=subspacealts[altspacename]['floor'],
                            points=parse_coord_str(" ".join(subspacelines[spacename])),
                            freqs=list(set(freqs))
                            )
                        if True:
                            # Sanity check: reject degenerate polygons.
                            vs=[]
                            for p in space['points']:
                                x,y=mapper.latlon2merc(mapper.from_str(p),13)
                                vs.append(Vertex(int(x),int(y)))
                            p=Polygon(vvector(vs))
                            if p.calc_area()<=30*30:
                                pass#print space
                                pass#print "Area:",p.calc_area()
                            assert p.calc_area()>30*30
                            #print "Area: %f"%(p.calc_area(),)
                        spaces.append(space)
                        #print space
                    ad['spaces']=spaces
                    found=True
                if found: break
            assert found
            ad['runways']=rwy_constructor.get_rwys(thrs)
    #Now find any ATS-airspace
    chartblobnames=[]
    # Register landing/VAC/parking charts and the AIP text document for
    # each big airfield.
    for ad in ads:
        icao=ad['icao']
        if icao in big_ad:
            parse_landing_chart.help_plc(ad,"/AIP/AD/AD 2/%s/ES_AD_2_%s_2-1_en.pdf"%(icao,icao),
                                         icao,ad['pos'],"se",variant="")
            parse_landing_chart.help_plc(ad,"/AIP/AD/AD 2/%s/ES_AD_2_%s_6-1_en.pdf"%(icao,icao),
                                         icao,ad['pos'],"se",variant="vac")
            parse_landing_chart.help_plc(ad,"/AIP/AD/AD 2/%s/ES_AD_2_%s_2-3_en.pdf"%(icao,icao),
                                         icao,ad['pos'],"se",variant="parking")
            #aip_text_documents.help_parse_doc(ad,"/AIP/AD/AD 2/%s/ES_AD_2_%s_6_1_en.pdf"%(icao,icao),
            #        icao,"se",title="General Information",category="general")
            aip_text_documents.help_parse_doc(ad,"/AIP/AD/AD 2/%s/ES_AD_2_%s_en.pdf"%(icao,icao),
                    icao,"se",title="General Information",category="general")
    #if purge:
    #    parse_landing_chart.purge_old(chartblobnames,country="se")
    #sys.exit(1)
    for extra in extra_airfields.extra_airfields:
        if filtericao(extra):
            ads.append(extra)
    print
    print
    for k,v in sorted(points.items()):
        print k,v,mapper.format_lfv(*mapper.from_str(v['pos']))
    #print "Num points:",len(points)
    origads=list(ads)
    # Merge flygkartan.csv: match by mercator distance (<120 units at
    # zoom 13); unmatched entries become new ZZZZ airfields.
    for flygkartan_id,name,lat,lon,dummy in csv.reader(open("fplan/extract/flygkartan.csv"),delimiter=";"):
        found=None
        lat=float(lat)
        lon=float(lon)
        if type(name)==str:
            name=unicode(name,'utf8')
        mercf=mapper.latlon2merc((lat,lon),13)
        for a in origads:
            merca=mapper.latlon2merc(mapper.from_str(a['pos']),13)
            dist=math.sqrt((merca[0]-mercf[0])**2+(merca[1]-mercf[1])**2)
            if dist<120:
                found=a
                break
        if found:
            found['flygkartan_id']=flygkartan_id
        else:
            d=dict(
                icao='ZZZZ',
                name=name,
                pos=mapper.to_str((lat,lon)),
                elev=int(get_terrain_elev((lat,lon))),
                flygkartan_id=flygkartan_id)
            if filtericao(d):
                ads.append(d)
    minor_ad_charts=extra_airfields.minor_ad_charts
    for ad in ads:
        if ad['name'].count(u"Långtora"):
            # Hand-corrected position for this airfield.
            ad['pos']=mapper.to_str(mapper.from_aviation_format("5944.83N01708.20E"))
        if ad['name'] in minor_ad_charts:
            charturl=minor_ad_charts[ad['name']]
            arp=ad['pos']
            if 'icao' in ad and ad['icao'].upper()!='ZZZZ':
                icao=ad['icao'].upper()
            else:
                icao=ad['fake_icao']
            parse_landing_chart.help_plc(ad,charturl,icao,arp,country='raw',variant="landing")
            """
            assert icao!=None
            lc=parse_landing_chart.parse_landing_chart(
                charturl,
                icao=icao,
                arppos=arp,country="raw")
            assert lc
            if lc:
                ad['adcharturl']=lc['url']
                ad['adchart']=lc
            """
    #print ads
    for ad in ads:
        print "%s: %s - %s (%s ft) (%s)"%(ad['icao'],ad['name'],ad['pos'],ad['elev'],ad.get('flygkartan_id','inte i flygkartan'))
        for space in ad.get('spaces',[]):
            for freq in space.get('freqs',[]):
                print " ",freq
        #if 'spaces' in ad:
        #    print " spaces: %s"%(ad['spaces'],)
        #if 'aiptext' in ad:
        #    print "Aip texts:",ad['aiptext']
        #else:
        #    print "No aiptext"
    print "Points:"
    for point in sorted(points.values(),key=lambda x:x['name']):
        print point
    # Regression files: hashed summary and full details per airfield.
    f=codecs.open("extract_airfields.regress.txt","w",'utf8')
    for ad in ads:
        r=repr(ad)
        d=md5.md5(r).hexdigest()
        f.write("%s - %s - %s\n"%(ad['icao'],ad['name'],d))
    f.close()
    f=codecs.open("extract_airfields.regress-details.txt","w",'utf8')
    for ad in ads:
        r=repr(ad)
        f.write(u"%s - %s - %s\n"%(ad['icao'],ad['name'],r))
    f.close()
    return ads,points.values()
dot.edge(p_name, name) for n in node.params: see_node(name, n, dot) see_node(name, node.body, dot) for n in node.localvars: see_node(name, n, dot) elif type(node) == CompoundStmtNode: dot.node(name, str(node.kind)) dot.edge(p_name, name) for n in node.stmts: see_node(name, n, dot) elif type(node) == IfStmtNode: dot.node(name, str(node.kind)) dot.edge(p_name, name) see_node(name, node.cond, dot) see_node(name, node.then, dot) if not node.els is None: see_node(name, node.els, dot) if __name__ == '__main__': path = './t1.c' l = Lexer(filename(path), read_file(path)) l.lex() l.see_tokens() p = Parser(l.tokens) p.parse() see_ast(p.ast)
def on_reload(self, event, new=False):
    """
    Handles loading of circuit specification file.
    Called when new file loaded or current file reloaded.

    On a successful parse the simulation controls are enabled and the
    device/monitor lists repopulated; on failure the controls are
    disabled, state is cleared and errors are written to the activity log.
    `new` only selects the success message wording.
    """
    # Create new render
    self.cycles_completed = 0
    self.start = (0, 0)
    # Rebuild the whole simulator stack from the current file path.
    self.names = Names()
    self.devices = Devices(self.names)
    self.network = Network(self.names, self.devices)
    self.monitors = Monitors(self.names, self.devices, self.network)
    self.scanner = Scanner(self.path, self.names)
    self.parser = Parser(self.names, self.devices, self.network,
                         self.monitors, self.scanner)
    if self.parser.parse_network():
        self.run_button.Enable()
        self.continue_button.Enable()
        self.config_button.Enable()
        self.add_outs.Enable()
        self.dele_mons.Enable()
        self.view2D.Enable()
        self.view3D.Enable()
        self.reset.Enable()
        [self.outputs, self.configurable, self.monitored] = self._gen_lists()
        self.outs.Set(self.outputs[0])
        # Added after discovering application crashes
        # when SIGGEN circuit loaded. Before this every
        # ciruit had to include a configurable device.
        if len(self.configurable[0]) > 0:
            self.config_list.Set(self.configurable[0])
            self.config_list.SetSelection(0)
            self.config_var.SetValue(self.configurable[2][0])
        else:
            self.config_var.SetValue(0)
            self.config_list.Clear()
            self.config_button.Disable()
        self.mons.Set(self.monitored[0])
        # Ensure pan variables are restored,
        # and new file loaded in both views.
        self.on_reset(None)
        self._re_render()
        if self.currentview == wx.ID_YES:
            self.on_2D(None)
        else:
            self.on_3D(None)
        self.on_reset(None)
        self._re_render()
        if self.currentview == wx.ID_NO:
            self.on_3D(None)
        else:
            self.on_2D(None)
        self.act_log.Clear()
        if new:
            self.act_log.AppendText(
                _("Successfully loaded new file!") + "\n")
        else:
            self.act_log.AppendText(
                _("Successfully reloaded file!") + "\n")
    else:
        # Parse failed: report every parser/scanner message, then disable
        # the UI and drop all simulation state.
        self.act_log.Clear()
        # if new:
        #     self.act_log.AppendText(_("Unsuccessful Load!")+ "\n\n")
        # else:
        #     self.act_log.AppendText(_("Unsuccessful Reload!")+ "\n\n")
        for err_msg in self.parser.print_gui:
            self.act_log.AppendText(_(err_msg))
        self.act_log.AppendText(_(self.scanner.broken_comment_msg) + '\n')
        self.act_log.AppendText(
            _("***ERROR: Circuit could not ") +
            _("be parsed. Try again") + "\n\n\n")
        self.run_button.Disable()
        self.continue_button.Disable()
        self.config_button.Disable()
        self.add_outs.Disable()
        self.dele_mons.Disable()
        self.view2D.Disable()
        self.view3D.Disable()
        self.reset.Disable()
        # Delete traces
        self.names = None
        self.devices = None
        self.network = None
        # Set devices and monitors on the right canvas to None
        self.config_list.SetValue("")
        self.outputs = [[] for i in range(2)]
        self.configurable = [[] for i in range(2)]
        self.monitored = [[] for i in range(2)]
        self.outs.Clear()
        self.config_list.Clear()
        self.mons.Clear()
        self.on_reset(None)
        self._re_render()
        if self.currentview == wx.ID_YES:
            self.on_2D(None)
        else:
            self.on_3D(None)
        self.on_reset(None)
        self._re_render()
        if self.currentview == wx.ID_NO:
            self.on_3D(None)
        else:
            self.on_2D(None)
# Token definitions for the expression grammar. In this combinator DSL,
# `Tok(regex) < Ctor` appears to build a parser matching the regex and
# wrapping the lexeme with the given AST constructor (operators are
# overloaded by the Parser/Tok classes defined elsewhere).
flt = Tok(r'[\+\-]?(\d+\.)|(\d*\.\d+)') < Float
int_ = Tok(r'[\+\-]?\d+') < Int
nam = Tok(r'\w+') < Name
var = nam
plus = Tok(r'\+')
dash = Tok(r'\-')
star = Tok(r'\*')
slash = Tok(r'\/')
mod = Tok(r'\%')
dstar = Tok(r'\*\*')
opar = Tok(r'\(')
cpar = Tok(r'\)')
equal = Tok(r'\=')
kw_print = Tok(r'print')

# Forward declarations: empty Parser objects so the mutually recursive
# rules below can reference each other before their bodies are assigned.
exprs = Parser()
exprl = Parser()
expr = Parser()
prim = Parser()
expo = Parser()
sign = Parser()
fact = Parser()
summ = Parser()
asgn = Parser()
prin = Parser()

# Grammar rules. NOTE(review): rules for summ/asgn/prin/expr/exprl/exprs
# are not assigned in this chunk -- presumably defined later in the file.
prim.parser = flt | int_ | var | opar + expr - cpar
expo.parser = ((prim - dstar & expo < Pow) | prim)
sign.parser = ((plus + sign < Pos) | (dash + sign < Neg) | expo)
fact.parser = sign << ((star + sign, Mul), (slash + sign, Div), (mod + sign, Mod))
def main(arg_list):
    """Parse the command line options and arguments specified in arg_list.

    Run either the command line user interface, the graphical user
    interface, or display the usage message.
    """
    usage_message = (
        "Usage:\n"
        "Show help: logsim.py -h\n"
        "Command line user interface: logsim.py -c <file path>\n"
        "Graphical user interface: logsim.py <file path> or logsim.py")
    try:
        options, arguments = getopt.getopt(arg_list, "hc:")
    except getopt.GetoptError:
        print("Error: invalid command line arguments\n")
        print(usage_message)
        sys.exit()

    # Initialise instances of the four inner simulator classes.
    names = Names()
    devices = Devices(names)
    network = Network(names, devices)
    monitors = Monitors(names, devices, network)

    for option, path in options:
        if option == "-h":  # print the usage message
            print(usage_message)
            sys.exit()
        elif option == "-c":  # use the command line user interface
            scanner = Scanner(path, names)
            parser = Parser(names, devices, network, monitors, scanner)
            if parser.parse_network():
                # Only enter the interactive prompt on a valid circuit.
                userint = UserInterface(names, devices, network, monitors)
                userint.command_interface()

    if not options:  # no option given, use the graphical user interface
        app = ab.BaseApp(redirect=False)
        error = ErrorFrame()
        if len(arguments) == 0:
            # No file given: open the GUI with an empty circuit (path=None).
            path = None
            scanner = Scanner(path, names)
            parser = Parser(names, devices, network, monitors, scanner)
            gui = Gui("LogicSim", path, names, devices, network, monitors)
            gui.Show(True)
        elif len(arguments) > 1:  # wrong number of arguments
            print("Error: one or no file path required\n")
            print(usage_message)
            sys.exit()
        else:
            # Exactly one argument: treat it as the circuit file path.
            [path] = arguments
            scanner = Scanner(path, names)
            parser = Parser(names, devices, network, monitors, scanner)
            if parser.parse_network():
                gui = Gui("LogicSim", path, names, devices, network, monitors)
                gui.Show(True)
        # The error dialog is shown even when parsing failed, so the user
        # sees what went wrong before the main loop starts.
        error.ShowModal()
        app.MainLoop()
def load_tools(fname):
    """Parse *fname* on top of the basic tool set and return the imported tools."""
    base = load_basic_tools()
    tool_parser = Parser(base.tool_dict)
    tool_parser.parse_file(fname, axioms=False, basic_tools=base)
    return ImportedTools(tool_parser.tool_dict)
def test_evaluate(self):
    """Exercise evaluate() in CPS style across every AST node kind.

    Each case builds an AST, evaluates it in a fresh Environment, and the
    continuation (third argument) receives the result and asserts on it.
    """
    # --- literals ---
    ast = LiteralAst(1.0)
    environment = Environment()
    evaluate(ast, environment, lambda value: self.assertEqual(value, 1.0))
    ast = LiteralAst(True)
    environment = Environment()
    evaluate(ast, environment, self.assertTrue)
    ast = LiteralAst(False)
    environment = Environment()
    evaluate(ast, environment, self.assertFalse)
    ast = LiteralAst("aaa")
    evaluate(ast, Environment(), lambda value: self.assertEqual(value, "aaa"))
    # --- binary operators ---
    ast = BinaryAst('+', LiteralAst(1), LiteralAst(2))
    evaluate(ast, Environment(), lambda value: self.assertEqual(value, 3.0))
    # --- sequences: empty prog yields a falsy result, otherwise last value ---
    ast = ProgAst([])
    evaluate(ast, Environment(), self.assertFalse)
    ast = ProgAst([LiteralAst(1)])
    evaluate(ast, Environment(), lambda value: self.assertEqual(value, 1.0))
    ast = ProgAst([LiteralAst(1), LiteralAst(2)])
    evaluate(ast, Environment(), lambda value: self.assertEqual(value, 2.0))
    # --- assignment: left side must be a variable ---
    ast = AssignAst(LiteralAst(1), LiteralAst("a"))
    with self.assertRaises(Exception):
        evaluate(ast, Environment(), lambda value: value)
    ast = ProgAst([AssignAst(VarAst('a'), LiteralAst("foo")), VarAst('a')])
    evaluate(ast, Environment(), lambda value: self.assertEqual(value, "foo"))
    # Assigning to a name not defined in any parent scope raises.
    ast = AssignAst(VarAst("a"), LiteralAst("foo"))
    with self.assertRaises(Exception):
        evaluate(ast, Environment(Environment()), lambda value: value)
    # --- lambda calls ---
    ast = CallAst(
        LambdaAst("", ["a"], VarAst("a")),
        [LiteralAst(1)],
    )
    evaluate(ast, Environment(), lambda value: self.assertEqual(value, 1.0))
    ast = CallAst(LambdaAst("", ["a"], VarAst("a")), [LiteralAst("abc")])
    evaluate(ast, Environment(), lambda value: self.assertEqual(value, "abc"))
    # named lambda can recurse through its own name:
    # (λ loop (n) if n > 0 then n + loop(n - 1) else 0) (10)
    ast = CallAst(
        LambdaAst(
            "loop", ["n"],
            IfAst(
                BinaryAst(">", VarAst("n"), LiteralAst(0)),
                BinaryAst(
                    "+", VarAst("n"),
                    CallAst(VarAst("loop"),
                            [BinaryAst('-', VarAst('n'), LiteralAst(1))])),
                LiteralAst(0))),
        [LiteralAst(10)])
    evaluate(ast, Environment(), lambda value: self.assertEqual(value, 55.0))
    # --- let bindings ---
    # let (x) x;  -- unbound definition evaluates to a falsy value
    ast = LetAst([VarDefAst("x", None)], VarAst("x"))
    evaluate(ast, Environment(), self.assertFalse)
    # let (x = 2, y = x + 1, z = x + y) x + y + z
    ast = LetAst([
        VarDefAst("x", LiteralAst(2)),
        VarDefAst("y", BinaryAst("+", VarAst("x"), LiteralAst(1))),
        VarDefAst("z", BinaryAst("+", VarAst("x"), VarAst("y")))
    ], BinaryAst("+", BinaryAst("+", VarAst("x"), VarAst("y")), VarAst("z")))
    evaluate(ast, Environment(), lambda value: self.assertEqual(value, 10.0))
    # The second expression raises an error, since x, y, z are bound only
    # inside the let body:
    # let (x = 2, y = x + 1, z = x + y) x + y + z; x + y + z
    ast = ProgAst([
        LetAst([
            VarDefAst('x', LiteralAst(2)),
            VarDefAst('y', BinaryAst('+', VarAst('x'), LiteralAst(1))),
            VarDefAst('z', BinaryAst('+', VarAst('x'), VarAst('y')))
        ], BinaryAst('+', BinaryAst('+', VarAst('x'), VarAst('y')),
                     VarAst('z'))),
        BinaryAst('+', BinaryAst('+', VarAst('x'), VarAst('y')), VarAst('z'))
    ])
    with self.assertRaises(Exception):
        evaluate(ast, Environment(), lambda value: value)
    # --- conditionals: only False is falsy; "" counts as truthy here ---
    ast = IfAst(LiteralAst(""), LiteralAst(1), None)
    evaluate(ast, Environment(), lambda value: self.assertEqual(value, 1.0))
    ast = IfAst(LiteralAst(False), LiteralAst(1), LiteralAst(2))
    evaluate(ast, Environment(), lambda value: self.assertEqual(value, 2.0))
    ast = IfAst(LiteralAst(False), LiteralAst(1), LiteralAst(False))
    evaluate(ast, Environment(), self.assertFalse)
    # --- unknown node type raises ---
    ast = {"type": "foo", "value": 'foo'}
    with self.assertRaises(Exception):
        evaluate(ast, Environment(), lambda value: value)
    # --- recursion through an assigned variable ---
    # fib = λ(n) if n < 2 then n else fib(n - 1) + fib(n - 2);
    # fib(6);
    ast = ProgAst([
        AssignAst(
            VarAst('fib'),
            LambdaAst(
                'n', ['n'],
                IfAst(
                    BinaryAst('<', VarAst('n'), LiteralAst(2)),
                    VarAst('n'),
                    BinaryAst(
                        '+',
                        CallAst(
                            VarAst('fib'),
                            [BinaryAst('-', VarAst('n'), LiteralAst(1))]),
                        CallAst(
                            VarAst('fib'),
                            [BinaryAst('-', VarAst('n'), LiteralAst(2))
                             ]))))),
        CallAst(VarAst('fib'), [LiteralAst(6)])
    ])
    evaluate(ast, Environment(), lambda value: self.assertEqual(value, 8.0))
    ast = IfAst(LiteralAst(False), LiteralAst(1), LiteralAst(False))
    evaluate(ast, Environment(), self.assertFalse)
    # Calling a non-callable raises.
    ast = CallAst(LiteralAst(1), [])
    with self.assertRaises(Exception):
        evaluate(ast, Environment(), self.assertFalse)
    # --- end-to-end: parse source text and evaluate with primitives bound ---
    code = """
2 + twice(3, 4)
"""
    global_env = Environment()
    for name, func in primitive.items():
        global_env.define(name, func)
    parser = Parser(TokenStream(InputStream(code)))
    evaluate(parser(), global_env, lambda result: result)
# Split a 224-pixel-wide strip of the image into 8 tiles (one row) and build
# an independently renderable scene per tile.
tile_width = 224 // 8
tile_height = 6
row = 1
col = 8
left_start = origin + 0 * tile_width
# top_start = origin + 6 * tile_height
top_start = origin + 70 + 4  # NOTE(review): magic offset 70+4 -- document its origin
scenes = []
tasks = []
for left in range(left_start, left_start + col * tile_width, tile_width):
    for top in range(top_start, top_start + row * tile_height, tile_height):
        # Re-parse the scene file per tile so each scene gets its own
        # sampler/film state.
        parser = Parser()
        s = parser.parse(testfile)
        scenes.append(s)
        scenes[-1].sampler = TileSampler(left, top, tile_width, tile_height)
        name = out_dir + '%d.%d.png' % (left, top)
        # Deep-copy the film so this tile writes to its own output file.
        scenes[-1].film = copy.deepcopy(scenes[-1].film)
        scenes[-1].film.filename = name
        tasks.append(scenes[-1].render)
# NOTE(review): the pool is created but `tasks` are never submitted in this
# chunk -- confirm the p.map()/apply call exists elsewhere.
p = Pool(len(tasks))
for s in scenes:
    print(s.film.filename)
    print('%d, %d' % (s.sampler.left, s.sampler.top))
return left - right if op.kind == '*': return left * right if op.kind == '/': return left / right @sm.syntaxmap(['E', 'T'], [1]) @sm.syntaxmap(['T', 'F'], [1]) def texp(val): return val @sm.syntaxmap(['F', '(', 'E', ')'], [2]) def lpexp(val): return val @sm.syntaxmap(['F', 'num'], [1]) def numexp(t): # t is a token return t.val tokens = list(Lexer('1+10*123').lex()) # for t in tokens: # print(t) # print(sm.productions()) # parser = Parser(productions, terminal, nonterminal) parser = Parser(sm.productions, sm.terminal, sm.nonterminal) parser.generate(printInfo=True) parser.parse(tokens, sm.sdmap)
# Command-line entry point: "parse" ingests a text corpus into a Markov
# database (<name>.db); "gen" generates sentences from an existing one.
# NOTE: this module targets Python 2 (print statement below).
args = sys.argv
usage = 'Usage: %s (parse <name> <depth> <path to txt file>|gen <name> <count>)' % (args[0], )
if (len(args) < 3):
    raise ValueError(usage)
mode = args[1]
name = args[2]  # base name of the sqlite database file
if mode == 'parse':
    if (len(args) != 5):
        raise ValueError(usage)
    depth = int(args[3])  # Markov chain depth (context length)
    file_name = args[4]
    db = Db(sqlite3.connect(name + '.db'), Sql())
    db.setup(depth)
    # NOTE(review): the file handle is never closed explicitly; a `with`
    # block would be safer.
    txt = codecs.open(file_name, 'r', 'utf-8').read()
    Parser(name, db, SENTENCE_SEPARATOR, WORD_SEPARATOR).parse(txt)
elif mode == 'gen':
    count = int(args[3])  # number of sentences to emit
    db = Db(sqlite3.connect(name + '.db'), Sql())
    generator = Generator(name, db, Rnd())
    for i in range(0, count):
        print generator.generate(WORD_SEPARATOR)
else:
    raise ValueError(usage)
def play(self):
    """Main game loop: describe the current room, read a command, dispatch it.

    Loops until the player quits, or wins by successfully interacting with
    Lyn (player.interact returns the new run_game flag).
    """
    game_parser = Parser()
    current_location = None
    run_game = True
    while run_game:
        current_location = self.player.location
        print()
        print("You are currently in room {0} -".format(
            current_location.name))
        # Long description on first visit, short description afterwards.
        if current_location.familiar == False:
            print("{0}".format(current_location.longDesc))
            print()
            current_location.familiar = True
        else:
            print("{0}".format(current_location.shortDesc))
            print()
        # List room contents: items then characters.
        print("Here are the contents of the room:\n")
        print("Items:")
        if current_location.items:
            for item in current_location.items:
                print("    {0}".format(item.name))
        else:
            print("There are currently no items in this room.\n")
        print()
        print("Characters:")
        if current_location.characters:
            for char in current_location.characters:
                print("{0}".format(char.name))
        else:
            print("There is no one else in this room.\n")
        # Read the player's command and split it into its components.
        user_cmd = user_input()
        user_action, user_direction, user_item, user_char = game_parser.parse_command(
            user_cmd)
        # Dispatch on the parsed action.
        if user_action == "go":
            if user_direction:
                if user_direction == "north":
                    self.player.go_north()
                elif user_direction == "south":
                    self.player.go_south()
                elif user_direction == "west":
                    self.player.go_west()
                elif user_direction == "east":
                    self.player.go_east()
                else:
                    print(
                        "Your directions weren't clear. Please specify: north, south, west, or east."
                    )
                    print()
            else:
                print(
                    "When using the \"go\" action, please specify a direction: north, south, west, or east."
                )
                print()
        if user_action == "look":
            self.player.look()
        if user_action == "lookat":
            self.player.look_at(user_item)
        if user_action == "take":
            if user_item:
                print("You placed {0} into your inventory.".format(
                    user_item))
                self.player.take(user_item)
            else:
                print(
                    "Error. When using \"take\" command, please specify a name of an item to take."
                )
        if user_action == "drop":
            if user_item:
                self.player.drop(user_item)
        if user_action == "view":
            self.player.view_inventory()
        if user_action == "help":
            # Static help text listing every recognized command.
            print()
            print("~~~Welcome to the Help Guide~~~")
            print(
                "Here are the valid actions you may use in the Eden Adventure Game:"
            )
            print(
                "go - Use this command, followed by a direction (north, south, west, or east), to move into that direction"
            )
            print()
            print(
                "look - Use this one word command to look around and observe your current location"
            )
            print()
            print(
                "look at - Use this 2-word command followed by the name of an item within the room to observe an object"
            )
            print()
            print(
                "take - Use this command, followed by a name of an item, to pick up a holdable item and place into your inventory"
            )
            print()
            print(
                "drop - Use this command, followed by a name of an item that is in your inventory, remove it from your inventory and drop it in your current location"
            )
            print()
            print(
                "view - Use this one word command to view the contents of your inventory"
            )
            print()
            print(
                "interact - Use this command, followed by the name of a non-playable character, to speak to that character"
            )
            print()
            print(
                "fight - Use this command, followed by the name of behemoth, to fight the behemoth to be able to enter the dungeon. Note: You may need an item..."
            )
            print()
            print(
                "quit - Use this one word command to quit and end the game"
            )
        if user_action == "fight":
            if user_char:
                self.player.fight(user_char)
        if user_action == "interact":
            if user_char:
                # game will end if sucessfully interact with Lyn.
                run_game = self.player.interact(user_char)
        if user_action == "quit":
            print("Shutting down...")
            return
def parser_fulladder(names, devices, network, monitors):
    """Build a Parser wired to the full-adder test definition file."""
    scanner = Scanner(fulladderpath, names)
    return Parser(names, devices, network, monitors, scanner)
'>=': 9, '>': 9, '<': 9, '<=': 9, '==': 9, '!=': 9, '+': 10, '-': 10, '*': 11, '/': 11, 'UMINUS': 15 } precs = {'UMINUS': ['Exp', '-', 'Exp']} parser = Parser(sm.productions, sm.terminal, sm.nonterminal, precs, precedence) # print(sm.terminal) t2p = {'id': '[a-zA-Z_]\w*', 'num': '\d+'} # lexer = Lexer('node/test3.dm',sm.terminal,t2p) lexer = Lexer('node/test6.dm', sm.terminal, t2p) # lexer = Lexer('test2.dm',sm.terminal,t2p) # print(list(lexer.lex())) # for t in lexer.lex(): # print(t) # parser.generate() # parser.dumpjson() parser.loadjson() # parser.htmlparse('test.html') tokens = list(lexer.lex())
def parser_emptyfile(names, devices, network, monitors):
    """Build a Parser whose scanner reads an empty input (/dev/null)."""
    empty_scanner = Scanner("/dev/null", names)
    return Parser(names, devices, network, monitors, empty_scanner)
from lex import Lexer
from parse import Parser
from codegen import CodeGen

# Driver script: lex -> parse -> emit LLVM IR for the toy language.
fname = "input.toy"

with open(fname) as f:
    text_input = f.read()

# Tokenize the whole source file.
lexer = Lexer().get_lexer()
tokens = lexer.lex(text_input)

# Code generator owns the LLVM module/builder; the parser's semantic
# actions presumably emit through them -- TODO confirm (the wired-up
# Parser(module, builder, printf) call below is commented out).
codegen = CodeGen()
module = codegen.module
builder = codegen.builder
printf = codegen.printf

#~ pg = Parser(module, builder, printf)
pg1 = Parser()
pg1.parse()  # register the grammar productions
parser = pg1.get_parser()
parser.parse(tokens)

codegen.create_ir()
#~ codegen.save_ir("output.ll")
#!/usr/bin/python3 from parse import Parser from preprocess import Preprocess from net import NeuralNetwork if __name__ == '__main__': p = Parser() # p.readCSV("xAPI-Edu-Data.csv") p.splitTrainTest("xAPI-Edu-Data.csv") # pre = Preprocess() # pre.createArrs("trainData.txt","testData.txt") nn = NeuralNetwork() # nn.train_neural_network() nn.test_neural_network()
#!/usr/bin/env python
# encoding: utf-8
import sys

from ast import CallAst, VarAst
from compiler import to_js
from cps_transformer import to_cps
from input_stream import InputStream
from optimize import Optimizer
from parse import Parser
from token_stream import TokenStream

# Compiler driver: concatenate every source file named on the command line,
# parse it, CPS-transform, optimize, and emit the resulting JavaScript.
chunks = []
for path in sys.argv[1:]:
    with open(path) as source_file:
        chunks.append(source_file.read())
code = "".join(chunks)

parser = Parser(TokenStream(InputStream(code)))
ast = parser()
# Wrap the whole program in a call to the toplevel continuation.
ast = to_cps(ast, lambda ast: CallAst(
    VarAst('β_TOPLEVEL'),
    [ast],
))
# print(ast)
ast = Optimizer().optimize(ast)
# print(ast)
js_code = to_js(ast)
print(js_code)