def test_BoardMembers(self):
    for path in files:
        print('Parsing={}'.format(path))
        with io.open(path, 'r', encoding='utf-8') as f:
            data = f.read()
        p = Parser()
        r = p.parseBoardMembers(data)
        # assert len(r)
        print(r)
def test_Description(self):
    for path in files:
        print('Parsing={}'.format(path))
        with io.open(path, 'r', encoding='utf-8') as f:
            data = f.read()
        p = Parser()
        r = p.parseDescription(data)
        assert len(r)
        print(r)
def parse(service, url, start, stop, browser):
    results = list()
    if '{}' not in url:
        warning('URL does not have a placeholder for page number.')
    # Construct the parser before the try block so teardown() in the finally
    # clause cannot raise NameError if construction itself fails.
    parser = Parser(service, browser)
    try:
        parser.setup()
        header = parser.get_header()
        if header:
            results.append(header)
        index = 1
        for page in range(start, stop + 1):
            results += parser.parse(url.format(page))
            info('{} results after {} page(s)'.format(len(results) - 1, index))
            index += 1
    except KeyboardInterrupt:
        sys.stdout.write('\r')
        info('Exiting...')
    finally:
        parser.teardown()
    return results
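# Hedged usage note (added, names are illustrative only): parse() above expects
# 'url' to be a template containing a '{}' placeholder that is filled with each
# page number, e.g.
#     results = parse(service, 'https://example.com/listing?page={}', 1, 10, browser)
# where 'service' and 'browser' are whatever the Parser class needs to drive its
# setup()/teardown() of a browser session; that detail is an assumption, not
# something stated in the snippet.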
def parse(service, url, start, stop, browser):
    results = list()
    if '{}' not in url:
        warning('URL does not have a placeholder for Crash_ID number.')
    # Construct the parser before the try block so teardown() in the finally
    # clause cannot raise NameError if construction itself fails.
    parser = Parser(service, browser)
    try:
        parser.setup()
        header = parser.get_header()
        if header:
            results.append(header)
        with open('test.csv') as csvfile:
            readCSV = csv.reader(csvfile, delimiter=',')
            for row in readCSV:
                CrashID = row[0]
                print("CrashID: ")
                print(CrashID)
                results += parser.parse(CrashID, url.format(CrashID))
                # print(results)
    except KeyboardInterrupt:
        sys.stdout.write('\r')
        info('Exiting...')
    finally:
        parser.teardown()
    return results
def test1(self):
    with open(filepath, 'r') as file:
        data = file.readlines()
    for line_number, line in enumerate(data, start=1):
        try:
            tokens = Lexer(line, line_number).tokenize()
            Parser().parse(tokens)
        except NotImplementedError1:
            pass
class Test(unittest.TestCase):
    def test1(self):
        l = Lexer('Q').tokenize()
        # self.assertEqual(l.kind, [TokenKind.ID])

    def test2(self):
        tokelist = Lexer('(P /\ Q)').tokenize()
        # parse_tree = Parser().parse(tokelist)
        # some assertion goes here


if __name__ == '__main__':
    # Read the input file and split it on newlines
    input_file = open('input.txt', 'r')
    data = input_file.read().splitlines()
    i = 1
    for line in data:
        # Print the current line
        print('Input #{}:'.format(i))
        print('---------\n')
        print('Proposition : ' + line)
        # Get the tokens
        l = Lexer(line).tokenize()
        lexerOutPut = l.kind
        print('Lexer : ' + repr(lexerOutPut))
        # Get the parse tree for the current input
        parse_tree = Parser(i).parse(l)
        if parse_tree is not None:
            print('Parser : ' + repr(parse_tree))
        i = i + 1
        print('\n')
from tag import Tag
from token1 import Token
from lexer import Lexer
from parser1 import Parser

if __name__ == "__main__":
    lexer = Lexer(
        "C:\\Users\\chris\\Desktop\\Compilador\\CompiladorPyscal\\src\\teste1.pys"
    )
    parser = Parser(lexer)
    parser.Programa()
    parser.lexer.closeFile()

    # Print the token list ("Lista de tokens")
    print("\n=>Lista de tokens:")
    token = lexer.proxToken(None)
    last_token = token
    while token is not None and token.getNome() != Tag.EOF:
        print(
            token.toString(),
            "Linha: " + str(token.getLinha()) + " Coluna: " + str(token.getColuna()))
        token = lexer.proxToken(last_token)
        last_token = token

    # Print the symbol table ("Tabela de simbolos")
    print("\n=>Tabela de simbolos:")
    lexer.printTS()
    lexer.closeFile()
    print('\n=> Fim da compilacao')  # end of compilation
from lexer import Lexer
from parser1 import Parser

# Aluno (student): Pablo Dias Couto
# Mat (registration number): 20142003301045

f = open("codigo.txt", "r")
if f.mode == "r":
    text_input = f.read()

lexer = Lexer().get_lexer()
tokens = lexer.lex(text_input)

pg = Parser()
pg.parse()  # register the grammar productions
parser = pg.get_parser().parse(tokens)
class Main():
    def __init__(self):
        self.parser = Parser()
        self.handleParams()
        self.current_row = 0

    def handleParams(self):
        ph = ParamsHelper()
        params = ph.getParams()
        self.options = {
            'company': params['company'][0],
            'outputfile': params['outfile'][0],
        }
        print("company={} outputfile={}".format(self.options['company'], self.options['outputfile']))

    def DownloadSite(self, company_name):
        url = 'https://www.crunchbase.com/organization/{}'.format(company_name.lower().replace(' ', '-'))
        print('Downloading url={}'.format(url))
        dm = DowloaderMechanize()
        return dm.getPage(url, 2, None)

    # return True/False
    def DownloadSiteToTempFile(self, company_name):
        self.tempfilename = 'tmp\\' + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(7)) + '.html'
        site_data = self.DownloadSite(company_name)
        if len(site_data):
            with io.open(self.tempfilename, 'w', encoding='utf-8') as f:
                f.write(site_data)
            return True
        return False

    def WriteDataToXlsx(self, data, worksheet):
        worksheet.write(self.current_row, 0, data['header'])
        self.current_row += 1
        for r, row in enumerate(data['lst']):
            for i in range(len(row)):
                worksheet.write(self.current_row, i, row[i])
            self.current_row += 1
        self.current_row += 1

    # return True/False
    def ExportData(self, data):
        workbook = xlsxwriter.Workbook(self.options['outputfile'], {'constant_memory': True})
        worksheet = workbook.add_worksheet()
        for d in data:
            self.WriteDataToXlsx(d, worksheet)
        workbook.close()
        return True

    def Process(self):
        r = self.DownloadSiteToTempFile(self.options['company'])
        if not r:
            print("[-] Failed to download site data. Exiting")
            exit(1)
        print("[+] Site data downloaded...\n\tfile={}".format(self.tempfilename))
        res = self.parser.parseFile(self.tempfilename)
        if len(res) == 0:
            print('[-] Failed to scrape company data. Exiting')
            exit(1)
        print('[+] Scraped company data.')
        self.ExportData(res)
        print('[+] Data exported to file={}'.format(self.options['outputfile']))
        os.unlink(self.tempfilename)
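# Hedged usage sketch (added, not part of the original snippet): assuming the
# Main class above is the script's entry point, it would typically be driven
# like this. The __main__ guard itself is an assumption.
if __name__ == '__main__':
    app = Main()   # reads command-line params via ParamsHelper
    app.Process()  # download -> parse -> export to the .xlsx output file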
from lexer import Lexer
from parser1 import Parser
from codegen import CodeGen

fname = "input.toy"
with open(fname) as f:
    text_input = f.read()

lexer = Lexer().get_lexer()
tokens = lexer.lex(text_input)

codegen = CodeGen()
module = codegen.module
builder = codegen.builder
printf = codegen.printf

pg = Parser(module, builder, printf)
pg.parse()
parser = pg.get_parser()
parser.parse(tokens).eval()

codegen.create_ir()
codegen.save_ir("output.ll")
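# Note (added, an assumption based on the calls above rather than the original
# project): this follows the common rply + llvmlite toy-compiler pattern --
# Parser.parse() registers the grammar productions, get_parser() builds the
# actual parser, and .eval() on the resulting AST node emits instructions
# through the shared builder before the IR is finalized and written to output.ll.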
inFiles = os.listdir(pInDir)
outFiles = os.listdir(pOutDir)

# Truncate any existing .out files before appending new results.
for f in outFiles:
    ffile = open(pOutDir + '/' + f[0:7] + '.out', 'w')
    ffile.close()

n = 0  # processed-file counter
for f in inFiles:
    n += 1
    print(f)
    inp = open(pInDir + '/' + f, 'r', encoding='utf-8')
    inpp = open(pInDir + '/' + f, 'r', encoding='utf-8')
    lex = Tokeniser(''.join(inp.readlines()))
    lexx = Tokeniser(''.join(inpp.readlines()))
    out = open(pOutDir + '/' + f[0:7] + '.out', 'a', encoding='utf-8')
    p = Parser(lex)
    pp = Parser(lexx)
    try:
        semantic = SemanticAnalyser(p)
        semantic.analyse()
        r = treePrinter.getTree('', pp.ParseProgramModule())
    except Exception as err:
        out.write(''.join(err.args) + '\n')
    else:
        out.write(str(r))
        out.close()
    finally:
        # Read the expected output for this test case.
        expstr = ''
        exp = open('exp/' + f[0:7] + '.txt', 'r')
        for line in exp:
            expstr += line
def test_ParseFile(self):
    for path in files:
        p = Parser()
        p.parseFile(path)
        break