def organize_collection(self):
    tokens = []
    for file_item in self.vhdl_source_file_list:
        vhdl_object = VHD(file_item)
        with open(file_item, 'r') as file:
            content = file.read()
        # Lexer: tokenize the file content
        lex = Lexer(content)
        tokens = lex.tokenize()
        # Parser: walk the token stream
        parser = Parser(tokens)
        # Get the VHDL object's dependencies
        dep_list = parser.get_dependency()
        # Get the type of the VHDL object
        vhdl_type = parser.get_type()
        # Set the name, type and dependencies of the VHDL object
        vhdl_object.set_name(vhdl_type)
        vhdl_object.set_type(vhdl_type)
        vhdl_object.add_dependency(dep_list)
        # Add the VHDL object to the list and detect its generics
        self.vhdl_obj_list.append(vhdl_object)
        vhdl_object.get_generics(tokens)
    self._sort_compile_order(self.vhdl_obj_list)
def start(self, event):
    # Build the crawler from the URL and depth entry widgets, collect
    # links, then save the images found. find_all_links() is called on
    # its own line so nothing depends on its return value.
    p = Parser(
        self.elements["url_e"]["element"].get(),
        int(self.elements["depth_e"]["element"].get()),
        self.dir,
    )
    p.find_all_links()
    p.save_images()
def main():
    # Build the lexer and feed it the input file
    lexer = Lexer().build()
    with open('./input.txt') as file:
        text_input = file.read()
    lexer.input(text_input)
    # Build the parser and parse the same input
    parser = Parser()
    parser.build().parse(text_input, lexer, False)
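# The build()/input()/parse() call pattern above resembles wrappers
# around the PLY toolkit. If that is what Lexer.build() hides, the lexer
# side reduces to a sketch like the following; the token set and rules
# are invented for illustration and are not this project's grammar.
import ply.lex as lex

tokens = ('NUMBER', 'PLUS')

t_PLUS = r'\+'
t_ignore = ' \t\n'

def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t

def t_error(t):
    print(f'Illegal character {t.value[0]!r}')
    t.lexer.skip(1)

lexer = lex.lex()
lexer.input('1 + 2')
for tok in lexer:
    print(tok.type, tok.value)  # NUMBER 1, PLUS +, NUMBER 2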
def organize_collection(self) -> list:
    """
    For every file in the collection:
    1. Create a vhdl_object() object
    2. Tokenize the file
    3. Parse the file
    4. Read the file's dependencies
    5. Read the type of the file
    6. Set the vhdl_object name, type and dependencies
    7. Add the vhdl_object to vhdl_object_list
    8. Detect the vhdl_object generics
    Finally, sort the vhdl_objects by dependency.
    """
    for file_item in self.vhdl_source_file_list:
        # Get a new VHDL object
        vhdl_object = VHD(file_item)
        # Read the file content
        with open(file_item, 'r') as file:
            content = file.read()
        # Get tokens from the lexer
        lexer = Lexer(content)
        tokens = lexer.get_tokens()
        # Parse the token stream
        parser = Parser(tokens)
        # Get the VHDL object's dependencies
        dep_list = parser.get_dependency()
        # Get the type of the VHDL object
        vhdl_type = parser.get_type()
        # Set the name, type and dependencies of the VHDL object
        vhdl_object.set_name(vhdl_type)
        vhdl_object.set_type(vhdl_type)
        vhdl_object.add_dependency(dep_list)
        # Add the VHDL object to the list
        self.vhdl_obj_list.append(vhdl_object)
        # vhdl_object.get_generics(tokens)
    # Organize the VHDL objects by internal dependency
    return self._sort_compile_order(self.vhdl_obj_list)
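# _sort_compile_order() itself is not shown. A dependency-driven compile
# order is a plain topological sort; this is a minimal sketch under the
# assumption that each object exposes .name and .dependencies attributes
# (illustrative names, not this project's real accessors).
def sort_compile_order(objects):
    by_name = {obj.name: obj for obj in objects}
    ordered = []
    visited = set()

    def visit(obj):
        if obj.name in visited:
            return
        visited.add(obj.name)
        # Everything this object depends on must be compiled first
        for dep in obj.dependencies:
            if dep in by_name:
                visit(by_name[dep])
        ordered.append(obj)

    for obj in objects:
        visit(obj)
    return ordered
# A production version would also detect dependency cycles, which this
# depth-first sketch silently tolerates.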
def test_words_in():
    words_in = ['python', 'linux', 'flask']
    jobs = Parser('python')
    info = jobs.vacancies_pars(words_in)
    # Expected average occurrences for each keyword
    words_d = {'python': 58, 'linux': 16.8, 'flask': 4}
    check = True
    for i in info[1]:
        for j in i:
            words = i[j]
            for w in words:
                if int(words[w]) == 0 or w == 'all_vacancies':
                    continue
                expected = words_d[w]
                # The scaled count must stay within +/-1 of the expected value
                if not (expected - 1 < int(words[w]) / 5 < expected + 1):
                    check = False
    assert check
def main():
    name = input('Enter the vacancy name: ')
    words_in = input('Enter keywords separated by spaces: ').split(' ')
    jobs = Parser(name)
    info = jobs.vacancies_pars(words_in)
    if info == 0:
        print(f'No vacancies found for "{name}"')
    else:
        print(info[0])
        print(info[1])
        # Keyword totals per page
        for i in info[0]:
            for j in i:
                print(f'Found {i[j]} keywords on page {int(j[0]) + 1}')
        # Average frequency of each keyword per page
        for i in info[1]:
            for j in i:
                words = i[j]
                for w in words:
                    if int(words[w]) == 0 or w == 'all_vacancies':
                        continue
                    all_vac = int(words['all_vacancies'])
                    d = round(int(words[w]) / all_vac, 3)
                    print(f'On average, the word "{w}" appears {d} times on page {int(j[0]) + 1}')
from transfer_to_pn import PN
from stack_machine import stack_machine
from triad_processing import Triad
from thread_manager import Thread_manager
from my_thread import Thread
# Lexer and Parser are assumed to be defined or imported earlier;
# the original snippet uses them without an explicit import.

with open('test2.txt') as f:
    inp = f.read()

print('\nlexer:')
l = Lexer()
tokens = l.lex(inp)
p = Parser(tokens)
pars = p.lang()
print('\nparser:', pars)

if pars:
    # Translate the token stream to reverse Polish notation
    pn = PN(tokens)
    transfer, fun = pn.transfer_PN()
    # Process the main program's triads
    tr = Triad(transfer, fun)
    t, val = tr.triad_op()
    # Process the triads of each function body
    for i in range(len(fun)):
        print("\nFunction triads processing:")
        triad = Triad(fun[i][-1], fun)
        fun[i][-1] = triad.triad_op(False)
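# For context, evaluating reverse Polish notation with a stack machine
# (the role stack_machine presumably plays above) reduces to the classic
# loop below; the token format is invented for illustration and is not
# this project's real structure.
def eval_rpn(rpn_tokens):
    ops = {
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
        '*': lambda a, b: a * b,
        '/': lambda a, b: a / b,
    }
    stack = []
    for tok in rpn_tokens:
        if tok in ops:
            b = stack.pop()  # right operand was pushed last
            a = stack.pop()
            stack.append(ops[tok](a, b))
        else:
            stack.append(float(tok))
    return stack.pop()

print(eval_rpn('2 3 4 * +'.split()))  # '2 3 4 * +' is 2 + 3 * 4 -> 14.0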
def printTable(self):
    self.parser = Parser(self.grammar)
    self.parser.generateFirstSet()
    self.parser.follow()
    self.parser.Table()
    print(self.parser.table)
def printFollowSet(self):
    self.parser = Parser(self.grammar)
    self.parser.generateFirstSet()
    self.parser.follow()
    print(self.parser.getFollow())
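# Both helpers above depend on generateFirstSet(). The standard FIRST-set
# computation is a fixed-point iteration; this is a minimal sketch that
# assumes a dict-of-productions grammar representation (an assumption,
# not necessarily this project's format), with epsilon spelled 'eps'.
def first_sets(grammar, epsilon='eps'):
    first = {nt: set() for nt in grammar}
    changed = True
    while changed:
        changed = False
        for nt, productions in grammar.items():
            for prod in productions:
                for symbol in prod:
                    if symbol not in grammar:
                        # Terminal: it starts the production, then stop
                        if symbol not in first[nt]:
                            first[nt].add(symbol)
                            changed = True
                        break
                    # Nonterminal: inherit its FIRST set minus epsilon
                    added = first[symbol] - {epsilon}
                    if not added <= first[nt]:
                        first[nt] |= added
                        changed = True
                    if epsilon not in first[symbol]:
                        break
                else:
                    # Every symbol was nullable, so the production is too
                    if epsilon not in first[nt]:
                        first[nt].add(epsilon)
                        changed = True
    return first

g = {'S': [['A', 'b']], 'A': [['a'], ['eps']]}
print(first_sets(g))  # FIRST(S) = {a, b}, FIRST(A) = {a, eps}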
from pars import Parser

if __name__ == '__main__':
    parser = Parser()
    parser.login()
    parser.parse_links()
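# A minimal sketch of the login-then-scrape pattern such a Parser class
# typically wraps, built on requests and BeautifulSoup; the URL, endpoint
# and form field names are placeholders, not this project's real values.
import requests
from bs4 import BeautifulSoup

class LinkScraper:
    def __init__(self, base_url):
        self.base_url = base_url
        self.session = requests.Session()  # keeps auth cookies between calls

    def login(self, username, password):
        # Placeholder endpoint and form fields
        self.session.post(f'{self.base_url}/login',
                          data={'user': username, 'pass': password})

    def parse_links(self):
        resp = self.session.get(self.base_url)
        soup = BeautifulSoup(resp.text, 'html.parser')
        return [a['href'] for a in soup.find_all('a', href=True)]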
def test_soup_desc(check_connect):
    pars = Parser('python')
    link = "https://rabota.by/vacancy/43048095?query=python"
    results = pars.get_info_vacancies(link)
    assert results != []
def test_soup(check_connect):
    jobs = Parser('python')
    info = jobs.vacancies_pars('')
    assert info != False
def test_word():
    # 'дробовик' (Russian for "shotgun") is a query expected to match
    # no vacancies, so the parser should return 0; the literal is kept
    # as-is because the test targets a Russian-language job board
    jobs = Parser('дробовик')
    info = jobs.vacancies_pars('')
    assert info == 0