def load_grammers():
    location = os.path.join(os.getcwd(), config["grammer-location"])
    folders = os.listdir(location)
    grammers = []
    for folder_name in folders:
        # Load and split the production rules of this grammar.
        productions = load_file(
            os.path.join(location, folder_name,
                         config["folder-structure"]["productions"]))
        productions = list(map(splitter, productions))

        # Load and split the test inputs of this grammar.
        tests = load_file(
            os.path.join(location, folder_name,
                         config["folder-structure"]["test-inputs"]))
        tests = list(map(splitter, tests))

        # "$" (end of input) is always a terminal; the left-hand side of
        # every production is a non-terminal.
        terminals = {"$": Terminal("$")}
        non_terminals = {
            production[0]: NonTerminal(production[0])
            for production in productions
        }
        grammers.append(Grammer(terminals, non_terminals, productions, tests))

        # Run every test input through the parser and print each step,
        # starting from the first production's left-hand side.
        parser = Parser(terminals, non_terminals)
        for test in tests:
            print()
            print("####### Testing '{}' #######".format(" ".join(test)))
            steps = parser.test(test, terminals, non_terminals, productions[0][0])
            for step in steps:
                print(step[0], "::", step[1], "=>", step[2])
                # if step[-1]:
                #     print(step[-2])
            print("####### Test Case 'Accepted' #######")
            print()

def main():
    lexer = Lexer()
    parser = Parser(lexer)
    state_machine = StateMachine(parser)
    # print(state_machine.from_regex('|(.(a,*(b)),*(c))'))

    # Read the machine description from file and build the internal lists.
    file_contents = state_machine.from_file()
    state_machine.parser.parse_file(file_contents)
    state_machine.update_lists()

    # Print the raw transitions.
    for item in state_machine.transitions:
        print('Transition origin: {}, edge: {}, destination: {}'.format(
            item.origin, item.edge, item.destination))

    print('Initial state:')
    print(state_machine.initial_state.state_name)

    print('Alphabet:')
    for letter in state_machine.alphabet:
        print(letter)

    for state in state_machine.states:
        print(state)

    # Attach the edges to their states and print the resulting graph.
    state_machine.process_transitions()
    for state in state_machine.states:
        print(state.state_name)
        for edge in state.state_edges:
            print(edge.edge_label)
            print(edge.edge_destination)

    # Report whether the machine is deterministic, then build an NFA
    # from a regex in the project's prefix notation.
    print(state_machine.process_if_dfa())
    state_machine.regex_to_nfa('*(a)')

def get_wiki_page(url):
    # Fetch the wiki page over HTTP basic auth.
    r = requests.get(url, auth=(wiki_user, wiki_password))
    body = r.text

    # Open the xmind workbook and add the page as a new sub-topic.
    w = xmind.load('test.xmind')
    s1 = w.getPrimarySheet()
    r1 = s1.getRootTopic()
    # r1.setTitle('SRS')
    feature = r1.addSubTopic()
    feature.setTitle('Some Feature')

    # Convert the HTML body into xmind sub-topics and save.
    Parser(feature, body).html_to_xmind()
    xmind.save(w, "test.xmind")


# test_xmind_file("srs")
# parse_html()
# get_wiki_page(test_url)

def parse_html():
    path = Path.cwd().joinpath("output")

    # Load the xmind file to write into.
    w = xmind.load('test.xmind')
    s1 = w.createSheet()
    r1 = s1.getRootTopic()
    r1.setTitle('SRS')

    for s in path.iterdir():
        path_section = s
        # Set a section in the map.
        section = r1.addSubTopic()
        section.setTitle(s.name)
        for l in path_section.iterdir():
            # Set a feature in the map.
            feature = section.addSubTopic()
            feature.setTitle(l.stem)
            with l.open() as f:
                content = f.read()
            Parser(feature, content).html_to_xmind()

    xmind.save(w, "test.xmind")

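# For reference, the two-level layout under "output/" that parse_html() assumes.
# The directory and file names and the .html extension below are illustrative
# guesses; only the nesting (section directories containing feature files) is
# implied by the code above:
#
#   output/
#       Authentication/           -> section topic "Authentication"
#           login.html             -> feature topic "login"
#           logout.html            -> feature topic "logout"
#       Reporting/
#           export.html            -> feature topic "export"
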
def location_request():
    # Retrieve the input from the form.
    user_input = request.form['user_input']

    # Parse the input.
    parsed_user_input = Parser.arrange_input(user_input)

    # Instantiate the GoogleMapsApi class with the parsed input.
    maps = GoogleMapsApi(parsed_user_input)

    # If a location exists for the user's input, return a story, a map, a URL...
    # If no location exists, return a simple message.
    try:
        story = MediaWikiApi(maps)
    except UnknownLocation:
        message = ("Je ne sais pas de quoi tu parles petit ! "
                   "Pose moi une vraie question.")
    else:
        address_introduction = GrandPyBotConversation.random_response()
        message = address_introduction + maps.address
        lat = maps.latitude
        lng = maps.longitude

        if message:
            page = story.request_wiki_page()
            page_title = page[1].replace(" ", "_")
            wiki_page_url = f'https://fr.wikipedia.org/wiki/{page_title}'
            address_story = (GrandPyBotConversation.random_story()
                             + story.request_wiki_summary(page[0]) + ".. ")
            end_quote = GrandPyBotConversation.random_end_quote()
            return jsonify({
                'user_input': user_input,
                'message': message,
                'address_story': address_story,
                'wiki_page_url': wiki_page_url,
                'end_quote': end_quote,
                'latitude': lat,
                'longitude': lng
            })

    # Fallback: unknown location (or nothing to enrich).
    return jsonify({'user_input': user_input, 'message': message})

def query():
    """Receive the query from the client side (input form) via AJAX, build
    the objects from classes.py, run their methods and return everything
    the page needs as JSON."""
    # Get the text the user typed in the input form.
    user_text = request.args.get("text")

    # Parse the user text: Parser instance creation, then run the parsing method.
    user_request = Parser(user_text)
    user_query = user_request.parsing()

    # GoogleMaps instance creation.
    query = GoogleMaps(user_query)

    # Find the address of the place looked for.
    try:
        # Run the location method and retrieve latitude, longitude and the
        # formatted address of the place the user is looking for.
        address_coords = query.location()
        format_address = address_coords[0]
        latitude = address_coords[1]
        longitude = address_coords[2]

        # GrandPy Bot messages in case of success.
        addressAnswer = GrandPyMessages.randomAnswer()

        # Find a story about the wanted place.
        try:
            # MediaWiki instance creation; get the Wikipedia extract and page
            # id for those coordinates.
            coords = Wiki(latitude, longitude)
            wikiExtract = coords.comment()[0]
            pageid = coords.comment()[1]
            if wikiExtract:
                # GrandPy Bot messages in case of success.
                storyAnswer = GrandPyMessages.randomStory()
            else:
                # GrandPy Bot messages if there is no answer from Wikipedia.
                storyAnswer = GrandPyMessages.randomNoStory()
                wikiExtract = ""
                pageid = ""
        except Exception:
            # GrandPy Bot messages if there is no answer from Wikipedia.
            storyAnswer = GrandPyMessages.randomNoStory()
            wikiExtract = ""
            pageid = ""
    except Exception:
        # GrandPy Bot messages if there is no answer from GoogleMaps.
        addressAnswer = GrandPyMessages.randomNoAnswer()
        latitude = ""
        longitude = ""
        format_address = ""
        wikiExtract = ""
        storyAnswer = ""
        pageid = ""

    # JSON with the responses sent back to AJAX (home.js).
    return json.dumps({
        "userText": user_text,
        "addressAnswer": addressAnswer,
        "lat": latitude,
        "lng": longitude,
        "format_address": format_address,
        "storyAnswer": storyAnswer,
        "wikiExtract": wikiExtract,
        "pageid": pageid,
    })

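# Shape of the JSON object that home.js receives from query() on a successful
# lookup. The values below are illustrative placeholders, not real API output;
# only the key names come from the code above.
example_response = {
    "userText": "où se trouve openclassrooms ?",  # text typed by the user
    "addressAnswer": "...",   # one of GrandPyMessages.randomAnswer()
    "lat": 48.87,             # placeholder latitude from GoogleMaps.location()
    "lng": 2.35,              # placeholder longitude
    "format_address": "...",  # formatted address of the place
    "storyAnswer": "...",     # randomStory() or randomNoStory()
    "wikiExtract": "...",     # Wikipedia extract, or "" when none was found
    "pageid": "...",          # Wikipedia page id, or "" when none was found
}
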
def parse(self):
    return Parser(self.tokenize()).parse()

from classes import Parser

parser = Parser('https://www.ua-football.com/sport', 'news.txt')
parser.run()
# print(parser.raw_html)
# print(parser.html)
# print(parser.results)

# ------------------------------------------------------------------------------
#
# Information Retrieval, 2014/15
#
# Project assignment:
#   Parsing types with names in multiple languages - restricted to 1 or N
#   defined languages.
#
# Author:
#   Lukáš Gregorovič, 64341
#
# ------------------------------------------------------------------------------
import string
import json
# import jsonpickle

from classes import Name, ParsedObject, FreebaseObjects, Parser

# ------------------------------------------------------------------------------
filename = "data/sample_freebase-rdf-2014-09-28-00-00"
# filename = "data/tmp/names_types_2"
# filename = "D:/freebase_grep/names_types_2_all"
# filename = "/media/mint/ProBookHDD/freebase_names_types_utf-8_all"
outputFilename = "data/tmp/freebase_output_sample"

parser = Parser()
parser.parseFile(filename, outputFilename)

def test_parser(self):
    """Parse a request to test the parser."""
    test_user_request = Parser(
        "bonjour papy, pourrais-tu me dire où se trouve openclassrooms?")
    assert test_user_request.parsing() == "openclassrooms"

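# For context, a minimal sketch of a parsing() implementation that would make
# the assertion above pass, assuming a simple stop-word filter. The real
# Parser in classes.py may work differently; the class name, stop-word list
# and regex below are illustrative only.
import re

FRENCH_STOP_WORDS = {
    "bonjour", "papy", "pourrais", "tu", "me", "dire", "où", "se", "trouve",
}


class StopWordParser:
    """Hypothetical stand-in for the Parser used in the test above."""

    def __init__(self, text):
        self.text = text

    def parsing(self):
        # Lowercase, split on anything that is not a letter, drop stop words.
        words = re.split(r"[^a-zà-ÿ]+", self.text.lower())
        kept = [word for word in words if word and word not in FRENCH_STOP_WORDS]
        return " ".join(kept)


assert StopWordParser(
    "bonjour papy, pourrais-tu me dire où se trouve openclassrooms?"
).parsing() == "openclassrooms"
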
from classes import Parser

parser = Parser(
    'https://www.ua-football.com/sport',
    'news.txt',
    'li',
    'liga-news-item',
    'span',
    'd-block',
)
parser.run()
# print(parser.raw_html)
# print(parser.html)
print(parser.results)

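# The extra constructor arguments look like (tag, CSS class) pairs: one pair
# to locate each news item and one pair for the text node inside it. A hedged
# sketch of how such a scraper could use them with requests + BeautifulSoup
# follows; the real classes.Parser may be implemented differently and the
# scrape() helper below is purely illustrative.
import requests
from bs4 import BeautifulSoup


def scrape(url, item_tag, item_class, text_tag, text_class):
    """Collect the text of every <text_tag class=text_class> element found
    inside each <item_tag class=item_class> element of the page at url."""
    html = requests.get(url).text
    soup = BeautifulSoup(html, "html.parser")
    results = []
    for item in soup.find_all(item_tag, class_=item_class):
        text_node = item.find(text_tag, class_=text_class)
        if text_node:
            results.append(text_node.get_text(strip=True))
    return results


# Example call mirroring the arguments above:
# scrape('https://www.ua-football.com/sport', 'li', 'liga-news-item',
#        'span', 'd-block')
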