def test_single_indent_spaces(self):
    """A tab followed by a space before a feature line counts as one indent level."""
    result = Parser().parse_line("\t FEATURE TYPE: ## ADHOC")
    self.assertIsNotNone(result)
    level, _node = result
    self.assertEqual(1, level)
def test_multiple_indent_mixed(self):
    """Mixed tab/space indentation plus an arrow prefix counts as two indent levels."""
    result = Parser().parse_line("\t \t->FEATURE TYPE: ## ADHOC")
    self.assertIsNotNone(result)
    level, _node = result
    self.assertEqual(2, level)
def test_single_indent_spaces(self):
    """A tab followed by a space before a node line counts as one indent level."""
    result = Parser().parse_line("\t NODE_1952482365_2914[...,...]")
    self.assertIsNotNone(result)
    level, _node = result
    self.assertEqual(1, level)
def get_ability_markers(spawn_list, ship_stats):
    """
    Build TimeLine marker tuples for the Engine, Shield, Systems and
    CoPilot ability activations found in a spawn's CombatLog lines.

    :param spawn_list: list of event lines (raw strings or dictionaries)
    :param ship_stats: currently unused; reserved for availability markers
    :return: dict mapping category -> list of (args, kwargs) marker tuples
    """
    # TODO: Use ship_statistics to create availability markers
    categories = ["engines", "shields", "copilot", "systems"]
    player_id_list = Parser.get_player_id_list(spawn_list)
    results = {category: [] for category in categories}
    # Activation markers
    for event in spawn_list:
        if not isinstance(event, dict):
            event = Parser.line_to_dictionary(event)
        ability = event["ability"]
        # Only self-targeted ability activations by the player are marked
        if (event["source"] != event["target"]
                or event["source"] not in player_id_list
                or "AbilityActivate" not in event["effect"]):
            continue
        if ability in abilities.copilots:
            category = "copilot"
        elif ability in abilities.shields:
            category = "shields"
        elif ability in abilities.systems:
            category = "systems"
        elif ability in abilities.engines:
            category = "engines"
        else:
            continue
        start = FileHandler.datetime_to_float(event["time"])
        marker_args = ("abilities", start, start + 1 / 60)
        marker_kwargs = {"background": FileHandler.colors[category]}
        results[category].append((marker_args, marker_kwargs))
    return results
def test_no_indent(self):
    """A node line with no leading whitespace has indent level zero."""
    result = Parser().parse_line("NODE_1952482365_2914[...,...]")
    self.assertIsNotNone(result)
    level, _node = result
    self.assertEqual(0, level)
def test_comment(self):
    """An HTML comment parses under the comment grammar and renders."""
    txt = ('<!-- In the interest of restricting article length, please limit this section to two or three short '
           'paragraphs and add any substantial information to the main Issues in anarchism article. Thank you. '
           '--> ')
    ast = Parser().parse(txt, Grammar.comment)
    print(ast, Compiler().render(ast))
def main():
    """
    Program entry point.

    Reads [lexicon_file grammar_file source_file] from the command line,
    lexes the source file into an annotated lexeme sequence using the
    lexicon, parses that sequence with the grammar, and prints the AST.
    """
    options, args = parse_arguments()
    lexicon_file, grammar_file, input_file = args
    lexer_instance = Lexer(lexicon_file, options.use_builtin_lexemes)
    lexing_sequence = lexer_instance.get_lexing_sequence_from_file(input_file)
    parser_instance = Parser(grammar_file, lexer_instance.lexicon_dict.keys())
    print(parser_instance.get_ast(lexing_sequence, options.ignore_spaces))
def test_multiple_indent_mixed(self):
    """Mixed tab/space indentation plus an arrow prefix on a node line is two levels."""
    result = Parser().parse_line("\t \t->NODE_1952482365_2914[...,...]")
    self.assertIsNotNone(result)
    level, _node = result
    self.assertEqual(2, level)
def test_no_indent(self):
    """A feature line with no leading whitespace has indent level zero."""
    result = Parser().parse_line("FEATURE TYPE: ## ADHOC")
    self.assertIsNotNone(result)
    level, _node = result
    self.assertEqual(0, level)
def include(self, file_name):
    """
    Verify the syntax tree of an included file, tracking processed paths
    so the same file is never verified twice.
    """
    path = mshl.find_include_file(file_name)
    if not path:
        mshl.error(
            'cannot include {}: no such file exists'.format(file_name))
        return
    if path in self.includes:
        return  # already verified
    self.includes.append(path)
    # FIXME: This crap needs to be cleaned up and moved into some other
    # function in another file.
    with open(path, 'r') as handle:
        tree = Parser(Lexer(StringSource(handle.read()))).generate_ast()
    # Temporarily point error reporting at the included file
    previous_srcfile = mshl.srcfile
    mshl.srcfile = file_name
    self.verify_internal(tree)
    mshl.srcfile = previous_srcfile
def parse_spawn(self, elements):
    """
    Either starts the parsing of ALL spawns found in the specified match
    or just one of them and displays the results in the other frames
    accordingly.

    :param elements: sequence of (file_name, match_index, spawn_index);
        the indices arrive as strings and are converted to int here
    """
    self.clear_data_widgets()
    self.main_window.middle_frame.statistics_numbers_var.set("")
    self.main_window.ship_frame.ship_label_var.set("No match or spawn selected yet.")
    file_name, match_index, spawn_index = elements[0], int(elements[1]), int(elements[2])
    lines = Parser.read_file(file_name)
    player_list = Parser.get_player_id_list(lines)
    player_name = Parser.get_player_name(lines)
    file_cube, match_timings, spawn_timings = Parser.split_combatlog(lines, player_list)
    match = file_cube[match_index]
    spawn = match[spawn_index]
    results = list(spawnstats.spawn_statistics(
        file_name, spawn, spawn_timings[match_index][spawn_index]))
    results[1] = Parser.parse_player_reaction_time(spawn, player_name)
    orig = len(results[1])
    # Replace the reaction-time events with the ScreenParser event list;
    # match_timings[::2] presumably selects match start times — TODO confirm
    results[1] = ScreenParser.build_spawn_events(
        file_name, match_timings[::2][match_index], spawn_timings[match_index][spawn_index], spawn, player_name)
    print("[FileFrame] ScreenParser built {} events. Total: {}".format(len(results[1]) - orig, len(results[1])))
    self.update_widgets_spawn(*results)
    arguments = (file_name, match_timings[::2][match_index], spawn_timings[match_index][spawn_index])
    string = FileHandler.get_features_string(*arguments)
    self.main_window.middle_frame.screen_label_var.set(string)
    self.main_window.middle_frame.update_timeline(
        file_name, match_index, spawn_index, match_timings, spawn_timings, file_cube)
def parse_ship_descriptor(ship: Ship, line: dict, lines: list, event: tuple):
    """
    Parse an event_descriptor of the SHIP type. Supports ability, component
    and crew type operations.

    :param ship: Ship instance for the spawn described by lines, may be None
    :param line: Trigger line dictionary
    :param lines: List of lines in this spawn
    :param event: Event descriptor tuple (PatternParser docstring)
    :return: The result of this SHIP event descriptor
    :raises InvalidDescriptor: if the descriptor is not SHIP-typed or its
        sub-type is not recognized
    """
    if event[0] != Patterns.SHIP:
        raise InvalidDescriptor(event)
    event_type, args = event[1], event[2:]  # "ability", "component", "crew"
    # Parse Component selected
    if event_type == "component":
        # Component must be selected on the Ship for this spawn
        component, = args
        return PatternParser.get_component_in_ship(lines, ship, component)
    # Parse Crew selected
    elif event_type == "crew":
        crew, = args
        return PatternParser.get_crew_in_ship(ship, crew)
    # Parse Ability Available
    elif event_type == "ability":
        return PatternParser.parse_ability_availability(line, lines, event, ship)
    # Parse ship type selected
    elif event_type == "type":
        # TODO: Optimize usage
        player = Parser.get_player_id_list(lines)
        abs_dict = Parser.get_abilities_dict(lines, player)
        # Bugfix: the original read ship.name when ship was None (inverted
        # condition), which would raise AttributeError; fall back to the
        # abilities-derived ship name only when no Ship instance exists
        ship_name = ship.name if ship is not None else Parser.get_ship_for_dict(abs_dict)
        ship_type, = args
        return get_ship_category(ship_name) == ship_type
    raise InvalidDescriptor(event)
def _build_tracking_effects(events: list, screen_data: dict, ship: ShipStats):
    """
    Determine the tracking penalty for each primary weapon damage event.

    Matching events receive an "effects" entry describing the penalty and
    are moved to the end of the list; all other events keep their order.

    :param events: spawn event dictionaries in chronological order
    :param screen_data: screen parsing data; "distance" maps datetimes to
        distance readings
    :param ship: ShipStats instance for this spawn
    :return: the events list (also mutated in place)
    """
    active_ids = Parser.get_player_id_list(events)
    distance = screen_data["distance"]
    primary = "PrimaryWeapon"
    kept, annotated = [], []
    # Bugfix: the original called `del events[i]` while iterating the same
    # list with enumerate, which silently skipped the event following every
    # match; rebuild the list instead
    for event in events:
        if "custom" in event and event["custom"] is True:
            kept.append(event)
            continue
        if "Primary Weapon Swap" in event["ability"] and event["self"] is True:
            # Toggle between the two primary weapon slots
            primary = "PrimaryWeapon2" if primary == "PrimaryWeapon" else "PrimaryWeapon"
            kept.append(event)
            continue
        if Parser.get_event_category(event, active_ids) != "dmgd_pri":
            kept.append(event)
            continue
        # Distance reading closest in time to this event; ignore readings
        # more than half a second away
        key = min(distance.keys(), key=lambda k: abs((k - event["time"]).total_seconds()))
        if abs((key - event["time"]).total_seconds()) > 0.5 or primary not in ship:
            kept.append(event)
            continue
        tracking = ship[primary]["trackingAccuracyLoss"] * (distance[key] / 10) * 100
        event["effects"] = (
            ("", "Tracking", "Penalty", "-{:.0f}%".format(tracking),
             "", "spvp_improvedfiringarctrackingbonus"),
        )
        annotated.append(event)
    events[:] = kept + annotated  # mutate in place: callers may hold a reference
    return events
def update_files(self):
    """
    Update the Calendar with the files found in the configured folder.

    Prompts for a new folder (and retries) when the configured path does
    not exist; otherwise builds a per-date match count for the heatmap.
    """
    self.clear_data_widgets()
    self._dates.clear()
    folder = variables.settings["parsing"]["path"]
    if not os.path.exists(folder):
        messagebox.showerror("Error", "The specified CombatLogs folder does not exist. Please "
                                      "choose a different folder.")
        folder = filedialog.askdirectory()
        variables.settings.write_settings({"parsing": {"path": folder}})
        return self.update_files()
    files = [f for f in os.listdir(folder) if Parser.get_gsf_in_file(f)]
    self.create_splash(len(files))
    # Bugfix: annotation used slice syntax Dict[datetime: int]
    match_count: Dict[datetime, int] = DateKeyDict()
    for file in files:
        date = Parser.parse_filename(file)
        if date is None:  # Failed to parse
            continue
        if date not in match_count:
            match_count[date] = 0
        match_count[date] += Parser.count_matches(file)
        if date not in self._dates:
            self._dates[date] = list()
        self._dates[date].append(file)
        self._splash.increment()
    self._calendar.update_heatmap(match_count)
    self.destroy_splash()
def get_component_in_ship(lines: list, ship: Ship, component: (str, Component)):
    """
    Return whether a component is found within a Ship instance.

    :param lines: spawn CombatLog lines, used for the abilities fallback
    :param ship: Ship instance to inspect, may be None
    :param component: Component instance or component name string
    :return: True if the component was used in the spawn or is fitted on
        the ship, False otherwise
    """
    if isinstance(component, Component):
        name, category = component.name, component.category
    else:  # str
        name = component
        category = PatternParser.get_component_category(component)
    player = Parser.get_player_id_list(lines)
    # Renamed local to avoid shadowing the module-level `abilities` import
    ability_dict = Parser.get_abilities_dict(lines, player)
    if name in ability_dict:
        return True
    if ship is None:
        return False  # Ship option not available at results time
    if category not in ship:
        return False  # Configured improperly at results time
    categories = (category,)
    if "Weapon" in category:  # Extend to double primaries/secondaries
        # Bugfix: original used category[0] + "2" ("P2"), which can never
        # match keys such as "PrimaryWeapon2" used elsewhere in this module
        categories += (category + "2",)
    # Loop over categories
    for category in categories:
        if category not in ship:  # Double primaries/secondaries
            continue
        fitted = ship[category]
        if not isinstance(fitted, Component):  # Improper config
            print("[PatternParser] Improperly configured Ship instance:", ship, fitted)
            continue
        if fitted.name == name:
            return True
    return False
def parse_expr(input: str) -> Expr:
    """Tokenize *input* and parse the token stream into an expression AST."""
    from parsing.lexer import get_all_tokens, Lexer
    from parsing.parser import Parser
    tokens = get_all_tokens(Lexer(input))
    return Parser(tokens).expression()
def test_link(self):
    """A file link with nested links and templates parses under the link grammar."""
    txt = '[[File:Nearest_stars_rotating_red-green.gif|alt=Rotating 3D image of the nearest stars|thumb|Animated 3D map of the nearest stars, centered on the Sun. {{3d glasses|color=red green}}]]'
    txt2 = '[[File:William Shea.jpg|thumb|upright|[[William Shea]] was instrumental in returning [[National League|National League baseball| [[asd|{{asd}}]]]] to [[New York City]] after five years of absence.]]'
    txt3 = '[[asd]]'
    ast = Parser().parse(txt2, Grammar.link)
    print(ast)
    return ast
def test_adhoc(self):
    """An ADHOC feature line parses into a feature node with the expected metadata."""
    result = Parser().parse_line("FEATURE TYPE: ## ADHOC")
    self.assertIsNotNone(result)
    _level, node = result
    self.assertEqual(["adhoc"], node.features())
    self.assertEqual('feature', node.line_type())
    self.assertEqual('FEATURE TYPE: ## ADHOC', node.line())
def test_elipsis(self):
    """A node line with ellipsis arguments parses into an elipsis-role node."""
    result = Parser().parse_line("NODE_1952482365_2914[...,...]")
    self.assertIsNotNone(result)
    _level, node = result
    self.assertEqual('elipsis', node.role())
    self.assertEqual('node', node.line_type())
    self.assertEqual('NODE_1952482365_2914[...,...]', node.line())
def test_headings(self):
    """A level-2 heading parses under the headings grammar (other levels kept for reference)."""
    txt = '==asd=='
    txt3 = '===asd==='
    txt4 = '====asd===='
    txt5 = '=====asd====='
    txt6 = '======asd======'
    ast = Parser().parse(txt, expression=Grammar.headings)
    print(ast)
    return ast
def get_treeview_values(line_dict, player_name, start_time, active_ids):
    """Return the Treeview row values for a single event dictionary."""
    def display(ref):
        # Show the player's name for ids that belong to the player
        return player_name if Parser.compare_ids(ref, active_ids) else ref
    return (
        TimeView.format_time_diff(line_dict["time"], start_time),
        display(line_dict["source"]),
        display(line_dict["target"]),
        line_dict["ability"],
        line_dict["amount"],
    )
def test_parse(self, name='wikitext'):
    """Parse the bundled wikitext fixture and report how long the AST build took."""
    with (DATA_FOLDER / name).open(encoding="utf8") as f:
        text = f.read()
    t_start = time.time()
    ast = Parser().parse(text)
    t_end = time.time()
    print('Ast built in: ', t_end - t_start)
    return ast
def shell(fn, text):
    """Lex and parse *text* from file *fn*; return (AST node, error)."""
    tokens, exception = Lexer(fn, text).make_tokens()
    if exception:
        return None, exception
    result = Parser(tokens).parse()
    return result.node, result.error
def update_files(self, silent=False):
    """
    Function that checks files found in the in the settings specified folder
    for GSF matches and if those are found in a file, it gets added to the
    listbox. Provides error handling.

    :param silent: when True, no per-file SplashScreen progress is shown
    """
    self.file_tree.delete(*self.file_tree.get_children())
    self.clear_data_widgets()
    self.main_window.ship_frame.ship_label_var.set("")
    try:
        # Validate the configured path by attempting to chdir into it and back
        old_cwd = os.getcwd()
        os.chdir(variables.settings["parsing"]["path"])
        os.chdir(old_cwd)
    except OSError:
        tkinter.messagebox.showerror(
            "Error",
            "The CombatLogs folder found in the settings file is not valid. Please "
            "choose another folder.")
        folder = tkinter.filedialog.askdirectory(title="CombatLogs folder")
        variables.settings.write_settings({'parsing': {'path': folder}})
        variables.settings.read_settings()
    combatlogs_folder = variables.settings["parsing"]["path"]
    file_list = os.listdir(combatlogs_folder)
    if not silent:
        splash_screen = SplashScreen(self.main_window, len(file_list), title="Loading files")
    else:
        splash_screen = None
    if len(file_list) > 100:
        # NOTE(review): this string was garbled in the original source; the
        # wording below is the apparent intent — confirm against history
        tkinter.messagebox.showinfo(
            "Suggestion",
            "Your CombatLogs folder contains a lot of CombatLogs, {0} to be "
            "precise. How about moving them to a nice archive folder? This "
            "will speed up some processes "
            "significantly.".format(len(file_list)))
    self.file_tree.insert("", tk.END, iid="all", text="All CombatLogs")
    # Sort order follows the ascending flag; default is newest first
    file_list = list(reversed(sorted(file_list)) if not self.ascending else sorted(file_list))
    if self.main_window.splash is not None and self.main_window.splash.winfo_exists():
        self.main_window.splash.update_max(len(file_list))
    for number, file in enumerate(file_list):
        if not Parser.get_gsf_in_file(file):
            continue
        file_string = Parser.parse_filename(file)
        if file_string is None:
            continue
        self.file_string_dict[file_string] = file
        number += 1
        if splash_screen is not None:
            splash_screen.increment()
            splash_screen.update()
        self.insert_file(file_string)
        if self.main_window.splash is not None and self.main_window.splash.winfo_exists():
            self.main_window.splash.increment()
    if splash_screen is not None:
        splash_screen.destroy()
    return
def test_get_effects_ability_eligible(self):
    """get_effects_ability yields effects for an eligible line and nothing otherwise."""
    with open(self.FILE) as handle:
        raw_lines = handle.readlines()
    effect_index = raw_lines.index(self.EFFECT)
    plain_index = raw_lines.index(self.LINE)
    lines = Parser.read_file(self.FILE)
    player = Parser.get_player_id_list(lines)
    line = Parser.line_to_dictionary(lines[effect_index], player)
    effect = Parser.get_effects_ability(line, lines, "2963000049645")
    self.assertIsInstance(effect, dict)
    self.assertTrue(len(effect) > 0)
    # Tests get_effects_eligible
    self.assertFalse(Parser.get_effects_ability(lines[plain_index], lines, "2963000048240"))
def test_parse_file_stats(self):
    """parse_file reports read/parsed/skipped counts for mixed input lines."""
    lines = [
        "true",
        "Using complex purge",
        "bloop",
        "bleep",
        "FEATURE TYPE: ## ADHOC",
    ]
    stats = Parser().parse_file(lines)
    self.assertEqual(5, stats['nb_read'])
    self.assertEqual(1, stats['nb_parsed'])
    self.assertEqual(2, stats['nb_skipped'])
def insert_spawn(self, spawn, player_name, active_ids: list = None):
    """Insert the events of a spawn into the Treeview."""
    self.delete_all()
    if len(spawn) == 0:
        raise ValueError("Invalid spawn passed.")
    # Normalize raw log lines to dictionaries up-front
    if not isinstance(spawn[0], dict):
        spawn = [Parser.line_to_dictionary(line) for line in spawn]
    start_time = spawn[0]["time"]
    if active_ids is None:
        active_ids = Parser.get_player_id_list(spawn)
    for line in spawn:
        if "custom" in line and line["custom"] is not False:
            event = line  # custom events are already event dictionaries
        else:
            event = Parser.line_to_event_dictionary(line, active_ids, spawn)
        self.insert_event(event, player_name, active_ids, start_time)
def spawn_statistics(file_name, spawn, spawn_timing, sharing_db=None): """Build strings to show in the StatsFrame""" # Retrieve required data lines = Parser.read_file(file_name, sharing_db) player_numbers = Parser.get_player_id_list(lines) (abilities_dict, dmg_t, dmg_d, healing, dmg_s, enemies, critcount, crit_luck, hitcount, ships_list, enemy_dmg_d, enemy_dmg_t) = \ Parser.parse_spawn(spawn, player_numbers) name = Parser.get_player_name(lines) # Build the statistics string stat_string = "{name}\n{enemies} enemies\n{dmg_d}\n{dmg_t}\n{dmg_r:.1f} : 1.0\n" \ "{dmg_s}\n{healing}\n{hitcount}\n{critcount}\n{crit_luck:.2f}\n" \ "{deaths}\n{minutes}:{seconds:.0f}\n{dps:.1f}" start = spawn_timing finish = Parser.line_to_dictionary(spawn[-1])["time"] delta = finish - start minutes, seconds = divmod(delta.total_seconds(), 60) killsassists = sum(True if enemy_dmg_t[enemy] > 0 else False for enemy in enemies if enemy in enemy_dmg_t) stat_string = stat_string.format( name=name, enemies=killsassists, dmg_d=dmg_d, dmg_t=dmg_t, dmg_r=dmg_d / dmg_t if dmg_t != 0 else 0, dmg_s=dmg_s, healing=healing, hitcount=hitcount, critcount=critcount, crit_luck=critcount / hitcount if hitcount != 0 else 0, deaths="-", minutes=minutes, seconds=seconds, dps=dmg_d / delta.total_seconds() if delta.total_seconds() != 0 else 0 ) # Build the components list components = {key: "" for key in abilities.component_types} for component in [ability for ability in abilities_dict.keys() if ability in abilities.components]: for type in components.keys(): if component not in getattr(abilities, type): continue # Dual primary/secondary weapons if components[type] != "": components[type] += " / {}".format(component) break components[type] = component break components = [components[category] for category in abilities.component_types] # Return return name, spawn, abilities_dict, stat_string, ships_list, components, enemies, enemy_dmg_d, enemy_dmg_t
def get_prop(self, str):
    """
    Extract the leading proposition from *str* and parse it.

    Scanning starts at the first alphabetic character; the proposition
    ends at the first whitespace character after that, or at the end of
    the input.

    :param str: input text (parameter name kept for compatibility even
        though it shadows the builtin)
    :return: (parsed formula, end position), or (None, 0) when the input
        contains no alphabetic character
    """
    in_prop = False
    prop = ''
    for pos, x in enumerate(str):
        if x.isalpha():
            in_prop = True
        if in_prop and x.isspace():
            # Whitespace terminates the proposition
            return Parser(Lexer(prop)).parse(), pos
        if in_prop:
            prop += x
    if in_prop:
        # Bugfix: str.strip returns a new string; the original discarded
        # the result, leaving prop unchanged
        prop = prop.strip('\n')
        return Parser(Lexer(prop)).parse(), len(str)
    return None, 0
def test_on_parsed_line(self):
    """The on_parsed_line callback fires only for lines that actually parse."""
    lines = [
        "FEATURE TYPE: ## ADHOC",
        "true",
        "bleep",
    ]
    seen = []
    def record(indent, content):
        seen.append(content.line())
    Parser().parse_file(lines, on_parsed_line=record)
    self.assertEqual(["FEATURE TYPE: ## ADHOC"], seen)
def parse_file(self, file_name):
    """
    Function either sets the file and calls add_matches to add the matches
    found in the file to the matches_listbox, or starts the parsing of all
    files found in the specified folder and displays the results in the
    other frames.
    """
    self.clear_data_widgets()
    self.main_window.middle_frame.statistics_numbers_var.set("")
    self.main_window.ship_frame.ship_label_var.set("No match or spawn selected yet.")
    lines = Parser.read_file(file_name)
    id_list = Parser.get_player_id_list(lines)
    file_cube, _, _ = Parser.split_combatlog(lines, id_list)
    stats = filestats.file_statistics(file_name)
    self.update_widgets(*stats)
def __init__(self):
    """
    Constructor in charge of connecting to mailbox via IMAPS.
    """
    Parser.__init__(self)
    # Open an SSL IMAP session and select the inbox configured for polling
    self._imap = imaplib.IMAP4_SSL(settings.SCORING_EMAIL['host'])
    polling = settings.SCORING_EMAIL['polling']
    self._imap.login(polling['user'], polling['password'])
    self._imap.select('INBOX')
    # Internal queue/progress state
    self._failed_uids = []
    self._parser = None
    self._current = 0
    self._feed_queue(True)
def process_player(name: str, active_ids: list, player: str):
    """Map a player reference to a display string: the player's own name, 'System' for empty ids, or the raw id."""
    if Parser.compare_ids(player, active_ids):
        return name
    return "System" if player == "" else player
def get_spawn(self):
    """
    Get the spawn from the selection in the file_tree.

    :return: (list of event strings, player_list, spawn timing, match
        timing), or None when the selection is not a spawn
    """
    selection = self.file_tree.selection()[0]
    elements = selection.split(" ")
    # Bugfix: `is not 3` compares identity, not equality, and raises a
    # SyntaxWarning on modern Python; use !=
    if len(elements) != 3:
        tkinter.messagebox.showinfo("Requirement", "Please select a spawn to view the events of.")
        return
    lines = Parser.read_file(elements[0])
    player_list = Parser.get_player_id_list(lines)
    file_cube, match_timings, spawn_timings = Parser.split_combatlog(lines, player_list)
    match_index, spawn_index = int(elements[1]), int(elements[2])
    return (file_cube[match_index][spawn_index], player_list,
            spawn_timings[match_index][spawn_index], match_timings[match_index])
def __init__(self, input_file, delimiter):
    """
    Default constructor

    :param str input_file: Path of the CSV file to parse
    :param str delimiter: CSV delimiter
    :raises: `IOError` if an error occurs while opening the file
    """
    Parser.__init__(self)
    if not os.path.exists(input_file):
        # Redundant str() wrapper removed; message text unchanged
        raise IOError('File [%s] does not exist.' % (input_file))
    self._delim = delimiter
    self._current_row = 0
    with open(input_file, 'r') as fdesc:
        content = fdesc.read()
    self._rows = csv.reader(content.splitlines(), delimiter=delimiter)
def __init__(self, input_file, delimiter):
    """
    Default constructor

    :param str input_file: Path of the CSV file to parse
    :param str delimiter: CSV delimiter
    :raises: `IOError` if an error occurs while opening the file
    """
    Parser.__init__(self)
    if not os.path.exists(input_file):
        # Redundant str() wrapper removed; message text unchanged
        raise IOError('File [{}] does not exist.'.format(input_file))
    self._delim = delimiter
    self._current_row = 0
    with open(input_file, 'r') as fdesc:
        content = fdesc.read()
    self._rows = csv.reader(content.splitlines(), delimiter=delimiter)
def parse(string):
    """
    Parse *string* and return an abstract syntax tree (AST) of the formula
    written in it, as long as the formula is written in an appropriate
    format.

    Allowed symbols:
      * words from letters, numbers, and _ for propositional variables;
      * ! && || -> for propositional connectives;
      * X G F U W R for LTL operators.
    """
    return Parser(Lexer(string)).parse()
def parsing(self, code, lexer):
    """
    Run the syntactic analysis over *code* with the given lexer and report
    the outcome in the UI.

    :param code: source text to parse
    :param lexer: lexer instance handed to the Parser
    :return: (False, ast) when parser errors occurred, (True, ast) otherwise
    """
    parser = Parser(lexer=lexer)
    ast = parser(code)
    if len(parser.errors) > 0:
        # Append the parser errors to the Errors tab and notify the user
        errors_list = '\n'.join(repr(error) for error in parser.errors)
        self.ui.textErrors.setPlainText(
            f'{self.ui.textErrors.toPlainText()}{errors_list}')
        NotificationWindow.error('Error', ''' <html> <body> <span style=" font-style:italic; color:teal;"> <p>Ha ocurrido error(es) en el análisis sintáctico.</p> <p>Revise la pestaña Errors para más información.</p> <p></p> </span> </body> </html> ''', callback=lambda: self.help())
        self.go_dialog(6)
        return False, ast
    else:
        # Read the grammar rules from the PLY-style parser.out report,
        # skipping the four header lines and collecting "Rule ..." lines
        # NOTE(review): the file handle is not closed if a readline raises,
        # and the loop assumes the Rule block is contiguous — confirm
        path = './parsing/parser.out'
        file = open(path)
        parser_list = []
        for _ in range(0, 4):
            file.readline()
        while True:
            line = file.readline().strip()
            if line.split(' ')[0] != 'Rule':
                break
            parser_list.append(line)
        file.close()
        # NOTE(review): `parser` here shadows the Parser instance above
        parser_list = '\n'.join(repr(parser) for parser in parser_list)
        self.ui.textParser.setPlainText(
            f'{self.ui.textParser.toPlainText()}{parser_list}')
        NotificationWindow.success( 'Listo', ''' <html> <body> <span style="color:green;"> <p>El análisis sintáctico ha termiando.</p> <p>Todos los resultados están listos en la pestaña Parser.</p> <p></p> </span> </body> </html> ''')
        # print(ast)
        return True, ast
def parse_match(self, elements: list):
    """
    Either adds sets the match and calls add_spawns to add the spawns found
    in the match or starts the parsing of all files found in the specified
    file and displays the results in the other frames.

    :param elements: specifies file and match
    """
    self.clear_data_widgets()
    self.main_window.middle_frame.statistics_numbers_var.set("")
    self.main_window.ship_frame.ship_label_var.set("No match or spawn selected yet.")
    file_name = elements[0]
    match_index = int(elements[1])
    lines = Parser.read_file(file_name)
    id_list = Parser.get_player_id_list(lines)
    file_cube, match_timings, _ = Parser.split_combatlog(lines, id_list)
    player_name = Parser.get_player_name(lines)
    match = file_cube[match_index]
    stats = matchstats.match_statistics(file_name, match, match_timings[::2][match_index])
    self.update_widgets(*stats)
    match_list = Parser.build_spawn_from_match(match)
    self.main_window.middle_frame.time_view.insert_spawn(match_list, player_name)
def parse_spawn(self, file: str, match_i: int, spawn_i: int):
    """
    Either starts the results of ALL spawns found in the specified match
    or just one of them and displays the results in the other frames
    accordingly.

    :param file: CombatLog file name
    :param match_i: index of the match within the file
    :param spawn_i: index of the spawn within the match
    """
    print("[FileFrame] Parsing '{}', match {}, spawn {}".format(file, match_i, spawn_i))
    self.main_window.middle_frame.statistics_numbers_var.set("")
    self.main_window.ship_frame.ship_label_var.set("No match or spawn selected yet.")
    lines = Parser.read_file(file)
    player_list = Parser.get_player_id_list(lines)
    player_name = Parser.get_player_name(lines)
    file_cube, match_timings, spawn_timings = Parser.split_combatlog(lines, player_list)
    match = file_cube[match_i]
    spawn = match[spawn_i]
    results = list(spawnstats.spawn_statistics(
        file, spawn, spawn_timings[match_i][spawn_i]))
    results[1] = Parser.parse_player_reaction_time(spawn, player_name)
    orig = len(results[1])
    # Replace the reaction-time events with the ScreenParser event list;
    # match_timings[::2] presumably selects match start times — TODO confirm
    results[1] = ScreenParser.build_spawn_events(
        file, match_timings[::2][match_i], spawn_timings[match_i][spawn_i], spawn, player_name)
    print("[FileFrame] ScreenParser built {} events. Total: {}".format(len(results[1]) - orig, len(results[1])))
    self.update_widgets_spawn(*results)
    arguments = (file, match_timings[::2][match_i], spawn_timings[match_i][spawn_i])
    string = FileHandler.get_features_string(*arguments)
    self.main_window.middle_frame.screen_label_var.set(string)
    self.main_window.middle_frame.update_timeline(
        file, match_i, spawn_i, match_timings, spawn_timings, file_cube)
    # Combine the date from the file name with the match start time
    match_timing = datetime.combine(Parser.parse_filename(file).date(), match_timings[::2][match_i].time())
    self.main_window.middle_frame.scoreboard.update_match(match_timing)
def get_spawn_dictionary(data: dict, file_name: str, match_dt: datetime, spawn_dt: datetime):
    """
    Function to get the data dictionary for a spawn based on a file name,
    match datetime and spawn datetime. Uses a lot of code to make the
    searching as reliable as possible.

    :param data: screen data dictionary, or None to load the default one
    :param file_name: CombatLog file name to look up
    :param match_dt: match datetime (only hour/minute are compared)
    :param spawn_dt: spawn datetime (hour/minute/second are compared)
    :return: the spawn data dictionary, or an explanatory message string
        when no data is available at the requested level
    """
    # NOTE(review): the "Screen results results" wording in the messages
    # below looks like a search-and-replace artifact ("parsing" -> "results");
    # the runtime strings are left untouched here — confirm before rewording
    if data is None:
        data = FileHandler.get_data_dictionary()
    print("[FileHandler] Spawn data requested for: {}/{}/{}".format(file_name, match_dt.time(), spawn_dt.time()))
    # First check if the file_name is available
    if file_name not in data:
        return "Not available for this file.\n\nScreen results results are only available for spawns in files " \
               "which were spawned while screen results was enabled and real-time results was running."
    file_dt = Parser.parse_filename(file_name)
    if file_dt is None:
        return "Not available for this file.\n\nScreen results results are not supported for file names which do " \
               "not match the original Star Wars - The Old Republic CombatLog file name format."
    file_dict = data[file_name]
    # Next up comes the checking of datetimes, which is slightly more complicated due to the fact that even equal
    # datetime objects with the == operators, are not equal with the 'is' operator
    # Also, for backwards compatibility, different datetimes must be supported in this searching process
    # Datetimes always have a correct time, but the date is not always the same as the filename date
    # If this is the case, the date is actually set to January 1 1900, the datetime default
    # Otherwise the file name of the CombatLog must have been altered
    match_dict = None
    for key, value in file_dict.items():
        # Match on hour and minute only, ignoring the unreliable date part
        if key.hour == match_dt.hour and key.minute == match_dt.minute:
            match_dict = value
    if match_dict is None:
        return "Not available for this match\n\nScreen results results are only available for spawns " \
               "in matches which were spawned while screen results was enabled and real-time results " \
               "was running"
    # Now a similar process starts for the spawns, except that seconds matter here.
    spawn_dict = None
    for key, value in match_dict.items():
        if key is None:
            # If the key is None, something weird is going on, but we do not want to throw any data away
            # This may be caused by a bug in the ScreenParser
            # For now, we reset key to a sensible value, specifically the first moment the data was recorded, if
            # that's possible. If not, we'll skip it.
            try:
                key = list(value[list(value.keys())[0]].keys())[0]
            except (KeyError, ValueError, IndexError):
                continue
        if key.hour == spawn_dt.hour and key.minute == spawn_dt.minute and key.second == spawn_dt.second:
            spawn_dict = value
    if spawn_dict is None:
        return "Not available for this spawn\n\nScreen results results are not available for spawns which " \
               "were not spawned while screen results was enabled and real-time results were running."
    print("[FileHandler] Retrieved a spawn dictionary.")
    return spawn_dict
def test_1root_1child(self):
    """A root node with one indented child yields two nodes: one root and one tail."""
    lines = [
        "NODE_1_1[...,...]",
        "\t->NODE_1_2[...,...]",
    ]
    graph = Graph()
    graph.build(Parser(), lines)
    self.assertEqual(2, len(graph.nodes()))
    self.assertEqual(1, len(graph.roots()))
    self.assertEqual(1, len(graph.tails()))
def interface(streams, debug, mode):
    """Lex and parse the input stream, reporting SeaErrors and always emitting debug info."""
    lexer = parser = None
    try:
        LexerError.lexer = lexer = Lexer(streams.in_stream)
        lexer.make_tokens()
        ParserError.parser = parser = Parser(lexer)
        parser.make_nodes()
    except SeaError as error:
        streams.error_stream.write(error)
    finally:
        # Debug info is printed regardless of whether an error occurred
        print_debug_info(debug, streams.debug_stream, lexer, parser)
def _process_new_file(self):
    """Backlog only the lines of a match that are match lines."""
    print("[LogStalker] Processing new file.")
    lines = self.read_file(self.path, 0)
    if not lines:
        return
    player_list = Parser.get_player_id_list(lines)
    file_cube, _, _ = Parser.split_combatlog(lines, player_list)
    if not file_cube:
        print("[LogStalker] No matches in this file")
        self._read_so_far = len(lines)
        return
    last_line = file_cube[-1][-1][-1]
    if last_line["time"] == lines[-1]["time"]:
        # Last line is still a match line: keep the active match unread
        print("[LogStalker] Match still active")
        match_len = sum(len(spawn) for spawn in file_cube[-1])
        self._read_so_far = len(lines) - match_len
        return
    # Last line is no longer a match
    print("[LogStalker] Last line is not a match event")
    self._read_so_far = len(lines)
def parsing(self):
    """Run the syntactic analysis; dump the grammar rules in debug mode and abort on the first parser error."""
    self.parser = Parser(lexer=self.lexer)
    self.ast = self.parser(self.code)
    if self.debug:
        Utils.Write(self.debug_path, '.parser',
                    '\n'.join(repr(rule) for rule in Utils.GetRulesParsing()))
    if len(self.parser.errors) > 0:
        print(self.parser.errors[0])
        exit(1)
def _select_date(self, date: datetime):
    """
    Callback for Calendar widget selection command.

    Populates the file tree with the matches and spawns recorded for the
    selected date.
    """
    self.clear_data_widgets()
    self._tree.delete(*self._tree.get_children(""))
    if date not in self._dates:
        return
    self._files: List[str] = self._dates[date]
    for f, file in enumerate(sorted(self._files)):
        name = Parser.get_player_name_raw(file)
        cube, matches, spawns = Parser.split_combatlog_file(file)
        # NOTE(review): matches[::2] presumably selects match start times;
        # sorting them (and spawns below) may desynchronize the m/s indices
        # from cube and spawns — confirm against split_combatlog_file
        for m, match in enumerate(sorted(matches[::2])):
            match = datetime.strftime(match, "%H:%M, {}".format(name))
            match_iid = "{},{}".format(f, m)
            self._tree.insert("", tk.END, text=match, iid=match_iid)
            for s, spawn in enumerate(sorted(spawns[m])):
                spawn = datetime.strftime(spawn, "%H:%M:%S")
                player_list: List[str] = Parser.get_player_id_list(cube[m][s])
                # Bugfix: annotation used slice syntax Dict[str: int]
                abs_dict: Dict[str, int] = Parser.get_abilities_dict(cube[m][s], player_list)
                ships: List[str] = Parser.get_ship_for_dict(abs_dict)
                ship = self.format_ships_list(ships)
                spawn = "{}{}".format(spawn, ship)
                spawn_iid = "{},{},{}".format(f, m, s)
                self._tree.insert(match_iid, tk.END, text=spawn, iid=spawn_iid)
def parse_file(s):
    """Compile the source file at path *s* into a verified (and optionally optimized) AST."""
    with open(s, 'r') as f:
        source = StringSource(f.read())
    tree = Parser(Lexer(source)).generate_ast()
    if mshl.num_errors > 0:
        mshl.fatal('there were errors')
    SemanticAnalyzer().verify(tree)
    if not mshl.conf.flag('--no-optim'):
        ASTOptimizer().optimize_ast(tree)
    if mshl.num_errors > 0:
        mshl.fatal('there were errors')
    return tree
def test_line_to_dictionary(self):
    """line_to_dictionary parses a raw CombatLog line into its component fields."""
    line_dict = Parser.line_to_dictionary(self.LINE)
    self.assertIsInstance(line_dict, dict)
    # Bugfix: "target" was listed twice in the original key list
    KEYS = ["time", "source", "target", "amount", "ability", "effect"]
    for key in KEYS:
        self.assertTrue(key in line_dict)
    self.assertIsInstance(line_dict["time"], datetime)
    # (removed an unreachable manual `raise ValueError` that duplicated the
    # assertIsInstance check above)
    self.assertEqual(line_dict["time"].hour, 22)
    self.assertEqual(line_dict["source"], "2963000048128")
    self.assertEqual(line_dict["target"], "2963000048240")
    self.assertEqual(line_dict["ability"], "Quad Laser Cannon")
    self.assertTrue("Damage" in line_dict["effect"])
    self.assertEqual(int(line_dict["amount"]), 296)
def update_timeline(self, file, match, spawn, match_timings, spawn_timings, file_cube):
    """
    Update the TimeLine with the results of results the file and the
    screen results data
    """
    spawn_lines = file_cube[match][spawn]
    # Spawn boundaries as floats for the TimeLine axis (end padded by 1s)
    start = FileHandler.datetime_to_float(Parser.line_to_dictionary(spawn_lines[0])["time"])
    finish = FileHandler.datetime_to_float(Parser.line_to_dictionary(spawn_lines[-1])["time"]) + 1
    self.time_line.delete_marker(tk.ALL)
    self.time_line.config(start=start, finish=finish)
    # Update the TimeLine
    screen_data = FileHandler.get_data_dictionary()
    screen_dict = FileHandler.get_spawn_dictionary(
        screen_data, file, match_timings[2 * match], spawn_timings[match][spawn]
    )
    # Normalize error strings to None so only real dicts are used below
    if screen_dict is None or isinstance(screen_dict, str):
        screen_dict = None
    active_ids = Parser.get_player_id_list(spawn_lines)
    markers = dict()
    if isinstance(screen_dict, dict):
        markers = FileHandler.get_markers(screen_dict, spawn_lines, active_ids)
    markers["patterns"] = PatternParser.parse_patterns(spawn_lines, screen_dict, Patterns.ALL_PATTERNS, active_ids)
    print("[TimeLine] Building {} markers.".format(sum(len(value) for value in markers.values())))
    for category, data in markers.items():
        for (args, kwargs) in data:
            try:
                self.time_line.create_marker(*args, **kwargs)
            except (ValueError, TypeError, tk.TclError) as err:
                print("[TimeLine] Marker creation failed: '{}', '{}', '{}', '{}': {}".format(
                    args[0], args[1], args[2], kwargs["background"], repr(err))
                )
                # ValueError is tolerated; anything else propagates
                if not isinstance(err, ValueError):
                    raise
    return
def include(self, file_name):
    """Compile the named include file and merge its output into this batch.

    Resolves *file_name* via mshl, compiles it with a fresh Batch that
    shares this batch's scope and counters, then appends its init/decl/
    code segments to our own. Each file is only ever included once.
    """
    # TODO: Provide some general compilation function so we can reuse flags here
    path = mshl.find_include_file(file_name)
    # BUG FIX: the original rebound `s` from the path to the file *contents*
    # and appended the contents to self.includes while checking the *path*,
    # so the duplicate-include guard never matched and files were re-included.
    if path in self.includes:
        return
    # `with` guarantees the handle is closed even if parsing raises
    with open(path) as f:
        source = f.read()
    p = Parser(Lexer(StringSource(source)))
    ast = p.generate_ast()
    b = Batch(ast)
    # Share scope and counters so the included code sees our symbols and
    # generated temp/label names stay unique across both batches.
    b.scope = self.scope
    b.tempvar_counter = self.tempvar_counter
    b.label_counter = self.label_counter
    b.generate_code()
    self.tempvar_counter = b.tempvar_counter
    self.label_counter = b.label_counter
    self.emit(b.segments['init'], 'init')
    self.emit(b.segments['decl'], 'decl')
    self.emit(b.segments['code'], 'code')
    # Don't kill the stack.
    self.push('include', STR)
    self.includes.append(path)
def test_2root_3child(self):
    """Graph with two roots where the first root owns three children."""
    source = [
        "NODE_1_1[...,...]",
        "\t->NODE_1_2[...,...]",
        "\t->\t NODE_1_3[...,...]",
        "\t->NODE_1_4[...,...]",
        "NODE_2_1[...,...]",
    ]
    graph = Graph()
    graph.build(Parser(), source)
    # 5 nodes total: 2 roots, and 3 leaves hanging off the first root
    self.assertEqual(len(graph.nodes()), 5)
    self.assertEqual(len(graph.roots()), 2)
    self.assertEqual(len(graph.tails()), 3)
"""Tests that we can parse loan commands""" import unittest import helper # noqa from parsing.parser import Parser import parsing.ext_tokens import lbshared.money as money try: from summons.loan import PARSER except: # noqa PARSER = Parser('$loan', [{ 'token': parsing.ext_tokens.create_money_token(), 'optional': False }, { 'token': parsing.ext_tokens.as_currency_token(), 'optional': True }]) class Test(unittest.TestCase): def test_simple(self): self.assertEqual(PARSER.parse('$loan 15'), [money.Money(1500, 'USD'), None]) def test_symbol(self): self.assertEqual(PARSER.parse('$loan 15$'), [money.Money(1500, 'USD'), None]) def test_iso(self): self.assertEqual(PARSER.parse('$loan EUR 15'), [money.Money(1500, 'EUR'), None])