def test_rule_lexer_tokenize_return_tokens_single_statement():
    """Tokenizing a plain statement yields only LexToken instances."""
    rl = RankingLexer()
    tokens = rl.tokenize("Ryan is not the worst")
    assert all(isinstance(token, LexToken) for token in tokens)
def highlight(self, text):
    """Return *text* with every recognised token wrapped in its ANSI style.

    Each distinct token value found by the lexer is replaced (everywhere it
    occurs) by the styled version from ``self.style_map`` followed by the
    colorama reset codes.  Non-string input returns the copy unchanged.
    """
    result = copy(text)
    rl = RankingLexer()
    try:
        tokens = rl.tokenize(text)
    except TypeError:
        # tokenize rejects non-string input; hand back the untouched copy
        return result

    # Build one replacement string per distinct token value.
    replacements = dict()
    for token in tokens:
        if token.type not in self.style_map:
            continue
        if token.value in replacements:
            continue
        styled = (self.style_map[token.type]
                  + token.value
                  + Fore.RESET
                  + Style.NORMAL
                  + Back.RESET)
        replacements[token.value] = styled

    for original in replacements:
        pattern = re.escape(original)
        if original[0] != "[":
            # plain words: match on word boundaries only
            result = re.sub(f"\\b{pattern}\\b", replacements[original], result)
        else:
            # bracketed entities: substitute the literal span
            result = re.sub(f"{pattern}", replacements[original], result)
    return result
def test_rule_lexer_tokenize_throws_input_error():
    """Passing a list instead of a string must raise a descriptive TypeError."""
    rl = RankingLexer()
    with pytest.raises(TypeError, match=r"Expected str, got .*"):
        rl.tokenize(["Ryan is not the worst"])
def test_rule_lexer_tokenize_return_empty():
    """Input with no recognisable tokens produces an empty list."""
    rl = RankingLexer()
    assert rl.tokenize("hello") == []
def test_rule_lexer_tokenize_correct_tokens_single_statement():
    """Token types for a simple statement come back in source order."""
    rl = RankingLexer()
    tokens = rl.tokenize("Ryan is not the worst")
    assert [token.type for token in tokens] == ["ENTITY", "NOT", "WORST"]
def __init__(self, project_id=None, log_history=True):
    """Initialise the session's parser, lexer and highlighter state.

    :param project_id: existing project identifier, or None to generate one
    :param log_history: when True, parsed statements are persisted to disk
    """
    self.generated_project = True
    self.log_history = log_history
    # project_id must exist before set_project_id() may overwrite it
    self.project_id = None
    self.set_project_id(project_id)
    self._rp = RankingParser()
    self._rl = RankingLexer()
    self.lexer = self._rl.build()
    self._rp.build()
    self._hl = HighLighter(self._rl, STYLE_MAP)
    self.history = []
    self.pp = pprint.PrettyPrinter(indent=4)
def solve_challenge():
    """Solve the programmer riddle twice: via explicit rules, then by parsing."""
    rl = RankingLexer()
    hl = HighLighter(rl, STYLE_MAP)

    # Show the riddle, syntax-highlighted.
    print()
    for statement in programmer_riddle:
        print(hl.highlight(statement))
    print()

    print("Solving by specifying rules")
    print(Fore.CYAN + ("=" * 30) + Fore.RESET)
    problem = RankingProblem()
    # These fluent calls mutate the problem in place and return it.
    problem.set_items(["Jessie", "Evan", "John", "Sarah", "Matt"])
    problem.not_first("Jessie").not_last("Evan")
    problem.not_first("John").not_last("John")
    problem.is_before("Sarah", "Evan")
    problem.not_directly_before_or_after("Matt", "John")
    problem.not_directly_before_or_after("John", "Evan")
    for solution in problem.solve():
        typewrite_print(", ".join(solution))
    print()

    print("Solving by parsing")
    print(Fore.CYAN + ("=" * 30) + Fore.RESET)
    rp = RankingParser()
    typewrite_print(", ".join(rp.parse_statements(programmer_riddle)))
def test_highlighter_highlight_doesnt_change_input():
    """highlight() must leave its input string untouched."""
    original = "make Foo not war"
    snapshot = copy(original)
    HighLighter(RankingLexer(), STYLE_MAP).highlight(original)
    assert original == snapshot
def test_write_out_highlighted_lines(file_name, lines):
    """Highlight each line and write the results to a text file.

    The output file contains the raw escaped terminal colour codes.

    NOTE(review): despite the ``test_`` prefix this helper takes parameters,
    so pytest will not collect it as a test on its own.

    :param file_name: path of the text file to create
    :param lines: iterable of statements to highlight
    """
    colorama_init()
    hl = HighLighter(RankingLexer(), STYLE_MAP)
    with open(file_name, "w") as f:
        for s in lines:
            s_out = hl.highlight(s)
            f.write(s_out + "\n")
            print(s_out)
    # Fix: removed the redundant f.close() — the with-statement already
    # closes the file on exit.
def test_highlighter_complex_highlight_correct():
    # Entities with spaces, parentheses, ampersands and carets inside square
    # brackets must still be wrapped in the expected ANSI colour sequences.
    # NOTE(review): the expected strings embed raw terminal escape codes —
    # keep them byte-for-byte identical when editing this file.
    expected_results = [
        "[49m[1m[36m[Ryan (1)][39m[22m[49m [49m[22m[32mbefore[39m[22m[49m [49m[1m[36m[(Other)][39m[22m[49m",
        "[49m[1m[36m[Sue & Julie][39m[22m[49m [49m[22m[32mbefore[39m[22m[49m [49m[1m[36m[Ryan (1)][39m[22m[49m",
        "[49m[1m[36m[^Alfred^][39m[22m[49m [49m[22m[91mnot[39m[22m[49m [49m[1m[95mlast[39m[22m[49m",
    ]
    hl = HighLighter(RankingLexer(), STYLE_MAP)
    # complex_lines() is presumed to yield the inputs in the same order as
    # expected_results — TODO confirm against its definition.
    for i, val in enumerate(complex_lines()):
        result = hl.highlight(val)
        assert(result == expected_results[i])
def test_highlighter_highlight_correct():
    # Each statement of the programmer riddle must come back decorated with
    # the exact ANSI colour escape sequences defined by STYLE_MAP.
    # NOTE(review): the expected strings embed raw terminal escape codes —
    # keep them byte-for-byte identical when editing this file.
    expected_results = [
        "[49m[1m[36mJessie[39m[22m[49m is [49m[22m[91mnot[39m[22m[49m the [49m[1m[32mbest[39m[22m[49m developer",
        "[49m[1m[36mEvan[39m[22m[49m is [49m[22m[91mnot[39m[22m[49m the [49m[1m[95mworst[39m[22m[49m developer",
        "[49m[1m[36mJohn[39m[22m[49m is [49m[22m[91mnot[39m[22m[49m the [49m[1m[32mbest[39m[22m[49m developer [49m[22m[93mor[39m[22m[49m the [49m[1m[95mworst[39m[22m[49m developer",
        "[49m[1m[36mSarah[39m[22m[49m is a [49m[22m[32mbetter[39m[22m[49m developer than [49m[1m[36mEvan[39m[22m[49m",
        "[49m[1m[36mMatt[39m[22m[49m is [49m[22m[91mnot[39m[22m[49m directly [49m[22m[95mbelow[39m[22m[49m [49m[22m[93mor[39m[22m[49m [49m[22m[32mabove[39m[22m[49m [49m[1m[36mJohn[39m[22m[49m as a developer",
        "[49m[1m[36mJohn[39m[22m[49m is [49m[22m[91mnot[39m[22m[49m directly [49m[22m[95mbelow[39m[22m[49m [49m[22m[93mor[39m[22m[49m [49m[22m[32mabove[39m[22m[49m [49m[1m[36mEvan[39m[22m[49m as a developer",
    ]
    hl = HighLighter(RankingLexer(), STYLE_MAP)
    # Expected results are index-aligned with the riddle statements.
    for i, val in enumerate(programmer_riddle):
        result = hl.highlight(val)
        assert(result == expected_results[i])
class Session(object):
    """Interactive ranking-parser session: reads commands, maintains a parsed
    statement history per project directory, and can solve/export results."""

    def __init__(self, project_id=None, log_history=True):
        # Whether the project id was auto-generated (vs user-supplied).
        self.generated_project = True
        self.log_history = log_history
        # project_id must exist before set_project_id() may overwrite it.
        self.project_id = None
        self.set_project_id(project_id)
        self._rp = None
        self.reset_ranking_parser()
        self._rl = RankingLexer()
        self._rg = RankingGraph()
        self._rn = RankingNetwork()
        self.lexer = self._rl.build()
        self._hl = HighLighter(self._rl, STYLE_MAP)
        self.history = list()
        self.pp = pprint.PrettyPrinter(indent=4)

    def reset_ranking_parser(self):
        """Discard all constraints by rebuilding the parser from scratch."""
        self._rp = RankingParser()
        self._rp.build()

    def set_project_id(self, project_id):
        """Adopt the given project id, or generate a UUID-based one."""
        if project_id is not None:
            self.generated_project = False
            self.project_id = project_id
        else:
            self.project_id = str(uuid.uuid1())

    def change_project_id(self, project_id, move_files=True):
        """Switch to another project, moving/copying files or loading it.

        If the target directory does not exist, the current project's files
        are moved (or copied when move_files is False and the project was
        user-named) into it.  If it does exist, the parser is reset and the
        target project's history is replayed.
        """
        if self.project_id == project_id:
            return
        if not path.exists(project_id):
            if self.generated_project or move_files:
                if path.exists(self.project_id):
                    move(self.project_id, project_id)
                else:
                    mkdir(project_id)
            else:
                if path.exists(self.project_id):
                    copytree(self.project_id, project_id)
                else:
                    mkdir(project_id)
            self.generated_project = False
            self.set_project_id(project_id)
        else:
            self.reset_ranking_parser()
            self.generated_project = False
            self.set_project_id(project_id)
            self.load_history(project_id)

    def file_in_project(self, filename, extension="txt"):
        """Return *filename* (with extension ensured) prefixed by the
        project directory, unless it already carries that prefix."""
        result = check_file_extension(filename, extension)
        if result[:len(self.project_id)] != self.project_id:
            result = f"{self.project_id}/{result}"
        return result

    def do_parse(self, text, write_history=True):
        """Parse one statement, record it in history, and undo it again if
        it makes the ranking problem unsolvable."""
        if text.strip() == "":
            return
        print(self._hl.highlight(text))
        try:
            result = self._rp.parse(text)
        except ParsingError as e:
            print(STYLE_MAP["ERROR"], f"ERROR: {e}", STYLE_MAP["RESET"])
            return
        if result is not None:
            self.history.append(text)
            if write_history:
                self.write_history()
        if not self._rp.ranking_problem.is_solvable:
            # The new constraint broke solvability: roll it back.
            # NOTE(review): undo() does not pop self.history, so the
            # offending line may still be persisted — TODO confirm intended.
            self.undo()
            if write_history:
                self.write_history()

    def write_history(self):
        """Best-effort persistence of the statement history to the project
        directory; any I/O failure is deliberately swallowed."""
        if not self.log_history:
            return
        try:
            if not path.exists(self.project_id):
                mkdir(self.project_id)
            file_name = self.file_in_project("output", "txt")
            export_lines_to_text(self.history, file_name)
        except:
            pass

    def load_history(self, project_id):
        """Replay a project's saved statements (without re-writing them)."""
        file_name = f"{project_id}/output.txt"
        lines = import_text_to_lines(file_name)
        if len(lines) == 0:
            print(f"Nothing to read from {file_name}")
            return
        self.history.clear()
        for l in lines:
            self.do_parse(l, write_history=False)

    def import_items(self, file_name):
        """Add each line of the given file as an item via the '+ [..]' form."""
        lines = import_text_to_lines(file_name)
        if len(lines) == 0:
            print(f"Nothing to read from {file_name}")
            return
        for l in lines:
            self.do_parse(f"+ [{l.strip()}]")
        print("imported items")

    def export_items(self, file_name):
        """Write the parser's current item list into the project directory."""
        output_file_name = self.file_in_project(file_name, "txt")
        export_lines_to_text(self._rp.items, output_file_name)
        print(f"exported items to {output_file_name}")

    def do_tokenize(self, text):
        """Pretty-print the lexer tokens for *text* (debug aid)."""
        result = self._rl.tokenize(text.strip())
        self.pp.pprint(result)

    def undo(self):
        """Remove the most recent constraint, announcing which statement
        is being undone when history is non-empty."""
        if len(self.history) > 0:
            print(f"{STYLE_MAP['ERROR']}Undoing:{STYLE_MAP['RESET']}",
                  self._hl.highlight(self.history[-1]))
        self._rp.remove_last_constraint()

    def print_history(self):
        """Echo every recorded statement, syntax-highlighted."""
        for h in self.history:
            print(self._hl.highlight(h))

    def print_token_debug(self, s):
        """Debug entry point for the '?' command."""
        self.do_tokenize(s.strip())

    def generate_graph(self, filename):
        """Render the solution set as a graphviz diagram, if any exist."""
        solutions = self.solve()
        if len(solutions) == 0:
            print("No solutions to generate a graph from")
            return
        generate_viz_from_solutions(solutions, filename)

    def export_csv(self, filename):
        """Export the solution set as CSV into the project directory."""
        solutions = self.solve()
        if len(solutions) == 0:
            print("No solutions to generate a graph from")
            return
        output_filename = self.file_in_project(filename, "csv")
        export_csv(solutions, output_filename)

    def suggest_pair(self):
        """Print a suggested pair of items to compare next."""
        pair = None
        try:
            pair = self._rp.ranking_problem.least_most_common_variable
        except Exception as e:
            print("Couldn't suggest pair", e)
            return
        print(self._hl.highlight(f"[{pair[0]}] versus [{pair[1]}]"))

    def solve(self):
        """Return the list of solutions; on solver errors print the message
        and return whatever was gathered (empty list by default)."""
        result = list()
        try:
            result = self._rp.solve()
        except IncompleteResultsError as e:
            print(
                f"{STYLE_MAP['ERROR']}{e.message}{Style.NORMAL}{STYLE_MAP['RESET']}"
            )
            # result = e.results
        except UnsolvableModelError as e:
            print(
                f"{STYLE_MAP['ERROR']}{e.message}{Style.NORMAL}{STYLE_MAP['RESET']}"
            )
        return result

    def print_stats(self, filename=None):
        """Print solution statistics; optionally export them as key=value
        lines to *filename* in the project directory."""
        solutions = self.solve()
        if len(solutions) == 0:
            print("No solutions to generate a stats from")
            return
        result = stats_from_solutions(solutions)
        print(Style.RESET_ALL)
        for item in result:
            print(f"{Style.BRIGHT}{item}{Style.NORMAL}")
            printout(result[item], item)
        print(Style.RESET_ALL)
        if filename is not None:
            lines = list()
            for key in result:
                lines.append(f"{key}={result[key]}")
            export_lines_to_text(lines, self.file_in_project(filename))

    def display_commands(self):
        """Show every available command with a highlighted usage example."""
        for key in commands:
            print(Style.RESET_ALL)
            print(f"{Style.BRIGHT}{key}{Style.NORMAL} : {commands[key][0]}")
            try:
                command_example = self._hl.highlight(commands[key][1])
            except ParsingError as e:
                # Fall back to the raw example when it cannot be highlighted.
                command_example = commands[key][1]
            print(f"{Style.DIM}{command_example}{Style.NORMAL}")
            print()
        print(Style.RESET_ALL)

    def display_solution(self):
        """Pretty-print the solution(s); with 5+ results show only the
        highlighted path(s) with most support."""
        result = self.solve()
        if len(result) == 1:
            self.pp.pprint(result[0])
        elif len(result) < 5:
            self.pp.pprint(result)
        else:
            print(
                f"{STYLE_MAP['INFO']}More than 5 results showing the path(s) with most support{STYLE_MAP['RESET']}"
            )
            highlight_path = export_highlighted_path(result)
            self.pp.pprint(highlight_path)

    def read_input(self, text):
        """Dispatch a single line of user input to the matching command,
        falling through to statement parsing when nothing matches."""
        text_split = text.split(" ")
        if text == "=":
            self.display_solution()
        elif text == "undo":
            self.undo()
        elif text == "history":
            self.print_history()
        elif text_split[0] in ["import_items", "import"]:
            if len(text_split) > 1:
                self.import_items(text_split[1])
            else:
                print("Need to specify a filename")
        elif text_split[0] in ["export_items", "export"]:
            if len(text_split) > 1:
                self.export_items(text_split[1])
            else:
                print("Need to specify a filename")
        elif text_split[0] == "load":
            if len(text_split) > 1:
                self.change_project_id(text_split[1])
            else:
                print("Need to specify a filename")
        elif text_split[0] == "copy":
            if len(text_split) > 1:
                self.change_project_id(text_split[1], move_files=False)
            else:
                print("Need to specify a filename")
        elif text_split[0] in ["graph", "diagram"]:
            if len(text_split) > 1:
                self.generate_graph(f"{self.project_id}/{text_split[1]}")
            else:
                print("Need to specify a filename")
        elif text_split[0] in ["csv"]:
            if len(text_split) > 1:
                self.export_csv(f"{self.project_id}/{text_split[1]}")
            else:
                print("Need to specify a filename")
        elif text_split[0] == "~":
            self.suggest_pair()
        elif text_split[0] in ["stats"]:
            if len(text_split) > 1:
                self.print_stats(text_split[1])
            else:
                self.print_stats()
        elif text[0] == "?":
            if len(text) > 2:
                self.print_token_debug(text[1:].strip())
            else:
                print("insufficient parameters")
        elif text_split[0] == "help":
            self.display_commands()
        else:
            self.do_parse(text)

    def start(self):
        """Run the interactive prompt loop until the user types 'quit'."""
        prompt_session = PromptSession()
        word_completer = WordCompleter(self._rp.items, ignore_case=True)
        while True:
            prompt = "RankParser>"
            if not self.generated_project:
                prompt = f"{self.project_id}>"
            text = prompt_session.prompt(prompt,
                                         auto_suggest=AutoSuggestFromHistory(),
                                         completer=word_completer,
                                         complete_in_thread=True)
            if text.lower() == "quit":
                break
            if text != "":
                try:
                    self.read_input(text)
                except Exception as e:
                    print(Style.RESET_ALL)
                    print(f"{STYLE_MAP['ERROR']}Error reading input: {text}")
                    print(f"{e}{Style.NORMAL}")
                else:
                    # Successful command: refresh tab-completion with any
                    # newly-added items.
                    # NOTE(review): original indentation is ambiguous; this
                    # reads as the try/except 'else' — TODO confirm.
                    word_completer.words = self._rp.items
def test_can_call_highlighter_highlight():
    """Smoke test: highlight() accepts a plain string without raising."""
    highlighter = HighLighter(RankingLexer(), STYLE_MAP)
    highlighter.highlight("foo")
def test_can_call_rule_lexer_tokenize():
    """Smoke test: tokenize() runs on a plain string without raising."""
    RankingLexer().tokenize("hello")
def test_can_call_ranking_lexer_build():
    """Smoke test: build() completes without raising."""
    RankingLexer().build()
class Session(object):
    """Interactive ranking-parser session (legacy version): reads commands
    from stdin, keeps a per-project statement history, and prints results."""

    def __init__(self, project_id=None, log_history=True):
        # Whether the project id was auto-generated (vs user-supplied).
        self.generated_project = True
        self.log_history = log_history
        # project_id must exist before set_project_id() may overwrite it.
        self.project_id = None
        self.set_project_id(project_id)
        self._rp = RankingParser()
        self._rl = RankingLexer()
        self.lexer = self._rl.build()
        self._rp.build()
        self._hl = HighLighter(self._rl, STYLE_MAP)
        self.history = list()
        self.pp = pprint.PrettyPrinter(indent=4)

    def set_project_id(self, project_id):
        """Adopt the given project id, or generate a UUID-based one."""
        if project_id is not None:
            self.generated_project = False
            self.project_id = project_id
        else:
            self.project_id = str(uuid.uuid1())

    def change_project_id(self, project_id):
        """Switch project: move files into a new directory, or load the
        history of an existing one."""
        self.generated_project = False
        if not path.exists(project_id):
            move(self.project_id, project_id)
            self.set_project_id(project_id)
        else:
            self.set_project_id(project_id)
            self.load_history(project_id)

    def do_parse(self, text, write_history=True):
        """Parse one statement and append it to the history on success."""
        if text.strip() == "":
            return
        print(self._hl.highlight(text))
        try:
            result = self._rp.parse(text)
        except TypeError as e:
            print(e)
            return
        if result is not None:
            self.history.append(text)
            if write_history:
                self.write_history()

    def write_history(self):
        """Best-effort persistence of the history to <project>/output.txt;
        any I/O failure is deliberately swallowed."""
        if not self.log_history:
            return
        try:
            if not path.exists(self.project_id):
                mkdir(self.project_id)
            file_name = f"{self.project_id}/output.txt"
            with open(file_name, "w") as f:
                for l in self.history:
                    f.write(f"{l}\n")
        except:
            pass

    def load_history(self, project_id):
        """Replay a project's saved statements (without re-writing them)."""
        file_name = f"{project_id}/output.txt"
        lines = []
        with open(file_name, "r") as f:
            lines = f.readlines()
        if len(lines) == 0:
            print(f"Nothing to read from {file_name}")
            return
        self.history.clear()
        for l in lines:
            self.do_parse(l, write_history=False)

    def import_items(self, file_name):
        """Add each line of the given file as an item via the '+ [..]' form."""
        with open(file_name, "r") as f:
            lines = f.readlines()
        if len(lines) == 0:
            print(f"Nothing to read from {file_name}")
            return
        for l in lines:
            self.do_parse(f"+ [{l.strip()}]")
        print("imported items")

    def do_tokenize(self, text):
        """Pretty-print the lexer tokens for *text* (debug aid)."""
        result = self._rl.tokenize(text.strip())
        self.pp.pprint(result)

    def undo(self):
        """Drop the most recent constraint and show the remaining solutions."""
        self._rp.remove_last_constraint()
        print(self._rp.solve())

    def print_history(self):
        """Echo every recorded statement, syntax-highlighted."""
        for h in self.history:
            print(self._hl.highlight(h))

    def print_token_debug(self, s):
        """Debug entry point for the '?' command."""
        self.do_tokenize(s.strip())

    def generate_graph(self, filename):
        """Render the solution set as a graphviz diagram, if any exist."""
        solutions = self._rp.solve()
        if len(solutions) == 0:
            print("No solutions to generate a graph from")
            return
        generate_viz_from_solutions(solutions, filename)

    def suggest_pair(self):
        """Print a suggested pair of items to compare next."""
        pair = None
        try:
            pair = self._rp.ranking_problem.least_most_common_variable()
        except Exception as e:
            # Fix: "Couldnt" -> "Couldn't" for consistency with the newer
            # Session implementation.
            print("Couldn't suggest pair", e)
            return
        print(self._hl.highlight(f"[{pair[0]}] versus [{pair[1]}]"))

    def display_commands(self):
        """Show every available command with a highlighted usage example
        and its token breakdown."""
        for key in commands:
            print(f"{key} : {commands[key][0]}")
            print(self._hl.highlight(commands[key][1]))
            try:
                self.do_tokenize(commands[key][1])
            except:
                pass
            print()

    def read_input(self, text):
        """Dispatch a single line of user input to the matching command,
        falling through to statement parsing when nothing matches."""
        text_split = text.split(" ")
        if text == "=":
            result = self._rp.solve()
            if len(result) == 1:
                self.pp.pprint(result[0])
            elif len(result) > 1:
                self.pp.pprint(result)
        elif text == "undo":
            self.undo()
        elif text == "history":
            self.print_history()
        # Fix: was `text == "import_items"`, which can only be true when no
        # filename follows — making `import_items <file>` unreachable.
        # Dispatch on the first word instead, as the other commands do.
        elif text_split[0] == "import_items":
            if len(text_split) > 1:
                self.import_items(text_split[1])
            else:
                print("Need to specify a filename")
        elif text_split[0] == "load":
            if len(text_split) > 1:
                self.change_project_id(text_split[1])
            else:
                print("Need to specify a filename")
        elif text_split[0] in ["graph", "diagram"]:
            if len(text_split) > 1:
                self.generate_graph(f"{self.project_id}/{text_split[1]}")
            else:
                print("Need to specify a filename")
        elif text_split[0] == "~":
            self.suggest_pair()
        elif text[0] == "?":
            if len(text) > 2:
                self.print_token_debug(text[1:].strip())
            else:
                print("insufficient parameters")
        elif text_split[0] == "help":
            self.display_commands()
        else:
            self.do_parse(text)

    def start(self):
        """Run the interactive prompt loop until the user types 'quit'."""
        self._rp.build()
        while True:
            prompt = "RankParser>"
            if not self.generated_project:
                prompt = f"{self.project_id}>"
            text = input(prompt).strip()
            if text.lower() == "quit":
                break
            if text != "":
                try:
                    self.read_input(text)
                except Exception as e:
                    print(f"Error reading input: {text}")
                    print(e)
def test_ranking_lexer_build_returns_lex():
    """build() hands back a Lexer instance."""
    build_result = RankingLexer().build()
    assert isinstance(build_result, Lexer)