class Filterator(QtWidgets.QApplication):
    """Qt application object that owns and wires together the models,
    the main window, the manager, and the background processor."""

    def __init__(self, *args, **kwargs):
        # Initialization
        super().__init__(*args, **kwargs)
        self.tree_model = TreeModel(self)
        self.filter_list_model = FilterListModel(DEFAULTS)
        self.main_window = FilteratorMainWindow(self)
        self.manager = Manager(self)
        self.processor = Processor()
        # Two-phase setup: all objects are constructed first, then
        # cross-wired, so each setup call can reference the others.
        self.main_window.setupMainWindow(self.manager)
        self.manager.setupManager(self.main_window, self.processor)
        self.processor.setupProcessor(self.manager)
        # Multithreading: the processor is moved to its own QThread
        # (before the thread is started) so its work runs off the GUI
        # event loop.
        self.processor_thread = QtCore.QThread()
        self.processor.moveToThread(self.processor_thread)
        self.processor_thread.start()

    def __call__(self):
        # Show MainWindow maximized and bring it to the front.
        self.main_window.showMaximized()
        self.main_window.raise_()
        # Start event loop; blocks until the application quits.
        self.exec_()
def _run_console_option(self) -> None:
    """Prompt for search criteria, print the matching records, then
    print the total number born across those matches."""
    criteria = UserInterface.GetUserInput()
    results = Processor.SearchData(criteria[0], self.names_data_dict, criteria[1])
    UserInterface.PrintSearchResults(results)
    total_born = Processor.GetSearchTotals(results)
    UserInterface.PrintTotalBorn(total_born)
class CompaFactory():
    """Facade running the full phrase-analysis pipeline: extraction,
    spell checking, repetition removal, POS tagging, and parsing."""

    def __init__(self):
        self.extractor = Extractor()
        self.processor = Processor(lists=WORDLISTS)
        self.spellcheck = SpellChecker()
        self.repetition = RepetitionChecker()

    def execute(self, word):
        """Run the pipeline on *word* and return every intermediate
        result: (spelling_errors, semantic_errors, repetitions,
        reduced, tagged, tokens)."""
        # Extract the tokens from the phrase.
        tokens = self.extractor.extract(word)
        # Grammar check: collect the spelling errors found.
        spelling_errors = self.spellcheck.from_tokenizer(tokens)
        # Apply the corrections back onto the extracted tokens.
        tokens = self.spellcheck.autocorrect(tokens)
        # Drop duplicated tokens, recording the repetitions removed.
        repetitions, tokens = self.repetition.repetition(tokens)
        # Annotate the token list with part-of-speech tags.
        tagged = self.processor.process(tokens)
        # Syntactic parsing.
        parsed = ReduceParser(tagged)
        semantic_errors = parsed.get_errors()
        reduced = parsed.sintagmas
        return spelling_errors, semantic_errors, repetitions, reduced, tagged, tokens
def __init__(self, *args, **kwargs):
    """Construct the application's models, windows, and worker objects,
    cross-wire them, and start the processor's background thread."""
    # Initialization
    super().__init__(*args, **kwargs)
    self.tree_model = TreeModel(self)
    self.filter_list_model = FilterListModel(DEFAULTS)
    self.main_window = FilteratorMainWindow(self)
    self.manager = Manager(self)
    self.processor = Processor()
    # Two-phase setup: everything is constructed first, then
    # cross-wired, so each setup call can reference the others.
    self.main_window.setupMainWindow(self.manager)
    self.manager.setupManager(self.main_window, self.processor)
    self.processor.setupProcessor(self.manager)
    # Multithreading: the processor is moved to its own QThread
    # (before the thread is started) so its work runs off the GUI
    # event loop.
    self.processor_thread = QtCore.QThread()
    self.processor.moveToThread(self.processor_thread)
    self.processor_thread.start()
def main():
    """Serve Telegram messages with the Processor until "/shutdown"."""
    brain = Processor()
    channel = Telegram()
    while True:
        request = channel.get_input()
        # The shutdown command terminates the serving loop.
        if request == "/shutdown":
            return
        channel.output(brain.process(request))
def write_comments(comments, attributes, file=sys.stdout):
    """Write the given *attributes* of each comment in *comments* as
    CSV rows to *file* (default: stdout).

    Each value is run through the text preprocessor; values the
    preprocessor rejects with TypeError are written unchanged.
    """
    def cleaned(processor, value):
        # Preprocess text; keep non-preprocessable values as-is.
        try:
            return processor.preprocess(value)
        except TypeError:
            return value

    writer = csv.writer(file, dialect='unix')
    with Processor('en') as processor:
        for comment in comments:
            writer.writerow(
                [cleaned(processor, comment[attribute]) for attribute in attributes])
def match(self, a, b, gap_length, match_length):
    """Compare texts *a* and *b* and return the resulting MatchSet."""
    comparator = self.Comparator(a=a, b=b,
                                 name_a=self.name_a, name_b=self.name_b,
                                 gap_length=gap_length,
                                 match_length=match_length)
    pairs = comparator.compare()

    def stand_in_document(body):
        # Minimal document stand-in carrying the raw and pre bodies.
        doc = Mock()
        doc.raw_body = body
        doc.pre_body = body
        return doc

    matches = Processor.singlet_pairs_to_matches(
        alpha=stand_in_document(a),
        beta=stand_in_document(b),
        singlet_pairs=pairs)
    return MatchSet(None, None, matches=matches)
def match(self, a, b, gap_length, match_length):
    """Compare texts *a* and *b* and return the resulting MatchSet."""
    comparator = self.Comparator(
        a=a, b=b, name_a=self.name_a, name_b=self.name_b,
        gap_length=gap_length, match_length=match_length)
    pairs = comparator.compare()

    def stand_in_document(body):
        # Minimal document stand-in carrying the raw and pre bodies.
        doc = Mock()
        doc.raw_body = body
        doc.pre_body = body
        return doc

    matches = Processor.singlet_pairs_to_matches(
        alpha=stand_in_document(a),
        beta=stand_in_document(b),
        singlet_pairs=pairs)
    return MatchSet(None, None, matches=matches)
def setUp(self):
    """Build two documents from JSON fixtures and a MatchSet pairing
    lowercase passages ('a'..'j') with uppercase ones ('A'..'J').

    Fixes: ``xrange`` is Python-2-only and fails on Python 3 — use
    ``range`` (valid in both); drop the dead ``self.matches = []``
    initialization that was immediately overwritten.
    """
    self.passages_a = [chr(i + ord('a')) for i in range(10)]
    self.passages_b = [chr(i + ord('A')) for i in range(10)]
    self.file_a = 'models/test_data/match_set_test.json'
    self.document_a = Document.from_json(self.file_a)
    self.file_b = 'models/test_data/match_set_test2.json'
    self.document_b = Document.from_json(self.file_b)
    # Pair corresponding passages as (alpha, beta) match halves.
    self.singlet_pairs = [
        (MatchHalf(passage=pa), MatchHalf(passage=pb))
        for pa, pb in zip(self.passages_a, self.passages_b)
    ]
    # Alpha/beta need to be actual documents, not names
    self.matches = Processor.singlet_pairs_to_matches(
        alpha=self.document_a,
        beta=self.document_b,
        singlet_pairs=self.singlet_pairs)
    self.match_set = MatchSet(alpha_doc=self.document_a,
                              beta_doc=self.document_b,
                              matches=self.matches)
def setUp(self):
    """Build two documents from JSON fixtures and a MatchSet pairing
    lowercase passages ('a'..'j') with uppercase ones ('A'..'J').

    Fixes: ``xrange`` is Python-2-only and fails on Python 3 — use
    ``range`` (valid in both); drop the dead ``self.matches = []``
    initialization that was immediately overwritten.
    """
    self.passages_a = [chr(i + ord('a')) for i in range(10)]
    self.passages_b = [chr(i + ord('A')) for i in range(10)]
    self.file_a = 'models/test_data/match_set_test.json'
    self.document_a = Document.from_json(self.file_a)
    self.file_b = 'models/test_data/match_set_test2.json'
    self.document_b = Document.from_json(self.file_b)
    # Pair corresponding passages as (alpha, beta) match halves.
    self.singlet_pairs = [
        (MatchHalf(passage=pa), MatchHalf(passage=pb))
        for pa, pb in zip(self.passages_a, self.passages_b)
    ]
    # Alpha/beta need to be actual documents, not names
    self.matches = Processor.singlet_pairs_to_matches(
        alpha=self.document_a,
        beta=self.document_b,
        singlet_pairs=self.singlet_pairs)
    self.match_set = MatchSet(alpha_doc=self.document_a,
                              beta_doc=self.document_b,
                              matches=self.matches)
def __init__(self):
    """Load the name-data files from the local ./names/ directory."""
    self.names_data_dict = Processor.GetFilesTextData("./names/")
def _run_graph_option(self) -> None:
    """Ask the user for a name and plot its yearly birth counts."""
    target_name = UserInterface.GetSearchName()
    matches = Processor.SearchData(target_name, self.names_data_dict)
    DataGraph.display_graph("Year", "Number born", matches)
def __init__(self):
    """Instantiate the pipeline components used by this object."""
    self.extractor = Extractor()                 # phrase tokenizer
    self.processor = Processor(lists=WORDLISTS)  # processor backed by the word lists
    self.spellcheck = SpellChecker()
    self.repetition = RepetitionChecker()