def print_new_words_es(self):
    """Print only the rows containing at least one unknown Spanish word.

    Each row's original_word is expanded via Translator.resolve_word_list
    and normalized; rows whose normalized words are ALL already present in
    wordlist_es.WORD_COLLECTION_ES are reported as removed, every other
    row is written with print_csv_row.
    """
    for row in self._parse_src():
        words = {Translator.normalize_word(word)
                 for word in Translator.resolve_word_list(row.original_word)}
        if words <= wordlist_es.WORD_COLLECTION_ES:
            # every normalized word is already in the collection -> drop the row
            print('Removed entry %s (Normalized: %s)' % (row.original_word, ', '.join(words)))
        else:
            self.print_csv_row(row)
def print_commons_marked(self, diff_file):
    """Print all source rows, tagging those that also occur in diff_file.

    Rows whose normalized word appears in diff_file get 'marked' prepended
    to their tags; words from diff_file that matched no source row are
    appended at the end as rows tagged 'new'.

    Keyword arguments:
    diff_file -- file parsed via self._reader; its normalized words are
                 matched against the source rows
    """
    rows = self._parse_src()
    marked = Wordanalyzer.get_normalized_words(self._reader.parse(diff_file))
    self.print_header_row(rows[0].get_column_format() if len(rows) > 0 else self._column_format.split('|'))

    for row in rows:
        normalized = Translator.normalize_word(row.original_word)
        if normalized in marked:
            # prepend the marker, preserving any existing tags
            row.tags = 'marked %s' % row.tags if len(row.tags) > 0 else 'marked'
            # each diff word is consumed once; leftovers are emitted below
            marked.remove(normalized)
        self.print_csv_row(row)

    # words present only in diff_file become new entries
    for word in marked:
        self.print_csv_row([word, '', '', '', '', '', '', 'new'])
def print_new_words_en(self):
    """Print only the rows whose English word is not yet known.

    The word is normalized by stripping annotations and lower-casing;
    entries already contained in wordlist_en.WORD_COLLECTION_EN are
    reported as removed instead of being printed.
    """
    for entry in self._parse_src():
        # normalize entry for the membership test
        normalized = Translator.strip_annotations(entry.original_word).lower()
        if normalized not in wordlist_en.WORD_COLLECTION_EN:
            self.print_csv_row(entry)
        else:
            print('Removed entry %s (Normalized: %s)' % (entry.original_word, normalized))
def print_word_array(self, lang_code):
    """Write the source rows to self._ostream as a generated Python module.

    The module defines WORD_COLLECTION_<lang_code> as a set built from the
    sorted, lower-cased words. For 'es' each row may expand to several
    words via Translator.resolve_word_list; for every other code the word
    is Translator.strip_annotations(...) of the row.

    Keyword arguments:
    lang_code -- language code suffix for the generated collection name
    """
    rows = self._parse_src()
    # header of the generated module
    self._ostream.write('#!/usr/bin/env python3\n\nWORD_COLLECTION_%s = set(sorted([\n' % lang_code)

    first_row = True

    def write_entry(word):
        # Emit one quoted word; the first entry gets no leading comma.
        nonlocal first_row
        if first_row:
            self._ostream.write('\t u\'')
            first_row = False
        else:
            self._ostream.write('\t, u\'')
        self._ostream.write(word.replace('\'', '\\\''))
        self._ostream.write('\'\n')

    if lang_code == 'es':
        # Spanish rows can contain several words per entry
        for row in rows:
            for word in Translator.resolve_word_list(row.original_word):
                write_entry(word.lower())  # normalize entry
    else:
        for row in rows:
            write_entry(Translator.strip_annotations(row.original_word).lower())  # normalize entry

    self._ostream.write(']))\n')
def transcript_genome(self):
    """Group all expressed genes by type and hand them to a Translator.

    Builds self.to_transcribe, a dict mapping gene.type to the list of
    expressed genes of that type found in self.dna, then stores a
    Translator constructed from that dict in self.translator.
    """
    self.to_transcribe = {}
    for gene in self.dna:
        if gene.is_expressed:
            # setdefault replaces the explicit contains-check branch
            self.to_transcribe.setdefault(gene.type, []).append(gene)
    self.translator = Translator(self.to_transcribe)
def get_normalized_words(rows):
    """Return the set of Translator-normalized original words of *rows*."""
    return {Translator.normalize_word(row.original_word) for row in rows}
import time
from functools import partial

from Connection import Connection, Listener
from translators import Translator
from handlers.request_handlers import handle_product_translation

if __name__ == "__main__":
    # One Translator per market; the arguments look like
    # (rate, tax multiplier, currency symbol) — confirm against
    # translators.Translator before relying on that reading.
    translators = {
        "NL_EUR": Translator(1.0, 1.21, "€"),
        "GB_GBP": Translator(0.855, 1.2, "£"),
        "US_USD": Translator(1.11, 1.1, "$"),
    }

    # Route incoming "translate-products" requests to the handler,
    # pre-binding the translator table.
    request_handlers = {
        "translate-products": partial(handle_product_translation, translators)
    }

    c = Connection(
        "main-queue",
        "control-queue",
        61613,
        Listener(request_handlers=request_handlers),
        "translator",
    )

    while True:
        # Keep the process alive so docker does not terminate the container.
        # Nothing depends on the wake-up interval, so sleep a full second
        # instead of waking 100 times per second.
        time.sleep(1)