def evaluate_policy_docs():
    opt = make_options()
    dataset = data.Dataset()
    feeder = data.Feeder(dataset)
    model, _ = models.load_or_create_models(opt, False)
    translator = Translator(model, opt.beam_size, opt.min_length, opt.max_length)
    docs = data.load_policy_documents()
    for doc in docs:
        data.parse_paragraphs(doc)
    lines = []
    for doc in docs:
        paras = [p for p in doc.paragraphs if 50 <= len(p) <= 400]
        if not paras:
            continue
        lines.append('=================================')
        lines.append(doc.title)
        if len(paras) > 16:
            paras = random.sample(paras, 16)
        paras = sorted(paras, key=lambda x: -len(x))
        pids = [feeder.sent_to_ids(p) for p in paras]
        pids = data.align2d(pids)
        src = nu.tensor(pids)
        lengths = (src != data.NULL_ID).sum(-1)
        tgt = translator.translate(src.transpose(0, 1), lengths, opt.best_k_questions)
        questions = [[feeder.ids_to_sent(t) for t in qs] for qs in tgt]
        for p, qs in zip(paras, questions):
            lines.append('--------------------------------')
            lines.append(p)
            for k, q in enumerate(qs):
                lines.append('predict {}: {}'.format(k, q))
    utils.write_all_lines(opt.output_file, lines)
def main():
    import os
    DEVNULL = open(os.devnull, 'wb')
    # Kill previous ovs and dnsmasq processes.
    Popen(["sudo", "pkill", "ovs"], stdout=DEVNULL, stderr=DEVNULL)
    Popen(["sudo", "killall", "dnsmasq"], stdout=DEVNULL, stderr=DEVNULL)
    # pass the username to the custom net function
    customNet(username=sys.argv[5], enableBlank=sys.argv[1],
              enableBasic=sys.argv[2], enableDhcp=sys.argv[3])
    arguments = sys.argv[1:]
    pcapFiles = []
    username = sys.argv[5]
    # sudo python NetInitialiser.py bw delay loss file.xml h1.pcap h2.pcap s1-eth1.pcap
    for i in range(6, len(sys.argv)):
        pcapFiles = sys.argv[i].split()
    print "pcap files = ", pcapFiles
    packetReader = PacketReader(pcapFiles, username)
    packetReader.openFiles()
    # packetReader.calculateTimes()
    # print " "
    # print packetReader.getFullSrcIPList()
    # print " "
    # print packetReader.getFullDstIPList()
    translator = Translator(packetReader.getFullSrcIPList(), packetReader.getFullDstIPList(),
                            packetReader.getPktTimes(), packetReader.getIpNodeDict(),
                            packetReader.getMetaInfo())
    print sys.argv[4]
    translator.getHostSwitchIDs(sys.argv[4])
    # username = getpass.getuser()
    translator.writeToXML("/home/comhghall/Final-Year-Project/resources/netanim_topo.xml")
def test_match_found(self):
    t = Translator()
    self.assertFalse(t.countef)  # empty object for counting mappings
    self.assertFalse(t.totalf)  # empty object
    t.train(english, foreign)
    self.assertEquals(t.translate('dog'),
                      [('der', 0.1287760647333088), ('Hund', 0.8712239352666912)])
def test_no_match_found(self):
    t = Translator()
    self.assertFalse(t.countef)  # empty object for counting mappings
    self.assertFalse(t.totalf)  # empty object
    t.train(english, foreign)
    # self.assertEquals(t.translate('bro'), NoMatchError('no matches found'))
    self.assertRaises(NoMatchError, t.translate, 'bro')
def translate_file(input_file, input_file_name, output_file, write_boot):
    """
    translates the given input vm file to the given output asm file
    :param input_file: the input vm file
    :param input_file_name: the name of the input file
    :param output_file: the output asm file
    :param write_boot: should the function write the booting lines in the
        beginning of the translation
    """
    # split the path to its directories and the file name
    file_name_dirs = input_file_name.split(os.path.sep)
    # gets the file name only
    file_name = file_name_dirs[FILE_NAME_POSITION][:-len(VM_SUFFIX) - 1]
    file_parser = Parser(file_name)
    file_translator = Translator(file_parser)
    # if needed: puts the booting line at the start of the file
    if write_boot:
        output_file.write(file_translator.translate_booting())
    # the input file translation
    for line in input_file:
        file_parser.set_command(line)  # setting the parser to the current line
        file_parser.parse()
        asm_command = file_translator.translate()
        output_file.write(asm_command)  # printing the asm code in the output file
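# A minimal usage sketch, not part of the original snippet: how translate_file
# might be driven by a caller. The file names "Prog.vm"/"Prog.asm" are
# hypothetical, and FILE_NAME_POSITION / VM_SUFFIX are assumed to be defined
# in the surrounding module.
with open("Prog.vm") as vm_file, open("Prog.asm", "w") as asm_file:
    translate_file(vm_file, "Prog.vm", asm_file, write_boot=True)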
class gui():
    def __init__(self, debug=False):
        """Launch GUI"""
        self.debug = debug
        self.app = QApplication(sys.argv)
        self.translator = Translator()
        self.app.installTranslator(self.translator.getGeneric())
        self.app.installTranslator(self.translator.getDFF())
        self.app.setApplicationName("Digital Forensics Framework")
        self.app.setApplicationVersion("1.2.0")
        pixmap = QPixmap(":splash.png")
        self.splash = SplashScreen(pixmap, Qt.WindowStaysOnTopHint, self.app.applicationVersion())
        self.splash.setMask(pixmap.mask())

    def launch(self, modPath=None):
        self.splash.show()
        if modPath:
            self.loader = loader()
            self.loader.do_load(modPath, self.splash.showMessage)
        mainWindow = MainWindow(self.app, self.debug)
        mainWindow.show()
        self.splash.finish(mainWindow)
        sys.exit(self.app.exec_())
def __init__(self, source):
    self.output_type = type(BaseMessage)
    self.input_type = type(BaseMessage)
    # assert issubclass(source, Source)
    # assert source.output_type == self.input_type
    # self.next = source
    Translator.__init__(self, source)
class MessageTests(unittest.TestCase):
    def setUp(self) -> None:
        self.translator = Translator(common)

    def test_message_without_head(self):
        msg = "1 2 3 6 0 й 6 0 6 0 6 3 й 6 0 3 й й 1"
        self.assertFalse(self.translator.is_encoded(msg))

    def test_valid_message(self):
        msg = "й й 2 1 2 3 6 0 й 1 1 1 3 3 3 6 6 6 й й й й й 1"
        self.assertTrue(self.translator.is_encoded(msg))

    def test_invalid_len_message(self):
        msg = "й й 2 1 2 3 6 0 й 6 0 6 0 6 3 й 6 й й 1"
        self.assertFalse(self.translator.is_encoded(msg))

    def test_message_without_tail(self):
        msg = "й й 2 1 2 3 6 0 й 6 0 6 0 6 3 й 6 0"
        self.assertFalse(self.translator.is_encoded(msg))
def test_simple_test(self):
    translator = Translator("tests/test_data/translate/file_to_translate",
                            "tests/test_data/translate/actual_file",
                            "tests/test_data/translate/entries.json")
    translator.translate("tests/test_data/translate/actual_file")
    os.remove("tests/test_data/translate/actual_file")
def translate(config):
    model_path = config.model_path
    if config.gpu < 0:
        device = torch.device('cpu')
    else:
        device = torch.device('cuda:0')
    model_state = Checkpointer.restore(model_path, device)
    fields = model_state["fields"]
    src_field = fields[0][1]  # SRC field object
    tgt_field = fields[1][1]  # TGT field object
    datareader = DatasetReader(config, fields)
    test_iter = datareader.prepare_for_test()
    model_config = model_state["config"]
    model_config.beam_width = config.beam_width
    model = build_model(model_config, src_field, tgt_field)
    model.to(device)
    model.load_state_dict(model_state["model"])
    translator = Translator(config, fields, model, test_iter)
    translator.translate()
def build_floor(self):
    # place the floor down first
    self.place_floor()
    # build outer walls
    self.place_outer_walls()
    # create initial room positions
    self.initialize_rooms()
    # debug information
    # self.print_floor()
    # grow the rooms until none can grow anymore
    self.grow_rooms()
    # fix the walls to patch up weird growing
    self.fix_walls()
    # builds the door locations between rooms and on the outerwall
    self.build_doors()
    # debug information
    # self.print_floor()
    # translate self.floor from 2d numpy array into 3d Minecraft space
    translator = Translator(self.level, self.box, self.floor, self.options)
    translator.translate_floor()
def translate(message):
    t = Translator(YANDEX_API_KEY)
    if t.get_language(message.text) == 'en':
        result = t.get_translation(message.text, 'ru')
    else:
        result = t.get_translation(message.text, 'en')
    bot.send_message(message.chat.id, result)
def get(self, page):
    self.response.headers['Content-Type'] = 'text/html'
    # FIXME: URL stringing is sooo crude.
    response = fetch('http://trac.webkit.org/export/HEAD/trunk/LayoutTests/' + page + '-expected.txt')
    translator = Translator()
    self.response.out.write('<html>\n<head>\n<link rel="stylesheet" type="text/css" href="/assets/styles.css">\n<script src="/assets/visualizer.js"></script>\n<script>')
    translator.translate_file(self.response.out, response.content.split('\n'))
    self.response.out.write('</script>\n</head>\n<body></body>\n</html>\n')
def __init__(self, f):
    self.asm_file = f
    self.table = SymbolTable()
    self.data = self.read_file()
    self.parser = Parser(self.data)
    self.commands = self.parser.commands
    self.translator = Translator()
    self.filename = self.asm_file.split(".")[0]
def __init__(self, show_tree):
    # terminales = ['+', '-', '*', '/', '(', ')', 'num', 'id', '$']
    # noterminales = ['E', 'Ep', 'T', 'Tp', 'F']
    self.nodoInicial = None
    self.tablaSintactica = None
    self.terminales.add('$')  # end-of-string marker
    self.analizador_lexico = AnalizadorLexico()
    self.translator = Translator()
    self.show_tree = show_tree
def handle_translation(message):
    translator = Translator()
    translator.detect_language(message)
    result = None
    try:
        result = translator.translate_text_with_model(translator.target_language, message)
    except:
        result = 'Sorry, cannot translate your message'
    return result
def run(self):
    self.soc_serv = UDPSocketServer(self.app_logger)
    self.transl = Translator(self.app_logger)
    self.app_logger.info("server started")
    while True:
        message, address = self.soc_serv.recv()
        for client in self.clients:
            if address[0] == client.IP:
                client_queue = client.QUEUE
                client_queue.put(message)
def testTranslate(self):
    translator = Translator()
    content = "我爱"
    from_lang = 'ch'
    to_lang = 'en'
    result = translator.translate(content, from_lang, to_lang)
    self.assertEqual("I love", result)
    translate = translator.translate("When writing unit tests", to_lang, from_lang)
    print(translate)
def __init__(self, filename):
    self.filename = filename
    self.translator = Translator()
    self.translator.load_model(self.MODEL_PATH)
    self.image_out_path = os.path.join(os.getcwd(), "./data/images/")
    self.outfilename = os.path.join(os.path.dirname(self.filename),
                                    "output" + str(uuid.uuid1()) + ".txt")
    self.english = list()
    self.hindi = list()
def translate():
    if request.method == 'POST':
        # Get the results from the web user
        form_data = request.form
        for key, value in form_data.items():
            if key == 'Input_Text':
                input = value
                continue
            if key == 'Language':
                lang_index = value
                set_language(lang_index)
        # Get the model preferences locally or from S3
        s3_file = False
        try:
            if s3_file:
                model_pref_path = 'machine-learning/models/' + lang_prefix[lang_index] + model_id + 'pickles/model_prefs.pkl'
                s3 = S3Bucket()
                model_prefs = pickle.load(s3.read_pickle(model_pref_path))
            else:
                model_pref_path = 'models/' + lang_prefix[lang_index] + model_id + 'pickles/model_prefs.pkl'
                model_prefs = pickle.load(open(model_pref_path, 'rb'))
        except Exception as e:
            input = e
            translation_error = 'No Model found for {}'.format(model_pref_path)
            return render_template(
                'index.html',
                input_echo=input,
                input_text='Unable to load language model: ' + lang_index,
                translation=translation_error,
                selected_lang=get_selected(lang_options),
                options=lang_options,
                lang_details=lang_details[lang_index],
                bleu_score=bleus[lang_index])
        # A model exists, so use it and translate away!
        T = Translator(model_prefs)
        translation = T.translate(input)
        # Keras backend needs to clear the session
        clear_session()
        return render_template('index.html',
                               input_echo=input,
                               input_text=input,
                               translation=translation,
                               selected_lang=get_selected(lang_options),
                               options=lang_options,
                               lang_details=lang_details[lang_index],
                               bleu_score=bleus[lang_index])
class PageTranslator(object):
    """
    Provides methods for translating the text of each page belonging to a
    publication, into English.
    """

    translator = None

    def __init__(self, api_key):
        self.translator = Translator(api_key)

    def translate_page(self, input_filepath, output_file_path, output_file_name):
        """
        Reads in text from the input file, and then uses the Translator to
        determine whether the language is English or not. If not, then it uses
        the Translator to acquire an English language version and save it to
        the output file.

        Args:
            input_filepath (str): The input page text file
            output_file_path (str): The output directory path
            output_file_name (str): The name of the file to save the translated text to
        """
        if os.path.exists(output_file_path + output_file_name):
            return
        with open(input_filepath, 'r') as input_file:
            page_text = input_file.read()
            is_english = self.translator.is_english(page_text)
            if is_english != True:
                print "[" + is_english + "]" + input_filepath + " ---> " + output_file_path + output_file_name
                translation = self.translator.translate(page_text)
                if not os.path.exists(output_file_path):
                    os.makedirs(output_file_path)
                with open(output_file_path + output_file_name, 'w') as output_file:
                    output_file.write(str(translation['text']))

    def translate_publications(self, working_directory, translation_directory, input_file):
        """
        Iterates through an input csv containing ukmhl identifiers and then
        iterates through the page text files for each publication, using
        translate_page to attempt to translate the page text if required.

        Args:
            input_file (str): The input csv file containing a list of ukmhl identifiers to process
        """
        lab_csv_reader = LabCSVReader(input_file)
        for row in lab_csv_reader.reader():
            publication_directory = working_directory + row['id'] + "/pages/"
            for page_file in os.listdir(publication_directory):
                translation_path = translation_directory + row['id'] + "/translations/"
                self.translate_page(publication_directory + page_file, translation_path, page_file)
        lab_csv_reader.close()
class TextDocument(Document):
    def set_text(self, text):
        self.text = text

    def translate(self, config):
        self.translator = Translator(config)
        if self.input_file is not None:
            self.braille_text = self.translator.translate_file(self.input_file)
        else:
            self.braille_text = self.translator.translate_string(self.text)
def __init__(self, inputt, musicName, initialBPM, initialInstrument):
    input_converter = Input_information_converter(inputt)
    translator = Translator()
    self._text = input_converter.get_text()
    self._instructions = translator.translate_text_to_instructions(self._text)
    self._music_name = musicName
    self._initial_bpm = initialBPM
    self._initialInstrument = initialInstrument
    self._track = Track(1, self._initial_bpm, self._initialInstrument)
def translate(self, term, language):
    if len(term.split()) > 1:
        print("Error: A Term must be only a single word")
    else:
        try:
            t = Translator()
            word = t.translate(term, dest=language)
            return word
        except:
            print("Invalid Word")
def get_bug_report(self):
    # set gzip header
    cherrypy.response.headers['Content-Type'] = 'application/gzip'
    # create return tuple
    return_tuple = {}
    return_tuple['status'] = "failed"
    return_tuple['warning'] = ""
    return_tuple['error'] = ""
    # parse json encoded input
    input = helper.convert_dict_values_to_utf8(cherrypy.request.json)
    # user language
    language = ""
    if input.has_key("language") == True:
        language = input['language']
    # if the user sends a language which is not german, take the default language setting
    if language != "de":
        language = Config().get_param("default_language")
    # initialize the translator object with the user's chosen language
    translator = Translator(language)
    # bug report variables
    if input.has_key("file_name") == False:
        return_tuple['error'] = translator.translate("message", "no_bug_report_file_name")
        return helper.zip_data(return_tuple)
    if input.has_key("bug_report") == False:
        return_tuple['error'] = translator.translate("message", "no_bug_report_contents")
        return helper.zip_data(return_tuple)
    # save bug report
    try:
        bug_report_folder = os.path.join(Config().get_param("logs_folder"), "client")
        if os.path.exists(bug_report_folder) == False:
            os.makedirs(bug_report_folder)
        file = open(os.path.join(bug_report_folder, input['file_name'].split("/")[-1]), 'w')
        file.write(input['bug_report'])
        file.close()
    except IOError as e:
        pass
    # send mail to the admin
    helper.send_email("OSMRouter: New bug report",
                      "%s\n\n%s" % (input['file_name'].split("/")[-1], input['bug_report']))
    # convert return_tuple to json and zip it, before returning
    return_tuple['status'] = "ok"
    return helper.zip_data(return_tuple)
def __init__(self, path, source_language, target_language, engine="Google"):
    self.source_language = source_language
    self.target_language = target_language
    self.ebook = Ebook(path)
    self.set_out_path()
    self.set_counter_path()
    self.set_start_point()
    self.translator = Translator(source_language, target_language, engine)
def main():
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO)
    logger = logging.getLogger(__name__)
    args = parse_args()

    # load data
    if args.share_vocab:
        vocab = load_vocab(args.vocab_path)
        src_vocab = vocab
        tgt_vocab = vocab
        vocab_size = len(vocab)
        logger.info("Load vocabulary from %s, vocabulary size: %d" % (args.vocab_path, vocab_size))
    else:
        src_vocab = load_vocab(args.src_vocab_path)
        tgt_vocab = load_vocab(args.tgt_vocab_path)
        src_vocab_size, tgt_vocab_size = len(src_vocab), len(tgt_vocab)
        logger.info("Load src vocabulary from %s, vocabulary size: %d" % (args.src_vocab_path, src_vocab_size))
        logger.info("Load tgt vocabulary from %s, vocabulary size: %d" % (args.tgt_vocab_path, tgt_vocab_size))

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    use_gpu = n_gpu > 0

    model = torch.load(args.model_path)
    model.to(device)

    input_data = list(load_data(args.input_file, src_vocab))
    logger.info("Load dataset with %d samples from %s" % (len(input_data), args.input_file))

    translator = Translator(model=model,
                            src_vocab=src_vocab,
                            tgt_vocab=tgt_vocab,
                            batch_size=args.batch_size,
                            beam_size=args.beam_size,
                            device=device)
    with torch.no_grad():
        results = translator.translate(input_data)
    prediction = results['prediction']
    content = '\n'.join(prediction) + '\n'
    with open(args.output_file, 'w', encoding='utf8') as f:
        f.write(content)
    logger.info('%d samples have been translated.' % len(prediction))
class EbookTranslator:
    def __init__(self, path, source_language, target_language, engine="Google"):
        self.source_language = source_language
        self.target_language = target_language
        self.ebook = Ebook(path)
        self.set_out_path()
        self.set_counter_path()
        self.set_start_point()
        self.translator = Translator(source_language, target_language, engine)

    def translate(self):
        for counter, original_sentence in enumerate(tqdm(self.ebook.sentences)):
            translated_sentence = self.translator.translate(original_sentence)
            both_sentences = f"{translated_sentence}\n\n{original_sentence}\n\n"
            self.write_to_file(both_sentences, self.out_path)
            self.write_to_file(str(counter), self.counter_path, mode="w")
        self.quit()

    def set_out_path(self):
        self.out_path = self.ebook.path.replace(
            ".txt", f"_translated_to_{self.target_language}.txt")

    def set_counter_path(self):
        self.counter_path = self.ebook.path.replace(".txt", "_counter.txt")

    def write_counter_to_file(self, counter):
        self.write_to_file(counter, self.counter_path, mode="w")

    def write_to_file(self, text, path, mode="a"):
        with open(path, mode) as file:
            file.write(text)

    def read_counter(self):
        with open(self.counter_path) as file:
            return int(file.read())

    def set_start_point(self):
        if os.path.exists(self.counter_path):
            start_point = self.read_counter() + 1
            print(f"Continuing translation from sentence {start_point}")
            self.ebook.sentences = self.ebook.sentences[start_point:]
        else:
            print("Starting translation from the beginning.")

    def quit(self):
        self.translator.quit()
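# A minimal usage sketch, not part of the original snippet: drive the resumable
# translation loop above. The file name "book.txt" and the language codes are
# hypothetical placeholders.
ebook_translator = EbookTranslator("book.txt", "en", "de")
ebook_translator.translate()  # appends to book_translated_to_de.txt, checkpoints to book_counter.txt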
def main():
    """Requests infix expressions, translates them to postfix,
    and evaluates them, until the user enters nothing."""
    while True:
        sourceStr = input("Enter an infix expression: ")
        if sourceStr == "":
            break
        try:
            scanner = Scanner(sourceStr)
            translator = Translator(scanner)
            postfix = translator.translate()
            evaluator = Evaluator(postfix)
            print("Value:", evaluator.evaluate())
        except Exception as e:
            print("Error:", e, translator.translationStatus())
def evaluation(model, src_vocab, tgt_vocab, valid_data, reference,
               batch_size=64, beam_size=1, args=None, device=None):
    translator = Translator(
        model=model,
        src_vocab=src_vocab,
        tgt_vocab=tgt_vocab,
        batch_size=batch_size,
        beam_size=beam_size,
        device=device)
    result = translator.translate(valid_data)
    prediction = result['prediction']
    prediction = [pred.replace('@@ ', '') for pred in prediction]
    prediction = [pred.split() for pred in prediction]
    bleu = corpus_bleu(reference, prediction) * 100.
    return bleu
def on_content_loading_state_change(self, event):
    """Handles loading/unloading of a content.

    :param event: Event that contains information about current availability of a content
    :type event: :class:`events.ContentLoadingState`
    """
    video = getattr(event, 'video', None)
    subtitles = getattr(event, 'subtitles', None)
    if video is not None and video != self._video_state:
        self._video_state = video
        if self.video.Length():
            self.video_slider.SetRange(0, self.video.Length())
            self.video_slider.SetPageSize(int(self.video.Length() / 10))
            self.Layout()
        for widget in ('play_button', 'stop_button', 'video_slider'):
            getattr(self, widget).Enable(video)
        self.enable_menu_actions(play=video, stop=video)
        self.video.SetVolume(self.volume_slider.GetValue() / 100.0)
        if video:
            wx.CallLater(300, wx.PostEvent, self,
                         wx.PyCommandEvent(wx.EVT_BUTTON.typeId, ids.PLAY))
    if subtitles is not None:
        self._subtitles_state = subtitles
    both = self._video_state and self._subtitles_state
    for widget in ('mode_choice', 'delay_spin', 'answer_edit'):
        getattr(self, widget).Enable(both)
    if both:
        self.translator = Translator()
    else:
        try:
            del self.translator
        except AttributeError:
            pass
class PdfDocument(Document):
    def _get_text(self, file):
        uri = "file://" + file
        document = poppler.document_new_from_file(uri, None)
        npages = document.get_n_pages()
        text = ""
        for p in range(0, npages):
            page = document.get_page(p)
            w, h = page.get_size()
            r = poppler.Rectangle()
            r.x1 = 0
            r.x2 = w
            r.y1 = 0
            r.y2 = h
            # Currently we are getting the layout from the pdf here
            # we should collapse it
            text += page.get_text(poppler.SELECTION_GLYPH, r)
        return text

    def translate(self, config):
        # FIXME: Check if poppler gives us always UTF-8 strings
        config['outputFormat']['inputTextEncoding'] = "UTF8"
        self.translator = Translator(config)
        text = self._get_text(self.input_file)
        self.braille_text = self.translator.translate_string(text)
        return
def translate(source_lang, target_lang, data_list: List[str] = Query(None),
              special_tokens: List[str] = Query(None)):
    '''
    :param source_lang: source language [arb, eng, heb]
    :param target_lang: target language [arb, eng, heb]
    :param data_list: list[str]
    :return: list[str]
    '''
    model_index = f"{source_lang}_{target_lang}"
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if model_index not in running_models.keys():
        # need to init model
        running_models[model_index] = Translator(source_lang=source_lang,
                                                 target_lang=target_lang,
                                                 special_tokens=special_tokens)
    else:
        # model exists
        if torch.cuda.is_available():
            running_models[model_index].load_model_to_gpu()
    running_models[model_index].to(device)
    translation_df = running_models[model_index].translate(data_list)
    api_logger.info(f"returned value to client: {translation_df['tgt_text'].tolist()}")
    _free_model(model_index)
    return translation_df["tgt_text"].tolist()
def translate(self, config):
    # FIXME: Check if poppler gives us always UTF-8 strings
    config['outputFormat']['inputTextEncoding'] = "UTF8"
    self.translator = Translator(config)
    text = self._get_text(self.input_file)
    self.braille_text = self.translator.translate_string(text)
    return
def main_loop():
    translator = Translator(settings.TRANSLATION_MAP, DoneAction())

    def collect_actions():
        result = translator.translate_pressed(universe.mode)
        for event in pygame.event.get():
            universe.mouseCoord = pygame.mouse.get_pos()
            print(universe.mouseCoord)
            result += translator.translate_event(universe.mode, event)
        return result

    while True:
        actions = collect_actions()
        if any(action.is_done() for action in actions):
            break
        for action in actions:
            action.change_universe(universe, render)
        screen.fill((255, 255, 255))
        render.draw(universe.surface_altitudes)
        render.draw_hero(universe.hero)  # NEW METHOD
        pygame.display.flip()
        universe.update()
        # render_universe()
        clock.tick(settings.FPS)
def get_translation_response(self, from_lang_number, to_lang_number, word):
    """ Gets the translation response for the word from the original language to the target language """
    from_language = self.languages[from_lang_number]
    to_language = self.languages[to_lang_number]
    return Translator(from_language, to_language).translate(word)
def cancel_request(self):
    # set gzip header
    cherrypy.response.headers['Content-Type'] = 'application/gzip'
    # create the return tuple
    return_tuple = {}
    return_tuple['warning'] = ""
    return_tuple['error'] = ""
    translator = Translator(Config().get_param("default_language"))
    # parse json encoded input
    options = helper.convert_dict_values_to_utf8(cherrypy.request.json)
    # create session id
    if options.has_key("session_id") == False:
        return_tuple['error'] = translator.translate("message", "no_session_id_option")
        return helper.zip_data(return_tuple)
    Config().query_removement_of_session_id(options['session_id'])
    print "cancel session id %s" % options['session_id']
    return helper.zip_data(return_tuple)
def on_Whatson_response(self, response, tv_channel, irc_channel, user, url, next):
    # log.msg('{response}'.format(response=response))
    program = scrape_tv_schedule(response, tv_channel)
    if not program:
        self._parent.say(irc_channel, u'\x033No schedule found at tv.so-net.ne.jp...'.encode('utf-8'))
        return
    if next:
        # TODO: figure out the time the current program ends
        # this will be of the form '6:00 ~ 7:00' in the response.
        # in that case i'd just want '7:00' which i can turn into a datetime
        # Then we initiate another lookup with that time.
        next_program_time = program.end_time
        self.initiate_program_lookup(next_program_time, tv_channel, irc_channel, user, False)
        return
    # blurb = u'{program}\x033{url}'.format(program=unicode(program), url=url)
    Translator.translate(program.name, self.on_translation, self.on_translation_error,
                         tv_channel=tv_channel, irc_channel=irc_channel,
                         program=program, user=user, url=url)
class DocDocument(Document):
    def _get_text(self, file):
        text = subprocess.check_output([antiword, "-x", "db", file])
        return text

    def translate(self, config):
        config['outputFormat']['inputTextEncoding'] = "UTF8"
        self.translator = Translator(config)
        result = self._get_text(self.input_file)
        self.braille_text = self.translator.translate_string(result)
def __init__(self, debug=False):
    """Launch GUI"""
    self.debug = debug
    self.app = QApplication(sys.argv)
    self.translator = Translator()
    self.app.installTranslator(self.translator.getGeneric())
    self.app.installTranslator(self.translator.getDFF())
    self.app.setApplicationName("Digital Forensics Framework")
    self.app.setApplicationVersion("1.2.0")
    pixmap = QPixmap(":splash.png")
    self.splash = SplashScreen(pixmap, Qt.WindowStaysOnTopHint, self.app.applicationVersion())
    self.splash.setMask(pixmap.mask())
class OdtDocument(Document):
    def _get_text(self, file):
        odhandler = ODF2XHTML(False, False)
        odhandler.elements[(TEXTNS, u"changed-region")] = (odhandler.s_ignorexml, None)
        try:
            result = odhandler.odf2xhtml(file).encode('UTF-8', 'xmlcharrefreplace')
        except:
            result = ""
        return result

    def translate(self, config):
        config['outputFormat']['inputTextEncoding'] = "UTF8"
        self.translator = Translator(config)
        result = self._get_text(self.input_file)
        self.braille_text = self.translator.translate_string(result)
class PdfDocument(Document):
    def _get_text(self, file):
        uri = "file://" + file
        document = Poppler.Document.new_from_file(uri, "")
        npages = document.get_n_pages()
        text = ""
        for p in range(0, npages):
            page = document.get_page(p)
            text += page.get_text()
        return text

    def translate(self, config):
        # FIXME: Check if poppler gives us always UTF-8 strings
        config['outputFormat']['inputTextEncoding'] = "UTF8"
        self.translator = Translator(config)
        text = self._get_text(self.input_file)
        self.braille_text = self.translator.translate_string(text)
        return
def test_EM_transitions(self):
    t = Translator()
    self.assertFalse(t.countef)  # empty object for counting mappings
    self.assertFalse(t.totalf)  # empty object
    t.train(english, foreign)
    self.assertTrue(t.transmissions)
def get_departures(self):
    # set gzip header
    cherrypy.response.headers['Content-Type'] = 'application/gzip'
    # create the return tuple
    return_tuple = {}
    return_tuple['departures'] = []
    return_tuple['warning'] = ""
    return_tuple['error'] = ""
    translator = Translator(Config().get_param("default_language"))
    # parse json encoded input
    options = helper.convert_dict_values_to_utf8(cherrypy.request.json)
    # user language
    language = ""
    if options.has_key("language") == True:
        language = options['language']
    # if the user sends a language which is not german, take the default language setting
    if language != "de":
        language = Config().get_param("default_language")
    # initialize the translator object with the user's chosen language
    translator = Translator(language)
    # check latitude, longitude and vehicles parameters
    try:
        lat = float(options['lat'])
    except KeyError as e:
        return_tuple['error'] = translator.translate("message", "no_latitude_value")
        return helper.zip_data(return_tuple)
    except ValueError as e:
        return_tuple['error'] = translator.translate("message", "no_latitude_value")
        return helper.zip_data(return_tuple)
    try:
        lon = float(options['lon'])
    except KeyError as e:
        return_tuple['error'] = translator.translate("message", "no_longitude_value")
        return helper.zip_data(return_tuple)
    except ValueError as e:
        return_tuple['error'] = translator.translate("message", "no_longitude_value")
        return helper.zip_data(return_tuple)
    try:
        vehicles = options['vehicles'].split("+")
    except KeyError as e:
        vehicles = []
    # get the nearest stations for this coordinates and take the first one
    gateway = JavaGateway(GatewayClient(port=Config().get_param("gateway_port")), auto_field=True)
    main_point = gateway.entry_point
    closest_stations_result = main_point.getNearestStations(
        geometry.convert_coordinate_to_int(lat),
        geometry.convert_coordinate_to_int(lon))
    if closest_stations_result.status.toString() == "INVALID_STATION":
        return_tuple['error'] = translator.translate("message", "no_station_for_this_coordinates")
        return helper.zip_data(return_tuple)
    if closest_stations_result.status.toString() == "SERVICE_DOWN":
        return_tuple['error'] = translator.translate("message", "bahn_server_down")
        return helper.zip_data(return_tuple)
    if closest_stations_result.locations == None or len(closest_stations_result.locations) == 0:
        return_tuple['error'] = translator.translate("message", "no_station_for_this_coordinates")
        return helper.zip_data(return_tuple)
    # get departures for station
    sfinder = StationFinder(translator)
    station = sfinder.choose_station_by_vehicle_type(closest_stations_result.locations, lat, lon, vehicles)
    departures_result = main_point.getDepartures(station.id)
    date_format = gateway.jvm.java.text.SimpleDateFormat("HH:mm", gateway.jvm.java.util.Locale.GERMAN)
    for station_departure in departures_result.stationDepartures:
        for departure in station_departure.departures:
            try:
                dep_entry = {}
                dep_entry['nr'] = "%s%s" % (departure.line.product.code, departure.line.label)
                dep_entry['to'] = departure.destination.name
                dep_entry['time'] = date_format.format(departure.plannedTime)
                # remaining time
                duration = departure.plannedTime.getTime()/1000 - int(time.time())
                minutes, seconds = divmod(duration, 60)
                dep_entry['remaining'] = minutes
                return_tuple['departures'].append(dep_entry)
            except Exception as e:
                pass
    # convert return_tuple to json and zip it, before returning
    return helper.zip_data(return_tuple)
def get_poi(self):
    # set gzip header
    cherrypy.response.headers['Content-Type'] = 'application/gzip'
    # create the return tuple
    return_tuple = {}
    return_tuple['poi'] = []
    return_tuple['warning'] = ""
    return_tuple['error'] = ""
    translator = Translator(Config().get_param("default_language"))
    # parse json encoded input
    options = helper.convert_dict_values_to_utf8(cherrypy.request.json)
    # user language
    language = ""
    if options.has_key("language") == True:
        language = options['language']
    # if the user sends a language which is not german, take the default language setting
    if language != "de":
        language = Config().get_param("default_language")
    # initialize the translator object with the user's chosen language
    translator = Translator(language)
    # check latitude, longitude and radius input
    try:
        lat = float(options['lat'])
    except KeyError as e:
        return_tuple['error'] = translator.translate("message", "no_latitude_value")
        return helper.zip_data(return_tuple)
    except ValueError as e:
        return_tuple['error'] = translator.translate("message", "no_latitude_value")
        return helper.zip_data(return_tuple)
    try:
        lon = float(options['lon'])
    except KeyError as e:
        return_tuple['error'] = translator.translate("message", "no_longitude_value")
        return helper.zip_data(return_tuple)
    except ValueError as e:
        return_tuple['error'] = translator.translate("message", "no_longitude_value")
        return helper.zip_data(return_tuple)
    try:
        radius = int(options['radius'])
    except KeyError as e:
        return_tuple['error'] = translator.translate("message", "no_range_value")
        return helper.zip_data(return_tuple)
    except ValueError as e:
        return_tuple['error'] = translator.translate("message", "no_range_value")
        return helper.zip_data(return_tuple)
    # tags and search
    # tag list
    if options.has_key("tags") == False:
        return_tuple['error'] = translator.translate("message", "no_tags_value")
        return helper.zip_data(return_tuple)
    if options['tags'] == "":
        return_tuple['error'] = translator.translate("message", "no_tags_value")
        return helper.zip_data(return_tuple)
    tag_list = options['tags'].split("+")
    # search
    try:
        search = options['search']
    except KeyError as e:
        search = ""
    # create session id
    if options.has_key("session_id") == False:
        return_tuple['error'] = translator.translate("message", "no_session_id_option")
        return helper.zip_data(return_tuple)
    session_id = options['session_id']
    # try to cancel prior request
    if Config().clean_old_session(session_id) == False:
        return_tuple['error'] = translator.translate("message", "old_request_still_running")
        return helper.zip_data(return_tuple)
    if Config().number_of_session_ids() == Config().get_param("thread_pool") - 1:
        return_tuple['error'] = translator.translate("message", "server_busy")
        return helper.zip_data(return_tuple)
    Config().add_session_id(session_id)
    # get poi
    poi = POI(session_id, translator)
    poi_list = poi.get_poi(lat, lon, radius, tag_list, search)
    if poi_list == None:
        Config().confirm_removement_of_session_id(session_id)
        return_tuple['poi'] = []
        return_tuple['error'] = translator.translate("message", "process_canceled")
        return helper.zip_data(return_tuple)
    # convert return_tuple to json and zip it, before returning
    return_tuple['poi'] = poi_list
    Config().confirm_removement_of_session_id(session_id)
    return helper.zip_data(return_tuple)
def get_transport_routes(self):
    # set gzip header
    cherrypy.response.headers['Content-Type'] = 'application/gzip'
    # create the return tuple
    return_tuple = {}
    return_tuple['transport_routes'] = {}
    return_tuple['warning'] = ""
    return_tuple['error'] = ""
    translator = Translator(Config().get_param("default_language"))
    # parse json encoded input
    input = helper.convert_dict_values_to_utf8(cherrypy.request.json)
    # options object
    if input.has_key("options") == False:
        return_tuple['error'] = translator.translate("message", "no_route_options")
        return helper.zip_data(return_tuple)
    elif type(input['options']) != type({}):
        return_tuple['error'] = translator.translate("message", "no_route_options")
        return helper.zip_data(return_tuple)
    options = input['options']
    # user language
    language = ""
    if options.has_key("language") == True:
        language = options['language']
    # if the user sends a language which is not german, take the default language setting
    if language != "de":
        language = Config().get_param("default_language")
    # initialize the translator object with the user's chosen language
    translator = Translator(language)
    # source route
    if input.has_key("source_route") == False:
        return_tuple['error'] = translator.translate("message", "no_source_route")
        return helper.zip_data(return_tuple)
    elif type(input['source_route']) != type([]):
        return_tuple['error'] = translator.translate("message", "no_source_route")
        return helper.zip_data(return_tuple)
    elif input['source_route'].__len__() < 3:
        return_tuple['error'] = translator.translate("message", "source_route_incomplete")
        return helper.zip_data(return_tuple)
    source_route = input['source_route']
    # check if route is valid
    index = 0
    number_of_transport_parts = 0
    for part in source_route:
        if part['type'] in ["way_point", "intersection", "poi", "station"]:
            index += 1
        elif part['type'] in ["footway", "transport"]:
            index -= 1
            if part['sub_type'] == "transport_place_holder":
                number_of_transport_parts += 1
        else:
            index = -1
            break
    if index != 1:
        return_tuple['error'] = translator.translate("message", "source_route_incomplete")
        return helper.zip_data(return_tuple)
    if number_of_transport_parts == 0:
        return_tuple['error'] = translator.translate("message", "source_route_no_transport_parts")
        return helper.zip_data(return_tuple)
    if number_of_transport_parts > 1:
        return_tuple['error'] = translator.translate("message", "source_route_multiple_transport_parts")
        return helper.zip_data(return_tuple)
    # further options
    if options.has_key("number_of_possible_routes") == False:
        options['number_of_possible_routes'] = 3
    # create session id
    if options.has_key("session_id") == False:
        return_tuple['error'] = translator.translate("message", "no_session_id_option")
        return helper.zip_data(return_tuple)
    session_id = options['session_id']
    # try to cancel prior request
    if Config().clean_old_session(session_id) == False:
        return_tuple['error'] = translator.translate("message", "old_request_still_running")
        return helper.zip_data(return_tuple)
    if Config().number_of_session_ids() == Config().get_param("thread_pool") - 1:
        return_tuple['error'] = translator.translate("message", "server_busy")
        return helper.zip_data(return_tuple)
    Config().add_session_id(session_id)
    # create route logger object
    route_logger = RouteLogger("routes", "public_transport---%s---%s"
                               % (source_route[0]['name'], source_route[-1]['name']))
    # parse route parts
    rtc = RouteTransportCreator(session_id, route_logger, translator)
    for i in range(1, source_route.__len__(), 2):
        if source_route[i]['type'] == "footway" \
                and source_route[i]['sub_type'] == "transport_place_holder":
            result = rtc.find_best_transport_routes(source_route[i-1], source_route[i+1],
                                                    options['number_of_possible_routes'])
            return_tuple['transport_routes'] = result.routes
            pre_source_route = source_route[0:i-1]
            post_source_route = source_route[i+2:source_route.__len__()]
            break
    if return_tuple['transport_routes'] == None:
        Config().confirm_removement_of_session_id(session_id)
        route_logger.append_to_log("\n----- result -----\ncanceled")
        return_tuple['transport_routes'] = []
        return_tuple['error'] = translator.translate("message", "process_canceled")
        return helper.zip_data(return_tuple)
    for key in return_tuple['transport_routes'].keys():
        serializable_list = []
        for route in return_tuple['transport_routes'][key]:
            route.route = pre_source_route + route.route + post_source_route
            serializable_list.append(route.__dict__)
        return_tuple['transport_routes'][key] = serializable_list
    f = open("/tmp/tr_routes.json", "w")
    f.write(json.dumps(return_tuple['transport_routes'], indent=4, encoding="utf-8"))
    f.close()
    # convert return_tuple to json and zip it, before returning
    Config().confirm_removement_of_session_id(session_id)
    return helper.zip_data(return_tuple)
def get_route(self):
    # set gzip header
    cherrypy.response.headers['Content-Type'] = 'application/gzip'
    # create the return tuple
    return_tuple = {}
    return_tuple['route'] = []
    return_tuple['warning'] = ""
    return_tuple['error'] = ""
    translator = Translator(Config().get_param("default_language"))
    # parse json encoded input
    input = helper.convert_dict_values_to_utf8(cherrypy.request.json)
    # options object
    if input.has_key("options") == False:
        return_tuple['error'] = translator.translate("message", "no_route_options")
        return helper.zip_data(return_tuple)
    elif type(input['options']) != type({}):
        return_tuple['error'] = translator.translate("message", "no_route_options")
        return helper.zip_data(return_tuple)
    options = input['options']
    # user language
    language = ""
    if options.has_key("language") == True:
        language = options['language']
    # if the user sends a language which is not german, take the default language setting
    if language != "de":
        language = Config().get_param("default_language")
    # initialize the translator object with the user's chosen language
    translator = Translator(language)
    # source route
    if input.has_key("source_route") == False:
        return_tuple['error'] = translator.translate("message", "no_source_route")
        return helper.zip_data(return_tuple)
    elif type(input['source_route']) != type([]):
        return_tuple['error'] = translator.translate("message", "no_source_route")
        return helper.zip_data(return_tuple)
    elif input['source_route'].__len__() < 3:
        return_tuple['error'] = translator.translate("message", "source_route_incomplete")
        return helper.zip_data(return_tuple)
    source_route = input['source_route']
    # check if route is valid
    index = 0
    for part in source_route:
        if part['type'] in ["way_point", "intersection", "poi", "station"]:
            index += 1
            if part.has_key("turn"):
                part.__delitem__("turn")
                print "turn deleted"
        elif part['type'] in ["footway", "transport"]:
            index -= 1
        else:
            index = -1
            break
    if index != 1:
        return_tuple['error'] = translator.translate("message", "source_route_incomplete")
        return helper.zip_data(return_tuple)
    # route factor
    if options.has_key("route_factor") == False:
        return_tuple['error'] = translator.translate("message", "no_route_factor_option")
        return helper.zip_data(return_tuple)
    # allowed way classes
    if options.has_key("allowed_way_classes") == False:
        allowed_way_classes = ["big_streets", "small_streets", "paved_ways",
                               "unpaved_ways", "unclassified_ways", "steps"]
    else:
        allowed_way_classes = options['allowed_way_classes']
    # blocked way ids
    blocked_ways = []
    if options.has_key("blocked_ways") == True:
        for id in options['blocked_ways'].split(","):
            try:
                blocked_ways.append(int(id))
            except ValueError as e:
                pass
    # create session id
    if options.has_key("session_id") == False:
        return_tuple['error'] = translator.translate("message", "no_session_id_option")
        return helper.zip_data(return_tuple)
    session_id = options['session_id']
    # try to cancel prior request, if necessary
    if Config().clean_old_session(session_id) == False:
        return_tuple['error'] = translator.translate("message", "old_request_still_running")
        return helper.zip_data(return_tuple)
    # this code is only reached, if the prior session was canceled successfully
    if Config().number_of_session_ids() == Config().get_param("thread_pool") - 1:
        return_tuple['error'] = translator.translate("message", "server_busy")
        return helper.zip_data(return_tuple)
    Config().add_session_id(session_id)
    # create route logger object and append the source route
    route_logger = RouteLogger("routes", "%s---%s" % (source_route[0]['name'], source_route[-1]['name']))
    route_logger.append_to_log("\n----- start of source route -----")
    route_logger.append_to_log(
        json.dumps(source_route, indent=4, encoding="utf-8")
        + "\n----- end of source route -----\n")
    # get a route
    rfc = RouteFootwayCreator(session_id, route_logger, translator,
                              options['route_factor'], allowed_way_classes, blocked_ways)
    for i in range(1, source_route.__len__(), 2):
        if source_route[i]['type'] == "footway" and source_route[i]['sub_type'] == "footway_place_holder":
            try:
                route_part = rfc.find_footway_route(source_route[i-1], source_route[i+1])
            except RouteFootwayCreator.FootwayRouteCreationError as e:
                Config().confirm_removement_of_session_id(session_id)
                route_logger.append_to_log("\n----- result -----\ncanceled")
                return_tuple['route'] = []
                return_tuple['error'] = "%s" % e
                return helper.zip_data(return_tuple)
            if return_tuple['route'].__len__() > 0:
                route_part.__delitem__(0)
            return_tuple['route'] += route_part
        else:
            if return_tuple['route'].__len__() == 0:
                return_tuple['route'].append(source_route[i-1])
            return_tuple['route'].append(source_route[i])
            return_tuple['route'].append(source_route[i+1])
    # delete start point and first route segment, if it's a nameless one, just added as place holder
    if return_tuple['route'].__len__() >= 3 \
            and return_tuple['route'][1].has_key("sub_type") \
            and return_tuple['route'][1]['sub_type'] == "":
        print "deleted placeholder start segment %s" % return_tuple['route'][1]
        return_tuple['route'].__delitem__(0)
        return_tuple['route'].__delitem__(0)
    # check for missing turn values at intersections and poi
    # for example this can happen, if an intersection is an intermediate destination of a source route
    for i in range(2, return_tuple['route'].__len__()-2, 2):
        if return_tuple['route'][i].has_key("turn") == False \
                and return_tuple['route'][i-1].has_key("bearing") == True \
                and return_tuple['route'][i+1].has_key("bearing") == True:
            return_tuple['route'][i]['turn'] = geometry.turn_between_two_segments(
                return_tuple['route'][i+1]['bearing'],
                return_tuple['route'][i-1]['bearing'])
            print "new turn in webserver = %d" % return_tuple['route'][i]['turn']
    return_tuple['description'] = rfc.get_route_description(return_tuple['route'])
    route_logger.append_to_log("\n----- start of result route -----")
    route_logger.append_to_log(
        json.dumps(return_tuple['route'], indent=4, encoding="utf-8")
        + "\n----- end of result route -----\n")
    # delete session id
    Config().confirm_removement_of_session_id(session_id)
    # convert return_tuple to json and zip it, before returning
    return helper.zip_data(return_tuple)
from translator import Translator
from bing_translator import BingTranslator
import redis
import os

REDIS_HOST = os.environ.get("REDIS_HOST", "localhost")
REDIS_PORT = os.environ.get("REDIS_PORT", "6379")
TRANSLATE_ENABLED = bool(os.environ.get("TRANSLATION_ENABLED", False))
TARGET_TRANSLATION_LANG = 'de'

gig_redis = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=0)
gig_translator = Translator()
gig_translator.set_translator(BingTranslator())
gig_translator.set_target_translation_language(TARGET_TRANSLATION_LANG)

def build_localizable_key(untranslated, target_translation_lang):
    return 'giglocalizable%%%' + untranslated + '%%%' + target_translation_lang

def translate(untranslated):
    key = build_localizable_key(untranslated, TARGET_TRANSLATION_LANG)
    if not TRANSLATE_ENABLED:
        return untranslated
    # If redis contains the localized string in the target language, return it.
    try:
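        # A hedged sketch of how the cache lookup might continue from this point;
        # gig_redis.get()/set() come from redis-py, while gig_translator.translate()
        # is an assumed method of this project's Translator, not shown above.
        cached = gig_redis.get(key)
        if cached is not None:
            return cached.decode('utf-8')
    except redis.RedisError:
        pass
    # Otherwise translate and cache the result (assumed behaviour).
    translated = gig_translator.translate(untranslated)
    try:
        gig_redis.set(key, translated)
    except redis.RedisError:
        pass
    return translated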
import parser, re, json
from translator import Translator, is_function

def rename_type(name):
    return re.sub(r"^snd_", r"", name)

env = parser.default_env()
parser.parse(env, ['/usr/include/alsa/asoundlib.h'])
translate = Translator(env, rename_type)
translate.blacklist.update([
    'pollfd',      # recursive rule
    '_IO_marker',  # recursive rule
    '_IO_FILE',    # recursive rule
])
translate.bluelist.update({
    'snd_seq_real_time': 'seq_real_time',
    'snd_dlsym_link': 'dlsym_link',
})

constants = {}
variables = {}
for name, value in env.constants.iteritems():
    if re.match(r'^SND_\w', name):
        if isinstance(value, (int, str)):
            name = re.sub(r"^SND_", r"", name)
            constants[name] = value

for cname in env.names:
    if re.match(r'^snd_\w', cname):
        typespec = translate.declarator(env.names[cname])
def translate(self, name, content):
    t = Translator(ts=self)
    return t.translate(name, content)

# Turn PSP source to Python source
class Bottlenose:
    def __init__(self, bootstrapVocabulary=False):
        Concept(bootstrapVocabulary=bootstrapVocabulary)
        self._contexts = [Context()]
        self._context = self._contexts[0]
        self._translator = Translator()
        self._interpreter = Interpreter(self._context)

    def tell(self, input):
        JSON = self._translator.visit(grammar.parse(input))
        return self.tellJSON(JSON)

    def tellJSON(self, JSON):
        results = self._interpreter.interpret(JSON)
        self._context.ponderRecentMentions()
        if isinstance(results, set) or isinstance(results, list):
            objects = list()
            for result in results:
                objects.append(BottlenoseObject(result, self._context))
            return objects
        elif not results:
            return None
        else:
            return [BottlenoseObject(results, self._context)]

    def ask(self, subject, clause=None):
        query = "?" + subject
        if clause:
            query += "(" + clause + ")"
        return self.tell(query)

    def context(self):
        return self._context

    def listContexts(self):
        return self._contexts

    def setContext(self, index):
        if index >= 0 and index < len(self._contexts):
            self._context = self._contexts[index]
            self._interpreter.setContext(self._contexts[index])

    def loadFile(self, filePath, onlyBeliefs=False, onlyStatements=False):
        file = open(filePath, 'r')
        for line in file:
            line = line.rstrip("\n")
            JSON = self._translator.visit(grammar.parse(line))
            if 'statement' in JSON:
                if not onlyBeliefs:
                    self.tellJSON(JSON)
            else:
                if not onlyStatements:
                    self.tellJSON(JSON)

    def loadDirectory(self, dirPath):
        filePaths = []
        for root, dirnames, filenames in os.walk(dirPath):
            for filename in fnmatch.filter(filenames, '*.bottle'):
                filePaths.append(os.path.join(root, filename))
        for filePath in filePaths:
            self.loadFile(filePath, onlyBeliefs=True)
        for filePath in filePaths:
            self.loadFile(filePath, onlyStatements=True)
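# A minimal usage sketch, not part of the original snippet: load a directory of
# .bottle files and query a concept. The directory "knowledge/" and the subject
# "dog" are hypothetical placeholders; whether they parse depends on the
# project's grammar, which is not shown here.
bn = Bottlenose(bootstrapVocabulary=True)
bn.loadDirectory("knowledge/")
for obj in bn.ask("dog") or []:
    print(obj)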
def __init__(self, bootstrapVocabulary=False):
    Concept(bootstrapVocabulary=bootstrapVocabulary)
    self._contexts = [Context()]
    self._context = self._contexts[0]
    self._translator = Translator()
    self._interpreter = Interpreter(self._context)
def translate(self, config):
    self.translator = Translator(config)
    if self.input_file is not None:
        self.braille_text = self.translator.translate_file(self.input_file)
    else:
        self.braille_text = self.translator.translate_string(self.text)
def follow_this_way(self):
    # set gzip header
    cherrypy.response.headers['Content-Type'] = 'application/gzip'
    # create the return tuple
    return_tuple = {}
    return_tuple['route'] = []
    return_tuple['warning'] = ""
    return_tuple['error'] = ""
    translator = Translator(Config().get_param("default_language"))
    # parse json encoded input
    input = helper.convert_dict_values_to_utf8(cherrypy.request.json)
    # options
    if input.has_key("options") == False:
        return_tuple['error'] = translator.translate("message", "no_route_options")
        return helper.zip_data(return_tuple)
    elif type(input['options']) != type({}):
        return_tuple['error'] = translator.translate("message", "no_route_options")
        return helper.zip_data(return_tuple)
    options = input['options']
    # user language
    language = ""
    if options.has_key("language") == True:
        language = options['language']
    # if the user sends a language which is not german, take the default language setting
    if language != "de":
        language = Config().get_param("default_language")
    # initialize the translator object with the user's chosen language
    translator = Translator(language)
    # start point
    if input.has_key("start_point") == False:
        return_tuple['error'] = translator.translate("message", "no_start_point")
        return helper.zip_data(return_tuple)
    start_point = input['start_point']
    if start_point.has_key("name") == False:
        return_tuple['error'] = translator.translate("message", "start_point_no_name")
        return helper.zip_data(return_tuple)
    elif start_point.has_key("lat") == False:
        return_tuple['error'] = translator.translate("message", "start_point_no_latitude")
        return helper.zip_data(return_tuple)
    elif start_point.has_key("lon") == False:
        return_tuple['error'] = translator.translate("message", "start_point_no_longitude")
        return helper.zip_data(return_tuple)
    elif start_point.has_key("type") == False:
        return_tuple['error'] = translator.translate("message", "start_point_no_type")
        return helper.zip_data(return_tuple)
    # further options
    if options.has_key("way_id") == False:
        return_tuple['error'] = translator.translate("message", "no_way_id")
        return helper.zip_data(return_tuple)
    if options.has_key("bearing") == False:
        return_tuple['error'] = translator.translate("message", "no_bearing_value")
        return helper.zip_data(return_tuple)
    add_all_intersections = False
    if options.has_key("add_all_intersections") == True:
        if options['add_all_intersections'] == "yes":
            add_all_intersections = True
    way = DBControl().fetch_data("SELECT nodes from ways where id = %d" % options['way_id'])
    if way.__len__() == 0:
        return_tuple['error'] = translator.translate("message", "way_id_invalid")
        return helper.zip_data(return_tuple)
    # create session id
    if options.has_key("session_id") == False:
        return_tuple['error'] = translator.translate("message", "no_session_id_option")
        return helper.zip_data(return_tuple)
    session_id = options['session_id']
    # try to cancel prior request
    if Config().clean_old_session(session_id) == False:
        return_tuple['error'] = translator.translate("message", "old_request_still_running")
        return helper.zip_data(return_tuple)
    if Config().number_of_session_ids() == Config().get_param("thread_pool") - 1:
        return_tuple['error'] = translator.translate("message", "server_busy")
        return helper.zip_data(return_tuple)
    Config().add_session_id(session_id)
    # get a route
    route_logger = RouteLogger("routes", "%s---way_id.%s" % (start_point['name'], options['way_id']))
    rfc = RouteFootwayCreator(session_id, route_logger, translator, 1.0,
                              ["big_streets", "small_streets", "paved_ways", "unpaved_ways",
                               "unclassified_ways", "steps"], [])
    try:
        route = rfc.follow_this_way(start_point, options['way_id'],
                                    options['bearing'], add_all_intersections)
    except RouteFootwayCreator.FootwayRouteCreationError as e:
        route_logger.append_to_log("\n----- result -----\ncanceled")
        Config().confirm_removement_of_session_id(session_id)
        return_tuple['route'] = []
        return_tuple['error'] = "%s" % e
        return helper.zip_data(return_tuple)
    # return calculated route
    return_tuple['route'] = route
    return_tuple['description'] = rfc.get_route_description(return_tuple['route'])
    route_logger.append_to_log("\n----- result -----\n")
    route_logger.append_to_log(
        json.dumps(return_tuple['route'], indent=4, encoding="utf-8")
        + "\n----- end of route -----\n")
    # convert return_tuple to json and zip it, before returning
    Config().confirm_removement_of_session_id(session_id)
    return helper.zip_data(return_tuple)
def translate(self, config):
    config['outputFormat']['inputTextEncoding'] = "UTF8"
    self.translator = Translator(config)
    result = self._get_text(self.input_file)
    self.braille_text = self.translator.translate_string(result)
#!/usr/bin/python3
import argparse

from translator import Translator

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('word',
                        action='store',
                        help="word that will be translated")
    parser.add_argument('-d', '--details',
                        action='store_true',
                        help='show detailed information about the translation',
                        default=False)
    params = parser.parse_args()
    word = params.word
    details = params.details

    t = Translator(word)
    t.show(details)