def main(sequence, input_file=None):
    """Print the protein translation of a sequence, or of each line of a file.

    Args:
        sequence: a nucleotide sequence string (used when no file is given).
        input_file: optional path to a file with one sequence per line.
    """
    if input_file:
        with open(input_file) as f:
            lines = f.readlines()
            # Plain loop instead of the original lambda assignment plus a
            # list comprehension used only for its side effects.
            for line in lines:
                print(Translator(line.strip()).to_protein)
    elif sequence:
        print(Translator(sequence).to_protein)
    else:
        click.echo('Missing argument. Please run --help.')
def main():
    """Loop forever: OCR images under Pictures/pic/ and pipe the text to tranen."""
    tran = Translator(
        targetLang='zh-CN',
        host="https://translate.google.cn/",
        proxy=None,
        timeout=2,
    )
    while True:
        try:
            # Base directory is the home directory; the pic2char argument
            # is a folder under $HOME.  (Translated from the original
            # Chinese comment.)
            print('Waiting...')
            data = tran.pic2char("Pictures/pic/")
        except Exception as e:
            print('Exception: ', e)
            sys.exit()
        data = data.replace('\n', ' ')
        # Truthiness check replaces the unidiomatic `not len(data) == 0`.
        if data and not data.isspace():
            pass  # cprint(data, 'yellow')
        else:
            print('No string found')
            print('Wait for another...')
            continue
        # NOTE(review): `data` is interpolated into a shell command unescaped;
        # consider shlex.quote(data) or subprocess.run([...], shell=False).
        os.system('/usr/bin/tranen ' + data)
def test(model, dataloader, target_lang='chinese', max_steps=None, verbose=True):
    """Decode the dataloader with the model and return the corpus BLEU score."""
    dataloader.shuffle(1024)
    translator = Translator(model, 'no-bpe')
    hypotheses, references = [], []
    # The source language is simply the opposite of the target language.
    source_lang = 'english' if target_lang == 'chinese' else 'chinese'
    for step, (src, tgt) in tqdm(enumerate(dataloader)):
        if max_steps and step >= max_steps:
            break
        src_ids = src.tolist()[0]
        tgt_ids = tgt.tolist()[0]
        try:
            pred_ids = translator.translate(src_ids)
            src_sentence = index2sentence(src_ids, source_lang)
            tgt_sentence = index2sentence(tgt_ids, target_lang)
            pred_sentence = index2sentence(pred_ids, target_lang)
            hypotheses.append(pred_sentence)
            references.append(tgt_sentence)
            if verbose:
                print()
                print('src', src_sentence)
                print('tgt', tgt_sentence)
                print('pred', pred_sentence)
        except Exception as e:
            print('failed to predict due to error:', e)
    return sacrebleu.corpus_bleu(hypotheses, [references])
def main(mulp=False):
    """Translate each paragraph of the listed .docx files and save the results.

    Args:
        mulp: when True, run the translations through a process pool.
    """
    path = r'C:\Users\XL\Documents\GitHub\GoogleTranslator'.replace('\\', '/')
    file_list = ['t_3. Manuscript.docx', 't_4. Manuscript.docx']
    translator = Translator()
    for f in file_list:
        file = os.path.join(path, f)
        file_content = read_file(file)
        # Map paragraph index -> paragraph text so order survives translation.
        dict_content = dict(enumerate(file_content))
        if mulp:
            content = {}
            pool = Pool(10)
            # Submit every job first, then collect the results.  The original
            # called .get() immediately after each apply_async, which blocks
            # per job and serializes the whole pool.
            jobs = [
                pool.apply_async(translate,
                                 ({i: j}, translator, len(dict_content)))
                for i, j in dict_content.items()
            ]
            pool.close()
            pool.join()
            for job in jobs:
                content.update(job.get())
        else:
            content = translate(dict_content, translator)
        save_file(content, dict_content)
def main(args):
    """Translate one .vm file or a directory of .vm files into one .asm file.

    args[1] is the input path.  For a directory the output is
    <dir>/<dirname>.asm and the files come from parse_dir; for a single
    file the ".vm" suffix is swapped for ".asm".
    """
    filenames = []
    is_sys_present = False
    path = os.path.abspath(args[1])
    if os.path.isdir(path):
        additional, is_sys_present = parse_dir(path)
        filenames.extend(additional)
        output = os.path.join(path, f"{os.path.basename(path)}.asm")
    else:
        filenames.append(path)
        # Assumes the path ends in ".vm" (3 characters stripped).
        output = path[:-3] + ".asm"
        if os.path.basename(path) == 'Sys.vm':
            is_sys_present = True
    translator = Translator("")
    with open(output, 'w') as output_file:
        # Emit the bootstrap only when a Sys.vm is part of the input.
        if is_sys_present:
            output_file.write(translator.bootstrap())
        for filename in filenames:
            translator.filename = os.path.basename(filename[:-3])
            # NOTE(review): in the single-file case `filename` is absolute, so
            # os.path.join(path, filename) resolves to `filename` itself.
            with open(os.path.join(path, filename)) as input_file:
                for line in input_file:
                    # line[:-1] drops the trailing newline before interpreting.
                    translated = translator.interpret(line[:-1])
                    output_file.write(translated)
        output_file.write(translator.finish())
def getUserTyp(type, name):
    """Build the user object matching the given type string.

    Returns a User, Translator or Admin named `name`, or None for an
    unrecognised type string.
    """
    if type == 'user':
        return User(name)
    elif type == 'translator':
        return Translator(name)
    elif type == 'admin':
        return Admin(name)
    return None
def test():
    """Evaluate DNSSEC support on the local resolvers plus a few public ones."""
    debug = False
    local_resolvers = DNSSEC_resolver_check.get_local_resolvers()
    print("local resolvers = %s" % (local_resolvers, ))
    checker = DNSSEC_resolver_check()
    checker.set_debug(debug)
    translator = Translator(debug=debug)

    def evaluate(nameserver, comment):
        # Run the checks and print a human-readable translation of the result.
        results = checker.evaluate_resolver(nameserver, comment)
        translation = translator.translate(results)
        print("%s: %s - %s" % (
            nameserver,
            results,
            translation,
        ))

    for resolver in local_resolvers:
        evaluate(resolver, "from ogud")
    for public_resolver in ('8.8.8.8', '8.8.4.4', '4.2.2.2'):
        evaluate(public_resolver, "from bob")
    # evaluate('192.168.1.5', "from bob")
    squery = Squery()
    ip_ns1_shkx_org = squery.addr_lookup('8.8.8.8', 'ns1.shkx.org')
    evaluate(ip_ns1_shkx_org, "from bob")
def do_eval(resolv, msg, long_report): print resolv, msg gr = DNSSEC_resolver_check().evaluate_resolver(resolv, msg) tr = Translator().translate(gr) if (long_report == True): print "Eval: ", resolv, " Tests=", gr, "Result=", tr else: print "Result: ", resolv, " ", tr
def checkLanguageStringToAllFiles(key):
    """Report, for every supported language, whether `key` has a translation."""
    codes = LanguageUtil.getCodesSupportedLanguages()
    # The original built an unused Translator() here; removed.
    for code in codes:
        translation = LanguageUtil.loadTranslation(code)
        # Membership test directly on the dict; .keys() is redundant.
        if key in translation:
            # Message typo fixed: "Founded" -> "Found".
            print("Found '{0}' in {1}".format(translation[key], code))
        else:
            print("Not found in {0}".format(code))
def __init__(self):
    """Set up the classifier, the translator and the listening TCP socket."""
    self._classifier = Classifier()
    self._translator = Translator()
    # Create a TCP/IP socket and bind it to the configured endpoint.
    self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self._socket.bind((SERVER_NAME, PORT))
    # (pointless bare `return` at the end of __init__ removed)
def run(self):
    """Parse the config file, translate it into blocks and store them on self."""
    config_parser = ConfigParser(self.filename)
    translator = Translator(config_parser.configContent)
    translator.translate()
    self.blocks = translator.getBlocks()
def __init__(self):
    """Initialise the translator and the empty collections used by this object."""
    self.translator = Translator()
    # Geometric objects, keyed by identifier.
    self.points = {}
    self.segments = {}
    self.circles = {}
    # Current operations and the full history of all operations.
    self.operations = []
    self.alloperations = []
    # Known congruent-segment relations.
    self.CongruentSegments = {}
    # Points whose positions depend on other objects.
    self.dependpoints = {}
    # presumably an error code/tolerance default — TODO confirm meaning of 6
    self.error = 6
def translateTweet(self, sentence):
    """Translate a tweet to Chinese, normalising station and line words.

    Returns (replaced_tweet, translated_tweet); on failure the error is
    printed and the function falls through (returning None).
    """
    try:
        translator = Translator()
        replaced_tweet = self.replaceAbbsWords(sentence)
        chinese = translator.toChinese(replaced_tweet)
        chinese = self.replaceStationWords(chinese)
        translated_tweet = self.replaceTrainLineWords(chinese)
        return replaced_tweet, translated_tweet
    except Exception as e:
        print("Error in translateTweet: {}".format(e))
def main():
    '''Translate opt.src with a trained model, writing predictions to opt.output.'''
    parser = argparse.ArgumentParser(description='translate.py')

    parser.add_argument('-model', required=True,
                        help='Path to model .pt file')
    parser.add_argument('-src', required=True,
                        help='Source sequence to decode (one line per sequence)')
    # NOTE(review): help text below looks copy-pasted from -src — confirm.
    parser.add_argument('-vocab', required=True,
                        help='Source sequence to decode (one line per sequence)')
    parser.add_argument('-output', default='pred.txt',
                        help="""Path to output the predictions (each line will be the decoded sequence""")
    parser.add_argument('-beam_size', type=int, default=5,
                        help='Beam size')
    parser.add_argument('-batch_size', type=int, default=30,
                        help='Batch size')
    parser.add_argument('-n_best', type=int, default=1,
                        help="""If verbose is set, will output the n_best decoded sentences""")
    parser.add_argument('-no_cuda', action='store_true')

    opt = parser.parse_args()
    opt.cuda = not opt.no_cuda

    # Prepare DataLoader
    preprocess_data = torch.load(opt.vocab)
    preprocess_settings = preprocess_data['settings']
    # Tokenise the raw source file with the same settings used at preprocessing.
    test_src_word_insts = read_instances_from_file(
        opt.src,
        preprocess_settings.max_word_seq_len,
        preprocess_settings.keep_case)
    test_src_insts = convert_instance_to_idx_seq(
        test_src_word_insts, preprocess_data['dict']['src'])

    test_loader = torch.utils.data.DataLoader(
        TranslationDataset(
            src_word2idx=preprocess_data['dict']['src'],
            tgt_word2idx=preprocess_data['dict']['tgt'],
            src_insts=test_src_insts),
        num_workers=2,
        batch_size=opt.batch_size,
        collate_fn=collate_fn)

    translator = Translator(opt)

    with open(opt.output, 'w') as f:
        for batch in tqdm(test_loader, mininterval=2, desc=' - (Test)', leave=False):
            all_hyp, all_scores = translator.translate_batch(*batch)
            for idx_seqs in all_hyp:
                for idx_seq in idx_seqs:
                    # Map indices back to target-language words, one line
                    # per decoded hypothesis.
                    pred_line = ' '.join([test_loader.dataset.tgt_idx2word[idx] for idx in idx_seq])
                    f.write(pred_line + '\n')
    print('[Info] Finished.')
def translate(dict_content):
    """Translate each paragraph in dict_content, reporting progress.

    Paragraphs shorter than 7 characters are copied through untranslated.
    """
    content = {}
    translator = Translator()
    total = len(dict_content)
    for index, paragraph in dict_content.items():
        if len(paragraph) < 7:
            content[index] = paragraph
        else:
            translated = translator.translate(paragraph)
            print('Translate para %s/%s' % (index, total))
            content[index] = translated
    return content
def expertsystem(file):
    """Parse the rule file, validate it, answer every query, then go interactive."""
    parser = Parser(file)
    lexer = Lexer(parser.raw_rules)
    if not lexer.check():
        return
    translator = Translator(parser.raw_rules, parser.raw_facts,
                            parser.raw_queries)
    translator.translate()
    for query in translator.queries:
        print(query.solve())
    if config.interactive:
        # Keep reading input until process_input signals to stop.
        while process_input():
            pass
def translate(dict_content):
    """Return a copy of dict_content with its long paragraphs translated.

    Entries shorter than 7 characters pass through unchanged.
    """
    translator = Translator()
    result = {}
    for key, text in dict_content.items():
        if len(text) < 7:
            result[key] = text
        else:
            # print('size = ', len(con1))
            result[key] = translator.translate(text)
    return result
def translateTextIntoUserLanguage(self, text, userLanguage="en"):
    """Translate `text` into the user's language with IBM Watson and show it."""
    translator = Translator()
    if translator.language_translator is None:
        # Watson is not configured: tell the user and open the setup guide.
        self.parent.parent.displayMessage(
            config.thisTranslation["ibmWatsonNotEnalbed"])
        config.mainWindow.openWebsite(
            "https://github.com/eliranwong/UniqueBible/wiki/IBM-Watson-Language-Translator"
        )
    else:
        translation = translator.translate(text, None, userLanguage)
        self.openPopover(html=translation)
def main():
    """Translate the .vm file named in argv[1] into a sibling .asm file."""
    file_name = "".join(sys.argv[1].split(".")[:-1])
    ext = ".asm"
    with open(file_name + ext, "w") as write_file:
        with open(sys.argv[1], encoding="UTF-8") as read_file:
            # Strip any directory prefix (either separator) to get the base
            # name used for static-segment symbols.
            translator = Translator(file_name.split("\\")[-1].split("/")[-1])
            for line in read_file:
                print(f'Line {line}')
                # rstrip('\n') instead of line[:-1]: a final line without a
                # trailing newline would otherwise lose its last character.
                value = translator.interpret(line.rstrip('\n'))
                print(value)
                write_file.write(value)
            write_file.write(translator.finish())
def translate():
    """Translate all three datasets and return them (Python 2).

    The English training set is translated to Chinese; the Chinese
    unlabelled and validation sets are translated to English.
    """
    # Translate all datasets
    train_data_en, unlabel_data_cn, validation_data_cn = import_data()
    translator = Translator()
    train_data_cn = __translate_dataset(translator.en_to_cn, train_data_en)
    unlabel_data_en = __translate_dataset(translator.cn_to_en, unlabel_data_cn)
    validation_data_en = __translate_dataset(translator.cn_to_en, validation_data_cn)
    print "train_data_cn", train_data_cn
    print "unlabel_data_en", unlabel_data_en
    print " validation_data_en", validation_data_en
    return train_data_cn, unlabel_data_en, validation_data_en
def main():
    """Translate an EPUB piece by piece through DeepL and write a new EPUB."""
    parser = argparse.ArgumentParser(
        description='eels - to fill into your hovercraft',
        epilog=
        'Translates an eBook by passing it piece by piece through DeepL (deepl.com)'
    )
    parser.add_argument('-i', '--input', metavar='filename', default='test.epub',
                        help='ebook to translate')
    parser.add_argument('-il', '--inputLanguage', metavar='language', default='EN',
                        help='input language')
    parser.add_argument('-o', '--output', metavar='filename', default='output.epub',
                        help='output file')
    parser.add_argument('-ol', '--outputLanguage', metavar='language', default='DE',
                        help='output language')
    parser.add_argument('-t', '--throttle', metavar='seconds', type=int, default=1,
                        help='seconds to wait after each translation request')
    args = parser.parse_args()

    src = Importer(args.input)
    dest = Exporter(args.output)
    translator = Translator(args.outputLanguage, args.inputLanguage, args.throttle)
    for info in src.items:
        with src.open(info) as content:
            if info.filename.endswith('.html'):
                # Only HTML payloads are translated; everything else is copied.
                utf8Content = content.read().decode('utf-8')
                translation = translator.translateHTML(utf8Content)
                dest.add(info, translation)
            else:
                dest.add(info, content.read())
            # Redundant content.close() removed: the with-block closes it.
    dest.write()
def translateText(self):
    """Translate the editor's selected text with IBM Watson and insert the result.

    The source language comes from fromLanguageCombo (index 0 means
    auto-detect via translator.identify); the target from toLanguageCombo.
    """
    text = self.editor.textCursor().selectedText()
    if text:
        translator = Translator()
        if translator.language_translator is not None:
            # Combo index 0 is "auto"; otherwise the code list is offset by one.
            fromLanguage = Translator.fromLanguageCodes[self.fromLanguageCombo.currentIndex() - 1] if self.fromLanguageCombo.currentIndex() != 0 else translator.identify(text)
            toLanguage = Translator.toLanguageCodes[self.toLanguageCombo.currentIndex()]
            result = translator.translate(text, fromLanguage, toLanguage)
            # Replace the selection with the translation.
            self.editor.insertPlainText(result)
        else:
            # Watson not configured: show a message and open the setup wiki.
            self.displayMessage(config.thisTranslation["ibmWatsonNotEnalbed"])
            webbrowser.open("https://github.com/eliranwong/UniqueBible/wiki/IBM-Watson-Language-Translator")
    else:
        self.selectTextFirst()
def updateLanguageStringToAllFiles(key, englishTranslation):
    """Write a translation of `key` into every language file that defines it."""
    codes = LanguageUtil.getCodesSupportedLanguages()
    translator = Translator()
    for code in codes:
        translation = LanguageUtil.loadTranslation(code)
        if key not in translation.keys():
            continue
        filename = "lang/language_" + code + ".py"
        if code[:2] == "en":
            result = englishTranslation
        else:
            # zh_HANT maps to the zh-TW variant; others use the 2-letter code.
            target = "zh-TW" if code == "zh_HANT" else code[:2]
            result = translator.translate(englishTranslation, "en", target)
        data = '    "{0}": "{1}",\n'.format(key, result)
        FileUtil.updateStringIntoFile(filename, data)
        print("updated '{0}' into {1}".format(result, code))
def translate(self):
    """Translate every held dataset once, timing the whole pass."""
    timer = Timer("translating data")
    timer.start()
    translator = Translator()
    # English training data goes to Chinese; the Chinese sets go to English.
    conversions = [
        ('train_data_cn', translator.en_to_cn, self.train_data_en),
        ('unlabel_data_en', translator.cn_to_en, self.unlabel_data_cn),
        ('validation_data_en', translator.cn_to_en, self.validation_data_cn),
        ('test_data_en', translator.cn_to_en, self.test_data_cn),
    ]
    for attr, direction, dataset in conversions:
        setattr(self, attr, self.__translate_dataset(direction, dataset))
    timer.finish()
def generate_latex(html, **kwargs):
    """
    Generate latex from html.

    Args:
        key, value pairs are passed in from main latex command.
    """
    # Pull the html2latex conversion options out of the remaining kwargs.
    config = {name: kwargs.pop(name) for name in ('hrule', 'site', 'headings')}
    # Build latex
    h2l = Translator(extensions=[BasicExtension(**config), MooseExtension(**config)])
    return h2l.convert(html), h2l
def execute(self):
    """Compile self.inName and write the result to self.outName (Python 2)."""
    print('compile: ' + self.inName + ' to ' + self.outName)
    # NOTE(review): file handle is never closed explicitly.
    source = open(self.inName).read()
    t = Translator()
    t.translate(source)
    # for debug
    print t.getFuncList()
    print t.getAddrList()
    e = MakeElf(t)
    e.execute()
    f = open(self.outName, 'wb')
    #f.write(elf)
    # NOTE(review): `text` is not defined anywhere in this method, so this
    # line raises NameError at runtime; presumably the ELF bytes were
    # intended (see the commented-out f.write(elf)) — TODO confirm.
    f.write(text)
def updateLanguageFile(lang):
    """Append any translations missing from a language file, machine-translated.

    Args:
        lang: language code whose lang/language_<lang>.py file is updated.
    """
    filename = "lang/language_" + lang + ".py"
    if not path.exists(filename):
        print(filename + " does not exist")
        return
    english = LanguageUtil.loadTranslation("en_US")
    target = LanguageUtil.loadTranslation(lang)
    missing = ""
    translator = Translator()
    count = 0
    for key in english:
        if key not in target:
            count += 1
            print(count)  # progress indicator: one line per missing key
            text = english[key]
            # zh_HANT maps to zh-TW (consistent with the sibling updater);
            # other codes use their 2-letter prefix.
            result = translator.translate(
                text, "en", "zh-TW" if lang == "zh_HANT" else lang[:2])
            missing += '    "{0}": "{1}",\n'.format(key, result)
    FileUtil.insertStringIntoFile(filename, missing, -1)
    print("{0} lines inserted into {1}".format(count, filename))
def step(self, obs):
    """Per-frame agent step: lazy-load the model, then predict and act."""
    super(Overmind, self).step(obs)
    if (not self.loaded):
        # One-time setup on the first frame: remember our starting hatchery
        # and load the trained model from disk.
        self.loaded = True
        self.homeHatch = self.get_buildings(obs, units.Zerg.Hatchery)[0]
        self.model = ks.models.load_model(
            "C:\\Users\\Charlie\Models\\Conv2D-noobs - 1st Gen")
        # Centre the camera on the minimap before doing anything else.
        return FUNCTIONS.move_camera(
            [const.MiniMapSize().x / 2, const.MiniMapSize().y / 2])
    # If nothing to macro
    # if obs.observation.player['food_army'] < 10:
    #     function = self.macro(obs)
    #     print(function)
    #     return function
    T = Translator()
    # Crop and re-encode the screen feature layers into the network's input form.
    tFeatureLayers = T.translate_feature_layers(
        T.crop_feature_layers(obs.observation.feature_screen[0],
                              obs.observation.feature_screen,
                              const.ScreenSize().x, const.ScreenSize().y))
    # Move the layer axis last and add a batch axis:
    # assumes 12 feature layers, giving (1, x, y, 12) — TODO confirm.
    featureLayers = (np.moveaxis((np.array(tFeatureLayers)), 0,
                                 2)).reshape(-1, const.ScreenSize().x,
                                             const.ScreenSize().y, 12)
    prediction = self.model.predict(featureLayers)[0]
    action = translate(obs, prediction)
    print(action[0])
    for pred in prediction:
        print(pred)
    if (action[0] in obs.observation.available_actions):
        return actions.FunctionCall(action[0], action[1:])
    else:
        # Predicted action is unavailable: fall back to scripted macro play.
        print("No-op, Switching to Macro:")
        function = self.macro(obs)
        print(function)
        return function
def visitProgram(self, ctx:LittleParser.ProgramContext):
    """Visit the whole program: build the AST, emit IR, then print tiny code.

    Each IR line is printed prefixed with ';', followed by the translated
    tiny assembly.
    """
    # setup symbol table
    self.error = None
    self.symbolTable = collections.deque()
    self.enterScope()
    # setup abstract syntax tree
    self.tree = []
    # traverse parse tree
    self.visitChildren(ctx)
    ir = []
    for tree in self.tree:
        # extend() instead of repeated list concatenation (quadratic).
        ir.extend(tree.generateCode())
    for line in ir:
        print(';' + line)
    translator = Translator()
    tiny = translator.translate(ir, self.symbolTable[0])
    for line in tiny:
        print(line)
    self.exitScope()
    return
def checkUserLanguage(self):
    """Translate the current selection into config.userLanguage via IBM Watson."""
    # Use IBM Watson service to translate text
    translator = Translator()
    if translator.language_translator is not None:
        if config.userLanguage and config.userLanguage in Translator.toLanguageNames:
            selectedText = self.selectedText().strip()
            if not selectedText:
                self.messageNoSelection()
            else:
                # Map the configured display name back to its language code.
                userLanguage = Translator.toLanguageCodes[
                    Translator.toLanguageNames.index(config.userLanguage)]
                self.translateTextIntoUserLanguage(selectedText, userLanguage)
        else:
            # No (valid) user language configured: ask the user to pick one.
            self.parent.parent.openTranslationLanguageDialog()
    else:
        # Watson not configured: show a message and open the setup guide.
        self.parent.parent.displayMessage(
            config.thisTranslation["ibmWatsonNotEnalbed"])
        config.mainWindow.openWebsite(
            "https://github.com/eliranwong/UniqueBible/wiki/IBM-Watson-Language-Translator"
        )