def translate(self, filename_in, filename_out, config, package):
    # Parse input file
    syntax_tree = self.factory.parser.parse_file(str(filename_in))
    if not syntax_tree:
        return

    # Find type declaration
    type = objc.base.BaseTranslator.find_type(syntax_tree)
    if not type:
        return

    # Add to the list of detected types
    package["types"].append(type)

    # Transform and gather information on generated types for later transforms.
    type.extends = objc.base.BaseTranslator.convert_extends(config, package, type)
    type_info = objc.base.BaseTranslator.convert_type(config, package, type.name)
    if type_info is not None:
        package["types_info"][type_info["objc_name"]] = type

    # Pick proper translator
    if isinstance(type, plyj.model.InterfaceDeclaration):
        translator = objc.interface.InterfaceTranslator(self.factory)
    elif isinstance(type, plyj.model.ClassDeclaration):
        translator = objc.cls.ClassTranslator(self.factory)
    else:
        return

    # Perform translation and generate output file(s)
    translator.translate(config, package, type, filename_out)
def resp():
    if request.method == 'POST':
        r = request.get_json()
        chat_id = r['message']['chat']['id']
        message = r['message']['text']
        time = datetime.fromtimestamp(r['message']['date'])
        user = r['message']['from']['first_name']
        mess_low = message.lower()
        command = my_bot.check_command(mess_low)
        if mess_low in greetings:
            my_bot.send_message(chat_id, 'Дратути!')  # "Hi there!"
        elif mess_low.startswith('/t'):
            if mess_low[2] == ' ':
                my_bot.send_message(chat_id, translate(message[3:]))
            else:
                my_bot.send_message(chat_id, translate(message[2:]))
        elif command in commands:
            my_bot.send_message(chat_id, commands[command]())
        else:
            my_bot.send_message(chat_id, 'Я не понял(((')  # "I did not understand"
        log_message = 'В {} {} написал(а) {}'.format(  # "At <time> <user> wrote <message>"
            time.strftime("%Y-%m-%d %H:%M:%S"), user, message)
        my_bot.write_log(log_message)
        return '<h1>wewew</h1>'
    else:
        return '<h1>Вжух</h1>'  # "Whoosh!"
def uploader(obj: str, fileitem):
    filename = fileitem.filename
    if obj[0] == "P":
        filename = "projects/" + filename
    elif obj[0] == "M":
        filename = "modules/" + filename
    elif obj[0] == "A":
        filename = "arrays/" + filename
    elif obj[0] == "I":
        filename = "pixmaps/" + filename

    if filename[-4:-3] == "." and not (obj[0] == "A" or obj[0] == "I"):
        filename = filename[:-4]

    try:
        if len(obj) < 2:
            if obj[0] == "I":
                with open(filename, 'wb') as f:
                    f.write(fileitem.file.read())
            else:
                with open(filename, 'w', encoding="utf-8") as f:
                    f.write(fileitem.file.read().decode())
        else:
            stack = []
            for line in fileitem.file:
                stack.append(line[:-1].decode())
            with open(filename, 'w', encoding="utf-8") as f:
                json.dump(stack, f)
        os.chmod(filename, 0o666)
        mainpage()
    except:
        hth.htmlhead("startIDE", tr.translate("Upload failed"))
        hth.htmlfoot("", "index.py", tr.translate("Back"))
def info(self, wk=None, sk=None):
    """extract keywords and summary sentences"""
    cname = self.cache_name()
    if cname and exists_file(cname):
        print('FROM CACHED: ', cname)
        sids, sents, kwds = from_json(cname)
        return kwds, sids, sents, None
    if wk is None:
        wk = PARAMS['k_count']
    if sk is None:
        sk = PARAMS['s_count']
    ranker = ranker_dict[PARAMS['RANKER']]
    g = self.to_nx()
    ranks = ranker(g)
    ns = self.keynouns()
    kwds, sids, picg = ranks2info(g, ranks, self.doc.sentences, ns, wk, sk,
                                  self.lang)
    kwds = self.extend_kwds(kwds, ranks)
    kwds = dict((k, 1) for k in kwds)  # remove duplicates, keep order
    kwds = [translate(w, source_lang=self.lang) for w in kwds]
    sids = sorted(sids)
    sents = map(self.get_sent, sids)
    sents = [translate(s, source_lang=self.lang) for s in sents]
    if cname:
        print('CACHING TO: ', cname)
        to_json((sids, sents, kwds), cname)
    if PARAMS['CACHING']:
        self.to_tsv()
    return kwds, sids, sents, picg
def translate(desc, data, language=None):
    """Perform translation on data according to desc and language

    Parameters:
    - desc --- hashmap indicating "attribute name" -> "list name"
    - data --- list or hashmap containing data
    - language --- the target language (NOT USED)
    """
    # sanity checks
    if desc is None or data is None:
        return data

    # create a translation configuration
    prepared = translator.prepare(desc, language)

    # perform the translation
    if isinstance(data, types.DictType):
        # translate single entry
        data = translator.translate(prepared, data)
    elif isinstance(data, types.ListType):
        # translate each entry
        for i in range(len(data)):
            data[i] = translator.translate(prepared, data[i])

    # return the translated data
    return data
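# Minimal usage sketch (not from the source): the field names below and the
# shape of `data` are hypothetical, and the semantics of translator.prepare /
# translator.translate are assumed to match the wrapper above.
sample_desc = {"labels": "label_list"}  # "attribute name" -> "list name"
one_row = translate(sample_desc, {"id": 1, "label_list": ["a", "b"]})
many_rows = translate(sample_desc, [{"id": 2, "label_list": ["c"]},
                                    {"id": 3, "label_list": []}])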
def execute_cmd(cmd):
    global startTime
    if cmd == 'ctime':
        now = datetime.datetime.now()
        speak("Сейчас {0}:{1}".format(str(now.hour), str(now.minute)))  # "It is now HH:MM"
    elif cmd == 'shutdown':
        os.system('shutdown -s')
        speak("Выключаю...")  # "Shutting down..."
    elif cmd == 'calc':
        os.startfile("C:\\Users\\g.chistopolskij\\Desktop\\Калькултор")
    elif cmd == 'translator':
        translator.translate()
    elif cmd == 'internet':
        webbrowser._browsers
    elif cmd == 'startStopwatch':
        speak("Секундомер запущен")  # "Stopwatch started"
        startTime = time.time()
    elif cmd == "stopStopwatch":
        if startTime != 0:
            Time = time.time() - startTime
            speak(
                f"Прошло {round(Time // 3600)} часов {round(Time // 60)} минут {round(Time % 60, 2)} секунд"
            )  # "Elapsed: H hours, M minutes, S seconds"
            startTime = 0
        else:
            speak("Секундомер не включен")  # "The stopwatch is not running"
    elif cmd == 'deals':
        speak("Пока отлично.")  # "So far so good."
    else:
        print("Команда не распознана!")  # "Command not recognized!"
def crawl_new_card(uri: str, update: bool = False):
    # https://gamewith.jp/uma-musume/article/show/266299
    r = requests.get(uri)
    if r.status_code != 200:
        return False
    soup = BeautifulSoup(r.text, 'lxml')
    card = get_support_card(uri, soup)
    card_from_db = db_session.query(SupportCard).filter_by(
        second_name=card.second_name,
        card_name=card.card_name,
        rare_degree=card.rare_degree).first()
    if card_from_db and update:
        card = card_from_db
    elif card_from_db:
        return False
    get_card_event(soup, card, update)
    if not update:
        get_related_skill_from_db(uri, card)
    db_session.commit()
    translate()
    return True
def translateList(captionsStrList):
    '''Translate the given list of English caption strings and insert each
    translation as the first line of the corresponding list element.'''
    print('开始翻译...')  # "Starting translation..."
    # Merge the captions into strings of fewer than 5000 characters
    translateTempStr = ''
    translatedStr = ''
    lenLimits = 5000
    splitStr = '\n\n\n\n\n'
    for index in range(len(captionsStrList)):
        # Add one caption at a time; the limit is checked against the
        # URL-escaped length of the accumulated string.
        if len(urllib.parse.quote(translateTempStr)) < lenLimits:
            # The batch is not full yet: append this caption
            translateTempStr += captionsStrList[index] + splitStr
            print("({}/{}) ".format(index + 1, len(captionsStrList)), end='')
        else:
            # The batch is full: translate it
            translatedStr += translator.translate(translateTempStr)
            # Reset the batch, starting it with the current caption
            translateTempStr = captionsStrList[index] + splitStr
            print("({}/{}) ".format(index + 1, len(captionsStrList)))
    # After the loop, translate whatever is left
    translateTempStr = translateTempStr[:-len(splitStr)]
    translatedStr += translator.translate(translateTempStr)
    print(translatedStr)
    # Insert the translated text above the original English
    # (the '\n' separators come back as the same number of spaces)
    translatedStr = translatedStr.split(' ' * len(splitStr))
    for index in range(len(captionsStrList)):
        captionsStrList[index] = translatedStr[index] + '\n' + captionsStrList[index]
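# Usage sketch (hypothetical captions; assumes the module-level `translator`
# object used above, whose translate(str) -> str call batches text through an
# online service). translateList mutates the list in place, prepending the
# translated text to each caption block.
captions = [
    "1\n00:00:01,000 --> 00:00:02,500\nHello world",
    "2\n00:00:03,000 --> 00:00:04,500\nSee you later",
]
translateList(captions)
print(captions[0])  # translation on the first line, original English below it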
def main():
    parser = _create_parser_with_options()
    if len(sys.argv) == 1:
        parser.print_usage()
    else:
        in_filepath, out_filepath, mapping_filepath = _get_paths_from_arguments(parser)
        translate(mapping_filepath, in_filepath, out_filepath)
def update(self, generator):
    prev_status = self.status
    info = self._get_match_events()
    events = filter(lambda x: int(x['sort']) > self.counter_event,
                    info['event'])
    list_of_events = list(events)
    for event in list_of_events:  # filter() is a one-shot iterator; iterate the materialized list
        team = self.stats_home if event['home_away'] == 'h' else self.stats_away
        if event['player'] not in team:
            team[event['player']] = {}
            team[event['player']][event['event']] = 1
        else:
            if event['event'] not in team[event['player']]:
                team[event['player']][event['event']] = 1
            else:
                team[event['player']][event['event']] += 1
        if event['home_away'] == 'h':
            self.stats_home = team
        else:
            self.stats_away = team
    self.counter_event = len(info['event']) - 1
    if info['match']['score'] != '? - ?':
        self.score_home, self.score_away = map(
            int, info['match']['score'].split(' - '))
    self.status = info['match']['status']
    if self.status == 'IN PLAY' and prev_status == 'NOT STARTED':
        print("MATCH STARTED", self.team_home, self.team_away)
        send_message_to_channel(
            '', f"Матч между {self.team_home} и {self.team_away} начался")  # "The match between X and Y has started"
    for event in list_of_events:
        event['score_home'] = self.score_home
        event['score_away'] = self.score_away
        event['team_home'] = f"{self.team_home}"
        event['team_away'] = f"{self.team_away}"
        event['player'] = f"{event['player']}"
        cls_event = Event(event)
        text_event = cls_event.format_text(self.events_patterns)
        text_fact = generator.trivia_event(event)
        print(text_event + ' \n\n' + text_fact)
        translated_event = translate(text_event)
        translated_fact = translate(text_fact)
        result_text = translated_event + '\n\n' + translated_fact
        print(result_text)
        send_message_to_channel(event['event'], result_text)
        post(
            translated_event.replace('*', '').split('\n')[1], translated_fact,
            ' '.join([
                i.lower().capitalize() for i in event['player'].split()
            ]), f'{self.team_home} VS {self.team_away}',
            info['match']['competition']['name'], event['event'])
    print(self.events)
    self.events = list_of_events
def search_command(bot, channel, sender, message):
    results = search(message)
    if len(results) == 0:
        bot.send_message(channel, translator.translate("nothing found"))
    else:
        res = results[0]
        msg = "{} [ {} ]".format(
            translator.translate(res["titleNoFormatting"]),
            res["unescapedUrl"])
        bot.send_message(channel, msg)
def wolfram_search(query, lang):
    global wolfram_client
    query = translator.translate(lang, 'en', query)
    print(query)
    res = wolfram_client.query(query)
    pprint.pprint(res)
    return translator.translate('en', lang, next(res.results).text)
def info(self, wk=None, sk=None):
    """extract keywords and summary sentences"""
    cname = self.cache_name()
    if cname and exists_file(cname):
        print('FROM CACHED: ', cname)
        sids, sents, kwds = from_json(cname)
        return kwds, sids, sents, None
    if wk is None:
        wk = PARAMS['k_count']
    if sk is None:
        sk = PARAMS['s_count']
    ranker = ranker_dict[PARAMS['RANKER']]
    g = self.to_nx()
    ranks = ranker(g)

    # print('@@@@@',ranks)

    def rank_phrase(pair):
        sid, ws = pair
        if sid not in ranks:
            return 0, ws
        r = sum(ranks[x] for x in ws if x in ranks)
        r = r * (ranks[sid] / (1 + math.log(1 + len(ws))))
        # r=ranks[sid]
        return (r, ws)

    def extend_kwd(w):
        cs = contexts[w]
        if not cs:
            return w
        rs = map(rank_phrase, cs)
        rs = sorted(rs, reverse=True, key=lambda x: x[0])
        phrase = " ".join(rs[0][1])
        return phrase

    ns = self.keynouns()
    contexts = self.context_dict()
    kwds, sids, picg = ranks2info(g, ranks, self.doc.sentences, ns, wk, sk,
                                  self.lang)
    kwds = map(extend_kwd, kwds)
    kwds = dict((k, 1) for k in kwds)  # remove duplicates, keep order
    kwds = [translate(w, source_lang=self.lang) for w in kwds]
    sids = sorted(sids)
    sents = map(self.get_sent, sids)
    sents = [translate(s, source_lang=self.lang) for s in sents]
    if cname:
        print('CACHING TO: ', cname)
        to_json((sids, sents, kwds), cname)
    if PARAMS['CACHING']:
        self.to_tsv()
    return kwds, sids, sents, picg
def core():
    current_dir_path = get_dir_abs_path()
    current_dir_files_list = get_dir_file_names(current_dir_path)
    articles = filter_txt_files_in_list(current_dir_files_list)
    for article in articles:
        lang = article.split('.')[0]
        source_path = make_path_to_file(article)
        result_path = make_path_to_file(article, get_dir_abs_path('translation'))
        translator.translate(source_path, result_path, lang)
def command_translate():
    name_result = entry_name.get()
    if name_result == "":
        m_box.showwarning("Unable to proceed", "Enter the name for the .txt file!")
    else:
        conf = m_box.askyesno(
            "Confirmation",
            "The program will now create a file " + name_result +
            ".txt\nIf the file exists it will be overwritten.\nDo you want to proceed?"
        )
        if conf == 1:
            core.translate(text_input.get(index1="1.0", index2=END), name_result)
def interact(self):
    """Have a conversation with a user."""
    # Read a line, process it, and print the results until no input remains.
    while True:
        try:
            user_input = self.interface.get_user_input()
            print(user_input)
            user_input = translate(user_input, to_lang=self.rules_language)
            print(user_input)
        except:
            break
        response_en = self.respond(user_input)
        # self.interface.get_user_output(response_en)
        response = translate(response_en, to_lang=self.interface.output_lang)
        self.interface.get_user_output(response)
def greet_command(bot, channel, sender, message):
    nick = sender
    if len(message.split()) >= 1:
        nick = message.split()[0]
    bot.send_message(
        channel,
        translator.translate("Hello there {}. I am Donald".format(nick)))
def get_rect_text(input_img, rec, is_pool=False):
    if is_pool and (rec[0] < 0 or rec[1] < 0):
        return "Pool"
    roi = helper.truncate(input_img, rec)
    roi = helper.rotate_90_clockwise(roi)
    text = translator.translate(roi)
    return text
def translate(bot, update):
    chat_id = get_chat_id(update)
    message = update.message or update.channel_post
    if not message:
        return
    lang = message.text
    lang = lang.replace("/translate", "").strip()
    logger.debug("Language %s", lang)

    if lang not in config.get_config_prop("app")["languages"]:
        bot.send_message(chat_id=chat_id,
                         text=R.get_string_resource(
                             "translate_language_not_found",
                             TBDB.get_chat_lang(chat_id)).format(lang),
                         is_group=chat_id < 0)
        return

    lang = config.get_config_prop("app")["languages"][lang].split('-')[0]

    if not message.reply_to_message:
        bot.send_message(chat_id=chat_id,
                         text=R.get_string_resource(
                             "translate_reply_to_message",
                             TBDB.get_chat_lang(chat_id)),
                         is_group=chat_id < 0)
        return

    translation = translator.translate(source=TBDB.get_chat_lang(chat_id),
                                       target=lang,
                                       text=message.reply_to_message.text)
    message.reply_text(translation)
def display_analyses(self, word):
    self.OUTPUT.delete(0, self.OUTPUT.size())
    for form_list in self.transducer.lookup(word).values():
        for form in form_list:
            analysis = form[0].replace(epsilon, '')
            analysis += ' - ' + str(translator.translate(form[0].split('+')[1]))
            self.add_to_output(analysis)
def _setLPath(self):
    t = self.treeview.canvas().getTreeModel()
    lpath = translate(t, space=' ')
    if lpath is None:
        self.entQuery.setText('')
    else:
        self.entQuery.setText(lpath)
def show_answers(self, sids):
    print('')
    for sid in sids:
        sid, sent = self.sents[sid]
        sent = translate(sent, source_lang=self.lang)
        print(sent)
    print('')
def wrong_checksum(account):
    possible_accounts = []
    for i in range(len(account)):
        index = account[i]
        hex = digits.digits_hex[index]
        to_check = translator.translate(hex)
        possibilities = check_possibilities(to_check)
        possible_digits = []
        if len(possibilities) == 0:
            # nothing to try for this digit position
            break
        for entry in possibilities:
            if entry in digits.digits_hex:
                possible_digits.append(digits.digits_hex.index(entry))
        for j in possible_digits:
            temp = account.copy()  # copy per candidate so earlier results are not overwritten
            temp[i] = j
            if checksum.checksum(temp) == True:
                possible_accounts.append(temp)
    if len(possible_accounts) == 1:
        account = possible_accounts[0]
        return account
    elif len(possible_accounts) == 0:
        account.append(" ERR")
        return account
    else:
        account.append(" AMB " + str(possible_accounts))
        return account
def main():
    from StringIO import StringIO
    import sys

    print("Content-Type: text/plain")
    old_stdout = sys.stdout  # keep a reference so the except block can always restore stdout
    try:
        src, inputData = loadSource()
        prg = translator.translate(src)
        print("Program-Size: " + str(prg['_top']))

        cpu = EnhancedExecutor()
        if len(inputData) == 1:
            if cpu.fetchState(inputData[0]):
                inputData = []
        cpu.inputData = '\n'.join(inputData)

        my_stdout = StringIO()
        sys.stdout = my_stdout
        cpu.run(prg)
        sys.stdout = old_stdout

        print("Program-Cycles: " + str(cpu.cycles))
        print('')
        result = my_stdout.getvalue()
        sys.stdout.write(result)
        if len(inputData) == 0:
            cpu.printRegs()
    except Exception as e:
        sys.stdout = old_stdout
        print('')
        print("Error: %s\n" % e)
def test_translate_a_instruction(self):
    '''test the simple translate function from machine code to binary value'''
    machine_code = '@2'
    binary_value = '0000000000000010'
    self.assertEqual(translate_a_instruction(machine_code), binary_value)
    self.assertEqual(translate(machine_code), binary_value)
def run():
    seq = request.args.get("DNAseq")
    if seq is not None:
        protein = translator.translate(seq)
        return render_template("DNAtranslator.html", ProSeq=protein, DNAseq=seq)
    return render_template("DNAtranslator.html")
def update_values(self):
    v0 = app.path_frame.nc_path.get()
    v1 = app.path_frame.as_path.get()
    v2 = app.coord_frame.base_coord.get()
    v3 = app.coord_frame.tool_coord.get()
    v4 = app.move_frame.rapid_speed.get()
    v5 = app.move_frame.rapid_accuracy.get()
    v6 = app.move_frame.line_speed.get()
    v7 = app.move_frame.line_accuracy.get()
    v8 = app.move_frame.circular_speed.get()
    v9 = app.move_frame.circular_accuracy.get()
    v10 = app.option_frame.option_speed.get()
    v11 = app.option_frame.option_base.get()
    update_previous(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11)
    translate(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11)
    return None
def serverTranslationFunc(rawMessage, models):
    segments, outputLang, outputStyle, delim = decodeRequest(rawMessage)
    translations, _, _, _ = translator.translate(models, segments, outputLang,
                                                 outputStyle, getCnstrs())
    return encodeResponse(translations, delim)
def translateList(captionsStrList):
    '''Translate the given list of English caption strings and insert each
    translation as the first line of the corresponding list element.
    Baidu Translate has a 6000-byte limit, so the text to translate has to be
    split into parts.'''
    print('原文分段...')  # "Splitting the source text..."
    counter = 0
    partList = list()
    partList.append(list())  # there is always at least one part
    resultList = list()  # collects the translated parts
    for tmpStr in captionsStrList:
        counter += sys.getsizeof(tmpStr)
        if counter < 6000:
            if tmpStr != '':
                partList[-1].append(tmpStr)
        else:
            partList.append(list())
            partList[-1].append(tmpStr)
            counter = 0
    print('开始翻译...')  # "Starting translation..."
    for i in range(len(partList)):
        resultList.extend(translator.translate('\n'.join(partList[i])))
        print(f'翻译第 {i+1} 部分,共{len(partList)}部分')  # "Translated part {i+1} of {len(partList)}"
        time.sleep(1)  # API rate limit: wait 1 s
    # Insert the translated text above the original English
    print(resultList)
    for index in range(len(captionsStrList)):
        captionsStrList[index] = resultList[index] + '\n' + captionsStrList[index]
def __init__(self, voronoi, grid):
    self.pairs = []
    self.all_pts = []
    self.grid = grid
    self.width = len(self.grid[0])
    self.height = len(self.grid)
    self.pairs = voronoi
    # for cell_i in xrange(0, len(voronoi)):
    #     cell = voronoi[cell_i]
    #     for face in cell['faces']:
    #         self.pairs.append([cell['vertices'][i] for i in face['vertices']])
    self.pruned_pairs = []
    for pair in self.pairs:
        bres_pts = get_line((int(pair[0][0]), int(pair[0][1])),
                            (int(pair[1][0]), int(pair[1][1])),
                            dim=(self.width, self.height))
        hits_wall = False
        for pt in bres_pts:
            if self.prune_collisions(pt[0], pt[1]):
                hits_wall = True
                break
        if not hits_wall:
            self.pruned_pairs.append(pair)
            self.all_pts.extend(bres_pts)
    for point in self.all_pts:
        x = int(point[0])
        y = int(point[1])
        [i, j] = translate(x, y, [self.width, self.height])
        self.grid[i][j] = -2
def translate_file(in_filename, java_classname):
    """
    Translate the SPL contents of the file with name in_filename to Java,
    outputting to out_filename. Note that the file extensions are appended.

    :param in_filename: the input SPL filename.
    :param java_classname: the name of the output Java class; the filename is
        {java_classname}.java.
    :raises FileNotFoundError: if in_filename does not exist
    """
    out_filename = java_classname + '.java'
    # parse in_filename and output to out_filename
    with open(in_filename, 'r') as spl_file:
        spl = spl_file.read()
    try:
        java = translate(spl, java_classname)
    except SplError as e:
        error = e.args[0]
        print('Compilation error:')
        print(error)
        return
    with open(out_filename, 'w') as java_file:
        java_file.write(java)
    print('Output successfully to', out_filename)
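# Usage sketch (hypothetical file names): compile an SPL source file into
# Romeo.java in the current directory; on an SplError the function prints the
# compilation error instead of writing any output.
translate_file('hamlet.spl', 'Romeo')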
def test():
    opt = parse_args()
    opt.experiment = os.path.join(root_dir, opt.experiment)
    opt.chkpt = os.path.join(opt.experiment, opt.chkpt)
    opt.out_file = os.path.join(opt.experiment, opt.out_file)
    opt.out_json = os.path.join(opt.experiment, opt.out_json)

    sessions = json.loads(open(opt.test_file).read())['sessions']

    # Model loading
    model = make_model(len(opt.word2idx))
    chkpt = torch.load(opt.chkpt, map_location=lambda storage, log: storage)
    model.load_state_dict(chkpt)
    if opt.gpuid >= 0:
        model = model.cuda()

    # ====== *********************** ================
    model.eval()
    # ===============================================

    # decode
    results = []
    print('Decoding ...')
    decode_sessions = {'sessions': []}
    for session in sessions:
        n_session = {}
        n_session['session-id'] = session['session-id']
        n_session['turns'] = []
        for turn in session['turns']:
            asr_hyps = turn['asr-hyps']
            asr_hyp = asr_hyps[0]['asr-hyp']
            string = translate(model, asr_hyp, opt.word2idx, opt.idx2word,
                               opt.cuda)
            if string == '':
                classes = []
            else:
                classes = trp_reverse_process(string, 1)
            results.append((asr_hyp, string))
            slu_hyp = [slot2dic(string) for string in classes]
            n_session['turns'].append({
                'asr-hyps': asr_hyps,
                'slu-hyps': [{'slu-hyp': slu_hyp, 'score': 1.0}]
            })
        decode_sessions['sessions'].append(n_session)
    string = json.dumps(decode_sessions, sort_keys=True, indent=4,
                        separators=(',', ':'))
    with open(opt.out_json, 'w') as f:
        f.write(string)
    print('Decode results saved in {}'.format(opt.save_file))
    with open(opt.out_file, 'w') as f:
        for (enc, dec) in results:
            f.write('{}\t<=>\t{}\n'.format(enc.strip(), dec.strip()))
def handle(self):
    time.sleep(0.5)
    try:
        first_line = self.rfile.readline().split()
        assert first_line[0] == 'GET'
        assert len(first_line) >= 2
        request = first_line[1]
    except (EOFError, AssertionError, IndexError):
        print >> sys.stderr, 'Ill formed request:', first_line
        self.wfile.write(
            http_packet(400, 'The server does not understand your request.'))
        raise
    if request.startswith('/flag/'):
        reql = request.split('?')
        try:
            recv_flag = reql[0][6:].decode('base64')
        except:
            print >> sys.stderr, 'Ill formed request:', request
            self.wfile.write(http_packet(400, 'Flag format wrong.'))
            raise
        with tempfile.TemporaryFile() as tmpfin:
            with tempfile.TemporaryFile() as tmpfout:
                tmpfin.write(recv_flag + '\n')
                tmpfin.seek(0)
                token = ''
                if len(reql) > 1:
                    for l in reql[1].split('&'):
                        if l.startswith('token='):
                            token = l.split('=')[1]
                            break
                tid = translator.translate(token)
                fc_proc = subprocess.Popen([
                    'docker', 'exec', '-i', '-u', '233:233',
                    self.server.docker_prefix + ('%03d' % tid),
                    'nc', '127.0.0.1', '2323'
                ], stdin=tmpfin, stdout=tmpfout)
                time.sleep(1)
                tmpfout.seek(0)
                fc_response = tmpfout.read()
                print >> sys.stderr, 'input: %s\noutput: %s' % (
                    recv_flag, fc_response)
                fc_response = [
                    cgi.escape(repr(_)[1:-1]) for _ in fc_response.split('\n')
                ]
                fc_response = '<br>'.join(fc_response)
                if 'Right!' in fc_response:
                    self.wfile.write(
                        http_packet(200, good_flag_response % fc_response))
                else:
                    self.wfile.write(
                        http_packet(200, bad_flag_response % fc_response))
    else:
        self.wfile.write(http_packet(200, hint_response))
def translate(plan):
    solver = AllocationSolver(_topology)
    flat_plan = flatten(plan)
    not_scalable_arr = get_not_scalable_tiers(_topology)
    new_allocation = solver.solve(flat_plan, _allocation, not_scalable_arr)
    logging.debug('new_allocation={}'.format(new_allocation))
    flat_topology = flatten_topology(_topology)
    return translator.translate(_allocation, new_allocation, flat_topology)
def translate_xml(tree, target="pt", source="en", wait_line=0.5):
    """
    translate_etree(tree[, target="pt", source="en", wait_line=0.5])

    Converts an XML tree from the ``source`` language to the ``target``
    language. The default wait time between one translation and the next is
    0.5 seconds. If there are more than 10 pairs to translate, the wait time
    is raised. This tries to avoid the connection to Google Translate being
    closed, but does not guarantee it. #FIXME

    :param tree: the XML tree.
    :type tree: ElementTree
    :param target: target language
    :type target: str
    :param source: source language
    :type source: str
    :param wait_line: wait time between translated lines.
    :type wait_line: float

    :returns: ElementTree
    """
    CRITICAL_SIZE = 10  # pairs
    CRITICAL_WAIT_LINE = 1  # seconds

    # ROOT MUST BE COPIED!!!
    entailment_corpus = tree.getroot().copy()
    pairs = entailment_corpus.getchildren()
    pairs_size = len(pairs)

    if pairs_size > CRITICAL_SIZE and wait_line < CRITICAL_WAIT_LINE:
        print "Muitos pares rte na lista de tradução."  # "Too many RTE pairs in the translation list."
        print "O tempo de espera mínimo de tradução foi ajustado para %ds." % CRITICAL_WAIT_LINE  # "Minimum translation wait time adjusted to %ds."
        wait_line = CRITICAL_WAIT_LINE

    count = 1  # start counting
    for pair in pairs:
        print "Traduzindo par %d de %d" % (count, pairs_size)  # "Translating pair %d of %d"
        pair.find("t").text = translate(pair.find("t").text, target, source)
        time.sleep(wait_line)
        pair.find("h").text = translate(pair.find("h").text, target, source)
        time.sleep(wait_line)
        count += 1

    translated_tree = ElementTree(entailment_corpus)
    return translated_tree
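# Usage sketch (hypothetical paths; assumes an RTE-style corpus whose <pair>
# elements contain <t> and <h> children, as the function above expects).
from xml.etree.ElementTree import parse as parse_xml

source_tree = parse_xml('rte_corpus_en.xml')
translated_tree = translate_xml(source_tree, target="pt", source="en", wait_line=0.5)
translated_tree.write('rte_corpus_pt.xml', encoding='utf-8')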
def treat(to_pl, fromsite):
    try:
        to_text = to_pl.get()
        interwikis = to_pl.interwiki()
    except wikipedia.IsRedirectPage:
        print "Can't work on redirect page."
        return
    except wikipedia.NoPage:
        print "Page not found."
        return
    from_pl = None
    for interwiki in interwikis:
        if interwiki.site() == fromsite:
            from_pl = interwiki
    if from_pl is None:
        print "Interwiki link to %s not found." % repr(fromsite)
        return
    from_text = from_pl.get()
    wikipedia.setAction(wikipedia.translate(mysite.lang, msg) + from_pl.aslink())
    # search start of table
    table = get_table(from_text)
    if not table:
        wikipedia.output(u"No table found in %s" % (from_pl.aslink()))
        return
    print_debug(u"Copying images")
    if copy_images:
        # extract image links from original table
        images = imagelinks(fromsite, table)
        for image in images:
            # Copy the image to the current wikipedia, copy the image description page as well.
            # Prompt the user so that he can translate the filename.
            new_filename = lib_images.transfer_image(wikipedia.Page(fromsite, image), debug)
            # if the upload succeeded
            if new_filename:
                old_image_tag = wikipedia.Page(fromsite, image).title()
                new_image_tag = wikipedia.Page(mysite, mysite.image_namespace() + ":" + new_filename).title()
                print_debug(u"Replacing " + old_image_tag + " with " + new_image_tag)
                # We want to replace "Image:My pic.jpg" as well as "image:my_pic.jpg",
                # so we need a regular expression.
                old_image_tag = old_image_tag.replace(" ", "[ \_]")
                old_image_tag = "[" + old_image_tag[0].upper() + old_image_tag[0].lower() + "]" + old_image_tag[1:]
                # todo: regex for first letter of filename, i.e. first letter after the colon
                rOld_image_tag = re.compile(old_image_tag)
                table = re.sub(old_image_tag, new_image_tag, table)
    translated_table = translator.translate(table, type, fromsite.lang, debug, mysite.lang)
    if not translated_table:
        print "Could not translate table."
        return
    print_debug(u"\n" + translated_table)
    # add table to top of the article, separated by blank lines
    to_text = translated_table + "\n\n" + to_text
    if not debug:
        # save changes on Wikipedia
        to_pl.put(to_text, minorEdit='0')
def codelist(obj: str):
    if obj == "P":
        hth.htmlhead("startIDE", tr.translate("Show a project code listing"))
    elif obj == "M":
        hth.htmlhead("startIDE", tr.translate("Show a module code listing"))
    hth.separator()
    hth.lf()
    if obj == "P":
        hth.text(tr.translate("Please select project:"))
        hth.lf(2)
        listfiles("projects/")
    elif obj == "M":
        hth.text(tr.translate("Please select module:"))
        hth.lf(2)
        listfiles("modules/")
    hth.lf(2)
    hth.separator()
    hth.htmlfoot("", "javascript:history.back()", tr.translate("Back"))
def api():
    res = json.dumps(translator.translate(bottle.request.query.input,
                                          not not bottle.request.query.aggressive))
    if bottle.request.query.callback:
        bottle.response.content_type = "application/javascript"
        cb = re.sub(reject, "", bottle.request.query.callback)
        return cb + "(" + res + ");"
    else:
        bottle.response.content_type = "application/json"
        return res
def main():
    if len(argv) > 1:
        fn = argv[1]
    else:
        fn = 'tables.schema'
    p = parse(fn)
    nm = fn.split('.')[0]
    tg = '.'.join(fn.split('.')[:-1] + ['sql'])
    s = ['USE %s;\n\n' % nm] + translate(p)
    with open(tg, 'w') as f:
        f.writelines(s)
def run(self):
    if not translator.init():
        return
    os.nice(10)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((self.HOST, self.PORT))
    s.listen(10)
    while True:
        conn, addr = s.accept()
        print 'Connected by', addr
        while True:
            data = conn.recv(1024)
            if not data:
                break
            try:
                query = json.loads(data)
                if query['auto']:
                    translator.translate_selected()
                else:
                    translator.translate(query['text'], query['lang'])
            except (TypeError, KeyError):
                print 'Request is not valid.'
        conn.close()
def execute(plan):
    global _allocation, _topology
    solver = AllocationSolver(_topology)
    flat_plan = flatten(plan)
    _allocation.pop('time', None)
    if solver.need_solution(flat_plan, _allocation):
        not_scalable_arr = get_not_scalable_tiers(_topology)
        new_allocation = solver.solve(flat_plan, _allocation, not_scalable_arr)
        logging.debug('new_allocation={}'.format(new_allocation))
        flat_topology = flatten_topology(_topology)
        actions = translator.translate(_allocation, new_allocation, flat_topology)
        _allocation = executor.aws_execute(actions, new_allocation, _topology,
                                           _allocation)
    return _allocation
def query(self):
    if self.db is not None:
        if not self.treeview.canvas():
            return
        t = self.treeview.canvas().getTreeModel()
        lpath = translate(t)
        if lpath is None:
            return
        self.disconnect(self.db.emitter, PYSIGNAL("gotMoreTree"), self.gotMoreTree)
        self._queryJustSubmitted = True
        self.statusBar().message("Submitted the query. Please wait...")
        self.btnNextTree.setEnabled(False)
        if self.db.submitQuery(lpath) == True:
            self.queryTree = parse_lpath(lpath)
        else:
            self.statusBar().message("Query failed.")
def main(parser):
    sikb_zip = re.sub('src/pakbon-ld.py',
                      'if/SIKB0102 versie 3.1.0 XSD en Lookup domeintabellen.zip',
                      os.path.realpath(__file__))

    args = parser.parse_args()

    if args.version:
        print('Pakbon-ld v{0}'.format(version))
        sys.exit(0)

    if args.ignore_version:
        print('WARNING: version discrepancy may result in errors')

    if args.endpoint == '':
        args.endpoint = args.default_namespace + '/sparql/'

    if len(args.align_with) > 0:
        args.align_with = args.align_with[0].split()

    if args.input_path == '' and args.generate_ontology is False and args.generate_vocabulary is False:
        print('Missing required input (flags).\nUse \'pakbon-ld.py -h\' for help.')
        sys.exit(1)

    if args.output_path == '':
        if args.input_path != '':
            args.output_path = os.getcwd() + '/' + re.sub(r'^(?:.*/)?(.*)\..*$', r'\1', args.input_path)
        else:
            args.output_path = os.getcwd() + '/' + 'SIKB0102'

    (graph, ontology, vocabulary) = translator.translate(args.input_path,
                                                         args.align_with,
                                                         args.default_namespace,
                                                         sikb_zip,
                                                         args.generate_ontology,
                                                         args.generate_vocabulary,
                                                         args.ignore_version,
                                                         args.align,
                                                         args.endpoint,
                                                         args.enable_georesolver,
                                                         args.interactive)

    if graph is not None:
        writer.write(graph, args.output_path + extOf(args.serialization_format), args.serialization_format)
    if ontology is not None:
        writer.write(ontology, args.output_path + '_Ontology' + extOf(args.serialization_format), args.serialization_format)
    if vocabulary is not None:
        writer.write(vocabulary, args.output_path + '_Vocabulary' + extOf(args.serialization_format), args.serialization_format)
def mouseMoveEvent(self, e):
    c = self.canvas()
    if not c:
        return
    p = self.viewportToContents(e.pos())
    for item in c.collisions(p):
        if isinstance(item, TreeCanvasNode):
            if item.node != self.highlightedNode:
                if item.node.filterExpression:
                    x = self.contentsToViewport(item.boundingRect().topRight())
                    self.filterExpPopup.popup(x, item.node)
                self.unhighlight()
                self.highlight(item.node)
                s = translate(item.node.lpRoot, item.node, ' ')
                if s is not None:
                    self.emit(PYSIGNAL('highlightLPath'), (s,))
            break
    else:
        if self.highlightedNode is not None:
            self.filterExpPopup.hide()
            self.unhighlight()
def visit_module(self, file_path, overrides, platform, module_name):
    dir_name, file_name = os.path.split(file_path)
    if not file_name.endswith(".js") and file_name.split(".")[0] != module_name.split(".")[-1]:
        if file_name == "__init__.py":
            if os.path.basename(dir_name) != module_name.split(".")[-1]:
                return
        else:
            return
    self.merge_resources(dir_name)
    if platform and overrides:
        plat_suffix = ".__%s__" % platform
    else:
        plat_suffix = ""
    if self.compile_inplace:
        mod_part, extension = os.path.splitext(file_path)
        out_file = "%s%s.js" % (mod_part, plat_suffix)
    else:
        out_file = os.path.join(self.output, "lib", "%s%s.js" % (module_name, plat_suffix))
    if out_file in self.done.get(platform, []):
        return
    # translate if
    # - no platform
    # - or if we have an override
    # - or the module is used in an override only
    if platform is None or (platform and overrides) or (out_file not in self.done.get(None, [])):
        if file_name.endswith(".js"):
            fp = open(out_file, "w")
            fp.write("/* start javascript include: %s */\n" % file_name)
            fp.write(open(file_path, "r").read())
            fp.write("$pyjs.loaded_modules['%s'] = function ( ) {return null;};\n" % file_name)
            fp.write("/* end %s */\n" % file_name)
            deps = []
            self.dependencies[out_file] = deps
        else:
            logging.info("Translating module:%s platform:%s out:%r" % (module_name, platform or "-", out_file))
            deps, js_libs = translator.translate(
                self.compiler,
                [file_path] + overrides,
                out_file,
                module_name=module_name,
                **self.translator_arguments
            )
            self.dependencies[out_file] = deps
            for path, mode, location in js_libs:
                if mode == "default":
                    if self.multi_file:
                        mode = "dynamic"
                    else:
                        mode = "static"
                if mode == "dynamic":
                    self.dynamic_js_libs.append(path)
                elif mode == "static":
                    if location == "early":
                        self.early_static_js_libs.append(path)
                    elif location == "middle":
                        self.static_js_libs.append(path)
                    elif location == "late":
                        self.late_static_js_libs.append(path)
                    else:
                        raise RuntimeError, "Unknown js lib location: %r" % location
                else:
                    raise RuntimeError, "Unknown js lib mode: %r" % mode
        if "." in module_name:
            for i, dep in enumerate(deps):
                if module_path(dep, path=[dir_name]):
                    deps[i] = ".".join(module_name.split(".")[:-1] + [dep])
    else:
        deps = self.dependencies[out_file]
    if out_file not in self.done.setdefault(platform, []):
        self.done[platform].append(out_file)
    if module_name not in self.visited_modules.setdefault(platform, []):
        self.visited_modules[platform].append(module_name)
    if deps:
        self.visit_modules(deps, platform, file_path)
def menu_View_SqlTranslation(self):
    t = self.treeview.canvas().getTreeModel()
    q = translate(t)
    if q.strip():
        d = SqlViewDialog(lpql=q, parent=self)
        d.exec_loop()
class InterpreterError:
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return self.value


if __name__ == '__main__':
    setrecursionlimit(10000)
    options, arguments = getopt.getopt(argv[1:], "p")
    if len(arguments) >= 1:
        try:
            infile = open(arguments[0], 'r')
            lines = infile.readlines()
            lines = translate([tokenizer.tokenize(line) for line in lines])
            (linenum_idx, parse_tree) = syntax_parser.create_parse_trees(lines)
            if ('-p', '') in options:
                # print parse tree in a separate text file
                parse_file = open(arguments[0][:max(arguments[0].index('.'), 0)] + '_parsetree.txt', 'w')
                parse_file.write(str(parse_tree))
            cmd_line = Variable('COMMAND', StringType, StringType(' '.join(arguments)))
            # cmd=MultiArray([len(arguments)],'COMMAND',StringType,)
            # cmd_line=Variable('COMMAND',MultiArray,MultiArray())
            runner = Interpreter(parse_tree, linenum_idx,
                                 {'COMMAND$': cmd_line, 'COMMAND': cmd_line})
            runner.run()
        except IOError, ioe:
            print "Error in reading/writing file:\n", str(ioe)
    else:
        print "usage: ./spaghetti.py <file name> or python spaghetti.py <file_name>"
def onWelcome(self, bot):
    self.log("Welcome message sent.", 1)
    for channel in self.channels:
        self.bot.join_channel(channel)
        self.bot.send_message(channel, translator.translate("Hello kids, Donald is back"))
(main
    (println (abs 1))
    (println (abs (- 0 2)))
    (println True)
    (println (> 1 2))
    (println (== 0 1))
    (println (== 0 0))
    (define d 1)
)
'''


def filterTree(tree):
    if not isinstance(tree, list):
        return tree
    tmp = filter(lambda x: x != '' and x != '\n', tree)
    return map(filterTree, tmp)


if __name__ == '__main__':
    t = lexer.getTokens(s)
    tree = parser.parse(t)[0]
    tree = filterTree(tree)
    print tree
    src = translator.translate(tree)
    print src
    with open('tmp.cpp', 'w') as f:
        f.write(src)
    os.system("g++ -std=c++14 tmp.cpp && ./a.out")
    # print executer.execute(tree)
def mainpage():
    hth.htmlhead("startIDE", tr.translate("Control your model with a finger touch"))
    print('<img src="icon.png">')
    hth.separator()
    hth.text(tr.translate("<b>Download</b> a"))
    hth.link(tr.translate("project"), "index.py?action=PDown")
    hth.text(tr.translate("or a"))
    hth.link(tr.translate("module"), "index.py?action=MDown")
    hth.text(tr.translate("from your TXT."))
    hth.lf(2)
    hth.text(tr.translate("<b>Upload</b> a"))
    hth.link(tr.translate("project"), "index.py?action=PUp")
    hth.text(tr.translate("or a"))
    hth.link(tr.translate("module"), "index.py?action=MUp")
    hth.text(tr.translate("to your TXT."))
    hth.lf(2)
    hth.text(tr.translate("<b>Show</b> a"))
    hth.link(tr.translate("project"), "index.py?action=PList")
    hth.text(tr.translate("or a"))
    hth.link(tr.translate("module"), "index.py?action=MList")
    hth.text(tr.translate("code listing."))
    hth.lf(2)
    hth.text(tr.translate("<b>Download</b> a"))
    hth.link(tr.translate("logfile"), "index.py?action=LogDown")
    hth.text(tr.translate("from your TXT."))
    hth.lf(2)
    hth.text(tr.translate("<b>Upload</b> a"))
    hth.link(tr.translate("pixmap"), "index.py?action=PICUp")
    hth.text(tr.translate("to your TXT."))
    hth.separator()
    hth.lf(1)
    hth.text(tr.translate("<b><u>Experts corner</b></u>"))
    hth.lf(2)
    hth.text(tr.translate("<b>Download</b> a"))
    hth.link(tr.translate("project"), "index.py?action=PCDown")
    hth.text(tr.translate("or a"))
    hth.link(tr.translate("module"), "index.py?action=MCDown")
    hth.text(tr.translate("and convert it to plain text."))
    hth.lf(1)
    hth.text(tr.translate("<b>Upload</b> a"))
    hth.link(tr.translate("project"), "index.py?action=PCUp")
    hth.text(tr.translate("or a"))
    hth.link(tr.translate("module"), "index.py?action=MCUp")
    hth.text(tr.translate("from a plain text file."))
    hth.lf(2)
    hth.text(tr.translate("<b>Convert</b> a"))
    hth.link(tr.translate("logfile"), "index.py?action=LogCSV")
    hth.text(tr.translate("to .CSV"))
    hth.lf(2)
    hth.text(tr.translate("<b>Download</b> an"))
    hth.link(tr.translate("array"), "index.py?action=ADown")
    hth.text(tr.translate("from your TXT."))
    hth.lf(1)
    hth.text(tr.translate("<b>Upload</b> an"))
    hth.link(tr.translate("array"), "index.py?action=AUp")
    hth.text(tr.translate("to your TXT."))
    hth.separator()
    hth.htmlfoot("", "/", "TXT Home")
            scope.append(p)
        elif b == "}":
            scope.pop()
        i += 1
    root = root.lpChildren[0]
    # root.lpPrune()
    return root


if __name__ == "__main__":
    q = "//VP//NP[==>JJ and ==>NN]"
    t = parse_lpath(q)

    def f(t, n):
        if t is not None:
            print (" " * n) + t.data["label"]
            for c in t.children:
                f(c, n + 4)

    def g(t, n):
        if t is not None:
            print (" " * n) + t.data["label"]
            for c in t.lpChildren:
                g(c, n + 4)
        else:
            print " " * n + "None"

    g(t, 0)
    print translate(t)
    Tagalog - tl
    Thai - th
    Tamil - ta
    Telugu - te
    Turkish - tr
    Ukrainian - uk
    Urdu - ur
    Finnish - fi
    French - fr
    Hindi - hi
    Hmong - hmn
    Croatian - hr
    Czech - cs
    Swedish - sv
    Esperanto - eo
    Estonian - et
    Javanese - jw
    Japanese - ja'''
else:
    TextToTranslate = args[0]
    if len(args) > 1:
        TargetLanguage = args[1]
    else:
        TargetLanguage = 'en'
    if len(args) > 2:
        CurrentLanguage = args[2]
    else:
        CurrentLanguage = 'auto'
    print translator.translate(translator.bringing(TextToTranslate), TargetLanguage, CurrentLanguage)
def process_command(line, sender, channel):
    # !help -- get help about TuxBot's commands
    match = re.match(r'help$', line)
    if match:
        irc.send_private_notice(commandref, sender)
        if sender in channel_ops[channel] + channel_voices[channel]:
            irc.send_private_notice(opcommandref, sender)
        return True

    # !help <key> -- get help
    match = re.match(r'help\s+(.*)$', line)
    if match:
        key = clean_string(match.group(1))
        text = config.get_help(key)
        if text:
            irc.send_message("%s" % (text), channel)
        else:
            irc.send_message("I don't have an answer for \"%s\". You can set it using the \"!sethelp question: answer\" command." % (key), channel)
        return True

    # !man <section> <name> -- get the URL to an online man page
    match = re.match(r'man\s+(\w+)\s+([-A-Za-z0-9_]+)$', line)
    if match:
        irc.send_message(man.get(match.group(1), match.group(2)), channel)
        return True

    # !synopsis <section> <name> -- print the "SYNOPSIS" section of the specified man page
    match = re.match(r'synopsis\s+(\w+)\s+(\w+)$', line)
    if match:
        text = man.synopsis(match.group(1), match.group(2))
        if not text:
            irc.send_message("Failed to get man page for \"%s\" in section \"%s\"" % (match.group(2), match.group(1)), channel)
            return True
        irc.send_message(text, channel)
        return True

    # !man <criteria> -- search for an online man page
    match = re.match(r'man\s+(\w+)$', line)
    if match:
        irc.send_message(man.search(match.group(1)), channel)
        return True

    # !xkcd -- get a random xkcd comic
    match = re.match(r'xkcd$', line)
    if match:
        irc.send_message(xkcd.get_random(), channel)
        return True

    # !xkcd <index> -- get an xkcd comic by index
    match = re.match(r'xkcd\s+([0-9]+)$', line)
    if match:
        irc.send_message(xkcd.get_url(int(match.group(1))), channel)
        return True

    # !xkcd-linux and !xkcd-geek -- get linux-related and geeky xkcd comics
    match = re.match(r'xkcd-linux$', line)
    if match:
        l = config.get_linux_xkcds()
        if not l:
            irc.send_message("No linux-related comics in list.", channel)
        else:
            irc.send_message(xkcd.get_url(int(l[random.randint(0, len(l)-1)])), channel)
        return True
    match = re.match(r'xkcd-geek$', line)
    if match:
        l = config.get_geek_xkcds()
        if not l:
            irc.send_message("No linux-related comics in list.", channel)
        else:
            irc.send_message(xkcd.get_url(int(l[random.randint(0, len(l)-1)])), channel)
        return True

    # !google <criteria> -- get the URL for a Google search
    match = re.match(r'google\s+([^\s].+)$', line)
    if match:
        irc.send_message("https://encrypted.google.com/#q=" + clean_string(match.group(1)).replace(" ", "+"), channel)
        return True

    # !wikipedia -- get a random wikipedia article
    match = re.match(r'wikipedia$', line)
    if match:
        irc.send_message("http://en.wikipedia.org/wiki/Special:Random", channel)
        return True

    # !wikipedia <article> -- get a link to wikipedia article
    match = re.match(r'wikipedia\s+([^\s].+)$', line)
    if match:
        irc.send_message("http://en.wikipedia.org/wiki/Special:Search?search=" + clean_string(match.group(1)).replace(" ", "+"), channel)
        return True

    # !wikipedia-<lang> -- get a random wikipedia article in a certain language
    match = re.match(r'wikipedia-(\w+)$', line)
    if match:
        irc.send_message("http://" + match.group(1) + ".wikipedia.org/wiki/Special:Random", channel)
        return True

    # !wikipedia-<lang> <article> -- get a link to wikipedia article in a certain language
    match = re.match(r'wikipedia-(\w+)\s+([^\s].+)$', line)
    if match:
        irc.send_message("http://" + match.group(1) + ".wikipedia.org/wiki/Special:Search?search=" + clean_string(match.group(2)).replace(" ", "+"), channel)
        return True

    # !time and !date -- get the current time
    match = re.match(r'(time|date)$', line)
    if match:
        irc.send_message(time.strftime("%A %Y-%m-%d %H:%M:%S %Z"), channel)
        return True
    match = re.match(r'(time|date)\s+([^\s].*)$', line)
    if match:
        irc.send_message(time.strftime(match.group(2)), channel)
        return True

    # !tr[anslate]-<fromlang> <text> -- translate some text to English
    match = re.match(r'tr(anslate)?-([a-z]+)\s+([^\s].*)', line)
    if match:
        irc.send_message(translator.translate(match.group(2), "en", match.group(3)), channel)
        return True

    # !tr[anslate]-<fromlang>-<tolang> <text> -- translate some text
    match = re.match(r'tr(anslate)?-([a-z]+)-([a-z]+)\s+([^\s].*)', line)
    if match:
        irc.send_message(translator.translate(match.group(2), match.group(3), match.group(4)), channel)
        return True

    # !license or !authors or !credits -- display license information and the names of the people who made TuxBot
    match = re.match(r'credits|authors|license$', line)
    if match:
        irc.send_private_notice(license, sender)
        return True

    # !user -- display the username which this python script is running under
    match = re.match(r'user$', line)
    if match:
        irc.send_message(getpass.getuser(), channel)
        return True

    # !version -- get TuxBot's version. Assumes that TuxBot is run from its git repository directory
    match = re.match(r'version$', line)
    if match:
        irc.send_message(version, channel)
        return True

    # !quit -- make TuxBot quit
    match = re.match(r'quit$', line)
    if match:
        if sender not in channel_ops[channel] + channel_voices[channel]:
            irc.send_message(sender + ": Permission denied. You must be +o or +v.", channel)
        else:
            irc.quit(quitmessage)
            sys.exit(0)
        return True

    response = config.get_command_response(clean_string(line))
    if response:
        irc.send_message(response.replace("\\s", sender), channel)
        return

    response = config.get_command_response_command(clean_string(line))
    if response:
        process_command(response, sender, channel)
        return

    return False
        pprint.pprint({
            "IP": self.IP,
            "STACK": self.STACK,
            "NAME_REG": self.NAME_REG,
            "VARs": self.VARS,
            "IP_STACK": self.IP_STACK,
            "VAR_STACK": self.VAR_STACK,
        })

    def Run(self):
        if self.debugger:
            self.PrintState()
        while not self.halted:
            self.Step()
            if self.debugger:
                self.PrintState()


if __name__ == "__main__":
    from samples import sample_prog
    from lexer import tokenize
    from parser import parse
    from translator import translate

    (code, strings) = translate(parse(tokenize(sample_prog)))
    vm = BasicVM()
    vm.Load(code, strings)
    # vm.SetDebugger(True)
    vm.Run()
def main(argv):
    # Get the command-line options given to the program
    try:
        opts, args = getopt.getopt(argv, 'hvdpo:',
                                   ['help', 'verbose', 'pretty-print', 'output=',
                                    'libraries-only', 'detailed', 'tabsize='])
    except getopt.GetoptError:
        usage()
        sys.exit(2)

    debug = False
    verbose = False
    generate_sample = False
    embed_in_html = False
    libraries_only = False
    pretty_print = False
    detailed = False
    tabsize = None
    output_format = ScenejsJavascriptStream

    for opt, arg in opts:
        if opt in ('-h', '--help'):
            print "Translate a Collada file to a JSON formatted SceneJS file"
            usage()
            sys.exit()
        elif opt == '-d':
            debug = True
        elif opt in ('-o', '--output'):
            try:
                output_format = {
                    'json': ScenejsJsonStream,
                    'js': ScenejsJavascriptStream,
                    'binary': ScenejsBinaryStream,
                    'html': ScenejsJavascriptStream,
                    'htmljs': ScenejsJavascriptStream
                }[arg]
                if arg == 'htmljs':
                    generate_sample = True
                elif arg == 'html':
                    generate_sample = True
                    embed_in_html = True
            except KeyError:
                print "Unknown output format '" + arg + "'"
                usage()
                sys.exit(2)
        elif opt in ('-v', '--verbose'):
            verbose = True
        elif opt in ('-p', '--pretty-print'):
            pretty_print = True
        elif opt == '--libraries-only':
            libraries_only = True
        elif opt == '--detailed':
            detailed = True
        else:
            print "Unknown option supplied '" + opt + "'"
            usage()
            sys.exit(2)

    if pretty_print and output_format == ScenejsJavascriptStream:
        output_format = ScenejsPrettyJavascriptStream

    # Check arguments for additional caveats
    if not args:
        print "No input files specified"
        usage()
        sys.exit(2)
    if pretty_print and output_format != ScenejsPrettyJavascriptStream:
        print "Warning: Pretty print is only available with JavaScript output at this time"
    if detailed and generate_sample != True:
        print "Warning: The --detailed flag has no effect when no sample is being generated"
    if tabsize != None and not pretty_print:
        print "Warning: The --tabsize flag has no effect without the --pretty_print flag"
    #if generate_sample and not libraries_only and len(args) > 1:
    #    print "Cannot generate a sample for multiple input files without the --libraries-only option."
    #    usage()
    #    sys.exit(2)

    # Generate an Html file if required
    html_output_stream = open('index.html', 'w') if generate_sample else None
    if verbose and html_output_stream:
        print "Created the sample file 'index.html'"

    # Write Html header if required
    if html_output_stream:
        html_output_stream.write(generate_html_head("SceneJS sample", detailed))

    # Load and translate each file specified
    scene_ids = []
    for filename in args:
        # Check whether the file exists and try to load it into a collada object
        if not os.path.isfile(filename):
            print "'" + filename + "' is not a valid file path."
            sys.exit(2)
        collada_obj = collada.Collada(filename, ignore=[collada.DaeUnsupportedError])

        # Add every scene's id to the list of all scene ids (to be used with sample generation)
        scene_ids.append(collada_obj.scene.id)

        # Generate an output stream
        base_path = os.path.splitext(filename)[0]
        output_stream = None
        if embed_in_html and html_output_stream:
            # Output should continue in the html file
            output_stream = html_output_stream
        else:
            # Create an output file to write the SceneJS scene to
            output_file_name = base_path + '.' + output_format.file_extension
            output_stream = open(output_file_name, 'w')
            if verbose and output_stream:
                print "Created the file '" + output_file_name + "'"

        # Translate and output the file
        if html_output_stream:
            if embed_in_html:
                html_output_stream.write("    <script type='text/javascript'>\n")
            else:
                html_output_stream.write("    <script type='text/javascript' src='" + output_file_name + "'></script>\n")
        serializer = output_format(output_stream)
        #TODO: serializer.tabstring = ' ' * tabsize if tabsize else "    "
        translate(serializer, collada_obj, {'libraries_only': libraries_only}, debug, verbose)
        if html_output_stream and embed_in_html:
            html_output_stream.write("</script>\n\n")
        output_stream.flush()

    if html_output_stream and len(scene_ids) > 0:
        # Todo: support multiple scenes in a sample file... (via a html drop-down)
        html_output_stream.write(generate_html_body(scene_ids[0] if not libraries_only else None))
        html_output_stream.flush()
def gdaimport(filename):
    mod = new.module(filename)
    f = open(filename)
    data = tokenize.untokenize(translator.translate(f.readline))
    exec data in mod.__dict__
    return mod
def get_translated_text(to_language, text):
    return translator.translate(text, to_language)
def create_from_migrated_check(cls, migrated_check):
    alarm = translate(migrated_check)
    if not alarm:
        return None
    return cls(migrated_check, alarm)