def main():
    """Translate two demo parse trees using the pokemon.yaml rule set."""
    rules = loadrules("pokemon.yaml")
    # Build both trees up front, then run each through the translator.
    trees = [
        Tree("(S let me show you my Pokémon)"),
        Tree("(S let me show you my cats)"),
    ]
    for tree in trees:
        translate(tree, rules)
def makeMutantFromSequence(target_protein_seq, base_dna_seq):
    """Build a DNA sequence encoding target_protein_seq, reusing the codon
    from base_dna_seq at every position where the encoded amino acid
    already matches, and picking a random codon otherwise.

    Raises AssertionError if the two protein sequences differ in length,
    or if the constructed DNA does not translate back to the target.
    """
    # list(...) instead of a pass-through comprehension.
    codons = list(translate.codons(base_dna_seq))
    base_prot_seq = translate.translate(base_dna_seq)
    assert len(base_prot_seq) == len(target_protein_seq)
    # Collect codons and join once: the original `mutant_dna_seq += ...`
    # in a loop is quadratic in sequence length.
    pieces = []
    for i, aa in enumerate(target_protein_seq):
        if aa == base_prot_seq[i]:
            pieces.append(codons[i])
        else:
            pieces.append(translate.randomReverseTranslate(aa))
    mutant_dna_seq = ''.join(pieces)
    assert translate.translate(mutant_dna_seq) == target_protein_seq
    return mutant_dna_seq
def main():
    """Translate two demo English parse trees using the german.yaml rules."""
    rules = loadrules("german.yaml")
    trees = [
        ## I like eating / Ich esse gern
        Tree("(S (NP (PRP I)) (VP (VB like) (VBG eating)))"),
        ## I am hungry / Ich habe Hunger
        Tree("(S (NP (PRP I)) (VP (VB am) (JJ hungry)))"),
    ]
    for tree in trees:
        translate(tree, rules)
def test_ende(self):
    """End-to-end EN->DE translation: run the model over 'en-de/in' and
    compare the produced 'en-de/out' against the reference 'en-de/ref2'."""
    # Both files are opened relative to the test directory *before* the
    # chdir below, so the cwd change does not affect them.
    with open('en-de/in', 'r', encoding='utf-8') as in_file, \
            open('en-de/out', 'w', encoding='utf-8') as out_file:
        # The model expects to run from its own directory.
        os.chdir('models/en-de/')
        settings = TranslationSettings()
        settings.input = in_file
        settings.output = out_file
        settings.models = ["model.npz"]
        settings.beam_size = 12
        settings.normalization_alpha = 1.0
        translate(settings=settings)
        os.chdir('../..')
    # Compare after the with-block so out_file is flushed and closed.
    self.outputEqual('en-de/ref2', 'en-de/out')
def process(msg):
    """Dispatch an incoming chat message to the matching service handler.

    Returns a (payload, reply_type) tuple where reply_type is "news" or
    "text".
    """
    # NOTE(review): both startswith checks test the same '#' literal; one
    # was probably meant to be the full-width '#' character — confirm.
    if msg.startswith('#') or msg.startswith('#'):
        pass
    elif msg == u'新生指南':  # "freshman guide"
        return freshman.get_guidance(), "news"
    elif msg == u'新闻' or msg == u'体育' or msg == u'体育新闻':  # news / sports
        return news.getNews(msg), "news"
    elif isinstance(msg, type('string')):
        # Plain byte string: normalise, then fetch news or translate it.
        msg = msg.lower()
        msg = msg.strip()
        if msg == 'bbc world' or msg == 'bbc china' or msg == 'bbc' or msg == 'nba':
            return news.getNews(msg), "news"
        else:
            return translate.translate(msg), "text"
    #elif msg == u'新闻':
    #    return news_info, "text"
    elif msg == u'校车' or msg == u'明天校车':  # campus shuttle timetable
        return xiaoche.get_timetable(msg), "text"
    elif msg == u'摆渡车':  # ferry bus timetable
        return ferrybus.get_timetable(msg), "text"
    elif msg == u'环一' or msg == u'环1':  # loop-line-1 timetable
        return huanyi.get_timetable(), "text"
    elif msg == u'天气':  # weather
        return weather.weather(), "text"
    elif msg == u'空气':  # air quality
        return weather.get_airquality(), "text"
    elif re.match(u"发状态", msg):  # "post status" command
        # Everything after the 3-char command prefix is the status body.
        if msg[3:]:
            return renren.renren_status(msg[3:]), "text"
        else:
            return u"请输入状态内容", "text"
    else:
        return u"无法处理请求,请查看使用说明\n" + help_info + report_info, "text"
def translateExamples(inputDir, outputDir, targetsDir, includedTargets=None):
    """Translate every example file in inputDir into each target language.

    :param inputDir: directory of example source files (".sg" expected)
    :param outputDir: root directory for generated translations
    :param targetsDir: directory of target-language JSON dictionaries
    :param includedTargets: optional whitelist of target base names
    """
    # Load all target dictionaries
    targets = []
    for target in os.listdir(targetsDir):
        # Skip targets not explicitly requested (`not in` idiom fixed).
        if includedTargets and os.path.basename(target).split(".")[0] not in includedTargets:
            continue
        with open(os.path.join(targetsDir, target)) as tFile:
            targets.append(json.load(tFile))
    # Translate each example
    for f in os.listdir(inputDir):
        # Ignore hidden files (startswith is safe on any string).
        if f.startswith("."):
            continue
        # Parse the example file (renamed from `file`, which shadowed the builtin).
        with open(os.path.join(inputDir, f), 'r') as src:
            ast = parse(src.read(), f)
        basename = f[:-len(".sg")]
        # Translate ast to each target language
        for target in targets:
            translation = translate(ast, targetDict=target)
            directory = os.path.join(outputDir, target["OutputDirectoryName"])
            extension = target["FileExtension"]
            # Create directory if it does not exist
            if not os.path.exists(directory):
                os.makedirs(directory)
            # Write translation
            with open(os.path.join(directory, basename + extension), "w") as nf:
                nf.write(translation)
def test004(self):
    """translation with problems"""
    seq = "ATGCatTCTNNNTAAAGA"
    # Translation fails outright on the ambiguous codon...
    self.assertIsNone(translate.translate(seq))
    # ...while the raw variant substitutes the bad amino acid marker.
    self.assertEqual(translate.translateRaw(seq, bad_aa="@"), "MHS@*R")
def test008(self):
    """zero-length nucleotides"""
    empty = ""
    # Every API should degrade gracefully on empty input.
    self.assertEqual(list(translate.codons(empty)), [])
    self.assertEqual(translate.translate(empty), "")
    self.assertEqual(translate.translateRaw(empty), "")
def detectPrintMessage(pktlen, data, timestamp):
    # Sniffer callback: extract Dota 2 chat messages from a raw ethernet
    # frame and print them with a translation. (Python 2 code.)
    # Magical numbers and structure taken from original Dota 2 Translator
    if not data:
        return
    # Ethertype 0x0800 marks an IPv4 packet; payload starts at offset 14.
    if data[12:14]=='\x08\x00':
        decoded = decode_ip_packet(data[14:])
        data = decoded['data']
        # +2 skips past the identifier to the length byte that precedes the name.
        identifierLength = len(ALL_CHAT_IDENTIFIER) + 2
        chatFound = str.find(data, ALL_CHAT_IDENTIFIER)
        type = 0  # 0 = all chat, 1 = team chat
        # If no all chat found
        if (chatFound == -1):
            identifierLength = len(TEAM_CHAT_IDENTIFIER) + 2
            chatFound = str.find(data, TEAM_CHAT_IDENTIFIER)
            type = 1
        # If either all or team chat message found
        if (chatFound != -1):
            indexName = chatFound + identifierLength
            lengthName = ord(data[indexName - 1])  # Get the ascii-value
            # Name and message are each prefixed by a one-byte length.
            indexMessage = indexName + lengthName + 2
            lengthMessage = ord(data[indexMessage - 1])  # Get the ascii-value
            name = data[indexName:indexName+lengthName]
            message = data[indexMessage:indexMessage+lengthMessage]
            # "Terneray operator"~ish
            print "[Allies] " if (type==1) else "", name, ": ", message, " => ", translate.translate(message)
def main():
    """
    Stages
    1. Setup stuff from arguments
    2. Ignore certain lines of code.
    3. Add necessary includes and main function.
    4. Actual translation.
    """
    args = get_args()
    # Guard clauses replace the original elif ladder; each branch returns.
    if not error_check_python(args.file):
        return 1
    if args.ast_tree:
        translate.prettyparseprintfile(args.file)
        return 0
    translated_code = translate.translate(args.file, indent_size=args.indent_size)
    if args.compile_check:
        return 0 if error_check_c(translated_code) else 2
    if args.execute:
        return 0 if error_check_c(translated_code, True) else 2
    if args.memory_check:
        return memory_check(translated_code)
    print(translated_code)
    return 0
def run(args):
    """Assemble or fully compile `source` to args.output per the CLI flags."""
    if args.input_file:
        with open(args.input_file, "r") as input_file:
            source = input_file.read()
    else:
        import sys
        source = sys.stdin.read()
    # Assembly-only mode: skip parsing/translation entirely.
    if args.assemble:
        with open(args.output, "w") as binout:
            write_binary(parse_instructions(source), binout)
        return
    tree = parse(preprocess(source))
    if args.print_ast:
        print_tree(tree)
    if not tree:
        return
    instructions = link(*translate(tree))
    if args.print_assembly:
        print_instructions(instructions)
    # NOTE(review): the output is opened in text mode ("w") yet named
    # `binout` — confirm write_binary really emits text.
    with open(args.output, "w") as binout:
        write_binary(instructions, binout)
def process_translate(i, words):
    """Parse "translate from X into Y" out of `words`, capture a spoken
    phrase, translate it, and speak the translation aloud.

    :param i: index of the trigger word (unused here, kept for the caller)
    :param words: tokenized command words
    """
    from_language = "en"
    into_language = "en"
    # The word following "from"/"into" names the respective language.
    for j, graph in enumerate(words):
        if graph == "from":
            from_language = words[j+1]
        if graph == "into":
            into_language = words[j+1]
    if from_language in rev_lang_dict and into_language in rev_lang_dict:
        espeak("translate from " + from_language + " into " + into_language)
        espeak("say the phrase: ")
        hypos = continuous_listen.listen(from_language)
        if len(hypos) > 0 and not isinstance(hypos, str):
            confidence, speech = hypos[0]['confidence'], str(hypos[0]['utterance'])
            espeak("translating ")
            espeak(speech, language_dict[from_language])
            espeak("into " + into_language)
            # BUG FIX: the original passed the undefined name `sentence`
            # (NameError); the recognised phrase is `speech`.
            translation = translate.translate(speech, language_dict[into_language])
            espeak(translation, language_dict[into_language])
        else:
            espeak("input not understood")
    else:
        espeak("language is not supported")
def shindan(bot, trigger):
    """ .shindan <id> [name] - Do the shindanmaker thing! Will automatically translate japanese shindans to english. (Waifu id: 215100 | Teh_Colt's Drama Gen id: 490953) """
    shindan_id = trigger.group(3)
    # Reject missing, non-numeric, or too-low IDs up front.
    if not shindan_id or not shindan_id.isdigit() or int(shindan_id.strip()) < 2000:
        bot.say('You must specify a shindanmaker ID (Waifu id: 215100 | Teh_Colt\'s Drama Gen id: 490953)')
        return
    name = trigger.nick
    if (trigger.group(4)):
        name = trigger.group(4)
    data = web.urlencode({'u': name, 'from': ''}).encode('ascii')
    url = follow_redirects('http://en.shindanmaker.com/' + shindan_id.strip())
    try:
        soup = get_soup(web.post(url, data))
        result = soup.find(attrs={'class': re.compile("result")})
        if 'en' in url:
            # English page: no translation needed.
            bot.say(result.text.strip())
        else:
            msg, in_lang = translate.translate(result.text.strip())
            if in_lang == 'ja':
                in_lang = 'Japanese'
            bot.say('%s (Translated from %s)' % (msg, in_lang))
    except Exception:
        bot.say('418 I\'m a teapot')
def createAlgsList(self):
    """Populate self.preloadedAlgs with every built-in GDAL/OGR algorithm
    wrapper plus any user-provided GdalScriptAlgorithm python scripts."""
    # First we populate the list of algorithms with those created
    # extending GeoAlgorithm directly (those that execute GDAL
    # using the console)
    self.preloadedAlgs = [nearblack(), information(), warp(), translate(),
                          rgb2pct(), pct2rgb(), merge(), buildvrt(),
                          polygonize(), gdaladdo(), ClipByExtent(),
                          ClipByMask(), contour(), rasterize(), proximity(),
                          sieve(), fillnodata(), ExtractProjection(),
                          gdal2xyz(), hillshade(), slope(), aspect(), tri(),
                          tpi(), roughness(), ColorRelief(), GridInvDist(),
                          GridAverage(), GridNearest(), GridDataMetrics(),
                          gdaltindex(), gdalcalc(), rasterize_over(),
                          # ----- OGR tools -----
                          OgrInfo(), Ogr2Ogr(), Ogr2OgrClip(),
                          Ogr2OgrClipExtent(), Ogr2OgrToPostGis(),
                          Ogr2OgrToPostGisList(), Ogr2OgrPointsOnLines(),
                          Ogr2OgrBuffer(), Ogr2OgrDissolve(),
                          Ogr2OgrOneSideBuffer(),
                          Ogr2OgrTableToPostGisList(), OgrSql(),
                          ]
    # And then we add those that are created as python scripts
    folder = self.scriptsFolder()
    if os.path.exists(folder):
        for descriptionFile in os.listdir(folder):
            if descriptionFile.endswith('py'):
                try:
                    fullpath = os.path.join(self.scriptsFolder(), descriptionFile)
                    alg = GdalScriptAlgorithm(fullpath)
                    self.preloadedAlgs.append(alg)
                except WrongScriptException as e:
                    # Bad scripts are logged, not fatal.
                    ProcessingLog.addToLog(ProcessingLog.LOG_ERROR, e.msg)
def main(connection, info, args):
    """Translates using google translate"""
    # args[1]=source lang, args[2]=target lang, remainder is the text.
    translated = translate.translate(args[1], args[2], " ".join(args[3:]))
    reply = '%s: "%s"' % (info["sender"], partfilter(translated.encode("utf-8")))
    connection.msg(info["channel"], reply)
def verifyWeak():
    # Run every verification case in both translation directions and fail
    # if list_subset rejects the produced translation. (Python 2 code.)
    # NOTE(review): despite the name, this iterates StrongVerificationCases
    # — confirm that is intended.
    for (eng, ita) in StrongVerificationCases:
        for e in eng:
            translation = translate("Eng", "Ita", e)
            if (not list_subset(translation, ita)):
                print "Source: %s" % e
                print "Expected: %s" % ita
                print "Actual: %s" % translation
                raise AssertionError ("Failed test case")
        for i in ita:
            translation = translate("Ita", "Eng", i)
            if (not list_subset(translation, eng)):
                print "Source: %s" % i
                print "Expected: %s" % eng
                print "Actual: %s" % translation
                raise AssertionError ("Failed test case")
def test009(self):
    """odd-length coding sequence"""
    seq = "TCTCGTAAGTACGCAGC"
    codon_list = list(translate.codons(seq))
    # 17 bases -> 5 complete codons; the trailing 2 bases are dropped.
    self.assertEqual(len(codon_list), 5)
    self.assertEqual(codon_list[-1], "GCA")
    self.assertIsNone(translate.translate(seq))
    self.assertEqual(translate.translateRaw(seq), "SRKYA")
def y_translate(text, langc="zh-CN"):
    """Translate `text` via the Yandex Translate API.

    `langc` is a Yandex direction/language code (default "zh-CN").
    """
    # http://api.yandex.com/translate/
    from yandex_translate import YandexTranslate
    # Renamed from `translate` to avoid shadowing the module-level name.
    client = YandexTranslate('Your API key here.')
    return client.translate(text, langc)
def test001(self):
    """reverse translate"""
    trials = 1000
    alphabet = "ACDEFGHIKLMNPQRSTVWY"
    for _ in range(trials):
        # Random 100-residue protein: round-tripping through reverse
        # translation and translation must be the identity.
        prot = "".join(random.choice(alphabet) for _ in range(100))
        gene = translate.reverseTranslate(prot)
        self.assertEqual(translate.translate(gene), prot)
def push(uuid, text="", lang=""):
    """Translate `text` into the conversation partner's language, queue it
    for them, optionally update the sender's language, and return the
    translation with its MIME type."""
    user = state.SW[uuid]
    partner = state.SW[user.other]
    translated = translate.translate(text, user.lang, partner.lang)
    partner.queue.put(translated)
    if lang:
        # Sender announced a (new) language preference.
        user.lang = lang
    return translated, "text/plain"
def construct_language(language):
    """Fetch, translate and enrich the word list for `language`, save the
    resulting dictionary to disk, and return it."""
    print('Getting words')
    words = get_words(language)
    print('Translating words')
    translations = translate(words, language)
    print('Getting English synonyms')
    dictionary = construct_synonyms(translations)
    print('Saving language')
    save_as_text_file(dictionary, language)
    return dictionary
def main():
    """Read fizzbuzz.mil, run it through the lex/parse/translate pipeline,
    and execute the resulting code."""
    from lexer import lexer
    from mlparser import parser
    from translate import translate
    from pprint import pprint  # kept for parity with original (debug aid)
    with open('fizzbuzz.mil') as f:
        src = f.read()
    # Pipeline expressed as a single chained call.
    run(translate(parser(lexer(src)), ()))
def createAlgsList(self):
    """Build self.preloadedAlgs with one instance of every bundled GDAL/OGR
    algorithm wrapper."""
    # First we populate the list of algorithms with those created
    # extending GeoAlgorithm directly (those that execute GDAL
    # using the console)
    self.preloadedAlgs = [
        nearblack(), information(), warp(), translate(), rgb2pct(),
        pct2rgb(), merge(), buildvrt(), polygonize(), gdaladdo(),
        ClipByExtent(), ClipByMask(), contour(), rasterize(), proximity(),
        sieve(), fillnodata(), ExtractProjection(), gdal2xyz(), hillshade(),
        slope(), aspect(), tri(), tpi(), roughness(), ColorRelief(),
        GridInvDist(), GridAverage(), GridNearest(), GridDataMetrics(),
        gdaltindex(), gdalcalc(), rasterize_over(), retile(), gdal2tiles(),
        # ----- OGR tools -----
        OgrInfo(), Ogr2Ogr(), Ogr2OgrClip(), Ogr2OgrClipExtent(),
        Ogr2OgrToPostGis(), Ogr2OgrToPostGisList(), Ogr2OgrPointsOnLines(),
        Ogr2OgrBuffer(), Ogr2OgrDissolve(), Ogr2OgrOneSideBuffer(),
        Ogr2OgrTableToPostGisList(), OgrSql(),
    ]
def main():
    """Run the system (tr) and baseline (bT) translators over all four
    evaluation corpora and persist the results under Results/.

    The original repeated the same four persistResults calls per translator;
    this drives both from one loop, preserving call order exactly.
    """
    nonSemiEI = eC.NonSemiEngToIta
    semiEI = eC.SemiEngToIta
    nonSemiIE = eC.NonSemiItaToEng
    semiIE = eC.SemiItaToEng

    # (translator module, Results/ subdirectory)
    for module, outdir in [(tr, "System"), (bT, "Baseline")]:
        # Bind `module` as a default arg to avoid late-binding surprises.
        toIta = lambda x, m=module: m.translate("Eng", "Ita", x)
        persistResults(toIta, nonSemiEI, "Results/%s/NonSemiEngToIta.txt" % outdir)
        persistResults(toIta, semiEI, "Results/%s/SemiEngToIta.txt" % outdir)
        toEng = lambda x, m=module: m.translate("Ita", "Eng", x)
        persistResults(toEng, nonSemiIE, "Results/%s/NonSemiItaToEng.txt" % outdir)
        persistResults(toEng, semiIE, "Results/%s/SemiItaToEng.txt" % outdir)
def convert_example(self, fname, target, extension):
    """Translate example `fname` into language `target`, writing the result
    as `fname`.`extension`."""
    source_path = self.resolve_path(fname)[1]
    target_path = "%s/targets/%s.json" % (PATH_TO_SHOGUN_META, target)
    destination_path = self.resolve_path(fname + '.' + extension)[1]
    with open(source_path, 'r') as source, \
            open(target_path, 'r') as translator, \
            open(destination_path, 'w') as destination:
        ast = parse.parse(source.read(), source_path)
        language = json.load(translator)
        destination.write(translate.translate(ast, language))
def test_success(self):
    """translate() should unwrap translatedText from the API response."""
    expected = 'hello how are&you'
    # Build the client mock chain from the innermost call outward:
    # client.translations().list(...).execute() -> response dict.
    list_request = Mock()
    list_request.execute.return_value = {'translations': [{'translatedText': expected}]}
    translations = Mock()
    translations.list.return_value = list_request
    mock_client = Mock()
    mock_client.translations.return_value = translations
    self.assertEqual(translate.translate(mock_client, "", "", ""), expected)
def test_translate_two_args(self):
    """Test translate on a function with one argument value, no return
    value, no preconditions, and no postconditions.
    """
    with open('./test/add_function.py') as src:
        source = src.read()
    with open('./test/add_function.py.dafny') as ref:
        expected = ref.read()
    self.assertEqual(translate.translate(source), expected)
def translateReview(token, original): """ Translating the review into hindi. :param token: the token to be used for translation :param original: the original review text in English :return: the review in Hindi """ language = constant.lang try: hindiReview = translate.translate(token, original, language) return hindiReview except: print 'Error!' return 'Error in translation. Please edit manually, if needed'
def translate_local(text, from_lang="zh", to_lang=None, max_nb_lines=None):
    u'''Translate to a different language. Eg: '你好' becomes 'Hello'
    Only installed dictionaries can be used.
    '''
    # Only Chinese sources are supported for now.
    if from_lang != "zh":
        return "(translation from languages other than Chinese : not available yet.)"
    text = translate_module.translate(text)
    if max_nb_lines:
        # Keep only the first max_nb_lines <br>-separated lines; string
        # multiplication replaces the original countdown while-loop.
        regex = ".*?<br/?>" * max_nb_lines
        text = re.sub("(^" + regex + ").*", r"\1", text, flags=re.I)
    return text
def get_hit_regions_new(seq_file, hmmProfile):
    """Translate the sequence in `seq_file`, run hmmsearch against
    `hmmProfile`, and return the parsed domain hits."""
    seq = SeqIO.read(seq_file, 'fasta')
    # NOTE(review): Seq.tostring() is long-deprecated Biopython API; newer
    # versions require str(seq.seq) — confirm the pinned Biopython version.
    global_length = len(seq.seq.tostring())
    # Presumably a six-frame translation (the '_6f' filename suggests so) —
    # confirm against the translate() helper.
    seqs = translate(seq)
    hmm_out_file = '%s.out' % seq_file.split('.')[0]
    all_translated = '%s_6f.faa' % seq_file.split('.')[0]
    SeqIO.write(seqs, open(all_translated, 'w'), 'fasta')
    # --domtblout produces the per-domain table parse_domains reads below.
    sp.call(['hmmsearch', '--domtblout', hmm_out_file, hmmProfile.file_path, all_translated], stdout=sp.PIPE)
    hits = parse_domains(hmm_out_file, seqs, global_length)
    # Clean up the temporary files.
    os.remove(hmm_out_file)
    os.remove(all_translated)
    return hits
continue test_cates.append(train_dataset.app_cate_dict[terms[0]]) test_rates.append(terms[1]) test_src_texts.append(terms[4]) test_tgt_texts.append(terms[5]) test_rate_sents = [train_dataset.rate_vocab.token2id[i] for i in test_rates] test_cate_sents = [train_dataset.cate_vocab.token2id[i] for i in test_cates] test_senti_sents = [train_dataset.senti_vocab.token2id[i] for i in test_sentis] # test_src_texts = [line.split('***')[4] for line in test_fr.readlines()] # test_tgt_texts = [ for line in test_fr.readlines()] print(len(test_src_texts), len(test_tgt_texts)) out_texts = [] for idx, src_text in tqdm(enumerate(test_src_texts)): _, out_text, _ = translate(src_text.strip(), test_rate_sents[idx], test_cate_sents[idx], test_senti_sents[idx], train_dataset, encoder, decoder, max_seq_len=opts.max_seq_len) out_texts.append(src_text.strip() + '***' +test_tgt_texts[idx].strip() +'***' +out_text+'\n') # if idx%100 == 0: # print("already translate to %d th sentence." % idx) # print("> %s" % src_text) # print("= %s" % test_tgt_texts[idx]) # print("< %s" % out_text) # if idx == 10: # break from parameter import checkpoint_path fw_name = checkpoint_path.split('/') dir_name = os.path.join('/research/lyu1/cygao/workspace/data', 'pred', fw_name[-2]) if not os.path.exists(dir_name): os.makedirs(dir_name) with open(os.path.join(dir_name, fw_name[-1].strip('.pt')), 'w') as f:
# If word exists in HSWN if HSWN.searchHSWN(word) != 'NF': wordPol = HSWN.searchHSWN(word) ####### HANDLE MULTIPLIER wordPol = wordPol * mult totalPol += wordPol # If word not in HSWN else: #print 'yes' #pass if typ in ['NN', 'VB', 'JJ', 'RB']: inEn = translate.translate(word) pol = moduleeng.polarity(inEn, typ) if pol != 'NF': totalPol += pol print 'yes' writeDoc.write(str(sno) + ',') if totalPol > 0.1: writeDoc.write('1\n') elif totalPol < -0.1: writeDoc.write('-1\n') else: writeDoc.write('0\n') sno += 1
from flask import Flask, request from predict import predicter import json from flask_cors import CORS import pandas as pd from translate import translate from treatment import csv2str df = pd.read_csv('./treatment.csv') doctor = csv2str(df) app = Flask(__name__) CORS(app) fromLang = 'zh' toLang = 'en' cols = ['symptoms', 'causes', 'treatment'] translator = translate() handler = predicter(translator) with open('./stopwords.txt', encoding='utf8') as f: stopW = f.read().split('\n') # filt(f1.read()) @app.route('/diagnosis', methods=['POST']) def diagnosis(): symptom = request.form['symptom_discription'] res = handler.go(symptom, fromLang, toLang) doctor_advice = doctor.go(cols, res[0]) doctor_advice = translator.go(sentence="$".join(doctor_advice), fromLang='en', toLang='zh').split('$') return json.dumps( { 'status': 'success',
from dictionary import list_definitions
from exceptions import DictionaryException, TranslateException, ApiException
from translate import translate

if __name__ == "__main__":
    # Interactive menu loop; only runs when no CLI arguments were given.
    # NOTE(review): the argv branch has no visible else — arguments are
    # presumably handled elsewhere; confirm.
    if not sys.argv[1:]:
        while True:
            try:
                text = input("""Select the option you like to run: 1) Find definitions 2) Translate\n""")
                if text == "1":
                    list_definitions()
                    break
                elif text == "2":
                    print(translate())
                    break
                else:
                    print("Invalid option. Please try again.")
            except (DictionaryException, TranslateException) as e:
                # Known domain errors: show the message and re-prompt.
                print(e.message)
            except ApiException:
                print(
                    "There was an error with the API call, please fix it ASAP!"
                )
                raise
            except KeyboardInterrupt:
                print("\nGoodbye!")
                break
def tarjima(message):
    """Reply to an incoming Telegram message with its translation."""
    tarjimonbot.reply_to(message, translate(message.text))
# Source and target language codes for the subtitle pipeline.
origin ='es'
translation = 'en'
if __name__ == '__main__':
    # Required on Windows when the app is frozen into an executable.
    multiprocessing.freeze_support()
    x = sb.subtitles()
    audio, rate = x.getaudio(source)
    del x
    file_name = os.path.basename(os.path.splitext(source)[0])
    print(file_name)
    # Working directory named after the source file (sans extension).
    path = os.path.splitext(source)[0]+''
    if not os.path.exists(path):
        os.makedirs(path)
    # Copy the extracted audio into the working directory (Windows paths).
    shutil.copy(audio, path+'\\'+file_name+'.wav')
    # Split the track (vocal separation); result lands in a subfolder.
    y = sp.splitfiles(path, file_name+'.wav')
    del y
    x = sb.subtitles()
    audio, rate = x.getaudio(path+'\\'+file_name+'\\vocals.wav')
    #winsound.PlaySound(audio, winsound.SND_FILENAME)
    x.generate_subtitles(path, origin, 0, audio_filename=audio, audio_rate=rate, name=file_name)
    del x
    print('\n')
    # Finally translate the generated subtitles into the target language.
    tr.translate(translation, os.path.splitext(path+'\\'+file_name)[0], origin)
def compile(pyPattern):
    """Convert a Python regex pattern into its JavaScript counterpart."""
    js_pattern, js_tokens, js_flags, named_groups = translate(pyPattern)
    return PyRegExp(js_pattern, js_tokens, js_flags, named_groups)
# Fold the time series on the candidate frequency and bin the amplitudes.
# (Python 2 code.)
print ' '
# Convert the folding frequency (micro-Hz) to a period in days.
tohz = foldfreq * 1e-6
tos = 1 / tohz
foldper = tos / 86400
binnum = 1000
binsize = foldper / binnum
phasedtimearray = np.zeros(len(times))
finalampls = np.zeros(binnum)
amplcounts = np.zeros(binnum)
for i, val in enumerate(times):
    phasedtime = val % foldper
    # Rescale the phased time from [0, foldper) onto [0, 1).
    newphasedtime = tr.translate(phasedtime, 0, foldper, 0, 1)
    phasedtimearray[i] = newphasedtime
    bindex = int((phasedtime - (phasedtime % binsize)) / binsize - 1)
    finalampls[bindex] += ampls[i]
    amplcounts[bindex] += 1
# Mean amplitude per bin; empty bins become NaN and are removed below.
finalampls = np.divide(finalampls, amplcounts)
finaltimes = np.histogram(phasedtimearray, bins=binnum - 1, range=(0, 1))
nanlist = []
for i, val in enumerate(finalampls):
    if np.isnan(val) == True:
        nanlist.append(i)
phasetime = np.delete(finaltimes[1], nanlist)
# CLI options: Muscle binary path, optional translation, output file.
parser.add_argument("-p", "--path", dest="muscle_path", default=const_default_muscle_exepath, help="path to Muscle binary")
parser.add_argument("-t", "--translate", dest="translate", action="store_true", default=False, help="translate the input sequences?")
parser.add_argument("-o", "--out", dest="out_fname", default=None, help="output filename")
options = parser.parse_args()
outs = util.OutStreams()
# Write either to the requested file or to stdout.
if not options.out_fname is None:
    fname = os.path.expanduser(options.out_fname)
    #print fname
    outf = open(fname,'w')
    outs.addStream(outf)
else:
    outs.addStream(sys.stdout)
(headers, seqs) = biofile.readFASTA(open(options.in_fname,'r'))
seqs_to_align = seqs
if options.translate:
    # Translate genes to protein and align in protein space...
    seqs_to_align = [translate.translate(s) for s in seqs]
alseqs = alignSequences(seqs_to_align, exepath=options.muscle_path)
#print alseqs
if options.translate:
    # ...then map the protein alignment back onto the nucleotide genes.
    alseqs = [alignGeneFromProtein(g, s) for (g,s) in zip(seqs,alseqs)]
for (h,s) in zip(headers,alseqs):
    outs.write(">{}\n{}\n".format(h,s))
if not options.out_fname is None:
    outf.close()
from build_section import build_section
from firebase_config import config

if __name__ == '__main__':
    # Scrape the Polish COVID-restrictions page, translate each section,
    # and store the result in Firebase under "PL".
    firebase = pyrebase.initialize_app(config)
    db = firebase.database()
    page = urllib.request.urlopen('https://www.gov.pl/web/koronawirus/aktualne-zasady-i-ograniczenia').read()
    soup = BeautifulSoup(page, "html.parser")
    content = soup.find('div', {'class': 'editor-content'})
    sections = content.find_all(['h3', 'div'])
    sectionsIterator = iter(sections)
    # `sections` is reused as the output list of built section records.
    sections = []
    counter = 0
    # Consume tags pairwise: each h3 header is followed by its description
    # div. NOTE(review): assumes a strictly alternating, even-length tag
    # sequence — a missing description raises StopIteration here; confirm.
    for current in sectionsIterator:
        headerTag = current
        descriptionTag = next(sectionsIterator)
        sections += [build_section(
            counter,
            translate("pl", headerTag),
            translate("pl", descriptionTag)
        )]
        counter += 1
    results = db.child("PL").set(sections)
def compute(self, send_calculation, callback, computed_args):
    """Instantiate the stored templates with `computed_args` and submit the
    resulting program for calculation via `send_calculation`."""
    program = translate(
        template_source=self.template_source,
        template_target=self.template_target,
        args=computed_args,
    )
    send_calculation(callback, program)
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2011 timger # +Author timger # +Gtalk&Email [email protected] # +Msn [email protected] # +Weibo @timger http://t.sina.com/zhanghaibo # +twitter @yishenggudou http://twitter.com/yishenggudou # Licensed under the MIT License, Version 2.0 (the "License"); from translate import translate import sys from api import API translate = translate() def search(keyword): a = API(keyword) rst = a.load() _ = u'\r\n\r\n'.join([ u'CMD example:{0}\r\ninfo:{1}\r\n\r\n'.format(i['command'], i['summary']) for i in rst[:10] ]) print _ return _.encode('utf-8') if __name__ == "__main__": kw = sys.argv[1] print search(kw)
def content_proc(bot, update):
    """Translate the incoming message into the user's saved target language,
    reply with the result, and end the conversation.

    The target language code is read from translate/<user_id>.txt.
    """
    content = update.message.text
    # Use a context manager so the language file is closed; the original
    # open(...).read() leaked the file handle.
    with open('translate/' + str(update.message.from_user.id) + '.txt', 'r') as lang_file:
        lang = lang_file.read()
    update.message.reply_text(translate.translate(q=content, toLang=lang))
    return ConversationHandler.END
def translate_filter(key, value, _format, _):
    # Pandoc JSON-filter callback: translates textual node content, with
    # special handling for admonition paragraphs, links, headers and soft
    # breaks. Returns a replacement node, or None to keep the original.
    if key not in ['Space', 'Str']:
        debug(key, value)
    try:
        cls = getattr(pandocfilters, key)
    except AttributeError:
        # Not a constructor we know how to rebuild; leave the node as-is.
        return
    if key == 'Para' and value:
        marker = value[0].get('c')
        if isinstance(marker, str) and marker.startswith('!!!') and len(value) > 2:
            # Admonition case
            if marker != '!!!':
                # Lost space after !!! case
                value.insert(1, pandocfilters.Str(marker[3:]))
                value.insert(1, pandocfilters.Space())
                value[0]['c'] = '!!!'
            # Split the paragraph at the first SoftBreak: admonition head
            # vs. the remaining (indented) body.
            admonition_value = []
            remaining_para_value = []
            in_admonition = True
            break_value = [pandocfilters.LineBreak(), pandocfilters.Str(' ' * 4)]
            for item in value:
                if in_admonition:
                    if item.get('t') == 'SoftBreak':
                        in_admonition = False
                    else:
                        admonition_value.append(item)
                else:
                    if item.get('t') == 'SoftBreak':
                        remaining_para_value += break_value
                    else:
                        remaining_para_value.append(item)
            if admonition_value[-1].get('t') == 'Quoted':
                # Already-quoted admonition title: re-wrap with plain quotes.
                text = process_sentence(admonition_value[-1]['c'][-1])
                text[0]['c'] = '"' + text[0]['c']
                text[-1]['c'] = text[-1]['c'] + '"'
                admonition_value.pop(-1)
                admonition_value += text
            else:
                # Unquoted title: translate it and append as a quoted Str.
                text = admonition_value[-1].get('c')
                if text:
                    text = translate.translate(text[0].upper() + text[1:])
                    admonition_value.append(pandocfilters.Space())
                    admonition_value.append(pandocfilters.Str(f'"{text}"'))
            return cls(admonition_value + break_value + process_sentence(remaining_para_value))
        else:
            return cls(process_sentence(value))
    elif key == 'Plain' or key == 'Strong' or key == 'Emph':
        return cls(process_sentence(value))
    elif key == 'Link':
        try:
            # Plain links case
            if value[2][0] == value[1][0].get('c'):
                return pandocfilters.Str(value[2][0])
        except IndexError:
            pass
        value[1] = process_sentence(value[1])
        href = value[2][0]
        # Rewrite relative .md links so they resolve from the translated
        # document's location.
        if not (href.startswith('http') or href.startswith('#')):
            anchor = None
            attempts = 10  # NOTE(review): unused here — leftover? confirm.
            if '#' in href:
                href, anchor = href.split('#', 1)
            if href.endswith('.md') and not href.startswith('/'):
                # Two-letter path components are language codes.
                parts = [part for part in os.environ['INPUT'].split('/') if len(part) == 2]
                lang = parts[-1]
                script_path = os.path.dirname(__file__)
                base_path = os.path.abspath(f'{script_path}/../../{lang}')
                href = os.path.join(
                    os.path.relpath(base_path, os.path.dirname(os.environ['INPUT'])),
                    os.path.relpath(href, base_path)
                )
            if anchor:
                href = f'{href}#{anchor}'
            value[2][0] = href
        return cls(*value)
    elif key == 'Header':
        if value[1][0].islower() and '_' not in value[1][0]:
            # Preserve some manually specified anchors
            value[1][0] = slugify.slugify(value[1][0], separator='-', word_boundary=True, save_order=True)
        # TODO: title case header in en
        value[2] = process_sentence(value[2], is_header=True)
        return cls(*value)
    elif key == 'SoftBreak':
        return pandocfilters.LineBreak()
    return
from image_to_text import image_to_text
from translate import translate
from replace import replace_text
import time
import cv2

# OCR an image, translate the recognised sentences into Spanish, and paint
# the translations back over the original text boxes.
startTime = time.time()
img = cv2.imread("images\\test4.png")
print("load time: ", str(round(time.time() - startTime, 3)), "s")
# English OCR: returns the sentences plus their bounding-box coordinates.
sentences, box_coor = image_to_text(img, "eng")
print('box coor: ', str(box_coor))
translations = translate(sentences, 'es')
print(translations)
# Overwrite the original text regions with the translated text (in place).
replace_text(box_coor, translations, img)
print("run time: ", str(round(time.time() - startTime, 3)), "s")
cv2.imshow("img", img)
# Block until a key is pressed so the result window stays open.
cv2.waitKey(0)
import time
import translate
from patternMeUpDaddy import barToCommands

# Build a two-pattern command bar at 120 BPM and translate it into a
# playable buffer. NOTE: `comamnds` is a typo preserved from the original.
comamnds = barToCommands(["lrAlternate", "udAlternate"], [[1, -1, 1], [1, -1, -1]], 120)
starttime = time.time()
buffer = translate.translate(comamnds, starttime, 1, 1, 2)
print(buffer)
def converter():
    """Render the converter page with the protein translation of the `dna`
    query parameter (empty string when absent)."""
    dna = request.args.get("dna", "")
    return render_template("converter.html", dna=dna, protein=translate(dna))
# Rebuild the trained classifier from its serialized architecture and
# weights, then assemble the held-out test features/targets.
json_file = open('classifier.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
classifier = model_from_json(loaded_model_json)
# load weights into new model
classifier.load_weights("classifier.h5")
print("Loaded model from disk")
dataset = pd.read_csv('data/test/dataset.csv')
# The last 281 rows form the test split.
img_ids_test = dataset.tail(281)['file'].values
target_test = dataset.tail(281)['severity'].values
feature_test = []
# Map raw severity labels to model targets.
target_test = translate(target_test)
for index, img_id in np.ndenumerate(img_ids_test):
    img = misc.imread(img_id)
    # Drop images whose height is not the expected 460 px.
    # NOTE(review): np.delete while enumerating shifts later indices —
    # confirm this filtering removes the intended rows.
    if img.shape[0] != 460:
        target_test = np.delete(target_test, index)
        img_ids_test = np.delete(img_ids_test, index)
    else:
        feature_test.append(np.array([img]))
copy_replace(img_ids_test)
feature_test = np.concatenate(feature_test, axis=0)
def main(test_src_path, test_trg_path):
    """Load a trained seq2seq checkpoint and translate the test set,
    printing the reference and model output for inspection."""
    checkpoint = torch.load(params.MODEL_PATH)
    SRC = checkpoint["SRC"]
    TRG = checkpoint["TRG"]
    # Rebuild the model matching the configured architecture type.
    if params.MODEL_TYPE == 0:
        enc = base_model.Encoder(len(SRC.vocab), params.ENC_EMB_DIM, params.HID_DIM, params.N_LAYERS, params.ENC_DROPOUT, params.BID)
        dec = base_model.Decoder(len(TRG.vocab), params.ENC_EMB_DIM, params.HID_DIM, params.N_LAYERS, params.DEC_DROPOUT, params.BID)
        model = base_model.Seq2Seq(enc, dec, params.DEVICE).to(params.DEVICE)
    elif params.MODEL_TYPE == 1:
        attn = atten_model.Atten(params.HID_DIM, params.HID_DIM, params.N_LAYERS, bid=params.BID)
        enc = atten_model.Encoder(len(SRC.vocab), params.ENC_EMB_DIM, params.HID_DIM, params.N_LAYERS, params.ENC_DROPOUT, bid=params.BID)
        dec = atten_model.Decoder(len(TRG.vocab), params.DEC_EMB_DIM, params.HID_DIM, params.HID_DIM, params.N_LAYERS, attn, bid=params.BID)
        model = atten_model.Seq2Seq(enc, dec, params.DEVICE).to(params.DEVICE)
    elif params.MODEL_TYPE == 2:
        # NOTE(review): falls through with `model` unbound — confirm.
        pass
    else:
        print("params.MODEL_TYPE error")
        exit(1)
    # Read src and trg sentences from the test files.
    test_src_data = get_data(test_src_path)
    test_trg_data = get_data(test_trg_path)
    assert len(test_src_data) == len(test_trg_data), "测试src数据和trg数据不匹配"
    trg = torch.from_numpy(np.array([TRG.vocab.stoi['<sos>'] ]))  # trg [1]; <sos> is the start marker
    trg = trg.unsqueeze(0)  # trg [bs, seq_len]=[1, 1]
    print("trg_size ", trg.size())
    trg_eos_idx = TRG.vocab.stoi['<eos>']
    total_bleu = 0.0  # NOTE(review): never accumulated below — confirm.
    for idx, src in enumerate(test_src_data):
        src = SRC.preprocess(src)
        # print(src)
        src = SRC.process([src], device=None)  # src [seq_len, 1]
        # print(src.size())
        src = src.permute([1, 0])  # src [1, seq_len]
        # print(src.size())
        outpus = translate(model, src, trg, trg_eos_idx)
        print(test_trg_data[idx])
        # print(TRG.process(test_trg_data[idx]))
        print(
            TRG.process([TRG.preprocess(test_trg_data[idx])]).permute([1, 0]))
        print(outpus.max(2)[1])
        # print(TRG.vocab.itos[5892])
        # NOTE(review): exits after the first sentence — debugging
        # leftover? confirm before running full evaluation.
        exit(0)
def deal(connect, blogs):
    """Ingest scraped blog posts: skip ones already stored, then insert
    feed, photo and (machine-translated) blog records.

    Each `blog` is a (post, author, title, html, url) tuple.
    NOTE(review): `fresh` collects new feed ids but is never returned in
    this view — confirm the caller's expectation.
    """
    fresh = []
    cursor = connect.cursor()
    for blog in blogs:
        post = blog[0]
        author = blog[1]
        title = blog[2]
        html = blog[3]
        url = blog[4]
        feed_id = tool.get_feed_id(url)
        # Skip posts already present in the feed table.
        if cursor.execute('select * from feed where id = %s', (feed_id, )) != 0:
            continue
        url = tool.truncate_url(url)
        text = tool.purify_text(html)
        title = tool.purify_text(title)
        snippet = tool.clip_text(text)
        photo_path_id = tool.get_photo_path_id(html)
        author = member.bind(author, feed_id)
        author, title = member.identify(author, title)
        member_id = member.get_id(author)
        romaji = member.get_romaji(author)
        indicator('{} {}'.format(author, title))
        # print('%s, %s, %s, %s, %s' % (feed_id, post, member_id, url, title))
        try:
            cursor.execute(
                'insert into feed values(%s, %s, %s, %s, %s, %s, %s, %s)',
                (feed_id, post, member_id, url, title, snippet, False, 0))
        except Exception as e:
            indicator('feed insert', e)
        else:
            # Commit only on successful insert.
            connect.commit()
            indicator('list save')
        # Extract photos; also rewrites `text` with processed image refs.
        (text, thumbnail, images) = photo.process(
            {
                'feed_id': feed_id,
                'romaji': romaji,
                'post': post,
                'photo_path_id': photo_path_id
            }, text)
        try:
            cursor.executemany(
                'insert into photo values({}, %s, %s, %s, %s, %s, %s)'.format(
                    feed_id), images)
        except Exception as e:
            indicator('photo insert', e)
        else:
            connect.commit()
            indicator('photo save')
        try:
            cursor.execute('update feed set thumbnail = %s where id = %s',
                           (thumbnail, feed_id))
        except Exception as e:
            indicator('feed update', e)
        else:
            connect.commit()
            indicator('cover update')
        # Store both the original and machine-translated title/body.
        title_translated = translate.translate(title)
        text_translated = translate.translate(text)
        try:
            cursor.execute(
                'insert into blog values(%s, %s, %s, %s, %s)',
                (feed_id, text, title_translated, text_translated, html))
        except Exception as e:
            indicator('blog insert', e)
        else:
            connect.commit()
            indicator('blog save')
        fresh.append(feed_id)
    cursor.close()
def get(self, word, n):
    """Look up *word* via the translate module and return the result as text."""
    translated = translate.translate(word, n)
    return str(translated)
# NOTE(review): fragment of a code-generation loop; `line`, `key`, `val`,
# `flag`, `text_section`, `example` and `registers` come from the enclosing
# scope (not visible here).
# Hoist the repeated "opcode" lookup for the current instruction.
opcode = example.instrlist[line - 1].split(', ')[1]
if opcode in ('goto', 'conditional_goto', 'return', 'fn_call_1',
              'fn_call_2', 'label'):
    # Spill every live register back to memory before a control-flow
    # transfer so all variables live in their home locations.
    text_section += "### Flushing -----------\n"
    for reg, var in registers.regdict.items():
        # Bugfix: `var is not ""` tested object identity and only worked by
        # CPython string interning; compare by value instead.
        if var != "":
            text_section += '\t' + "movl " + reg + ", " + var + "\n"
            registers.regdict[reg] = ""
            example.address_descriptor[var] = var
    text_section += "### Flushed ------------\n"
    flag = 1
generated_code, example, registers = translate(
    example.instrlist[line - 1], key, example, registers)
text_section += generated_code
if (flag == 0 and line == val):
    # End of a basic block: flush registers, but array variables are only
    # invalidated (no store emitted), matching the original behaviour.
    text_section += "### Flushing -----------\n"
    for reg, var in registers.regdict.items():
        if var != "":
            if var in example.arr_varz.keys():
                registers.regdict[reg] = ""
                example.address_descriptor[var] = var
            else:
                text_section += '\t' + "movl " + reg + ", " + var + "\n"
                registers.regdict[reg] = ""
                example.address_descriptor[var] = var
    text_section += "### Flushed ------------\n"
# Write the header of the third column and dump every example row
# (3 columns each) into the worksheet, one row per example.
rnd.write(0, 2, 'After')
for i in range(len(examples)):
    for j in range(3):
        rnd.write(i + 1, j, examples[i][j])
########################################################################################################################
# NOTE(review): the matching `except` for this `try` lies beyond this
# fragment — the error handler is not visible here.
try:
    wb = Workbook()
    examples = []
    takeVBA(openfile, 'code.txt')
    translate("code.txt")  # translate the code; the result is written to transpep
    deleteVBA()
    import transpep
    from transpep import *  # import the generated module
    info = GetInfo()  # information about the functions being processed
    more_info = []
    # info[1] presumably holds the function count — TODO confirm; copy the
    # payload entries and drop the two header fields.
    for i in range(info[1] + 2):
        more_info.append(info[i])
    more_info = more_info[2:]
    GenerateFile()
def main():
    """Command-line dispatcher for a desktop-assistant backend.

    Usage: ``python main.py action parameter1:value1,parameter2:value2...``
    The first CLI argument selects an action; the second is parsed into the
    ``parameter`` dict.  ``#`` in values encodes ``/`` (paths) or spaces,
    depending on the action.  Each branch validates its required keys by
    indexing them (KeyError -> ``parameter_error()``, which presumably
    aborts — TODO confirm), builds a shell command or URL, and reports via
    ``build_json``.
    """
    print("PATH IN PYTHON: " + sys.argv[0])
    arguments = sys.argv[1:]
    print(arguments)
    action = arguments[0]
    # Normalize well-known folder names to their capitalized forms.
    arguments[1] = arguments[1].replace("desktop", "Desktop").replace(
        "downloads", "Downloads").replace("music", "Music").replace(
            "documents", "Documents")
    parameter = {}
    try:
        # "k1:v1,k2:v2" -> {'k1': 'v1', 'k2': 'v2'}
        parameter = dict(item.split(":") for item in arguments[1].split(","))
    except:
        print('parameter is empty')
        print(
            'Usage: python main.py action parameter1:value1,parameter2:value2...'
        )
        exit(0)
    print(action)
    print(parameter)
    if action == 'cp':
        # Copy fileName from absSrcPath (default: pwd) to absDestPath.
        try:
            parameter['absSrcPath']
            parameter['absDestPath']
            parameter['fileName']
            parameter['pwd']
        except KeyError:
            parameter_error()
        if parameter['absSrcPath'] == '':
            parameter['absSrcPath'] = parameter['pwd']
        parameter['absSrcPath'] = '/' + parameter['absSrcPath'].replace(
            "#", "/")
        parameter['absDestPath'] = '/' + parameter['absDestPath'].replace(
            "#", "/") + '/'
        parameter[
            'command'] = 'cp ' + parameter['absSrcPath'] + '/' + parameter[
                'fileName'] + ' ' + parameter['absDestPath']
        print('::cp::')
        print(parameter['command'])
        build_json("Command is Being Executed", parameter['command'],
                   command.execute_command(parameter))
    elif action == 'devdocs':
        # Open a devdocs.io search in the browser.
        try:
            parameter['query']
        except KeyError:
            parameter_error()
        parameter['url'] = 'devdocs.io/search?q=' + quote(
            parameter['query'].replace("#", "+"))
        print('::devdocs::')
        print(parameter['url'])
        firefox.open_url(parameter)
    elif action == 'cmd':
        # Run an arbitrary shell command ('#' decodes to space).
        try:
            parameter['command']
        except KeyError:
            parameter_error()
        print('::cmd::')
        parameter['command'] = parameter['command'].replace("#", " ")
        print(parameter['command'])
        build_json("Command is Being Executed", parameter['command'],
                   command.execute_command(parameter))
    elif action == 'ps':
        print('::ps::')
        parameter['command'] = 'bash ../htop.sh'
        print(parameter['command'])
        build_json("Command is Being Executed", 'htop',
                   command.execute_command(parameter))
    elif action == 'kill':
        try:
            parameter['pid']
        except KeyError:
            parameter_error()
        parameter['command'] = 'kill -9 ' + parameter['pid']
        print('::kill::')
        print(parameter['command'])
        command.execute_command(parameter)
        build_json("Killed " + parameter['pid'] + " process.",
                   parameter['command'], "")
    elif action == 'man':
        # NOTE(review): reads parameter['command'] without a presence check,
        # unlike the other branches — a missing key raises an unhandled
        # KeyError here.
        print('::man::')
        parameter['command'] = 'bash ../man.sh ' + parameter['command']
        print(parameter['command'])
        build_json("Command is Being Executed",
                   'man ' + parameter['command'].split(' ')[2],
                   command.execute_command(parameter))
    elif action == 'move':
        # Move fileName from absSrcPath (default: pwd) to absDestPath.
        try:
            parameter['absSrcPath']
            parameter['pwd']
            parameter['fileName']
            parameter['absDestPath']
        except KeyError:
            parameter_error()
        if parameter['absSrcPath'] == '':
            parameter['absSrcPath'] = parameter['pwd']
        parameter['absSrcPath'] = '/' + parameter['absSrcPath'].replace(
            "#", "/")
        parameter['absDestPath'] = '/' + parameter['absDestPath'].replace(
            "#", "/") + '/'
        parameter[
            'command'] = 'mv ' + parameter['absSrcPath'] + '/' + parameter[
                'fileName'] + ' ' + parameter['absDestPath']
        print('::mv::')
        print(parameter['command'])
        build_json("Command is Being Executed", parameter['command'],
                   command.execute_command(parameter))
    # elif action == 'rmdir':
    #     try:
    #         parameter['absPath']
    #         parameter['pwd']
    #         parameter['directoryName']
    #     except KeyError:
    #         parameter_error()
    #     if parameter['absPath'] == "":
    #         parameter['absPath'] = parameter['pwd']
    #     parameter['absPath'] = '/'+parameter['absPath'].replace("#","/")+'/'
    #     parameter['command'] = 'rm -r ' + parameter['absPath'] + parameter['directoryName']
    #     print('::rmdir::')
    #     print(parameter['command'])
    #     build_json("Command is Being Executed", parameter['command'], command.execute_command(parameter))
    # elif action == 'rm':
    #     try:
    #         parameter['absPath']
    #         parameter['pwd']
    #         parameter['fileName']
    #     except KeyError:
    #         parameter_error()
    #     if parameter['absPath'] == "":
    #         parameter['absPath'] = parameter['pwd']
    #     parameter['absPath'] = '/'+parameter['absPath'].replace("#","/")+'/'
    #     parameter['command'] = 'rm ' + parameter['absPath'] + parameter['fileName']
    #     print('::rm::')
    #     print(parameter['command'])
    #     build_json("Command is Being Executed", parameter['command'], command.execute_command(parameter))
    elif action == 'ls':
        # List a directory (defaults to pwd when path is empty).
        try:
            parameter['pwd']
            parameter['path']
        except KeyError:
            parameter_error()
        if parameter['path'] == "":
            parameter['path'] = parameter['pwd']
        print('::ls::')
        print(parameter['pwd'])
        print(parameter['path'])
        parameter['command'] = 'ls /' + parameter['path'].replace("#", "/")
        print(parameter['command'])
        build_json("Command is Being Executed", parameter['command'],
                   command.execute_command(parameter))
    #remove htop as replaced by ps
    # elif action == 'htop':
    #     print('::htop::')
    #     parameter['command'] = "top -n 1 -b"
    #     build_json("Command is Being Executed", parameter['command'], command.execute_command(parameter))
    elif action == 'whatis':
        try:
            parameter['command']
        except KeyError:
            parameter_error()
        parameter['command'] = 'whatis ' + parameter['command']
        # NOTE(review): the command is executed twice (once for the print,
        # once for build_json).
        print(command.execute_command(parameter))
        build_json("Command is Being Executed", parameter['command'],
                   command.execute_command(parameter))
    elif action == 'loadFirefox':
        try:
            parameter['saveFile']
        except KeyError:
            parameter_error()
        print('::loadFirefox::')
        print(parameter['saveFile'])
        firefox.load(parameter)
        build_json("Loading " + parameter['saveFile'] + "Firefox State", "",
                   "")
    elif action == 'showFirefox':
        print('::showFirefox::')
        firefox.show(parameter)
    elif action == 'saveFirefox':
        try:
            parameter['saveFile']
        except KeyError:
            parameter_error()
        print('::saveFirefox::')
        print(parameter['saveFile'])
        build_json("Saving" + parameter['saveFile'] + "Firefox State", "", "")
        firefox.save(parameter)
    elif action == 'url':
        try:
            parameter['url']
        except KeyError:
            parameter_error()
        print('::url::')
        print(parameter['url'])
        firefox.open_url(parameter)
        build_json("Opened " + parameter['url'], "", "")
    elif action == 'fileio':
        # Upload a file and return a sharing link.
        try:
            parameter['pwd']
            parameter['filePath']
        except KeyError:
            parameter_error()
        print('::fileio::')
        parameter['filePath'] = '/' + parameter['pwd'].replace(
            "#", "/") + '/' + parameter['filePath'].replace("#", "/")
        print(parameter['filePath'])
        file.fileio(parameter)
        build_json("File Sharing Link", "", parameter['result'])
    # elif action == "fsearch":
    #     try:
    #         parameter['key']
    #     except KeyError:
    #         parameter_error()
    #     parameter['command'] = "locate " + parameter['key'].replace('#', '\ ')
    #     print('::fsearch::')
    #     print(parameter['command'])
    #     print(command.execute_command(parameter))
    #     build_json("Here are your search results", parameter['command'], command.execute_command(parameter))
    elif action == "gsearch":
        try:
            parameter['query']
            parameter['search-engine']
        except KeyError:
            parameter_error()
        print('::gsearch::')
        parameter['search-string'] = parameter['query']
        search.google(parameter)
    elif action == 'wsearch':
        try:
            parameter['query']
        except KeyError:
            parameter_error()
        print('::wolfsearch::')
        print(parameter['query'])
        search.wolfram(parameter)
    elif action == 'translate':
        # NOTE(review): the key is spelled 'langauge' (sic) — it must match
        # the caller's spelling, so it is documented rather than fixed here.
        try:
            parameter['query']
            parameter['langauge']
        except KeyError:
            parameter_error()
        print('::translate::')
        print(parameter['query'])
        print(parameter['langauge'])
        translate.translate(parameter)
    elif action == 'weather':
        try:
            parameter['location']
        except KeyError:
            parameter_error()
        print('::weather::')
        print(parameter['location'])
        parameter['query'] = 'weather+at+' + parameter['location'].replace(
            " ", "+").replace("#", "+")
        search.wolfram(parameter)
    elif action == 'dictionary':
        try:
            parameter['word']
        except KeyError:
            parameter_error()
        parameter['query'] = 'meaning+of+' + parameter['word'].replace(
            " ", "+").replace("#", "+")
        print('::dictionary::')
        print(parameter['query'])
        search.wolfram(parameter)
    elif action == 'changeJava':
        try:
            parameter['version']
            parameter['open']
        except KeyError:
            parameter_error()
        print('::cmd::')
        parameter['command'] = 'bash ./skills/setJava.sh ' + parameter[
            'version'] + " " + parameter['open']
        print(parameter['command'])
        command.execute_command(parameter)
        build_json("Java Environment variable changed", "", "")
    elif action == 'changePython':
        try:
            parameter['version']
        except KeyError:
            parameter_error()
        print('::cmd::')
        parameter[
            'command'] = 'bash ./skills/setPython.sh ' + parameter['version']
        command.execute_command(parameter)
        build_json("Python Environment variable changed", "", "")
    elif action == 'alarm':
        # Schedule an alarm via a helper script ('#' decodes to escaped space).
        try:
            parameter['hour']
            parameter['min']
            parameter['message']
        except KeyError:
            parameter_error()
        print('::cmd::')
        parameter['command'] = 'bash ./skills/alarm.sh ' + parameter[
            'hour'] + " " + parameter['min'] + " " + parameter[
                'message'].replace("#", "\ ")
        print(parameter['command'])
        command.execute_command(parameter)
        build_json(
            "Alarm set for " + parameter['hour'] + ":" + parameter['min'], "",
            "")
    #Timer
    elif action == 'timer':
        try:
            parameter['message']
            parameter['duration']
        except KeyError:
            parameter_error()
        print('::cmd::')
        parameter['seconds'] = parameter['duration']
        print(parameter['duration'].split(",")[0].split(":")[1])
        print(parameter['duration'].split(",")[1].split(":")[1])
        parameter['command'] = 'bash ./skills/timer.sh ' + parameter[
            'duration'] + " " + parameter['message'].replace("#", "\ ")
        print(parameter['command'])
        command.execute_command(parameter)
        build_json("Timer set for " + parameter['duration'] + " seconds", "",
                   "")
    elif action == 'musicplayer':
        try:
            parameter['absPath']
            parameter['fileName']
        except KeyError:
            parameter_error()
        print('::cmd::')
        parameter[
            'command'] = 'bash ../skills/musicPlayer.sh ' + '/' + parameter[
                'absPath'].replace("#",
                                   "/") + '/' + parameter['fileName'] + ".mp3"
        print(parameter['command'])
        command.execute_command(parameter)
        build_json("Now Playing Music", "", "")
    elif action == 'debug':
        debug()
    else:
        print('Undefined Action')
def outputEqual(output1, output2):
    """given two translation outputs, check that output string is identical
    """
    # NOTE: Python 2 code (print statements). Walks the two files in
    # lockstep; the real assertion is commented out, so this currently only
    # prints each pair of lines for manual inspection.
    for i, (line, line2) in enumerate(
            zip(open(output1).readlines(), open(output2).readlines())):
        #assertEqual(line.strip(), line2.strip())
        print "translate {}".format(i)
        print line
        print line2


# English-German WMT16 system, no dropout
"""
Initialize and customize settings.
"""
translation_settings = TranslationSettings()
translation_settings.models = ["model_test/model.npz-80000"]
#translation_settings.num_processes = 8
translation_settings.beam_width = 10
translation_settings.normalization_alpha = 1.0
translation_settings.verbose = True
translation_settings.n_best = True
translation_settings.suppress_unk = True
# Decode the French input file with the configured model.
# NOTE(review): file handles are opened inline and never closed — consider
# `with` blocks (left unchanged: this is a documentation-only pass).
translate(input_file=open('data/translated/fr.00'),
          output_file=open('data/fr.00.8.out', 'w'),
          translation_settings=translation_settings)
print "everyting ok"
#outputEqual('en-de/ref2','en-de/out')
def show():
    """Translate the entered text between the two selected languages and
    display the result in the output widget."""
    source_lang = checkLang(choice.value)
    text = check.value
    target_lang = checkLang(buttons.value)
    finalresult.value = translate.translate(source_lang, text, target_lang)
import json
import sys

from CPrinter import CPrinter
from PythonPrinter import PythonPrinter
from translate import translate

# Driver: translate a JSON description into C header or Python source,
# choosing the printer from the output file's extension.
if len(sys.argv) != 3:
    print(
        "Usage: exactly two args: input file to translate (json) and output file"
    )
    sys.exit(1)

inpath, outpath = sys.argv[1], sys.argv[2]

with open(inpath, 'r') as fin, open(outpath, 'w') as fout:
    if outpath.endswith('.h'):
        printer = CPrinter(fout)
    elif outpath.endswith('.py'):
        printer = PythonPrinter(fout)
    else:
        raise ValueError(
            "Output file '{}' has an unknown file extension".format(outpath))
    j = json.load(fin)
    translate(j, printer)
else:
    # No output file requested: report to stdout.
    outs.addStream(sys.stdout)
pp = protprop.ProteinProperties()
# Either a single sequence was given on the command line, or sequences are
# loaded from a FASTA file; optionally DNA->protein translate them first.
if not options.sequence is None:
    if options.translate:
        seq = translate.translateRaw(options.sequence)
    else:
        seq = options.sequence
    seq_dict = {"input": seq}
else:
    # Load from FASTA
    seq_dict = biofile.readFASTADict(options.in_fname)
    if options.translate:
        for k in seq_dict.keys():
            seq_dict[k] = translate.translate(seq_dict[k])
outs.write("# {}\n".format(options))
outs.write("pos\taa\tcharge\n")
n_seqs = len(seq_dict.keys())
for (seqid, seq) in seq_dict.items():
    if n_seqs > 1:
        outs.write("# {}\n".format(seqid))
    outs.write("# Total protein charge at pH {} = {}\n".format(
        options.pH, pp.getCharge(seq, options.pH)))
    # Half-width of the sliding window. NOTE(review): under Python 3 this
    # is a float (`/`); presumably Python 2 code or intended as `//` —
    # confirm before porting.
    window_width = (options.window - 1) / 2
    # Run over
    start_pos = 0
    focal_pos = 0
    end_pos = min(start_pos + options.window, len(seq))
    # NOTE(review): loop body continues beyond this fragment.
    while focal_pos < len(seq):
# NOTE(review): fragment — these first lines are the tail of a handler
# whose `try:` opens before this view; `read`, `num` and `js` come from
# that enclosing scope.
res = read()
# The peer acknowledges a successful transfer with a response starting 'S'.
if res[0] != 'S':
    raise Exception('Send data failed')
print('[ {} ] Data Transfer Successful! Len = {} bytes'.format(
    num, len(js)))
except Exception as e:
    print(e)


# Threaded TCP server: each connection is handled in its own thread.
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    pass


if __name__ == "__main__":
    # Load the payload to serve from the editor test file.
    Data = translate.translate('../editor/test2.in')
    socketserver.TCPServer.allow_reuse_address = True
    server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
    ip, port = server.server_address
    # Serve in a daemon thread so the main thread stays free for console input.
    server_thread = threading.Thread(target=server.serve_forever)
    server_thread.daemon = True
    server_thread.start()
    print("Server loop running in thread:", server_thread.name)
    print_interval = 1
    # NOTE(review): loop body continues beyond this fragment.
    while True:
        Ready = False
        s = input()
# NOTE(review): script fragment — `data_outs`, `options`, `util`, `biofile`
# and `translate` come from the surrounding file; Python 2 (`file()` builtin).
data_outs.addStream(sys.stdout)
# Write out parameters
data_outs.write("# Run started {}\n".format(util.timestamp()))
data_outs.write("# Command: {}\n".format(' '.join(sys.argv)))
data_outs.write("# Parameters:\n")
optdict = vars(options)
for (k, v) in optdict.items():
    data_outs.write("#\t{k}: {v}\n".format(k=k, v=v))

# Read input
if not os.path.isfile(options.in_fname):
    raise IOError("# Error: file {} does not exist".format(options.in_fname))
(headers, seqs) = biofile.readFASTA(file(options.in_fname, 'r'))  #, key_fxn=biofile.secondField)
if options.translate_sequences:
    seqs = [translate.translate(s) for s in seqs]
# Drop entries whose translation failed (None), keeping header/sequence pairs.
zhs = [(h, s) for (h, s) in zip(headers, seqs) if s is not None]
all_keys = [biofile.firstField(h) for (h, s) in zhs]
(headers, seqs) = zip(*zhs)
prot_dict = dict([(biofile.firstField(h), s) for (h, s) in zhs])
gene_orf_dict = dict([(biofile.secondOrFirstField(h), biofile.firstField(h))
                      for h in headers])
orf_gene_dict = dict([(v, k) for (k, v) in gene_orf_dict.items()])

# Select which genes to process
query_keys = []
# Bugfix: the original tested `not options.query_orf is []`, which compares
# identity against a fresh list and is therefore always true (and would
# crash with `+= None` when the option is unset). Test truthiness instead.
if options.query_orf:  # Specific ORF(s)
    query_keys += options.query_orf
if options.query_gene:  # Specific gene(s)
    query_keys += [gene_orf_dict[k] for k in options.query_gene]