def test_connection_timeout():
    """If a number is passed to the timeout parameter, both the connection
    and the read timeouts are set to it; with a tiny value the connection
    phase fails first.

    The requests library distinguishes two timeouts (connection and read),
    so either ConnectionError or ReadTimeout may surface.
    """
    # The docstring used to sit as the first statement inside the `with`
    # block; it belongs at function level so test runners can display it.
    with raises((ConnectionError, ReadTimeout)):
        translator = Translator(timeout=0.00001)
        translator.translate('안녕하세요.')
def receive_message():
    """Facebook Messenger webhook endpoint.

    GET requests are Facebook's one-time verification handshake; POST
    requests carry user messages/attachments.  Text commands toggle
    translation options; audio attachments are speech-to-text converted
    and translated before replying.
    """
    print('Method: ' + str(request.method))
    if request.method == 'GET':
        """Before allowing people to message your bot, Facebook has implemented a verify token that confirms all requests that your bot receives came from Facebook."""
        token_sent = request.args.get("hub.verify_token")
        return verify_fb_token(token_sent)
    # If the request was not GET, it must be POST and we can just proceed
    # with sending a message back to the user.
    else:
        # get whatever message a user sent the bot
        output = request.get_json()
        for event in output['entry']:
            messaging = event['messaging']
            for message in messaging:
                msg = message.get('message')
                if msg:
                    # Facebook Messenger ID for the user so we know where to
                    # send the response back to.
                    sender_id = message['sender']['id']
                    print('The sender id is: ')
                    print(sender_id)
                    print('The message received is: ')
                    print(msg)
                    msg_text = msg.get('text')
                    if msg_text:
                        if msg_text.startswith('#start-translate'):
                            # Set default options for this sender in the DB.
                            options.update_options(sender_id, options.default_opts)
                            # NOTE(review): literal uses a backslash line
                            # continuation — internal whitespace may differ
                            # from the original file.
                            response_sent_text = 'Starting translation. All audio attachments\
 will now be converted from speech to text'
                            send_message(sender_id, response_sent_text)
                        elif msg_text.startswith('#options'):
                            # Parse "#options ..." into a src/dest mapping.
                            opts = options.parse_options(msg_text)
                            options.update_options(sender_id, opts)
                            send_message(sender_id, 'Translating FROM: %s, TO: %s' % (opts['src'], opts['dest']))
                    # If the user sends us a GIF, photo, video, or any other
                    # non-text item it arrives as an attachment.
                    attachments = msg.get('attachments')
                    if attachments:
                        attch = attachments[0]
                        attch_type = attch.get('type')
                        if attch_type == 'audio':
                            try:
                                opts = options.get_options(sender_id)
                            except Exception as e:
                                # Fall back to defaults if the sender has no
                                # stored options.
                                print(e)
                                opts = options.default_opts
                            url = attch['payload']['url']
                            src = opts['src']
                            dest = opts['dest']
                            # Speech-to-text, then translate the transcript.
                            response = a2t.convert_audio_from_url(url, src)
                            translator = Translator()
                            converted_response = \
                                translator.translate(response, dest=dest, src=src)
                            send_message(sender_id, converted_response.text)
    return "Message Processed"
def test_bind_multiple_service_urls():
    """A Translator built with several service URLs keeps them all and can
    still both translate and detect."""
    urls = ['translate.google.com', 'translate.google.co.kr']
    t = Translator(service_urls=urls)
    assert t.service_urls == urls
    assert t.translate('test', dest='ko')
    assert t.detect('Hello')
class CommonMetadataTranslator(object):
    """ Class for interfacing with google translator """

    def __init__(self, db_connection):
        # NOTE(review): db_connection is accepted but never stored or used
        # here — confirm whether callers rely on passing it.
        self.translator = Translator()

    def com_meta_translator(self, trans_text, lang_code='en'):
        """Translate trans_text into lang_code (default English) and return
        the googletrans result object."""
        # TODO will need to handle stuff > 15k characters at once
        return self.translator.translate(trans_text, dest=lang_code)
def command_tr(self, event):
    '''Usage: ~tr <languageTo> <phrase>
    The bot will auto-detect the language of the targeted text.'''
    try:
        translator = Translator()
        phrase = event.params.split()
        # phrase[0] is the target language; the rest is the text.
        translated = translator.translate(' '.join(phrase[1:]), dest=phrase[0])
        text = 'Translated from {}: {}'.format(
            translated.src, translated.text.encode('utf-8', 'replace'))
        # Keep the reply within the protocol's message-length limit.
        if len(text) > 397:
            text = text[0:396] + '...'
        self.send_message(event.respond, text)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit are
        # no longer swallowed.
        self.send_message(
            event.respond,
            'Translation unsuccessful! Maybe the service is down?')
def command_trs(self, event):
    '''Usage: It's like ~tr, but more specific.
    Use it by doing ~trs <languageFrom> <languageTo> <phrase>'''
    try:
        translator = Translator()
        phrase = event.params.split()
        # phrase[0] is the source language, phrase[1] the target.
        translated = translator.translate(' '.join(phrase[2:]),
                                          dest=phrase[1], src=phrase[0])
        text = 'Translated from {} to {}: {}'.format(
            translated.src, translated.dest,
            translated.text.encode('utf-8', 'replace'))
        # Keep the reply within the protocol's message-length limit.
        if len(text) > 397:
            text = text[0:396] + '...'
        self.send_message(event.respond, text)
    except Exception:
        # Narrowed from a bare except; the re-raise below is preserved so
        # the error still propagates for logging after the user is notified.
        self.send_message(
            event.respond,
            'Translation unsuccessful! Maybe the service is down?')
        raise
class GoogleTranslator(MyTranslator.Translator):
    """Google-backed implementation of the project's Translator interface.

    Uses the translate.google.cn endpoint.  Language-name <-> code mapping
    relies on the module-level GoogleLangList / GoogleLanguages /
    GoogleCodes tables.
    """

    def __init__(self):
        self.translator = Translator(service_urls=['translate.google.cn'])

    def get_langlist(self):
        # Names of all supported languages.
        return GoogleLangList

    def lang2code(self, lang):
        # Map a human-readable language name to its Google language code.
        return GoogleLanguages[lang]

    def translate(self, string, src, dest):
        # src/dest arrive as human-readable names; convert before calling.
        return self.translator.translate(
            string, src=self.lang2code(src), dest=self.lang2code(dest)).text

    def detect(self, string):
        # Google returns a lowercase code; map it back to the project name.
        return GoogleCodes[self.translator.detect(string).lang.lower()]
def hello(): translator = Translator() input_src=request.form['inputtext'] print input_src #b=mtranslate.translate(name,"hi","utf8") #b=b.encode('utf-8') #print b.encode("utf-8") detected = translator.detect(input_src) if detected.lang == "hi": input_detected = "Hindi" output_detected = "English" destination_code = "en" if detected.lang == "en": input_detected = "English" output_detected = "Hindi" destination_code = "hi" output_src = translator.translate(input_src, src=detected.lang, dest=destination_code) print output_src.text #connect to the microsoft cloud for searching the various corpora of the english language with their corrosponding hindi corpora. #tokens = tokens = nltk.word_tokenize(name) #print tokens #pos_tagging = nltk.pos_tag(tokens) #print pos_tagging #client = support_library.MicrosoftTranslatorClient('machinetranslationmanishrana','mjbYdQ3SyROItdT1gJAXUcIxYlBDaEKs3oKZ8XcFq0w=') #language = langid.classify(name) '''if language[0] == 'en': translated = client.TranslateText(name, 'en', 'hi') input_detected = "English" output_detected = "Hindi" translated = translated.replace('"',"") data_set.append((name, translated)) else: translated = client.TranslateText(name, 'hi', 'en') input_detected = "Hindi" output_detected = "English" translated = translated.replace('"',"") data_set.append((translated, name))''' #output_detected="tst" #translated=name #input_detected="test" return render_template('index.html', name=input_src, translated=output_src.text, input_detected=input_detected,output_detected=output_detected)
def translate(jarvis, s):
    """ translates from one language to another. """

    def _ask_language(prompt, error):
        # Prompt until the input resolves to a googletrans language code:
        # either it already is one (LANGUAGES), or it is a known alias
        # (SPECIAL_CASES) or a full language name (LANGCODES).
        jarvis.say(prompt)
        lang = jarvis.input().lower()
        while True:
            if lang in LANGUAGES:
                return lang
            if lang in SPECIAL_CASES:
                return SPECIAL_CASES[lang]
            if lang in LANGCODES:
                return LANGCODES[lang]
            jarvis.say(error)
            lang = jarvis.input().lower()

    # BUG FIX: the original while-loops only re-prompted; their
    # SPECIAL_CASES/LANGCODES conversion branches were unreachable (the
    # loop was never entered when the input matched those tables), so full
    # language names were passed to googletrans unconverted.
    srcs = _ask_language('\nEnter source language ',
                         "\nInvalid source language\nEnter again")
    des = _ask_language('\nEnter destination language ',
                        "\nInvalid destination language\nEnter again")

    jarvis.say('\nEnter text ')
    tex = jarvis.input()
    translator = Translator()
    result = translator.translate(tex, dest=des, src=srcs)
    result = u"""
[{src}] {original}
    ->
[{dest}] {text}
[pron.] {pronunciation}
""".strip().format(src=result.src, dest=result.dest,
                   original=result.origin, text=result.text,
                   pronunciation=result.pronunciation)
    print(result)
def __init__(self, json_data, coin_data=None):
    """Build a tweet record from Twitter API JSON, attach optional coin
    metadata, and store a Korean translation of the tweet text."""
    self.tweet_id = json_data['id_str']
    self.user_id = json_data['user']['screen_name']
    self.text_en = json_data['text']
    self.text_ko = ''
    self.name_ko = ''
    self.name_en = ''
    self.screen_name = ''
    if coin_data is not None:
        # Copy coin metadata when supplied.
        self.name_ko = coin_data.name_ko
        self.name_en = coin_data.name_en
        self.screen_name = coin_data.screen_name
        self.alias = coin_data.alias
    translator = Translator(service_urls=[
        'translate.google.com',
        'translate.google.co.kr',
    ])
    # Strip emoji/symbols that tend to break the translation request.
    refine_text = self.remove_emoji(self.text_en)
    refine_text = self.remove_text_symbol(refine_text)
    temp_ko_text = ''
    try:
        temp_ko = translator.translate(refine_text, dest='ko')
        temp_ko_text = temp_ko.text
    except Exception as e:
        # Fall back to a marker string ("translation error") on failure.
        print('trans error : %s' % refine_text)
        temp_ko_text = '### 번역 에러 ###'
    self.text_ko = temp_ko_text
    # 'created_at' format e.g. 'Wed Oct 10 20:19:24 +0000 2018'.
    temp_date = json_data['created_at']
    myDatetime = datetime.strptime(temp_date, '%a %b %d %H:%M:%S %z %Y')
    self.created_date = myDatetime.strftime('%Y-%m-%d %H:%M:%S')
def __init__(self, json_data):
    """Build a tweet record from Twitter API JSON and store a Korean
    translation of the tweet text (empty string when translation fails)."""
    self.tweet_id = json_data['id_str']
    self.user_id = json_data['user']['screen_name']
    self.en_text = json_data['text']
    self.ko_text = ''
    translator = Translator(service_urls=[
        'translate.google.com',
        'translate.google.co.kr',
    ])
    # Strip emoji/symbols that tend to break the translation request.
    refine_text = self.remove_emoji(self.en_text)
    refine_text = self.remove_text_symbol(refine_text)
    temp_ko_text = ''
    try:
        # BUG FIX: extract .text inside the try.  Previously the exception
        # path left a plain str in temp_ko_text while the unconditional
        # `.text` access below raised AttributeError whenever the
        # translation request failed.
        temp_ko_text = translator.translate(refine_text, dest='ko').text
    except Exception as e:
        print(refine_text)
    self.ko_text = temp_ko_text
    # 'created_at' format e.g. 'Wed Oct 10 20:19:24 +0000 2018'.
    temp_date = json_data['created_at']
    myDatetime = datetime.strptime(temp_date, '%a %b %d %H:%M:%S %z %Y')
    self.created_date = myDatetime.strftime('%Y-%m-%d %H:%M:%S')
def googleApi(source_text):
    """Translate source_text with googletrans defaults (auto-detected
    source, English destination) and return the translated string."""
    return Translator().translate(source_text).text
from googletrans import Translator

# Read the text and the target language from stdin, translate, and report
# both the detected source language and the result.
translator = Translator()
text = input()
to_lang = input()
result = translator.translate(text, dest=to_lang)
print("This is " + result.src + " language")
print("It says:" + result.text)
from googletrans import Translator

# This script is intended to translate a text to multiple languages which
# can be used to improve search results.
text = 'How to convert some text to multiple languages'

destination_languages = {
    'Spanish': 'es',
    'Simplified Chinese': 'zh-CN',
    'Italian': 'it',
    'Hindi': 'hi',
    'Mongolian': 'mn',
    'Russian': 'ru',
    'Ukrainian': 'uk',
    'French': 'fr',
    'Indonesian': 'id',
    'Japanese': 'ja'
}

translator = Translator()
# One request per target language; print each translation as it arrives.
for code in destination_languages.values():
    print(translator.translate(text, dest=code).text)
# Plot the word-frequency bar chart for the most frequent words.
n = range(len(dictshow))
plt.bar(n, dictshow.values(), align='center')
plt.xticks(n, dictshow.keys(), rotation=45)
plt.title("Erdogan Victory Speech Most Frequent Words")
plt.tight_layout()
plt.savefig("Erdogan Victory Speech MFW.png", transparent=True, dpi=1000)

# Translating the most frequent words from Turkish into English.
from googletrans import Translator
translator = Translator()
eng_FQwords = []
for fqword in list(dictshow):
    trs = translator.translate(fqword, src='tr', dest='en')
    eng_FQwords.append(trs.text)
print(eng_FQwords)
# Drop the second entry (a known duplicate after translation).
del eng_FQwords[1]


# Removing duplicates - happens due to differences in the language.
# NOTE(review): this definition is truncated in this chunk — the remainder
# of its body lies outside this view.
def remove_duplicates(values):
    output = []
    seen = set()
    for value in values:
        # If value has not been encountered yet,
        # ... add it to both list and set.
        if value not in seen:
print("Delete Successful!") # sys.exit() # format is ("TranslateThis! language"), ex: "TranslateThis! ko" to translate to Korean if re.search("TranslateThis!", comment.body, re.IGNORECASE): needing_translation = comment.parent() comment_as_list = comment.body.split() type_of_parent = comment_vs_submission(needing_translation) if type_of_parent == "comment": parent = bot.comment(needing_translation) language_of_text = translator.detect(parent.body).lang translation = translator.translate(parent.body, dest=comment_as_list[1]) else: parent = bot.submission(needing_translation) language_of_text = translator.detect(parent.title).lang translation = translator.translate(parent.title, dest=comment_as_list[1]) print("Language of the text: ", language_of_text) print("Language to be translated into: ", comment_as_list[1]) print("Input: ", translation.origin) print("Output: ", translation.text) now = str(datetime.now()) entry_list = [] entry_list.append(now) entry_list.append("scriptBotTesting") entry_list.append(language_of_text)
def toFrench(text):
    """Strip the 'translate' / 'to French' command words from *text* and
    return its French translation (source assumed English)."""
    stripped = text.replace('translate', '').replace('to French', '')
    result = Translator().translate(stripped, src='en', dest='fr')
    return (result.text)
def toHindi(text):
    """Strip the 'translate' / 'to Hindi' command words from *text* and
    return its Hindi translation (source assumed English)."""
    stripped = text.replace('translate', '').replace('to Hindi', '')
    result = Translator().translate(stripped, src='en', dest='hi')
    return (result.text)
# # TRANSLATE
from googletrans import Translator

trans = Translator()
# word = input('Word:')
t = trans.translate('bom dizzza para voce', src='pt', dest='en')

# Show what was detected and what was produced.
print(f'Source: {t.src}')
print(f'Destination: {t.dest}')
print(f'{t.origin} -> {t.text}')

# LIST SUPPORTED LANGUAGES
# from googletrans import LANGUAGES
# for lang in LANGUAGES:
#     print(f'{lang} - {LANGUAGES[lang]}')

# LIST POSSIBLE MISTAKES AND TRANSLATIONS
mistakes = t.extra_data['possible-mistakes']
alternatives = t.extra_data['possible-translations']
print(f'Possible Mistakes: {mistakes}')
print(f'Possible Translations: {alternatives}')
import json
from googletrans import Translator
from googletrans import LANGUAGES

print("Hello")

trans = Translator()

# Load the lyric records, translate the three artist-name fields of every
# record from English to Sinhala, and write the result back out.
with open("lyrics.json", "r") as read_file:
    data = json.load(read_file)

for item in data:
    for field in ('artist', 'lyricsArtist', 'musicArtist'):
        original = item[field]
        if original is not None:
            item[field] = trans.translate(original, src='en', dest='si').text
    print(item['artist'], item['lyricsArtist'], item['musicArtist'])

with open("proccessed_lyrics.json", "w") as write_file:
    json.dump(data, write_file)
except Exception as e: client.sendText(receiver, str(e)) elif 'apakah ' in msg.text.lower(): try: txt = ['iya','tidak','bisa jadi'] isi = random.choice(txt) tts = gTTS(text=isi, lang='id', slow=False) tts.save('temp2.mp3') client.sendAudio(receiver, 'temp2.mp3') except Exception as e: client.sendText(receiver, str(e)) elif "sytr:" in msg.text: try: isi = msg.text.split(":") translator = Translator() hasil = translator.translate(isi[2], dest=isi[1]) A = hasil.text tts = gTTS(text=A, lang=isi[1], slow=False) tts.save('temp3.mp3') client.sendAudio(receiver, 'temp3.mp3') except Exception as e: client.sendText(receiver, str(e)) elif "tr:" in msg.text: try: isi = msg.text.split(":") translator = Translator() hasil = translator.translate(isi[2], dest=isi[1]) A = hasil.text client.sendText(receiver, str(A)) except Exception as e: client.sendText(receiver, str(e))
def translate(self):
    """Translate the stored word from English to Hindi via googletrans.

    Returns the translated text.  The original computed the translation
    and discarded it, making the method a no-op for callers; returning
    the result is backward compatible (callers ignoring the return value
    are unaffected).
    """
    translator = Translator()
    return translator.translate(self._word, dest="hi", src="en").text
class apps(commands.Cog):
    """Discord cog bundling TV/music/translation/wiki/IMDb lookup commands.

    NOTE(review): this class continues beyond this span with further
    commands (itunes, translate, wikipedia, imdb).
    """

    def __init__(self):
        # Shared clients reused by all commands of this cog.
        self.translator = Translator(service_urls=['translate.googleapis.com'])
        self.Wikipedia = wikipediaapi.Wikipedia('en')
        self.ia = imdb.IMDb()

    @command('movie')
    @cooldown(5)
    async def tv(self, ctx, *args):
        """Look up a TV show on TVmaze and post an info embed."""
        ctx.bot.Parser.require_args(ctx, args)
        data = await ctx.bot.util.get_request(
            f'http://api.tvmaze.com/singlesearch/shows',
            json=True,
            q=' '.join(args))
        if not data:
            raise ctx.bot.util.BasicCommandException("Did not found anything.")
        try:
            # Star string only when a rating exists.
            star = str(
                ':star:' * round(data['rating']['average'])) if data['rating'][
                    'average'] is not None else 'No star rating provided.'
            embed = ctx.bot.Embed(
                ctx,
                title=data['name'],
                url=data['url'],
                desc=ctx.bot.Parser.html_to_markdown(data['summary']),
                fields={
                    'General Information':
                    '**Status: **' + data['status'] +
                    '\n**Premiered at: **' + data['premiered'] +
                    '\n**Type: **' + data['type'] +
                    '\n**Language: **' + data['language'] +
                    '\n**Rating: **' +
                    str(data['rating']['average'] if data['rating']['average']
                        is not None else '`<not available>`') + '\n' + star,
                    'TV Network':
                    data['network']['name'] + ' at ' +
                    data['network']['country']['name'] + ' (' +
                    data['network']['country']['timezone'] + ')',
                    'Genre':
                    str(', '.join(data['genres'])
                        if len(data['genres']) > 0 else 'no genre avaliable'),
                    'Schedule':
                    ', '.join(data['schedule']['days']) + ' at ' +
                    data['schedule']['time']
                },
                image=data['image']['original'])
            await embed.send()
            del embed
        except:
            # Any missing field in the API payload lands here.
            raise ctx.bot.util.BasicCommandException(
                "There was an error on fetching the info.")

    @command('spy,spot,splay,listeningto,sp')
    @cooldown(2)
    async def spotify(self, ctx, *args):
        """Render the target user's current Spotify activity as an image."""
        user = ctx.bot.Parser.parse_user(ctx, args)
        act = [i for i in user.activities if isinstance(i, discord.Spotify)]
        if len(act) == 0:
            raise ctx.bot.util.BasicCommandException(
                f"Sorry, but {user.display_name} is not listening to spotify.")
        await ctx.trigger_typing()
        panel = ctx.bot.Panel(ctx, spotify=act[0])
        await panel.draw()
        await panel.send_as_attachment()
        panel.close()
    @command()
    @cooldown(5)
    async def itunes(self, ctx, *args):
        """Search iTunes for a song and render the chosen hit as a panel."""
        ctx.bot.Parser.require_args(ctx, args)
        await ctx.trigger_typing()
        data = await ctx.bot.util.get_request(
            'https://itunes.apple.com/search',
            json=True,
            raise_errors=True,
            force_json=True,
            term=' '.join(args),
            media='music',
            entity='song',
            limit=10,
            explicit='no')
        if len(data['results']) == 0:
            return await ctx.send('{} | No music found... oop'.format(
                ctx.bot.util.error_emoji))
        choose = ctx.bot.ChooseEmbed(
            ctx,
            data['results'],
            key=(lambda x: "[" + x["trackName"] + "](" + x["trackViewUrl"] + ")"))
        data = await choose.run()
        if not data:
            return
        panel = ctx.bot.Panel(ctx,
                              title=data['trackName'],
                              subtitle=data['artistName'],
                              description=data['primaryGenreName'],
                              icon=data['artworkUrl100'])
        await panel.draw()
        await panel.send_as_attachment()
        panel.close()

    @command('tr,trans')
    @cooldown(5)
    async def translate(self, ctx, *args):
        """Translate text to the language given as the first argument."""
        await ctx.trigger_typing()
        if len(args) > 1:
            try:
                toTrans = ' '.join(args[1:])
                if len(args[0]) > 2:
                    try:
                        _filter = list(
                            filter(lambda x: args[0].lower() in x.lower(),
                                   [LANGUAGES[x] for x in list(LANGUAGES)]))
                        assert len(_filter) > 0
                        # NOTE(review): BUG — _filter is deleted on the next
                        # line and then indexed, so this branch always raises
                        # NameError (swallowed below): full language names
                        # can never resolve to a destination.
                        del _filter
                        destination = _filter[0]
                    except:
                        return None
                else:
                    destination = args[0].lower()
            except:
                raise ctx.bot.util.BasicCommandException(
                    'Please insert a valid language and a text to translate.')
            try:
                translation = self.translator.translate(toTrans[0:1000],
                                                        dest=destination)
                embed = ctx.bot.Embed(
                    ctx,
                    title=
                    f"{LANGUAGES[translation.src]} to {LANGUAGES[translation.dest]}",
                    desc=translation.text[0:1900])
                await embed.send()
                # NOTE(review): _filter may be unbound here (deleted above or
                # never assigned on the 2-letter path), so this del can raise
                # NameError after the embed was already sent.
                del embed, translation, _filter, destination, toTrans
            except Exception as e:
                raise ctx.bot.util.BasicCommandException(
                    f'An error occurred! ```py\n{str(e)}```')
        # NOTE(review): in the collapsed original this raise appears to
        # follow the if-block unconditionally; placement preserved.
        raise ctx.bot.util.BasicCommandException(
            f'Please add a language and a text!')

    @command('wiki')
    @cooldown(5)
    async def wikipedia(self, ctx, *args):
        """Post the summary of the matching Wikipedia page."""
        ctx.bot.Parser.require_args(ctx, args)
        await ctx.trigger_typing()
        page = self.Wikipedia.page(' '.join(args))
        if not page.exists():
            return await ctx.send(content='That page does not exist!')
        embed = ctx.bot.Embed(ctx,
                              title=page.title,
                              url=page.fullurl,
                              desc=page.summary[0:2000])
        return await embed.send()

    @command()
    @cooldown(5)
    async def imdb(self, ctx, *args):
        """Search IMDb for a movie and post a detail embed for the pick."""
        ctx.bot.Parser.require_args(ctx, args)
        await ctx.trigger_typing()
        try:
            query = " ".join(args[1:])
            res = self.ia.search_movie(query)
            choose = ctx.bot.ChooseEmbed(ctx,
                                         res[0:10],
                                         key=(lambda x: x["long imdb title"]))
            movie = await choose.run()
            if not movie:
                return
            await ctx.trigger_typing()
            data = self.ia.get_movie_main(movie.movieID)["data"]
            # Star row only when both votes and rating are present.
            votes = (":star:" * round(data["rating"])
                     ) + f' ({data["rating"]}, {data["votes"]} votes)' if (
                         data.get("votes")
                         and data.get("rating")) else "<data not available>"
            embed = ctx.bot.Embed(
                ctx,
                title=movie["long imdb title"],
                url=self.ia.get_imdbURL(movie),
                image=movie["full-size cover url"],
                fields={
                    "Plot": data["plot outline"].split(".")[0][0:1000],
                    "Movie Ratings": votes,
                    "Directors": ", ".join([
                        i["name"] for i in data["directors"] if i.get("name")
                    ]),
                    "Producers": ", ".join([
                        i["name"] for i in data["producers"] if i.get("name")
                    ]),
                    "Writers": ", ".join(
                        [i["name"] for i in data["writers"] if i.get("name")])
                })
            await embed.send()
            del res, data, embed, votes, choose, movie, query
        except Exception as e:
            raise ctx.bot.util.BasicCommandException(
                "The movie query does not exist.\n" + str(e))
def main():
    """Solutions to a set of list/dict/comprehension exercises."""
    # exe1: build a random list, then derive several lists from it.
    i = random.randint(5, 10)
    l = []
    for x in range(0, i):
        l.append(random.randint(10, 20))
    print(l)
    l1 = [x for x in l if x % 3 == 0]
    print(l1)
    l1 = [x for x in l if x % len(l) == 0]
    print(l1)
    l1 = [x for x in l if x > 12]
    print(l1)
    l1 = [x * x for x in l]
    print(l1)
    l1 = list(set(l))
    print(l1)

    # exe2: name/index dicts plus a French->English translation dict.
    users = ["Mickey", "Minnie", "Donald", "Ariel", "Pluto"]
    french_words = ["Bonjour", "Au revoir", "Bienvenue", "A bientôt"]
    dict1 = {}
    dict2 = {}
    dict3 = {}
    dict4 = {}
    for index, x in enumerate(users):
        dict1[x] = index
        dict2[index] = x
    for index, x in enumerate(sorted(users)):
        dict3[x] = index
    translator = Translator()
    for x in french_words:
        dict4[x] = translator.translate(x, src="fr", dest='en').text
    print(dict1)
    print(dict2)
    print(dict3)
    print(dict4)
    dict1.clear()
    for index, x in enumerate(users):
        dict1[x] = index
        dict2[index] = x

    # exe3
    # 1 Capitalize all of the pet names and print the list
    my_pets = ['sisi', 'bibi', 'titi', 'carla']
    for index, x in enumerate(my_pets):
        my_pets[index] = x.title()
    print(my_pets)

    # 2 Zip the 2 lists into a list of tuples, but sort the numbers from
    # lowest to highest.  Renamed from `zip` — the original shadowed the
    # builtin, and now the builtin does the pairing.
    my_strings = ['a', 'b', 'c', 'd', 'e']
    my_numbers = [5, 4, 3, 2, 1]
    my_numbers.reverse()
    pairs = list(zip(my_strings, my_numbers))
    print(pairs)

    # 3 Filter the high scores.
    # NOTE(review): the original comment said "over 50%" but the code keeps
    # scores >= 80; threshold preserved as written.
    scores = [73, 20, 65, 19, 76, 100, 88]
    high_scores = [x for x in scores if x >= 80]
    print(high_scores)

    # 4 Bonus: combine all of the numbers in my_numbers and scores using
    # reduce.  What is the total?
    x = reduce(do_sum, scores) + reduce(do_sum, my_numbers)
    print(x)
def translator(text):
    """Return *text* translated with googletrans defaults (auto-detected
    source, English destination)."""
    return Translator().translate(text).text
def sentimentCheck(now, trending):
    """Translate stored tweets to English, score their sentiment polarity,
    persist per-tweet polarity in MongoDB (dropping unusable tweets), and
    record aggregate statistics for the given trending topic/timestamp.

    Returns the average polarity across all valid tweets.
    """
    # Set up the MongoDB connection.
    client = cm.connectMongo()
    db = client.twitterCollection
    # Working state: per-tweet translations and polarities.
    translator = Translator()
    polarity_result = {}
    tweet_list = {}
    polarityNegative = {}
    polarityPositive = {}
    count = 0
    # Run the sentiment analysis over every stored tweet.
    for tweet in db.tweets.find():
        # Remove special characters (accent folding).
        tweet_clean = unidecode.unidecode(tweet['text'])
        # Translate to English, then score with TextBlob.
        tweet_list[count] = translator.translate(tweet_clean, dest='en').text
        analysis = tb(tweet_list[count])
        polarity_result[count] = analysis.sentiment.polarity
        # Echo the tweet and its score to the terminal.
        print('Tweet => ' + tweet_list[count])
        print('=========> ' + str(len(tweet_list)) + ' ==> ' + str(polarity_result[count]))
        # Persist the polarity, or delete the tweet if the score is
        # unusable (exactly zero or out of [-1, 1]).
        if polarity_result[count] <= 1 and polarity_result[count] >= -1 and polarity_result[count] != 0:
            db.tweets.update({'_id': tweet['_id']}, {'$set': {'polarity': polarity_result[count]}})
            print('PARCIAL: ' + str(sum(polarity_result.values())))
            if (polarity_result[count] > 0):
                polarityPositive[count] = polarity_result[count]
            else:
                polarityNegative[count] = polarity_result[count]
        else:
            db.tweets.delete_many({'_id': tweet['_id']})
            del(polarity_result[count])
        # Pause between translations so Google does not block us.
        if count % 2 == 0 and count != 0:
            print('Dormindo 2 segundos')
            time.sleep(2)
        if count % 10 == 0 and count != 0:
            print('Dormindo 10 segundos')
            time.sleep(10)
        count += 1

    def average(polAverage):
        # Average polarity rounded to 2 decimal places; 0 for an
        # empty/zero-sum set.
        if (sum(polAverage.values()) != 0):
            polarityAverage = round(sum(polAverage.values()) / len(polAverage), 2)
        else:
            polarityAverage = 0
        return polarityAverage

    # Print the aggregate summary for this trending topic.
    print(' ')
    print('########################################')
    print('## Trending Topic: ' + trending)
    print('## Timestamp: ' + now)
    print('## Total: ' + str(len(polarity_result)) + ' ==> Average: ' + str(average(polarity_result)))
    print('## Positive: ' + str(len(polarityPositive)) + ' ==> Average: ' + str(average(polarityPositive)))
    print('## Negative: ' + str(len(polarityNegative)) + ' ==> Average: ' + str(average(polarityNegative)))
    print('########################################')
    print(' ')
    # Save the analysis result.
    tt.updateTrending(now, trending, str(len(polarity_result)), str(average(polarity_result)), str(len(polarityPositive)), str(average(polarityPositive)), str(len(polarityNegative)), str(average(polarityNegative)))
    # Return the overall average.
    return average(polarity_result)
# import the package
from googletrans import Translator

# Some text in French, stored for translation.
text = '''
Pour souvent, quand sur mon canapé je mens
D'humeur vacante ou songeuse,
Ils clignotent sur cet œil intérieur
Quel est le bonheur de la solitude;
Et puis mon cœur se remplit de plaisir,
Et danse avec les jonquilles.
'''

# One Translator instance for both detection and translation.
translator = Translator()

# First report which language was detected.
lang = translator.detect(text)
print(lang)
print(' ')

# Then translate into English and show the result.
translated = translator.translate(text, dest='en')
print(translated.text)
def toSpanish(text):
    """Strip the 'translate' / 'to Spanish' command words from *text* and
    return its Spanish translation (source assumed English)."""
    stripped = text.replace('translate', '').replace('to Spanish', '')
    result = Translator().translate(stripped, src='en', dest='es')
    return (result.text)
# -*- coding:utf-8 -*-
# Works as expected.
from googletrans import Translator

translator = Translator()  # instantiate the client

# Default destination (English).
print(translator.translate("星期日").text)
# Explicit destination language.
print(translator.translate(text='console', dest='zh-CN').text)
# A longer passage translated to Simplified Chinese.
print(
    translator.translate(
        text=
        "Before you go back to code in your IDE, let us introduce Tabnine Home. This is where you can find all the information about Tabnine installation and configuration.",
        dest='zh-CN').text)
def toItalian(text):
    """Strip the 'translate' / 'to Italian' command words from *text* and
    return its Italian translation (source assumed English)."""
    stripped = text.replace('translate', '').replace('to Italian', '')
    result = Translator().translate(stripped, src='en', dest='it')
    return (result.text)
for page in pages: filename = "page_" + str(image_counter) + ".jpg" # page.save(filename, 'JPEG') image_counter = image_counter + 1 """ print("page finish") filelimit = 336 # PNG images to text for i in range(1, filelimit + 1): strNum = str(i) strNum = strNum.rjust(3, '0') outfile = "knk_text/knk_page_" + strNum + ".txt" outfile_trans = "knk_translated/knk_page_trans_" + strNum + ".txt" f = open(outfile, "w", encoding='utf8') f_trans = open(outfile_trans, "w", encoding='utf8') filename = "pngs/kingBook-ch1to10-" + strNum + ".png" text = str(((pytesseract.image_to_string(Image.open(filename))))) text = text.replace('-\n', '') f.write(text) f.close() # if text is null if(text == ''): f_trans.write('') f_trans.close() continue text_trans = translator.translate(text, src='en', dest='ko') f_trans.write(text_trans.text) f_trans.close()
def new_words_in_article(
        url='https://www.troyhunt.com/the-773-million-record-collection-1-data-reach/'
):
    """Scrape the French text of *url*, build its vocabulary (minus stop
    words and a persisted ignore list), batch-translate it, interactively
    quiz the user word by word, and persist the new/ignored words.

    Answers: 'y' = known (add to ignore list), 'n' = new word, 'q' = stop.
    """
    res = requests.get(url)
    html_page = res.content
    soup = BeautifulSoup(html_page, 'html.parser')
    text = soup.find_all(text=True)
    words = ''
    # Tags whose text content is boilerplate, not article prose.
    blacklist = [
        '[document]',
        'noscript',
        'header',
        'html',
        'meta',
        'head',
        'input',
        'script',
        'style',
        # there may be more elements you don't want, such as "style", etc.
    ]
    for t in text:
        if t.parent.name not in blacklist:
            print("\n\n", t.parent.name, "\n")
            print('{} '.format(t))
            words += '{} '.format(t)
    print("Before ", type(words))
    # Collapse runs of whitespace into single spaces.
    words = words.split()
    words = " ".join(words)
    print(words)
    # Strip punctuation except hyphens and apostrophes, then digits.
    # NOTE(review): `words` is a single string here, so this comprehension
    # iterates characters, not words — it works, but char by char.
    table = str.maketrans(
        '', '', string.punctuation.replace("-", "").replace('\'', ""))
    stripped = [w.translate(table) for w in words]
    stripped = ''.join(c for c in stripped if not c.isdigit())
    print(stripped)
    tokens = nltk.word_tokenize(''.join(stripped))
    words = [w.lower() for w in tokens]
    vocab = sorted(set(words))
    print(vocab, len(vocab))
    # Drop French stop words, then previously-ignored words.
    stop_words = nltk.corpus.stopwords.words('french')
    vocab = [w for w in vocab if not w in stop_words]
    with open('ignore_words.pickle', 'rb') as f:
        ignore_words = pickle.load(f)
    vocab = [w for w in vocab if not w in ignore_words]
    print(len(vocab))
    translator = Translator()
    try:
        # Batch-translate the whole vocabulary in one request.
        translations = translator.translate(vocab, src='fr', dest='en')
        print(
            "\nDo you know these words (type 'y' or 'n' for each).\nIf it's a proper name or similar type 'y' to add to ignore list"
        )
        new_words = {}
        for translation in translations:
            # Only quiz words whose translation actually differs.
            if translation.origin != translation.text.lower():
                print(translation.origin, ' -> ', translation.text, " ")
                ans = input("?")
                if ans.lower() == 'y':
                    ignore_words.append(translation.origin)
                elif ans.lower() == 'n':
                    new_words[translation.origin] = translation.text
                elif ans.lower() == 'q':
                    break
        print("these are the new words: ", new_words)
        with open('ignore_words.pickle', 'wb') as f:
            pickle.dump(ignore_words, f)
        ans = input("save new words and translations to csv file? ")
        if ans == 'y':
            with open('new_words.csv', 'w') as f:
                print("saving to new_words.csv")
                for key in new_words.keys():
                    f.write("%s,%s\n" % (key, new_words[key]))
    except:
        # Translation service failed — fall back to quizzing without
        # translations (NOTE(review): bare except also swallows Ctrl-C).
        print("google translate is tired, need to work manually")
        print(
            "\nDo you know these words (type 'y' or 'n' for each).\nIf it's a proper name or similar type 'y' to add to ignore list"
        )
        new_words = []
        for word in vocab:
            ans = input(word + "?")
            if ans.lower() == 'y':
                ignore_words.append(word)
            elif ans.lower() == 'n':
                new_words.append(word)
            elif ans.lower() == 'q':
                break
        print("these are the new words: ", new_words)
        with open('ignore_words.pickle', 'wb') as f:
            pickle.dump(ignore_words, f)
        ans = input("save new words and translations to csv file? ")
        if ans == 'y':
            with open('new_words_notrans.csv', 'w') as f:
                print("saving to new_words_notrans.csv")
                for word in new_words:
                    f.write("%s\n" % (word))
def translateArabic(self, text):
    """Translate *text* with googletrans defaults (auto-detected source,
    English destination) and return the resulting string.

    NOTE: despite the name, no Arabic source/destination is forced here.
    """
    return Translator().translate(text).text
import googletrans
from googletrans import Translator

# Show every language code googletrans supports.
print('Available languages: ')
lang_avail = googletrans.LANGUAGES
print(type(lang_avail))
for i in lang_avail:
    print(i, " : ", lang_avail[i])

translator = Translator()
# Close the input file deterministically — the original leaked the handle.
with open('bengali_result.txt', 'r') as f:
    input_text = f.read()

# Translate a fixed Bengali greeting ("How are you?") to English.
result = translator.translate('আপনি কেমন আছেন', src='bn', dest='en')
# result = translator.translate(input_text, src='bn', dest='en')
print(result.text)
def translate_text(text, src='en', dest='sv'):
    """Translate *text* from *src* to *dest* (defaults: English -> Swedish)
    and return the translated string."""
    return Translator().translate(text, dest=dest, src=src).text
# Translate a word between two user-chosen languages, then speak the result.
from googletrans import Translator
from gtts import gTTS

engine = Translator()  # default input taken as english

# Gather the source language, the word, and the destination language.
source_language = input('Enter Source Language: ')
word = input('Enter Source word: ')
destination_language = input('Enter Destination Language: ')

# "dest=" is the destination language.
result = engine.translate(word, src=source_language, dest=destination_language)
translated_word = result.text
print(word, 'meaning in ', destination_language, 'is: ', translated_word)

# Destination-language pronunciation.
print(" Pronunciation:", result.pronunciation)

# Convert the destination-language text to speech and save it.
tts = gTTS(translated_word)
tts.save('z.mp3')
def test_read_timeout():
    """A Translator whose read timeout is near zero must raise ReadTimeout
    on the first translation attempt (the connect timeout is generous)."""
    with raises(ReadTimeout):
        t = Translator(timeout=(10, 0.00001))
        t.translate('안녕하세요.')
# -*- coding: utf-8 -*-
from googletrans import Translator

translator = Translator()

# Translate a .vtt subtitle file line by line into Portuguese.
# (unicode handling is not needed just to show the text)
nome = '1. Overview of the Projects'
# Context managers guarantee both files are closed even if a translation
# request fails mid-file — the original leaked both handles on error.
with open(nome + '.vtt', 'r') as enviar, open(nome + ' pt.vtt', 'w') as receber:
    for linha in enviar.readlines():
        traduzido = translator.translate(linha, dest='pt').text
        receber.write(traduzido + '\n')
# run from here
# Drop the two trailing entries before rebuilding the corpus.
corpus = corpus[:-2]

# In[20]:
translator = Translator()

# In[21]:
str1 = "ये गलत है"

# In[22]:
# Translate the Hindi sample, keep only ASCII letters, lower-case, then
# stem every non-stopword token and append the cleaned sentence.
# Hoisted: stopwords.words() used to be reloaded for every token.
english_stopwords = set(stopwords.words("english"))
for i in range(0, 2):
    review = re.sub("[^a-zA-Z]", " ", translator.translate(str1).text)
    tokens = review.lower().split()
    # The original set a throwaway flag (m = 1) on stopwords; filtering
    # directly expresses the intent.
    stemmed = [ps.stem(word) for word in tokens if word not in english_stopwords]
    corpus.append(' '.join(stemmed))

# In[23]:
# -*- coding: utf-8 -*-
import time
import feedparser
from googletrans import Translator

translator = Translator(service_urls=['translate.google.cn'])

url = 'http://feeds.newscientist.com/tech/'
resp = feedparser.parse(url)

# For the ten newest feed entries: print the publish timestamp, then the
# English title and summary alongside their Chinese translations.
for feed in resp['entries'][:10]:
    title = feed['title']
    cn_title = translator.translate(title, src='en', dest='zh-cn').text
    summary = feed['summary']
    cn_summary = translator.translate(summary, src='en', dest='zh-cn').text
    print(time.strftime('%Y-%m-%d %H:%M:%S', feed['published_parsed']))
    print(title)
    print(cn_title)
    print(summary)
    print(cn_summary)
    print('')
'Chinese':'zh-CN', 'Japanese':'ja' } while True: # user language input lang = input(''' ***************************************************** * Choose a language from the following selection * * below: * * 1.) French * * 2.) Spanish * * 3.) Chinese * * 4.) Japanese * *****************************************************\n ''') try: new_translation = translate.translate(word, languages.get(lang), 'en') new_translation.text print(new_translation.text) break except (KeyError and AttributeError): print(''' !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Please, enter a country code from the selection: !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n ''') continue
# # Usage: # # Input: # # Output: # # Author: hadoop # # Create Time: 2017-06-22 08:36:21 # ###################################################### import sys reload(sys) sys.setdefaultencoding("utf-8") import os import time from datetime import datetime, timedelta from googletrans import Translator translator = Translator() for line in sys.stdin: result = translator.translate(line, dest="zh-CN") print result.text
async def on_message(message):
    """Discord event handler dispatching on message-content prefixes.

    Commands:
      $en    -- search myanimelist.net, post cover image + raw synopsis
      $gre   -- search onnada.com, post cover image + page text
      $pt    -- search myanimelist.net, post synopsis translated to Portuguese
      !hello -- post a demo embed

    NOTE(review): each branch strips its prefix by mutating message.content
    in place, so the SECOND `$en` branch below is normally unreachable (the
    first `$en` branch already removed the prefix). Behavior preserved as-is.
    """
    # Ignore the bot's own messages to avoid reply loops.
    if message.author == client.user:
        return
    if message.content.startswith('$en'):
        channel = message.channel
        # Strip the command prefix; the remainder is the search query.
        message.content = message.content.replace("$en", "")
        enter = message.content
        print()
        # URL-encode spaces for the query string.
        enter = enter.replace(" ", "%20")
        # enter=enter.replace("!","__")
        print(enter)
        import re
        import requests
        from bs4 import BeautifulSoup as BSHTML
        import urllib.request
        page = urllib.request.urlopen('https://myanimelist.net/search/all?q=' + enter)
        print(page)
        soup = BSHTML(page, "html.parser")
        images = soup.findAll('img')
        # The second <img> on the results page is the first result's cover art.
        i = 0
        for image in images:
            i += 1
            if i == 2:
                # print(image['src'])
                response1 = image['src']
        #Haikyuu!To the Top
        # Grab the first result's detail-page URL out of its thumbnail anchor.
        texte = soup.findAll(class_='picSurround di-tc thumb')
        # print(texte)
        p = 0
        for cle in texte:
            test = cle.find(class_='hoverinfo_trigger')
            # expr = re.match(r"href=\"[^ ]+\"",cle)
            # if expr is not None :
            #     print(expr)
            if p == 0:
                # Extract the href value by regex rather than tag attribute.
                expr = re.search(r"href=\"[^ ]+\"", str(test))
                print(expr.group(0))
                start = re.search(r"(?<=href=\")[a-zA-Z-0-9-/-_-.-]+", str(expr.group(0)))
                print(start.group(0))
                reg = start.group(0)
                # print(test)
                # print(cle)
                # print(cle['href'])
                # print(cle.get('href'))
            p += 1
            # print(test)
            # expr = re.match(r"href=\"[^ ]+\"",test)
            # if expr is not None :
            #     print(expr)
            # [^href=\"][a-zA-Z-0-9-/-_-.-]+
        # Fetch the detail page and pull the synopsis text.
        page = urllib.request.urlopen(reg)
        print(page)
        soup = BSHTML(page, "html.parser")
        res = soup.find(itemprop="description")
        a = res.get_text()
        # print(res)
        from googletrans import Translator
        translator = Translator()
        # French translation is computed but this branch sends the raw text.
        result = translator.translate(str(a), dest='fr')
        de = result.text
        # print(result.text)
        await channel.send(response1)
        await channel.send("```" + a + "```")
        def check(m):
            return m.content == 'hello' and m.channel == channel
        # Wait for a follow-up 'hello' in the same channel, then greet.
        msg = await client.wait_for('message', check=check)
        await channel.send('Hello {.author}!'.format(msg))
    if message.content.startswith('$gre'):
        channel = message.channel
        message.content = message.content.replace("$gre", "")
        from requests_html import HTMLSession
        import re
        import requests
        from bs4 import BeautifulSoup as BSHTML
        import urllib.request
        session = HTMLSession()
        r = session.get('http://onnada.com/search/?q=' + message.content)
        r.encoding = 'utf-8'
        e = r.text
        soup = BSHTML(e, "html.parser")
        images = soup.findAll('img')
        #print(images)
        # Second <img> on the results page is the first result's thumbnail.
        i = 0
        for image in images:
            i += 1
            if i == 2:
                print(image['src'])
                response1 = image['src']
        images = soup.findAll(class_='thumb')
        p = 0
        for image in images :
            if p == 0:
                expr = re.search(r"href=\"[^ ]+\"", str(image))
                print(expr.group(0))
                start = re.search(r"(?<=href=\")[a-zA-Z-0-9-/-_-.-]+", str(expr.group(0)))
                reg = start.group(0)
            p += 1
        print(reg)
        page = urllib.request.urlopen(reg)
        print(page)
        soup = BSHTML(page, "html.parser")
        res = soup.find(id="animeContents")
        a = res.get_text()
        print(a)
        await channel.send(response1)
        await channel.send("```" + a + "```")
    # NOTE(review): duplicate of the first `$en` branch (this copy sends the
    # French translation `de` instead of the raw synopsis). Normally dead:
    # the first branch already stripped the '$en' prefix from the content.
    if message.content.startswith('$en'):
        channel = message.channel
        message.content = message.content.replace("$en", "")
        enter = message.content
        print()
        enter = enter.replace(" ", "%20")
        # enter=enter.replace("!","__")
        print(enter)
        import re
        import requests
        from bs4 import BeautifulSoup as BSHTML
        import urllib.request
        page = urllib.request.urlopen('https://myanimelist.net/search/all?q=' + enter)
        print(page)
        soup = BSHTML(page, "html.parser")
        images = soup.findAll('img')
        i = 0
        for image in images:
            i += 1
            if i == 2:
                print(image['src'])
                response1 = image['src']
        #Haikyuu!To the Top
        texte = soup.findAll(class_='picSurround di-tc thumb')
        # print(texte)
        p = 0
        for cle in texte:
            test = cle.find(class_='hoverinfo_trigger')
            # expr = re.match(r"href=\"[^ ]+\"",cle)
            # if expr is not None :
            #     print(expr)
            if p == 0:
                expr = re.search(r"href=\"[^ ]+\"", str(test))
                print(expr.group(0))
                start = re.search(r"(?<=href=\")[a-zA-Z-0-9-/-_-.-]+", str(expr.group(0)))
                print(start.group(0))
                reg = start.group(0)
                # print(test)
                # print(cle)
                # print(cle['href'])
                # print(cle.get('href'))
            p += 1
            # print(test)
            # expr = re.match(r"href=\"[^ ]+\"",test)
            # if expr is not None :
            #     print(expr)
            # [^href=\"][a-zA-Z-0-9-/-_-.-]+
        page = urllib.request.urlopen(reg)
        print(page)
        soup = BSHTML(page, "html.parser")
        res = soup.find(itemprop="description")
        a = res.get_text()
        # print(res)
        from googletrans import Translator
        translator = Translator()
        result = translator.translate(str(a), dest='fr')
        de = result.text
        print(result.text)
        await channel.send(message.content)
        await channel.send(response1)
        await channel.send("```" + de + "```")
        await channel.send(reg)
        def check(m):
            return m.content == 'hello' and m.channel == channel
        msg = await client.wait_for('message', check=check)
        await channel.send('Hello {.author}!'.format(msg))
    if message.content.startswith('$pt'):
        channel = message.channel
        message.content = message.content.replace("$pt", "")
        enter = message.content
        print()
        enter = enter.replace(" ", "%20")
        # enter=enter.replace("!","__")
        print(enter)
        import re
        import requests
        from bs4 import BeautifulSoup as BSHTML
        import urllib.request
        page = urllib.request.urlopen('https://myanimelist.net/search/all?q=' + enter)
        print(page)
        soup = BSHTML(page, "html.parser")
        images = soup.findAll('img')
        i = 0
        for image in images:
            i += 1
            if i == 2:
                # print(image['src'])
                response1 = image['src']
        #Haikyuu!To the Top
        texte = soup.findAll(class_='picSurround di-tc thumb')
        # print(texte)
        p = 0
        for cle in texte:
            test = cle.find(class_='hoverinfo_trigger')
            # expr = re.match(r"href=\"[^ ]+\"",cle)
            # if expr is not None :
            #     print(expr)
            if p == 0:
                expr = re.search(r"href=\"[^ ]+\"", str(test))
                print(expr.group(0))
                start = re.search(r"(?<=href=\")[a-zA-Z-0-9-/-_-.-]+", str(expr.group(0)))
                print(start.group(0))
                reg = start.group(0)
                # print(test)
                # print(cle)
                # print(cle['href'])
                # print(cle.get('href'))
            p += 1
            # print(test)
            # expr = re.match(r"href=\"[^ ]+\"",test)
            # if expr is not None :
            #     print(expr)
            # [^href=\"][a-zA-Z-0-9-/-_-.-]+
        page = urllib.request.urlopen(reg)
        print(page)
        soup = BSHTML(page, "html.parser")
        res = soup.find(itemprop="description")
        a = res.get_text()
        # print(res)
        from googletrans import Translator
        translator = Translator()
        result = translator.translate(str(a), dest='pt')
        de = result.text
        # print(result.text)
        # Discord embed descriptions are kept short: truncate to 250 chars.
        a = de[0:250]
        await channel.send(response1)
        embed = discord.Embed(description=a, color=0x00ff00)
        await message.channel.send(embed=embed)
        await channel.send(reg)
    if message.content.startswith('!hello'):
        # Demo embed showing the title/description/field API.
        embed = discord.Embed(title="Title", description="Desc", color=0x00ff00)
        embed.add_field(name="Field1", value="hi", inline=False)
        embed.add_field(name="Field2", value="hi2", inline=False)
        await message.channel.send(embed=embed)
# Normalize a non-English name: transliterate when possible, otherwise
# machine-translate it, then write "<row prefix> <name>" to a chunked output file.
check_en_result = check_english(name)
if not check_en_result[0]:
    if check_en_result[1]:
        # Name is Latin-representable: transliterate accents/diacritics away.
        name = unidecode.unidecode(name)
    else:
        # NOTE(review): translation is a network call; the previous retry loop
        # (10 attempts with a 2s backoff) was commented out. Consider restoring
        # it if transient failures reappear.
        name = translator.translate(name).text
# Collapse separators to single underscores (order of replaces matters:
# '___' must be collapsed before '__').
name = name.replace(' ', '_').replace('-', '_').replace('.', '_').replace('___', '_').replace('__', '_')
# BUG FIX: the original computed str(1//1000), which is constantly '0000',
# so every record landed in one file. The running record counter `i`
# (incremented below) is the intended chunk index: one file per 1000 records.
outname_full = outname + str(i // 1000).zfill(4) + '.txt'
# Use a context manager so the file handle is closed even if write() raises.
with open(outname_full, 'a', encoding='utf8') as outfile:
    outfile.write(' '.join([row[:idx], name]) + '\n')
print(name)
i += 1
infile.close()
def traducer(frase, idi):
    """Translate *frase* into the language code *idi* and return the text.

    A fresh Translator instance is created per call; the raw translation
    object is echoed to stdout before the text is returned.
    """
    engine = Translator()
    resultado = engine.translate(frase, dest=idi)
    print(resultado)
    return resultado.text