def the_google_shuffle(data, n_translations: Optional[int] = 6, source_language='en', *, show_intermediate_results: bool = False):
    """Mutate a sentence by passing it through ``n_translations`` languages.

    It's like the game of "telephone": the text hops through a random chain
    of languages and is finally translated back to ``source_language``.

    Args:
        data: the text to mangle.
        n_translations: number of intermediate languages to hop through.
            If ``None``, will go through ALL languages, which is perhaps
            not the best move.
        source_language: language the text starts and ends in.
        show_intermediate_results: when True, print each hop's text
            (translated back to the source language for readability).

    Returns:
        The final translation result object, or ``None`` if no translation
        was performed (e.g. ``n_translations=0``).
    """
    langs = [d['language'] for d in _translate_client.get_languages()]
    random.shuffle(langs)
    # Build the chain: source -> random hops -> back to source.
    langs = [source_language] + langs[:n_translations] + [source_language]
    # BUG FIX: previously `trans` was unbound if every consecutive pair was
    # identical (e.g. n_translations=0), raising UnboundLocalError on return.
    trans = None
    for source, target in zip(langs, langs[1:]):
        if source == target:
            continue
        print(source, '-->', target, end=': ')
        trans = translate(data, target, source)
        if show_intermediate_results:
            if target != source_language:
                # Translate intermediate steps back to source so the
                # progression is readable.
                to_source = translate(trans.translation,
                                      target_language=source_language,
                                      source_language=target)
                print(to_source.translation)
            else:
                print(trans.translation)
        else:
            print()
        data = trans.translation
    return trans
def double_translate(text, target_language):
    """Round-trip *text*: English -> *target_language* -> back to English."""
    outbound = translate(text, target_language)
    return translate(outbound, 'en')
def intent_handler(text_input, intent):
    """Dispatch *text_input* based on *intent*.

    Returns the translated text for "translate", an empty string for "quiz",
    and for any unknown intent falls back to translating the raw input.
    """
    if intent == "translate":
        text_to_translate = break_string_after(text_input, "translate")
        result_text = translate(text_to_translate, language="en")
    elif intent == "quiz":
        return ""
    else:
        # BUG FIX: a dead assignment of "I dont know this intent..." was
        # immediately overwritten here; the fallback has always been to
        # translate the raw input, so the dead store was removed.
        result_text = translate(text_input, language="en")
    return result_text
def detect_text(path):
    """Detects text in the file.

    Runs Cloud Vision text detection on the image at *path*, then for the
    first three annotations prints the text, speaks its translation through
    AquesTalkPi/aplay, and prints the bounding polygon.
    """
    from google.cloud import vision
    client = vision.ImageAnnotatorClient()
    # [START vision_python_migration_text_detection]
    with io.open(path, 'rb') as image_file:
        content = image_file.read()
    image = vision.types.Image(content=content)
    response = client.text_detection(image=image)
    texts = response.text_annotations
    print('Texts:')
    # Only the first three annotations are processed.
    for i, text in enumerate(texts):
        if i < 3:
            #print('\n"{}"'.format(text.description))
            original_text = text.description
            print("Text: " + original_text)
            trans_text = translate(original_text)
            print("Trans: " + trans_text)
            # NOTE(review): trans_text is interpolated unquoted into a shell
            # command — a translation containing shell metacharacters would be
            # executed; consider shlex.quote().
            os.system(dir_aquest + 'AquesTalkPi -g {} {} | aplay -D plughw:{},{}'.format(VOLUME, trans_text, CARD, DEVICE))
            vertices = (['({},{})'.format(vertex.x, vertex.y) for vertex in text.bounding_poly.vertices])
            print('bounds: {}'.format(','.join(vertices)))
def normalize_commentary(cmt):
    """Translate *cmt* when it doesn't look like English, then clean it up."""
    # Crude English heuristic: does the word "the" appear anywhere?
    if 'the' not in cmt.lower().split():
        cmt = translate(cmt)
        sleep(0.1)  # brief pause between translation calls
    return replacetext(cmt)
def process_translation_to_user(user_state, response):
    """Localise response['message'] from English into the user's language.

    Leaves the response untouched for English users or empty messages.
    """
    message = response.get("message")
    if message and user_state['language'] != "en":
        result = translate([message], "en", user_state['language'])
        if result:
            response['message'] = result[0]
    return response
def detect_labels(path):
    """Detects labels in the file.

    Runs Cloud Vision label detection on the image at *path*, translates
    confident labels, speaks each via AquesTalkPi/aplay, and finally sends
    the concatenated result over LINE along with the image.
    """
    from google.cloud import vision
    client = vision.ImageAnnotatorClient()
    # [START vision_python_migration_label_detection]
    with io.open(path, 'rb') as image_file:
        content = image_file.read()
    image = vision.types.Image(content=content)
    response = client.label_detection(image=image)
    labels = response.label_annotations
    print('Labels:')
    trans_concat = "物体判別結果: "
    for i, label in enumerate(labels):
        # Keep only confident labels, and no more than default_max of them.
        if label.score > 0.8:
            if i < default_max:
                original_text = label.description
                print("Label: " + original_text)
                trans_text = translate(original_text)
                print("Trans: " + trans_text)
                # NOTE(review): trans_text goes unquoted into a shell command;
                # metacharacters in a translation would be executed.
                os.system(dir_aquest + 'AquesTalkPi -g {} {} | aplay -D plughw:{},{}'.format(VOLUME, trans_text, CARD, DEVICE))
                trans_concat += str(i) + "." + trans_text + " "
    print(trans_concat)
    send_line(trans_concat, path)
def main(corpus):
    """Translate each 'word,count' line of *corpus* into 'en,ja,count' lines."""
    with open(corpus, 'r') as corpusFile, open(OUT_PUT_FILE, 'w') as outputFile:
        for line in corpusFile:
            en_word, count = line.split(',')
            translated = translate(en_word)
            outputFile.write(','.join([en_word, translated, count]))
def listen_print_loop(responses):
    """Iterates through server responses and prints them.

    The responses passed is a generator that will block until a response
    is provided by the server.

    Each response may contain multiple results, and each result may contain
    multiple alternatives; for details, see https://goo.gl/tjCPAU.  Here we
    print only the transcription for the top alternative of the top result.

    In this case, responses are provided for interim results as well. If the
    response is an interim one, print a line feed at the end of it, to allow
    the next result to overwrite it, until the response is a final one. For the
    final one, print a newline to preserve the finalized transcription.
    """
    num_chars_printed = 0
    for response in responses:
        if not response.results:
            continue
        # The `results` list is consecutive. For streaming, we only care about
        # the first result being considered, since once it's `is_final`, it
        # moves on to considering the next utterance.
        result = response.results[0]
        if not result.alternatives:
            continue
        # Display the transcription of the top alternative.
        transcript = result.alternatives[0].transcript
        # Display interim results, but with a carriage return at the end of the
        # line, so subsequent lines will overwrite them.
        #
        # If the previous result was longer than this one, we need to print
        # some extra spaces to overwrite the previous result
        overwrite_chars = ' ' * (num_chars_printed - len(transcript))
        if not result.is_final:
            sys.stdout.write(transcript + overwrite_chars + '\r')
            sys.stdout.flush()
            num_chars_printed = len(transcript)
        else:
            final = transcript + overwrite_chars
            # BUG FIX: `print final` was Python 2 statement syntax — a
            # SyntaxError under Python 3, which the rest of this file targets.
            print(final)
            final = translate(final)
            dialog(final)
            # Exit recognition if any of the transcribed phrases could be
            # one of our keywords.
            if re.search(r'\b(exit|quit)\b', transcript, re.I):
                print('Exiting..')
                break
            # Stop after handling the first final utterance (a dead
            # `num_chars_printed = 0` after this break was removed).
            break
def hello_monkey():
    """Take in message and send to broken english"""
    message_body = request.form['Body']
    if "!*!" in message_body:
        # "!*!" prefix: echo the broken-english translation directly.
        stripped = message_body.replace("!*!", '')
        broken = translate(stripped, lang)
        resp = MessagingResponse()
        resp.message("Your message in broken english:\n{}".format(broken))
    else:
        # Otherwise feed the translation through the chatbot first.
        broken = translate(message_body, lang)
        print(broken)
        reply = str(chatbot.get_response(broken))
        resp = MessagingResponse()
        resp.message(reply)
    return str(resp)
def translate_file(input_file, output_file=None):
    """Translate a UTF-8 TSV file of sentence pairs line by line.

    Each input line is 'sent1<TAB>sent2'; both sides are translated and
    re-joined with a tab.

    Args:
        input_file: path of the TSV file to read.
        output_file: optional path; when given, translated pairs are also
            written there, one per line.

    Returns:
        list of 'translated1<TAB>translated2' strings.
    """
    data = []
    print('translate file %s' % (input_file,))
    fw = None
    try:
        if output_file is not None:
            fw = codecs.open(output_file, 'w', encoding='utf8')
        with codecs.open(input_file, 'r', encoding='utf8') as f:
            for idx, line in enumerate(f):
                print('\r%d' % (idx,), end=' ')  # simple progress indicator
                fields = line.strip().split('\t')
                pair = translate(fields[0]) + '\t' + translate(fields[1])
                data.append(pair)
                if fw is not None:
                    print(pair, file=fw)
        return data
    finally:
        # BUG FIX: fw was opened but never closed (handle leak); ensure the
        # output file is released even if translation raises.
        if fw is not None:
            fw.close()
def incoming_sms():
    """ Returns a message response to the Twillo client with the translated text
    and creates a request to /audio in the local host.

    Returns:
        response: if the message to be translated was hello or bye returns a custom message
        Otherwise a message with the translated text.
    """
    # Map of ISO 639-1 codes to the regional voice codes used by /audio.
    lang_dict = {'ar':'ar-XA','en':'en-GB','cs':'cs-CZ','da':'da-DK','nl':'nl-NL','fi':'fi-FI','fr':'fr-FR','de':'de-DE','el':'el-GR','hi':'hi-IN','hu':'hu-HU','id':'id-ID','it':'it-IT','ja':'ja-JP','ko':'ko-KR','zh':'cmn-CN','no':'nb-NO','pl':'pl-PL','pt':'pt-BR','ru':'ru-RU','sk':'sk-SK','es':'es-ES','sv':'sv-SE','tr':'tr-TR','uk':'uk-UA','vi':'vi-VN'}
    body = request.values.get('Body', None)
    resp = MessagingResponse()
    if body == 'hello':
        resp.message("Hi! I am your helper bot and I can help you translate stuff. Just send what you need to be translated. :)")
    elif body == 'bye':
        resp.message("Goodbye. Hope you enjoyed our test program")
    elif body != '':
        # Text message: translate it and reply.
        translation = translate(body)
        resp.message(translation)
        # The last word of the message may name the target language.
        lang_list = translate_client.get_languages()
        words = body.split()
        length = len(words) - 1
        lastword = words[length]
        target_lang = ''
        for lang in lang_list:
            if lastword.lower() == lang['name'].lower():
                target_lang = lang['language']
        if target_lang in lang_dict.keys():
            # NOTE(review): `response` is assigned but never used.
            response = MessagingResponse()
            msg = resp.message("")
            # Hard-coded ngrok tunnel serving the synthesized audio.
            msg.media("https://e0c5fe4c.ngrok.io/audio1")
    else:
        # Empty body: treat as an MMS with audio attachments to transcribe.
        num_media = int(request.values.get("NumMedia"))
        media_files = []
        for idx in range(num_media):
            media_url = request.values.get("MediaUrl{}".format(idx))
            mime_type = request.values.get("MediaContentType{}".format(idx))
            media_files.append((media_url, mime_type))
            # Download the attachment to a local file named after its SID.
            req = requests.get(media_url)
            file_extension = mimetypes.guess_extension(mime_type)
            media_sid = os.path.basename(urlparse(media_url).path)
            with open("{}{}".format(media_sid, file_extension), 'wb') as f:
                f.write(req.content)
            client = speech.SpeechClient()
            file_name = media_sid + file_extension
            with io.open(file_name, 'rb') as audio_file:
                content = audio_file.read()
            audio = types.RecognitionAudio(content=content)
            config = types.RecognitionConfig(
                encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
                sample_rate_hertz=44100,
                language_code='en-GB')
            response = client.recognize(config, audio)
            # NOTE(review): sentence starts as 'test', so every reply is
            # prefixed with that placeholder — presumably leftover debugging.
            sentence = 'test'
            for result in response.results:
                sentence += '{}'.format(result.alternatives[0].transcript)
            resp.message(sentence)
            os.remove(file_name)
    return str(resp)
def foreign_pie_to_english(foreign_pie):
    """Translate each foreign 'to' entry back into English.

    Every input item's "to" pair [lang, phrase] becomes the new "from",
    and the English back-translation becomes the new "to".
    """
    results = []
    for entry in foreign_pie:
        lang = entry['to'][0]
        phrase = entry['to'][1]
        back = translate(phrase, lang, "en")
        results.append({
            "to": ["en", back],
            "from": [lang, phrase],
        })
    return results
def importCSV(csv_path='PATH/TO/THE/CSV/FILE/FILE_NAME.csv'):
    """Translate two columns of a CSV file and save them to translatedCSV.csv.

    Args:
        csv_path: path of the CSV to read. Generalized from the previous
            hard-coded placeholder; the default preserves the old behavior.

    Note: change the "column1"/"column2" keys to match your column names.
    """
    data = pd.read_csv(csv_path)
    # Dictionary that will collect the translated words.
    translatedCSV = {
        "column1": [],
        "column2": []
    }
    # Translate word by word from the CSV file into the dictionary.
    # (The unused `countRows` variable was removed.)
    for index, row in data.iterrows():
        translatedCSV["column1"].append(translate(row["column1"]))
        translatedCSV["column2"].append(translate(row["column2"]))
    # Build a DataFrame from the dictionary and save it as TSV.
    df = pd.DataFrame(data=translatedCSV)
    df.to_csv("translatedCSV.csv", sep='\t')
def sft(x, lan):
    """Translate every item of *x* into language *lan*, grouped into chunks.

    Returns a list of chunks, each a list of [original, translation] pairs.
    The original counter logic yields a first chunk of 5 pairs and subsequent
    chunks of 4; that cadence is preserved.

    BUG FIX: previously any trailing partial chunk was silently discarded
    (data loss); it is now appended as the final chunk.
    """
    counter = 0
    chunks, current = [], []
    for item in x:
        current.append([item, translate(item, lan)])
        if counter > 3:
            counter = 0
            chunks.append(current)
            current = []
        counter += 1
    if current:
        # Keep the leftover pairs instead of dropping them.
        chunks.append(current)
    return chunks
def piechart_to_all(languages):
    """Translate the phrase "pie chart" from English into every other language."""
    translations = []
    for entry in languages:
        code = entry['language']
        if code == 'en':
            continue  # no point translating English to English
        foreign = translate("pie chart", "en", code)
        translations.append({
            "from": ["en", "pie chart"],
            "to": [code, foreign],
        })
    return translations
def endpoint(event, context):
    """Lambda-style handler: translate body['text'] into body['target_language'].

    Returns a dict with statusCode 200 and the JSON-encoded translation.
    """
    payload = json.loads(event['body'])
    translated_text = translate(payload['text'], payload['target_language'])
    return {"statusCode": 200, "body": json.dumps(translated_text)}
def translate_files_in_folder(arr):
    """Add an English 'translation' column to every resources CSV named in *arr*."""
    for name in arr:
        frame = pd.read_csv(root_path + 'resources/' + name, encoding='ISO 8859-9')
        frame['translation'] = [translate(t, "en") for t in frame['transcript']]
        frame.to_csv(root_path + "resources/" + name + "_translated.csv")
    return
def parse_po(filepath, target_lang):
    """Parse and process po file.

    Streams the file to stdout, translating each collected msgid into
    *target_lang* and printing it as the msgstr.

    Input example is below:

    #: ../../source/index.rst:16
    msgid "Introduction"
    msgstr ""

    #: ../../source/index.rst:5
    msgid ""
    "This is an example of multiple lines"
    "Hello"
    "World"
    msgstr ""
    "これは複数行の例です。"
    "こんにちは"
    "世界"
    """
    with open(filepath) as f:
        # Simple two-flag state machine over the line stream.
        processing_msgid = False
        processing_msgstr = False
        msgid = ""
        for line in f:
            cleaned = line.strip(" \n")
            if processing_msgstr:
                if cleaned.startswith('"') and cleaned.endswith('"'):
                    # msgstr might be multiple lines
                    continue
                else:
                    # End of the msgstr section: emit the translation of the
                    # accumulated msgid, then reset.
                    processing_msgstr = False
                    translated = translate(msgid, target_lang)
                    print(f'msgstr "{translated}"')
                    msgid = ""
            if processing_msgid:
                if cleaned.startswith('"') and cleaned.endswith('"'):
                    # Continuation line: append without the quotes.
                    msgid += cleaned.strip('"')
                else:
                    processing_msgid = False
            if line.startswith("msgstr "):
                # Suppress the original (empty) msgstr lines from output.
                processing_msgstr = True
                continue
            # Every other line is echoed through unchanged.
            print(line, end="")
            if cleaned.startswith("msgid"):
                processing_msgid = True
                text = cleaned[len("msgid"):].lstrip(' ').strip('"')
                msgid += text
def tts(text, output_file):
    """Synthesize *text* as speech in DEST_LANG, translating it first if needed.

    The MP3 returned by the TTS service is converted to WAV at *output_file*.
    """
    if SRC_LANG != DEST_LANG:
        text = translate(text)
    speech_input = texttospeech.types.SynthesisInput(text=text)
    # Leaving gender as neutral for now. Could train multiple models,
    # one just for male-to-male and one for female-to-female.
    voice_params = texttospeech.types.VoiceSelectionParams(
        language_code=DEST_LANG,
        ssml_gender=texttospeech.enums.SsmlVoiceGender.NEUTRAL)
    audio_cfg = texttospeech.types.AudioConfig(
        audio_encoding=texttospeech.enums.AudioEncoding.MP3)
    result = TTS_CLIENT.synthesize_speech(speech_input, voice_params, audio_cfg)
    mp3_audio_content_to_wav(result.audio_content, output_file)
def main(host, user, password, db_name):
    """Translate the stored tweets and write the translations back to the DB."""
    with closing(mysql.connect(host=host, user=user, password=password)) as db:
        use_db(db, db_name)
        cursor = db.cursor()
        get_translations(cursor)
        rows = cursor.fetchall()
        # Print a running count as each row is updated.
        for done, row in enumerate(rows, start=1):
            tweet_id = row[0]
            txt = get_tweet(cursor, tweet_id)[0]
            update_tranlation(cursor, tweet_id, translate(txt))  # sic: helper's name
            print(done)
        db.commit()
def receiveSMS(): """Respond to incoming messages with a friendly SMS.""" # Start our response resp = MessagingResponse() body = request.values.get('Body', None) #print(body) # Add a message nums = getNums(body) text = getText(body) langs = getLangs(body) same = False if (nums != '000' and langs != '000'): nsize = len(nums) lsize = len(langs) if (nsize == lsize): same = True print(langs) if ((nums == '000' or text == '000' or langs == '000') or same == False): resp.message( "Sorry, it looks like something didn't work! " + "You might have put in an incorrect amount of phone numbers or languages, " + "made a typo, or forgotten a delimiter. Please try again.") else: keys = getDict(nums, langs) count = -1 for n in nums: count += 1 target = langs[count] target = target[:2] print(target) result = translate(text, target) print("result: ", result) message = client.messages \ .create( body=result, from_='', to=nums[count] ) detect(text) print("received") return str(resp)
def main(host, user, password, db_name):
    """Insert translated tweets into the table, skipping ones already stored."""
    with closing(mysql.connect(host=host, user=user, password=password)) as db:
        use_db(db, db_name)
        create_table(db)
        cursor = db.cursor()
        get_tweets(cursor)
        rows = cursor.fetchall()
        inserted = 0
        for row in rows:
            tweet_id, tweet_text = row[0], row[1]
            if check_if_exists(cursor, tweet_id):
                continue  # already translated
            insert(cursor, tweet_id, translate(tweet_text))
            inserted += 1
            print(inserted)
        db.commit()
def on_intent(intent_request, session):
    """ Called when the user specifies an intent for this skill """
    print("on_intent requestId=" + intent_request['requestId'] +
          ", sessionId=" + session['sessionId'])
    intent = intent_request['intent']
    intent_name = intent_request['intent']['name']
    # Dispatch to your skill's intent handlers
    if intent_name == "SetLanguageIntent":
        return set_language_in_session(intent, session, intent_request['locale'])
    if intent_name == "TranslateIntent":
        return translate(intent, session)
    if intent_name == "AMAZON.HelpIntent":
        return get_welcome_response(intent_request['locale'])
    if intent_name in ("AMAZON.CancelIntent", "AMAZON.StopIntent"):
        return handle_session_end_request()
    raise ValueError("Invalid intent")
def detect_and_translate(image, target_lang): """Translate detected text in the image into the target language. Args: image (bytes): an image target_lang (string): ISO 639-1 of target langugage Returns: Image: PIL image with translation text box overlay """ # Get text, bounding boxes and line breaks detected = list(detect(image, target_lang)) if len(detected) < 1: print('No detected text to translate!') return [], [], [], [] text, bounds, detected_langs, confidence = zip(*detected) translation = translate(text, target_lang, detected_langs) if len(translation) != len(bounds): raise RuntimeError( "Inconsistency between number of texts and translations") return text, translation, extract_bounds(bounds), list(detected_langs)
def run_quickstart():
    """Label-detect 'image.jpg' with Cloud Vision and print each label's translation."""
    # [START vision_quickstart]
    import io
    import os
    # Imports the Google Cloud client library
    # [START vision_python_migration_import]
    from google.cloud import vision
    from google.cloud.vision import types
    # [END vision_python_migration_import]
    # Instantiates a client
    # [START vision_python_migration_client]
    client = vision.ImageAnnotatorClient()
    # [END vision_python_migration_client]
    # The name of the image file to annotate
    file_name = os.path.join(os.path.dirname(__file__), 'image.jpg')  #'resources/wakeupcat.jpg')
    # Loads the image into memory
    with io.open(file_name, 'rb') as image_file:
        content = image_file.read()
    image = types.Image(content=content)
    # Performs label detection on the image file
    response = client.label_detection(image=image)
    labels = response.label_annotations
    print('Labels:')
    for label in labels:
        original_text = label.description
        print("Label: " + original_text)
        # translate() is defined elsewhere in this project.
        trans_text = translate(original_text)
        print("Trans: " + trans_text)
def main():
    """Voice-assistant entry point.

    Builds a small Tk audio-player UI, speaks a greeting, then loops forever
    listening for voice commands (greetings, translation, Google search,
    music playback, alarms, simple arithmetic, opening apps/sites).
    """
    from gtts import gTTS
    import speech_recognition as sr
    import os
    import re
    import pygame
    from pygame import mixer
    import webbrowser
    import pyaudio
    import pyttsx3
    from google import google
    import requests
    import tkinter as tkr
    import random
    import time
    from google.cloud import translate
    import datetime
    player = tkr.Tk()
    translate_client = translate.Client()
    greeting_dict = {'hi': 'hi', 'hello': 'hello', 'hey': 'hey'}
    # Question words that mark a command as a Google search.
    google_dict = {
        'what': 'what',
        'why': 'why',
        'who': 'who',
        'which': 'which',
        'how': 'how',
        'when': 'when'
    }
    translate_error_message = 'Sorry, Not able to translate. Try Again.'
    player.title("Audio Player")
    player.geometry("205x340")
    engine = pyttsx3.init()
    voices = engine.getProperty('voices')
    engine.setProperty(
        'voice',
        'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_ZIRA_11.0'
    )
    rate = engine.getProperty('rate')
    engine.setProperty('rate', rate)
    playlist = tkr.Listbox(player, highlightcolor="blue", selectmode=tkr.SINGLE)
    #print(songlist)

    def play(audio):
        # Load and start a track with pygame's mixer.
        files = audio
        pygame.init()
        pygame.mixer.init()
        pygame.mixer.music.load(files)
        pygame.mixer.music.play()

    def ExitPlayer():
        pygame.mixer.music.stop()

    # NOTE(review): play() requires an `audio` argument, but the button
    # passes none — clicking PLAY would raise a TypeError.
    button1 = tkr.Button(player, width=5, height=3, text="PLAY", command=play)
    button1.pack(fill="x")
    button2 = tkr.Button(player, width=5, height=3, text="STOP", command=ExitPlayer)
    button2.pack(fill="x")
    label1 = tkr.LabelFrame(player, text="Song Name")
    label1.pack(fill="both", expand="yes")
    # NOTE(review): `file` is not defined in this scope (it was a builtin
    # only in Python 2) — verify where it is meant to come from.
    contents1 = tkr.Label(label1, text=file)
    contents1.pack()

    def translate(phrase):
        # Shadows the `google.cloud.translate` module imported above.
        # Strips the command words ("translate ... to <language>") and sends
        # the rest to the Translate API.
        try:
            split_phrase = phrase.split(' ')
            list_remove = []
            list_remove.append(split_phrase[0])
            list_remove.append(split_phrase[-1])
            list_remove.append(split_phrase[-2])
            for word in list_remove:
                phrase = phrase.replace(word, '')
            phrase = phrase.strip()
            target = ''
            languages = translate_client.get_languages()
            # The last word of the command is matched against language names.
            for language in languages:
                if list_remove[1].lower() == language.get('name').lower():
                    target = language.get('language')
            if target == '':
                talkToMe(translate_error_message)
            else:
                translate = translate_client.translate(values=phrase,
                                                       target_language=target)
                return translate
        except AttributeError:
            talkToMe(translate_error_message)
        except IndexError:
            talkToMe(translate_error_message)

    def valid_google_search(phrase):
        # True when the command starts with a question word.
        if (google_dict.get(phrase.split(' ')[0]) == phrase.split(' ')[0]):
            return True

    def valid_greeting_search(phrase):
        if (greeting_dict.get(phrase.split(' ')[0]) == phrase.split(' ')[0]):
            return True

    def talkToMe(audio):
        "speaks audio passed as argument"
        print(audio)
        engine.say(audio)
        engine.runAndWait()

    def talk(audio):
        "speaks audio passed as argument"
        engine.say(audio)
        engine.runAndWait()

    def greetMe():
        # Time-of-day greeting.
        currentH = int(datetime.datetime.now().hour)
        if currentH >= 0 and currentH < 12:
            talkToMe('Good Morning Snehal!')
        if currentH >= 12 and currentH < 18:
            talkToMe('Good Afternoon Snehal!')
        if currentH >= 18 and currentH != 0:
            talkToMe('Good Evening Snehal!')

    greetMe()

    def google_search_result(query):
        # Speak only the first search result's description.
        search_result = google.search(query)
        for result in search_result:
            talkToMe('Result Found')
            print(result.description)
            talk(result.description)
            break

    #google_search_result('how many states in india')

    def myCommand():
        "listens for commands"
        r = sr.Recognizer()
        with sr.Microphone() as source:
            print('Listening...')
            r.pause_threshold = 1
            r.adjust_for_ambient_noise(source, duration=1)
            audio = r.listen(source)
            try:
                command = r.recognize_google(audio).lower()
            except sr.UnknownValueError:
                # Retry recursively until something is understood.
                talkToMe('Your last command couldn\'t be heard')
                command = myCommand()
        return command

    def sound():
        #pygame.init()
        mixer.init()
        mixer.music.load("good.mp3")

    def alarm():
        # Ask for H/M/S by voice, busy-wait until that time, then ring.
        talkToMe('can you tell me Hour')
        hor = int(myCommand())
        talkToMe('can you tell me Minutes')
        minn = int(myCommand())
        talkToMe('can you tell me Seconds')
        sec = int(myCommand())
        n = 5
        #talkToMe('can you tell me AM or PM')
        #af=myCommand()
        talkToMe('Alarm Set For ' + str(hor) + ' Hour ' + str(minn) +
                 ' Minutes ' + str(sec) + ' seconds')
        # Busy-wait (100% CPU) until the exact time matches.
        while True:
            if time.localtime().tm_hour == hor and time.localtime().tm_min == minn and time.localtime().tm_sec == sec:
                print('wake up')
                break
        sound()
        while n > 0:
            mixer.music.play()
            time.sleep(2)
            n = n - 1
        sn = myCommand()
        if sn == 'snooze':
            n = 3
            time.sleep(60)
            # NOTE(review): this loop never decrements n, so it never ends.
            while n > 0:
                mixer.music.play()
                time.sleep(2)
        else:
            exit()

    def assistant(command):
        "if statements for executing commands"
        "This Commands For Open Web Pages Like Youtube, Google, Gmail, Instagram"
        if 'open youtube' in command:
            url = 'https://www.youtube.com/'
            talkToMe('Sure Snehal')
            webbrowser.open(url)
            print('Done!')
        elif 'open google' in command:
            url = 'https://www.google.com/'
            talkToMe('Sure Snehal')
            webbrowser.open(url)
            print('Done!')
        elif 'open facebook' in command:
            url = 'https://www.facebook.com/'
            talkToMe('Sure Snehal')
            webbrowser.open(url)
            print('Done!')
        elif 'open gmail' in command:
            url = 'https://mail.google.com/'
            talkToMe('Sure Snehal')
            webbrowser.open(url)
            print('Done!')
        elif 'open instagram' in command:
            url = 'https://www.instagram.com/'
            talkToMe('Sure Snehal')
            webbrowser.open(url)
            print('Done!')
        elif 'play music' in command:
            talkToMe('Sure Snehal')
            talkToMe('There Are Five Songs In Your Playlist')
            talkToMe('Child')
            talkToMe('Finally Found You')
            talkToMe('we found love')
            talkToMe('Live Young')
            talkToMe('Spaceman')
            talkToMe('which one you want to play?')
            song = myCommand()
            if song == 'child':
                play("Child.mp3")
                player.mainloop()
            elif song == 'finally found you':
                play("Finally Found You.mp3")
                player.mainloop()
            elif song == 'we found love':
                play("we found love.mp3")
                player.mainloop()
            elif song == 'live young':
                play("Live Young.mp3")
                player.mainloop()
            elif song == 'spaceman':
                play("Spaceman.mp3")
                player.mainloop()
            else:
                talkToMe('Song is not in playlist')
        # elif 'open downloads' in command:
        #     os.system('explorer C:\Users\SNEHAL\Downloads')
        elif 'open notepad' in command:
            os.system('notepad.exe')
        elif 'open powerpoint' in command:
            os.system(
                'explorer C:\Program Files (x86)\Microsoft Office\Office14\POWERPNT.EXE'
            )
        elif 'open wordpad' in command:
            os.system(
                'explorer C:\Program Files (x86)\Microsoft Office\Office14\WINWORD.EXE'
            )
        elif 'open paint' in command:
            os.system('mspaint.EXE')
        elif 'whats up' in command:
            talkToMe('Just doing my thing')
            #elif 'what is date' in command:
            # NOTE(review): `datetime` is the module here, so datetime.now()
            # would raise AttributeError (greetMe uses datetime.datetime.now).
            now = datetime.now()
            print(now)
        elif 'how are you' in command:
            talkToMe('i am fine..how are you Snehal?')
        elif 'i am also fine' in command:
            talkToMe('thats good ')
        elif 'tell me about you' in command:
            talkToMe(
                'Hiiii My Name is Chitti I am an Artificial Intilligence Designed by Snehal Mastud'
            )
        elif 'tell me your name' in command:
            talkToMe('My Name is Chitti')
        elif 'tell me my name' in command:
            talkToMe('Your Name Is Snehal Mastud')
        elif 'good morning' in command:
            talkToMe('Good Morning Snehal')
        elif 'good afternooon' in command:
            talkToMe('Good Afternoon Snehal')
        # NOTE(review): duplicate 'good morning' condition — this branch is
        # unreachable (probably meant to be 'good night').
        elif 'good morning' in command:
            talkToMe('Good Night Snehal')
        elif 'tell me joke' in command:
            res = requests.get('https://icanhazdadjoke.com/',
                               headers={"Accept": "application/json"})
            if res.status_code == requests.codes.ok:
                talkToMe(str(res.json()['joke']))
            else:
                talkToMe('oops!I ran out of jokes')
        elif 'set alarm' in command:
            alarm()
        elif 'calculate' in command:
            talkToMe('ok Snehal')
            talkToMe('what is the function Snehal?')
            function = myCommand()
            if function == 'addition':
                talkToMe('Can you tell me first number')
                a = int(myCommand())
                print(a)
                talkToMe('Can you tell me second number')
                b = int(myCommand())
                print(b)
                talkToMe('Ok Calculating')
                c = a + b
                talkToMe('Answer is')
                talkToMe(c)
            elif function == 'subtraction':
                talkToMe('Can you tell me first number')
                a = int(myCommand())
                print(a)
                talkToMe('Can you tell me second number')
                b = int(myCommand())
                print(b)
                talkToMe('Ok Calculating')
                c = (a - b)
                talkToMe('Answer is')
                talkToMe(c)
            elif function == 'multiply':
                talkToMe('Can you tell me first number')
                a = int(myCommand())
                print(a)
                talkToMe('Can you tell me second number')
                b = int(myCommand())
                print(b)
                talkToMe('Ok Calculating')
                c = a * b
                talkToMe('Answer is')
                talkToMe(c)
            elif function == 'divide':
                talkToMe('Can you tell me first number')
                a = int(myCommand())
                print(a)
                talkToMe('Can you tell me second number')
                b = int(myCommand())
                print(b)
                talkToMe('Ok Calculating')
                c = a / b
                talkToMe('Answer is')
                talkToMe(c)
            else:
                talkToMe('sorry you enter wrong function')
        else:
            talkToMe('I don\'t know what you mean!')

    talkToMe('Hello Snehal what i can do for you?')
    #loop to continue executing multiple commands
    while True:
        command = myCommand()
        if valid_greeting_search(command):
            talkToMe('hii Snehal')
        #elif 'open' in command:
        #    talkToMe('Ok Snehal')
        #    os.system('explorer C:\\"{}"'.format(command.replace('open ','')))
        elif 'translate' in command:
            text = translate(command)
            print(text.get('translatedText'))
        elif valid_google_search(command):
            google_search_result(command)
            #print('In Google Search....')
            webbrowser.open(
                'https://www.google.com/search?q={}'.format(command))
        elif 'bye' in command:
            talkToMe('Bye Have A Nice Day')
            exit()
        else:
            assistant(command)
# The text to translate list = [ u'<span style=color: rgb(255,0,0);>・削除でお願いします。</span>', u'<strong>質疑応答の内容(後日の回答分を含む)、はこちら!</strong>', u'<strong>SDXCカードを初めて差したときには「本体更新」を求められます。</strong>' ] length = len(list) # The target language source = 'ja' target = 'en' # Loop for translation # Translates some text into Japanese for i in range(length): response = client.translate_text(parent=parent, contents=[list[i]], mime_type='text/plain', source_language_code=source, target_language_code=target) print('String' + str(i + 1) + ':') print(list[i]) for translation in response.translations: print('{}'.format(translation)) print(' ') if __name__ == "__main__": translate()
def main():
    """Translate the crawled microblog dump into English (file in, file out)."""
    # Earlier experiments, kept for reference:
    #words = " पर भी joke बना रहे है भड़वे #RIP इंसानियत"
    #words = "ktm:9843552882 biratnagar:9862005225 chitawan:9855065135 jhapa:9817976211 butwal:9812900905 रगत चाहिएको खन्डमा सम्पर्क गरौ #Nepal"
    #print(google_translate_CtoE(words,'auto'))
    base_dir = "C:\\Users\\Administrator\\Desktop\\Fire2017\\Fire2017-IRMiDis-data\\microblogs-crawl-directory\\"
    translate(base_dir + "allData.txt", base_dir + "translate1.txt")
# print(u'Text: {}'.format(text)) # print(u'Translation: {}'.format(translation['translatedText'])) # [END translate_quickstart] return translation['translatedText'] if __name__ == '__main__': import os # os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = "C:\\Users\\admin\\Downloads\\338243e517d6.json" print(os.environ['GOOGLE_APPLICATION_CREDENTIALS']) text = u'''Adobe Acrobat PDF Files Adobe® Portable Document Format (PDF) is a universal file format that preserves all of the fonts, formatting, colours and graphics of any source document, regardless of the application and platform used to create it. Adobe PDF is an ideal format for electronic document distribution as it overcomes the problems commonly encountered with electronic file sharing. • Anyone, anywhere can open a PDF file. All you need is the free Adobe Acrobat Reader. Recipients of other file formats sometimes can't open files because they don't have the applications used to create the documents. • PDF files always print correctly on any printing device. • PDF files always display exactly as created, regardless of fonts, software, and operating systems. Fonts, and graphics are not lost due to platform, software, and version incompatibilities. • The free Acrobat Reader is easy to download and can be freely distributed by anyone. • Compact PDF files are smaller than their source files and download a page at a time for fast display on the Web.''' translate(text)