def delete_all_notes():
    # "w+" truncates/creates for read-write; "a+" appends/creates; "r" is read-only
    f = open("notes.txt", "w+")
    f.write("")
    f.close()
    tts("All notes have been deleted")
def conversation():
    tts("ว่าไงคะ")  # "What's up?"
    text = stt()
    count = 0
    # lights / plugs
    if any(['ปิดไฟ' in text, 'ปิดปลั๊ก' in text, 'เปิดไฟ' in text, 'เปิดปลั๊ก' in text]):
        count = open_close(text)
    # play music
    if any(['เปิดเพลง' in text, 'เล่นเพลง' in text]):
        song(text)
        count = 1
    # check email
    if any(['เช็คเมล์' in text, 'เช็คอีเมล' in text, 'เช็ค inbox' in text,
            'ตรวจอีเมล' in text, 'ตรวจเมล์' in text]):
        checkemail()
        count = 1
    # check calendar notifications
    if any(['บอกการแจ้งเตือนปฏิทิน' in text, 'ดูปฏิทิน' in text, 'แจ้งเตือนอะไร' in text,
            'เช็คปฏิทิน' in text, 'เช็คแจ้งเตือน' in text, "ดูการแจ้งเตือน" in text,
            "มีการแจ้งเตือน" in text]):
        count = checkcalendar(text)
    # no handler matched and the transcript itself was not an error message
    if (count == 0 and text != "ไม่เข้าใจค่ะกรุณาลองอีกครั้ง"
            and text != "ไม่เข้าใจที่พูดออกมาค่ะ"):
        tts("ไม่เข้าใจคำสั่งของคุณค่ะ")  # "I don't understand your command"
def show_all_notes():
    tts("Your notes are as follows: ")
    f = open("notes.txt", "r")
    if f.mode == "r":
        contents = f.read()
        tts(contents)
    f.close()
def stt():
    # obtain audio from the microphone
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("\n\nSay Command in thai!")
        r.adjust_for_ambient_noise(source)
        audio = r.listen(source)
    # recognize speech using Google Speech Recognition
    try:
        text = r.recognize_google(audio, language="th-TH")
        print("Google Speech Recognition thinks you said " + text + "\n")
    except sr.UnknownValueError:
        print("Google Speech Recognition could not understand audio\n")
        text = "ไม่เข้าใจที่พูดออกมาค่ะ"  # "I didn't understand what was said"
        tts(text)
    except sr.RequestError as e:
        print("Could not request results from Google Speech Recognition service; {0}\n".format(e))
        text = "Could not request results from Google Speech Recognition service"
    return text
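# Hedged usage sketch: assumes tts(), stt() and the conversation() dispatcher
# above live in the same module; the wake-word check is purely illustrative.
if __name__ == "__main__":
    while True:
        heard = stt()            # one listen/transcribe cycle
        if "สวัสดี" in heard:    # "hello" - illustrative trigger
            conversation()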
def robot(dire):
    G.setmode(G.BCM)
    G.setwarnings(False)
    G.setup(23, G.OUT)
    G.setup(24, G.OUT)
    G.setup(14, G.OUT)
    G.setup(15, G.OUT)
    if 'clean' in dire:
        tts("i do not have hardware to clean house, so sorry")
    if 'stop' in dire:
        G.output(23, G.LOW)
        G.output(24, G.LOW)
        G.output(14, G.LOW)
        G.output(15, G.LOW)
    if 'forward' in dire:
        G.output(23, G.LOW)
        G.output(24, G.LOW)
        G.output(14, G.HIGH)
        G.output(15, G.HIGH)
    if 'backward' in dire:
        G.output(23, G.HIGH)
        G.output(24, G.HIGH)
        G.output(14, G.LOW)
        G.output(15, G.LOW)
    if 'left' in dire:
        G.output(23, G.LOW)
        G.output(24, G.HIGH)
        G.output(14, G.HIGH)
        G.output(15, G.LOW)
    if 'right' in dire:
        G.output(23, G.HIGH)
        G.output(24, G.LOW)
        G.output(14, G.LOW)
        G.output(15, G.HIGH)
def robot(dire): G.setmode(G.BCM) G.setwarnings(False) G.setup(23, G.OUT) G.setup(24, G.OUT) G.setup(14, G.OUT) G.setup(15, G.OUT) if "clean" in dire: tts("i do not have hardware to clean house, so sorry") if "stop" in dire: G.output(23, G.LOW) G.output(24, G.LOW) G.output(14, G.LOW) G.output(15, G.LOW) if "forward" in dire: G.output(23, G.LOW) G.output(24, G.LOW) G.output(14, G.HIGH) G.output(15, G.HIGH) if "backward" in dire: G.output(23, G.HIGH) G.output(24, G.HIGH) G.output(14, G.LOW) G.output(15, G.LOW) if "left" in dire: G.output(23, G.LOW) G.output(24, G.HIGH) G.output(14, G.HIGH) G.output(15, G.LOW) if "right" in dire: G.output(23, G.HIGH) G.output(24, G.LOW) G.output(14, G.LOW) G.output(15, G.HIGH)
def who_am_i():
    insights = [
        "You sound like a nice person. I wish you all the best.",
        "Is that a philosophical question or do you suffer from amnesia?",
        "Obviously you are my user!"
    ]
    tts(random.choice(insights))
async def quote(ctx):
    filename = 'quote_tts'
    # parse http request
    page = requests.get('https://www.insightoftheday.com')
    soup = BeautifulSoup(page.text, 'html.parser')
    # extract info
    daily_post = soup.find('div', class_='daily-post')
    author = daily_post.find('h2', class_='entry-title').find(text=True).rsplit(' by ', 1)[1]
    quote_contents = daily_post.find('div', class_='quote').find('img')
    quote_text = quote_contents.attrs.get('alt').split(': ', 1)[1].rsplit('\xa0', 1)[0]
    # create mp3 file from quote_text
    tts(quote_text, filename)
    # write message
    embed = discord.Embed(
        title=f'{calendar.day_name[datetime.datetime.today().weekday()]}\'s Inspirational Quote',
        description="Are you feeling motivated?",
        color=0x00ff00)
    embed.add_field(name="Quote", value=quote_text)
    embed.set_image(url=quote_contents.attrs.get('src'))
    # embed.set_footer(text='You are out of my LOS', icon_url=quote_contents.attrs.get('src'))
    # embed.set_footer(text='You are out of my LOS')
    embed.set_footer(text=author)
    await ctx.send(embed=embed, tts=False)
    # await ctx.voice_client.disconnect()
    # activate text to speech
    channel = ctx.author.voice.channel
    vc = await channel.connect()
    vc.play(discord.FFmpegPCMAudio(f'{filename}.mp3'), after=lambda e: print('done', e))
    while vc.is_playing():
        await asyncio.sleep(1)
    vc.stop()
    await vc.disconnect()
    os.remove(f'{filename}.mp3')
def query(self, texto):
    self.request.query = texto
    response = self.request.getresponse()
    if os.name == "posix":
        query = json.loads(response.read().decode())["result"]
    else:
        query = json.loads(response.read())["result"]
    intencion = query["metadata"]["intentName"]  # intent name returned by the service
    if query["fulfillment"]["speech"] != "":
        # print(query["fulfillment"]["speech"])
        tts.tts(query["fulfillment"]["speech"], self.volumen)
    if intencion == "usar_modulos":        # "use modules"
        self.usar_modulos(query["parameters"])
    elif intencion == "volumen":           # set volume
        self.controlarVolumen(0, query["parameters"])
    elif intencion == "bajar_volumen":     # lower volume
        self.controlarVolumen(-1, query["parameters"])
    elif intencion == "subir_volumen":     # raise volume
        self.controlarVolumen(1, query["parameters"])
    self.request = self.ai.text_request()
def controlarVolumen(self, tipo, parametros, voz=True):
    # tipo < 0 lowers the volume, tipo > 0 raises it, tipo == 0 sets an absolute level
    if self.silencioAbsolutov:
        self.silencioAbsolutov = False
        self.ventilador(True)
        os.system("amixer sset Master {}%".format(self.volumen))
    if tipo < 0:
        if parametros["number"] != '':
            os.system("amixer sset Master {}%-".format(int(parametros["number"])))
            self.volumen -= int(parametros["number"])
        else:
            os.system("amixer sset Master 10%-")
            self.volumen -= 10
    elif tipo > 0:
        if parametros["number"] != '':
            os.system("amixer sset Master {}%+".format(int(parametros["number"])))
            self.volumen += int(parametros["number"])
        else:
            os.system("amixer sset Master 10%+")
            self.volumen += 10
    else:
        if parametros["number"] != '':
            os.system("amixer sset Master {}%".format(int(parametros["number"])))
            self.volumen = int(parametros["number"])
        elif parametros["valores"] != '':
            os.system("amixer sset Master {}%-".format(int(parametros["valores"])))
            self.volumen = int(parametros["valores"])
    if voz:
        tts.tts("volumen al {}% señor".format(self.volumen), self.volumen)  # "volume at N%, sir"
        self.arduino.setVolumen(self.volumen)
def where_born():
    answers = [
        "I wasn't exactly born. I'm a computer program remember?",
        "Technically inside a computer",
        "Computer programs aren't born, they are written by programmers"
    ]
    tts(random.choice(answers))
def who_are_you():
    messages = [
        "I'm Aida, your personal assistant",
        "Aida, I thought I told you before",
        "My Name is Aida and I'm a personal assistant"
    ]
    tts(random.choice(messages))
def tell_joke():
    jokes = [
        "I'm afraid I'm not that funny",
        "Jokes are dead, look at memes instead",
        "No, I always forget the punch line"
    ]
    tts(random.choice(jokes))
def playSong(song):
    try:
        p = vlc.MediaPlayer("song/" + song)
        p.play()
        return 1
    except Exception as e:
        tts("มีปัญหาเล่นเพลงค่ะ")  # "There was a problem playing the song"
        return 0
def answer_callback(answer, final_callback):
    print(answer)
    if answer == '<unrecognized speech>':
        tts('I didn\'t understand. Please try again.')
        sd = SpeechDetector()
        sd.run(answer_callback, final_callback)
        return
    final_callback(answer)
def take_notes(speech_text):
    words_of_message = speech_text.split()
    words_of_message.remove("note")
    cleaned_message = ' '.join(words_of_message)
    f = open("notes.txt", "a+")
    f.write("'" + cleaned_message + "'" + " - note taken at: " +
            datetime.strftime(datetime.now(), "%d-%m-%y") + "\n")
    f.close()
    tts("Your note has been saved")
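# Hedged usage sketch: take_notes() expects the literal word "note" in the
# transcript and stores the rest; the phrase below is illustrative.
take_notes("note buy milk on the way home")  # appends to notes.txt
show_all_notes()                             # reads notes.txt back through tts()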
def buenasNoches():
    # "good night" routine: lower the volume, speak, switch off light and curtain
    global hiloTemporal
    dia = False
    ventilador(False)  # fan off
    IkarosApiAI.controlarVolumen(0, {"number": '30', "valores": ''}, voz=False)
    tts.tts("buenas noches señor", IkarosApiAI.volumen)  # "good night, sir"
    IkarosApiAI.query("apaga la luz y cierra la cortina")  # "turn off the light and close the curtain"
    hiloTemporal = threading.Timer(7200, ventilador, args=(True,))
    hiloTemporal.start()
def play_shuffle(music_path):
    try:
        music_listing = mp3gen(music_path)
        random.shuffle(music_listing)
        for track in music_listing:
            music_player(track)
    except IndexError as e:
        tts("No music files found!")
        print("No music files found: {0}".format(e))
def play_random(music_path): try: music_listing = mp3gen(music_path) music_playing = random.choice(music_listing) tts("Now playing: " + music_playing) music_player(music_playing) except IndexError as e: tts("No music files found!") print("No music files found: {0}".format(e))
def play_random(music_path):
    try:
        music_listing = mp3gen(music_path)
        music_playing = random.choice(music_listing)
        song_name = os.path.split(music_playing)  # (directory, filename)
        tts("Now playing: " + song_name[1])
        music_player(music_playing)
    except IndexError as e:
        tts('No music files found.')
        print("No music files found: {0}".format(e))
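# Hedged usage sketch: mp3gen() (defined elsewhere in this project) is assumed
# to return a list of audio file paths under the given directory; the music
# directory below is illustrative.
import os

play_random(os.path.expanduser("~/Music"))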
def buenosDias():
    # "good morning" routine: raise the volume, speak, switch on light and curtain
    global hiloTemporal
    dia = True
    ventilador(True)  # fan on
    IkarosApiAI.controlarVolumen(0, {"number": '60', "valores": ''}, voz=False)
    tts.tts("buenos días señor", IkarosApiAI.volumen)  # "good morning, sir"
    IkarosApiAI.query("prende la luz y abre la cortina")  # "turn on the light and open the curtain"
    try:
        hiloTemporal.cancel()
    except Exception:
        pass
def __init__(self):
    self.settings_path = "settings/settings.txt"
    self.settings = SettingsList(self.settings_path)
    self.settings.LoadSettings()
    states_path = self.settings.GetSetting("statespath")
    self.states = StatesList(states_path[:-1])  # drop the trailing newline
    self.states.LoadStates()
    tts("Hello Tyler. Pineapple System is ready.")
def vts():
    try:
        r = sr.Recognizer()
        with sr.Microphone() as source:
            print("----Inside----")
            audio = r.listen(source)
            sound = r.recognize_google(audio)
            return sound
    except Exception as e:
        print("what was that ?")
        tts("What was that ?")
        return 0
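# Hedged usage sketch: vts() returns the transcript on success and 0 on failure,
# so a simple truthiness check separates the two; the keyword is illustrative.
heard = vts()
if heard and "hello" in heard:
    tts("Hello there!")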
def youtube(video):
    pid = os.fork()
    if pid == 0:
        # child: hand the query to mps-youtube and play the first result
        tts.tts("Going to play {}".format(video))
        video = "mpsyt ." + video + ",1"
        print("I entered this {0}".format(video))
        os.system(video)
        time.sleep(20)
    else:
        # parent: allow 150 seconds of playback, then kill the child
        time.sleep(150)
        print("parent end\n")
        os.kill(pid, 9)
def run_text_to_speech(req):
    # bing = BingSpeechAPI()
    # bing.text_to_speech(text='Can I have some coffee?')
    tts(req.text)
    # engine = pyttsx3.init()
    # engine.setProperty('rate', 90)    # 90 words per minute
    # engine.setProperty('volume', 0.9)
    # engine.say(req.text)
    # engine.runAndWait()
    return True
def query_callback(self, query, ignore_params=None):
    global image_file
    query = query.replace('cap', 'cup')  # common misrecognition fix
    print(query)
    if query == '<unrecognized speech>':
        return
    parsed_query = parse_query(query)
    subject = parsed_query['subject']
    label = parsed_query['label']
    if label is None:
        return
    print(label)
    with open(image_file, 'rb') as f:
        data = f.read()
    response = processRequest(data)
    print(response)
    # collect detections whose class name matches the requested label
    candidate_indices = []
    for i, class_name in enumerate(response['result']['class_names']):
        if class_name.startswith(label + '|'):
            candidate_indices.append(i)
    if len(candidate_indices) == 1:
        y1, x1, y2, x2 = response['result']['yx_boxes'][candidate_indices[0]]
        self.draw_box(y1, x1, y2, x2)
        tts("Here is the " + subject)
    elif len(candidate_indices) == 2:
        # two matches: sort left-to-right by box centre and ask which one was meant
        candidate_boxes = []
        for i in candidate_indices:
            y1, x1, y2, x2 = response['result']['yx_boxes'][i]
            x_center = (x1 + x2) / 2
            candidate_boxes.append((y1, x1, y2, x2, x_center))
        candidate_boxes.sort(key=lambda box: box[4])
        tts("Do you mean the " + subject + " on the left or the " +
            subject + " on the right?")
        sd = SpeechDetector()
        sd.run(self.answer_callback, (candidate_boxes, subject))
def behavior_alert():
    """
    Alert the user when a time stored in user_behavior is reached.
    Asks the user for permission before executing the action.
    """
    time = datetime.datetime.now()
    h = time.hour
    m = time.minute
    conn = sqlite3.connect('modules/data/user_behavior.db')
    cur = conn.cursor()
    cur.execute('select * from behavior')
    rows = cur.fetchall()
    for row in rows:
        final = []
        print("Current time || hour:", h, " minute: ", int(m),
              " time to open or close:", row[2])
        if int(row[2]) == int(h) and int(m) == 0:
            print(row[2], row[3])
            if row[3] == 1:
                final.append("open")
                event = 0
                command = "เปิด"  # "turn on"
            elif row[4] == 1:
                final.append("close")
                event = 1
                command = "ปิด"  # "turn off"
            final.append(row[1])
            tts("ต้องการที่จะ" + command + "ไฟ" + row[1] + "หรือไม่คะ")  # "Do you want to <command> the light <name>?"
            text = stt()
            print(event)
            if (any(["ปิด" in text, "ใช่" in text]) and event == 1) or \
               (any(["เปิด" in text, "ใช่" in text]) and event == 0):
                run = ",".join(final)
                success = muterun_js('plug/plugForBot.js', run)
                if success.exitcode == 0:
                    print(success.stdout.decode("utf-8"))
                print(run)
                print(success.exitcode)
            else:
                tts("ไม่เข้าใจคำสั่งค่ะ")  # "I don't understand the command"
    return 0
def answer_callback(self, answer, candidate_boxes_and_subject):
    print(answer)
    if answer == '<unrecognized speech>':
        tts('I didn\'t understand. Please try again.')
        sd = SpeechDetector()
        sd.run(self.answer_callback, candidate_boxes_and_subject)
        return
    if 'left' in answer:
        box = candidate_boxes_and_subject[0][0]
    else:
        box = candidate_boxes_and_subject[0][1]
    self.draw_box(box[0], box[1], box[2], box[3])
    tts("Here is the " + candidate_boxes_and_subject[1])
def forecast(speech_text):
    words_of_message = speech_text.split()
    words_of_message.remove("weather")
    cleaned_message = '-'.join(words_of_message)  # the site uses hyphenated location slugs
    try:
        result = requests.get("https://www.weather-forecast.com/locations/" +
                              cleaned_message + "/forecasts/latest")
        soup = BeautifulSoup(result.content, "lxml")
        forecasts = soup.find_all("p")[1:2]
        for forecast in forecasts:
            tts(forecast.text)
    except Exception:
        tts("Something went wrong, please try again")
def writeWave(id, sentences, lang):
    response = []
    if not os.path.isdir(id):
        os.makedirs(id)
    index = 0
    for sentence in sentences:
        waveName = id + '_' + str(index) + '.wav'
        wavePath = id + '/' + waveName
        with open(wavePath, 'wb') as audio:
            audio.write(tts(sentence, lang))
        subtitleName = id + '_' + str(index) + '.txt'
        subtitlePath = id + '/' + subtitleName
        with open(subtitlePath, "w") as subtitle:
            subtitle.write(sentence)
        response.append({
            "sentenceId": index,
            "newsId": id,
            "dirPath": os.getcwd() + '/' + id,
            "wave": waveName,
            "text": sentence
        })
        index += 1
    return response
def writeWaveArrays(id, sentences, lang):
    # like writeWave() above, but returns parallel lists instead of dicts
    # and skips the subtitle files
    audioPaths = []
    audioTexts = []
    if not os.path.isdir(id):
        os.makedirs(id)
    index = 0
    for sentence in sentences:
        waveName = id + '_' + str(index) + '.wav'
        wavePath = id + '/' + waveName
        with open(wavePath, 'wb') as audio:
            audio.write(tts(sentence, lang))
        audioPaths.append(wavePath)
        audioTexts.append(sentence)
        index += 1
    return audioPaths, audioTexts
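# Hedged usage sketch: per the two functions above, tts(sentence, lang) is
# assumed to return raw WAV bytes; the id and sentences are illustrative.
paths, texts = writeWaveArrays("news001", ["First sentence.", "Second sentence."], "en")
for wav, sentence in zip(paths, texts):
    print(wav, "->", sentence)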
def getEventsTomorrow():
    # timedelta for adding one day
    one_day = datetime.timedelta(days=1)
    tz = pytz.timezone('Asia/Bangkok')
    # build tomorrow's start and end times in RFC3339 format
    d = datetime.datetime.now(tz=tz) + one_day
    utcString = d.isoformat()
    m = re.search(r'((\+|\-)[0-9]{2}\:[0-9]{2})', str(utcString))
    utcString = m.group(0)  # keep only the UTC offset, e.g. "+07:00"
    tomorrowStartTime = str(d.strftime("%Y-%m-%d")) + "T00:00:00" + utcString
    tomorrowEndTime = str(d.strftime("%Y-%m-%d")) + "T23:59:59" + utcString
    page_token = None
    while True:
        # get events from the primary calendar, page by page, within tomorrow's bounds
        events = service.events().list(calendarId='primary',
                                       pageToken=page_token,
                                       timeMin=tomorrowStartTime,
                                       timeMax=tomorrowEndTime).execute()
        if len(events['items']) == 0:
            tts("คุณไม่มีการแจ้งเตือนอะไรในวันพรุ่งนี้")  # "You have no reminders tomorrow"
            return
        tts("คุณมี %d การแจ้งเตือนค่ะ" % len(events['items']))  # "You have N reminders"
        print("คุณมี %d การแจ้งเตือนค่ะ" % len(events['items']))
        count = 0
        for event in events['items']:
            try:
                eventTitle = str(event['summary'])
                eventRawStartTime = event['start']['dateTime'].split("T")
                temp = eventRawStartTime[1]
                startHour, startMinute, temp = temp.split(":", 2)
                startHour = str(int(startHour))
                startMinute = str(startMinute)
                if count == 0:
                    response = eventTitle + " ตอน " + startHour + ":" + startMinute
                if count > 0:
                    response = response + "กับ" + eventTitle + " ตอน " + startHour + ":" + startMinute
                count = count + 1
            except KeyError:
                count = 500
                tts("มีปัญหาในการต่อปฏิทิน Google ค่ะ")  # "There was a problem connecting to Google Calendar"
        page_token = events.get('nextPageToken')
        if count != 500:
            tts(response + "ค่ะ")
        if not page_token:
            return
def process(inpQ):
    while True:
        inpt = 0
        # get input images from the Input Control Module's queue
        while not inpQ.empty():
            inpt = inpQ.get()
            inpQ.task_done()
        if inpt:
            starttime = time.time()
            imgL = inpt[0]
            imgR = inpt[1]
            pool1 = ThreadPool(processes=1)
            pool2 = ThreadPool(processes=1)
            # run feature matching in its own thread
            matching = pool1.apply_async(matcher.match, (imgL,))
            # run stereo matching in its own thread
            stereos = pool2.apply_async(stereo.stereo, (imgL, imgR))
            point = 0
            disparity = np.float32([])
            # wait for values from both threads
            while True:
                point = matching.get()
                disparity = stereos.get()
                if point and disparity.size:
                    break
                else:
                    point = 0
                    disparity = np.float32([])
            output = 0
            if point[0] > 0:
                if disparity[point[1], point[0]] > 0.3:
                    output = "Book is Near"
            print "Processing Module", time.time() - starttime
            # call the text-to-speech module
            if output:
                tts.tts(output)
def stt():
    va_name = profile.data['va_name']
    r = sr.Recognizer()
    tts('Hello ' + profile.data['name'] + ', systems are now ready to run. How can I help you?')
    if profile.data['stt'] == 'google':
        while True:
            with sr.Microphone() as source:
                print("Say something!")
                audio = r.listen(source)
            try:
                speech_text = r.recognize_google(audio).lower().replace("'", "")
                print(va_name + " thinks you said '" + speech_text + "'")
            except sr.UnknownValueError:
                print(va_name + " could not understand audio")
            except sr.RequestError as e:
                print("Could not request results from Google Speech Recognition service; {0}".format(e))
            else:
                brain.query(speech_text)
    elif profile.data['stt'] == 'sphinx':
        modeldir = profile.data['pocketsphinx']['modeldir']
        hmm = profile.data['pocketsphinx']['hmm']
        lm = profile.data['pocketsphinx']['lm']
        dic = profile.data['pocketsphinx']['dic']
        config = Decoder.default_config()
        config.set_string('-hmm', os.path.join(modeldir, hmm))
        config.set_string('-lm', os.path.join(modeldir, lm))
        config.set_string('-dict', os.path.join(modeldir, dic))
        config.set_string('-logfn', '/dev/null')
        decoder = Decoder(config)

        def sphinx_stt():
            stream = open('recording.wav', 'rb')
            stream.seek(44)  # bypasses wav header
            data = stream.read()
            decoder.start_utt()
            decoder.process_raw(data, False, True)
            decoder.end_utt()
            hyp = decoder.hyp()
            if hasattr(hyp, 'hypstr'):
                speech_text = hyp.hypstr
                print(profile.data['va_name'] + " thinks you said '" + speech_text + "'")
                return speech_text.lower().replace("'", "")
            else:
                return ''

        while True:
            with sr.Microphone() as source:
                print("Say something!")
                audio = r.listen(source)
            with open("recording.wav", "wb") as f:
                f.write(audio.get_wav_data())
            brain.query(sphinx_stt())
    elif profile.data['stt'] == 'keyboard':
        while True:
            keyboard_text = raw_input('Enter your query: ')
            brain.query(keyboard_text)
def stt():
    va_name = profile.data['va_name']
    r = sr.Recognizer()
    tts('Welcome ' + profile.data['name'] + ', systems are now ready to run. How can I help you?')
    if profile.data['stt'] == 'google':
        while True:
            with sr.Microphone() as source:
                print("Say something!")
                audio = r.listen(source)
            try:
                speech_text = r.recognize_google(audio).lower().replace("'", "")
                print(va_name + " thinks you said '" + speech_text + "'")
            except sr.UnknownValueError:
                print(va_name + " could not understand audio")
            except sr.RequestError as e:
                print("Could not request results from Google Speech Recognition service; {0}".format(e))
            else:
                brain.query(speech_text)
    elif profile.data['stt'] == 'sphinx':
        modeldir = profile.data['pocketsphinx']['modeldir']
        hmm = profile.data['pocketsphinx']['hmm']
        lm = profile.data['pocketsphinx']['lm']
        dic = profile.data['pocketsphinx']['dic']
        config = Decoder.default_config()
        config.set_string('-hmm', os.path.join(modeldir, hmm))
        config.set_string('-lm', os.path.join(modeldir, lm))
        config.set_string('-dict', os.path.join(modeldir, dic))
        config.set_string('-logfn', '/dev/null')
        decoder = Decoder(config)

        def sphinx_stt():
            stream = open('recording.wav', 'rb')
            stream.seek(44)  # bypasses wav header
            data = stream.read()
            decoder.start_utt()
            decoder.process_raw(data, False, True)
            decoder.end_utt()
            hyp = decoder.hyp()
            if hasattr(hyp, 'hypstr'):
                speech_text = hyp.hypstr
                print(profile.data['va_name'] + " thinks you said '" + speech_text + "'")
                return speech_text.lower().replace("'", "")
            else:
                return ''

        while True:
            with sr.Microphone() as source:
                print("Say something!")
                audio = r.listen(source)
            with open("recording.wav", "wb") as f:
                f.write(audio.get_wav_data())
            brain.query(sphinx_stt())
    elif profile.data['stt'] == 'keyboard':
        while True:
            keyboard_text = raw_input('Write something: ')
            brain.query(keyboard_text)
    elif profile.data['stt'] == 'telegram':
        def handle(msg):
            chat_id = msg['chat']['id']
            username = msg['chat']['username']
            command = msg['text'].lower().replace("'", "")
            if username == profile.data['telegram_username']:
                print(profile.data['va_name'] + " thinks you said '" + command + "'")
                brain.query(command)
            else:
                error_msg = 'You are not authorised to use this bot.'
                bot.sendMessage(chat_id, error_msg)

        if profile.data['telegram_token'] == 'xxxx':
            tts('Please enter a Telegram token or configure a different STT in the profile.json file.')
            quit()
        else:
            bot = telepot.Bot(profile.data['telegram_token'])
            bot.notifyOnMessage(handle)
            while 1:
                time.sleep(10)
from threading import Thread
from Queue import Queue

import tts
import inputctl
import processmod

inpQ = Queue(maxsize=0)
tts.tts(" Starting Program ")

inpctl = Thread(target=inputctl.inpctl, args=(inpQ,))
processmodl = Thread(target=processmod.process, args=(inpQ,))
# Starting Input Control Module
inpctl.start()
# Starting Processing Module
processmodl.start()
def wolf(string):
    flag = 0
    client = wolframalpha.Client(app_id)
    print("you entered: {0}".format(string))
    res = client.query(string)
    # print(next(res.results).text)
    if len(res.pods) > 0:
        texts = ""
        pod = res.pods[1]
        if pod.text:
            texts = pod.text
        else:
            texts = "Sorry, no answer for that"
        texts = texts.encode('ascii', 'ignore')
        texts = texts.split('\n')
        if len(texts) > 1:
            tts.tts("there are {0}".format(texts[-1]))
            tts.tts("{0}".format(texts[-2]))
            flag = 1
        tts.tts("{0}".format(texts[0]))
        if flag == 1:
            tts.tts("Should I read more meanings?")
            speech = s()
            # check each affirmative word; a bare ('yes' or 'yeah' or ...) is always truthy
            if any(word in speech for word in ('yes', 'yeah', 'yup', 'yo')):
                tts.tts(texts[1:len(texts) - 2])
    else:
        tts.tts("Sorry, something seems wrong. ")
if o in ("-h", "--help"): usage() sys.exit() elif o in ("-u", "--units"): default_unit = a elif o in ("-c", "--current_city"): default_city = a else: assert False, "unknown option" # # # GREETINGS # # # print "" print "Ask me weather. Say [ goodbye ] to exit." tenkiba0 = "ask me weather. say goodbye to exit" tts(tenkiba0) # # # L O O P # # # stopped = False while not stopped: # Step 1-0: user_input print "" tenkiba1 = "Say a city and a day." print "(Ex: weather info + city + today/tomorrow)" print "" tts(tenkiba1) # Step 1-1: get the user input from microphone and send it to Google API user_input = get_google_asr()
# # # GREETINGS # # #
print " "
print "* * * * * * * * * * * * * * * * * * * * * * * *"
print "* * * * * Good day! My name is Tenkiba.* * * * *"
print "* * * * I am a Shiba and I know weather. * * * *"
print " "
print "* Say [ Goodbye ] to me if you gotta go. :3"
print " "
print "* The default unit is [" + default_unit + "] * *"
print "* The default city is [" + default_city + "] * *"
print "* Today is " + time.strftime("%x") + ", Wooooof."
print "* * * * * * * * * * * * * * * * * * * * * * * *"
print "* * * * * * * * * * * * * * * * * * * * * * * *"
tenkiba0 = "Good day...My name is Ten-ki-ba. I am a shi-ba, and I know weather......Say Goodbye to me if you gotta go."
tts(tenkiba0)

# # # L O O P # # #
stopped = False
while not stopped:
    # Step 1-0: user_input
    print ""
    tenkiba1 = "What weather info would you like to know? You could give me a city, and a day..."
    print "What weather info would you like to know?"
    print "Example: [weather/chance to rain/to get tanned] + [city name] + [today/tomorrow]"
    print ""
    tts(tenkiba1)
    # Step 1-1: get the user input from microphone and send it to Google API
    user_input = get_google_asr()
            self.set_dialog_text(content)
            self.set_finished_state(True)
            self.tts.speak(content)
        else:
            self.set_dialog_text(question)
            self.tts.speak(question)

    def set_finished_state(self, finished):
        self.root.setProperty("finished", finished)

    def reset(self):
        self.facts.reset()
        self.set_finished_state(False)
        self.next_question()


tts_module = tts.tts("en")
app = QApplication(sys.argv)
view = QDeclarativeView()
view.rootContext().setContextProperty("mainWindow", view)
view.setWindowFlags(Qt.FramelessWindowHint)
view.setSource(os.path.join(os.path.dirname(__file__), "QML_Interface.qml"))
root = view.rootObject()
view.engine().quit.connect(view.close)
view.show()
logic = QmlApplication(root, tts_module)
sys.exit(app.exec_())