def imdb_genre(this_genre):
    # Announce (in German) three random IMDb Top-250 movies matching *this_genre*.
    # NOTE(review): if fewer than 3 of the Top-250 match, the while loop never
    # terminates -- confirm intended behaviour.
    print("\nIch suche nach Filmen mit Genre '{}'. (Das kann etwas dauern)". format(this_genre))
    response = "Jetzt such ich 3 Filme mit Genre " + str(this_genre)
    print(response)
    if run_audio == True:
        make_accustic_response(response)
    # Getting a list of the top 250 movies (This number is the only option with the IMDb python library.)
    i_movie = imdb.IMDb()
    movielist = i_movie.get_top250_movies()
    count = 0
    listed_movies = []
    while count < 3:
        # Searching for a new random number (never inspect the same movie twice)
        while True:
            ranking = random.randint(0, 249)
            if ranking not in listed_movies:
                listed_movies.append(ranking)
                break
        movie = i_movie.get_movie(movielist[ranking].movieID)
        for genre in movie['genres']:
            # Checking if the genre matches the genre the user was looking for
            # (assumes this_genre is already lowercase -- TODO confirm at caller)
            if genre.lower() == this_genre:
                count += 1
                print("{0}. {1}".format(count, movie['title']))
                if run_audio == True:
                    response = movie['title']
                    espeak.synth(response)
    print("Das sollte fürs erste genügen :)")
def speek(text, lcd=True, english=False, dummy=None):
    """Speak *text* with espeak (Swedish voice by default), optionally
    animating the LED matrix.

    text: string to synthesize.
    lcd: animate the matrix while speaking.
    english: use the English voice instead of Swedish.
    dummy: when None, dispatch the work to a background thread; any other
        value means "speak inline in this thread".
    """
    # FIX: compare to None with `is`, not `==` (PEP 8); dead commented-out
    # sleep/speaker code removed.
    if dummy is None:
        thread.start_new_thread(DO.speek, (text, lcd, english, "",))
    else:
        # Rough speech-duration estimate in seconds, floored at 0.8s.
        t = len(text)/10
        if t < 1:
            t = 0.8
        if english:
            espeak.set_voice("en+f5")
        else:
            espeak.set_voice("sv+f5")
        espeak.synth(text)
        if lcd:
            lmatrix.animate_speak(t, leav_this=lmatrix.matrix_smile)
            lmatrix.clear(10)
def math(input): from espeak import espeak import math if '+' in input: find_add = input.find('+') resultAdd = str(float(input[:find_add]) + float(input[find_add+1:])) print resultAdd espeak.synth(resultAdd) if '-' in input: find_sub = input.find('-') resultSub = str(float(input[:find_sub]) - float(input[find_sub+1:])) print resultSub espeak.synth(resultSub) if '*' in input: find_mul = input.find('*') resultMul = str(float(input[:find_mul]) * float(input[find_mul+1:])) print resultMul espeak.synth(resultMul) if '/' in input: find_div = input.find('/') resultDiv = str(float(input[:find_div]) / float(input[find_div+1:])) print resultDiv espeak.synth(resultDiv) if '%' in input: find_mod = input.find('%') resultMod = str(float(input[:find_mod]) % float(input[find_mod+1:])) print resultMod espeak.synth(resultMod)
def main(): takephoto() # First take a picture """Run a label request on a single image""" credentials = GoogleCredentials.get_application_default() service = discovery.build('vision', 'v1', credentials=credentials) with open('image3.jpg', 'rb') as image: image_content = base64.b64encode(image.read()) service_request = service.images().annotate( body={ 'requests': [{ 'image': { 'content': image_content.decode('UTF-8') }, 'features': [{ 'type': 'LABEL_DETECTION', 'maxResults': 10 }] }] }) response = service_request.execute() print json.dumps(response, indent=4, sort_keys=True) abc = json.dumps( response, indent=4, sort_keys=True) #Print it out and make it somewhat pretty. resp = json.loads(abc) for i in range(0, 20): b = (resp["responses"][0]["labelAnnotations"][i]["description"]) print b espeak.synth(b) time.sleep(2)
def load_map(self, language_with_code):
    """Load the braille sub-maps and contraction tables for a language.

    language_with_code: "language-voice" pair, e.g. "english-en"; the second
    half selects the espeak voice.
    """
    self.language = language_with_code.split("-")[0]
    espeak.set_voice(language_with_code.split("-")[1])
    print("loading Map for language : %s" % self.language)
    self.map = {}
    # The three fixed sub-maps always occupy slots 1-3.
    submap_number = 1
    self.append_sub_map("beginning.txt", submap_number)
    submap_number = 2
    self.append_sub_map("middle.txt", submap_number)
    submap_number = 3
    self.append_sub_map("punctuations.txt", submap_number)
    # Contraction dict
    self.contractions_dict = {}
    # load each contractions to map
    for text_file in os.listdir("%s/braille/%s/" % (data_dir, self.language)):
        if text_file not in [
            "beginning.txt",
            "middle.txt",
            "abbreviations.txt",
            "abbreviations_default.txt",
            "punctuations.txt",
            "help.txt",
        ]:
            # "~"-marked files are skipped in simple mode only -- here they are
            # skipped whenever simple_mode == 0 as well; note the condition.
            if self.simple_mode == 0 and "~" not in text_file:
                submap_number += 1
                self.append_sub_map(text_file, submap_number)
                self.contractions_dict[text_file[:-4]] = submap_number - 1
    # Load abbreviations if exist
    self.load_abbrivation()
    espeak.synth("{} Loaded!".format(self.language))
def speak(text, *args):
    """Read *text* aloud using the platform's TTS backend.

    text: string to read.
    *args: optional single voice index (only honoured by the Windows SAPI
        backend; ignored on Linux).
    """
    if OSNAME == "Windows":
        # Windows: drive SAPI through COM.
        engine = win32com.client.Dispatch("SAPI.SpVoice")
        if args:
            wanted_index = int(args[0])
            # Pick the voice at the requested position, if it exists.
            for position, candidate in enumerate(engine.GetVoices()):
                if position == wanted_index:
                    engine.Voice = candidate
                    break
        engine.Speak(text)
    elif OSNAME == "Linux":
        # Linux: hand the string straight to espeak.
        espeak.synth(text)
def speak_out(data, signal, signal_data): msg = parse_message(data, signal, signal_data) # filter out message that would take to long to synthesize by espeak if len(msg["message"]) > max_message_length: return W_OK # you don't want to hear your own messages elif msg["nick"] == own_nick(msg["server"]): return W_OK # don't speak out if nick is in ignore list elif msg["nick"] in ignore: return W_OK # only speak out the current buffer elif buffer_current() != buffer(msg["server"], msg["channel"]): return W_OK # build sentence sentence = syntax.format(who = msg["nick"], what = msg["message"]) # run it through espeak espeak.synth(sentence) return W_OK
def signal_handler(signal, frame):
    # Record audio, branch on recognized emotion keywords (sending a one-letter
    # command to the arduino), otherwise feed the text to the chatbot and speak
    # the reply while the arduino animates ('p' = start, 'q' = stop).
    global number
    g = record_audio.main(number);
    if 'normal' in g:
        arduino.write('a')
    elif 'unhappy' in g:
        arduino.write('b')
    elif 'angry' in g:
        arduino.write('c')
    elif 'funny' in g:
        arduino.write('f')
    else:
        g = bot2session.think(g);
        print g
        # Count spaces to estimate speech duration for the sleep below.
        i=0
        spaces=0
        while(i<len(g)):
            if g[i]==' ':
                spaces=spaces+1
            i=i+1
        arduino.write('p')
        espeak.synth(g)
        # presumably ~3 words per second -- TODO confirm timing constant
        sleep((spaces+1)/3)
        arduino.write('q')
def pdfVoice():
    # Open a PDF chosen via a file dialog, extract the text of every page,
    # speak it, save a gTTS MP3, and add a "replay" button to the GUI.
    global vee
    def again():
        # Replay: speak every stored page again.
        for i in vee:
            espeak.synth(i)
    file_path = filedialog.askopenfilename()
    pdfFileObj = open(file_path, 'rb')
    pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
    # Sanitize the path into a flat string used in the output file name.
    sv = str(file_path)
    sv = sv.replace("/", "")
    sv = sv.replace(".", "")
    num = pdfReader.numPages
    vee = []
    for i in range(num):
        pageObj = pdfReader.getPage(i)
        text = pageObj.extractText()
        vee.append(text)
    name = "PDF" + str(sv) + str(randint(0, 10000)) + ".mp3"
    print(name, type(name))
    # NOTE(review): only the LAST page's text is spoken/saved here -- `text`
    # holds the final loop value; confirm whether all pages were intended.
    espeak.synth(text)
    tts = gTTS(text=text, lang='en', slow=True)
    tts.save(name)
    pdfFileObj.close()
    # NOTE(review): vee is a list, so `vee == 1` is always False; this branch
    # looks like dead code -- confirm original intent.
    if vee == 1:
        vee = 2
    replay = Button(master, text="replay pdf", command=again)
    replay.config(width=12, height=1)
    replay.configure(background="#fff")
    replay.config(font=("Courier", 10))
    replay.pack()
def speak(text):
    """Speak *text* using the first available TTS backend.

    Prefers speech-dispatcher, falls back to espeak, and prints a hint when
    neither python binding is installed.
    """
    if speechd_available:
        client.speak(text)
        return
    if espeak_available:
        espeak.synth(text)
        return
    print("No tts api available!(python3-espeak/python3-speechd)")
def load_map(self, language_with_code):
    """Load the braille sub-maps and contraction tables for a language.

    language_with_code: "language-voice" pair; the second half selects the
    espeak voice.
    """
    self.language = language_with_code.split("-")[0]
    espeak.set_voice(language_with_code.split("-")[1])
    print("loading Map for language : %s" % self.language)
    self.map = {}
    # Fixed sub-maps occupy slots 1-3.
    submap_number = 1
    self.append_sub_map("beginning.txt", submap_number)
    submap_number = 2
    self.append_sub_map("middle.txt", submap_number)
    submap_number = 3
    self.append_sub_map("punctuations.txt", submap_number)
    #Contraction dict
    self.contractions_dict = {}
    #load each contractions to map
    for text_file in os.listdir("%s/braille/%s/" % (data_dir, self.language)):
        if text_file not in [
            "beginning.txt",
            "middle.txt",
            "abbreviations.txt",
            "abbreviations_default.txt",
            "punctuations.txt",
            "help.txt"
        ]:
            if (self.simple_mode == 0 and "~" not in text_file):
                submap_number += 1
                self.append_sub_map(text_file, submap_number)
                self.contractions_dict[text_file[:-4]] = submap_number - 1
    #Load abbreviations if exist
    self.load_abbrivation()
    espeak.synth("{} Loaded!".format(self.language))
def imageVoice():
    # OCR an image chosen via a file dialog, speak the recognized text, save
    # a gTTS MP3 of it, and add a "replay" button to the GUI.
    def again():
        # Replay handler: speak the OCR result again.
        espeak.synth(x)
    # try:
    # print ("Hi")
    # os.system("echo " + x + " | espeak -v female3 -s 120") --> female voice
    # except:
    # espeak.synth(x) ---> male voice
    file_path = filedialog.askopenfilename()
    a = PIL.Image.open(file_path).convert("RGB")
    # Sanitize the path into a flat string used in the output file name.
    sv = file_path
    sv = sv.replace("/", "")
    sv = sv.replace(".", "")
    x = pytesseract.image_to_string(a)
    x = x.replace("\n", " ")
    try:
        espeak.synth(x)
        name = "image" + sv + str(randint(0, 10000)) + ".mp3"
        tts = gTTS(text=x, lang='en', slow=True)
        tts.save(name)
        #os.system("echo " + x + " | espeak -v female3 -s 120")
    except:
        # best-effort: if gTTS/save fails, at least speak the text
        espeak.synth(x)
    replay = Button(master, text="replay image", command=again)
    replay.config(width=12, height=1)
    replay.configure(background="#fff")
    replay.config(font=("Courier", 10))
    replay.pack()
def draw(): os.system('clear') # Clear the terminal screen popper = random.choice(BINGO_NUMBERS.keys( )) # Choose a key at random from the above dictionary saythis = BINGO_NUMBERS.pop( popper ) # Pop the key: value pair from the dict, then store the value in saythis print FigNums.renderText( saythis + " " + str(popper) ) # Print the chosen column letter and number in big characters # This if statement finds the chosen number in its column list and replaces the number with the same number, but with colors inverted. if 1 <= popper <= 15: B_COLUMN[B_COLUMN.index(popper)] = '\x1b[7m' + str(popper) + '\x1b[27m' elif 16 <= popper <= 30: I_COLUMN[I_COLUMN.index(popper)] = '\x1b[7m' + str(popper) + '\x1b[27m' elif 31 <= popper <= 45: N_COLUMN[N_COLUMN.index(popper)] = '\x1b[7m' + str(popper) + '\x1b[27m' elif 46 <= popper <= 60: G_COLUMN[G_COLUMN.index(popper)] = '\x1b[7m' + str(popper) + '\x1b[27m' elif 61 <= popper <= 75: O_COLUMN[O_COLUMN.index(popper)] = '\x1b[7m' + str(popper) + '\x1b[27m' # Create and print the table of bingo numbers table = PrettyTable() table.add_column("B", B_COLUMN) table.add_column("I", I_COLUMN) table.add_column("N", N_COLUMN) table.add_column("G", G_COLUMN) table.add_column("O", O_COLUMN) print table # espeak calls out the letter and number, while waiting 3 seconds espeak.synth(saythis + str(popper)) time.sleep(3)
def run():
    # Switch the robot into the running gait; announce it only on the first
    # transition (guarded by the global flag s).
    global s
    if s == 0:
        espeak.synth("I'm off for a run")
        s = 1
    # gait 5 presumably = run -- TODO confirm against config definitions
    config.atrib['i_Gait'] = 5
    print ('run')
def speek(text, lcd=True, english=False, dummy=None):
    """Speak *text* with espeak (Swedish voice by default), optionally
    animating the LED matrix.

    text: string to synthesize.
    lcd: animate the matrix while speaking.
    english: use the English voice instead of Swedish.
    dummy: when None, dispatch to a background thread; any other value means
        "speak inline in this thread".
    """
    # FIX: compare to None with `is`, not `==` (PEP 8); dead commented-out
    # sleep/speaker code removed.
    if dummy is None:
        thread.start_new_thread(DO.speek, (
            text,
            lcd,
            english,
            "",
        ))
    else:
        # Rough speech-duration estimate in seconds, floored at 0.8s.
        t = len(text) / 10
        if t < 1:
            t = 0.8
        if english:
            espeak.set_voice("en+f5")
        else:
            espeak.set_voice("sv+f5")
        espeak.synth(text)
        if lcd:
            lmatrix.animate_speak(t, leav_this=lmatrix.matrix_smile)
            lmatrix.clear(10)
def do_posicao_xy(self, arg):
    'Informa posicao XY do robo: POSICAO_XY'
    # Report the robot's XY position and heading (degrees); the string above
    # is the cmd-module help text (in Portuguese).
    if c.read() == None:
        raise playerc_error_str()
    print 'Posicao XY: (%.3f,%.3f) Angular: (%.3f)' % (p.px,p.py,p.pa * 180.0 / math.pi)
    # Speak the same values, truncated to whole units by %d.
    espeak.synth('Posicao X: %d. Posicao Y: %d. Posicao Angular: %d graus'% (p.px, p.py, p.pa * 180.0 / math.pi))
def espeak_event(self, event, pos, length):
    # espeak callback: highlight the word being spoken, advance the cursor at
    # end-of-sentence, and chain-synthesize remaining text when a message ends.
    # Runs on a non-GUI thread, hence the explicit GDK lock.
    gtk.gdk.threads_enter()
    if event == espeak.core.event_WORD:
        # pos is relative to the synthesized slice; to_count is its offset
        # into the buffer.
        pos += self.to_count-1
        s = self.textbuffer.get_iter_at_offset(pos)
        e = self.textbuffer.get_iter_at_offset(length+pos)
        self.textbuffer.remove_all_tags(self.textbuffer.get_start_iter(),self.textbuffer.get_end_iter())
        self.textbuffer.apply_tag(self.highlight_tag, s, e)
    if event == espeak.event_END:
        self.point = self.textbuffer.get_iter_at_offset(pos+self.to_count)
        self.textbuffer.place_cursor(self.point)
        self.textview.scroll_to_iter(self.point, 0.0, use_align=True, xalign=0.0, yalign=0.2)
    if event == espeak.event_MSG_TERMINATED:
        # NOTE(review): pokes espeak's private _playing flag -- fragile.
        espeak._playing = False
        self.textview.set_editable(True)
        try:
            self.textbuffer.remove_all_tags(self.textbuffer.get_start_iter(),self.textbuffer.get_end_iter())
        except:
            pass
        if not espeak.is_playing():
            # Speak whatever remains after the cursor, if anything.
            mark = self.textbuffer.get_insert()
            start = self.textbuffer.get_iter_at_mark(mark)
            end = self.textbuffer.get_end_iter()
            self.to_count = start.get_offset()
            text = self.textbuffer.get_text(start,end)
            if text != "":
                espeak.synth(text)
    gtk.gdk.threads_leave()
    return True
def OnEnter(self, event):
    """Handle Enter in the text box: empty input triggers voice capture via
    Google Speech Recognition; otherwise query WolframAlpha, falling back to
    a Wikipedia summary, and speak the result."""
    input = self.txt.GetValue()
    input = input.lower()
    # FIX: the original line read `if input == '' r = sr.Recognizer()` --
    # the colon was missing, which is a syntax error.
    if input == '':
        r = sr.Recognizer()
        with sr.Microphone() as source:
            audio = r.listen(source)
        try:
            self.txt.SetValue(r.recognize_google(audio))
        except sr.UnknownValueError:
            print("Google Speech Recognition doesnot understand audio")
        except sr.RequestError as e:
            print("Couldnot request results from Google Speech Recognition service; {0}".format(e))
    else:
        try:
            #wolframalpha
            app_id = "P26XPU-EG6443ATGU"
            client = wolframalpha.Client(app_id)
            result = client.query(input)
            answer = next(result.results).text
            print(answer)
            espeak.synth("The answer is:"+answer)
        except:
            #wikipedia
            #Split the first two strings for search like who is,what does keywords etc.
            input = input.split(" ")
            input = " ".join(input[2:])
            espeak.synth("Searched For"+input)
            print(wikipedia.summary(input))
def saySeq(length):
    # Announce the first *length* entries of the global colour sequence,
    # flashing the matching LEGO hub LED colour for each, then reset the LED.
    for num in range(0,length):
        espeak.synth(playingColours[sequence[num]])
        movehub.led.set_color(playingToLego[sequence[num]])
        time.sleep(0.8) # time between saying colour
    # index 5 is presumably the "idle" colour -- TODO confirm mapping
    movehub.led.set_color(playingToLego[5])
    time.sleep(0.5)
def start_conversation(self): #speech_response = wit.voice_query_auto(self.wit_access_token) try: conversation_starters = ["Hello", "How are you?", "Hi There", "I don't know you, but I like you.", "You are dashing in that Suit."] espeak.synth(random.choice(conversation_starters)) except Exception as err: print err
def OnEnter(self, event):
    # Handle Enter: empty input triggers voice capture, then the text is sent
    # to WolframAlpha with a Wikipedia-summary fallback.
    input = self.txt.GetValue()
    input = input.lower()
    print(input)
    if(input == ""):
        r = sr.Recognizer()
        with sr.Microphone() as source:
            audio = r.listen(source)
        try:
            self.txt.SetValue(r.recognize_google(audio))
        except sr.UnknownValueError:
            print("Google Speech Recognition could not understand audio")
        except sr.RequestError as e:
            print("Could not request results from Google Speech Recognition service; {0}".format(e))
    try:
        res = client.query(input)
        answer = next(res.results).text
        print answer
        espeak.synth("The answer is "+str(answer))
    except:
        try:
            # Drop the first two words ("who is", "what does", ...) before the
            # Wikipedia lookup.
            input = input.split(' ')
            input = ' '.join(input[2:])
            print wikipedia.summary(input)
        except:
            print "I'm not that advance to give you answer to that question ",input
def ok():
    """Speak the entry-box text with espeak and save it as a randomly named
    MP3 via gTTS."""
    phrase = e1.get()
    espeak.synth(phrase)
    speech = gTTS(text=phrase, lang='en', slow=False)
    # Random suffix keeps successive saves from clobbering each other.
    out_name = "text" + str(randint(0, 10000)) + ".mp3"
    speech.save(out_name)
def battery_check():
    # Poll the ACPI battery files; when discharging, show a desktop
    # notification and speak the remaining percentage, then re-arm a timer.
    rem = float(
        commands.getoutput(
            "grep \"^remaining capacity\" /proc/acpi/battery/BAT1/state | awk '{ print $3 }'"
        ))
    full = float(
        commands.getoutput(
            "grep \"^last full capacity\" /proc/acpi/battery/BAT1/info | awk '{ print $4 }'"
        ))
    state = commands.getoutput(
        "grep \"^charging state\" /proc/acpi/battery/BAT1/state | awk '{ print $3 }'"
    )
    percentage = int((rem / full) * 100)
    if state == "discharging":
        pynotify.init("Battery Alert!")
        notification = pynotify.Notification(
            "Battery " + state,
            str(percentage) + "%",
            "/usr/share/icons/gnome/32x32/status/battery-low.png")
        notification.show()
        espeak.synth("Your battery is " + state + "at" + str(percentage) + "percent")
    # NOTE(review): this astronomically large interval means the re-check
    # effectively never fires -- confirm the intended poll period.
    timer = Timer(9999999999999999.9, battery_check)
    timer.start()
def calculate():
    """Measure distance with an HC-SR04 ultrasonic sensor; if an object is
    between 50 and 400 cm away, capture a frame, run detection, and announce
    the detected object and its distance."""
    GPIO.setmode(GPIO.BCM)
    TRIG = 23
    ECHO = 24
    GPIO.setwarnings(False)
    GPIO.setup(TRIG, GPIO.OUT)
    GPIO.setup(ECHO, GPIO.IN)
    GPIO.output(TRIG, False)
    time.sleep(3)  # let the sensor settle
    # 10-microsecond trigger pulse
    GPIO.output(TRIG, True)
    time.sleep(0.00001)
    GPIO.output(TRIG, False)
    # FIX: pre-initialize so a missed edge cannot leave the names unbound
    pulse_start = pulse_end = time.time()
    while GPIO.input(ECHO) == 0:
        pulse_start = time.time()
    while GPIO.input(ECHO) == 1:
        pulse_end = time.time()
    pulse_duration = pulse_end - pulse_start
    # 17150 = speed of sound (cm/s) / 2 for the round trip
    distance = pulse_duration * 17150
    distance = round(distance, 2)
    if distance > 50 and distance < 400:
        ret, frame = video_capture.read()
        capture()
        detected = detect()  # renamed local: `object` shadowed the builtin
        espeak.synth("There is an " + detected + " " + str(distance) + " centimeteres from you")
    else:
        # FIX: the original `else:` had no body (syntax error); out-of-range
        # readings are deliberately ignored.
        pass
def publish_detected_object():
    # ROS node: poll for the currently fixated object label and, when a stored
    # trajectory exists for it, publish each joint-state step, then announce
    # the object name via espeak.
    pub = rospy.Publisher('joint_steps', ArmJointState, queue_size=4)
    rospy.init_node('pick_and_place_object_detection', anonymous=True)
    # NOTE(review): Rate(.1) is 0.1 Hz, not 20 Hz as the comment claimed.
    rate = rospy.Rate(.1)
    while not rospy.is_shutdown():
        fixated_object_label = subscribe_detected_object()
        rospy.loginfo(fixated_object_label)
        # check if fixated object label is a key in object_trajectories
        # if so, publish each trajectory in object_trajectories[key] to ArmJointState
        if fixated_object_label in object_trajectories:
            for i in object_trajectories[fixated_object_label]:
                goal = ArmJointState()
                goal.position1 = i[0]
                goal.position2 = i[1]
                goal.position3 = i[2]
                goal.position4 = i[3]
                goal.position5 = i[4]
                goal.position6 = i[5]
                pub.publish(goal)
                rospy.sleep(10)
            espeak.synth(fixated_object_label)
            # busy-wait until speech finishes
            while espeak.is_playing():
                pass
def counter_output(counter, size, pos_x, pos_y):
    # Announce/display/log a new visitor count. Speech is rate-limited by
    # done_synth; counts arriving while speech is busy are batched into a
    # "Welcome visitors X to Y" catch-up announcement via old_count.
    global done_synth
    global old_count
    if voice_text != '' and voice_active:
        if done_synth:
            if old_count == 0:
                espeak.synth(voice_text + ' ' + str(counter))
                done_synth = False
            else:
                espeak.synth('Welcome visitors ' + str(old_count) + ' to ' + str(counter))
                done_synth = False
                old_count = 0
        else:
            # speech busy: remember the first missed count for the batch message
            if old_count == 0:
                old_count = counter
        #subprocess.call("speak.sh '" + voice_text + ' ' + str(counter) + "'&",shell=True)
        #subprocess.call("echo '" + voice_text + ' ' + str(counter) + "'| festival --tts &",shell=True)
    if display_text != '' and display_active:
        ser.write(vfd_cr + str(counter) + ' people counted' + vfd_del)
    # CSV log line: count, epoch, human time, blob size, position
    output_file.write(
        str(counter) + ',' + str(time.time()) + ',' +
        time.strftime('%Y-%m-%d %H:%M:%S') + ',' + str(size) + ',' +
        str(pos_x) + ',' + str(pos_y) + '\n')
    return
def OnEnter(self, event):
    # Handle Enter: empty input triggers voice capture; then query
    # WolframAlpha, falling back to Wikipedia, and speak the result.
    input = self.txt.GetValue()
    input = input.lower()
    if input == '':
        r = sr.Recognizer()
        with sr.Microphone() as source:
            audio = r.listen(source)
        try:
            self.txt.SetValue(r.recognize_google(audio))
        except sr.UnknownValueError:
            print("Google Speech Recognition could not understand audio")
        except sr.RequestError as e:
            print(
                "Could not request results from Google Speech Recognition service; {0}"
                .format(e))
    try:
        #wolframalpha
        app_id = "JAKG9Q-QK39AE7XVY"
        client = wolframalpha.Client(app_id)
        res = client.query(input)
        answer = next(res.results).text
        print answer
        espeak.synth("The answer is " + answer)
    except:
        #wikipedia -- drop the first two words ("who is", etc.) before lookup
        input = input.split(" ")
        input = " ".join(input[2:])
        espeak.synth("Searched for " + input)
        print wikipedia.summary(input)
def main():
    # Spoken count-in, then set up a fullscreen GLUT window and enter the
    # render loop.
    b = 1
    while b < 10:
        espeak.synth("ee" * b)
        # integer division under Python 2: pauses of 0,0,1,1,1,2,2,2,3 seconds
        c = b / 3
        time.sleep(c)
        b = b + 1
    espeak.synth("Oh look, spinning shapes")
    #subprocess.call(['espeak', '-v', 'en-sc+whisper', '-s', '150', 'Yes!'])
    global window
    glutInit(sys.argv)
    # Display mode: double buffer, RGBA color, alpha, depth buffer
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
    # get a 640 x 480 window at the upper left corner of the screen
    glutInitWindowSize(640, 480)
    glutInitWindowPosition(0, 0)
    # Retain the window id for closing; the global declaration above keeps
    # this assignment from being local.
    window = glutCreateWindow("Jeff Molofee's GL Code Tut ... NeHe '99")
    # PyOpenGL registers callbacks via explicit setter calls.
    glutDisplayFunc(DrawGLScene)
    glutFullScreen()
    # When we are doing nothing, redraw the scene.
    glutIdleFunc(DrawGLScene)
    glutReshapeFunc(ReSizeGLScene)
    glutKeyboardFunc(keyPressed)
    # Initialize our window.
    InitGL(640, 480)
    # Start Event Processing Engine
    glutMainLoop()
def callSubProcess(self, args, text):
    # Speak *text*, then launch *args* (a space-separated command string) as a
    # subprocess; on any failure, apologize aloud.
    try:
        espeak.synth(text)
        # NOTE(review): naive split -- arguments containing spaces will break
        args = args.split(" ")
        subprocess.Popen(args)
    except Exception as e:
        print "Error on callSubProcess: "+str(e)
        espeak.synth("Sorry, I can't "+text)
def speakTrivia(self):
    # Speak a question/answer trivia pair, pausing self.delay between them
    # and twice that after the answer.
    trivia = self.fp.getChoice()
    espeak.set_voice("mb-us2")
    # NOTE(review): Rate value 2 is far below espeak's usual wpm range --
    # confirm this binding's Rate units.
    espeak.set_parameter(espeak.Parameter.Rate, 2)
    espeak.synth(trivia[0])
    time.sleep(self.delay)
    espeak.synth(trivia[1])
    time.sleep(self.delay*2)
def do_posicao_angular(self, arg):
    'Informa posicao ANGULAR do robo: POSICAO_ANGULAR'
    # Report the robot's heading in degrees; the string above is the
    # cmd-module help text (in Portuguese).
    if c.read() == None:
        raise playerc_error_str()
    print 'Posicao Angular: (%.3f)' % (p.pa * 180.0 / math.pi)
    espeak.synth('Posicao Angular: %d graus'% (p.pa * 180.0 / math.pi))
def pause(self):
    """pause music"""
    print("Pause")
    self.state = "pause" #current state is now paused
    mixer.music.pause() #pause audio
    if SPEAK:
        # Announce the current track: basename without directory or extension.
        espeak.synth(self.files[self.curfile].split("/")[-1].split(".")[0])
def get_rot_total():
    # Busy loop: whenever GPIO pin 16 goes high, speak the accumulated lap
    # count (Portuguese). Never returns.
    global cont_total
    while (1):
        if (GPIO.input(16) == 1):  # Edge detection (polled, not interrupt-driven)
            espeak.set_parameter(espeak.Parameter.Rate, 150, 0)
            espeak.synth("Foram dadas" + str(cont_total) + "voltas.")
            espeak.set_parameter(espeak.Parameter.Rate, 150, 0)
            # debounce / avoid re-announcing the same press
            time.sleep(0.5)
def do_posicao_angular(self, arg):
    'Informa posicao ANGULAR do robo: POSICAO_ANGULAR'
    # Report the robot's heading in degrees (cmd help text above, Portuguese).
    if c.read() == None:
        raise playerc_error_str()
    print 'Posicao Angular: (%.3f)' % (p.pa * 180.0 / math.pi)
    espeak.synth('Posicao Angular: %d graus' % (p.pa * 180.0 / math.pi))
def speak(text):
    # Print and speak *text*, blocking until espeak finishes.
    # The decode/encode round-trip strips any non-ASCII characters (Python 2
    # byte-string input assumed -- TODO confirm).
    text = text.decode('utf8', 'ignore')
    text = text.encode('ascii', 'ignore')
    print text
    espeak.synth(text)
    while espeak.is_playing():
        pass
    sleep(0.1)
def callSubProcess(self, args, text):
    # Speak *text*, then launch *args* (space-separated command string) as a
    # subprocess; on any failure, apologize aloud.
    try:
        espeak.synth(text)
        # NOTE(review): naive split -- arguments containing spaces will break
        args = args.split(" ")
        subprocess.Popen(args)
    except Exception as e:
        print "Error on callSubProcess: " + str(e)
        espeak.synth("Sorry, I can't " + text)
def on_message(client, userdata, msg):
    # MQTT message callback: currently just says "hello" on every publish.
    # This function is called everytime the topic is published to.
    # If you want to check each message, and do something depending on
    # the content, the code to do this should be run in this function
    # NOTE(review): str.strip("b'") removes any of the characters b, ' from
    # both ends -- it is not a prefix strip; and topic/payload are computed
    # but unused below. Confirm whether payload was meant to be spoken.
    topic = str(msg.topic).strip("b'").strip("\\r\\n")
    payload = str(msg.payload).strip("b'").strip("\\r\\n")
    #print ("Topic: ", topic + "\nMessage: " + str(payload))
    espeak.synth("hello")
def start():
    # Kiosk presence loop: the ultrasonic distance reading drives keyboard
    # LEDs and, after 5 consecutive close readings, wakes the monitor and
    # speaks a greeting; 20 idle iterations with nobody close turn it off.
    # t = consecutive-reading counter, d = "someone was close" flag,
    # f = poll interval in seconds.
    t = 0
    d = 0
    f = 1
    monitorOff()
    status = 'off'
    while(1):
        dis = distancia()
        if (status == 'off'):
            print 'D1: %s' % dis
            keyLed('num', 'on')
            f = 0.7
            if (dis > DISTANCIA):
                t = 0
        if (dis < DISTANCIA and status == 'off'):
            print 'D2: %s' % dis
            keyLed('caps', 'on')
            f = 0.4
        if (dis < DISTANCIA/2 and status == 'off'):
            print 'D3: %s' % dis
            keyLed('scroll', 'on')
            f = 0.2
            t = t + 1
        if (t == 5 and status == 'off'):
            monitor('on')
            status = 'on'
            time.sleep(10)
            print 'fala'
            espeak.synth("Bem vindo! Este é o Coice, Quiosque para Conscientização Eleitoral")
            time.sleep(10)
            espeak.synth("ah!!!! Eu não sou o istifem róuquim")
            f = 1
            t = 0
        time.sleep(f)
        keyLed('num', 'off')
        keyLed('caps', 'off')
        keyLed('scroll', 'off')
        if (status == 'on'):
            t = t + 1
            if (dis < DISTANCIA/2):
                d = 1
            print 'ON: t-%s d-%s ' % (t, d)
            if (t > 20):
                if (d == 0):
                    monitorOff()
                    status = 'off'
                    print 'Deligar'
                t = 0
                d = 0
def run(self,ang,gait):
    # Put the robot into a running gait towards heading *ang*; announce the
    # transition only once (guarded by s_run).
    if self.s_run == 0:
        espeak.synth("I'm off for a run")
        self.s_run = 1
    serial_out.setgait(gait)
    serial_out.state(0,1,1)
    nav.odo(1.5,ang)
    print ('run')
    # travel(heading, speed, ?) -- third argument's meaning unconfirmed
    serial_out.travel(ang,100,0)
def notify_email_contacts(self):
    # Record when the alert email was sent, send the snapshot, and (if voice
    # is enabled) warn the intruder aloud.
    self.lastEmailTicks = time.time()
    self.emailImage()
    if talkToEm:
        # Note we *tell them* we're going to record a video, even if we're not. Sneaky! :)
        espeak.synth('An image has just been emailed to security.')
        time.sleep(2)
        espeak.synth('A video is being recorded of you, even as we speak.')
        time.sleep(2)
def today_event():
    # Speak a reminder for every calendar entry dated today.
    # Entry layout assumed: dt[0]=date, dt[1]=time, dt[6]=description --
    # TODO confirm against mycal's producer. `now` is computed but unused.
    today = datetime.today()
    now = datetime.now()
    for dt in mycal:
        print dt
        ev_dt = datetime.strptime(dt[0] + " " + dt[1], "%Y-%m-%d %H:%M")
        evnt = dt[6]
        if ev_dt.date() == today.date():
            espeak.synth("dont forget to " + evnt + "\n")
def button_pressed(self):
    #when set alarm button is pressed
    # Parse the hour/minute out of the QTime widget's string representation,
    # store them, and confirm both on screen and aloud.
    print("Button Pressed")
    alarm_time = str(self.Set_Time.time())
    # NOTE(review): slicing str(QTime) at fixed indices is brittle -- it
    # depends on the exact repr format; confirm against the Qt version used.
    self.alarm_h = int(alarm_time[19:21]) #value of hour is stored in index value 19 and 20
    self.alarm_m = int (alarm_time[23:25]) #value of minute is stored in index value 23 and 24
    message = "Alarm is set at " + str(self.alarm_h) + " hours " + str(self.alarm_m) + " minutes"
    self.label.setText(_translate("MainWindow", message, None)) #display the message on GUI screen
    espeak.synth (message) #speak the message through audio jack
def __init__(self):
    """Greet the user and wire up the voice recognizer and action set."""
    espeak.synth("Hello Master!")
    self.rec = VoiceRecognizer()
    self.actions = ActionManager().loadActions()
    # FIX: dropped the stray trailing semicolon (un-Pythonic, no effect).
    self.canHear = True
def do_posicao_y(self, arg):
    'Informa posicao Y do robo: POSICAO_Y'
    # Report the robot's Y position and heading in degrees (cmd help text
    # above, Portuguese).
    if c.read() == None:
        raise playerc_error_str()
    print 'Posicao Y: (%.3f) Angular: (%.3f)' % (p.py, p.pa * 180.0 / math.pi)
    espeak.synth('Posicao Y: %d. Posicao Angular: %d graus.' % (p.py, p.pa * 180.0 / math.pi))
def __init__(self):
    # Greet the user and wire up the voice recognizer and action set.
    espeak.synth("Hello Master!")
    self.rec = VoiceRecognizer()
    self.actions = ActionManager().loadActions()
    # canHear gates the listening loop elsewhere -- TODO confirm consumer
    self.canHear = True
def do_para_direita(self, arg):
    'Vira o robo para a direita por um dado numero de graus: PARA_DIREITA 20'
    # Turn the robot right by the number of degrees parsed from *arg*
    # (announced aloud first); 10 deg per 0.1 s step.
    # NOTE(review): `audio` is loaded but never played here -- kept for parity
    # with sibling commands.
    audio = pygame.mixer.Sound("/home/carloscrbs/fx_stepStair.wav")
    t = parse(arg)
    espeak.synth('Para Direita %d graus' % t)
    # FIX: reuse the already-parsed value instead of calling parse(arg) twice.
    for i in range(0, *t):
        p.SetSpeed(0, -10.0 * math.pi / 180.0)
        sleep(0.1)
    p.SetSpeed(0, 0)
def do_para_direita(self,arg):
    'Vira o robo para a direita por um dado numero de graus: PARA_DIREITA 20'
    # Turn the robot right by the parsed number of degrees (10 deg / 0.1 s
    # steps). `audio` is loaded but never played in this command.
    audio = pygame.mixer.Sound("/home/carloscrbs/fx_stepStair.wav")
    t = parse(arg)
    espeak.synth('Para Direita %d graus'%t)
    # NOTE(review): parse(arg) is called a second time; t could be reused.
    for i in range(0, *parse(arg)):
        p.SetSpeed(0, -10.0 * math.pi / 180.0)
        sleep(0.1)
    p.SetSpeed(0,0)
def do_para_tras(self,arg):
    'Move o robo para tras por uma distancia especifica: PARA_TRAS 10'
    # Drive backwards for the parsed distance, playing a step sound each tick.
    audio = pygame.mixer.Sound("/home/carloscrbs/fx_stepStair.wav")
    t = parse(arg)
    espeak.synth('Para Tras %d'%t)
    # NOTE(review): parse(arg) is called a second time; t could be reused.
    for i in range(0, *parse(arg)):
        p.SetSpeed(-1,0)
        audio.play()
        sleep(.5)
    p.SetSpeed(0,0)
def irc_speak(word, word_eol, users):
    """Checks to see if a user is set to be voiced, and if so synthesizes their text"""
    # word[0] == the sender's nick; speak only for nicks present in `options`.
    if word[0] in options:
        # FIX: was a list comprehension executed purely for its side effects;
        # a plain loop states the intent (apply each stored espeak parameter).
        for arg in options[word[0]]["args"]:
            espeak.set_parameter(aliases[arg], options[word[0]]["args"][arg])
        espeak.set_voice(name=options[word[0]]["language"])
        espeak.synth(word[1])
        xchat.emit_print("Channel", word[0], word[1])
        return xchat.EAT_NONE
    # Nick not configured for speech: let xchat handle the event normally.
    return xchat.EAT_NONE
def do_para_esquerda(self,arg): if arg == "": print "No arguments!!" espeak.synth("Comando invalido") else: 'Vira o robo para a esquerda por um dado numero de graus: PARA_ESQUERDA 20' t = parse(arg) espeak.synth('Para Esquerda %d graus'%t) for i in range(0,*parse(arg)): p.SetSpeed(0, 10.0 * math.pi / 180.0) sleep(.1) p.SetSpeed(0,0)
def rndmp3 ():
    # Pick a random file from the user's music folder, announce it (desktop
    # notification + espeak), and play it with mplayer.
    randomfile = random.choice(os.listdir("/home/"+output+"/Music/"+folder+"/"))
    # NOTE(review): the leading space in `file` is what separates the command
    # from its argument in the os.system call below -- brittle; paths with
    # spaces/shell metacharacters will break.
    file = ' /home/'+output+'/Music/'+folder+'/'+ randomfile
    pynotify.init( "Radio" )
    song = pynotify.Notification('Playing '+randomfile)
    song.show()
    espeak.synth('Playing '+randomfile)
    print 'Playing '+randomfile
    time.sleep(1)
    os.system ('mplayer' + file)
    time.sleep(2)
def monitorOff():
    # Blink the keyboard LEDs as a countdown, announce the nap (Portuguese:
    # "I'm going to take a nap"), then power the monitor off.
    keyLed('caps', 'on')
    keyLed('num', 'on')
    keyLed('scroll', 'on')
    time.sleep(1)
    keyLed('scroll', 'off')
    time.sleep(1)
    keyLed('caps', 'off')
    time.sleep(1)
    keyLed('num', 'off')
    espeak.synth("Vou tirar uma soneca")
    time.sleep(2)
    monitor('off')
def speech_to_text():
    """Capture a spoken query via wit.ai, forward it to Houndify's demo text
    endpoint, and speak the response aloud."""
    # user is prompted to talk
    speech_response = wit.voice_query_auto(wit_access_token)
    # response
    question = urllib.quote_plus(speech_response['_text'])
    # FIX: subprocess.call returns the process exit status, not its output,
    # and json.parse does not exist -- capture stdout and use json.loads.
    resp = subprocess.check_output(['curl', 'https://www.houndify.com/textSearch?query=' + question + '&clientId=e7SgQJ_wwXjv5cUx1nLqKQ%3D%3D&clientKey=Pi_smrHYQhCA_nLgukp4C4nnQE2WyQvk3l3Bhs8hcbchrLAmjl5LWS3ewq1U8LMser8j890OfhklwNm77baPTw%3D%3D', '-H', 'Accept-Encoding: gzip, deflate, sdch', '-H', 'Accept-Language: en-US,en;q=0.8', '-H', 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36', '-H', 'Accept: */*', '-H', 'Referer: https://www.houndify.com/try/986dcfd1-0b91-4346-a5a0-6d53f0d18da2', '-H', 'Cookie: houndify-sess=s%3Ar-94jGq48cQMay2q1fgRwSolHIV4ZQpk.Y3Wns0NNtM5LCgWUcaAc8MUdH3Z0elclREmfzZ%2BJzLY; _gat=1; _ga=GA1.2.1948120585.1453572520', '-H', 'Connection: keep-alive', '-H', 'Hound-Request-Info: {"ClientID":"e7SgQJ_wwXjv5cUx1nLqKQ==","UserID":"houndify_try_api_user","PartialTranscriptsDesired":true,"SDK":"web","SDKVersion":"0.1.6"}', '--compressed'])
    answer = json.loads(resp)
    talk_answer = answer["AllResults"][0]['SpokenResponseLong']
    # speak the answer
    espeak.synth(talk_answer)
    # NOTE(review): this rebinds a *local* name; add `global IS_TALKING` if
    # the module-level flag is meant to change -- TODO confirm.
    IS_TALKING = False
def tweet_status(status_to_tweet):
    # Speak a tweet aloud, first rewriting @mentions, #hashtags, links and
    # the "RT" marker into speakable text.
    text = status_to_tweet.text
    text = re.sub(r"@(\w+)", r"at \1", text)
    text = re.sub(r"#(\w+)", r"hashtag \1", text)
    text = re.sub(r"(http[^ ]+)", r"Link", text)
    text = re.sub(r"RT\W", r"", text)
    try:
        twitterSpeak = "{friend} says {text}".format(friend=status_to_tweet.user.name, text=text)
    except UnicodeEncodeError:
        # Python 2 byte/unicode mix: skip statuses we cannot format.
        print u"Unable to speak this status {friend} says {text}".format(friend=status_to_tweet.user.name, text=text)
        return
    print twitterSpeak
    espeak.synth(twitterSpeak)
    # crude pacing between tweets
    sleep(30)
def dictionary(word): i=0 #while (1): print word dictionary=PyDictionary() dict=dictionary.meaning(word) if dict is not None: if ( dict.has_key('Adjective')) : s= dict['Adjective'] if len(s)>=i : print s[i] l= len(s[i]) t = l /12.0 espeak.synth("(adjective)" + s[i]) time.sleep(t) if dict.has_key('Noun') : s= dict['Noun'] if len(s)>=i : print s[i] l= len(s[0]) t = l /12.0 espeak.synth("(NOUN)" + s[i]) time.sleep(t) if dict.has_key('Verb') : s= dict['Verb'] if len(s)>=i : print s[i] l= len(s[i]) t = l /12.0 espeak.synth("VERB" + s[i]) time.sleep(t) if dict.has_key('Adverb') : s= dict['Adverb'] if len(s)>=i : print s[i] l= len(s[i]) t = l /12.0 espeak.synth("(ADVERB)" + s[i]) time.sleep(t) if dict.has_key('Preposition') : s= dict['Preposition'] if len(s)>=i : print s[i] l= len(s[i]) t = l /12.0 espeak.synth("(PREPO)" + s[i]) time.sleep(t) print 5
def run(self):
    # Worker-thread loop: configure the espeak voice and rate from config,
    # then speak queued phrases until the stop event is set.
    name = self.config.configs['espeak_name']
    rate = int(self.config.configs['espeak_rate'])
    espeak.set_voice(name)
    espeak.set_parameter(espeak.Parameter.Rate,rate)
    while not self._stop.isSet():
        try:
            # Short timeout so the stop flag is re-checked ~20x/second.
            phrase = self.wordsQueue.get(True,0.05)
            espeak.synth(phrase.encode('utf8'))
        except Queue.Empty:
            continue
def OnEnter(self, event):
    # Handle Enter: try WolframAlpha first, then a Wikipedia summary (after
    # dropping the first two words of the query), else give up.
    input = self.txt.GetValue()
    input = input.lower()
    try:
        res = client.query(input)
        answer = next(res.results).text
        print answer
        espeak.synth("The answer is "+str(answer))
    except:
        try:
            input = input.split(' ')
            input = ' '.join(input[2:])
            print wikipedia.summary(input)
        except:
            print "I don't know"
def leer(texto, esperar = False):
    """Use espeak to 'read' a text aloud (Latin American Spanish voice).

    :param texto: Text string to pronounce.
    :param esperar: If True, do not return until the text has finished
        being read.
    :type esperar: boolean
    """
    try:
        from espeak import espeak
        import time
        espeak.set_voice('es-la')
        espeak.synth(texto)
        while esperar and espeak.is_playing():
            time.sleep(1)
    except ImportError:
        # espeak bindings not installed: silently degrade to no speech.
        pass
def speak(self, statement):
    # Speak a chatterbot statement: macOS uses the built-in `say` command,
    # everything else goes through espeak with a termination callback.
    import time
    if self.platform == 'darwin':
        # Use Mac's built-in say command to speak the response
        cmd = ['say', str(statement.text)]
        subprocess.call(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        return statement.text
    from espeak import espeak
    from espeak import core as espeak_core
    # Mutable cell so the callback closure can flag completion.
    done_synth = [False]
    def synth_callback(event, pos, length):
        if event == espeak_core.event_MSG_TERMINATED:
            done_synth[0] = True
    espeak.set_SynthCallback(synth_callback)
    # NOTE(review): passes the statement object itself, unlike the darwin
    # branch which uses statement.text -- confirm espeak stringifies it.
    call_result = espeak.synth(statement)
    # Wait for the speech to stop
    while call_result and not done_synth[0]:
        time.sleep(0.05)
    return call_result
def do_para_frente(self,arg):
    # Drive the robot forward for the parsed distance, playing a step sound
    # and printing the stall flag each tick; empty arg is rejected aloud.
    # NOTE(review): the help string below sits inside the else branch as a
    # no-op literal, so cmd's help system never sees it.
    if arg == "":
        print "No arguments!!"
        espeak.synth("Comando invalido")
    else:
        'Move o robo para frente por uma distancia especifica: PARA_FRENTE 10'
        audio = pygame.mixer.Sound("/home/carloscrbs/fx_stepStair.wav")
        t = parse(arg)
        espeak.synth('Para Frente %d'%t)
        for i in range(0,*parse(arg)):
            # NOTE(review): sibling commands call c.read(); the capitalized
            # c.Read() here may be a different API or a typo -- confirm.
            c.Read()
            print p.GetStall()
            p.SetSpeed(1,0)
            audio.play()
            sleep(.5)
            #print 'stall: %d \n' % p.GetStall()
        p.SetSpeed(0,0)