def os_speak(self, f):
    """Speak the payload of a 'speak...' command string via espeak.

    *f* looks like 'speak<text>'; a '_kill_' payload cancels any speech
    currently playing instead of speaking.
    """
    # BUGFIX: the original used f.lower().lstrip('speak'), but str.lstrip
    # strips any run of the characters {s, p, e, a, k} from the left --
    # it does NOT remove the literal prefix (e.g. 'speakapple' -> 'le').
    # Strip the prefix explicitly instead.
    cmd = f.lower()
    if cmd.startswith('speak'):
        cmd = cmd[len('speak'):]
    if ESPEAK:
        if '_kill_' in cmd:
            # Cancel request: stop current speech (if any) and bail out.
            if espeak.is_playing():
                self.speak_cancel()
            return
        try:
            # Uses a queue so it doesn't speak over itself.
            esQueue.put(cmd)
            if not espeak.is_playing():
                espeak.synth(esQueue.get())
        except Exception as e:
            #print ('oops',e)
            # Fallback: call the system espeak binary directly - no queue used.
            # NOTE(review): cmd is interpolated into a shell command; this is a
            # shell-injection risk if f ever comes from untrusted input.
            os.system('''espeak -s 160 -v m3 -p 1 "%s" &''' % cmd)
def publish_detected_object():
    """ROS node: replay stored joint trajectories for the fixated object.

    Looks the detected label up in `object_trajectories`; each stored
    6-joint step is published to 'joint_steps', then the label is spoken.
    """
    pub = rospy.Publisher('joint_steps', ArmJointState, queue_size=4)
    rospy.init_node('pick_and_place_object_detection', anonymous=True)
    # NOTE(review): this is 0.1 Hz (the original comment said "20hz"),
    # and `rate` is never actually slept on in the loop below.
    rate = rospy.Rate(.1)
    while not rospy.is_shutdown():
        fixated_object_label = subscribe_detected_object()
        rospy.loginfo(fixated_object_label)
        # If we have trajectories recorded for this object, publish each
        # trajectory step as an ArmJointState message.
        if fixated_object_label in object_trajectories:
            for step in object_trajectories[fixated_object_label]:
                goal = ArmJointState()
                goal.position1 = step[0]
                goal.position2 = step[1]
                goal.position3 = step[2]
                goal.position4 = step[3]
                goal.position5 = step[4]
                goal.position6 = step[5]
                pub.publish(goal)
                rospy.sleep(10)
        # Announce the label and block until speech completes.
        espeak.synth(fixated_object_label)
        while espeak.is_playing():
            pass
def espeak_event(self, event, pos, length):
    """espeak callback: highlight the word being spoken, track the cursor,
    and chain synthesis of the remaining text when a message terminates."""
    gtk.gdk.threads_enter()
    if event == espeak.core.event_WORD:
        # Highlight the word currently being spoken.
        offset = pos + self.to_count - 1
        word_start = self.textbuffer.get_iter_at_offset(offset)
        word_end = self.textbuffer.get_iter_at_offset(offset + length)
        self.textbuffer.remove_all_tags(self.textbuffer.get_start_iter(),
                                        self.textbuffer.get_end_iter())
        self.textbuffer.apply_tag(self.highlight_tag, word_start, word_end)
    if event == espeak.event_END:
        # Move the cursor to the end of what was just spoken and keep it
        # visible in the view.
        self.point = self.textbuffer.get_iter_at_offset(pos + self.to_count)
        self.textbuffer.place_cursor(self.point)
        self.textview.scroll_to_iter(self.point, 0.0, use_align=True,
                                     xalign=0.0, yalign=0.2)
    if event == espeak.event_MSG_TERMINATED:
        espeak._playing = False  # reset the bindings' internal playing flag
        self.textview.set_editable(True)
        try:
            self.textbuffer.remove_all_tags(self.textbuffer.get_start_iter(),
                                            self.textbuffer.get_end_iter())
        except:
            pass
        # If nothing is playing, speak whatever text remains after the cursor.
        # NOTE(review): assumed to belong to the MSG_TERMINATED branch -- the
        # collapsed source is ambiguous here; confirm against the original file.
        if not espeak.is_playing():
            insert_mark = self.textbuffer.get_insert()
            start = self.textbuffer.get_iter_at_mark(insert_mark)
            end = self.textbuffer.get_end_iter()
            self.to_count = start.get_offset()
            remaining = self.textbuffer.get_text(start, end)
            if remaining != "":
                espeak.synth(remaining)
    gtk.gdk.threads_leave()
    return True
def speak(text): text = text.decode('utf8', 'ignore') text = text.encode('ascii', 'ignore') print text espeak.synth(text) while espeak.is_playing(): pass sleep(0.1)
def speak(text_to_speech):
    """Speak *text_to_speech* while animating a talking mouth on the display.

    Alternates between the open-mouth image ("jaffar3.png") and the
    closed-mouth image ("jaffar.png") every 0.3 s while espeak is playing.
    """
    sense.load_image("jaffar3.png")  # start with the mouth open
    mouth_open = True
    espeak.synth(text_to_speech)
    while espeak.is_playing():
        time.sleep(0.3)
        # Flip the mouth state and show the matching image.
        mouth_open = not mouth_open
        sense.load_image("jaffar3.png" if mouth_open else "jaffar.png")
    sense.load_image("jaffar.png")  # finish with the mouth closed
def callback(image):
    """Recognize the face in *image* and greet the person by voice."""
    name = recognize(image)
    if name:
        # Known face: greet by name.
        print(names[name])
        espeak.synth(
            "Delay. Hello %s. I was wondering where you have been." % names[name])
    else:
        # Unrecognized face.
        print('unknown')
        espeak.synth(
            "Delay. Hmm, have we met before. I can't seem to recall your face."
        )
    # Block until espeak has finished speaking.
    # NOTE(review): placement assumed to be at function level (applies to both
    # branches); the collapsed source is ambiguous -- confirm against original.
    while espeak.is_playing():
        time.sleep(0.1)
def leer(texto, esperar=False):
    """Speak *texto* aloud using the espeak bindings.

    :param texto: text string to pronounce.
    :param esperar: when True, do not return until speaking has finished.
    :type esperar: boolean
    """
    try:
        from espeak import espeak
        import time
        # Latin-American Spanish voice.
        espeak.set_voice('es-la')
        espeak.synth(texto)
        while esperar and espeak.is_playing():
            time.sleep(1)
    except ImportError:
        # espeak bindings not installed: silently do nothing (best-effort).
        pass
def publish_detected_object():
    """ROS node: publish the detected-object label and announce it aloud."""
    pub = rospy.Publisher('object_detection_label', String, queue_size=10)
    rospy.init_node('detected_objects', anonymous=True)
    rate = rospy.Rate(20)  # 20 Hz
    while not rospy.is_shutdown():
        fixated_object_label = subscribe_detected_object()
        rospy.loginfo(fixated_object_label)
        pub.publish(fixated_object_label)
        # Speak the label and wait for the speech to finish.
        espeak.synth(fixated_object_label)
        while espeak.is_playing():
            pass
        rospy.sleep(3)
        rate.sleep()
def Read_Stop(self, wedget, data=None):
    """Toggle read-aloud: start speaking from the cursor if idle, or cancel
    if espeak is already playing, updating the play/stop button image."""
    image_read_stop = self.guibuilder.get_object("image_read_stop")
    if not espeak.is_playing():
        # Idle -> start reading from the cursor to the end of the buffer.
        image_read_stop.set_from_file("/usr/share/lios/Gui/stop")
        self.textbuffer.remove_tag(self.highlight_tag,
                                   self.textbuffer.get_start_iter(),
                                   self.textbuffer.get_end_iter())
        cursor_mark = self.textbuffer.get_insert()
        start = self.textbuffer.get_iter_at_mark(cursor_mark)
        end = self.textbuffer.get_end_iter()
        self.to_count = start.get_offset()
        espeak.synth(self.textbuffer.get_text(start, end))
        # Lock the view while reading.
        self.textview.set_editable(False)
    else:
        # Playing -> stop and restore the play button.
        espeak.cancel()
        image_read_stop.set_from_file("/usr/share/lios/Gui/play")
        self.textbuffer.remove_tag(self.highlight_tag,
                                   self.textbuffer.get_start_iter(),
                                   self.textbuffer.get_end_iter())
        self.textview.set_editable(True)
def talk(words):
    """Speak *words* using whichever TTS backend the platform provides."""
    print("[log]" + words)
    if sys.platform in ("linux", "linux2"):
        # Linux: python-espeak bindings, Russian female voice; block until done.
        espeak.set_voice("ru+f")
        espeak.synth(words)
        while espeak.is_playing():
            pass
    elif sys.platform == "darwin":
        # macOS: built-in `say` command.
        os.system("say " + words)
    elif sys.platform == "win32":
        # Windows: pyttsx-style engine.
        engine = tt.init()
        engine.say(words)
        engine.runAndWait()
        engine.stop()
def _do_notify(self):
    '''Poll espeak and send a notification whenever its busy/idle state
    changes. Runs until self.is_running goes False.'''
    state = Notifier.IDLE
    while self.is_running:
        # We are working exactly when espeak is speaking.
        new_state = Notifier.WORKING if espeak.is_playing() else Notifier.IDLE
        # Only notify on a state transition.
        if new_state != state:
            state = new_state
            self._notify(state)
        # Don't busy-wait.
        time.sleep(0.1)
def espeak_event(self, event, pos, length):
    """Handle espeak events: word highlighting, cursor tracking, and
    chaining the next chunk of text once the current message ends."""
    gtk.gdk.threads_enter()
    if event == espeak.core.event_WORD:
        # Re-highlight just the word being spoken.
        pos += self.to_count - 1
        w_start = self.textbuffer.get_iter_at_offset(pos)
        w_end = self.textbuffer.get_iter_at_offset(pos + length)
        self.textbuffer.remove_all_tags(self.textbuffer.get_start_iter(),
                                        self.textbuffer.get_end_iter())
        self.textbuffer.apply_tag(self.highlight_tag, w_start, w_end)
    if event == espeak.event_END:
        # Place and reveal the cursor at the end of the spoken span.
        self.point = self.textbuffer.get_iter_at_offset(pos + self.to_count)
        self.textbuffer.place_cursor(self.point)
        self.textview.scroll_to_iter(self.point, 0.0, use_align=True,
                                     xalign=0.0, yalign=0.2)
    if event == espeak.event_MSG_TERMINATED:
        espeak._playing = False  # clear the bindings' private playing flag
        self.textview.set_editable(True)
        try:
            self.textbuffer.remove_all_tags(
                self.textbuffer.get_start_iter(),
                self.textbuffer.get_end_iter())
        except:
            pass
        # Continue with the remaining text, if espeak has really gone idle.
        # NOTE(review): branch nesting inferred from the collapsed source --
        # confirm this belongs inside the MSG_TERMINATED case.
        if not espeak.is_playing():
            mark = self.textbuffer.get_insert()
            start = self.textbuffer.get_iter_at_mark(mark)
            end = self.textbuffer.get_end_iter()
            self.to_count = start.get_offset()
            text = self.textbuffer.get_text(start, end)
            if text != "":
                espeak.synth(text)
    gtk.gdk.threads_leave()
    return True
def Read_Stop(self, wedget, data=None):
    """Play/stop toggle for read-aloud. Starts synthesis from the cursor
    when idle; cancels and restores the UI when already speaking."""
    image_read_stop = self.guibuilder.get_object("image_read_stop")
    if espeak.is_playing():
        # Currently speaking -> stop, clear highlights, unlock editing.
        espeak.cancel()
        image_read_stop.set_from_file("/usr/share/lios/Gui/play")
        self.textbuffer.remove_tag(self.highlight_tag,
                                   self.textbuffer.get_start_iter(),
                                   self.textbuffer.get_end_iter())
        self.textview.set_editable(True)
    else:
        # Idle -> read everything from the insert mark to the end.
        image_read_stop.set_from_file("/usr/share/lios/Gui/stop")
        self.textbuffer.remove_tag(self.highlight_tag,
                                   self.textbuffer.get_start_iter(),
                                   self.textbuffer.get_end_iter())
        mark = self.textbuffer.get_insert()
        start = self.textbuffer.get_iter_at_mark(mark)
        end = self.textbuffer.get_end_iter()
        self.to_count = start.get_offset()
        text = self.textbuffer.get_text(start, end)
        espeak.synth(text)
        self.textview.set_editable(False)
def poll(): print espeak.is_playing() return "isPlaying " + str(1 if espeak.is_playing() else 0) + "\n"
textRect.y = 50

# Get input: quit events and the UP key (which ends the loop).
for event in pygame.event.get():
    if event.type == pygame.QUIT:
        pygame.quit()
        sys.exit()
keys = pygame.key.get_pressed()
if keys[pygame.K_UP]:
    done = True

# Draw the scene, then start speaking the current line if espeak is idle.
windowSurface.blit(background, (0, 0))
windowSurface.blit(person, (400, 0))
windowSurface.blit(textObj, textRect)
if not espeak.is_playing():
    print("Say ", text)
    espeak.synth(text)
pygame.display.update()

# Delay to hold 30 FPS; advance (and wrap) the phrase index once
# espeak has finished the current one.
fpsClock.tick(30)
if not espeak.is_playing():
    index += 1
    if index >= len(thingsToSay):
        index = 0
def say(self, phrase):
    """Speak *phrase* and block until playback completes."""
    espeak.synth(phrase)
    still_talking = espeak.is_playing
    # Wait until the bot is finished speaking before doing anything else.
    while still_talking():
        pass
def speak(self, text):
    """Speak *text*, polling every 0.1 s until espeak finishes."""
    espeak.synth(text)
    while espeak.is_playing():
        time.sleep(0.1)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from scapy.all import *
from scapy.utils import *
from espeak import espeak

load_contrib('http')

# Capture mode switch: truthy -> live capture on wlan0, falsy -> replay a.cap.
a = 0


def f(p):
    """Speak the name of the packet's last meaningful layer."""
    layer = p.lastlayer()
    # Raw payloads and option layers carry no useful name; use their parent.
    if isinstance(layer, Raw) or 'Option' in layer.name:
        layer = layer.underlayer
    espeak.synth(layer.name)
    return


# Only hand a packet to f() while espeak is idle, so names don't pile up.
if a:
    sniff(iface='wlan0', lfilter=lambda p: not espeak.is_playing(), prn=f)
else:
    sniff(offline="a.cap", lfilter=lambda p: not espeak.is_playing(), prn=f)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from scapy.all import *
from scapy.utils import *
from espeak import espeak  # apt-get install espeak python-espeak

load_contrib('http')


def f(p):
    """Announce the name of a captured packet's last meaningful layer."""
    bottom = p.lastlayer()
    if isinstance(bottom, Raw) or 'Option' in bottom.name:
        # Raw payload / option layers are uninformative; speak the parent.
        bottom = bottom.underlayer
    espeak.synth(bottom.name)
    return


# Only accept a new packet once espeak has finished the previous name.
sniff(lfilter=lambda p: not espeak.is_playing(), prn=f)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from scapy.all import *
from scapy.utils import *
from espeak import espeak  # apt-get install espeak python-espeak

load_contrib('http')


def f(p):
    """Speak the name of the deepest informative layer of packet *p*."""
    lyr = p.lastlayer()
    # Skip Raw payloads and option layers; their parent has the real name.
    if isinstance(lyr, Raw) or 'Option' in lyr.name:
        lyr = lyr.underlayer
    espeak.synth(lyr.name)
    return


# Gate new packets on espeak being idle so announcements don't overlap.
sniff(lfilter=lambda p: not espeak.is_playing(), prn=f)
def falar(texto, delay_ms):
    """Speak *texto*, wait for playback to end, then pause *delay_ms* ms."""
    espeak.synth(texto)
    # Busy-wait until espeak is done speaking.
    while espeak.is_playing():
        pass
    # Extra pause after speech; delay_ms is milliseconds.
    sleep(delay_ms / 1000)
# sock.send("%49s" % channel_conf)
rest_eegs = []
ssvep_eegs = []

# Repeat n_trials times.
for i in range(experiment['n_trials']):
    # Acquire resting data (a random duration of 2, 3 or 4 seconds to
    # avoid adaptation).
    #idx, eeg = headset.acquire_data_fast(random.randint(2,4))
    #rest_eegs.append(eeg)
    #print utils.check_packet_drops(idx)

    # Give an auditory cue and wait for it to finish, plus a 2 s pause.
    espeak.synth(cues[i])
    while espeak.is_playing():
        time.sleep(0.1)
    time.sleep(2)

    # Start flickering.
    ssvepd.send_signal(signal.SIGUSR1)

    # Acquire EEG data for `duration` seconds and stop flickering.
    idx, eeg = headset.acquire_data_fast(duration, stop_callback, ssvepd.pid)
    ssvep_eegs.append(eeg)
    #print utils.check_packet_drops(idx)

# Save dataset.
# NOTE(review): assumed to run once after all trials; confirm nesting
# against the original file.
experiment['battery'] = headset.battery
def talk(text):
    """Speak *text*, wait for playback to finish, then pause half a second."""
    synth(text)
    # Poll in 10 ms steps until speech ends.
    while is_playing():
        sleep(.01)
    sleep(0.5)
# Equal amount of trials for each LED will be randomized.
# NOTE(review): n_runs/2 relies on Python 2 integer division.
cues = ["Left" for i in range(n_runs/2)] + ["Right" for i in range(n_runs/2)]
random.shuffle(cues)
random.shuffle(cues)
experiment['cues'] = cues

# Let classifier compute a PSD average over 2 second blocks.
p_conn.send(experiment)

# Repeat n_runs times.
for i in range(experiment['n_runs']):
    # Give an auditory cue (if cue-based), wait for it, then pause 2 s.
    if CUE_BASED:
        espeak.synth(cues[i])
        while espeak.is_playing():
            time.sleep(0.1)
        time.sleep(2)

    # Start flickering.
    ssvepd.send_signal(signal.SIGUSR1)

    # Acquire EEG data until the classifier replies.
    while True:
        idx, eeg = headset.acquire_data_fast(experiment['block_size'])
        if p_conn.poll():
            # Classified -- stop acquiring.
            break
        # Not classified yet: feed the classifier another block.
        p_conn.send(eeg)
#!/usr/bin/python2
"""
espeak bindings for python2
https://launchpad.net/python-espeak
Arch Linux - AUR: python2-espeak
"""
from espeak import espeak

espeak.set_voice("en-us")
espeak.synth("Hello, world!")

# Prevent the sound from being cut off when the program ends.
while espeak.is_playing():
    continue

#espeak.cancel()  # Stop speaking
#espeak.set_voice("en-us")
#print(espeak.list_voices())