def tts(message):
    """Speak *message* with the backend chosen by config and OS.

    Uses Ivona (pyvona) when the profile asks for it; otherwise falls back
    to the platform tool: `say` on macOS, `espeak` on Linux/Windows.
    """
    if profile.data['tts'] == 'ivona':
        engine = pyvona.create_voice(profile.data['ivona']['access_key'],
                                     profile.data['ivona']['secret_key'])
        # Female assistants get Salli, everything else gets Joey.
        engine.voice_name = 'Salli' if profile.data['va_gender'] == 'female' else 'Joey'
        engine.speak(message)
    else:
        if sys.platform == 'darwin':
            cmd = ['say']
            if profile.data['va_gender'] == 'male':
                cmd.append('-valex')
            cmd.append(message)
            return subprocess.call(cmd)
        elif sys.platform.startswith('linux') or sys.platform == 'win32':
            cmd = ['espeak']
            if profile.data['va_gender'] == 'female':
                cmd.append('-ven+f3')
            cmd.extend(['-s170', message])
            return subprocess.call(cmd)
def speak(text):
    """Speak *text* via Ivona (voice Salli); whitespace-only input is a no-op."""
    text = text.strip()
    if not text:
        return
    voice = pyvona.create_voice('Access Key', 'Secret Key')
    voice.voice_name = "Salli"
    voice.speak(text)
def play(self, content, ramdrive='/mnt/ram/'):
    """Synthesize *content* with Ivona onto the RAM drive and play it.

    Returns True on success, False when Ivona or the player failed.
    Always removes leftover speech files afterwards.
    """
    if self.debug:
        print("Trying Ivona.")
    rval = True
    ogg_path = ramdrive + 'tempspeech.ogg'
    try:
        # Connect to Ivona and apply the configured voice settings.
        voice = pyvona.create_voice(self.sconfig['ivona_accesskey'],
                                    self.sconfig['ivona_secretkey'])
        voice.voice_name = self.sconfig['ivona_voice']
        voice.speech_rate = self.sconfig['ivona_speed']
        # Fetch the rendered ogg, then play it to completion.
        voice.fetch_voice(content, ogg_path)
        pygame.mixer.init()
        pygame.mixer.music.load(ogg_path)
        pygame.mixer.music.play()
        while pygame.mixer.music.get_busy():
            continue
    except pyvona.PyvonaException:
        rval = False
    except subprocess.CalledProcessError:
        rval = False
    # Cleanup any speech files created in this directory.
    print("cleaning up now")
    rmcmd = 'rm -f ' + ramdrive + '*' + self.sconfig['tail']
    print(subprocess.call(rmcmd, shell=True))
    return rval
def initYvonaTtsEngine():
    """Create and return an Ivona voice: Salli, mp3 codec, long sentence break."""
    voice = pyvona.create_voice("GDNAJVR7DOF5M332VY3Q",
                                "RMlAiEzHBrFdXY0lP7TLjKptE0SjCbBcgEzVPHDz")
    voice.sentence_break = 1000
    voice.voice_name = 'Salli'  # alternative voice: 'Mathieu'
    voice._codec = 'mp3'
    return voice
def tts(message, bot, chat_id):
    """Deliver *message* as speech (Ivona / OS tool) or as a Telegram text.

    Backend is chosen from the profile: 'ivona' speaks through pyvona,
    'telegram' sends the text via *bot* to *chat_id*, anything else falls
    back to the platform tool (`say` on macOS, `espeak` on Linux/Windows).
    """
    if profile.data['tts'] == 'ivona':
        engine = pyvona.create_voice(profile.data['ivona']['access_key'],
                                     profile.data['ivona']['secret_key'])
        engine.voice_name = 'Salli' if profile.data['va_gender'] == 'female' else 'Joey'
        engine.speak(message)
    elif profile.data['tts'] == 'telegram':
        bot.sendMessage(chat_id, message)
    else:
        if sys.platform == 'darwin':
            cmd = ['say']
            if profile.data['va_gender'] == 'male':
                cmd.append('-valex')
            cmd.append(message)
            return subprocess.call(cmd)
        elif sys.platform.startswith('linux') or sys.platform == 'win32':
            cmd = ['espeak']
            if profile.data['va_gender'] == 'female':
                cmd.append('-ven+f3')
            cmd.extend(['-s170', message])
            return subprocess.call(cmd)
def __init__(self):
    """Set up the Sphinx decoder, microphone, Ivona voice and Google STT.

    Reads pyvona credentials from configs/pyvona.txt (access key on line 1,
    secret key on line 2) and the Google STT API key from the first line of
    configs/GoogleSTT.txt.
    """
    JarvisIOHandler.__init__(self)
    # Pocketsphinx model files.
    hmm = '/usr/local/share/pocketsphinx/model/en-us/en-us'
    dic = '/usr/local/share/pocketsphinx/model/en-us/cmudict-en-us.dict'
    lm = '/usr/local/share/pocketsphinx/model/en-us/en-us.lm.bin'
    config = Decoder.default_config()
    config.set_string('-hmm', hmm)
    config.set_string('-lm', lm)
    config.set_string('-dict', dic)
    config.set_string('-logfn', '/dev/null')  # silence pocketsphinx logging
    self.decoder = Decoder(config)
    self.microphone = pyaudio.PyAudio()
    # `with` guarantees the config files are closed (the original leaked
    # both file handles).
    with open('configs/pyvona.txt') as pyvona_config:
        pvcfg = pyvona_config.readlines()
    self.voice = pyvona.create_voice(pvcfg[0].strip(), pvcfg[1].strip())
    self.voice.region = 'us-west'
    self.voice.voice_name = 'Brian'
    self.voice.sentence_break = 200
    with open('configs/GoogleSTT.txt') as googleSTT_config:
        self.key = googleSTT_config.readlines()[0].strip()
    self.recognizer = sr.Recognizer()
    # Calibrate the recognizer against current room noise.
    with sr.Microphone() as source:
        self.recognizer.adjust_for_ambient_noise(source)
def play(self, content, ramdrive='/mnt/ram/'):
    """Fetch *content* as Ivona speech into *ramdrive* and play it.

    True on success; False if the Ivona call or playback setup failed.
    """
    if self.debug:
        print("Trying Ivona.")
    succeeded = True
    try:
        ivona = pyvona.create_voice(self.sconfig['ivona_accesskey'],
                                    self.sconfig['ivona_secretkey'])
        ivona.voice_name = self.sconfig['ivona_voice']
        ivona.speech_rate = self.sconfig['ivona_speed']
        ivona.fetch_voice(content, ramdrive + 'tempspeech.ogg')
        pygame.mixer.init()
        pygame.mixer.music.load(ramdrive + "tempspeech.ogg")
        pygame.mixer.music.play()
        # Busy-wait until playback finishes.
        while pygame.mixer.music.get_busy() == True:
            continue
    except (pyvona.PyvonaException, subprocess.CalledProcessError):
        succeeded = False
    # Remove any speech files left on the RAM drive.
    print("cleaning up now")
    print(subprocess.call('rm -f ' + ramdrive + '*' + self.sconfig['tail'], shell=True))
    return succeeded
def __init__(self):
    """Create the Ivona voice and index the pre-recorded phrase files.

    Each file in RECORDED_DIR becomes a [phrase, path] pair; the phrase is
    the filename stem with underscores turned into spaces.
    """
    self.voice = pyvona.create_voice(
        access_key='GDNAJHK4FS444DC7J52Q',
        secret_key='w0JX9Z7mPf56pGPUWfzbxNAKSmAz7WtQXpsJ1pdV',
    )
    self.gotowe_frazy = [
        [plik.split('.')[0].replace('_', ' '), self.RECORDED_DIR + plik]
        for plik in listdir(self.RECORDED_DIR)
    ]
def get(self, request, *args, **kwargs):
    """Speak the ?text= query parameter (Ivona voice Jacek), then render."""
    text = request.GET.get('text')
    if text:
        print(text)
        voice = pyvona.create_voice(settings.IVONA_ACCESS_KEY,
                                    settings.IVONA_SECRET_KEY)
        voice.voice_name = 'Jacek'
        voice.speak(text)
    return render(request, self.template_name)
def __init__(self, commentatorQueue, rendererQueue):
    """Worker thread wiring commentary input/output queues to an Ivona voice."""
    Thread.__init__(self)
    self.commentatorQueue = commentatorQueue
    self.rendererQueue = rendererQueue
    # Ivona: mp3 output from the us-west endpoint.
    self.v = pyvona.create_voice("GDNAJCIHKDP2YAKG664A",
                                 "8yaFb9+jGeuI8DDrYKdK+9jUVNAqqYRuxyG254la")
    self.v.codec = "mp3"
    self.v.region = "us-west"
def create_voice(self, text):
    """Speak *text* with this object's configured Ivona voice and region.

    Network/service failures are reported on stdout instead of raised.
    """
    print('creating voice')
    v = pyvona.create_voice(IVONA_ACCESS_KEY, IVONA_SECRET_KEY)
    v.voice_name = self.name
    v.region = self.region
    try:
        v.speak(text)
    # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
    except Exception:
        print("Speech: connection not found!")
def say(text):
    """Speak *text*: fetch an Ivona mp3 into a temp file and play via mpg123."""
    ivona = pyvona.create_voice(ivonaAccessKey, ivonaSecretKey)
    ivona.codec = 'mp3'
    ivona.voice_name = voice  # module-level voice selection
    ivona.region = 'eu-west'
    tmp = tempfile.NamedTemporaryFile(suffix=".mp3")
    ivona.fetch_voice(text, tmp.name)
    os.system('mpg123 -q {}'.format(tmp.name))
    tmp.close()  # closing deletes the temporary file
def create_voice(self, text):
    """Speak *text* through Ivona's 'Jacek' voice.

    Failures (e.g. no network) are reported on stdout instead of raised.
    """
    v = pyvona.create_voice(IVONA_ACCESS_KEY, IVONA_SECRET_KEY)
    v.voice_name = 'Jacek'
    try:
        v.speak(text)
    # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
    except Exception:
        print("Speech: connection not found!")
def get_voice_object():
    """Build a pyvona voice from the [credentials] section of CONFIG_FILE.

    Returns 0 when the config has no credentials section (original
    convention preserved).
    """
    config = configparser.ConfigParser()
    config.read(CONFIG_FILE)
    if 'credentials' not in config:
        return 0
    # Fixed: was misspelled `credientials`, so the lookups below raised
    # NameError whenever credentials existed.
    credentials = config['credentials']
    return pyvona.create_voice(access_key=credentials['AccessKey'],
                               secret_key=credentials['SecretKey'])
def tts(message):
    """Speak *message*: macOS uses the built-in `say`; elsewhere Ivona 'Joey'.

    Fixed: the original always created a pyvona Voice first and then, off
    macOS, string-concatenated that Voice object into a shell command,
    raising TypeError — it never actually spoke through Ivona.
    """
    if sys.platform == 'darwin':
        return os.system('say' + ' ' + message)
    tts_engine = pyvona.create_voice()
    tts_engine.voice_name = 'Joey'
    tts_engine.speak(message)
def __init__(self, start_server=True):
    """Set up the needybot speech action server, Ivona voice and publishers.

    When *start_server* is True the actionlib server starts immediately.
    """
    # -- Private properties -------------------------------------------------
    rospack = rospkg.RosPack()
    self._package_path = rospack.get_path('needybot_speech')
    self._cache_dir = rospy.get_param(
        '/needybot/speech/cache_dir',
        os.path.join(os.path.realpath(self._package_path), 'cache'))
    self._cache_manifest = os.path.join(self._cache_dir, 'manifest.json')
    self._feedback = SpeechFeedback()
    self._kill_pub = rospy.Publisher('/needybot/speech/kill', Empty,
                                     queue_size=10)
    self._result = SpeechResult()
    self._server = actionlib.SimpleActionServer('needybot_speech',
                                                SpeechAction,
                                                execute_cb=self.execute_cb,
                                                auto_start=False)
    # -- Public properties --------------------------------------------------
    # sox/ladspa effect chain applied to the rendered speech.
    self.effects = [
        'ladspa', 'tap_deesser', 'tap_deesser', '-30', '6200',
        'pitch', '200',
        'contrast', '75',
        'norm',
    ]
    self.voice = pyvona.create_voice(os.environ.get('IVONA_ACCESS_KEY'),
                                     os.environ.get('IVONA_SECRET_KEY'))
    self.voice.voice_name = rospy.get_param('/needybot/speech/voice/name',
                                            'Justin')
    self.voice.speech_rate = rospy.get_param(
        '/needybot/speech/voice/speech_rate', 'medium')
    self.voice.codec = rospy.get_param('/needybot/speech/voice/codec', 'ogg')
    self.sound_process = None
    self.cleaned_pub = rospy.Publisher('/needybot/speech/cache/cleaned',
                                       Empty, queue_size=10, latch=True)
    self.warmed_pub = rospy.Publisher('/needybot/speech/cache/warmed',
                                      Empty, queue_size=10, latch=True)
    # Seed the cache manifest on first run.
    if not os.path.exists(self._cache_manifest):
        self.create_cache_manifest()
    if start_server:
        self._server.start()
def __init__(self, commentatorQueue, rendererQueue):
    """Thread holding the commentator/renderer queues and an Ivona voice."""
    Thread.__init__(self)
    self.commentatorQueue = commentatorQueue
    self.rendererQueue = rendererQueue
    self.v = pyvona.create_voice(
        "GDNAJCIHKDP2YAKG664A",
        "8yaFb9+jGeuI8DDrYKdK+9jUVNAqqYRuxyG254la",
    )
    # mp3 output, us-west Ivona endpoint.
    self.v.codec = "mp3"
    self.v.region = "us-west"
def __load_voice(self):
    """Read access/secret keys from config.cfg and return a pyvona voice."""
    parser = ConfigParser.RawConfigParser()
    parser.read(r'config.cfg')
    access_key = parser.get('main', 'access_key')
    secret_key = parser.get('main', 'secret_key')
    return pyvona.create_voice(access_key, secret_key)
def contestarVeu(missatge):
    """Speak *missatge* through Ivona, retrying until a call succeeds.

    Uses the module-level `funciona` flag as the retry latch: it stays
    False (and the loop keeps retrying) until speak() completes.
    """
    global funciona
    while funciona == False:
        try:
            v = pyvona.create_voice("GDNAIOIEYJ4TLER6BWMQ",
                                    "0keM4rjAabbuEsVXMM9+/C+Ewn8af/ZokV5/BzwI")
            v.speak(missatge)
            time.sleep(0.5)
            funciona = True
        # Was a bare `except:`; narrowed so Ctrl-C can still interrupt the loop.
        except Exception:
            time.sleep(1)
            funciona = False
def speak(text, fileName):
    """Fetch TTS audio of *text* from Ivona and save it to *fileName* (mp3).

    Whitespace-only input is ignored.
    """
    text = text.strip()
    if not text:
        return
    voice = pyvona.create_voice('Access Key', 'Secret Key')
    # To inspect the available voices, uncomment:
    # print(voice.list_voices())
    voice.voice_name = "Salli"
    voice.codec = 'mp3'
    # Save the TTS audio of `text` to fileName in the local folder.
    voice.fetch_voice(text, fileName)
def make_wav(self, text,
             temp_path="/Users/user/Projects/alfred/tester/response.mp3",
             final_path="/Users/user/Projects/alfred/tester/response.wav"):
    """Render *text* to a wav file and return its path.

    Fetches mp3 speech from Ivona, converts it to wav with pydub, and
    removes the intermediate mp3. The previously hard-coded paths are now
    overridable keyword arguments whose defaults preserve old behavior.
    """
    v = pyvona.create_voice(self.tts_key, self.tts_secret)
    v.voice_name = self.voice
    v.codec = 'mp3'
    v.fetch_voice(text, temp_path)
    sound = pydub.AudioSegment.from_mp3(temp_path)
    sound.export(final_path, format='wav')
    os.remove(temp_path)  # drop the intermediate mp3
    return final_path
def contestarVeu(self, missatge):
    """Speak *missatge*, retrying until Ivona succeeds, then update UI status.

    NOTE(review): reconstructed from collapsed source — the trailing status
    update is assumed to run after the retry loop; confirm against history.
    """
    self.updateStatus("Parlan")
    funciona = False
    while funciona == False:
        try:
            v = pyvona.create_voice("GDNAIOIEYJ4TLER6BWMQ",
                                    "0keM4rjAabbuEsVXMM9+/C+Ewn8af/ZokV5/BzwI")
            v.speak(missatge)
            time.sleep(0.5)
            funciona = True
        # Was a bare `except:`; narrowed so Ctrl-C can still interrupt the loop.
        except Exception:
            time.sleep(1)
    if not self.veu.playing:
        self.updateStatus(False)
    else:
        self.updateStatus(True)
def init_speaker(self, tts_username, tts_key, language='en'):
    """Create and return the Ivona TTS voice.

    Credentials are read from the app-keys YAML file (keys VONA_USERNAME /
    VONA_KEY). The *tts_username*/*tts_key*/*language* parameters are kept
    for interface compatibility but are not used by this implementation.

    :param tts_username: username
    :type tts_username: :py:class:`str`
    :param tts_key: key
    :type tts_key: :py:class:`str`
    :return: configured pyvona voice
    """
    # safe_load + context manager: the original used yaml.load (unsafe and
    # deprecated without a Loader) on a file handle it never closed.
    with open("{}/{}".format(CONFIG_DIR, APP_KEYS_FILE)) as key_file:
        app_keys = yaml.safe_load(key_file)
    tts = pyvona.create_voice(app_keys[VONA_USERNAME], app_keys[VONA_KEY])
    tts.voice_name = TTS_PERONA
    return tts
def TextToSpeech(Phrase):
    """Speak *Phrase* with the configured engine (Ivona or Google gTTS).

    Renders the phrase to a temporary audio file, plays it, then deletes
    it. Empty/whitespace phrases are ignored. Errors are logged, not raised.
    """
    L.debug('Phrase to be spoken: %s' % Phrase)
    try:
        if Phrase.strip() != '':
            SpeechFileFullPath = path.join(Settings.SoundsDir,
                                           Settings.TemporaryAudioFileName)
            if Settings.TextToSpeechEngine.lower() == 'ivona':
                TTS = pyvona.create_voice(Settings.IvonaAccessKey,
                                          Settings.IvonaSecretKey)
                TTS.region = 'eu-west'
                TTS.voice_name = Settings.IvonaVoice
                TTS.codec = 'mp3'
                TTS.fetch_voice(Phrase, SpeechFileFullPath)
            else:
                TTS = gTTS(text=Phrase, lang=Settings.GoogleLanguageCode)
                TTS.save(SpeechFileFullPath)
            SP.PlayAudioFile()
            os.remove(SpeechFileFullPath)
    except HTTPError as e:
        # Fixed: the original pre-formatted the message with % but still
        # passed `e` as a stray logging argument, which itself triggers a
        # "not all arguments converted" logging error.
        L.error('%s Text-To-Speech module might not be updated: %s',
                Settings.TextToSpeechEngine, e)
    except Exception as e:
        L.error('Unknown %s Text-To-Speech module error: %s',
                Settings.TextToSpeechEngine, e)
def make_mp3(book_name):
    """Convert the global *book* (list of paragraphs) into per-paragraph mp3s.

    Output files land in a folder named after the book; a textual progress
    bar is written to stdout. Paragraphs of 3 characters or fewer are skipped.
    """
    global book
    # Ivona voice settings.
    voice = pyvona.create_voice('GDNAICTDMLSLU5426OAA',
                                '2qUFTF8ZF9wqy7xoGBY+YXLEu+M2Qqalf/pSrd9m')
    voice.codec = 'mp3'
    voice.region = 'ru-RU'
    voice.speech_rate = 'medium'  # x-slow, slow, medium, fast, x-fast
    voice.voice_name = 'Tatyana'  # Tatyana, Maxim
    # One output folder per book title.
    title = os.path.splitext(book_name)[0]
    if not os.path.exists(title):
        os.makedirs(title)
    # Start the progress bar.
    sys.stdout.write('0%...')
    sys.stdout.flush()
    index = 0
    percent = 10
    count = len(book)
    for i, par in enumerate(book):
        if len(par) > 3:
            file_name = '{0}/{0}{1:05}.mp3'.format(title, index)
            voice.fetch_voice(par, file_name)
            index += 1
        # Advance the progress bar in 10% steps.
        if i >= percent * count / 100:
            sys.stdout.write('{0}%...'.format(percent))
            sys.stdout.flush()
            percent += 10
    print('100%')
def initYvonaTtsEngine():
    """Return an Ivona voice configured for Salli with mp3 output."""
    engine = pyvona.create_voice('GDNAJVR7DOF5M332VY3Q',
                                 'RMlAiEzHBrFdXY0lP7TLjKptE0SjCbBcgEzVPHDz')
    engine.voice_name = 'Salli'
    engine._codec = 'mp3'
    return engine
import sys

import pyvona

# Speak the first command-line argument through Ivona.
missatge = sys.argv[1]
v = pyvona.create_voice("GDNAIOIEYJ4TLER6BWMQ",
                        "0keM4rjAabbuEsVXMM9+/C+Ewn8af/ZokV5/BzwI")
v.speak(missatge)
# Drive all LED columns high.
GPIO.output(C1, 1)
GPIO.output(C2, 1)
GPIO.output(C3, 1)
GPIO.output(C4, 1)
GPIO.output(C5, 1)
time.sleep(1)
# Let Eyes Flash For 5 seconds to make sure network connectivity exists
EyesFlash()
# Setup Ivona Voice: first sync the system clock from google.com's Date
# header (presumably so the signed Ivona request is accepted — verify).
os.popen('sudo date -s "$(curl -sD - google.com | grep "^Date:" | cut -d" " -f3-6)Z"')
voice = pyvona.create_voice('GDNAJLXBSXZ5SF2J65LA',
                            'ZEPIRgVi/bppB7gq4VGlKDesbvJYir24LAdAhRAO')
voice.voice_name = "Salli"
# Set Eyes Blue
EyesBlue()
# Lift Left Wing, greet, then lower it.
LiftLeftWing()
voice.speak("Hello. I'm ready!")
time.sleep(1)
# Drop Left Wing
DropLeftWing()
# Setup Microphone
print("You said {}".format(wake)) except sr.UnknownValueError: print("Oops! Didn't catch that") except sr.RequestError as e: print("Uh oh! Couldn't request results from Google Speech Recognition service; {0}".format(e)) #except IOError: #pass except sr.WaitTimeoutError: pass except KeyboardInterrupt: pass #except IOError: #pass except sr.WaitTimeoutError: pass w = Winston() winston_voice = pyvona.create_voice("GDNAJRUJP55AJ25DGUPA", "MXlt+SAWMs6ZUE4rRmq2e8LbhWIISuBpuJmi5sWV") winston_voice.codec = "mp3" winston_voice.voice_name = 'Brian' while True: w.wake() if w.woken == True: w.activate()
def speak(lang, text):
    """Speak *text* via Ivona; *lang* is accepted but currently unused."""
    voice = pyvona.create_voice("GDNAIRN4SS66PRNKPQZQ",
                                "2gURBTiaqnkjxEXZX+cslGhkJ+OVKTzWCZg7mvpp")
    # voice.voice_name = "Ruben"
    voice.speak(text)
likelyhood = int(likelyhood_string) if (0 <= likelyhood) and (likelyhood<= 30): string += "It should not rain, so don't bother taking an umbrella." elif (30 < likelyhood) and (likelyhood<= 50): string += "Keep in mind though that there is a slight chance of rain, so maybe take an umbrella with you, when you leave." elif(50< likelyhood) and (likelyhood <65): string += "Keep in mind though that it's probably going to rain, so taking an umbrella would be a smart idea." else: string += "Keep in mind though that it's definitly going to rain, so do take an umbrella." return string v = pyvona.create_voice("GDNAJLZKU4RZDT2IO7IQ", "/otzbhhTp5AlPWO1d64RWruDxA6s2Zqc7MoY9qsi") v.voice_name ="Brian" v.sentence_break = 400 #TIME hour = datetime.now().time().hour minute = datetime.now().time().minute month = monthConverter(datetime.now().month) weekday = weekdayConverter(datetime.now().weekday()) if hour > 12: am_pm = "pm" greetings = "Good Afternoon Sir. " else: am_pm = "am"
import pyvona

# NOTE(review): `filename` is defined but never used in this snippet.
filename = "/home/rajonali/AI/filename.ogg"

v = pyvona.create_voice("something", "something")
v.speak("Hello World")
def chatvoice(string):
    """Speak *string* through Ivona with the account's default voice."""
    voice = pyvona.create_voice('GDNAI2AAUGNCQFO4TFSA',
                                'a+MtpzzlpqskQsYFPMaczgYMbzXurj/i5vduNEzL')
    # voice.region('en-IN')
    # print(voice.list_voices())
    voice.speak(string)
def chatvoice(string):
    """Speak *string* through Ivona (placeholder credentials)."""
    voice = pyvona.create_voice('token', 'key')
    # voice.region('en-IN')
    # print(voice.list_voices())
    voice.speak(string)
likelyhood = int(likelyhood_string) if (0 <= likelyhood) and (likelyhood<= 30): string += "It should not rain, so don't bother taking an umbrella with you." elif (30 < likelyhood) and (likelyhood<= 50): string += "Keep in mind though that there is a slight chance of rain, so maybe take an umbrella with you, when you leave." elif(50< likelyhood) and (likelyhood <65): string += "Keep in mind though that it's probably going to rain, so taking an umbrella would be a smart idea." else: string += "Keep in mind though that it's definitly going to rain, so do take an umbrella." return string v = pyvona.create_voice("GDNAJLZKU4RZDT2IO7IQ", "/otzbhhTp5AlPWO1d64RWruDxA6s2Zqc7MoY9qsi") v.voice_name ="Brian" v.sentence_break = 400 #TIME hour = datetime.now().time().hour minute = datetime.now().time().minute month = monthConverter(datetime.now().month) weekday = weekdayConverter(datetime.now().weekday()) if hour > 12: am_pm = "pm" greetings = "Good Afternoon Sir. " else: am_pm = "am"
register(parse_credentials["application_id"], parse_credentials["rest_api_key"]) class arsenic(Object): pass DIR_PATH = os.path.dirname(os.path.realpath(__file__)) NUMBER_OF_IMAGES = 5 FONT_LOC = '%s/design_assets/impact.ttf'%DIR_PATH UNDERCOLOR = 'rgba(0,0,0)' FILLCOLOR = 'rgba(251,251,255)' v = pyvona.create_voice('GDNAJJ2TZFHSNAJAEYHA', 'vOgSfcz88uZxElIU2K5PLAgWfIJiajojTg81Wla1') def clean(text): text = text.strip() text = filter(lambda x: x in string.printable, text) #text.encode('ascii', 'ignore') text.encode('ascii',errors='ignore') text = text.replace('"','') text = text.replace("'","") return text def summarize(text): text = text.split('.') text = ".".join(text) #remove text in round brackets and square brackets text = re.sub(r'\([^)]*\)', '', text)
def tts(text):
    """Fetch *text* as mp3 speech (Ivona Amy, eu-west) into ./response.mp3."""
    voice = pyvona.create_voice(ivonaAccessKey, ivonaSecretKey)
    voice.codec = 'mp3'
    voice.voice_name = 'Amy'
    voice.region = 'eu-west'
    voice.fetch_voice(text, './response.mp3')
#qa -> question/answer qaFile = open('questions.txt') #path to your questions file qas = qaFile.readlines() qaFile.close() random.shuffle(qas) #randomize list q = qas[0].split('=')[0].rstrip() #get question (before equal sign) a = qas[0].split('=')[1].rstrip() #get answer (after equal sign) #Comment this block out if not using speech speechFile = q + '.mp3' v = pyvona.create_voice('Access Key', 'Secret Key') #From https://www.ivona.com #v.speak('Hello World') #if pygame installed v.codec = 'mp3' v.voice_name = 'Mathieu' #French voice v.fetch_voice(q, speechFile) #Save tts as mp3 call(["afplay",speechFile]) #Play mp3 os.remove(speechFile) #Delete mp3 def run(q,a): print(q) #Print question wrong = True while wrong: inp = input() #Grab input
def init_ivona(self):
    """Create the Ivona voice ('Brian') from ivona_credentials.txt."""
    creds = self.read_credentials('ivona_credentials.txt')
    ivona = pyvona.create_voice(creds['access_key'], creds['secret_key'])
    ivona.voice_name = 'Brian'
    return ivona
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Raspi Bot greeting: render a Polish greeting via Ivona, then play a
robot-filtered version of a recorded wav through sox's `play`."""
import os
import sys
import time

import pyvona

IVONA_ACCESS_KEY = 'GDNAIKZKKGPM3SPFPZGA'
IVONA_SECRET_KEY = 'PXnXmq3aV1qYsV4jxG4WtoVhESq4gZaXGjrDTBke'

name = 'Jacek'
region = 'eu-east'
text = "Witaj, jestem Raspi Bot. Asystent pokladowy Ju Es Es Enterprajs"

v = pyvona.create_voice(IVONA_ACCESS_KEY, IVONA_SECRET_KEY)
v.voice_name = name
v.region = region
try:
    v.fetch_voice(text, 'voice_file')
# Was a bare `except:`; the py2 print statement is converted to call form.
except Exception:
    print("Speech: connection not found!")
# NOTE(review): the tiny sleep is assumed to sit at top level after the
# try/except (collapsed source made the indentation ambiguous).
time.sleep(0.001)
os.system('play tmp/recorder.wav stretch 1.2 133.33 lin 0.2 0.4 \
overdrive 30 30 echo 0.4 0.8 15 0.8 \
synth sine fmod 30 echo 0.8 0.8 29 0.8')
# Speech requires Pyvona and Ivona API keys # Volume control requires ALSA Mixer command line program amixer # Media controls are configured to control Guayadeque and the Spotify desktop app via dbus # Settings: CONTROL_TOPIC = "hal/control" SPEECH_TOPIC = "speak" MATRIX_TOPIC = "notify/matrix" SOUND_TOPIC = "notify/sound" MQTT_BROKER = "magrathea" CLIENT_NAME = "python-hal" import paho.mqtt.client as mqtt from subprocess import call import pyvona v = pyvona.create_voice('enter your Ivona access key', 'enter your Ivona secret key') v.region = 'eu-west' v.voice_name = 'Salli' def on_message(client, userdata, msg): if msg.topic == CONTROL_TOPIC: if msg.payload == "volumeup": call("/usr/bin/amixer" + " set Master 2dB+ unmute -q", shell=True) elif msg.payload == "volumedown": call("/usr/bin/amixer" + " set Master 2dB- unmute -q", shell=True) elif msg.payload == "volumenorm": call("/usr/bin/amixer" + " set Master 65% unmute -q", shell=True) elif msg.payload == "volumemax": call("/usr/bin/amixer" + " set Master 100% unmute -q", shell=True) elif msg.payload == "mute": call("/usr/bin/amixer" + " -D pulse set Master 1+ toggle -q", shell=True)
import speech_recognition import pyvona import sys import ConfigParser configParser = ConfigParser.RawConfigParser() configFilePath = r'config.cfg' configParser.read(configFilePath) v = pyvona.create_voice(configParser.get('main', 'access_key'), configParser.get('main', 'secret_key')) v.voice_name = "Nicole" v.language = "en-AU" v.gender = "Female" recognizer = speech_recognition.Recognizer() def speak(text): v.speak(text) def listen(): with speech_recognition.Microphone() as source: recognizer.adjust_for_ambient_noise(source) audio = recognizer.listen(source) try: #return recognizer.recognize_sphinx(audio) return recognizer.recognize_google(audio)
def extract_body(payload): if isinstance(payload, str): return payload else: return '\n'.join( [extract_body(part.get_payload()) for part in payload]) conn = imaplib.IMAP4_SSL("imap.gmail.com", 993) conn.login("USERNAME", "PASSWORD") conn.select() typ, data = conn.search(None, 'UNSEEN') try: v = pyvona.create_voice('Access Key', 'Secret Key') v.region = 'us-east' v.voice_name = 'Salli' global FireAlertLED FireAlertLED = 26 GPIO.setmode(GPIO.BOARD) GPIO.setup(FireAlertLED, GPIO.OUT) for num in data[0].split(): typ, msg_data = conn.fetch(num, '(RFC822)') for response_part in msg_data: if isinstance(response_part, tuple): msg = email.message_from_string(response_part[1]) subject = msg['subject'] print(subject) payload = msg.get_payload() body = extract_body(payload)
# v.region = 'eu-west' # v.speak(recv_msg['Subject']) # print ("Release") # time.sleep(1) # # else: # # print("Generic Message") # print(recv_msg['Subject']) # time.sleep(1) # v.voice_name = 'Amy' # v.region = 'eu-west' # v.speak(recv_msg['Subject']) # print ("Release") # time.sleep(1) # # count = 6 # # except IndexError: # time.sleep(30*1) # if count < 5: # count = count + 1 # continue # else: # print("Nothing to read here") # count = 6 # v = pyvona.create_voice('GDNAIGAEGXRXXNGOQLBA', 'g4UzzjSfpfL+HkXys8fRgeJ3aBG848qA5N8qf7Vp') checkEmail()
"""Minimal Ivona smoke test: speak "Hello World"."""
import pyvona

v = pyvona.create_voice("GDNAIGXR4JY4Q6F7RP3Q",
                        "IwDCAScw5SZ5blP5wprvtpuzet5m7ho4WKBiliKv")
v.speak("Hello World")
# Fixed: a dangling `v.` token at the end of the file was a SyntaxError.
"""Announce the current time through Ivona and a RAM-drive ogg file."""
import subprocess
import time

import pygame
import pyvona

# Build the spoken time string, e.g. "The Current Time is 7 05 PM".
# Fixed: the `time` module was never imported (NameError at the strftime
# call); the result variable is renamed so it no longer shadows the module.
now_chars = list(time.strftime("%I %M %p"))
if now_chars[0][0] == "0":  # strip a leading zero from the hour
    now_chars.pop(0)
spoken_time = "The Current Time is " + ''.join(now_chars)

v = pyvona.create_voice('GDNAJW3FDVSMQKUCCFKQ',
                        'RoXbQ1VnTPU/dvmzhSwx43mjnXhBzlEeMc2qoNcu')
# Settings for ivona
v.voice_name = 'Brian'
v.speech_rate = 'slow'
# Get ogg file with speech, then play it to completion.
v.fetch_voice(spoken_time, '/mnt/ram/tempspeech.ogg')
pygame.mixer.init()
pygame.mixer.music.load("/mnt/ram/tempspeech.ogg")
pygame.mixer.music.play()
while pygame.mixer.music.get_busy() == True:
    continue
print('cleaning up now')
print(subprocess.call('rm /mnt/ram/*.ogg', shell=True))
else: numeric_level = getattr(logging, args.loglevel.upper(), None) FORMAT = '%(asctime)-15s %(levelname)s - %(name)s - %(message)s' logging.basicConfig(format=FORMAT,level=numeric_level) logger = logging.getLogger('wAlpha') logger.debug("__main__: Starting ervers: %s ",Version) # Read Configurations yaml config file yconfig = readyaml(ymlfile) logger.info("main - yaml read [\033[0;32mOK\033[0m] %s " ,yconfig['pyvonna']['user_key']) logger.info("main - yaml read [\033[0;32mOK\033[0m] %s " ,yconfig['pyvonna']['cert_key']) logger.info("main - yaml read [\033[0;32mOK\033[0m] %s " ,yconfig['translate']['google_api_key']) logger.info("main - yaml read [\033[0;32mOK\033[0m] %s " ,yconfig['wolframalpha']['alpha_api_key']) #exit(0) v = pyvona.create_voice(yconfig['pyvonna']['user_key'], yconfig['pyvonna']['cert_key']) v.voice_name = 'Astrid' q_message = alphaquest() #print (q_message) trans = translator_class.translator(yconfig['translate']['google_api_key']) testout = trans.svtoen(q_message) print (testout) alphao = walpha_question_class.alpha(yconfig['wolframalpha']['alpha_api_key']) eanswer = alphao.ask(testout) print (eanswer) sanswer = trans.entosv(eanswer) v.speak(sanswer)
def checkEmail():
    """Poll Gmail for unseen messages, run a themed LED cue, speak the subject.

    The subject is matched against known keywords to pick an LED handler;
    every message is then announced by Ivona's Amy voice. Retries a few
    times on empty inboxes before giving up.
    """
    mail = imaplib.IMAP4_SSL('imap.gmail.com')
    mail.login('*****@*****.**', 'yourgmailpassword')
    mail.list()  # Gives list of folders or labels in gmail.
    v = pyvona.create_voice('YourIvonaAccessKey', 'YourIvonaSecretKey')
    # keyword -> (console label, LED handler). Order preserves the original
    # elif chain, so overlapping subjects resolve identically. This replaces
    # nine near-identical copy/pasted branches.
    dispatch = [
        ("facebook", "Facebook Message", fbon),
        ("twitter", "Twitter Message", twon),
        ("youtube", "YouTube Message", redon),
        ("Cloudy", "Cloudy Message", clon),
        ("Sunset", "Sunset Message", sson),
        ("Showers", "Showery Message", shon),
        ("Claire", "Wifely Message", cmon),
        ("Sunny", "Sunny Message", snon),
    ]
    count = 0
    while count < 600:
        try:
            # Connect to inbox
            mail.select("inbox")
            # Search for an unread email from user's email address
            result, data = mail.search(
                None, '(UNSEEN FROM "*****@*****.**")')
            ids = data[0]  # data is a list
            id_list = ids.split()  # ids is a space separated string
            latest_email_id = id_list[-1]  # get the latest
            result, data = mail.fetch(latest_email_id, "(RFC822)")
            raw_email = data[0][1]
            recv_msg = email.message_from_string(raw_email)
            subject = recv_msg['Subject']
            for keyword, label, led_on in dispatch:
                if keyword in subject:
                    break
            else:
                label, led_on = "Generic Message", gron
            print(label)
            print(subject)
            led_on()
            time.sleep(1)
            v.voice_name = 'Amy'
            v.region = 'eu-west'
            v.speak(subject)
            print("Release")
            time.sleep(1)
            alloff()
            count = 6
        except IndexError:
            # No matching unread mail yet; retry a few times, then give up.
            time.sleep(30 * 1)
            if count < 5:
                count = count + 1
                continue
            else:
                print("Nothing to read here")
                count = 6
# This has been extensively tested in Windows 8 - 64bit, with Python 3.4
# You need to install the following modules: speech_recognition, pygame,
# wolframalpha, wikipedia, pyvona, pyperclip, psutil and webbrowser
import speech_recognition as sr
import webbrowser
import wolframalpha
import wikipedia
import time
import os
import pyvona
import pyperclip
import win32com.client
import psutil

# Ivona text-to-speech setup.
v = pyvona.create_voice('access_key', 'secret_key')  # api for ivona
v.voice_name = 'Emma'  # selecting ivona voice

# Wolfram Alpha client.
cl = wolframalpha.Client('access_key')  # api for wolfram alpha
att = cl.query('Test/Attempt')

# Speech recognizer tuning.
r = sr.Recognizer()  # starting the speech_recognition recognizer
r.pause_threshold = 0.7  # it works with 1.2 as well
r.energy_threshold = 400

shell = win32com.client.Dispatch("WScript.Shell")  # to handle keyboard events

v.speak('Hello! For a list of commands, plese say "keyword list"...')
print("For a list of commands, please say: 'keyword list'...")

# List of Available Commands
keywd = 'keyword list'
google = 'search for'
# -*- coding: utf8 -*- import requests, random, urllib2, pyvona from pygame import mixer from threading import Thread v = pyvona.create_voice('GDNAJVKVFZS7FXQKGDLA', 'SjID1xWuAwZgo7P6T/PztcW8Kfrhk74Tw/1fdt0T') v.voice_name = 'Maxim' BOT = "https://api.telegram.org/bot214845784:AAHxJmUQcnYejv10siWkemKYr7Y68EnLu8c"; searchArtist_query = "http://ws.audioscrobbler.com/2.0/?method=artist.search&api_key=44694551b394e4a51530539a07cfa753&format=json" artistTop_query = "http://ws.audioscrobbler.com/2.0/?method=artist.getTopTracks&api_key=44694551b394e4a51530539a07cfa753&format=json&limit=15&mbid=" artistSimilar_query = "http://ws.audioscrobbler.com/2.0/?method=artist.getSimilar&api_key=44694551b394e4a51530539a07cfa753&format=json&limit=100" trackSimilar_query = "http://ws.audioscrobbler.com/2.0/?method=track.getSimilar&api_key=44694551b394e4a51530539a07cfa753&format=json&limit=10" vkAudio_query = "https://api.vk.com/method/audio.search?access_token=9107a569ca4be8a80eead4e10e52956091055123f80eade3be60d901efbaf143c290bbfde38405be01b0b" mbid_artist = '' next_track = False is_similar = False player_isnt_sleeping = True is_volume_changed = False volume = 20 def getRandomTrack(): global mbid_artist print "Generating random track..." person = open(random.choice(['serafim.txt', 'vadim.txt', 'nickolay.txt']), 'r') artist = random.choice([line for line in person])[:-1] person.close() print "select: "+artist request = requests.get(searchArtist_query, params={'artist': artist})
def __init__(self, parent, id):
    """Build the ADARA main window: worker threads, TTS voice and wx layout."""
    wx.Frame.__init__(self, parent, id, 'ADARA - Virtual Assistant',
                      size=(800, 720))
    # Voice-recognition thread starts immediately; the information-retrieval
    # thread is created here but started elsewhere.
    self.recThread = threading.Thread(name="recThread", target=self.voiceRec)
    self.recThread.setDaemon(True)
    self.recThread.start()
    self.infoThread = threading.Thread(name="infoThread",
                                       target=self.retrieveInfo)
    self.infoThread.setDaemon(True)
    # Text-to-speech object.
    self.v = pyvona.create_voice("GDNAIEH64WBP7PU3JMTQ",
                                 "WO72jncHEtZpLUmx47a/kUSiwHbvHiGGQ/BrmW4b")
    self.v.voice_name = "Salli"
    # Panel plus animated gif and logo.
    self.panel = wx.Panel(self, -1)
    self.panel.SetBackgroundColour('#FFFFFF')
    self.gif = "images/animation.gif"
    self.gif_ctrl = wx.animate.GIFAnimationCtrl(self.panel, -1, self.gif)
    self.gif_ctrl.GetPlayer().UseBackgroundColour(True)
    self.gif_ctrl.Play()
    self.image = wx.Image("images/logo.png").ConvertToBitmap()
    self.logo = wx.StaticBitmap(
        self.panel, -1, self.image,
        size=(self.image.GetWidth(), self.image.GetHeight()))
    # Input/output text controls.
    self.Txtbox = wx.TextCtrl(
        self.panel, -1,
        style=wx.TE_MULTILINE | wx.TE_NO_VSCROLL | wx.TE_RICH2)
    self.Outbox = rt.RichTextCtrl(
        self.panel, -1,
        style=wx.TE_MULTILINE | wx.TE_READONLY | wx.VSCROLL)
    self.Outbox.SetBackgroundColour("#FFFFFF")
    self.Outbox.SetForegroundColour("#000000")
    self.Txtbox.SetBackgroundColour("#FFFFFF")
    self.Txtbox.SetForegroundColour("#000000")
    # Buttons: "Send" and the microphone toggle.
    self.speak_icon = wx.Image("images/microphone.png").ConvertToBitmap()
    self.speak_icon2 = wx.Image("images/microphone_on.png").ConvertToBitmap()
    self.button1 = wx.Button(self.panel, 1, "Send",
                             size=(50, self.Txtbox.GetSize()[1]))
    self.button1.SetForegroundColour("#000000")
    self.button2 = wx.Button(self.panel, 2, "",
                             size=(50, self.Txtbox.GetSize()[1]))
    self.button2.SetForegroundColour("#000000")
    self.button2.SetBitmap(self.speak_icon)
    # Event handlers.
    self.Bind(wx.EVT_CLOSE, self.onQuit)
    self.button1.Bind(wx.EVT_BUTTON, self.onSend)
    self.button2.Bind(wx.EVT_BUTTON, self.onRec)
    # Window icon.
    self.favicon = wx.Icon('images/icon.ico', wx.BITMAP_TYPE_ICO, 16, 16)
    wx.Frame.SetIcon(self, self.favicon)
    # Layout: logo column, then the main grid.
    logoSizer = wx.BoxSizer(wx.VERTICAL)
    logoSizer.Add(self.gif_ctrl, flag=wx.EXPAND | wx.RIGHT, border=0)
    logoSizer.Add(self.logo, flag=wx.EXPAND | wx.LEFT, border=2)
    txtSizer = wx.GridBagSizer(0, 0)
    txtSizer.Add(self.Outbox, pos=(0, 0), span=(2, 1),
                 flag=wx.EXPAND | wx.LEFT | wx.TOP | wx.BOTTOM, border=5)
    txtSizer.AddGrowableRow(0)
    txtSizer.AddGrowableCol(0)
    txtSizer.Add(self.Txtbox, pos=(2, 0),
                 flag=wx.EXPAND | wx.LEFT | wx.BOTTOM, border=5)
    txtSizer.Add(logoSizer, pos=(0, 1), flag=wx.EXPAND | wx.ALL, border=15)
    txtSizer.Add(self.button1, pos=(2, 1),
                 flag=wx.EXPAND | wx.RIGHT | wx.LEFT | wx.BOTTOM, border=5)
    txtSizer.Add(self.button2, pos=(1, 1),
                 flag=wx.EXPAND | wx.RIGHT | wx.LEFT | wx.BOTTOM, border=5)
    self.panel.SetSizerAndFit(txtSizer)
    # AI state
    self.state = 0
import os

import pyvona

# Ivona credentials: read from the environment when available so secrets do
# not have to live in source control; the original literal values are kept as
# fallbacks for backward compatibility.
# SECURITY NOTE: rotate these keys and remove the hardcoded fallbacks.
voice = pyvona.create_voice(
    os.environ.get('IVONA_ACCESS_KEY', 'GDNAJGS7LA727ED4TP2Q'),
    os.environ.get('IVONA_SECRET_KEY', 'DUyJY7AxZbaVJBmU4UYOpAHNe1FDUbuFlezAqeCW'))
voice.voice_name = "Eric"
voice.speak("Hello! My name is Hommy. I am your smart home")
root.title('Ivona Speech Synthesizer')


def callback():
    # Placeholder: Ivona will speak the ScrolledText contents from here.
    pass


def thr():
    # Placeholder: intended to run callback() in a worker thread so the Tk
    # main loop does not freeze while the speech request is in flight.
    pass


# Multi-line input box (word wrap) on the left, Speak button on the right.
entry = scrolledtext.ScrolledText(root, width=30, height=10, wrap=WORD)
entry.grid(row=0, column=0)
button = ttk.Button(root, text='Speak', command=thr)
button.grid(row=0, column=1, sticky=N)
#placeholder for ivona
#v = pyvona.create_voice('key1', 'key2')
# NOTE(review): key1/key2 are not defined in this chunk — presumably
# module-level credentials defined above; confirm before shipping.
v = pyvona.create_voice(key1, key2)
v.voice_name = 'Emma'
# Spoken once at startup as a smoke test of the credentials/connection.
v.speak('I have been initialized correctly!')
entry.focus()
root.mainloop()
import natural.text
import pyvona
import random
import string

# Working directory (NOTE(review): `os` is not imported in this chunk —
# presumably imported above; confirm).
cwd = os.getcwd()
# Short beep sample, fully decoded up front (streaming=False).
beep = media.load('beep.mp3', streaming=False)

# Locales offered for synthesis.
languages = [
    'en-us',
    'en-uk',
]
# Ivona voice names available for selection.
vonavoices = [
    'Salli', 'Joey', 'Russell', 'Eric', 'Brian', 'Amy', 'Emma', 'Jennifer',
    'Raveena'
]

# NOTE(review): hardcoded Ivona credentials — rotate and move to
# configuration/environment.
pvoice = pyvona.create_voice('GDNAJCV7Q37OKRSAHSBQ',
                             'uN15N6WygfoUqscgfj/st8tanQYbObcPOlTiuoBe')
pvoice.codec = 'mp3'

# Speech recognizer and audio player (sr/media come from imports outside this
# chunk).
r = sr.Recognizer()
player = media.Player()


def exit_callback(dt):
    # Clock callback: exit the app once audio playback has finished.
    if player.playing != True:
        app.exit()


def get_mp3(text):
    """Prepare *text* for synthesis (definition appears to continue beyond
    this chunk).

    Replaces ';' with the literal word "beep" so the beep sample can be cued,
    then strips punctuation. string.maketrans with two arguments is a
    Python 2 idiom.
    """
    text = text.replace(";", " beep ")
    text = text.translate(string.maketrans("", ""), string.punctuation)
def __init__(self):
    """Create the Ivona TTS client from the application's Context settings."""
    access_key = Context.getPyvona('accessKey')
    secret_key = Context.getPyvona('secretKey')
    self.v = pyvona.create_voice(access_key, secret_key)
    # Voice identity and speaking rate come from the same Context config.
    self.v.voice_name = Context.getPyvona('name')
    self.v.speech_rate = Context.getPyvona('rate')
def checkEmail():
    """Poll Gmail for unread mail and react on the robot.

    A non-"selfie" subject is spoken aloud (Amy, eu-west) while the motors
    drive forward briefly; a "selfie" subject runs a drive/flash sequence,
    takes a photo with the Pi camera, tweets it, and announces it.

    NOTE(review): once count reaches 6 the `while count < 60` condition can
    never become false, so this polls indefinitely (sleeping 30 s per empty
    check) — confirm that is intended.
    """
    mail = imaplib.IMAP4_SSL('imap.gmail.com')
    # Placeholder credentials — replace with the real account before use.
    mail.login('your_gmail_address', 'your_gmail_password')
    mail.list()  # Lists folders/labels in Gmail (result unused).
    v = pyvona.create_voice('your_ivona_access_key', 'your_ivona_secret_key')
    count = 0
    while count < 60:
        try:
            # Connect to the inbox.
            mail.select("inbox")
            # Search for unread email from the configured sender address.
            result, data = mail.search(None, '(UNSEEN FROM "your_email_address")')
            ids = data[0]  # data is a list
            id_list = ids.split()  # ids is a space separated string
            # IndexError here (no unread mail) is handled below.
            latest_email_id = id_list[-1]  # get the latest
            result, data = mail.fetch(latest_email_id, "(RFC822)")
            raw_email = data[0][1]
            recv_msg = email.message_from_string(raw_email)
            if (recv_msg['Subject'] != "selfie"):
                # Ordinary message: light the LED, drive forward, speak the
                # subject line, then release the motors.
                print("Normal Message")
                print(recv_msg['Subject'])
                print "Forward! "
                GPIO.output(24, True)
                myMotor.run(Adafruit_MotorHAT.FORWARD)
                myMotor1.run(Adafruit_MotorHAT.BACKWARD)
                myMotor.setSpeed(255)
                myMotor1.setSpeed(255)
                v.voice_name = 'Amy'
                v.region = 'eu-west'
                v.speak(recv_msg['Subject'])
                print "Release"
                time.sleep(1)
                myMotor.run(Adafruit_MotorHAT.RELEASE)
                myMotor1.run(Adafruit_MotorHAT.RELEASE)
                GPIO.output(24, False)
            elif (recv_msg['Subject'] == "selfie"):
                # Selfie sequence: alternate forward/backward drives with
                # flashes on GPIO 25, then photograph, tweet, and announce.
                print("Selfie Trigger")
                myMotor.run(Adafruit_MotorHAT.FORWARD)
                myMotor1.run(Adafruit_MotorHAT.BACKWARD)
                myMotor.setSpeed(255)
                myMotor1.setSpeed(255)
                time.sleep(.8)
                GPIO.output(25, True)
                time.sleep(.2)
                GPIO.output(25, False)
                myMotor.run(Adafruit_MotorHAT.BACKWARD)
                myMotor1.run(Adafruit_MotorHAT.FORWARD)
                myMotor.setSpeed(255)
                myMotor1.setSpeed(255)
                time.sleep(.8)
                GPIO.output(25, True)
                time.sleep(.2)
                GPIO.output(25, False)
                myMotor.run(Adafruit_MotorHAT.FORWARD)
                myMotor1.run(Adafruit_MotorHAT.BACKWARD)
                myMotor.setSpeed(255)
                myMotor1.setSpeed(255)
                time.sleep(.8)
                GPIO.output(25, True)
                time.sleep(.2)
                GPIO.output(25, False)
                # Prepare the camera while continuing the drive sequence.
                camera = PiCamera()
                camera.vflip = True
                camera.start_preview()
                myMotor.run(Adafruit_MotorHAT.BACKWARD)
                myMotor1.run(Adafruit_MotorHAT.FORWARD)
                myMotor.setSpeed(255)
                myMotor1.setSpeed(255)
                time.sleep(.8)
                GPIO.output(25, True)
                time.sleep(.2)
                GPIO.output(25, False)
                myMotor.run(Adafruit_MotorHAT.FORWARD)
                myMotor1.run(Adafruit_MotorHAT.BACKWARD)
                myMotor.setSpeed(255)
                myMotor1.setSpeed(255)
                time.sleep(.8)
                GPIO.output(25, True)
                time.sleep(.2)
                GPIO.output(25, False)
                myMotor.run(Adafruit_MotorHAT.RELEASE)
                myMotor1.run(Adafruit_MotorHAT.RELEASE)
                time.sleep(.2)
                camera.capture('/home/pi/image.jpg')
                camera.stop_preview()
                # Play the shutter sound.
                # NOTE(review): the format string has one placeholder but two
                # arguments (the second `path` is ignored) — confirm intent.
                os.system('mpg123 -q {}camera.mp3'.format(path, path))
                message = "RabbitPi Selfie!"
                with open('/home/pi/image.jpg', 'rb') as photo:
                    twitter.update_status_with_media(status=message, media=photo)
                # A fresh voice client is created here, shadowing the outer v.
                v = pyvona.create_voice('your_ivona_access_key', 'your_ivona_secret_key')
                v.voice_name = 'Amy'
                v.region = 'eu-west'
                GPIO.output(24, True)
                v.speak('Selfie Tweeted')
                GPIO.output(24, False)
                # Double flash to confirm the tweet.
                GPIO.output(25, True)
                time.sleep(.2)
                GPIO.output(25, False)
                time.sleep(.2)
                GPIO.output(25, True)
                time.sleep(.2)
                GPIO.output(25, False)
                time.sleep(.2)
            else:
                print("Nothing")
            count = 6
        except IndexError:
            # No unread mail: wait 30 s, then retry (up to 5 quick retries
            # before settling into the long-poll state; see NOTE in docstring).
            time.sleep(30 * 1)
            if count < 5:
                count = count + 1
                continue
            else:
                print("Nothing to read out right now")
                count = 6
#!/usr/bin/python
"""Speak the command-line arguments aloud through Ivona (pyvona).

Usage: script.py <words to speak ...>
"""
import os
import subprocess
import sys

import pyvona

__author__ = "Shyam"

# Select the audio route with amixer before speaking. numid=3 with value 1 is
# conventionally the Raspberry Pi's analog-jack route — confirm on target
# hardware. The command is passed as an argument list with shell=False
# (no shell injection surface), and its output is discarded explicitly
# instead of via shell redirection.
with open(os.devnull, 'wb') as devnull:
    subprocess.call(['amixer', 'cset', 'numid=3', '1'],
                    stdout=devnull, stderr=devnull)

# NOTE(review): hardcoded Ivona credentials — rotate and move to
# configuration/environment.
v = pyvona.create_voice('GDNAJ3Z7P4V6SVWR3TJQ',
                        'V53YvHKZRmnr57/ZfcRMk/qfcRbHjFiq9jCrGDEG')
v.speak(" ".join(sys.argv[1:]))
def text_to_voice(text):
    """Speak *text* aloud through the Ivona (pyvona) TTS service."""
    # A fresh client is created per call; the voice is always "Eric".
    tts = pyvona.create_voice('GDNAJGS7LA727ED4TP2Q',
                              'DUyJY7AxZbaVJBmU4UYOpAHNe1FDUbuFlezAqeCW')
    tts.voice_name = "Eric"
    tts.speak(text)
def __init__(self, master=None):
    """Build the Skye Cortex frame: TTS, camera feed, GPIO/I2C, and worker
    threads.

    Starts the pyvona voice, configures the Tk window, spins up the camera
    render thread, initializes GPIO and the I2C sensor, draws the GUI, and
    starts the metrics and heart-PWM threads.
    """
    # Start SKYE Talk.
    # NOTE(review): hardcoded Ivona credentials in source — rotate these keys
    # and move them to configuration/environment.
    self.voice = pyvona.create_voice(
        'GDNAJLXBSXZ5SF2J65LA', 'ZEPIRgVi/bppB7gq4VGlKDesbvJYir24LAdAhRAO')
    self.voice.voice_name = "Salli"
    # Initialize window settings (size bounds, title, close handler).
    tk.Frame.__init__(self, master, padx=5, pady=5, bg="light gray")
    master.minsize(width=500, height=700)
    master.maxsize(width=1000, height=900)
    master.title("Skye Cortex")
    master.wm_title("Skye Cortex")
    master.wm_protocol("WM_DELETE_WINDOW", self.onClose)
    self.grid()
    # Start the camera; sleep gives the Pi camera time to warm up.
    print("Initializing Camera...")
    self.vs = VideoStream(usePiCamera=True).start()
    time.sleep(2.0)
    self.frame = None
    self.captureThread = None
    self.stopEvent = None
    # Haar cascade used for face detection in the render thread.
    self.faceCascade = cv2.CascadeClassifier(
        "Components/HaarClassifier.xml")
    self.stopEvent = threading.Event()
    self.captureThread = threading.Thread(target=self.renderFeed, args=())
    self.captureThread.start()
    # Set up Raspberry Pi GPIO and the speed control variable.
    self.heartDutyCycle = 100
    self.initializeGPIO()
    self.speed = tk.IntVar()
    # Power management register addresses (MPU-style IMU).
    self.power_mgmt_1 = 0x6b
    self.power_mgmt_2 = 0x6c
    # Set up I2C on bus 1; 0x68 is the sensor's address.
    self.bus = smbus.SMBus(1)
    self.address = 0x68  # I2C address
    # NOTE(review): writing 0 to PWR_MGMT_1 typically clears the sleep bit
    # (i.e. wakes an MPU-6050) despite the original "sleep mode" comment —
    # confirm against the sensor datasheet.
    self.bus.write_byte_data(self.address, self.power_mgmt_1, 0)
    # Draw graphics: GUI skeleton plus the initial camera-display image.
    self.renderGUI()
    self.temp = ImageTk.PhotoImage(Image.open("Components/Hymn.jpg"))
    self.captureDisplay = tk.Label(self.grpVision, image=self.temp,
                                   borderwidth=0, highlightthickness=0,
                                   width=320, height=240)
    self.captureDisplay.grid(row=0, column=0, padx=5, pady=5, columnspan=3,
                             sticky=tk.N)
    self.captureDisplay.grid_propagate(0)
    # Start the metrics thread.
    self.metricsThread = None
    self.metricsThread = threading.Thread(target=self.updateMetrics, args=())
    self.metricsThread.start()
    # Start the heart PWM thread.
    self.heartPWMThread = None
    self.heartPWMThread = threading.Thread(target=self.heartPulse, args=())
    self.heartPWMThread.start()