def __reset(self):
    """Restore every user field to its default (an unknown, untracked user)."""
    # NOTE(review): the original assigned CVTag = UnknownUserTag here and then
    # unconditionally overwrote it with None a few lines later; the dead first
    # store has been removed. Confirm None (not UnknownUserTag) is the
    # intended reset value for CVTag.
    self.DBID = -1
    self.Name = Config().Get("DEFAULT", "UnknownUserTag")
    self.LastName = None
    self.FirstName = None
    self.FullName = None
    self.CVTag = None
    self.Birthday = None
    self.LastSeenPrevious = None
    self.LastSeen = None
    self.LastSpokenTo = None
    self.Gender = "male"
    self.NameTitle = ""
    self.Properties = []
    self.Formal = True
    self.Trainer = False
    self.Admin = False
    self.Language = Config().Get("DEFAULT", "Language")
    self.Updated = None
def __init__(self):
    """Set up the microphone, recognizer and Wit API key from configuration."""
    self.__language_4letter_cc = Config().Get("TextToSpeech", "CountryCode4Letter")

    # Resolve the configured microphone name to a device index
    # (the last matching device wins, matching the original scan).
    self.__microphoneID = None
    configuredName = Config().Get("SpeechToText", "Microphone")
    for index, deviceName in enumerate(sr.Microphone().list_microphone_names()):
        if deviceName == configuredName:
            self.__microphoneID = index

    if self.__microphoneID is None:
        FileLogger().Error("Wit Line 22: No microphone found - Exit")
        raise Exception("Wit: No microphone found - Exit")

    self.__recognizer = sr.Recognizer()
    self.__microphone = sr.Microphone(device_index=self.__microphoneID)
    with self.__microphone as source:
        self.__recognizer.dynamic_energy_threshold = True
        self.__recognizer.adjust_for_ambient_noise(source)

    self.__apiKey = Config().Get("TextToSpeech", "WitAPIKey")
    if len(self.__apiKey) == 0:
        self.__apiKey = None
def __setUser(self, query, name):
    """Populate this user's fields from the DB rows matching *name*.

    Iterates every returned row (the last row wins), calls Update() per row,
    and finally stamps Updated with the current HHMM time.
    """
    for row in db().FetchallCacheBreaker(query.format(name)):
        self.DBID = row[0]
        self.Name = row[1]
        self.LastName = row[2]
        self.FirstName = row[3]
        self.FullName = "{0} {1}".format(row[3], row[2])
        self.CVTag = row[4]
        self.Birthday = row[5]
        self.LastSeenPrevious = row[6]
        self.LastSeen = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        self.LastSpokenTo = row[7]
        self.Gender = row[8]
        if self.Gender.lower() == "female":
            titleKey = "FormalFormOfAddressFemale"
        else:
            titleKey = "FormalFormOfAddressMale"
        self.NameTitle = Config().Get("DEFAULT", titleKey).format("")
        self.Formal = (row[9].lower() == "formal")
        self.Trainer = (row[10] == 1)
        self.Admin = (row[11] == 1)
        self.Language = row[12]
        self.Update()
    self.Updated = datetime.now().strftime("%H%M")
def __init__(self, camID):
    """Open camera *camID*, apply the configured resolution, grab one frame."""
    self.stream = cv2.VideoCapture(camID)
    # Property ids 3 / 4 are OpenCV's CAP_PROP_FRAME_WIDTH / _HEIGHT.
    self.stream.set(3, Config().GetInt("ComputerVision", "CameraWidth"))
    self.stream.set(4, Config().GetInt("ComputerVision", "CameraHeight"))
    self.grabbed, self.frame = self.stream.read()
    self.stopped = False
def callback(data):
    """ROS callback: synthesize and play audio for "TTS|<text>" messages.

    data -- std_msgs String whose .data is a '|'-separated payload.
    """
    global GLOBAL_FileNamePublisher
    dataParts = data.data.split("|")
    if dataParts[0] != "TTS":
        return
    ttsProvider = TTSMemory().GetString("TTSProvider")
    usePygame = TTSMemory().GetBoolean("UsePygame")
    FileLogger().Info("TTS, callback(), Provider: {0}".format(ttsProvider))
    try:
        # BUGFIX: the original compared the ROS message object itself
        # (`data == "TRIGGER_STOP_AUDIO"`), which is never true; the stop
        # command arrives in the payload after the "TTS|" prefix.
        if usePygame and dataParts[1] == "TRIGGER_STOP_AUDIO":
            SoundMixer().Stop()
            return

        # Dispatch to the configured provider; Speak() returns the audio file.
        provider = ttsProvider.lower()
        if provider == "google":
            data = Google().Speak(dataParts[1])
        elif provider == "microsoft":
            data = Microsoft().Speak(dataParts[1])
        elif provider == "ivona":
            data = Ivona().Speak(dataParts[1])
        elif provider == "watson":
            data = Watson().Speak(dataParts[1])

        # Record when playback will finish so other nodes can wait for it.
        try:
            audio = MP3(data)
            delay = Config().GetInt("TextToSpeech", "IntermediateAudioDelay")
            TTSMemory().Set("TTS.Until",
                            (rospy.Time.now().to_sec() +
                             int(round(audio.info.length)) + delay))
        except Exception as e:
            FileLogger().Warn(
                "TTS, callback() - Error on getting audio duration: {0}".
                format(e))

        if usePygame:
            SoundMixer().Play(data)
        else:
            audioPlayer = Config().Get("TextToSpeech", "AudioPlayer") + " '{0}'"
            os.system(audioPlayer.format(data))

        FileLogger().Info("TTS, callback(), Play Audio: {0}".format(data))
        GLOBAL_FileNamePublisher.publish("TTS|{0}".format(data))

        # Remember that we spoke to the currently loaded user.
        user = User().LoadObject()
        if user.GetName() is not None:
            user.UpdateSpokenTo()
            user.Update()
    except Exception as e:
        FileLogger().Error(
            "TTS, callback(), Error on processing TTS data: {0}".format(e))
def __init__(self):
    """Load the scope-analyzer pruning switches from configuration.

    Default values noted in the original source: False / True / True.
    """
    self.__RemoveBeforeRequirementCalculation = Config().GetBoolean(
        "Pipeline.ScopeAnalyzer",
        "RemoveLowPrioritySentencesBeforeRequirement")
    self.__RemoveAfterRequirementCalculation = Config().GetBoolean(
        "Pipeline.ScopeAnalyzer",
        "RemoveLowPrioritySentencesAfterRequirement")
    self.__RemoveStopwordOnlySentences = Config().GetBoolean(
        "Pipeline.ScopeAnalyzer",
        "RemoveStopwordOnlySentences")
def callback(data):
    """ROS callback: stamp the trigger time for matching hardware triggers.

    data -- std_msgs String of the form "TRIGGER|<type>|<key>".
    """
    dataParts = data.data.split("|")
    # BUGFIX: the original compared strings with `is`, which tests object
    # identity and is unreliable for strings built at runtime; use `==`.
    if dataParts[0] == "TRIGGER":
        # BUGFIX: TriggerType/TriggerKey hold strings (e.g. "KEY" / "ENTER"
        # per the original inline comments), so read them with Get()
        # rather than GetBoolean().
        triggerType = Config().Get("Application.SpeechToText",
                                   "TriggerType")  # KEY
        triggerKey = Config().Get("Application.SpeechToText",
                                  "TriggerKey")  # ENTER
        if dataParts[1] == triggerType and dataParts[2] == triggerKey:
            STTMemory().Set("TriggerTimestamp", time.time())
def __init__(self, name, model, dictionary):
    """Bundle a CV model with its label dictionary and prediction settings."""
    maxDistance = Config().GetInt("ComputerVision.Prediction",
                                  "MaxPredictionDistance")
    unknownTag = Config().Get("ComputerVision", "UnknownUserTag")
    self.Name = name
    self.Model = model
    self.Dictionary = dictionary
    self.PredictionResult = {}
    self.MaxPredictionDistance = maxDistance
    self.__UnknownUserTag = unknownTag
def __init__(self):
    """Load per-wheel angles and movement-mapping constants from config."""
    self.wheel = [None] * 6
    for index in range(6):
        key = "Wheel{0}Degree".format(index + 1)
        angle = Config().GetInt("Robot.Movement", key)
        # Negative angles mean "wheel not configured" and stay None.
        if angle >= 0:
            self.wheel[index] = angle
    self.MappingRange = Config().GetInt("Robot.Movement", "MappingRange")
    self.FrontDegree = 0
    self.LeftFrontAngleID = 0
    self.RightFrontAngleID = 1
    self.MaxMoveAngle = 45
def RunSTT(printData): pub = rospy.Publisher('/emerald_ai/io/speech_to_text', String, queue_size=10) rospy.Subscriber("/emerald_ai/io/hardware_trigger", String, callback) rospy.init_node('STT_node', anonymous=True) rospy.Rate(10) # 10hz useTrigger = Config().GetBoolean("Application.SpeechToText", "Trigger") triggerTime = Config().GetInt("Application.SpeechToText", "TriggerTime") sttProvider = Config().Get("SpeechToText", "Provider") # Google if (sttProvider.lower() == "google"): provider = Google() if (sttProvider.lower() == "microsoft"): provider = Microsoft() if (sttProvider.lower() == "wit"): provider = Wit() if (sttProvider.lower() == "watson"): return print sttProvider.lower() while True: #rate.sleep() if (useTrigger and (STTMemory().GetFloat("TriggerTimestamp") + triggerTime) < time.time()): time.sleep(1) continue data = provider.Listen() if (len(data) == 0): if (printData): print "None" continue if (printData): print "We got:", data FileLogger().Info("STT, RunSTT(), Input Data: {0}".format(data)) rospy.loginfo("STT|{0}".format(data)) pub.publish("STT|{0}".format(data))
def __init__(self):
    """Load the bot's identity from configuration and derive its age."""
    self.Name = Config().Get("Bot", "Name")
    self.Gender = Config().Get("Bot", "Gender")
    self.BuildDate = Config().Get("Bot", "BuildDate")
    self.Developer = Config().Get("Bot", "Developer")
    self.Origin = Config().Get("Bot", "Origin")

    # Age in whole years since BuildDate (day-accurate).
    born = datetime.strptime(self.BuildDate, '%d.%m.%Y')
    today = date.today()
    hadBirthdayThisYear = (today.month, today.day) >= (born.month, born.day)
    self.Age = today.year - born.year - (0 if hadBirthdayThisYear else 1)

    self.Status = "OK"
    self.Battery = "100%"
def GetName(self):
    """Return the preferred form of address for this user, or None.

    Preference order: formal title + last name; first name when no Name is
    set; otherwise the plain Name; None when nothing is available.
    """
    if self.Formal and self.LastName:
        if self.Gender.lower() == "female":
            wrapper = Config().Get("DEFAULT", "FormalFormOfAddressFemale")
        else:
            wrapper = Config().Get("DEFAULT", "FormalFormOfAddressMale")
        return wrapper.format(self.LastName)
    if not self.Name and self.FirstName:
        return self.FirstName
    if self.Name:
        return self.Name
    return None
def __init__(self, name):
    """Read the per-weekday [from, to] time window for config section *name*."""
    self.__Timetable = {}
    for day in ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'):
        window = Config().GetList(name, "TimeFrom{0}".format(day))
        self.__Timetable[day] = [int(window[0]), int(window[1])]
def __init__(self):
    """Configure Watson STT/TTS clients and a PyAudio capture stream."""
    # Audio capture parameters.
    self.CHUNK = 1024
    self.BUF_MAX_SIZE = self.CHUNK * 10
    self.q = Queue(maxsize=int(round(self.BUF_MAX_SIZE / self.CHUNK)))
    self.audio_source = AudioSource(self.q, True, True)
    self.FORMAT = pyaudio.paInt16
    self.CHANNELS = 1
    self.RATE = 44100

    # Credentials, endpoints and voice/language settings.
    self.__apikey_stt = Config().Get("SpeechToText", "WatsonSTTAPIKey")
    self.__url_stt = Config().Get("SpeechToText", "WatsonSTTUrl")
    self.__apikey_tts = Config().Get("TextToSpeech", "WatsonTTSAPIKey")
    self.__url_tts = Config().Get("TextToSpeech", "WatsonTTSUrl")
    self.__voiceName = Config().Get("TextToSpeech", "WatsonVoiceName")
    self.__language_2letter_cc = Config().Get("SpeechToText",
                                              "CountryCode2Letter")
    self.__language_4letter_cc = Config().Get("SpeechToText",
                                              "CountryCode4Letter")
    self.__audioPlayer = Config().Get("TextToSpeech", "AudioPlayer") + " '{0}'"

    # Watson service clients, with data-collection opt-out on both.
    self.text_to_speech = TextToSpeechV1(url=self.__url_tts,
                                         iam_apikey=self.__apikey_tts)
    self.text_to_speech.set_default_headers(
        {'x-watson-learning-opt-out': "true"})
    self.speech_to_text = SpeechToTextV1(url=self.__url_stt,
                                         iam_apikey=self.__apikey_stt)
    self.speech_to_text.set_default_headers(
        {'x-watson-learning-opt-out': "true"})

    # Microphone stream feeding pyaudio_callback; started later by Listen().
    self.audio = pyaudio.PyAudio()
    self.stream = self.audio.open(format=self.FORMAT,
                                  channels=self.CHANNELS,
                                  rate=self.RATE,
                                  input=True,
                                  frames_per_buffer=self.CHUNK,
                                  stream_callback=self.pyaudio_callback,
                                  start=False)

    try:
        rospy.init_node('STT_watson_node', anonymous=True)
    except:
        FileLogger().Info('already initialized')
def EnsureModelUpdate():
    """Rebuild every configured CV module whose stored model hash is stale."""
    for moduleName in Config().GetList("ComputerVision", "Modules"):
        storedHash = ModelMonitor().GetStoredHash(moduleName)
        if ModelMonitor().CompareHash(moduleName, storedHash):
            continue  # hash matches: model is up to date
        FileLogger().Info(
            "CV Model Rebuilder: Rebuild {0} Model".format(moduleName))
        ModelMonitor().Rebuild(moduleName)
class WebhookTrigger(object):
    """Fires IFTTT maker webhooks using the configured API key."""
    __metaclass__ = Singleton

    def __init__(self):
        # API key comes from the [IFTTT] config section.
        self.__apiKey = Config().Get("IFTTT", "APIKey")

    def TriggerWebhook(self, event, value1=None, value2=None, value3=None):
        """POST up to three optional values to *event*; no-op without a key.

        Returns the requests.Response, or None when the key is blank.
        """
        if not self.__apiKey.strip():
            return
        payload = {'value1': value1, 'value2': value2, 'value3': value3}
        url = 'https://maker.ifttt.com/trigger/{e}/with/key/{k}/'.format(
            e=event, k=self.__apiKey)
        return requests.post(url, data=payload)
def __init__(self):
    """Fetch a Microsoft speech access token and prepare the microphone."""
    self.__language_2letter_cc = Config().Get("TextToSpeech",
                                              "CountryCode2Letter")
    self.__language_4letter_cc = Config().Get("TextToSpeech",
                                              "CountryCode4Letter")
    self.__audioPlayer = Config().Get("TextToSpeech", "AudioPlayer") + " '{0}'"
    self.__voiceGender = Config().Get("TextToSpeech", "MicrosoftVoiceGender")
    self.__voiceName = Config().Get("TextToSpeech", "MicrosoftVoiceName")
    self.__apiKey = Config().Get("TextToSpeech", "MicrosoftAPIKey")

    # Exchange the subscription key for a short-lived access token.
    headers = {"Ocp-Apim-Subscription-Key": self.__apiKey}
    conn = httplib.HTTPSConnection("api.cognitive.microsoft.com")
    conn.request("POST", "/sts/v1.0/issueToken", "", headers)
    response = conn.getresponse()
    tokenBytes = response.read()
    conn.close()
    self.__accesstoken = tokenBytes.decode("UTF-8")

    # Resolve the configured microphone; the last matching device wins.
    self.__microphoneID = None
    configuredName = Config().Get("SpeechToText", "Microphone")
    for index, deviceName in enumerate(sr.Microphone().list_microphone_names()):
        if deviceName == configuredName:
            self.__microphoneID = index
    if self.__microphoneID is None:
        FileLogger().Error(
            "Microsoft Line 44: No microphone found - skip listen initialisation"
        )
        return

    self.__recognizer = sr.Recognizer()
    self.__microphone = sr.Microphone(device_index=self.__microphoneID)
    with self.__microphone as source:
        self.__recognizer.dynamic_energy_threshold = True
        self.__recognizer.adjust_for_ambient_noise(source)
def __init__(self):
    """Load Ivona credentials, voice settings and speech pacing defaults."""
    self.__language_2letter_cc = Config().Get("TextToSpeech",
                                              "CountryCode2Letter")
    self.__language_4letter_cc = Config().Get("TextToSpeech",
                                              "CountryCode4Letter")
    self.__audioPlayer = Config().Get("TextToSpeech", "AudioPlayer") + " '{0}'"
    self.__voiceGender = Config().Get("TextToSpeech", "IvonaVoiceGender")
    self.__voiceName = Config().Get("TextToSpeech", "IvonaVoiceName")
    self.__accessKey = Config().Get("TextToSpeech", "IvonaAccessKey")
    self.__secretKey = Config().Get("TextToSpeech", "IvonaSecretKey")

    # Speech pacing: one of x-slow, slow, medium, fast, x-fast.
    self.__speechRate = 'medium'
    # Pause lengths after sentences/paragraphs (units per Ivona API —
    # presumably milliseconds; confirm against __setRegion/__setCodec usage).
    self.__sentenceBreak = 400
    self.__paragraphBreak = 650
    self.__setRegion('eu-west')
    self.__setCodec('mp3')
def __init__(self, gpioTiggerName="GPIO"):
    """Publish hardware/keyboard trigger events; blocks in an event loop."""
    rospy.init_node('emerald_trigger_node', anonymous=True)
    rospy.Rate(10)  # 10hz

    pygame.init()
    pygame.display.set_mode((200, 200))

    self.__GPIOInputChannel = Config().GetInt("Trigger", "GPIOPin")
    self.__GPIOTriggerName = gpioTiggerName
    self.__Publisher = rospy.Publisher('/emerald_ai/io/hardware_trigger',
                                       String,
                                       queue_size=5)

    # NOTE(review): (self.__GPIOInputChannel) is a plain int, not a tuple —
    # confirm GPIOProxy accepts a scalar channel here.
    GPIO = GPIOProxy(None, (self.__GPIOInputChannel))
    GPIO.add_event_detect(self.__GPIOInputChannel, GPIO.RISING,
                          self.GPIOTrigger, 100)

    # Named keys are sent by name; any other key is sent as its raw keycode.
    keyNames = {13: "ENTER", 32: "SPACE", 27: "ESC"}
    while 1:
        for event in pygame.event.get():
            if event.type != pygame.KEYDOWN:
                continue
            if event.key in keyNames:
                self.SendTrigger("KEY", keyNames[event.key])
            else:
                self.SendTrigger("KEY", event.key)
def ProcessSpeech(self, sentence): if(not BrainMemory().GetBoolean("Listen") or self.__TTSActive()): return cancelSpeech = False stopwordList = Config().GetList("Bot", "StoppwordList") if(sentence in stopwordList): cancelSpeech = True self.__ResponsePublisher.publish("TTS|TRIGGER_STOP_AUDIO") if self.Pipeline is None: self.Pipeline = PipelineArgs() BrainMemory().Set("Brain.AudioTimestamp", rospy.Time.now().to_sec()) self.Pipeline.AddSentence(sentence) self.Pipeline = AnalyzeScope().Process(self.Pipeline) self.Pipeline = ProcessResponse().Process(self.Pipeline) if(not cancelSpeech and not BrainMemory().GetBoolean("Mute")): self.ProcessAnimation(self.Pipeline.Animation) if(self.Pipeline.ResponseFound): FileLogger().Info("Brain STT, ProcessSpeech(): {0}".format(self.Pipeline.Response)) self.__ResponsePublisher.publish("TTS|{0}".format(self.Pipeline.Response)) trainerResult = Trainer().Process(self.Pipeline) contextParameter = ContextParameter().LoadObject(240) contextParameter.AppendHistory(self.Pipeline) contextParameter.SaveObject() #print "Pipeline Args", self.Pipeline.toJSON() #print "Context Parameter", contextParameter.toJSON() #print "Trainer Result: ", trainerResult print "Input: ", sentence print "Response: ", self.Pipeline.Response self.Pipeline = None
def __init__(self):
    """Wire up intruder/greeting detection: config, ROS pub/sub, then spin."""
    self.__UnknownUserTag = Config().Get("Application.Brain", "UnknownUserTag")
    self.__CheckActive = Config().GetBoolean("ComputerVision.Intruder",
                                             "CheckActive")
    self.__CVSURVOnly = Config().GetBoolean("ComputerVision.Intruder",
                                            "CVSURVOnly")
    self.__Delay = Config().GetInt("ComputerVision.Intruder", "Delay")
    self.__IFTTTGreeting = Config().Get("IFTTT.Event", "IFTTTGreeting")
    self.__IFTTTIntruder = Config().Get("IFTTT.Event", "IFTTTIntruder")
    self.__IFTTTWebhook = WebhookTrigger.WebhookTrigger()
    self.__TimeTable = TimeTable.TimeTable("ComputerVision.Intruder")

    rospy.init_node('emerald_brain_actiontrigger_node', anonymous=True)
    self.__SpeechTriggerPublisher = rospy.Publisher(
        '/emerald_ai/io/speech_to_text', String, queue_size=10)
    self.__ResponsePublisher = rospy.Publisher(
        '/emerald_ai/io/text_to_speech', String, queue_size=10)
    self.__TriggerPublisher = rospy.Publisher(
        '/emerald_ai/alert/trigger', String, queue_size=10)

    # Known-person sightings.
    rospy.Subscriber("/emerald_ai/io/person", String, self.knownPersonCallback)
    # Possible intruders reported by computer vision.
    rospy.Subscriber("/emerald_ai/io/computer_vision", String,
                     self.unknownPersonCallback)
    # App status changes (sent when the app is turned on/off).
    rospy.Subscriber("/emerald_ai/app/status", String, self.appCallback)

    self.__MinTriggered = 2
    self.__TriggeredCounter = 1
    self.__LastTriggerPerson = ""

    rospy.spin()  # block here and dispatch callbacks
def __init__(self):
    """Prepare Google TTS credentials and the speech-recognition microphone."""
    self.__language_2letter_cc = Config().Get("TextToSpeech",
                                              "CountryCode2Letter")
    self.__language_4letter_cc = Config().Get("TextToSpeech",
                                              "CountryCode4Letter")
    self.__audioPlayer = Config().Get("TextToSpeech", "AudioPlayer") + " '{0}'"
    self.__asyncInit = False

    self.__apiKey = Config().Get("TextToSpeech", "GoogleAPIKey")
    if len(self.__apiKey) == 0:
        self.__apiKey = None

    # Resolve the configured microphone; the last matching device wins.
    self.__microphoneID = None
    configuredName = Config().Get("SpeechToText", "Microphone")
    for index, deviceName in enumerate(sr.Microphone().list_microphone_names()):
        if deviceName == configuredName:
            self.__microphoneID = index
    if self.__microphoneID is None:
        FileLogger().Error(
            "Google Line 38: No microphone found - skip listen initialisation"
        )
        return

    self.__recognizer = sr.Recognizer()
    # pause_threshold: minimum silence (seconds) that ends a phrase. Smaller
    # values finish recognition sooner but may cut off slow speakers.
    self.__recognizer.pause_threshold = 0.5
    self.__recognizer.operation_timeout = 3
    self.__microphone = sr.Microphone(device_index=self.__microphoneID)
    with self.__microphone as source:
        self.__recognizer.dynamic_energy_threshold = True
        self.__recognizer.adjust_for_ambient_noise(source)
#!/usr/bin/python # -*- coding: utf-8 -*- from cachetools import cached from EmeraldAI.Config.Config import Config from EmeraldAI.Logic.Singleton import Singleton if (Config().Get("Database", "ThesaurusDatabaseType").lower() == "sqlite"): from EmeraldAI.Logic.Database.SQlite3 import SQlite3 as db elif (Config().Get("Database", "ThesaurusDatabaseType").lower() == "mysql"): from EmeraldAI.Logic.Database.MySQL import MySQL as db class Thesaurus(object): __metaclass__ = Singleton def __executePragma(self): db().Fetchall("PRAGMA automatic_index=OFF;") def __executeQuery(self, query, word): return db().Fetchall(query.format(lowerword=word.lower())) @cached(cache={}) def GetSynonymsAndCategory(self, word): self.__executePragma() query = """SELECT term.normalized_word, term.word, category.category_name FROM Thesaurus_Term term, Thesaurus_Category_Link category_link, Thesaurus_Category category WHERE term.synset_id IN ( SELECT synset_id FROM Thesaurus_Term term2, Thesaurus_Synset synset WHERE (term2.word = '{lowerword}' OR term2.normalized_word = '{lowerword}') and synset.id = term2.synset_id
def __init__(self):
    """Load all sentence-scoring weights (original defaults noted per key)."""
    section = "SentenceResolver"
    self.__synonymFactor = Config().GetFloat(section, "SynonymFactor")  # 0.5
    self.__stopwordFactor = Config().GetFloat(section, "StopwordFactor")  # 0.5
    self.__parameterFactor = Config().GetFloat(section, "ParameterFactor")  # 5
    self.__parameterFactorNoKeyword = Config().GetFloat(
        section, "ParameterFactorNoKeyword")  # 0.2
    self.__parameterStopwordThreshold = Config().GetFloat(
        section, "ParameterStopwordThreshold")  # 1.5
    self.__categoryBonus = Config().GetFloat(section, "CategoryBonus")  # 1
    self.__RequirementBonus = Config().GetFloat(section,
                                                "RequirementBonus")  # 1
    self.__ActionBonus = Config().GetFloat(section, "ActionBonus")  # 1.5
    self.__InteractionBonus = Config().GetFloat(section,
                                                "InteractionBonus")  # 1.75
    self.__MinSentenceCountForRemoval = Config().GetFloat(
        section, "MinSentenceCountForRemoval")  # 5
    self.__RemoveSentenceBelowThreshold = Config().GetFloat(
        section, "RemoveSentenceBelowThreshold")  # 1.5
    self.__MinNonStopwordSentences = Config().GetFloat(
        section, "MinNonStopwordSentences")  # 1
    self.__NoneTag = "None"
#!/usr/bin/python # -*- coding: utf-8 -*- from EmeraldAI.Logic.Singleton import Singleton from EmeraldAI.Entities.Sentence import Sentence from EmeraldAI.Config.Config import Config from EmeraldAI.Logic.Logger import FileLogger if (Config().Get("Database", "ConversationDatabaseType").lower() == "sqlite"): from EmeraldAI.Logic.Database.SQlite3 import SQlite3 as db elif (Config().Get("Database", "ConversationDatabaseType").lower() == "mysql"): from EmeraldAI.Logic.Database.MySQL import MySQL as db class SentenceResolver(object): __metaclass__ = Singleton def __init__(self): self.__synonymFactor = Config().GetFloat("SentenceResolver", "SynonymFactor") #0.5 self.__stopwordFactor = Config().GetFloat("SentenceResolver", "StopwordFactor") #0.5 self.__parameterFactor = Config().GetFloat("SentenceResolver", "ParameterFactor") #5 self.__parameterFactorNoKeyword = Config().GetFloat( "SentenceResolver", "ParameterFactorNoKeyword") #0.2 self.__parameterStopwordThreshold = Config().GetFloat( "SentenceResolver", "ParameterStopwordThreshold") #1.5 self.__categoryBonus = Config().GetFloat("SentenceResolver", "CategoryBonus") #1 self.__RequirementBonus = Config().GetFloat("SentenceResolver",
class Watson():
    """IBM Watson speech-to-text / text-to-speech provider.

    Captures microphone audio via PyAudio into a queue-backed AudioSource and
    streams it to Watson over a websocket; synthesizes speech to cached MP3s.
    """
    __metaclass__ = Singleton

    def __init__(self):
        # Audio capture parameters.
        self.CHUNK = 1024
        self.BUF_MAX_SIZE = self.CHUNK * 10
        self.q = Queue(maxsize=int(round(self.BUF_MAX_SIZE / self.CHUNK)))
        self.audio_source = AudioSource(self.q, True, True)
        self.FORMAT = pyaudio.paInt16
        self.CHANNELS = 1
        self.RATE = 44100
        # Credentials, endpoints and voice/language settings from config.
        self.__apikey_stt = Config().Get("SpeechToText", "WatsonSTTAPIKey")
        self.__url_stt = Config().Get("SpeechToText", "WatsonSTTUrl")
        self.__apikey_tts = Config().Get("TextToSpeech", "WatsonTTSAPIKey")
        self.__url_tts = Config().Get("TextToSpeech", "WatsonTTSUrl")
        self.__voiceName = Config().Get("TextToSpeech", "WatsonVoiceName")
        self.__language_2letter_cc = Config().Get("SpeechToText",
                                                  "CountryCode2Letter")
        self.__language_4letter_cc = Config().Get("SpeechToText",
                                                  "CountryCode4Letter")
        self.__audioPlayer = Config().Get("TextToSpeech",
                                          "AudioPlayer") + " '{0}'"
        # Watson service clients, with data-collection opt-out on both.
        self.text_to_speech = TextToSpeechV1(url=self.__url_tts,
                                             iam_apikey=self.__apikey_tts)
        self.text_to_speech.set_default_headers(
            {'x-watson-learning-opt-out': "true"})
        self.speech_to_text = SpeechToTextV1(url=self.__url_stt,
                                             iam_apikey=self.__apikey_stt)
        self.speech_to_text.set_default_headers(
            {'x-watson-learning-opt-out': "true"})
        self.audio = pyaudio.PyAudio()
        # open stream using callback; not started until Listen() runs.
        self.stream = self.audio.open(format=self.FORMAT,
                                      channels=self.CHANNELS,
                                      rate=self.RATE,
                                      input=True,
                                      frames_per_buffer=self.CHUNK,
                                      stream_callback=self.pyaudio_callback,
                                      start=False)
        try:
            rospy.init_node('STT_watson_node', anonymous=True)
        except:
            # Another component already initialized the ROS node.
            FileLogger().Info('already initialized')

    def Speak(self, audioString, playAudio=False):
        """Synthesize *audioString* to a cached MP3; return the file path.

        Returns None for empty input. Optionally plays the file via the
        configured external audio player.
        """
        if (len(audioString) == 0):
            return
        tmpAudioFile = os.path.join(Global.EmeraldPath, "Data", "TTS",
                                    ("Watson_" +
                                     self.__language_2letter_cc + "_" +
                                     self.CleanString(audioString) + ".mp3"))
        if not os.path.isfile(tmpAudioFile):
            # NOTE(review): join(dirname(__file__), tmpAudioFile) discards the
            # first argument whenever tmpAudioFile is absolute — assumes
            # Global.EmeraldPath is an absolute path; confirm.
            with open(join(dirname(__file__), tmpAudioFile),
                      'wb') as audio_file:
                response = self.text_to_speech.synthesize(
                    audioString,
                    accept='audio/mp3',
                    voice=self.__voiceName).get_result()
                audio_file.write(response.content)
        if (playAudio):
            os.system(self.__audioPlayer.format(tmpAudioFile))
        return tmpAudioFile

    def Listen(self):
        """Stream microphone audio to Watson until interrupted.

        Repeatedly runs a recognition thread to completion; Ctrl-C stops
        recording and tears down the PyAudio stream.
        """
        self.stream.start_stream()
        try:
            while True:
                recognize_thread = Thread(
                    target=self.recognize_using_weboscket, args=())
                recognize_thread.start()
                recognize_thread.join()
        except KeyboardInterrupt:
            # stop recording and release all audio resources
            self.audio_source.completed_recording()
            self.stream.stop_stream()
            self.stream.close()
            self.audio.terminate()

    def CleanString(self, string):
        """Strip non-word characters; cap at 75 chars plus a _TRIMMED marker."""
        data = re.sub(r'\W+', '', string)
        return (data[:75] + '_TRIMMED') if len(data) > 75 else data

    def recognize_using_weboscket(self, *args):
        """Run one websocket recognition session over the queued audio."""
        mycallback = MyRecognizeCallback()
        self.speech_to_text.recognize_using_websocket(
            audio=self.audio_source,
            content_type='audio/l16; rate=44100',
            recognize_callback=mycallback,
            interim_results=True,
            model='{0}_BroadbandModel'.format(self.__language_4letter_cc),
            smart_formatting=True)

    def pyaudio_callback(self, in_data, frame_count, time_info, status):
        """PyAudio stream callback: queue captured frames, drop when full."""
        try:
            self.q.put(in_data)
        except Full:
            pass  # queue full: discard this chunk rather than block capture
        return (None, pyaudio.paContinue)
def __init__(self):
    """Open the MySQL connection described in the [Database] config section."""
    config = Config()
    self.__Database = self.GetDB(config.Get("Database", "MySQLDatabase"),
                                 config.Get("Database", "MySQLPassword"),
                                 config.Get("Database", "MySQLHost"))
#!/usr/bin/env python import rospy from nav_msgs.msg import Odometry from EmeraldAI.Logic.Singleton import Singleton from EmeraldAI.Config.Config import Config if (Config().Get("Database", "WiFiFingerprintDatabaseType").lower() == "sqlite"): from EmeraldAI.Logic.Database.SQlite3 import SQlite3 as db elif (Config().Get("Database", "WiFiFingerprintDatabaseType").lower() == "mysql"): from EmeraldAI.Logic.Database.MySQL import MySQL as db class PositionGrabber(object): __metaclass__ = Singleton __timeout = 3 def __init__(self): rospy.init_node("Position_grabber", anonymous=True) def GetLivePosition(self): try: msg = rospy.wait_for_message("/odometry/filtered", Odometry, self.__timeout) except Exception: return None return msg.pose.pose def GetDatabasePosition(self, pose, range=0.01):
def __init__(self):
    # Cache the IFTTT maker-webhook API key from the [IFTTT] config section.
    self.__apiKey = Config().Get("IFTTT", "APIKey")
#!/usr/bin/python
# -*- coding: utf-8 -*-
import re
from EmeraldAI.Logic.Modules import Global
from EmeraldAI.Config.Config import Config
if (Config().Get("Database", "NLPDatabaseType").lower() == "sqlite"):
    from EmeraldAI.Logic.Database.SQlite3 import SQlite3 as db
elif (Config().Get("Database", "NLPDatabaseType").lower() == "mysql"):
    from EmeraldAI.Logic.Database.MySQL import MySQL as db


def DetectLanguage(input):
    """Crudely classify *input* as "de" or "en" by counting common words.

    Uses word lists of the 208 most common words per language (207 plus
    hallo/hello). Ties default to German.
    """
    words_DE = Global.ReadDataFile("Commonwords", "de.txt")
    words_EN = Global.ReadDataFile("Commonwords", "en.txt")

    flags = re.IGNORECASE | re.UNICODE
    pattern_DE = re.compile(r'\b%s\b' % '\\b|\\b'.join(words_DE), flags=flags)
    pattern_EN = re.compile(r'\b%s\b' % '\\b|\\b'.join(words_EN), flags=flags)

    count_DE = len(pattern_DE.findall(input))
    count_EN = len(pattern_EN.findall(input))
    return "en" if count_EN > count_DE else "de"