def Listen(self):
    if self.__microphoneID is None:
        raise Exception("Google: No microphone found - Exit")

    with self.__microphone as source:
        self.__audio = self.__recognizer.listen(source)

    data = ""
    try:
        data = self.__recognizer.recognize_google(
            self.__audio,
            key=self.__apiKey,
            language=self.__language_4letter_cc,
            show_all=False)
    except sr.UnknownValueError as e:
        FileLogger().Warn(
            "Google Line 75: Google Speech Recognition could not understand audio: {0}"
            .format(e))
    except sr.RequestError as e:
        FileLogger().Warn(
            "Google Line 77: Could not request results from Google Speech Recognition service: {0}"
            .format(e))
    except Exception as e:
        FileLogger().Warn(
            "Google Line 81: Error on executing Google Speech Recognition service: {0}"
            .format(e))
    return data
def AsyncCallback(self, recognizer, audio):
    if self.__microphoneID is None:
        raise Exception("Google: No microphone found - Exit")

    data = ""
    try:
        data = self.__recognizer.recognize_google(
            audio,
            key=self.__apiKey,
            language=self.__language_4letter_cc,
            show_all=False)
    except sr.UnknownValueError as e:
        FileLogger().Warn(
            "Google Line 83: Google Speech Recognition could not understand audio: {0}"
            .format(e))
    except sr.RequestError as e:
        FileLogger().Warn(
            "Google Line 85: Could not request results from Google Speech Recognition service: {0}"
            .format(e))
    except Exception as e:
        FileLogger().Warn(
            "Google Line 87: Error on executing Google Speech Recognition service: {0}"
            .format(e))

    if (len(data) > 0):
        self.__asyncResultList.append(data)
def callback(data):
    global GLOBAL_FileNamePublisher
    dataParts = data.data.split("|")
    if dataParts[0] != "TTS":
        return

    ttsProvider = TTSMemory().GetString("TTSProvider")
    usePygame = TTSMemory().GetBoolean("UsePygame")
    FileLogger().Info("TTS, callback(), Provider: {0}".format(ttsProvider))
    try:
        # Compare against the message payload, not the ROS message object itself
        if usePygame and dataParts[1] == "TRIGGER_STOP_AUDIO":
            SoundMixer().Stop()
            return

        if (ttsProvider.lower() == "google"):
            data = Google().Speak(dataParts[1])
        if (ttsProvider.lower() == "microsoft"):
            data = Microsoft().Speak(dataParts[1])
        if (ttsProvider.lower() == "ivona"):
            data = Ivona().Speak(dataParts[1])
        if (ttsProvider.lower() == "watson"):
            data = Watson().Speak(dataParts[1])

        try:
            audio = MP3(data)
            delay = Config().GetInt("TextToSpeech", "IntermediateAudioDelay")
            TTSMemory().Set("TTS.Until",
                            (rospy.Time.now().to_sec() +
                             int(round(audio.info.length)) + delay))
        except Exception as e:
            FileLogger().Warn(
                "TTS, callback() - Error on getting audio duration: {0}".format(e))

        if usePygame:
            SoundMixer().Play(data)
        else:
            audioPlayer = Config().Get("TextToSpeech", "AudioPlayer") + " '{0}'"
            os.system(audioPlayer.format(data))

        FileLogger().Info("TTS, callback(), Play Audio: {0}".format(data))
        GLOBAL_FileNamePublisher.publish("TTS|{0}".format(data))

        user = User().LoadObject()
        if (user.GetName() is not None):
            user.UpdateSpokenTo()
            user.Update()
    except Exception as e:
        FileLogger().Error(
            "TTS, callback(), Error on processing TTS data: {0}".format(e))
def __init__(self):
    self.__language_4letter_cc = Config().Get("TextToSpeech",
                                              "CountryCode4Letter")

    self.__microphoneID = None
    microphoneName = Config().Get("SpeechToText", "Microphone")
    for i, microphone_name in enumerate(
            sr.Microphone().list_microphone_names()):
        if microphone_name == microphoneName:
            self.__microphoneID = i
    if self.__microphoneID is None:
        FileLogger().Error("Wit Line 22: No microphone found - Exit")
        raise Exception("Wit: No microphone found - Exit")

    self.__recognizer = sr.Recognizer()
    self.__microphone = sr.Microphone(device_index=self.__microphoneID)
    with self.__microphone as source:
        self.__recognizer.dynamic_energy_threshold = True
        self.__recognizer.adjust_for_ambient_noise(source)

    self.__apiKey = Config().Get("TextToSpeech", "WitAPIKey")
    if (len(self.__apiKey) == 0):
        self.__apiKey = None
def unknownPersonCallback(self, data):
    if (not self.__CheckActive or not self.__TimeTable.IsActive()):
        return

    dataParts = data.data.split("|")
    if (dataParts[0] == "CV" and self.__CVSURVOnly):
        return

    # No valid user within 5 Minutes
    user = User().LoadObject(300)
    if (user.GetName() is not None and user.GetName().lower() != "unknown"):
        return

    timestamp = BrainMemory().GetInt("Brain.Trigger.UnknownPerson.Timestamp",
                                     self.__Delay * 3)
    if (timestamp is None):
        BrainMemory().Set("Brain.Trigger.UnknownPerson.Timestamp",
                          rospy.Time.now().to_sec())
        return

    if (rospy.Time.now().to_sec() - timestamp > self.__Delay):
        response = ProcessTrigger().ProcessCategory("Intruder")
        if (len(response) > 1):
            FileLogger().Info(
                "ActionTrigger, unknownPersonCallback(): {0}".format(response))
            self.__ResponsePublisher.publish("TTS|{0}".format(response))
            self.__IFTTTWebhook.TriggerWebhook(self.__IFTTTIntruder)
            self.__TriggerPublisher.publish("TRIGGER|Warning|Intruder")
def appCallback(self, data):
    dataParts = data.data.split("|")
    if dataParts[0] == "FACEAPP":
        response = "TRIGGER_FACEAPP_{0}".format(dataParts[1])  # == ON / OFF
        FileLogger().Info(
            "ActionTrigger, appCallback(): {0}".format(response))
        self.__SpeechTriggerPublisher.publish("STT|{0}".format(response))
def Predict(self, image, predictionObjectList):
    if (self.__FastDetection):
        faces = self.DetectFaceFast(image)
    else:
        faces = self.DetectFaceBest(image)

    result = []
    if len(faces) > 0:
        faceId = 1
        for face in faces:
            croppedImage = self.__cropImage(image, face)
            resizedImage = cv2.resize(
                self.__toGrayscale(croppedImage),
                (self.__ResizeWidth, self.__ResizeHeight))

            predictionResult = []
            for predictionObject in predictionObjectList:
                prediction = None
                if predictionObject.Model is not None:
                    prediction = predictionObject.Model.predict(resizedImage)
                try:
                    if prediction is not None:
                        predictionResult.append({
                            'model': predictionObject.Name,
                            'value': predictionObject.Dictionary.keys()[
                                predictionObject.Dictionary.values().index(
                                    prediction[0])],
                            'rawvalue': prediction[0],
                            'distance': prediction[1]
                        })
                except Exception as e:
                    FileLogger().Error(
                        "ComputerVision: Value Error {0}".format(e))

            result.append({
                'face': {
                    'id': faceId,
                    'data': predictionResult,
                    'coords': {
                        'x': str(face[0]),
                        'y': str(face[1]),
                        'width': str(face[2]),
                        'height': str(face[3])
                    }
                }
            })
            faceId += 1
    return result, faces
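# Illustrative sketch (not from the original source): Predict() above indexes
# prediction[0] (label) and prediction[1] (distance). Assuming the model is an
# LBPH recognizer from opencv-contrib-python, the tuple is produced like this;
# the function and variable names below are illustrative.
import cv2
import numpy as np

def lbph_predict_demo(grayImages, labelIDs, probeImage):
    model = cv2.face.LBPHFaceRecognizer_create()
    model.train(grayImages, np.array(labelIDs))  # equally sized grayscale images
    label, distance = model.predict(probeImage)  # lower distance = closer match
    return label, distance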
def Process(self, PipelineArgs):
    if not self.__trainData or not PipelineArgs.TrainConversation:
        return False

    context = ContextParameter().LoadObject()
    if len(context.History) == 0:
        return False

    FileLogger().Info("Trainer, Process(), Train Sentence")
    user = User().LoadObject()
    try:
        DialogTrainer().TrainSentence(context.History[-1]["Response"],
                                      PipelineArgs.Normalized,
                                      PipelineArgs.Language, user.Name)
    except Exception as e:
        FileLogger().Error(
            "Trainer, Process(), Error on training sentence: {0}".format(e))
    return True
def EnsureModelUpdate():
    moduleList = Config().GetList("ComputerVision", "Modules")
    for moduleName in moduleList:
        if (ModelMonitor().CompareHash(
                moduleName, ModelMonitor().GetStoredHash(moduleName))):
            continue
        FileLogger().Info(
            "CV Model Rebuilder: Rebuild {0} Model".format(moduleName))
        ModelMonitor().Rebuild(moduleName)
def __loadImages(self, datasetName, imageSize=None):
    trainingData = []
    trainingLabels = []
    trainingLabelsDict = {}
    for dirname, dirnames, _ in os.walk(
            os.path.join(self.__DatasetBasePath, datasetName)):
        for subdirname in dirnames:
            if imageSize is not None and not subdirname.startswith(imageSize):
                continue
            if subdirname == self.__DisabledFileFolder:
                continue
            subjectPath = os.path.join(dirname, subdirname)
            for filename in os.listdir(subjectPath):
                if (not filename.startswith('.')
                        and filename != self.__DisabledFileFolder):
                    try:
                        image = cv2.imread(
                            os.path.join(subjectPath, filename),
                            cv2.IMREAD_GRAYSCALE)
                        trainingData.append(image)

                        # guard against imageSize being None
                        trimmedSubdirname = (subdirname.replace(imageSize, "")
                                             if imageSize is not None
                                             else subdirname)
                        if (trimmedSubdirname not in trainingLabelsDict):
                            trainingLabelsDict[trimmedSubdirname] = len(
                                trainingLabelsDict)
                        labelID = trainingLabelsDict[trimmedSubdirname]
                        trainingLabels.append(labelID)
                    except IOError as e:
                        FileLogger().Error(
                            "ComputerVision: IO Exception: {0}{1}".format(
                                e.errno, e.strerror))
                    except Exception as e:
                        FileLogger().Error(
                            "ComputerVision: Exception: {0}".format(e))
    # TrainModel() unpacks three values, so return the collected data
    return trainingData, trainingLabels, trainingLabelsDict
def Listen(self):
    if self.__microphoneID is None:
        raise Exception("Wit: No microphone found - Exit")

    with self.__microphone as source:
        self.__audio = self.__recognizer.listen(source)

    data = ""
    try:
        data = self.__recognizer.recognize_wit(self.__audio,
                                               key=self.__apiKey,
                                               show_all=False)
    except sr.UnknownValueError as e:
        FileLogger().Warn(
            "Wit.ai Line 47: Could not understand audio: {0}".format(e))
    except sr.RequestError as e:
        FileLogger().Warn(
            "Wit.ai Line 49: Could not request results from Wit.ai service: {0}"
            .format(e))
    return data
def RunSTT(printData):
    pub = rospy.Publisher('/emerald_ai/io/speech_to_text', String,
                          queue_size=10)
    rospy.Subscriber("/emerald_ai/io/hardware_trigger", String, callback)
    rospy.init_node('STT_node', anonymous=True)
    rate = rospy.Rate(10)  # 10hz

    useTrigger = Config().GetBoolean("Application.SpeechToText", "Trigger")
    triggerTime = Config().GetInt("Application.SpeechToText", "TriggerTime")

    sttProvider = Config().Get("SpeechToText", "Provider")
    provider = None
    # Google
    if (sttProvider.lower() == "google"):
        provider = Google()
    if (sttProvider.lower() == "microsoft"):
        provider = Microsoft()
    if (sttProvider.lower() == "wit"):
        provider = Wit()
    if (sttProvider.lower() == "watson"):
        return

    print sttProvider.lower()
    if provider is None:
        # avoid an unbound 'provider' below when no provider matched
        FileLogger().Error(
            "STT, RunSTT(): Unknown provider: {0} - Exit".format(sttProvider))
        return

    while True:
        #rate.sleep()
        if (useTrigger and
                (STTMemory().GetFloat("TriggerTimestamp") + triggerTime)
                < time.time()):
            time.sleep(1)
            continue

        data = provider.Listen()
        if (len(data) == 0):
            if (printData):
                print "None"
            continue
        if (printData):
            print "We got:", data

        FileLogger().Info("STT, RunSTT(), Input Data: {0}".format(data))
        rospy.loginfo("STT|{0}".format(data))
        pub.publish("STT|{0}".format(data))
def GetSummary(self, term, fallback=True, trimBrackets=True):
    summary = None
    try:
        try:
            #wikipedia.summary(query, sentences=0, chars=0, auto_suggest=True, redirect=True)
            summary = wikipedia.summary(term.title(), 0, 0, False, True)
        except wikipedia.exceptions.DisambiguationError as e:
            FileLogger().Error(
                "Wikipedia Line 22: DisambiguationError: {0}".format(e))
            if fallback:
                # fall back to the first search result for the first
                # disambiguation option
                topics = wikipedia.search(e.options[0])
                if topics:
                    summary = wikipedia.summary(topics[0])

        if summary is None or len(summary) < 5:
            return None
        if (trimBrackets):
            summary = re.sub(r"[\(\[].*?[\)\]][,.;\s]", "", summary)
        return summary
    except Exception as e:
        FileLogger().Error("Wikipedia Line 36: Exception: {0}".format(e))
    return None
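# Quick illustration of the bracket-trimming regex used in GetSummary():
# it removes a parenthesized or bracketed span plus the punctuation or space
# that follows it. The sample sentence is made up.
import re

sample = "Python (programming language) is popular."
print(re.sub(r"[\(\[].*?[\)\]][,.;\s]", "", sample))
# -> "Python is popular."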
def TrainSentence(self, OutputSentence, ResponseSentence, Language, UserName):
    # Train keywords of both sentences
    outputKeywords = self.SaveKeywordsFromSentence(OutputSentence, Language)
    self.SaveKeywordsFromSentence(ResponseSentence, Language)
    # Save sentence
    sentenceID = self.SaveSentence(ResponseSentence, Language, UserName)
    # Link keywords to sentence
    self.LinkKeywordAndSentence(outputKeywords, Language, sentenceID)

    FileLogger().Info(
        "DialogTrainer: User sentence trained: {0}".format(ResponseSentence))
def Listen(self):
    if self.__microphoneID is None:
        raise Exception("Microsoft: No microphone found - Exit")

    with self.__microphone as source:
        self.__audio = self.__recognizer.listen(source)

    data = ""
    try:
        data = self.__recognizer.recognize_bing(
            self.__audio,
            key=self.__apiKey,
            language=self.__language_4letter_cc,
            show_all=False)
    except sr.UnknownValueError as e:
        FileLogger().Warn(
            "Microsoft Line 119: Microsoft Bing Voice Recognition could not understand audio: {0}"
            .format(e))
    except sr.RequestError as e:
        FileLogger().Warn(
            "Microsoft Line 121: Could not request results from Microsoft Bing Voice Recognition service: {0}"
            .format(e))
    return data
def __init__(self):
    self.CHUNK = 1024
    self.BUF_MAX_SIZE = self.CHUNK * 10
    self.q = Queue(maxsize=int(round(self.BUF_MAX_SIZE / self.CHUNK)))
    self.audio_source = AudioSource(self.q, True, True)
    self.FORMAT = pyaudio.paInt16
    self.CHANNELS = 1
    self.RATE = 44100

    self.__apikey_stt = Config().Get("SpeechToText", "WatsonSTTAPIKey")
    self.__url_stt = Config().Get("SpeechToText", "WatsonSTTUrl")
    self.__apikey_tts = Config().Get("TextToSpeech", "WatsonTTSAPIKey")
    self.__url_tts = Config().Get("TextToSpeech", "WatsonTTSUrl")
    self.__voiceName = Config().Get("TextToSpeech", "WatsonVoiceName")
    self.__language_2letter_cc = Config().Get("SpeechToText",
                                              "CountryCode2Letter")
    self.__language_4letter_cc = Config().Get("SpeechToText",
                                              "CountryCode4Letter")
    self.__audioPlayer = Config().Get("TextToSpeech", "AudioPlayer") + " '{0}'"

    self.text_to_speech = TextToSpeechV1(url=self.__url_tts,
                                         iam_apikey=self.__apikey_tts)
    self.text_to_speech.set_default_headers(
        {'x-watson-learning-opt-out': "true"})

    self.speech_to_text = SpeechToTextV1(url=self.__url_stt,
                                         iam_apikey=self.__apikey_stt)
    self.speech_to_text.set_default_headers(
        {'x-watson-learning-opt-out': "true"})

    self.audio = pyaudio.PyAudio()

    # open stream using callback
    self.stream = self.audio.open(format=self.FORMAT,
                                  channels=self.CHANNELS,
                                  rate=self.RATE,
                                  input=True,
                                  frames_per_buffer=self.CHUNK,
                                  stream_callback=self.pyaudio_callback,
                                  start=False)

    try:
        rospy.init_node('STT_watson_node', anonymous=True)
    except Exception:
        FileLogger().Info('already initialized')
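# Hedged sketch: pyaudio_callback is referenced in the stream setup above but
# not shown in this section. A queue-feeding callback in the style of IBM's
# SDK microphone examples could look like this; it assumes
# `from Queue import Full` (Python 2) next to the existing Queue import.
def pyaudio_callback(self, in_data, frame_count, time_info, status):
    try:
        self.q.put(in_data)  # hand the raw audio chunk to the Watson AudioSource
    except Full:
        pass  # drop the chunk when the buffer is full
    return (None, pyaudio.paContinue)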
def __init__(self):
    self.__language_2letter_cc = Config().Get("TextToSpeech",
                                              "CountryCode2Letter")
    self.__language_4letter_cc = Config().Get("TextToSpeech",
                                              "CountryCode4Letter")
    self.__audioPlayer = Config().Get("TextToSpeech", "AudioPlayer") + " '{0}'"
    self.__voiceGender = Config().Get("TextToSpeech", "MicrosoftVoiceGender")
    self.__voiceName = Config().Get("TextToSpeech", "MicrosoftVoiceName")
    self.__apiKey = Config().Get("TextToSpeech", "MicrosoftAPIKey")

    params = ""
    headers = {"Ocp-Apim-Subscription-Key": self.__apiKey}
    __AccessTokenHost = "api.cognitive.microsoft.com"
    path = "/sts/v1.0/issueToken"

    conn = httplib.HTTPSConnection(__AccessTokenHost)
    conn.request("POST", path, params, headers)
    response = conn.getresponse()
    data = response.read()
    conn.close()
    self.__accesstoken = data.decode("UTF-8")

    self.__microphoneID = None
    microphoneName = Config().Get("SpeechToText", "Microphone")
    for i, microphone_name in enumerate(
            sr.Microphone().list_microphone_names()):
        if microphone_name == microphoneName:
            self.__microphoneID = i
    if self.__microphoneID is None:
        FileLogger().Error(
            "Microsoft Line 44: No microphone found - skip listen initialisation")
        return

    self.__recognizer = sr.Recognizer()
    self.__microphone = sr.Microphone(device_index=self.__microphoneID)
    with self.__microphone as source:
        self.__recognizer.dynamic_energy_threshold = True
        self.__recognizer.adjust_for_ambient_noise(source)
def knownPersonCallback(self, data):
    dataParts = data.data.split("|")
    if (dataParts[0] == "PERSON" and dataParts[1] != self.__UnknownUserTag):
        if (self.__LastTriggerPerson == dataParts[1]):
            self.__TriggeredCounter += 1
        else:
            self.__TriggeredCounter = 1
            self.__LastTriggerPerson = dataParts[1]

        if (self.__TriggeredCounter >= self.__MinTriggered):
            User().SetUserByCVTag(dataParts[1])

            # Greeting
            initGreeting = False
            try:
                lastSpokenToDate = datetime.strptime(
                    User().LastSpokenTo, "%Y-%m-%d %H:%M:%S")
                initGreeting = (lastSpokenToDate.date() <
                                datetime.today().date())
            except Exception:
                initGreeting = True

            if (initGreeting):
                response = ProcessTrigger().ProcessCategory("Greeting", User())
                lastAudioTimestamp = BrainMemory().GetString(
                    "Brain.AudioTimestamp", 20)
                lastTriggerTimestamp = BrainMemory().GetString(
                    "Brain.TriggerTimestamp", 20)
                if (lastAudioTimestamp is None
                        and lastTriggerTimestamp is None
                        and len(response) > 1):
                    FileLogger().Info(
                        "ActionTrigger, knownPersonCallback(): {0}".format(
                            response))
                    self.__ResponsePublisher.publish(
                        "TTS|{0}".format(response))
                    self.__IFTTTWebhook.TriggerWebhook(
                        self.__IFTTTGreeting, User().FullName, response)
                    self.__TriggerPublisher.publish("TRIGGER|Info|Greeting")
                    BrainMemory().Set("Brain.TriggerTimestamp",
                                      rospy.Time.now().to_sec())
def CallFunction(moduleName, className, functionName,
                 arg1=None, arg2=None, arg3=None):
    FileLogger().Info("Action called: {0}, {1}, {2}".format(
        moduleName, className, functionName))

    instance = CreateClass(moduleName, className)
    method = getattr(instance, functionName)
    if arg3 is not None:
        return method(arg1, arg2, arg3)
    if arg2 is not None:
        return method(arg1, arg2)
    if arg1 is not None:
        return method(arg1)
    return method()
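# Hedged sketch: CreateClass() is called above but not shown in this section.
# A minimal dynamic-import implementation compatible with CallFunction() could
# look like this; the no-argument constructor is an assumption.
import importlib

def CreateClass(moduleName, className):
    module = importlib.import_module(moduleName)
    return getattr(module, className)()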
def TrainModel(self, datasetName, imageSize=None):
    if imageSize is None:
        imageSize = "{0}x{1}".format(self.__ResizeWidth, self.__ResizeHeight)

    images, labels, labelDict = self.__loadImages(datasetName, imageSize)
    if len(images) == 0 or len(labels) == 0:
        FileLogger().Error("ComputerVision: No Data given")
        return

    self.__RecognizerModel.train(images, labels)
    self.__RecognizerDictionary = labelDict

    path = os.path.join(self.__DatasetBasePath, datasetName)
    try:
        # older OpenCV versions expose save(), newer ones write()
        self.__RecognizerModel.save(
            os.path.join(path, self.__ModelFile.format(imageSize)))
    except:
        self.__RecognizerModel.write(
            os.path.join(path, self.__ModelFile.format(imageSize)))
    np.save(os.path.join(path, self.__DictionaryFile.format(imageSize)),
            labelDict)
def ProcessSpeech(self, sentence):
    if (not BrainMemory().GetBoolean("Listen") or self.__TTSActive()):
        return

    cancelSpeech = False
    stopwordList = Config().GetList("Bot", "StoppwordList")
    if (sentence in stopwordList):
        cancelSpeech = True
        self.__ResponsePublisher.publish("TTS|TRIGGER_STOP_AUDIO")

    if self.Pipeline is None:
        self.Pipeline = PipelineArgs()

    BrainMemory().Set("Brain.AudioTimestamp", rospy.Time.now().to_sec())

    self.Pipeline.AddSentence(sentence)
    self.Pipeline = AnalyzeScope().Process(self.Pipeline)
    self.Pipeline = ProcessResponse().Process(self.Pipeline)

    if (not cancelSpeech and not BrainMemory().GetBoolean("Mute")):
        self.ProcessAnimation(self.Pipeline.Animation)
        if (self.Pipeline.ResponseFound):
            FileLogger().Info("Brain STT, ProcessSpeech(): {0}".format(
                self.Pipeline.Response))
            self.__ResponsePublisher.publish(
                "TTS|{0}".format(self.Pipeline.Response))

    trainerResult = Trainer().Process(self.Pipeline)

    contextParameter = ContextParameter().LoadObject(240)
    contextParameter.AppendHistory(self.Pipeline)
    contextParameter.SaveObject()

    #print "Pipeline Args", self.Pipeline.toJSON()
    #print "Context Parameter", contextParameter.toJSON()
    #print "Trainer Result: ", trainerResult
    print "Input: ", sentence
    print "Response: ", self.Pipeline.Response

    self.Pipeline = None
def __init__(self):
    self.__language_2letter_cc = Config().Get("TextToSpeech",
                                              "CountryCode2Letter")
    self.__language_4letter_cc = Config().Get("TextToSpeech",
                                              "CountryCode4Letter")
    self.__audioPlayer = Config().Get("TextToSpeech", "AudioPlayer") + " '{0}'"
    self.__asyncInit = False

    self.__apiKey = Config().Get("TextToSpeech", "GoogleAPIKey")
    if (len(self.__apiKey) == 0):
        self.__apiKey = None

    self.__microphoneID = None
    microphoneName = Config().Get("SpeechToText", "Microphone")
    for i, microphone_name in enumerate(
            sr.Microphone().list_microphone_names()):
        if microphone_name == microphoneName:
            self.__microphoneID = i
    if self.__microphoneID is None:
        FileLogger().Error(
            "Google Line 38: No microphone found - skip listen initialisation")
        return

    self.__recognizer = sr.Recognizer()
    # Represents the minimum length of silence (in seconds) that will register
    # as the end of a phrase. Can be changed.
    # Smaller values result in the recognition completing more quickly, but
    # might result in slower speakers being cut off.
    self.__recognizer.pause_threshold = 0.5
    self.__recognizer.operation_timeout = 3

    self.__microphone = sr.Microphone(device_index=self.__microphoneID)
    with self.__microphone as source:
        self.__recognizer.dynamic_energy_threshold = True
        self.__recognizer.adjust_for_ambient_noise(source)
def LoadModel(self, datasetName, imageSize=None):
    if imageSize is None:
        imageSize = "{0}x{1}".format(self.__ResizeWidth, self.__ResizeHeight)

    path = os.path.join(self.__DatasetBasePath, datasetName)
    try:
        try:
            # older OpenCV versions expose load(), newer ones read()
            self.__RecognizerModel.load(
                os.path.join(path, self.__ModelFile.format(imageSize)))
        except:
            self.__RecognizerModel.read(
                os.path.join(path, self.__ModelFile.format(imageSize)))
        self.__RecognizerDictionary = np.load(
            os.path.join(path,
                         self.__DictionaryFile.format(imageSize))).item()
        return self.__RecognizerModel, self.__RecognizerDictionary
    except Exception as e:
        FileLogger().Error(
            "ComputerVision: Exception: Error while opening File {0}".format(e))
    return None, None
def CalculateRequirement(self, sentenceList, parameterList, delete=True):
    query = """SELECT Conversation_Sentence_Requirement.Comparison,
            Conversation_Sentence_Requirement.Value,
            Conversation_Requirement.Name
        FROM Conversation_Sentence_Requirement, Conversation_Requirement
        WHERE Conversation_Sentence_Requirement.RequirementID = Conversation_Requirement.ID
        AND Conversation_Sentence_Requirement.SentenceID='{0}'
        GROUP BY Conversation_Sentence_Requirement.SentenceID,
            Conversation_Sentence_Requirement.RequirementID,
            Conversation_Sentence_Requirement.Comparison"""

    deleteList = []
    for sentenceID in sentenceList.iterkeys():
        sqlResult = db().Fetchall(query.format(sentenceID))
        for r in sqlResult:
            requirementName = r[2].title()
            if (requirementName not in parameterList
                    and r[1].lower() != self.__NoneTag.lower()):
                FileLogger().Error(
                    "SentenceResolver Line 171: Requirement missing in parameter list: {0}"
                    .format(requirementName))
                deleteList.append(sentenceID)
                continue

            if r[0] is None:
                if (type(parameterList[requirementName]) == list
                        and r[1].lower() not in parameterList[requirementName]):
                    deleteList.append(sentenceID)
                elif (type(parameterList[requirementName]) == str
                        and parameterList[requirementName].lower() != r[1].lower()):
                    deleteList.append(sentenceID)
                else:
                    sentenceList[sentenceID].AddPriority(self.__RequirementBonus)
                continue

            if r[1].lower() == self.__NoneTag.lower():
                if (r[0] == "eq"
                        and (requirementName in parameterList
                             and parameterList[requirementName] is not None)):
                    deleteList.append(sentenceID)
                    continue
                if (r[0] == "ne"
                        and not (requirementName in parameterList
                                 and parameterList[requirementName] is not None)):
                    deleteList.append(sentenceID)
                    continue
                sentenceList[sentenceID].AddPriority(self.__RequirementBonus)
                continue

            if r[0] == "lt" and not parameterList[requirementName] < r[1]:
                deleteList.append(sentenceID)
                continue
            if r[0] == "le" and not parameterList[requirementName] <= r[1]:
                deleteList.append(sentenceID)
                continue
            if r[0] == "eq" and not parameterList[requirementName] == r[1]:
                deleteList.append(sentenceID)
                continue
            if r[0] == "ne" and not parameterList[requirementName] != r[1]:
                deleteList.append(sentenceID)
                continue
            if r[0] == "ge" and not parameterList[requirementName] >= r[1]:
                deleteList.append(sentenceID)
                continue
            if r[0] == "gt" and not parameterList[requirementName] > r[1]:
                deleteList.append(sentenceID)
                continue
            sentenceList[sentenceID].AddPriority(self.__RequirementBonus)

    if delete:
        for d in list(set(deleteList)):
            del sentenceList[d]

    return {'sentenceList': sentenceList, 'deleteList': deleteList}
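# Design note (sketch, not from the original source): the lt/le/eq/ne/ge/gt
# cascade in CalculateRequirement() could be table-driven via the operator
# module, which keeps the comparison logic in one place.
import operator

COMPARATORS = {
    "lt": operator.lt, "le": operator.le,
    "eq": operator.eq, "ne": operator.ne,
    "ge": operator.ge, "gt": operator.gt,
}

def requirement_holds(comparison, actual, expected):
    # True when the stored requirement is satisfied by the parameter value
    return COMPARATORS[comparison](actual, expected)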
def on_close(self):
    FileLogger().Warn("Connection closed")

def on_listening(self):
    FileLogger().Info('Service is listening')

def on_inactivity_timeout(self, error):
    FileLogger().Error('Inactivity timeout: {}'.format(error))

def on_error(self, error):
    FileLogger().Error('Error received: {}'.format(error))

def on_connected(self):
    FileLogger().Info('Connection was successful')
def Process(self, PipelineArgs):
    sentence = PipelineArgs.GetRandomSentenceWithHighestValue()
    FileLogger().Info(
        "ProcessResponse, Process(), Sentence: {0}".format(sentence))

    responseFound = True
    if sentence is None or sentence.Rating < self.__sentenceRatingThreshold:
        responseFound = False

    if responseFound:
        user = User().LoadObject()
        PipelineArgs.ResponseRaw = sentence.GetSentenceString(user.Formal)
        PipelineArgs.Response = PipelineArgs.ResponseRaw
        PipelineArgs.ResponseID = sentence.ID
        PipelineArgs.Animation = sentence.GetAnimation()
        PipelineArgs.ResponseFound = True

        PipelineArgs.BasewordTrimmedInput = NLP.TrimBasewords(PipelineArgs)
        PipelineArgs.FullyTrimmedInput = NLP.TrimStopwords(
            PipelineArgs.BasewordTrimmedInput, PipelineArgs.Language)

        contextParameter = ContextParameter().LoadObject(240)
        if sentence.HasInteraction():
            contextParameter.InteractionName = sentence.InteractionName

        sentenceAction = sentence.GetAction()
        if sentenceAction is not None and len(sentenceAction["Module"]) > 0:
            FileLogger().Info(
                "ProcessResponse, Process(), Call Action: {0}, {1}, {2}".format(
                    sentenceAction["Module"], sentenceAction["Class"],
                    sentenceAction["Function"]))
            actionResult = Action.CallFunction(sentenceAction["Module"],
                                               sentenceAction["Class"],
                                               sentenceAction["Function"],
                                               PipelineArgs)
            # compare by value; identity ("is") is unreliable for strings
            if actionResult["ResultType"].title() == "Error":
                PipelineArgs.Response = sentence.GetActionErrorResponse(
                    PipelineArgs.Language, user.Formal)
                PipelineArgs.ResponseRaw = None
                PipelineArgs.Error.append("ProcessResponse - Action Error")
            else:
                contextParameter.SetInput(actionResult["Input"])
                contextParameter.SetResult(actionResult["Result"])
                contextParameter.SaveObject()

        contextParameter.ResetInteraction()

        contextParameterDict = contextParameter.GetParameterDictionary()
        keywords = re.findall(r"\{(.*?)\}", PipelineArgs.Response)
        for keyword in keywords:
            if keyword.title() in contextParameterDict:
                replaceword = contextParameterDict[keyword.title()]
                if replaceword is None or replaceword == "Unknown":
                    replaceword = ""
                else:
                    replaceword = "'{0}'".format(replaceword)
                PipelineArgs.Response = PipelineArgs.Response.replace(
                    "{{{0}}}".format(keyword.lower()), str(replaceword))
            else:
                PipelineArgs.Response = PipelineArgs.Response.replace(
                    "{{{0}}}".format(keyword.lower()), "")
                FileLogger().Error(
                    "ProcessResponse Line 63: Parameter missing: '{0}'".format(
                        keyword))

        contextParameter.UnsetInputAndResult()
        contextParameter.SaveObject()
    elif not responseFound and self.__aliceAsFallback:
        PipelineArgs.Response = self.__alice.GetResponse(PipelineArgs.Input)
        PipelineArgs.ResponseFound = True
        PipelineArgs.TrainConversation = False

    FileLogger().Info(
        "ProcessResponse, Process(), Response: {0}".format(
            PipelineArgs.Response))
    return PipelineArgs
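# Illustration (sketch): the `{keyword}` substitution loop in Process() above
# in miniature; the template and parameter names are made up.
import re

def fill_placeholders(template, parameters):
    for keyword in re.findall(r"\{(.*?)\}", template):
        value = parameters.get(keyword.title())
        replacement = "" if value in (None, "Unknown") else "'{0}'".format(value)
        template = template.replace("{{{0}}}".format(keyword), replacement)
    return template

print(fill_placeholders("The weather in {city} is {condition}",
                        {"City": "Berlin", "Condition": "sunny"}))
# -> The weather in 'Berlin' is 'sunny'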