def event_loop():
    """Main loop: wait for the hotword, record the utterance, send it to Wit."""
    wit = Wit(wit_token())
    my_mic = Mic(DEFAULT_DICT, DEFAULT_LANG, DEFAULT_DICT, DEFAULT_LANG)
    while True:
        # listen for activation hotword
        try:
            threshold, text = my_mic.passiveListen(PERSONA)
        except Exception:
            # best-effort: keep listening on any recognition error
            # (narrowed from a bare except, which also swallowed KeyboardInterrupt)
            continue
        # detected hotword
        if not threshold:
            continue
        audio_file = activeListenFile(threshold)
        if not audio_file:
            continue
        data = None
        try:
            # retrieve wit intent; open in binary mode and close the handle
            # (the original leaked the file object)
            with open(audio_file, 'rb') as f:
                data = wit.post_speech(f)
            # send to handler service
            raise NotImplementedError('no handler code yet')
        except Exception as e:
            print("Exception in audio_file handling:")
            print(str(e))
        if data:
            print("Data: ")
            # pprint() prints by itself; the original printed its None return value
            pprint(data)
def PostTextString(self, text):
    """Send a text query to Wit and wrap the result in a WitAiQueryResponse.

    :param text: plain-text query string forwarded to Wit's message endpoint
    :return: WitAiQueryResponse wrapping Wit's analysis
    """
    # Form a text query for Wit
    w = Wit(self.witToken)
    # The original wrapped this in try/except that only re-raised — removed.
    return WitAiQueryResponse(w.get_message(text))
def chat(bot, update):
    """Telegram handler: answer fixed menu buttons with canned text, otherwise
    ask Wit.ai for the intent and reply from the prepared `dictionary`.

    :param bot: telegram Bot used to send replies
    :param update: incoming telegram Update carrying the user's message
    """
    user_id = update.message.from_user.id
    answer = update.message.text
    # Fixed menu buttons get canned (Russian) answers.
    if answer == 'О марафонах':
        text = "Марафоны - это круто!"
        bot.sendMessage(user_id, text=text, reply_markup=main_kbd)
    elif answer == 'Категории':
        text = "Пока категории вопросов не созданы. Вы можете ввести вопрос самостоятельно"
        bot.sendMessage(user_id, text=text, reply_markup=main_kbd)
    elif answer == 'Моя программа':
        text = "Подождите, какая еще программа? Вы же даже не знаете, что такое марафон. Сначала узнайте, а потом уже спрашивайте про программу!"
        bot.sendMessage(user_id, text=text, reply_markup=main_kbd)
    elif answer == 'INFO':
        text = "Появление информации ожидается в скором времени"
        bot.sendMessage(user_id, text=text, reply_markup=main_kbd)
    else:
        # Free-form question: classify it with Wit.ai.
        actions = dict()
        client = Wit(access_token=wit_token, actions=actions)
        client_answer = client.message(answer)
        try:
            # Reject low-confidence intents (< 0.6) with a fallback message.
            if client_answer['entities']['intent'][0]['confidence'] < 0.6:
                text = "К сожалению, ответ на этот вопрос мне не известен. Попробуйте другой вопрос."
                bot.sendMessage(user_id, text=text, reply_markup=main_kbd)
            else:
                # Map the intent value to a prepared answer in `dictionary`.
                codec = client_answer['entities']['intent'][0]['value']
                text = dictionary[codec]
                bot.sendMessage(user_id, text=text, reply_markup=main_kbd)
        except KeyError:
            # No intent entity (or unknown dictionary key) — same fallback.
            text = "К сожалению, ответ на этот вопрос мне не известен. Попробуйте другой вопрос."
            bot.sendMessage(user_id, text=text, reply_markup=main_kbd)
def DecodeWaveFile(self, waveFileName):
    """Build a speech decode request around Wit.

    :param waveFileName: path of the WAV file to transcribe
    :return: WitAiQueryResponse wrapping Wit's speech result
    """
    # Form a query for Wit speech recognition
    w = Wit(self.witToken)
    # Open in binary mode and close the handle deterministically — the
    # original opened in text mode and leaked the file object; the
    # try/except that only re-raised has been removed.
    with open(waveFileName, 'rb') as audio:
        return WitAiQueryResponse(w.post_speech(audio))
def send_message(recipient, text):
    """Run Wit.ai actions for `text`, then post a reply via the FB Send API.

    :param recipient: Facebook user id to message
    :param text: user's input text forwarded to Wit's run_actions
    """
    client = Wit(WITAI_TOKEN, actions)
    session_id = 'my-user-id-42'
    context0 = client.run_actions(session_id, text, {})
    print(context0)
    data = {
        "recipient": {"id": recipient},
        # NOTE(review): WitAi_returnMessage is not defined in this function —
        # presumably a module global set by a Wit action; confirm, otherwise
        # this line raises NameError at runtime.
        "message": {"text": WitAi_returnMessage}
    }
    resp = requests.post("https://graph.facebook.com/v2.6/me/messages?access_token=" + ACCESS_TOKEN, json=data)
    print(resp.content)
def main():
    """CLI entry point: launch an interactive Wit session.

    Expects exactly one command-line argument: the Wit.ai access token.
    """
    if len(sys.argv) != 2:
        print('usage: python ' + sys.argv[0] + ' <wit-token>')
        exit(1)

    def send(request, response):
        # Echo Wit's reply text to stdout.
        print(response['text'])

    client = Wit(access_token=sys.argv[1], actions={'send': send})
    client.interactive()
def facebookBot(request):
    """Django view for the Facebook Messenger webhook.

    Handles the GET verification handshake, then routes each incoming text
    message through Wit.ai actions, keyed by the sender's id.
    """
    try:
        # Webhook verification handshake (GET with hub.* params).
        if 'hub.verify_token' in request.GET:
            if request.GET['hub.verify_token'] == models.Config.get('FacebookVerifyToken'):
                if 'hub.challenge' in request.GET:
                    return HttpResponse(request.GET['hub.challenge'])
            return HttpResponse("KO")
        body = json.loads(request.body)
        for entry in body['entry']:
            for message in entry['messaging']:
                # Skip echoes of our own messages and non-message events.
                if 'is_echo' not in message and 'message' in message:
                    senderId = message['sender']['id']
                    client = Wit(access_token=models.Config.get('WitToken'), actions=actions)
                    client.run_actions("session_%s" % senderId, message['message']['text'], {'senderId': senderId})
    except Exception:
        # Fixed Python-2-only "except Exception, e" syntax; the bound name
        # was unused anyway.
        traceback.print_exc()
def __init__(self, lisa=None):
    """Keep the lisa reference, open the Mongo connection and a Wit client."""
    self.lisa = lisa
    self.configuration = configuration
    db_conf = self.configuration['database']
    mongo = MongoClient(host=db_conf['server'], port=db_conf['port'])
    self.database = mongo.lisa
    self.wit = Wit(self.configuration['wit_token'])
class WitAnalysis:
    """Wrapper around the Wit.ai client for extracting sentiment from titles."""

    def __init__(self):
        self.client = Wit(WIT_ACCESS_TOKEN)

    def extract_sentiments(self, title):
        """Send `title` to Wit; return None for titles containing emoji."""
        has_emoji = bool(emoji.get_emoji_regexp().search(title))
        if has_emoji:
            return None
        return self.client.message(title)

    def extract_intents(self, sentiment):
        """Return the top intent name, '' if no intents, None if no response."""
        if not sentiment:
            return None
        if len(sentiment['intents']) == 0:
            return ''
        return sentiment['intents'][0]['name']

    def extract_intents_confidence(self, sentiment):
        """Return the top intent confidence, '' if no intents, None if no response."""
        if not sentiment:
            return None
        if len(sentiment['intents']) == 0:
            return ''
        return sentiment['intents'][0]['confidence']

    def extract_traits(self, sentiment):
        """Return the wit$sentiment trait value, '' when absent, None if no response."""
        if not sentiment:
            return None
        # Guard on the specific key: the traits dict may be non-empty yet lack
        # 'wit$sentiment', which previously raised KeyError.
        trait = sentiment['traits'].get('wit$sentiment')
        if not trait:
            return ''
        return trait[0]['value']

    def extract_traits_confidence(self, sentiment):
        """Return the wit$sentiment trait confidence, '' when absent, None if no response."""
        if not sentiment:
            return None
        trait = sentiment['traits'].get('wit$sentiment')
        if not trait:
            return ''
        return trait[0]['confidence']

    def get_sentiments(self, master, top_tickers):
        """Annotate each ticker's titles with Wit sentiment columns, save CSVs.

        :param master: DataFrame with 'extracted' (set of tickers) and 'title'
        :param top_tickers: iterable of ticker symbols to process
        :return: list of (DataFrame, ticker) tuples
        """
        tickers_sentiments = []
        for ticker in top_tickers:
            # .copy() so the new columns are written to an owned frame, not a
            # view of `master` (avoids pandas SettingWithCopyWarning).
            sentiment_df = master.loc[master['extracted'] == {ticker}].copy()
            sentiment_df['sentiments'] = sentiment_df['title'].apply(self.extract_sentiments)
            sentiment_df['intents'] = sentiment_df['sentiments'].apply(self.extract_intents)
            sentiment_df['intents_confidence'] = sentiment_df['sentiments'].apply(self.extract_intents_confidence)
            sentiment_df['traits'] = sentiment_df['sentiments'].apply(self.extract_traits)
            sentiment_df['traits_confidence'] = sentiment_df['sentiments'].apply(self.extract_traits_confidence)
            tickers_sentiments.append((sentiment_df, ticker))
        for sentiment, ticker in tickers_sentiments:
            data_directory = Path('./data/sentiments')
            data_directory.mkdir(parents=True, exist_ok=True)
            output_path = data_directory / f'{dt.date.today()}_{ticker}_sentiment_df.csv'
            sentiment.to_csv(output_path, index=False)
        return tickers_sentiments
class RtmEventHandler(object):
    """Dispatches Slack RTM events; bot mentions are routed to Wit.ai converse."""

    def __init__(self, slack_clients):
        self.clients = slack_clients
        # SECURITY NOTE(review): hard-coded Wit token checked into source —
        # should come from configuration/environment.
        self.wit_token = "CBTOMOXYK3WQ74JW47IZGICC65EXBGZ2"
        logging.info("wit token: {}".format(self.wit_token))
        self.wit_client = Wit(self.wit_token)
        logging.info("wit: {}".format(dir(self.wit_client)))

    def handle(self, event):
        # Only typed events are dispatched; everything else is ignored.
        if 'type' in event:
            self._handle_by_type(event['type'], event)

    def _handle_by_type(self, event_type, event):
        # See https://api.slack.com/rtm for a full list of events
        if event_type == 'error':
            # error
            logger.debug('Error event')
        elif event_type == 'message':
            # message was sent to channel
            self._handle_message(event)
        elif event_type == 'channel_joined':
            # you joined a channel
            logger.debug('Channel joined')
        elif event_type == 'group_joined':
            # you joined a private group
            logger.debug('Group joined')
        else:
            pass

    def _handle_message(self, event):
        # Filter out messages from the bot itself
        if not self.clients.is_message_from_me(event['user']):
            logger.debug('Got event: {}'.format(event))
            user = self.clients.rtm.server.users.find(event['user'])
            logger.debug('From user: {}'.format(user))
            msg_txt = event['text']
            if self.clients.is_bot_mention(msg_txt):
                # User mentiones bot
                # NOTE(review): fixed session id 'test' means all users share
                # one Wit conversation — confirm this is intended.
                session_id = 'test'
                channel_id = event['channel']
                context = {
                    'channel_id': channel_id,
                    'user': user.name,
                    'user_id': event['user'],
                }
                logger.debug('Sending msg: {} to Wit.ai'.format(msg_txt))
                resp = self.wit_client.converse(session_id, msg_txt, context)
                logger.debug('Got resp: {}'.format(resp))
                if resp['type'] == 'msg':
                    # Wit produced a text reply — forward it to the channel.
                    msg = resp['msg']
                    if isinstance(channel_id, dict):
                        channel_id = channel_id['id']
                    logger.debug('Sending msg: {} to channel: {}'.format(msg, channel_id))
                    channel = self.clients.rtm.server.channels.find(channel_id)
                    channel.send_message("{}".format(msg.encode('ascii', 'ignore')))
                elif resp['type'] == 'action':
                    # Actions are only logged, not executed.
                    logger.debug('do {}'.format(resp['action']))
def google(self, audio, d): r = sr.Recognizer() # recognize speech using Google Speech Recognition try: start = timeit.default_timer() response = r.recognize_google(audio)['alternatives'][0] stop = timeit.default_timer() confidence = response['confidence'] original_speech = response['transcript'] running_time = stop - start speech = multi_replace(self.similar, original_speech) self.log.info('Google (running time ' + str(stop - start) + ', confidence ' + str(confidence) + '): ' + speech) wit_start = timeit.default_timer() client = Wit(access_token=self.keys['Wit.ai']) message = client.message(msg=speech, verbose=True) wit_end = timeit.default_timer() d['Google'] = message end = timeit.default_timer() log_message = 'Google (running time ' + str(running_time) \ + ', confidence ' + str(confidence) + '):\n' + original_speech \ + '\nWit intent processing running time: ' + str(wit_end - wit_start) \ + '\nTotal running time: ' + str(end-start) self.log.info(log_message) except sr.UnknownValueError: d['Google'] = {} self.log.debug( "Google Speech Recognition could not understand audio") except sr.RequestError as e: d['Google'] = {} self.log.debug("Could not request results from \ Google Speech Recognition service; {0}".format(e)) except Exception as e: d['Google'] = {} self.log.debug("Google - ERROR: " + str(e))
def create_client(send_function, access_token=tokens.WIT_APP_TOKEN):
    """Generate a Wit client wired to the caller-supplied send action.

    Called from apollobot.py.
    """
    print("Generated Wit Client with 'send' function provided: %r" % (send_function))
    # Register the handler on the shared module-level actions dict.
    actions['send'] = send_function
    wit_client = Wit(access_token=access_token, actions=actions)
    return wit_client
class STTHandler:
    """Speech-to-text handler backed by the Wit.ai /speech endpoint."""

    def __init__(self):
        # Python-2 print statement converted to a function call.
        print("In the sttHandler class cons")
        self.client = Wit(access_token=config.ACCESS_TOKEN)

    def extractTextFromSpeech(self, f):
        """Send the WAV audio file object `f` to Wit and return its response."""
        resp = self.client.speech(f, None, {'Content-Type': 'audio/wav'})
        return resp
def message_response(message_text1):
    """Query Wit.ai for `message_text1` and return (intent, entity value).

    Either tuple element is None when Wit's response lacks the expected keys.
    """
    access_token = "ERFWG3HTOK5TRYDHCJYJAKGZCIL2JKSH"
    client = Wit(access_token=access_token)
    intent = None
    entity = None
    value = None
    response = client.message(message_text1)
    print(response)
    try:
        intent = response['entities']['intent'][0]['value']
        entity = list(response['entities'])[1]
        value = response["entities"][entity][0]['value']
    except (KeyError, IndexError):
        # Narrowed from a bare except: missing intent entity or a response
        # with fewer than two entity groups.
        pass
    return (intent, value)
def wit_ai_understand(msg, audio_blob=False):
    """Classify a user message (text or WAV audio) with Wit.ai and pick a
    canned response line for the detected intent.

    :param msg: text string, or a file-like audio object when audio_blob=True
    :param audio_blob: True when msg is an audio stream to transcribe
    :return: (recognized_text, response_string) tuple
    """
    client = Wit(g.chatbot_proj['token'])
    user_resp = None
    if audio_blob:
        # Rewind the stream before uploading it.
        msg.seek(0)
        user_resp = client.speech(msg, {'Content-Type': 'audio/wav'})
    else:
        user_resp = client.message(msg)
    # No intent detected at all: generic fallback.
    if user_resp['intents'] == []:
        return user_resp[
            'text'], "Sorry, I don't understand your question. I can only answer questions within the topic."
    resp_intent = user_resp['intents'][0]
    proj_name = g.chatbot_proj['proj_name']
    # Responses live in <instance>/<proj>/responses/<intent>.txt (TSV table).
    responses_fpath = os.path.join(current_app.instance_path, proj_name,
                                   'responses', resp_intent['name'] + '.txt')
    if not os.path.isfile(responses_fpath):
        print(
            f"Cannot response to {resp_intent['name']} because {responses_fpath} is missing"
        )
        return user_resp[
            'text'], "Sorry, I don't understand your question. Please ask your teacher for help."
    resp_df = pandas.read_csv(responses_fpath, sep='\t', encoding='utf-8', quotechar='"')
    # Filter candidate rows by every entity the response table also knows.
    ent_keys = set(user_resp['entities'].keys())
    df_keys = set(resp_df.columns)
    #TODO: Emit a log if the ent_keys has key that df_keys does not have
    common_keys = df_keys.intersection(ent_keys)
    resp_df_sel = None
    for k in common_keys:
        v = user_resp['entities'][k][0]['value']
        if resp_df_sel is None:
            resp_df_sel = resp_df[k] == v
        else:
            resp_df_sel = resp_df_sel & (resp_df[k] == v)
    resp_df = resp_df[resp_df_sel]  #Related to the intent
    # Pick one matching response at random.
    # NOTE(review): randint raises ValueError when no row matched — confirm
    # the response tables always cover every entity combination.
    resp_df_idx = random.randint(0, resp_df['wit_response'].count() - 1)
    return user_resp['text'], resp_df['wit_response'].iloc[resp_df_idx]
class WitAIService:
    """Thin service wrapper exposing Wit.ai entity extraction."""

    def __init__(self):
        self.access_token = config['wit_ai']['Server_Access_Token']
        self.client = Wit(self.access_token)

    def retrive_message_entity(self, message):
        """Return the `entities` section of Wit's response for `message`."""
        return self.client.message(message)['entities']
def check_context_wit_ai(spoken_text):
    """
    Sends the spoken text to wit.ai and recognizes the context/intent of it and passes a json back.
    :param spoken_text: german spoken text by the user
    :return: context as string
    """
    client = Wit('XXX')  # replace here API key wit ai
    response = client.message(spoken_text)
    # return found intent
    print(response)  # py2 print statement converted to a function call
    try:
        # list(...) replaces .keys()[0], which fails on Python 3 dict views;
        # KeyError added in case the response has no 'entities' section.
        answer = list(response['entities'])[0]
    except (IndexError, KeyError):
        return "not_found"
    return answer
class WitAi(object):
    """Facade over the wit.Wit client for message analysis and conversations."""

    __ENTITIES_TAG = 'entities'
    __DEFAULT = None

    def __init__(self, access_token, actions):
        self.__wit_client = Wit(access_token, actions)

    def analyze(self, msg):
        """Return the raw Wit response for `msg`."""
        return self.__wit_client.message(msg)

    def get_entities(self, msg):
        """Return only the entities section of the response (None if absent)."""
        response = self.__wit_client.message(msg)
        return response.get(self.__ENTITIES_TAG, self.__DEFAULT)

    def talk(self, session_id, msg, context):
        """Drive Wit's run_actions for one conversation turn."""
        return self.__wit_client.run_actions(session_id, msg, context)
class BotWit():
    """Wit.ai helper used to classify tweets about lost items."""

    client = None

    def __init__(self, access_token):
        self.client = Wit(access_token)

    def get_wit_response(self, message):
        """Return the raw Wit response for `message` (None without a client)."""
        if self.client is None:
            return
        resp = self.client.message(message)
        print(str(resp))
        return resp

    def get_intent(self, message):
        """Return True when the tweet's entities look like a lost-item report."""
        if self.client is None:
            return
        print("Tweet: " + message)
        entities = self.client.message(message)['entities']
        lost_intent = self.first_entity_value(entities, 'lost_intent')
        search_type = self.first_entity_value(entities, 'search_type')
        lost_adj = self.first_entity_value(entities, 'lost_adj')
        bot_name = self.first_entity_value(entities, 'bot_name')
        print(search_type, " ", lost_intent, " ", lost_adj)
        # A bot-name mention always matches; otherwise require a search type
        # plus either a lost intent or a lost adjective.
        if bot_name:
            return True
        if search_type and (lost_intent or lost_adj):
            return True
        return False

    def first_entity_value(self, entities, entity):
        """Return the first value recorded for `entity`, or None."""
        if entity not in entities:
            return None
        val = entities[entity][0]['value']
        return val if val else None
def get_response(msg, SERVER_ACCESS_TOKEN):
    """
    Methods gets the intent-confidence in dictionary format (form wit-ai) for a given input
    """
    client = Wit(SERVER_ACCESS_TOKEN)
    wit_intents = list(client.message(msg)['intents'])
    best = {}
    best_score = 0
    # Keep only the highest-confidence intent; ties favour the later entry,
    # matching the original >= comparison.
    for item in wit_intents:
        score = item['confidence']
        if score >= best_score:
            best = {item['name']: score}
            best_score = score
    return best
class WitBot():
    """Conversational Caltrain bot driven by Wit.ai stories.

    Wit actions ('say', 'merge', ...) are bound to methods of this class; the
    conversation context carries the user's stations, time and trip data.
    """

    def __init__(self, witai_key, say_func):
        actions = {
            'say': say_func,
            'merge': self.merge,
            'error': self.error,
            'get_trip': self.get_trip,
            'clear_context': self.clear_context,
        }
        self._wit_client = Wit(witai_key, actions)
        self._caldb = CalDbInterface()

    def chat(self, username, timezone, input_msg, session_id, context={}):
        """Run one conversation turn; return the updated Wit context.

        NOTE(review): the mutable default `context={}` is shared across calls
        — confirm callers always pass an explicit context.
        """
        if 'messenger_id' not in context:
            context['messenger_id'] = username
        if 'timezone' not in context:
            # NOTE(review): the `timezone` parameter is never read; the
            # default is always America/Los_Angeles — confirm intent.
            context['timezone'] = 'America/Los_Angeles'
        context = self._wit_client.run_actions(session_id, input_msg, context)
        return context

    def merge(self, session_id, cxt, entities, msg):
        """Wit 'merge' action: fold recognized entities into the context."""
        logger.info(json.dumps(cxt, indent=4))
        for name, vals in entities.items():
            if name == 'caltrain_station_start':
                cxt['start_stop'] = vals[0]
            elif name == 'caltrain_station_end':
                cxt['end_stop'] = vals[0]
            elif name == 'datetime':
                cxt['stated_time'] = vals[0]
        return cxt

    def error(self, session_id, cxt, e):
        """Wit 'error' action: log and re-raise."""
        logger.error('Wit.ai error occurred.')
        raise e

    def clear_context(self, session_id, cxt):
        """Reset the context, keeping only identity and timezone."""
        new_cxt = {
            'messenger_id': cxt['messenger_id'],
            'timezone': cxt['timezone'],
        }
        return new_cxt

    def get_trip(self, session_id, cxt):
        """Look up the matching trip and store its time in the context."""
        stated_time = cxt.get('stated_time')['value']
        start_stop = cxt.get('start_stop')['value']
        end_stop = cxt.get('end_stop')['value']
        bullet = cxt.get('bullet')
        # Map spoken station names to database stop identifiers.
        start_stop = station_mapping.get(start_stop)
        end_stop = station_mapping.get(end_stop)
        trip = self._caldb.get_trip(start_stop, end_stop, stated_time, bullet)
        cxt['train_time'] = trip['min_time']
        return cxt
def post(self, request):
    """Chat endpoint: persist the user's message, query Wit.ai for the intent,
    build and persist the bot's response, then return it.
    """
    #get and save user message
    message = request.POST.get('message', '')
    user = None
    if request.user.is_authenticated:
        user = User.get_user(request.user)
    text_message = TextMessage(message)
    builder = ResponseBuilder()
    builder.add(text_message)
    message_to_save = Message(
        user=user,
        sender_type=Message.SENDER_USER,
        content=builder.get_response()
    )
    message_to_save.save()
    # get chat_responses from Wit
    client = Wit(access_token=config.WIT_ACCESS_TOKEN)
    wit_response = client.message(message)
    wit_response = self._parse_response(wit_response)
    try:
        # Pick the builder registered for the parsed intent type.
        response_builder = RESPONSE_BUILDERS[wit_response['type']]
    except (KeyError, TypeError):
        # Unknown type, or _parse_response returned None: fallback builder.
        response_builder = RESPONSE_BUILDERS['no_intent']
    # get and save bot message
    bot_response = response_builder().get(wit_response)
    if request.user.is_authenticated:
        for bot_message in bot_response['messages']:
            if bot_message['type'] == 'movies':
                # Record which movies were shown to this user.
                add_collected_data(bot_message['content'], user)
    message_to_save = Message(
        user=user,
        content=bot_response
    )
    message_to_save.save()
    return Response(bot_response)
def home(request):
    """Django view: on POST, transcribe the uploaded WAV with Wit.ai, draw the
    requested shape in the requested color, and refresh the page.
    """
    if request.method == 'POST':
        #print(request.body)
        #f = open('./file.wav', 'wb')
        #f.write(request.body)
        #f.close()
        client = Wit(access_token)
        resp = None
        #with open('./file.wav', 'rb') as f:
        # Request body is forwarded directly as the audio payload.
        resp = client.speech(request.body, {'Content-Type': 'audio/wav'})
        print('Yay, got Wit.ai response: ' + str(resp))
        #print(resp['text'])
        #resolve intent
        intent = resp['intents'][0]['name']
        #resolve Color Entity
        color = (0, 0, 0)
        if resolve_color(resp) is not None:
            color = to_bgr[resolve_color(resp)]
        # NOTE(review): frame_b64 is only bound for draw_square/draw_circle —
        # any other intent raises NameError below; confirm the Wit app only
        # produces these two intents.
        if intent == 'draw_square':
            frame_b64 = to_base64(draw_square(color=color))
        if intent == 'draw_circle':
            frame_b64 = to_base64(draw_circle(color=color))
        #context = {'text' : 'test','img': ('data:image/jpeg;base64, '+ frame_b64.decode("utf-8"))}
        context.update({
            'text': resp['text'],
            'img': ('data:image/jpeg;base64, ' + frame_b64.decode("utf-8"))
        })
        #return HttpResponseRedirect("/")
        #return JsonResponse(context)
        return redirect('/home')
    else:
        return render(request, 'home/home.html', context)
def recognize_voice(data, link, token, wit_token):
    """Download a VK voice message, transcribe it with Wit, reply to the user.

    :param data: VK event payload; only user_id is read
    :param link: URL of the audio document to download
    :param token: VK API token used for the reply
    :param wit_token: Wit.ai access token
    """
    user_id = data['user_id']
    client = Wit(wit_token)
    doc = requests.get(link)
    resp = None
    with closing(doc):
        try:
            resp = client.speech(doc.content, None, {'Content-Type': 'audio/mpeg3'})
            resp = str(resp['_text'])
        except Exception:
            # Narrowed from a bare except: any failure yields the apology text.
            resp = "Не удалось распознать сообщение"
        finally:
            # Always answer the user, success or not.
            vkapi.send_message(user_id, token, resp, '')
    return
def ask_wit(msg, page_id):
    """Match `msg` against the page's knowledge base via Wit.ai intents.

    Returns the stored answer string, or False when no confident match exists.
    """
    client = Wit('GQ4J2DTDIZSTOFHZ744JOP5MWXKWQCX2')
    response = client.message(msg)
    print(response)
    if response['intents'] == []:
        return False
    intent = response['intents'][0]
    knowledge = Catalog.find_by_page_id(page_id).knowledge['comments']['values']
    print(msg)
    print(knowledge)
    print(intent)
    # Guard clause replaces the original nested confidence check.
    if intent['confidence'] <= 0.55:
        return False
    for entry in knowledge:
        if entry['key'] == intent['name']:
            print(entry)
            return entry['value']
    return False
def main():
    """Button-triggered loop: read typed text and log Wit.ai's analysis."""
    button = aiy.voicehat.get_button()
    client = Wit('66665YBMQQL64GNF6PJV7OGWBFBQGI56')
    print(".....check client........OK")
    while True:
        print('Press the button and input the text')
        logger.info('Press the button and input the text')
        button.wait_for_press()
        logger.info('Listening.....')
        text = input()
        # Empty input: go straight back to waiting for the button.
        if not text:
            continue
        logger.info('recognize text is : %s ', text)
        resp = client.message(text)
        logger.info(resp)
        json_manager.saveJson(resp)
def _find_sentiment(self, text): wit_client = Wit(Config['wit_access_token']) # texts = text.split('.') # for text in texts: # if len(text) < 3: # continue # print("len: {}".format(len(text))) # wit_reply = wit_client.message(text) self._url_info = UrlInfo(self._url)
async def video_to_text(video_url) -> str:
    """
    Extract speech text from a video file.

    Downloads the video, strips the audio track, splits it into segments and
    transcribes every segment through wit.ai concurrently.

    :param video_url: URL of the video file
    :return: str - recognized speech as one text string
    """
    extracted_text = ""
    video_url = video_url
    # Unique per-call prefix keeps parallel invocations' temp files apart.
    datetime_prefix = generate_filename_prefix()
    filename_prefix = 'data/' + datetime_prefix
    try:
        # Download the video file.
        download_video(video_url=video_url, filename_prefix=filename_prefix)
        # Extract the audio track.
        extract_sound_from_video(filename_prefix=filename_prefix)
        # Split the audio track into segments.
        split_audio(filename_prefix=filename_prefix)
        # Collect the produced segment files.
        audio_files = get_file_list_by_prefix(datetime_prefix + 'segment')
        print(audio_files)
        wit_client = Wit(WIT_AI_ACCESS_TOKEN)
        # Transcribe all segments concurrently.
        tasks = [
            asyncio.create_task(
                extract_text_from_audio_file(wit_client=wit_client, audio_filename='data/' + filename))
            for filename in audio_files
        ]
        extracted_chunks = await asyncio.gather(*tasks)
        # Concatenate the chunks in segment order.
        for chunk in extracted_chunks:
            extracted_text += chunk + ' '
    finally:
        # Always clean up the temp files, even on failure.
        clear_data_directory()
    return extracted_text.strip()
def do(text):
    """Route `text` through Wit.ai and answer by intent.

    Supported intents: weather (forecastio), greeting, wikipedia, curse;
    anything else gets a fallback message.

    :param text: user utterance
    :return: answer string
    """
    witClient = Wit(access_token='Z2M5NG4DUAOD3IH24BNQSXGM4LGIK4PU')
    # NOTE(review): the Wolfram client is created but never used here.
    wolframClient = wolframalpha.Client('5G696A-TT6AEK7L74')
    response = witClient.message(text)
    # Assumes Wit always returns an 'intent' entity; raises KeyError
    # otherwise — TODO confirm the Wit app guarantees this.
    intent = response['entities']['intent'][0]['value']
    if intent == 'weather':
        # Geocode the spoken location, then fetch the hourly forecast summary.
        loc = Nominatim()
        loc = loc.geocode(response['entities']['location'][0]['value'])
        forecast = forecastio.load_forecast('17e86a360729736b727899a8135e33ad', loc.latitude, loc.longitude)
        return forecast.hourly().summary
    elif intent == 'greeting':
        return random.choice(greetings)
    elif intent == 'wikipedia':
        # One-sentence summary of the 'contact' entity value.
        return wikipedia.summary(response['entities']['contact'][0]['value'], sentences=1)
    elif intent == 'curse':
        return 'F**k you too!'
    else:
        return 'I did not understand what you said.'
def start(self):
    """Starts Jeeves: set up recognizer, mic and Wit, then run states forever."""
    recognizer = sr.Recognizer()
    microphone = sr.Microphone()
    self.r = recognizer
    self.mic = microphone
    self.wit = Wit(os.environ["WIT_ACCESS_TOKEN"])
    # State-machine loop: each state's run() returns its successor.
    while True:
        current = self.state
        print(current)
        self.state = current.run()
def get_intent_and_food(inStr):
    """Classify `inStr` with Wit.ai and pull out the intent and food entity.

    :return: (status, intent, food) — status 0 on success, 1 when no intent
             was found, 2 when an intent but no food entity was found.
    """
    client = Wit(settings.WIT_ACCESS_TOKEN)
    resp = client.message(inStr)
    try:
        intent = resp['intents'][0]['name']
    except (KeyError, IndexError):
        # Narrowed from a bare except: response carried no intents.
        return 1, '', ''
    try:
        food = resp['entities']['food:food'][0]['body']
    except (KeyError, IndexError):
        # Intent found, but no food entity.
        return 2, intent, ''
    return 0, intent, food
def main(arg1):
    """Run Wit actions for the query until the conversation reaches 'stop'.

    :param arg1: query text passed to Wit's run_actions
    """
    query = arg1
    client = Wit(access_token=access_token, actions=actions)
    msg = client.run_actions('session1', query, {})
    if 'type' in msg:
        while msg['type'] != 'stop':
            # py2 print statement converted; the 'not in … pass/else' shape
            # is inverted into a direct membership test.
            if 'msg' in msg:
                print(msg['msg'])
            # NOTE(review): the follow-up turn uses a different session id
            # ('test_session' vs 'session1'); confirm this is intentional.
            msg = client.run_actions('test_session', query, {})
def __init__(self, access_token=None):
    """
    This method for initializing created objects
    """
    self.conf = parse_yaml("conf.yaml")
    # Fall back to the token stored in conf.yaml when none is supplied.
    self.access_token = access_token if access_token is not None else self.conf['access_token']
    print("Access Token is:", self.access_token)
    #create wit client
    self.client = Wit(self.access_token)
def intents(self, request, **kwargs):
    """Tastypie endpoint (GET): return the list of intents known to Wit."""
    self.method_check(request, allowed=['get'])
    self.is_authenticated(request)
    self.throttle_check(request)
    self.wit = Wit(self.configuration['wit_token'])
    from tastypie.http import HttpAccepted, HttpNotModified
    try:
        intents = self.wit.get_intents()
    except Exception:
        # Narrowed from a bare except; log and report failure to the caller.
        log.err()
        return self.create_response(request, {'status': 'failure'}, HttpNotModified)
    self.log_throttled_access(request)
    return self.create_response(request, {
        'status': 'success',
        'intents': intents
    }, HttpAccepted)
def __init__(self):
    """Set up NLG (English + Vietnamese), speech, per-language Wit clients and
    the knowledge backend, then play the startup sound.
    """
    self.nlg = NLG(user_name=my_name)
    self.nlg_vn = NLG(user_name=my_name_vn, language='vi')
    self.speech = Speech(launch_phrase=launch_phrase, debugger_enabled=debugger_enabled)
    # Separate Wit.ai apps per language.
    self.wit_client_vn = Wit(wit_ai_token_vn)
    self.wit_client_en = Wit(wit_ai_token)
    self.knowledge = Knowledge(weather_api_token, google_cloud_api_key=google_cloud_api_key)
    #self.vision = Vision(camera=camera)
    self.bot_vn = 'ty'
    # Play the startup chime asynchronously via aplay.
    subprocess.Popen([
        "aplay",
        "/home/pi/AI-Smart-Mirror/sample-audio-files/Startup.wav"
    ], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Give the sound time to finish before continuing startup.
    time.sleep(4)
def __init__(self):
    """Create the Wit client with this bot's action handlers registered."""
    self.access_token = '3Y2PWQBZDNOXGIRR46CNAOYBQJ5GYN3C'
    # Map Wit action names to the private handler methods.
    handlers = {
        'send': self.__send,
        'sayHello': self.__sayHello,
        'getPerson': self.__getPerson,
        'checkMusic': self.__checkMusic,
        'getState': self.__getState
    }
    self.actions = handlers
    self.client = Wit(access_token=self.access_token, actions=self.actions)
class WitApi:
    """Minimal Wit.ai text client configured from the environment."""

    @staticmethod
    def retrieve_token():
        """Read the Wit token from the WIT_TOKEN environment variable."""
        # Was a bare def without self — that only works as a plain function
        # on Python 3; @staticmethod makes the intent explicit.
        return os.environ["WIT_TOKEN"]

    def __init__(self):
        self.token = WitApi.retrieve_token()
        self.wit_client = Wit(self.token)

    def send_text(self, text):
        """Return Wit's analysis of `text`."""
        return self.wit_client.message(text)
def __init__(self, slack_clients, wit_token):
    """Store the slack clients and build a Wit client from the WIT_TOKEN env var.

    Fixes two syntax errors in the original: mismatched quotes in the
    os.getenv call, and a positional dict passed after a keyword argument
    to Wit() (the actions dict must be passed as actions=...).
    """
    self.clients = slack_clients
    wit_token = os.getenv('WIT_TOKEN', '')
    logging.info("wit token: {}".format(wit_token))
    self.wit_client = Wit(access_token=wit_token,
                          actions={'send': self.send_message,
                                   'action': self.write_help_message})
def consult_with_wit():
    """Flask endpoint: forward the posted text to Wit.ai and return the result.

    Reads {"text": ...} from the request JSON, lets handle_response build the
    final answer from Wit's analysis, and returns it as JSON so the browser
    can speak the results.
    """
    # receiving data from axios and getting the text value from it
    payload = request.get_json()
    text = payload["text"]
    print(text)
    # sending the text to wit.ai and receiving a response
    wit_client = Wit(access_token=wit_access_token)
    resp_content = wit_client.message(text)
    print("RESPONSE CONTENT FROM WIT.AI: ", resp_content)
    # Build the final answer string from Wit's response (may hit another API).
    final_output = handle_response(resp_content)
    return jsonify(final_output)
def __init__(self, wit_key, rive, bot, nyt_api, mongo):
    """Wire up the message processor's external services and constants.

    :param wit_key: Wit.ai access token
    :param rive: RiveScript engine
    :param bot: messaging bot instance
    :param nyt_api: NYT API client
    :param mongo: Mongo handle
    """
    self.BOT = bot
    self.NYT_API = nyt_api
    self.wit_actions = {}
    self.wit_client = Wit(access_token=wit_key, actions=self.wit_actions)
    self.wit_empty_response = {'entities': []}
    # Minimum confidence for treating a Wit result as a search query.
    self.WIT_SEARCH_QUERY_CONFIDENCE_THRESH = 0.5
    self.RIVE = rive
    self.MONGO = mongo
    # Load the emoji table with a context manager — the original
    # pickle.load(open(...)) leaked the file handle.
    with open("message_processor/unicode_emoji.pickle", "rb") as fh:
        self.emojis = pickle.load(fh)
def witintents(self, request, **kwargs):
    """Tastypie endpoint (GET): return the list of intents known to Wit."""
    self.method_check(request, allowed=['get'])
    self.is_authenticated(request)
    self.throttle_check(request)
    self.wit = Wit(configuration['wit_token'])
    from tastypie.http import HttpAccepted, HttpNotModified
    try:
        intents = self.wit.get_intents()
    except Exception:
        # Narrowed from a bare except; log and report failure to the caller.
        log.err()
        return self.create_response(request, {'status': 'failure'}, HttpNotModified)
    self.log_throttled_access(request)
    return self.create_response(request, {
        'status': 'success',
        'intents': intents
    }, HttpAccepted)
def processText(token, text, sessionId, funcs=None, context=None):
    """Run Wit actions on `text`, collecting every say() message.

    :param token: Wit access token
    :param text: user utterance to process
    :param sessionId: Wit conversation session id
    :param funcs: optional dict of extra action callables
    :param context: optional initial conversation context
    :return: (messages, final_context) tuple
    """
    collected = []

    def say(session_id, context, msg):
        collected.append(msg)

    def error(session_id, context, msg):
        print(u"aiwitutils.processText.error: [{msg}]".format(msg=msg))

    actions = dict(funcs) if isinstance(funcs, dict) else {}
    actions["say"] = say
    actions["error"] = error
    # Fall back to the default merge handler unless the caller supplied one.
    actions.setdefault("merge", _mergeDefault)
    client = Wit(token, actions)
    finalCtx = client.run_actions(sessionId, text, context if context else {})
    return (collected, finalCtx)
class RulesEngine():
    """Matches Wit.ai outcomes against Mongo-stored rules and intent plugins."""

    def __init__(self):
        client = MongoClient(configuration['database']['server'], configuration['database']['port'])
        self.database = client.lisa
        self.wit = Wit(configuration['wit_server_token'])

    def Rules(self, jsonData, lisaprotocol):
        """Process one incoming message: run 'before' rules, dispatch to the
        intent's plugin, then run 'after' rules.

        :param jsonData: incoming message dict (body/from/type/zone, or an
                         already-computed Wit 'outcome')
        :param lisaprotocol: protocol object available to the exec'd rules
        """
        rulescollection = self.database.rules
        intentscollection = self.database.intents
        # Use a pre-computed Wit outcome when present, otherwise query Wit.
        if "outcome" in jsonData.keys():
            jsonInput = {}
            jsonInput['outcome'] = jsonData['outcome']
        else:
            jsonInput = self.wit.get_message(unicode(jsonData['body']))
        jsonInput['from'], jsonInput['type'], jsonInput['zone'] = jsonData['from'], jsonData['type'], jsonData['zone']
        if configuration['debug']['debug_before_before_rule']:
            log.msg(unicode(_("Before 'before' rule: %(jsonInput)s" % {'jsonInput': str(jsonInput)})))
        # SECURITY NOTE(review): rules are exec'd straight from the database —
        # anyone who can write the rules collection can run arbitrary code.
        for rule in rulescollection.find({"enabled": True, "before": {"$ne":None}}).sort([("order", 1)]):
            exec(rule['before'])
        if configuration['debug']['debug_after_before_rule']:
            log.msg(unicode(_("After 'before' rule: %(jsonInput)s" % {'jsonInput': str(jsonInput)})))
        if configuration['debug']['debug_wit']:
            log.msg("WIT: " + str(jsonInput['outcome']))
        # Dispatch to the plugin registered for this intent when confident enough.
        oIntent = intentscollection.find_one({"name": jsonInput['outcome']['intent']})
        if oIntent and jsonInput['outcome']['confidence'] >= configuration['wit_confidence']:
            instance = namedAny(str(oIntent["module"]))()
            methodToCall = getattr(instance, oIntent['function'])
            jsonOutput = methodToCall(jsonInput)
        else:
            # No plugin, or confidence below threshold: canned apology.
            jsonOutput = {}
            jsonOutput['plugin'] = "None"
            jsonOutput['method'] = "None"
            jsonOutput['body'] = _("I have not the right plugin installed to answer you correctly")
            jsonOutput['from'] = jsonData['from']
        if configuration['debug']['debug_before_after_rule']:
            log.msg(unicode(_("Before 'after' rule: %(jsonOutput)s" % {'jsonOutput': str(jsonOutput)})))
        for rule in rulescollection.find({"enabled": True, "after": {"$ne":None}}).sort([("order", 1)]):
            exec(rule['after'])
            #todo it doesn't check if the condition of the rule after has matched to end the rules
            if rule['end']:
                break
        if configuration['debug']['debug_after_after_rule']:
            log.msg(unicode(_("After 'after' rule: %(jsonOutput)s" % {'jsonOutput': str(jsonOutput)})))
def __init__(self, factory):
    """Recorder thread: configure the Wit client and recording state, then
    start the thread immediately.
    """
    # Init thread class
    threading.Thread.__init__(self)
    self._stopevent = threading.Event()
    self.running_state = False
    self.rec_sink = None
    self.factory = factory
    self.configuration = ConfigManager.getConfiguration()
    self.wit = Wit(self.configuration['wit_token'])
    self.wit_context = None
    # Recording bookkeeping: activation flag, start/end timestamps, flags
    # and the captured audio buffers.
    self.record = {'activated' : False, 'start' : 0, 'started' : False, 'end' : 0, 'ended' : False, 'buffers' : deque({})}
    self.continuous_mode = False
    # Scratch file used for the captured audio.
    self.temp_file = "/tmp/asr_sound"
    # Start thread
    threading.Thread.start(self)
class WitMiddleware(MiddlewareMixin):
    """Bot middleware that feeds each text message through Wit's run_actions,
    keeping the conversation context in per-user storage.
    """

    WIT_DATA_KEY = "_wit"
    WIT_CONTEXT_KEY = "_context"

    def __init__(self, *args, **kwargs):
        from wit import Wit
        self.access_token = kwargs.pop("access_token", None)
        self.actions = kwargs.pop("actions", {})
        self.client = Wit(access_token=self.access_token, actions=self.actions)
        super().__init__(*args, **kwargs)

    def process_request(self, request: BotRequest):
        """Run Wit actions on the request text and record the updated context."""
        if not request.text:
            return
        stored = request.user_storage.get(request.user_id)
        wit_data = stored.get(self.WIT_DATA_KEY, {})
        previous_context = wit_data.get(self.WIT_CONTEXT_KEY, {})
        updated_context = self.client.run_actions(str(request.user_id), request.text, previous_context)
        # NOTE(review): when the stored data lacks WIT_DATA_KEY, this writes
        # into a fresh dict that is never saved back to storage — confirm
        # persistence works as intended.
        wit_data[self.WIT_CONTEXT_KEY] = updated_context
class Intents:
    """Lists the Wit intents enabled for this Lisa instance."""

    def __init__(self, lisa=None):
        self.lisa = lisa
        self.configuration = configuration
        mongo = MongoClient(host=self.configuration['database']['server'], port=self.configuration['database']['port'])
        self.database = mongo.lisa
        self.wit = Wit(self.configuration['wit_token'])

    def list(self, jsonInput):
        """Return a spoken summary built from the enabled intents' tts metadata."""
        intentstr = []
        listintents = self.wit.get_intents()
        # NOTE(review): `oIntents` is not defined in this module view —
        # presumably the mongoengine intents document class; confirm.
        for oIntent in oIntents.objects(enabled=True):
            for witintent in listintents:
                print(witintent)  # py2 print statement converted
                if witintent["name"] == oIntent.name and 'metadata' in witintent:
                    if witintent['metadata']:
                        metadata = json.loads(witintent['metadata'])
                        intentstr.append(metadata['tts'])
        return {"plugin": "Intents",
                "method": "list",
                "body": unicode(_('I can %(intentslist)s' % {'intentslist': ', '.join(intentstr)}))}
def __init__(self, lisa_client, listener):
    """Recorder thread: hooks Wit-based ASR into the GStreamer pipeline.

    Captures audio buffers from the listener's pipeline, using vader events
    to delimit utterances, and detaches pocketsphinx while recording.
    """
    # Init thread class
    threading.Thread.__init__(self)
    self._stopevent = threading.Event()
    self.lisa_client = lisa_client
    self.configuration = ConfigManagerSingleton.get().getConfiguration()
    self.pipeline = listener.get_pipeline()
    self.capture_buffers = deque([])
    self.running_state = False
    self.wit = Wit(self.configuration['wit_token'])
    # Minimum Wit confidence to act on, overridable via configuration.
    self.wit_confidence = 0.5
    if self.configuration.has_key('confidence'):
        self.wit_confidence = self.configuration['wit_confidence']
    self.record_time_start = 0
    self.record_time_end = 0
    # Get app sink
    self.rec_sink = self.pipeline.get_by_name('rec_sink')
    self.rec_sink.connect('new-buffer', self._capture_audio_buffer)
    # Configure vader
    # Using vader on pocketsphinx branch and not a vader on record branch,
    # because vader forces stream to 8KHz, so record quality would be worst
    vader = self.pipeline.get_by_name('vad_asr')
    vader.connect('vader-start', self._vader_start)
    vader.connect('vader-stop', self._vader_stop)
    # Get elements to connect/disconnect pockesphinx during record
    self.asr_tee = self.pipeline.get_by_name('asr_tee')
    self.asr_sink = self.pipeline.get_by_name('asr_sink')
    self.asr = self.pipeline.get_by_name('asr')
    self.asr_tee.unlink(self.asr_sink)
    # Start thread
    self.start()
class LisaResource(tastyresources.Resource):
    """Tastypie REST resource exposing engine-level operations of L.I.S.A:
    configuration dump, version check, engine/scheduler reload, Wit intents
    listing, text-to-speech (Google / pico2wave) and broadcasting messages
    to connected clients.
    """

    class Meta:
        resource_name = 'lisa'
        allowed_methods = ()
        authorization = authorization.Authorization()
        object_class = Lisa
        authentication = MultiAuthentication(CustomApiKeyAuthentication())
        # Declarative description of the custom endpoints (used by the API
        # self-documentation); routing itself happens in prepend_urls().
        extra_actions = [
            {
                'name': 'configuration',
                'http_method': 'GET',
                'resource_type': 'list',
                'fields': {}
            },
            {
                'name': 'version',
                'http_method': 'GET',
                'resource_type': 'list',
                'fields': {}
            },
            {
                'name': 'engine/reload',
                'http_method': 'GET',
                'resource_type': 'list',
                'fields': {}
            },
            {
                'name': 'scheduler/reload',
                'http_method': 'GET',
                'resource_type': 'list',
                'fields': {}
            },
            {
                'name': 'witintents',
                'http_method': 'GET',
                'resource_type': 'list',
                'fields': {}
            },
            {
                'name': 'speak',
                'http_method': 'POST',
                'resource_type': 'list',
                'fields': {
                    'message': {
                        'type': 'string',
                        'required': True,
                        'description': 'The message to transmit to client(s)',
                        'paramType': 'body'
                    },
                    'clients_zone': {
                        'type': 'list',
                        'required': True,
                        'description': "Provide a list of zones : ['all','WebSocket','Bedroom'] ...",
                        'paramType': 'body'
                    }
                }
            },
            {
                'name': 'tts-google',
                'http_method': 'POST',
                'resource_type': 'list',
                'fields': {
                    'message': {
                        'type': 'string',
                        'required': True,
                        'description': 'The message to vocalize',
                        'paramType': 'body'
                    },
                    'lang': {
                        'type': 'string',
                        'required': True,
                        'description': 'Lang of the message',
                        'paramType': 'body'
                    }
                }
            },
            {
                'name': 'tts-pico',
                'http_method': 'POST',
                'resource_type': 'list',
                'fields': {
                    'message': {
                        'type': 'string',
                        'required': True,
                        'description': 'The message to vocalize',
                        'paramType': 'body'
                    },
                    'lang': {
                        'type': 'string',
                        'required': True,
                        'description': 'Lang of the message',
                        'paramType': 'body'
                    }
                }
            }
        ]

    def prepend_urls(self):
        """Route each custom endpoint to its handler method."""
        return [
            url(r"^(?P<resource_name>%s)/configuration%s$" % (self._meta.resource_name, trailing_slash()),
                self.wrap_view('configuration'), name="api_lisa_configuration"),
            url(r"^(?P<resource_name>%s)/version%s$" % (self._meta.resource_name, trailing_slash()),
                self.wrap_view('version'), name="api_lisa_version"),
            url(r"^(?P<resource_name>%s)/engine/reload%s$" % (self._meta.resource_name, trailing_slash()),
                self.wrap_view('engine_reload'), name="api_lisa_engine_reload"),
            url(r"^(?P<resource_name>%s)/scheduler/reload%s" % (self._meta.resource_name, trailing_slash()),
                self.wrap_view('scheduler_reload'), name="api_lisa_scheduler_reload"),
            url(r"^(?P<resource_name>%s)/tts-google%s" % (self._meta.resource_name, trailing_slash()),
                self.wrap_view('tts_google'), name="api_lisa_tts_google"),
            url(r"^(?P<resource_name>%s)/tts-pico%s" % (self._meta.resource_name, trailing_slash()),
                self.wrap_view('tts_pico'), name="api_lisa_tts_pico"),
            url(r"^(?P<resource_name>%s)/speak%s" % (self._meta.resource_name, trailing_slash()),
                self.wrap_view('speak'), name="api_lisa_speak"),
            url(r"^(?P<resource_name>%s)/witintents%s" % (self._meta.resource_name, trailing_slash()),
                self.wrap_view('witintents'), name="api_lisa_witintents"),
        ]

    def speak(self, request, **kwargs):
        """POST/GET: broadcast `message` to the clients in `clients_zone`
        through the Lisa protocol singleton.
        """
        self.method_check(request, allowed=['post', 'get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        from tastypie.http import HttpAccepted, HttpNotModified
        # Body is deserialized according to the request Content-Type
        # (defaults to JSON).
        data = self.deserialize(request, request.body, format=request.META.get('CONTENT_TYPE', 'application/json'))
        message = data.get('message', '')
        clients_zone = data.get('clients_zone', '')
        # Dead code kept by the original author as a reference (string literal,
        # never executed):
        """
        print request.body
        if request.method == 'POST':
            message = request.POST.get("message")
            clients_zone = request.POST.getlist("clients_zone")
        else:
            message = request.GET.get("message")
            clients_zone = request.GET.getlist("clients_zone")
        """
        jsondata = json.dumps({
            'body': message,
            'clients_zone': clients_zone,
            'from': "API",
            'type': "chat"
        })
        LisaProtocolSingleton.get().answerToClient(jsondata=jsondata)
        self.log_throttled_access(request)
        return self.create_response(request, {'status': 'success', 'log': "Message sent"}, HttpAccepted)

    def tts_google(self, request, **kwargs):
        """POST: synthesize `message` via the (unofficial) Google Translate TTS
        endpoint and stream back the concatenated MP3 chunks.

        The text is split on sentence boundaries, then on spaces, so every
        request stays under the service's length limit.
        """
        self.method_check(request, allowed=['post'])
        self.is_authenticated(request)
        self.throttle_check(request)
        from tastypie.http import HttpAccepted, HttpNotModified
        import re
        import requests
        from django.http import HttpResponse
        combined_sound = []
        try:
            if request.method == 'POST':
                message = request.POST.get("message")
                lang = request.POST.get("lang")
                if not message:
                    # In case there isn't form data, let's check the body
                    post = json.loads(request.body)
                    message = post['message']
                    lang = post['lang']
            # process text into chunks
            text = message.replace('\n', '')
            # Split keeps the '.' delimiters at odd indexes so they can be
            # re-joined to the preceding sentence below.
            text_list = re.split('(\.)', text)
            combined_text = []
            for idx, val in enumerate(text_list):
                if idx % 2 == 0:
                    combined_text.append(val)
                else:
                    joined_text = ''.join((combined_text.pop(), val))
                    if len(joined_text) < 100:
                        combined_text.append(joined_text)
                    else:
                        # Sentence too long: re-split on spaces into ~80-char parts.
                        subparts = re.split('( )', joined_text)
                        temp_string = ""
                        temp_array = []
                        for part in subparts:
                            temp_string += part
                            if len(temp_string) > 80:
                                temp_array.append(temp_string)
                                temp_string = ""
                        # append final part
                        temp_array.append(temp_string)
                        combined_text.extend(temp_array)
            # download chunks and write them to the output file
            for idx, val in enumerate(combined_text):
                # Headers mimic a browser; the endpoint rejects bare clients.
                headers = {"Host": "translate.google.com",
                           "Referer": "https://translate.google.com/",
                           "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.63 Safari/537.36"}
                r = requests.get("https://translate.google.com/translate_tts?ie=UTF-8&tl=%s&q=%s&total=%s&idx=%s&client=t&prev=input" % (
                    lang, val, len(combined_text), idx), headers=headers)
                combined_sound.append(r.content)
        except:
            # NOTE(review): bare except — any failure (including programming
            # errors) is logged and reported as a 304 'failure'.
            log.err()
            return self.create_response(request, {'status': 'failure'}, HttpNotModified)
        self.log_throttled_access(request)
        return HttpResponse(''.join(combined_sound), content_type="audio/mpeg", mimetype="audio/mpeg")

    def tts_pico(self, request, **kwargs):
        """POST/GET: synthesize `message` locally with pico2wave and return the
        WAV bytes (response is labeled audio/mpeg — kept as-is).
        """
        import uuid
        self.method_check(request, allowed=['post', 'get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        message = request.POST.get("message")
        lang = request.POST.getlist("lang")
        from tastypie.http import HttpAccepted, HttpNotModified
        from django.http import HttpResponse
        from subprocess import call, Popen
        combined_sound = []
        # Unique temp file per request; removed after reading.
        temp = dir_path + "/tmp/" + str(uuid.uuid4()) + ".wav"
        # pico2wave wants e.g. 'fr-FR' built from the first 'lang' value.
        language = str(lang[0])+'-'+str(lang[0]).upper()
        command = ['pico2wave', '-w', temp, '-l', language, '--', message]
        try:
            call(command)
            #combined_sound.append(content)
        except OSError:
            # pico2wave binary missing / not executable.
            log.err()
            return self.create_response(request, { 'status' : 'failure' }, HttpNotModified)
        f = open(temp,"rb")
        combined_sound.append(f.read())
        f.close()
        os.remove(temp)
        self.log_throttled_access(request)
        return HttpResponse(''.join(combined_sound), content_type="audio/mpeg", mimetype="audio/mpeg")

    def engine_reload(self, request, **kwargs):
        """GET: ask the Lisa factory singleton to reload the engine."""
        self.method_check(request, allowed=['get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        from tastypie.http import HttpAccepted, HttpNotModified
        try:
            LisaFactorySingleton.get().LisaReload()
        except:
            log.err()
            return self.create_response(request, { 'status' : 'failure' }, HttpNotModified)
        self.log_throttled_access(request)
        return self.create_response(request, { 'status': 'success', 'log': "L.I.S.A Engine reloaded"}, HttpAccepted)

    def witintents(self, request, **kwargs):
        """GET: fetch the intent list from the configured Wit.ai account."""
        self.method_check(request, allowed=['get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        # A fresh Wit client per call, built from the global configuration.
        self.wit = Wit(configuration['wit_token'])
        from tastypie.http import HttpAccepted, HttpNotModified
        try:
            intents = self.wit.get_intents()
        except:
            log.err()
            return self.create_response(request, { 'status' : 'failure' }, HttpNotModified)
        self.log_throttled_access(request)
        return self.create_response(request, { 'status': 'success', 'intents': intents}, HttpAccepted)

    def scheduler_reload(self, request, **kwargs):
        """GET: ask the Lisa factory singleton to reload the task scheduler."""
        self.method_check(request, allowed=['get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        from tastypie.http import HttpAccepted, HttpNotModified
        try:
            LisaFactorySingleton.get().SchedReload()
        except:
            log.err()
            return self.create_response(request, { 'status' : 'failure' }, HttpNotModified)
        self.log_throttled_access(request)
        return self.create_response(request, {'status': 'success', 'log': 'L.I.S.A Task Scheduler reloaded'}, HttpAccepted)

    def configuration(self, request, **kwargs):
        """GET: return the server configuration.

        NOTE(review): `copyconfiguration = configuration` does NOT copy — it
        aliases the global dict, so setting ['database'] = None mutates the
        global configuration for the whole process; the response then returns
        the (now mutated) `configuration` object anyway. Looks like the intent
        was a deep copy with the database credentials stripped — verify.
        """
        self.method_check(request, allowed=['get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        from tastypie.http import HttpAccepted, HttpNotModified
        self.log_throttled_access(request)
        copyconfiguration = configuration
        copyconfiguration['database'] = None
        return self.create_response(request, {'configuration': configuration}, HttpAccepted)

    def version(self, request, **kwargs):
        """GET: compare the installed lisa-server version to the latest on PyPI.

        NOTE(review): `remote_version > local_version` is a plain string
        comparison, not a semantic-version comparison — '10.0' < '9.0'.
        """
        from tastypie.http import HttpAccepted, HttpNotModified
        from pkg_resources import get_distribution
        import requests
        self.method_check(request, allowed=['get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        self.log_throttled_access(request)
        local_version = get_distribution('lisa-server').version
        should_upgrade = False
        r = requests.get('https://pypi.python.org/pypi/lisa-server/json')
        if r.status_code == requests.codes.ok:
            remote_version = r.json()['info']['version']
        else:
            return self.create_response(request,
                                        {'status': 'fail', 'log': 'Problem contacting pypi.python.org'},
                                        HttpAccepted)
        if remote_version > local_version:
            should_upgrade = True
        response = {
            'local_version': get_distribution('lisa-server').version,
            'remote_version': remote_version,
            'should_upgrade': should_upgrade
        }
        return self.create_response(request, response, HttpAccepted)

    def get_object_list(self, request):
        # Single synthetic object: this resource has no real collection.
        return [Lisa()]
# NOTE(review): the next three lines are the tail of a helper (presumably
# first_entity_value) whose `def` line lies outside this chunk; kept as-is.
    if not val:
        return None
    # Wit entity values are either dicts with a 'value' key or plain scalars.
    return val['value'] if isinstance(val, dict) else val


def select_joke(category):
    """Return a random joke from `category` ('default' when category is falsy).

    NOTE(review): shuffle() mutates the shared all_jokes list in place.
    """
    jokes = all_jokes[category or 'default']
    shuffle(jokes)
    return jokes[0]


def handle_message(response):
    """Map a Wit message() response to a chat reply string.

    Priority: joke request > sentiment ack > greeting > usage hint.
    """
    entities = response['entities']
    get_joke = first_entity_value(entities, 'getJoke')
    greetings = first_entity_value(entities, 'greetings')
    category = first_entity_value(entities, 'category')
    sentiment = first_entity_value(entities, 'sentiment')
    if get_joke:
        return select_joke(category)
    elif sentiment:
        return 'Glad you liked it.' if sentiment == 'positive' else 'Hmm.'
    elif greetings:
        return 'Hey this is joke bot :)'
    else:
        return 'I can tell jokes! Say "tell me a joke about tech"!'


# Interactive loop: reads stdin, sends each line to Wit, prints the reply.
client = Wit(access_token=access_token)
client.interactive(handle_message=handle_message)
def __init__(self):
    """Open the MongoDB connection and build the Wit client from the
    module-level `configuration` dict.
    """
    db_conf = configuration['database']
    mongo = MongoClient(db_conf['server'], db_conf['port'])
    self.database = mongo.lisa
    self.wit = Wit(configuration['wit_server_token'])
class ClientFactory(Factory):
    """Twisted protocol factory, used as a process-wide singleton.

    Tracks connected clients and the zones they belong to, and routes chat
    messages through Wit.ai intent decoding to the matching plugin.
    """
    # Singleton instance
    __instance = None

    #-----------------------------------------------------------------------------
    def __init__(self):
        # Check Singleton
        if ClientFactory.__instance is not None:
            raise Exception("Singleton can't be created twice !")
        # Variables init
        # clients/zones: uid -> metadata dicts; guarded by _lock.
        self.clients = {}
        self.zones = {}
        self._lock = threading.RLock()
        self.wit = None

    #-----------------------------------------------------------------------------
    def startFactory(self):
        # Init global contexts
        NeoContext.init(factory = self)
        # Init Wit
        self.wit = Wit(configuration['wit_token'])

    #-----------------------------------------------------------------------------
    def buildProtocol(self, addr):
        # Create protocol
        return LisaProtocol()

    #-----------------------------------------------------------------------------
    def stopFactory(self):
        # Clean
        if ClientFactory.__instance is not None:
            ClientFactory.__instance.clients = {}
            ClientFactory.__instance.zones = {}
            ClientFactory.__instance.wit = None
            ClientFactory.__instance = None
        # Clear global contexts
        NeoContext.deinit()

    #-----------------------------------------------------------------------------
    @classmethod
    def initClient(cls, client_name, zone_name):
        """Return the client record for (client_name, zone_name), creating the
        client (and its zone) on first sight.
        """
        # Create singleton
        if cls.__instance is None:
            cls.__instance = ClientFactory()
        self = cls.__instance
        # Lock access
        self._lock.acquire()
        # Get zone
        zone_uid = cls.getOrCreateZone(zone_name)
        # Search if we already had a connection with this client
        client = None
        client_uid = None
        for c in self.clients:
            if self.clients[c]['name'] == client_name and self.clients[c]['zone'] == zone_name:
                # NOTE(review): early return while _lock is still held — the
                # RLock is never released on this path; looks like a leak.
                return self.clients[c]
        # If not found
        if client is None:
            # Add client
            client_uid = str(uuid.uuid1())
            self.clients[client_uid] = {'uid': client_uid, 'protocols': {}, 'name': client_name, 'zone': zone_name, 'zone_uid': zone_uid}
            client = self.clients[client_uid]
            # Each client has its own context
            client['context'] = NeoContext(client_uid = client_uid)
        # Add client to zone
        found_flag = False
        for c in self.zones[zone_uid]['client_uids']:
            if c == client['uid']:
                found_flag = True
                break
        if found_flag == False:
            self.zones[zone_uid]['client_uids'].append(client['uid'])
        # Release access
        self._lock.release()
        return client

    #-----------------------------------------------------------------------------
    @classmethod
    def parseChat(cls, jsonData, client_uid):
        """Decode an incoming chat message (via Wit unless an 'outcome' is
        already attached) and dispatch it to the matching plugin intent.
        """
        # Create singleton
        if cls.__instance is None:
            cls.__instance = ClientFactory()
        self = cls.__instance
        # If input has already a decoded intent
        if jsonData.has_key("outcome") == True:
            jsonInput = {}
            jsonInput['outcome'] = jsonData['outcome']
        elif len(jsonData['body']) > 0:
            # Ask Wit for intent decoding
            jsonInput = self.wit.get_message(unicode(jsonData['body']))
        else:
            # No input => no output
            return
        # Initialize output from input
        jsonInput['from'], jsonInput['type'], jsonInput['zone'] = jsonData['from'], jsonData['type'], jsonData['zone']
        # Show wit result
        if configuration['debug']['debug_wit']:
            log.msg("WIT: " + str(jsonInput['outcome']))
        # Execute intent
        client = cls.getClient(client_uid)
        intent = PluginManager.getIntent(intent_name = jsonInput['outcome']['intent'])
        if intent is not None:
            # Call plugin
            client['context'].parse(jsonInput = jsonInput, plugin_name = intent.plugin_name, method_name = intent.method_name)
        else:
            # Parse without intent
            client['context'].parse(jsonInput = jsonInput)

    #-----------------------------------------------------------------------------
    @classmethod
    def getClient(cls, client_uid):
        # Get singleton
        if cls.__instance is None:
            return None
        # Return client (KeyError if the uid is unknown)
        return cls.__instance.clients[client_uid]

    #-----------------------------------------------------------------------------
    @classmethod
    def getOrCreateZone(cls, zone_name):
        """Return the uid of the named zone, creating it if needed.
        The literal "all" is a wildcard and is returned as-is.
        """
        # Create singleton
        if cls.__instance is None:
            cls.__instance = ClientFactory()
        # All zones
        if zone_name == "all":
            return "all"
        # Lock access
        cls.__instance._lock.acquire()
        # Search zone
        zone = None
        zone_uid = None
        for z in cls.__instance.zones:
            if cls.__instance.zones[z]['name'] == zone_name:
                zone = cls.__instance.zones[z]
                zone_uid = z
                break
        # If not found
        if zone is None:
            # Create zone
            zone_uid = str(uuid.uuid1())
            cls.__instance.zones[zone_uid] = {'name': zone_name, 'client_uids': []}
            zone = cls.__instance.zones[zone_uid]
        # Release access
        cls.__instance._lock.release()
        return zone_uid

    #-----------------------------------------------------------------------------
    @classmethod
    def sendToClients(cls, jsonData, client_uids = [], zone_uids = []):
        """Send jsonData to every client selected by uid or zone; "all" in
        either list is a wildcard.

        NOTE(review): mutable default arguments ([]) — harmless here because
        they are never mutated, but fragile.
        """
        # Create singleton
        if cls.__instance is None:
            cls.__instance = ClientFactory()
        # Parse clients
        for c in cls.__instance.clients:
            # If client is in destination
            if "all" in zone_uids or cls.__instance.clients[c]['zone_uid'] in zone_uids or "all" in client_uids or c in client_uids:
                # Parse client protocols
                for p in cls.__instance.clients[c]['protocols']:
                    # Send to client through protocol
                    cls.__instance.clients[c]['protocols'][p]['object'].sendToClient(jsonData)

    #-----------------------------------------------------------------------------
    @classmethod
    def LisaReload(cls):
        # Create singleton
        if cls.__instance is None:
            cls.__instance = ClientFactory()
        log.msg("Reloading engine")
        cls.__instance.build_activeplugins()

    #-----------------------------------------------------------------------------
    @classmethod
    def SchedReload(cls):
        global taskman
        # Create singleton
        if cls.__instance is None:
            cls.__instance = ClientFactory()
        log.msg("Reloading task scheduler")
        cls.__instance.taskman = taskman
        return cls.__instance.taskman.reload()

    #-----------------------------------------------------------------------------
    @classmethod
    def get(cls):
        # Create singleton
        if cls.__instance is None:
            cls.__instance = ClientFactory()
        return cls.__instance
class Recorder(threading.Thread):
    """Voice-record thread: captures audio buffers from the GStreamer
    pipeline after hotword activation, streams them to Wit.ai and forwards
    the recognized intent to the server.
    """

    def __init__(self, lisa_client, listener):
        # Init thread class
        threading.Thread.__init__(self)
        self._stopevent = threading.Event()
        self.lisa_client = lisa_client
        self.configuration = ConfigManagerSingleton.get().getConfiguration()
        self.pipeline = listener.get_pipeline()
        self.capture_buffers = deque([])
        self.running_state = False
        self.wit = Wit(self.configuration['wit_token'])
        # Minimum Wit confidence below which a result is rejected.
        self.wit_confidence = 0.5
        # NOTE(review): tests key 'confidence' but reads 'wit_confidence' —
        # KeyError if only 'confidence' is configured; verify intended key.
        if self.configuration.has_key('confidence'):
            self.wit_confidence = self.configuration['wit_confidence']
        self.record_time_start = 0
        self.record_time_end = 0
        # Get app sink
        self.rec_sink = self.pipeline.get_by_name('rec_sink')
        self.rec_sink.connect('new-buffer', self._capture_audio_buffer)
        # Configure vader
        # Using vader on pocketsphinx branch and not a vader on record branch,
        # because vader forces stream to 8KHz, so record quality would be worst
        vader = self.pipeline.get_by_name('vad_asr')
        vader.connect('vader-start', self._vader_start)
        vader.connect('vader-stop', self._vader_stop)
        # Get elements to connect/disconnect pockesphinx during record
        self.asr_tee = self.pipeline.get_by_name('asr_tee')
        self.asr_sink = self.pipeline.get_by_name('asr_sink')
        self.asr = self.pipeline.get_by_name('asr')
        self.asr_tee.unlink(self.asr_sink)
        # Start thread
        self.start()

    def stop(self):
        # Raise stop event
        self.running_state = False
        self._stopevent.set()

    def get_running_state(self):
        """ Is the recorder recording? """
        return self.running_state

    def set_running_state(self, running):
        """ Start/Stop a voice record """
        if running == True and self.running_state == False:
            self.running_state = True
            # Disconnect pocketsphinx from pipeline
            self.asr_tee.link(self.asr_sink)
            self.asr_tee.unlink(self.asr)
        elif running == True and self.running_state == True:
            # NOTE(review): this branch tests running == True twice; the stop
            # case was presumably meant to be `running == False` — verify.
            self.running_state = False

    def run(self):
        """ Recorder main loop """
        CONTENT_TYPE = 'audio/mpeg3'
        result = ""
        retry = 1
        # Thread loop
        while not self._stopevent.isSet():
            # Wait record order
            if self.running_state == False:
                sleep(.1)
                continue
            # Activate capture, wait for 2s of silence before cancelling
            wit_e = None
            self.record_time_start = 0
            self.record_time_end = time.time() + 2
            self.capture_buffers.clear()
            result = ""
            print '\n [Recording]' + ' ' * 20 + '[Recording]'
            # Send captured voice to wit (generator streams buffers live)
            try:
                result = self.wit.post_speech(data = self._read_audio_buffer(), content_type=CONTENT_TYPE)
            except Exception as e:
                wit_e = e
            # If record was stopped during recording
            if self.running_state == True:
                # If Wit did not succeeded
                if len(result) == 0 or result.has_key('outcome') == False or result['outcome'].has_key('confidence') == False or result['outcome']['confidence'] < self.wit_confidence:
                    if wit_e is not None:
                        log.err("Wit exception : " + str(e))
                    # If retry is available and vader detected an utterance
                    if self.record_time_start != 0 and retry > 0:
                        Speaker.speak('please_repeat')
                        # Decrement retries
                        retry = retry - 1
                        continue
                    # No more retry
                    Speaker.speak('not_understood')
                # Send recognized intent to the server
                else:
                    self.lisa_client.sendMessage(message=result['msg_body'], type='chat', dict=result['outcome'])
            # Reset running state
            self.running_state = False
            retry = 1
            # Reconnect pocketsphinx to pipeline
            print ""
            print "> Ready Recognize Voice"
            self.asr_tee.link(self.asr)
            self.asr_tee.unlink(self.asr_sink)

    def _vader_start(self, ob, message):
        """ Vader start detection """
        # Reset max recording time
        if self.running_state == True:
            if self.record_time_start == 0:
                self.record_time_start = time.time()
            # Hard cap: 10s of recording after the first utterance.
            self.record_time_end = self.record_time_start + 10

    def _vader_stop(self, ob, message):
        """ Vader stop detection """
        # Stop recording if no new sentence in next 2s
        if self.running_state == True:
            if self.record_time_start != 0 and self.record_time_end > time.time() + 2:
                self.record_time_end = time.time() + 2

    def _capture_audio_buffer(self, app):
        """ Gstreamer pipeline callback : Audio buffer capture """
        # Get buffer
        Buffer = self.rec_sink.emit('pull-buffer')
        # If recording is running
        if self.running_state == True:
            # Add buffer to queue
            self.capture_buffers.append(Buffer)

    def _read_audio_buffer(self):
        """ Read buffers from capture queue (generator consumed by Wit). """
        last_progress = -1
        # While recording is running
        while time.time() < self.record_time_end:
            # If there is a captured buffer
            if len(self.capture_buffers) > 0:
                data = self.capture_buffers.popleft()
                yield data
            else:
                # Wait another buffer
                sleep(.05)
            # Print progression (half-second granularity)
            if self.record_time_start != 0:
                progress = (int)(2 * (time.time() - self.record_time_start)) + 1
            else:
                progress = 0
            if last_progress != progress:
                last_progress = progress
                # '\x1b[1A' moves the cursor up one line to redraw in place.
                print '\x1b[1A',
                print '[Recording]' + '.' * progress + ' ' * (20 - progress) + '[Recording]'
def startFactory(self):
    """Twisted factory start hook: bring up the global context, then
    create the Wit.ai client from the module-level configuration.
    """
    NeoContext.init(factory=self)
    wit_client = Wit(configuration['wit_token'])
    self.wit = wit_client
from wit import Wit

access_token = 'YOUR_ACCESS_TOKEN'


def say(session_id, msg):
    """'say' action: echo the bot's message to stdout."""
    print(msg)


def merge(context, entities):
    """'merge' action: keep the conversation context unchanged."""
    return context


def error(session_id, msg):
    """'error' action: tell the user the bot is stuck."""
    print('Oops, I don\'t know what to do.')


# Wire the callbacks into the legacy story-based client and run one turn.
actions = dict(say=say, merge=merge, error=error)
client = Wit(access_token, actions)
session_id = 'my-user-id-42'
client.run_actions(session_id, 'your message', {})
class Recorder(threading.Thread):
    """ Continuous recording class.

    Buffers audio from the GStreamer pipeline, and once a record is
    activated (keyword spotted or continuous mode) sends it to one of three
    ASR backends (iSpeech, Google, or Wit speech) before forwarding the
    decoded intent to the server.
    """
    #-----------------------------------------------------------------------------
    def __init__(self, factory):
        # Init thread class
        threading.Thread.__init__(self)
        self._stopevent = threading.Event()
        self.running_state = False
        self.rec_sink = None
        self.factory = factory
        self.configuration = ConfigManager.getConfiguration()
        self.wit = Wit(self.configuration['wit_token'])
        self.wit_context = None
        # State of the current record; 'buffers' holds dicts of
        # {'time': capture timestamp, 'data': gst buffer}.
        self.record = {'activated' : False, 'start' : 0, 'started' : False, 'end' : 0, 'ended' : False, 'buffers' : deque({})}
        self.continuous_mode = False
        self.temp_file = "/tmp/asr_sound"
        # Start thread
        threading.Thread.start(self)

    #-----------------------------------------------------------------------------
    def setRunningState(self, state, rec_sink = None):
        self.running_state = state
        # Get app sink (connect the capture callback only once per sink)
        if rec_sink is not None and self.rec_sink is not rec_sink:
            self.rec_sink = rec_sink
            self.rec_sink.connect('new-buffer', self._captureAudioBuffer)

    #-----------------------------------------------------------------------------
    def stop(self):
        # Raise stop event
        self._stopevent.set()

    #-----------------------------------------------------------------------------
    def activate(self):
        """ Called to activate current utter as a record """
        # Activate record
        if self.record['started'] == True:
            self.record['activated'] = True

    #-----------------------------------------------------------------------------
    def setContinuousMode(self, enabled, wit_context = None):
        """ Called to activate continous record mode """
        # Activate record
        self.continuous_mode = enabled
        self.wit_context = wit_context

    #-----------------------------------------------------------------------------
    def run(self):
        """ Recorder main loop """
        # Encoded format
        CONTENT_TYPE = 'audio/mpeg3'
        # Thread loop
        while not self._stopevent.isSet():
            # Test if record is ended
            if self.record['started'] == True and self.record['ended'] == False and self.record['end'] <= time():
                # If current record was not activated before end
                if self.record['activated'] == False and self.continuous_mode == False:
                    self.record['start'] = 0
                    self.record['started'] = False
                    self.record['end'] = 0
                    self.record['ended'] = False
                    self.record['activated'] = False
                    continue
                # Current record is activated and already ended
                self.record['ended'] = True
            # If current record is not activated
            if self.running_state == False or self.record['started'] == False or (self.record['activated'] == False and self.continuous_mode == False):
                sleep(.1)
                continue
            # Send activated record to Wit
            wit_e = None
            result = ""
            try:
                if self.configuration['asr'] == "ispeech":
                    # Drain the generator: its side effect writes temp_file.
                    for b in self._readAudioBuffer(file_mode = True):
                        pass
                    params= {}
                    params["action"] = "recognize"
                    params["apikey"] = "developerdemokeydeveloperdemokey"
                    params["freeform"] = "3"
                    params["locale"] = "fr-FR"
                    params["output"] = "json"
                    params["content-type"] = "speex"
                    params["speexmode"] = "2"
                    # NOTE(review): file opened 'rt' (text) but fed to
                    # b64encode, and replace() is given bytes args — mixes
                    # str/bytes; verify under the targeted Python version.
                    params["audio"] = base64.b64encode(open(self.temp_file, 'rt').read()).replace(b'\n',b'')
                    result = requests.get("http://api.ispeech.org/api/rest?" + urlencode(params))
                    # iSpeech returns text; ask Wit for the intent.
                    result = self.wit.get_message(query = result.json()['text'], context = self.wit_context)
                elif self.configuration['asr'] == "google":
                    for b in self._readAudioBuffer(file_mode = True):
                        pass
                    url = 'https://www.google.com/speech-api/v2/recognize?output=json&lang=fr-fr&key=AIzaSyCQv4U1mTaw_r_j1bWb1peeaTihzV0q-EQ'
                    audio = open(self.temp_file, "rb").read()
                    header = {"Content-Type": "audio/x-flac; rate=16000"}
                    post = urlopen(Request(url, audio, header))
                    # Second line of the response holds the transcript JSON.
                    result = loads(post.read().split("\n")[1])['result'][0]['alternative'][0]['transcript']
                    result = self.wit.get_message(query = result, context = self.wit_context)
                # Defautl Wit
                else:
                    # Stream buffers straight to Wit's speech endpoint.
                    result = self.wit.post_speech(data = self._readAudioBuffer(), content_type = CONTENT_TYPE, context = self.wit_context)
                    result['msg_body'] = result['msg_body'].encode("utf-8")
            except Exception as e:
                wit_e = e
            # If record was stopped during Wit access
            if self._stopevent.isSet():
                break
            # Question mode
            if len(result) > 0 and self.continuous_mode == True and result.has_key('msg_body') == True and len(result['msg_body']) > 0:
                # Send answer
                self.factory.sendChatToServer(message = result['msg_body'], outcome = result['outcome'])
            # If Wit did not succeeded
            elif len(result) == 0 or result.has_key('outcome') == False or result['outcome'].has_key('confidence') == False or result['outcome']['confidence'] < self.configuration['wit_confidence']:
                if wit_e is not None:
                    log.err("Wit exception : {0}".format(str(e)))
                elif len(result) == 0:
                    log.err("No response from Wit")
                elif result.has_key('outcome') == False or result['outcome'].has_key('confidence') == False:
                    log.err("Wit response syntax error")
                    log.err("result : {0}".format(result))
                elif result['outcome']['confidence'] < self.configuration['wit_confidence']:
                    log.err("Wit confidence {confidence} too low : {result}".format(confidence = result['outcome']['confidence'], result = result['msg_body']))
                else:
                    # NOTE(review): unreachable — the enclosing elif condition
                    # guarantees one of the branches above matched.
                    log.err("Error response from Wit : {0}".format(result['msg_body']))
            # Send recognized intent to the server
            else:
                log.msg("Wit result : {0}".format(result['msg_body']))
                self.factory.sendChatToServer(message = result['msg_body'], outcome = result['outcome'])
            # Finish current record
            self.record['start'] = 0
            self.record['started'] = False
            self.record['end'] = 0
            self.record['ended'] = False
            self.record['activated'] = False
            self.record['buffers'].clear()

    #-----------------------------------------------------------------------------
    def vaderStart(self):
        """ Vader start detection """
        # If record is running
        if self.record['ended'] == False:
            # New start
            if self.record['started'] == False:
                self.record['started'] = True
                self.record['start'] = time()
            # Reset max recording time
            self.record['end'] = self.record['start'] + MAX_RECORD_DURATION_s

    #-----------------------------------------------------------------------------
    def vaderStop(self):
        """ Vader stop detection """
        # If record is running
        if self.record['ended'] == False and self.record['end'] > time() + MAX_SILENCE_s:
            # End recording when no new activity during next silence
            self.record['end'] = time() + MAX_SILENCE_s

    #-----------------------------------------------------------------------------
    def _captureAudioBuffer(self, app):
        """ Gstreamer pipeline callback : Audio buffer capture """
        # Get buffer
        buf = self.rec_sink.emit('pull-buffer')
        # If record is running
        if self.record['started'] == True and self.record['ended'] == False:
            cur_time = time()
            # Add buffer to queue
            self.record['buffers'].append({'time' : cur_time, 'data' : buf})
            # Delete too old buffers when utter is not activated
            if self.record['activated'] == False and self.continuous_mode == False:
                while self.record['buffers'][0]['time'] + MAX_TIME_BEFORE_KWS_s < cur_time:
                    self.record['buffers'].popleft()

    #-----------------------------------------------------------------------------
    def _readAudioBuffer(self, file_mode = False):
        """ Read buffers from capture queue.

        Generator yielding merged audio chunks (< 1200 bytes each); when
        file_mode is True it also writes every chunk to self.temp_file.
        """
        # Check current record
        if self.record['activated'] == False and self.continuous_mode == False:
            return
        f = None
        if file_mode == True:
            f = open(self.temp_file, "wb")
        # While recording is running
        log.msg("Wit send start")
        while not self._stopevent.isSet():
            # Test if record is ended
            if self.record['ended'] == False and self.record['end'] <= time():
                self.record['ended'] = True
            # If there is no captured buffer
            if len(self.record['buffers']) == 0:
                # No more buffer when record is ended, it's over
                if self.record['ended'] == True:
                    break
                # Record is live, wait another buffer
                sleep(.05)
                continue
            # Read buffer (merge consecutive buffers up to ~1200 bytes)
            buf = None
            while len(self.record['buffers']) > 0 and (buf is None or len(buf) + len(self.record['buffers'][0]['data']) < 1200):
                data = self.record['buffers'].popleft()
                if buf is None:
                    buf = data['data']
                else:
                    buf = buf.merge(data['data'])
            if file_mode == True:
                f.write(buf)
            yield buf
        if file_mode == True:
            f.close()
        log.msg("Wit send end")
# NOTE(review): the following lines are the tail of a 'merge' action whose
# `def` line lies outside this chunk; kept as-is.
    new_context['cat'] = category
    sentiment = first_entity_value(entities, 'sentiment')
    if sentiment:
        new_context['ack'] = 'Glad you liked it.' if sentiment == 'positive' else 'Hmm.'
    elif 'ack' in new_context:
        # No sentiment this turn: drop any stale acknowledgement.
        del new_context['ack']
    return new_context


def error(session_id, msg):
    """Wit 'error' action: report that the bot is stuck."""
    print('Oops, I don\'t know what to do.')


def select_joke(context):
    """Wit custom action: pick a random joke for context['cat'] (falling back
    to 'default') and store it under 'joke' in a copied context.
    """
    new_context = dict(context)   # copy so the caller's context is not mutated
    jokes = all_jokes[new_context['cat'] or 'default']
    shuffle(jokes)
    new_context['joke'] = jokes[0]
    return new_context


# Action table handed to the legacy story-based Wit client.
actions = {
    'say': say,
    'merge': merge,
    'error': error,
    'select-joke': select_joke,
}
client = Wit(access_token, actions)
session_id = 'my-user-id-42'
client.run_actions(session_id, 'tell me a joke about tech', {})
# NOTE(review): the next three lines are the tail of a helper (presumably
# first_entity_value) whose `def` line lies outside this chunk; kept as-is.
    if not val:
        return None
    # Wit entity values are either dicts with a 'value' key or plain scalars.
    return val['value'] if isinstance(val, dict) else val


def say(session_id, msg):
    """Wit 'say' action: print the bot's message."""
    print(msg)


def merge(session_id, context, entities, msg):
    """Wit 'merge' action: copy a recognized location entity into context."""
    loc = first_entity_value(entities, 'location')
    if loc:
        context['loc'] = loc
    return context


def error(session_id, context, e):
    """Wit 'error' action: print the error."""
    print(str(e))


def fetch_weather(session_id, context):
    """Wit custom action: stub forecast — always 'sunny'."""
    context['forecast'] = 'sunny'
    return context


# Action table handed to the legacy story-based Wit client.
actions = {
    'say': say,
    'merge': merge,
    'error': error,
    'fetch-weather': fetch_weather,
}
client = Wit(access_token, actions)
session_id = 'my-user-id-42'
client.run_actions(session_id, 'weather in London', {})
# Standard Modules
import sys
import os

# Modules from the GitHub
from wit import Wit

# The chunks
from recorder import Recorder
from interpreter import Interpreter

# Constants
SECONDS = 4

if __name__ == '__main__':
    # Make sure a Wit.AI token is available, prompting once if it is not
    # already in the environment (Python 2 raw_input).
    if 'WIT_TOKEN' not in os.environ:
        os.environ['WIT_TOKEN'] = raw_input("Enter your Wit.AI token: ")
    token = os.environ['WIT_TOKEN']

    # Build the pipeline pieces: a fixed-length recorder, an interpreter
    # for the decoded result, and the Wit client itself.
    recording = Recorder(SECONDS)
    interpreting = Interpreter()
    wit_client = Wit(token)

    # Record, ship the audio to Wit, and act on the outcome.
    captured = recording.record()
    outcome = wit_client.post_speech(captured.getvalue())
    interpreting.interpret(outcome)

    # Done.
    sys.exit(0)
from wit import Wit


def send(request, response):
    """'send' action: print the bot's reply to stdout."""
    reply = 'Bot responde: {}'.format(response['text'])
    print(reply)


ACCESS_TOKEN = 'BYP2LLV64PDAUH2KPNRHN2T7ICZUGDHV'

# Hand the single callback to the client and start the stdin REPL.
actions = dict(send=send)
client = Wit(access_token=ACCESS_TOKEN, actions=actions)
client.interactive()
from wit import Wit
import json

# Pre-bind so a failed config load leaves a defined (None) token instead of
# raising NameError at the Wit(...) call below, as the original did.
access_token = None
try:
    # config.json is expected to hold {"witai": "<token>", ...}
    with open("config.json") as f:
        access_token = json.load(f)['witai']
# BUGFIX: was a bare `except:` that swallowed every error (including
# KeyboardInterrupt). Catch only the expected failures: missing/unreadable
# file, malformed JSON (ValueError covers json.JSONDecodeError), missing key.
except (IOError, OSError, ValueError, KeyError):
    print("open config.json error")


def send(request, response):
    """Wit 'send' action: print the bot's reply text."""
    print(response['text'])


actions = {
    'send': send,
}

client = Wit(access_token=access_token, actions=actions)
resp = client.message('Turn on the light in my bedroom')
print('Yay, got Wit.ai response: ' + str(resp))