def test_streaming_detect_intent_exception(self):
    """Streaming detect_intent surfaces the error stubbed into the channel."""
    # Stub the gRPC channel so the first streamed response raises.
    failing_channel = ChannelStub(responses=[CustomException()])
    channel_patch = mock.patch("google.api_core.grpc_helpers.create_channel")
    with channel_patch as create_channel:
        create_channel.return_value = failing_channel
        client = dialogflow_v2beta1.SessionsClient()
    # A single-element request stream is enough to trigger the failure.
    stream = [
        session_pb2.StreamingDetectIntentRequest(
            session="session1984987798", query_input={}
        )
    ]
    with pytest.raises(CustomException):
        client.streaming_detect_intent(stream)
def chat_bot(_text):
    """Send `_text` to the 'bot-0527' Dialogflow agent and return its reply.

    A fresh session id is generated per call, so no conversation context is
    carried between calls.
    """
    project = 'bot-0527'
    session_uuid = uuid.uuid1()
    creds = service_account.Credentials.from_service_account_file(
        'bot-0527-8f10a38bf932.json')
    client = dialogflow.SessionsClient(credentials=creds)
    session_name = client.session_path(project, session_uuid)
    # Queries are sent as Traditional Chinese text.
    query = dialogflow.types.QueryInput(
        text=dialogflow.types.TextInput(text=_text, language_code='zh-TW'))
    result = client.detect_intent(session=session_name, query_input=query)
    return result.query_result.fulfillment_text
def detect_intent_with_model_selection(project_id, session_id, audio_file_path,
                                       language_code):
    """Run detect intent with Speech model selection on an audio file.

    Using the same `session_id` between requests allows continuation of the
    conversation. Prints query text, detected intent and fulfillment text.
    """
    import dialogflow_v2beta1 as dialogflow
    client = dialogflow.SessionsClient()
    session = client.session_path(project_id, session_id)
    print('Session path: {}\n'.format(session))
    # Read the whole audio payload up front.
    with open(audio_file_path, 'rb') as audio_file:
        input_audio = audio_file.read()
    # Encoding and sample rate are hard-coded for simplicity; 'phone_call'
    # is one of the selectable Speech models
    # (video, phone_call, command_and_search, default).
    audio_config = dialogflow.types.InputAudioConfig(
        audio_encoding=dialogflow.enums.AudioEncoding.AUDIO_ENCODING_LINEAR_16,
        language_code=language_code,
        sample_rate_hertz=48000,
        model='phone_call')
    query_input = dialogflow.types.QueryInput(audio_config=audio_config)
    response = client.detect_intent(
        session=session, query_input=query_input, input_audio=input_audio)
    result = response.query_result
    print('=' * 20)
    print('Query text: {}'.format(result.query_text))
    print('Detected intent: {} (confidence: {})\n'.format(
        result.intent.display_name, result.intent_detection_confidence))
    print('Fulfillment text: {}\n'.format(result.fulfillment_text))
def detect_intent_with_texttospeech_response(project_id, session_id, texts,
                                             language_code):
    """Detect intent for each text and request the reply as audio as well.

    Using the same `session_id` between requests allows continuation of the
    conversation. Each response's synthesized audio is written to
    'trial.wav' (later texts overwrite earlier ones).
    """
    import dialogflow_v2beta1 as dialogflow
    client = dialogflow.SessionsClient()
    session = client.session_path(project_id, session_id)
    print('Session path: {}\n'.format(session))
    for text in texts:
        query_input = dialogflow.types.QueryInput(
            text=dialogflow.types.TextInput(text=text,
                                            language_code=language_code))
        # Ask Dialogflow to synthesize the answer as LINEAR16 audio.
        audio_cfg = dialogflow.types.OutputAudioConfig(
            audio_encoding=dialogflow.enums.OutputAudioEncoding.
            OUTPUT_AUDIO_ENCODING_LINEAR_16)
        response = client.detect_intent(
            session=session, query_input=query_input,
            output_audio_config=audio_cfg)
        result = response.query_result
        print('=' * 20)
        print('Query text: {}'.format(result.query_text))
        print('Detected intent: {} (confidence: {})\n'.format(
            result.intent.display_name, result.intent_detection_confidence))
        print('Fulfillment text: {}\n'.format(result.fulfillment_text))
        # The response's output_audio is binary; dump it to a wav file.
        with codecs.open('trial.wav', 'wb') as out:
            print(response.output_audio[0:100])
            out.write(response.output_audio)
            print('Audio content written to file "trial.wav"')
def detect_intent_texts(session_id, texts, language_code='en-US', project_id='amadeus-wsxhty'):
    """Return the full DetectIntentResponse for a single text query.

    Using the same `session_id` between requests allows continuation of the
    conversation.

    Args:
        session_id: Dialogflow session id (conversation key).
        texts: the query text (a single string, despite the plural name —
            it is passed directly to TextInput).
        language_code: BCP-47 language code of the query.
        project_id: GCP project of the Dialogflow agent.

    Returns:
        The DetectIntentResponse proto from the agent.
    """
    # Resolve the service-account key two directories above the CWD.
    # NOTE(review): os.path.abspath('api_test.py') resolves against the
    # current working directory, not this module's location — confirm.
    path = os.path.join(
        os.path.dirname(os.path.dirname(os.path.abspath('api_test.py'))),
        'api_demo/amadeus-wsxhty-d827a1efaa20.json')
    print(path)
    os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = path
    session_client = dialogflow.SessionsClient()
    session = session_client.session_path(project_id, session_id)
    print('Session path: {}\n'.format(session))
    # (A dead, fully commented-out per-text loop was removed here; the live
    # code has always sent `texts` as one query.)
    text_input = dialogflow.types.TextInput(
        text=texts, language_code=language_code)
    query_input = dialogflow.types.QueryInput(text=text_input)
    response = session_client.detect_intent(
        session=session, query_input=query_input)
    return response
def queryDialogueFlow(text):
    """Query the 'evil-bin' Dialogflow agent with `text`; return the reply."""
    import dialogflow_v2beta1 as dialogflow
    client = dialogflow.SessionsClient()
    # Fixed session id '123': every call shares one conversation context.
    session = client.session_path('evil-bin', '123')
    query = dialogflow.types.QueryInput(
        text=dialogflow.types.TextInput(text=text, language_code='en-US'))
    response = client.detect_intent(session=session, query_input=query)
    result = response.query_result
    print('=' * 20)
    print('Query text: {}'.format(result.query_text))
    print('Detected intent: {} (confidence: {})\n'.format(
        result.intent.display_name, result.intent_detection_confidence))
    print('Fulfillment text: {}\n'.format(result.fulfillment_text))
    return result.fulfillment_text
def detect_intent_texts(project_id, session_id, text, language_code):
    """Detect intent for `text`, consulting a fixed knowledge base.

    Returns the fulfillment text, or None when `text` is empty/falsy.
    """
    print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
    client = dialogflow.SessionsClient()
    session = client.session_path(project_id, session_id)
    print(' * Session path: '+ str(session))
    if text:
        query_text = dialogflow.types.TextInput(
            text=text, language_code=language_code)
        print("* Text input is: " + str(query_text))
        query_input = dialogflow.types.QueryInput(text=query_text)
        print(" * Query input is " + str(query_input))
        # Route the query through a specific knowledge base; verify on the
        # Dialogflow console by asking a KB question and checking details.
        kb_id = "MzIwODA5MTI1NTg1MDQwMTc5Mg"
        kb_name = dialogflow.knowledge_bases_client \
            .KnowledgeBasesClient \
            .knowledge_base_path(project_id, kb_id)
        params = dialogflow.types.QueryParameters(
            knowledge_base_names=[kb_name])
        print(" * Query params is: " + str(params))
        response = client.detect_intent(
            session=session, query_input=query_input, query_params=params)
        result = response.query_result
        print('Query text: {}'.format(result.query_text))
        print('Detected intent: {} (confidence: {})\n'.format(
            result.intent.display_name, result.intent_detection_confidence))
        print('Fulfillment text: {}\n'.format(result.fulfillment_text))
        print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
        return result.fulfillment_text
def test_detect_intent(self):
    """detect_intent returns the stubbed response and issues exactly one RPC."""
    # Canned proto the stubbed channel hands back.
    expected_response = session_pb2.DetectIntentResponse(
        response_id='responseId1847552473')
    channel = ChannelStub(responses=[expected_response])
    client = dialogflow_v2beta1.SessionsClient(channel=channel)
    # Minimal session/query pair.
    session = client.session_path('[PROJECT]', '[SESSION]')
    query_input = {}
    response = client.detect_intent(session, query_input)
    assert expected_response == response
    # One request went out, carrying exactly the expected proto.
    assert len(channel.requests) == 1
    expected_request = session_pb2.DetectIntentRequest(
        session=session, query_input=query_input)
    assert expected_request == channel.requests[0][1]
def process_input(self, text_to_be_analyzed, chat_id, first_name):
    """Detect the intent of a user message and dispatch to a response handler.

    Args:
        text_to_be_analyzed: raw user message text.
        chat_id: chat identifier, reused as the Dialogflow session id so the
            conversation keeps its context.
        first_name: user's first name, forwarded to welcome/fallback replies.

    Returns:
        The reply string produced by the matching handler, or "" when the
        detected intent has no handler.

    Raises:
        InvalidArgument: propagated from Dialogflow on a malformed request.
    """
    session_client = dialogflow.SessionsClient()
    session = session_client.session_path(self.DIALOGFLOW_PROJECT_ID, chat_id)
    text_input = dialogflow.types.TextInput(
        text=text_to_be_analyzed,
        language_code=self.DIALOGFLOW_LANGUAGE_CODE)
    query_input = dialogflow.types.QueryInput(text=text_input)
    knowledge_base_path = dialogflow.knowledge_bases_client \
        .KnowledgeBasesClient \
        .knowledge_base_path(self.DIALOGFLOW_PROJECT_ID,
                             'MTY5NjYxNzQ3MDEwOTUyMjMyOTY')
    query_params = dialogflow.types.QueryParameters(
        knowledge_base_names=[knowledge_base_path])
    # BUG FIX 1: query_params was built but never passed, so the knowledge
    # base was never consulted and the Knowledge.KnowledgeBase.* branch
    # below could never fire.
    # BUG FIX 2: the original `except InvalidArgument: pass` fell through
    # to read an undefined `response` (NameError); let the error propagate.
    response = session_client.detect_intent(
        session=session, query_input=query_input, query_params=query_params)
    detected_intent = response.query_result.intent.display_name
    reply = ""
    if detected_intent == 'city_handler':
        reply = self.response_city_handler(response)
    elif detected_intent == 'state_handler':
        reply = self.response_state_handler(response)
    elif detected_intent == 'Knowledge.KnowledgeBase.MTY5NjYxNzQ3MDEwOTUyMjMyOTY':
        reply = self.response_faq(response)
    elif detected_intent == 'Default Welcome Intent':
        reply = self.response_welcome(response, first_name)
    elif detected_intent == 'Default Fallback Intent':
        reply = self.response_fallback(response, first_name)
    elif detected_intent == 'country_handler':
        reply = self.response_country_handler(response)
    elif detected_intent == 'helpline':
        reply = self.response_helpline(response)
    return reply
def detect_intent(text_to_be_analyzed):
    """Detect the Dialogflow intent for a single text query.

    A fresh session id is generated per call, so no conversation context is
    carried between calls.

    Args:
        text_to_be_analyzed: user text to classify.

    Returns:
        (intent, reply): detected intent display name and fulfillment text.

    Raises:
        google.api_core.exceptions.InvalidArgument: on a malformed request.
    """
    # Point the client library at the service-account key.
    os.environ[
        "GOOGLE_APPLICATION_CREDENTIALS"] = './eatler-tgdjdx-3ab8e4382f8f.json'  # [path] of eatler-tgdjdx-3ab8e4382f8f.json
    DIALOGFLOW_PROJECT_ID = 'eatler-tgdjdx'
    DIALOGFLOW_LANGUAGE_CODE = 'en'
    session_id = uuid.uuid1()  # fresh session per call
    session_client = dialogflow.SessionsClient()
    session = session_client.session_path(DIALOGFLOW_PROJECT_ID, session_id)
    text_input = dialogflow.types.TextInput(
        text=text_to_be_analyzed, language_code=DIALOGFLOW_LANGUAGE_CODE)
    query_input = dialogflow.types.QueryInput(text=text_input)
    # The original wrapped this call in `except InvalidArgument: raise` —
    # a no-op handler; the exception now simply propagates.
    response = session_client.detect_intent(session=session,
                                            query_input=query_input)
    reply = response.query_result.fulfillment_text
    intent = response.query_result.intent.display_name
    return intent, reply
import RPi.GPIO as GPIO #for connections with GPIO pins import face_recognition #face_recognition library for recognizing face import numpy as np import pickle #to load .dat file which has face_encodings saved in it import serial #for connections with serial port import time r = sr.Recognizer() m = sr.Microphone(device_index=2) #device_index should be set according from which microphone you want to listen (if more than one microphone) #running file on system which has credentials of dialogflow, path where file is kept="/home/pi/", file name="botv2-33024-2903d4abace7.json" os.system('export GOOGLE_APPLICATION_CREDENTIALS="/home/pi/botv2-33024-2903d4abace7.json"') project_id='botv2-33024' #project_id is given in your dialogflow bot settings session_id='abcd' #session_id can be any string language_code='en-US' session_client = dialogflow.SessionsClient() session_path = session_client.session_path('botv2-33024', 'abcd') print('Session path: {}\n'.format(session_path)) GPIO.setmode(GPIO.BOARD) #use physical pin numbering GPIO.setwarnings(False) #ignore warning for now GPIO.setup(10, GPIO.IN, pull_up_down=GPIO.PUD_UP) #set pin 10 to be an input pin and set initial vaue to be pulled low (off) while True: # Run forever if GPIO.input(10) == GPIO.HIGH: #if getting high from GPIO pin no. 10 os.system("python3 eye.py") #running eye.py file on system (eye will be displayed on oled displays) print('Listening...') while True: try: print("A moment of silence, please...")
def test():
    """Manual end-to-end check of the symptom→diagnosis pipeline.

    Sends a hard-coded Russian complaint to the Dialogflow agent, extracts a
    symptom from the matched intent name, runs the diagnosis predictor and
    looks the result up in `piluli_sheet`.

    Returns:
        A JSON string: either a follow-up question, a simple text reply, a
        filled infection/treatment record, or an error string.
    """
    # BUG FIX: the original set `data = {}` (the key-initialising literal was
    # commented out), so data['treatments'].append(...) and friends below
    # raised KeyError. Pre-declare every key the infection branch fills.
    data = {
        "infection": '',
        "treatments": [],
        "drugs": [],
        "tests": [],
        "doctor": ''
    }
    input_text = 'у меня болит живот'
    symptoms = ''
    if input_text and len(symptoms) == 0:
        session_client = dialogflow.SessionsClient()
        session = session_client.session_path("test-agent-3c1d1", "5f0f6fa22a46436c9695cf8333dac911")
        text_input = dialogflow.types.TextInput(text=input_text, language_code='ru')
        query_input = dialogflow.types.QueryInput(text=text_input)
        final = session_client.detect_intent(session=session, query_input=query_input)
        print(final.query_result.fulfillment_text)
        if final.query_result.fulfillment_text in valid_answers:
            # The symptom name is encoded between braces in the intent
            # display name, e.g. "... {abdominal_pain} ...".
            symptoms = dict()
            txt = final.query_result.intent.display_name
            txt = txt[txt.find('{') + 1:txt.rfind('}')]
            symptoms[txt] = 1
        else:
            data['simple_text'] = final.query_result.fulfillment_text
            data['symptoms'] = {}
            return json.dumps(data, ensure_ascii=False)
    prediction_result = predict_diagnosis.predict(symptoms)
    if 'question' in prediction_result:
        # Predictor needs more information: forward its question.
        data['question'] = prediction_result['question']
        data['question_ref'] = prediction_result['question_ref']
        data['symptoms'] = symptoms
        return json.dumps(data, ensure_ascii=False)
    else:
        # model_response = requests.post('model', data={'final': str(final)})
        # NOTE(review): the sibling index() view reads
        # prediction_result['diagnosis']; `.value` is kept as written here —
        # confirm the actual return type of predict_diagnosis.predict().
        if prediction_result.value in list(piluli_sheet['Infection']):  # example
            filtered_sheet = piluli_sheet[piluli_sheet['Infection'] == prediction_result.value]
            data["infection"] = filtered_sheet['Инфекция'][0]
            treatments = ['Лечение1', 'Лечение2', 'Лечение3']
            for treatment in treatments:
                if isinstance(filtered_sheet[treatment][0], str):
                    data['treatments'].append(filtered_sheet[treatment][0])
            drugs = ['Препарат1', 'Препарат2', 'Препарат3']
            for drug in drugs:
                if isinstance(filtered_sheet[drug][0], str):
                    data['drugs'].append(filtered_sheet[drug][0])
            analyzes = ['Анализ1', 'Анализ2', 'Анализ3']
            for analysis in analyzes:
                if isinstance(filtered_sheet[analysis][0], str):
                    data['tests'].append(filtered_sheet[analysis][0])
            # (this assignment was split across a line break in the original)
            data['doctor'] = filtered_sheet['Врач'][0]
            return json.dumps(data, ensure_ascii=False)
        else:
            return "{error: 'wrong data'}"
def index():
    """Flask view: one turn of the symptom→diagnosis dialogue.

    Reads {'user_question', 'symptoms'} from the request body. When no
    symptoms are known yet, asks the Dialogflow agent and extracts a symptom
    from the matched intent name; then runs the predictor and either returns
    a follow-up question or the infection record from `piluli_sheet`.

    Returns:
        A JSON string (or, on predictor error, the raw prediction_result —
        NOTE(review): that returns a non-string from a Flask view; confirm).
    """
    data = {
        "treatments": [],
        "drugs": [],
        "tests": []
    }
    data_req = json.loads(request.data)
    input_text = data_req['user_question']
    if (len(data_req['symptoms']) > 0):
        symptoms = data_req['symptoms']
    else:
        symptoms = ''
    if input_text and len(symptoms) == 0:
        session_client = dialogflow.SessionsClient()
        session = session_client.session_path("test-agent-3c1d1", "5f0f6fa22a46436c9695cf8333dac911")
        text_input = dialogflow.types.TextInput(text=input_text, language_code='ru')
        query_input = dialogflow.types.QueryInput(text=text_input)
        final = session_client.detect_intent(session=session, query_input=query_input)
        print(final.query_result.fulfillment_text)
        if final.query_result.fulfillment_text in valid_answers:
            symptoms = dict()
            txt = final.query_result.intent.display_name
            print(txt)
            # BUG FIX: position2 (index of '}') was computed but never used,
            # so the symptom key kept a trailing '}'. Slice up to the closing
            # brace, matching the extraction in test().
            position1 = txt.find('{')
            position2 = txt.find('}')
            txt = txt[position1 + 1:position2]
            symptoms[txt] = 1
        else:
            data['simple_text'] = final.query_result.fulfillment_text
            data['symptoms'] = {}
            return json.dumps(data, ensure_ascii=False)
    print(symptoms)
    prediction_result = predict_diagnosis.predict(symptoms)
    print(prediction_result)
    if 'error' in prediction_result:
        return prediction_result
    if 'question' in prediction_result:
        # Predictor needs more information: forward its question.
        data['question'] = prediction_result['question']
        data['question_ref'] = prediction_result['question_ref']
        data['symptoms'] = symptoms
        return json.dumps(data, ensure_ascii=False)
    else:
        # model_response = requests.post('model', data={'final': str(final)})
        print(prediction_result['diagnosis'])
        if prediction_result['diagnosis'] in list(piluli_sheet['Infection']):  # example
            filtered_sheet = piluli_sheet[piluli_sheet['Infection'] == prediction_result['diagnosis']]
            print(filtered_sheet)
            data["infection"] = str(filtered_sheet['Инфекция'].values[0])
            treatments = ['Лечение1', 'Лечение2', 'Лечение3']
            for treatment in treatments:
                if str(filtered_sheet[treatment].values[0]) != 'nan':
                    data['treatments'].append(str(filtered_sheet[treatment].values[0]))
            drugs = ['Препарат1', 'Препарат2', 'Препарат3']
            for drug in drugs:
                if str(filtered_sheet[drug].values[0]) != 'nan':
                    data['drugs'].append(str(filtered_sheet[drug].values[0]))
            analyzes = ['Анализ1', 'Анализ2', 'Анализ3']
            for analysis in analyzes:
                if str(filtered_sheet[analysis].values[0]) != 'nan':
                    data['tests'].append(str(filtered_sheet[analysis].values[0]))
            data['doctor'] = str(filtered_sheet['Врач'].values[0])
            return json.dumps(data, ensure_ascii=False)
        else:
            return "{error: 'wrong data'}"
def __init__(self, language_code='it-IT', last_contexts=None):
    """Initialize all params and load data.

    :param language_code: BCP-47 code sent with every Dialogflow query.
    :param last_contexts: optional list of Dialogflow contexts to resume from.
    """
    """ Constants and params """
    # Mic capture settings (mono 16-bit; 44.1 kHz matches the LINEAR_16
    # sample_rate_hertz passed to InputAudioConfig below).
    self.CHUNK = 4096
    self.FORMAT = pyaudio.paInt16
    self.CHANNELS = 1
    self.RATE = 44100  # was 16000
    # Whether audio comes from a network audio server or the local mic.
    self.USE_AUDIO_SERVER = rospy.get_param(
        '/dialogflow_client/use_audio_server', False)
    if self.USE_AUDIO_SERVER:
        print("USO AUDIOSERVER")
    else:
        print("USO IL MICROFONO")
    self.PLAY_GOOGLE_AUDIO = rospy.get_param(
        '/dialogflow_client/play_google_audio', True)
    self.PLAY_LOCAL_AUDIO = rospy.get_param(
        '/dialogflow_client/play_local_audio', True)
    self.DEBUG = rospy.get_param('/dialogflow_client/debug', False)
    # Register Ctrl-C sigint handler for clean shutdown.
    signal.signal(signal.SIGINT, self._signal_handler)
    """ Dialogflow setup """
    # Load phrase hints (speech-context clues) from the package config.
    rp = rospkg.RosPack()
    file_dir = rp.get_path('dialogflow_ros') + '/config/context.yaml'
    with open(file_dir, 'r') as f:
        try:
            self.phrase_hints = load(f)
        except YAMLError:
            rospy.logwarn(
                "DF_CLIENT: Unable to open phrase hints yaml file!")
            self.phrase_hints = []
    # Dialogflow params
    project_id = rospy.get_param('/dialogflow_client/project_id',
                                 'robot-15de1')
    session_id = str(uuid4())  # Random: each node run is a fresh session
    self._language_code = language_code
    self.last_contexts = last_contexts if last_contexts else []
    # DF Audio Setup
    audio_encoding = AudioEncoding.AUDIO_ENCODING_LINEAR_16
    # Possible models: video, phone_call, command_and_search, default
    self._audio_config = InputAudioConfig(
        audio_encoding=audio_encoding,
        language_code=self._language_code,
        sample_rate_hertz=self.RATE,
        phrase_hints=self.phrase_hints,
        model='command_and_search')
    self._output_audio_config = OutputAudioConfig(
        audio_encoding=OutputAudioEncoding.OUTPUT_AUDIO_ENCODING_LINEAR_16)
    # speechCfg = SynthesizeSpeechConfig()
    # https://cloud.google.com/dialogflow/docs/reference/rest/v2beta1/OutputAudioConfig#SynthesizeSpeechConfig
    # speechCfg.speakingRate = 1.5  # range [0.25, 4.0]; 1.0 is the normal native speed
    # speechCfg.pitch = -10         # range [-20.0, 20.0]
    # speechCfg.volumeGainDb        # range [-96.0, 16.0]
    # speechCfg.voice.ssmlGender = texttospeech.enums.SsmlVoiceGender.SSML_VOICE_GENDER_MALE
    # self._output_audio_config = OutputAudioConfig(
    #     audio_encoding=OutputAudioEncoding.OUTPUT_AUDIO_ENCODING_LINEAR_16 #,
    #     synthesizeSpeechConfig=SynthesizeSpeechConfig.
    # )
    # Create a session
    self._session_cli = dialogflow_v2beta1.SessionsClient()
    self._session = self._session_cli.session_path(project_id, session_id)
    rospy.logdebug("DF_CLIENT: Session Path: {}".format(self._session))
    """ ROS Setup """
    # One results publisher plus four request subscribers: plain-string
    # text/event topics and typed Dialogflow request/event messages.
    results_topic = rospy.get_param('/dialogflow_client/results_topic',
                                    '/dialogflow_client/results')
    requests_topic = rospy.get_param('/dialogflow_client/requests_topic',
                                     '/dialogflow_client/requests')
    text_req_topic = requests_topic + '/string_msg'
    text_event_topic = requests_topic + '/string_event'
    msg_req_topic = requests_topic + '/df_msg'
    event_req_topic = requests_topic + '/df_event'
    self._results_pub = rospy.Publisher(results_topic, DialogflowResult,
                                        queue_size=10)
    rospy.Subscriber(text_req_topic, String, self._text_request_cb)
    rospy.Subscriber(text_event_topic, String, self._text_event_cb)
    rospy.Subscriber(msg_req_topic, DialogflowRequest, self._msg_request_cb)
    rospy.Subscriber(event_req_topic, DialogflowEvent,
                     self._event_request_cb)
    """ Audio setup """
    # Mic stream input setup
    self.audio = pyaudio.PyAudio()
    if self.USE_AUDIO_SERVER:
        self._server_name = rospy.get_param(
            '/dialogflow_client/server_name', '127.0.0.1')
        self._port = rospy.get_param('/dialogflow_client/port', 4444)
        # print("Audio Server at: ", self._server_name, ", on port ", self._port)
        print("Audio Server at: {}, on port {}".format(
            self._server_name, self._port))
    if self.PLAY_GOOGLE_AUDIO:
        self._create_audio_output()
    print("Uso Google STT")
    rospy.logdebug("DF_CLIENT: Last Contexts: {}".format(
        self.last_contexts))
    rospy.loginfo("DF_CLIENT: Ready!")
    # Battery level mirror, updated by the /bat_charge subscriber.
    self.bat_charge = 0
    rospy.Subscriber('/bat_charge', Float64, self.cbk_bat_charge)
    # Publishers for all voice-executable commands (translated from Italian:
    # "qui tutti i comandi eseguibili a voce").
    self.pub_chatter = rospy.Publisher('/chatter', String, queue_size=10)
    self.pub_faretto = rospy.Publisher("/faretto", Bool, queue_size=1)
    self.pub_laser = rospy.Publisher("/laser", Bool, queue_size=1)
    self.pub_start_followme = rospy.Publisher("/start_followme", Bool,
                                              queue_size=1)
    self.pub_cmd_vel = rospy.Publisher("/cmd_vel", Twist, queue_size=1)
    self.pub_targetPose = rospy.Publisher("/target_pose", Twist,
                                          queue_size=1)
def detect_intent_texts(project_id, session_id, texts, language_code):
    """Detect intent for each text, with sentiment analysis enabled.

    Using the same `session_id` between requests allows continuation of the
    conversation.

    Args:
        project_id: GCP project of the Dialogflow agent.
        session_id: conversation/session key.
        texts: iterable of query strings.
        language_code: BCP-47 language code of the queries.

    Returns:
        A list with one result dict per input text: query, query_text,
        response_id, intent_name, intent_detection_confidence, result_msg
        and sentiment_score.
    """
    return_list = []
    # import dialogflow_v2 as dialogflow
    session_client = dialogflow.SessionsClient()
    session = session_client.session_path(project_id, session_id)
    # BUG FIX: the original printed a literal '/n' instead of a newline.
    print('Session path: {}\n'.format(session))
    for text in texts:
        text_input = dialogflow.types.TextInput(text=text,
                                                language_code=language_code)
        query_input = dialogflow.types.QueryInput(text=text_input)
        # Enable sentiment analysis on the query text.
        sentiment_config = dialogflow.types.SentimentAnalysisRequestConfig(
            analyze_query_text_sentiment=True)
        # Set the query parameters with sentiment analysis.
        query_params = dialogflow.types.QueryParameters(
            sentiment_analysis_request_config=sentiment_config)
        response = session_client.detect_intent(
            session=session, query_input=query_input,
            query_params=query_params)
        # sentiment_analysis_result is only populated because the sentiment
        # option was requested above.
        query = text
        query_text = response.query_result.query_text
        response_id = response.response_id
        intent_name = response.query_result.intent.display_name
        intent_detection_confidence = response.query_result.intent_detection_confidence
        result_msg = response.query_result.fulfillment_text
        sentiment_score = response.query_result.sentiment_analysis_result.query_text_sentiment.score
        result_dic = {
            'query': query,
            'query_text': query_text,
            'response_id': response_id,
            'intent_name': intent_name,
            'intent_detection_confidence': intent_detection_confidence,
            'result_msg': result_msg,
            'sentiment_score': sentiment_score
        }
        return_list.append(result_dic)
        print('=' * 20)
        print('query : ', query)
        print('query_text : ', query_text)
        print('response_id : ', response_id)
        print('intent_name : ', intent_name)
        print('intent_detection_confidence : ', intent_detection_confidence)
        print('result_msg : ', result_msg)
        print('sentiment_score : ', sentiment_score)
    return return_list
def __init__(self, language_code='en-US', last_contexts=None):
    """Initialize all params and load data.

    :param language_code: BCP-47 code sent with every Dialogflow query.
    :param last_contexts: optional list of Dialogflow contexts to resume from.
    """
    """ Constants and params """
    # Mic capture settings (16 kHz mono 16-bit — matches LINEAR_16 below).
    self.CHUNK = 4096
    self.FORMAT = pyaudio.paInt16
    self.CHANNELS = 1
    self.RATE = 16000
    # Whether audio comes from a network audio server or the local mic.
    self.USE_AUDIO_SERVER = rospy.get_param('/dialogflow_client/use_audio_server', False)
    self.PLAY_AUDIO = rospy.get_param('/dialogflow_client/play_audio', True)
    self.DEBUG = rospy.get_param('/dialogflow_client/debug', False)
    # Register Ctrl-C sigint handler for clean shutdown.
    signal.signal(signal.SIGINT, self._signal_handler)
    """ Dialogflow setup """
    # Load phrase hints (speech-context clues) from the package config.
    rp = rospkg.RosPack()
    file_dir = rp.get_path('dialogflow_ros') + '/config/context.yaml'
    with open(file_dir, 'r') as f:
        try:
            self.phrase_hints = load(f)
        except YAMLError:
            rospy.logwarn("DF_CLIENT: Unable to open phrase hints yaml file!")
            self.phrase_hints = []
    # Dialogflow params
    project_id = rospy.get_param('/dialogflow_client/project_id', 'my-project-id')
    session_id = str(uuid4())  # Random: each node run is a fresh session
    self._language_code = language_code
    self.last_contexts = last_contexts if last_contexts else []
    # DF Audio Setup
    audio_encoding = AudioEncoding.AUDIO_ENCODING_LINEAR_16
    # Possible models: video, phone_call, command_and_search, default
    self._audio_config = InputAudioConfig(audio_encoding=audio_encoding,
                                          language_code=self._language_code,
                                          sample_rate_hertz=self.RATE,
                                          phrase_hints=self.phrase_hints,
                                          model='command_and_search')
    self._output_audio_config = OutputAudioConfig(
        audio_encoding=OutputAudioEncoding.OUTPUT_AUDIO_ENCODING_LINEAR_16
    )
    # Create a session
    self._session_cli = dialogflow_v2beta1.SessionsClient()
    self._session = self._session_cli.session_path(project_id, session_id)
    rospy.logdebug("DF_CLIENT: Session Path: {}".format(self._session))
    """ ROS Setup """
    # One results publisher plus four request subscribers: plain-string
    # text/event topics and typed Dialogflow request/event messages.
    results_topic = rospy.get_param('/dialogflow_client/results_topic',
                                    '/dialogflow_client/results')
    requests_topic = rospy.get_param('/dialogflow_client/requests_topic',
                                     '/dialogflow_client/requests')
    text_req_topic = requests_topic + '/string_msg'
    text_event_topic = requests_topic + '/string_event'
    msg_req_topic = requests_topic + '/df_msg'
    event_req_topic = requests_topic + '/df_event'
    self._results_pub = rospy.Publisher(results_topic, DialogflowResult,
                                        queue_size=10)
    rospy.Subscriber(text_req_topic, String, self._text_request_cb)
    rospy.Subscriber(text_event_topic, String, self._text_event_cb)
    rospy.Subscriber(msg_req_topic, DialogflowRequest, self._msg_request_cb)
    rospy.Subscriber(event_req_topic, DialogflowEvent, self._event_request_cb)
    """ Audio setup """
    # Mic stream input setup
    self.audio = pyaudio.PyAudio()
    self._server_name = rospy.get_param('/dialogflow_client/server_name',
                                        '127.0.0.1')
    self._port = rospy.get_param('/dialogflow_client/port', 4444)
    if self.PLAY_AUDIO:
        self._create_audio_output()
    rospy.logdebug("DF_CLIENT: Last Contexts: {}".format(self.last_contexts))
    rospy.loginfo("DF_CLIENT: Ready!")
# One-shot Dialogflow query script: sends a hard-coded greeting to the
# 'karanassistant-ushv' agent and prints the detected intent and reply.
from google.oauth2 import service_account
from google.api_core.exceptions import InvalidArgument
import dialogflow_v2beta1
import os

# Service-account key for the agent.
# NOTE(review): 'package.json' is an unusual name for a GCP key file — confirm.
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'package.json'
DIALOGFLOW_PROJECT_ID = 'karanassistant-ushv'
DIALOGFLOW_LANGUAGE_CODE = 'en'
SESSION_ID = 'me'  # fixed session id: repeated runs share conversation context
text_to_be_analyzed = "Hello!"

session_client = dialogflow_v2beta1.SessionsClient()
session = session_client.session_path(DIALOGFLOW_PROJECT_ID, SESSION_ID)
text_input = dialogflow_v2beta1.types.TextInput(
    text=text_to_be_analyzed, language_code=DIALOGFLOW_LANGUAGE_CODE)
query_input = dialogflow_v2beta1.types.QueryInput(text=text_input)
try:
    response = session_client.detect_intent(session=session,
                                            query_input=query_input)
except InvalidArgument:
    # Re-raised unchanged; the handler only documents that this can occur.
    raise
print("Query text:", response.query_result.query_text)
print("Detected intent:", response.query_result.intent.display_name)
print("Detected intent confidence:",
      response.query_result.intent_detection_confidence)
print("Fulfillment text:", response.query_result.fulfillment_text)
def detect_intent(text_to_be_analyzed, user, SESSION_ID):
    """Detect intent for a restaurant-chatbot message and build the reply.

    Sends `text_to_be_analyzed` to the Dialogflow agent, then mutates the
    module-level conversation state (gintent/how/address/locality/city/
    restaurants) according to the detected intent and assembles a reply
    payload of text + quick-reply buttons.

    Args:
        text_to_be_analyzed: raw user message.
        user: unused in this function (kept for the caller's signature).
        SESSION_ID: Dialogflow session id — keeps conversation context.

    Returns:
        dict: {'reply': {'text', 'Buttons', 'url', 'card_restaurant',
        'card_product'}, 'intent': <display name>}.
    """
    # Point the client library at the service-account key.
    os.environ[
        "GOOGLE_APPLICATION_CREDENTIALS"] = 'main/authentication/eatler-web-ywksta-c938ecbc544e.json'  # [path] of eatler-tgdjdx-3ab8e4382f8f.json
    DIALOGFLOW_PROJECT_ID = 'eatler-web-ywksta'
    DIALOGFLOW_LANGUAGE_CODE = 'en'
    session_client = dialogflow.SessionsClient()
    session = session_client.session_path(DIALOGFLOW_PROJECT_ID, SESSION_ID)
    text_input = dialogflow.types.TextInput(text=text_to_be_analyzed,
                                            language_code=DIALOGFLOW_LANGUAGE_CODE)
    query_input = dialogflow.types.QueryInput(text=text_input)
    try:
        response = session_client.detect_intent(session=session,
                                                query_input=query_input)
    except InvalidArgument:
        # Re-raised unchanged; documents that this can occur.
        raise
    reply = response.query_result.fulfillment_text
    reply = {'text': reply, 'Buttons': []}
    intent = response.query_result.intent.display_name
    text = reply['text']
    Buttons = []
    url = None
    card_restaurant = {}
    card_product = {}
    if intent == 'Welcome Intent':
        # The `global` declarations apply to the whole function body, so
        # every branch below writes the module-level conversation state.
        global gintent
        global how
        global address
        global locality
        global city
        global restaurants
        gintent = None
        how = None
        address = None
        locality = None
        city = None
        restaurants *= 0  # clears the shared list in place
        Buttons = ['Order food 🍔', 'Book a table 🍽', 'Let\'s Talk 💬']
    elif intent == 'Order':
        gintent = 'order'
        how = None
        address = None
        locality = None
        city = None
        restaurants *= 0
        Buttons = ['Delivery 🛵', 'Carry Out 🥡']
    elif intent == 'Order.Delivery':
        gintent = 'order'
        how = 'delivery'
        address = None
        locality = None
        city = None
        restaurants *= 0
    elif intent == 'Order.address':
        restaurants *= 0
        print(how)
        # print(response.query_result.parameters.fields)
        # Pull the address entities the agent extracted from the message.
        locality = response.query_result.parameters.fields['Locality'].string_value
        city = response.query_result.parameters.fields['City'].string_value
        # Query the local API for nearby restaurants…
        add_request = requests.get(
            'http://127.0.0.1:8000/main_api/nearbyRestaurant_get/?address=' + locality + ' ' + city + '&city=' + city)
        add_request = add_request.json()
        # …and keep only the ones currently open.
        current_time = datetime.now().time()
        for key in add_request:
            if key != 'status':
                open_time = datetime.strptime(add_request[key]['open_time'], '%H:%M:%S').time()
                close_time = datetime.strptime(add_request[key]['close_time'], '%H:%M:%S').time()
                if open_time < current_time and current_time < close_time:
                    restaurants.append(add_request[key]['name'])
        if gintent == 'order' and how != None:
            if how == 'delivery':
                text = 'What type of menu would you prefer?Should I help you with:'
                Buttons = ['Cuisine🌮 based Menu', 'Veg🥙 or Non-Veg🍗']
            elif how == 'takeaway':
                text = 'Here are the restaurants nearest to your location.'
        # NOTE(review): `else` attachment reconstructed from collapsed
        # source — assumed to pair with the outer `if` (fires when no
        # delivery/takeaway choice has been made yet); confirm.
        else:
            text = 'You want delivery or takeaway'
    elif intent == 'order.takeaway':
        gintent = 'order'
        how = 'takeaway'
        address = None
        locality = None
        city = None
        restaurants *= 0
    elif intent == 'Dinein':
        gintent = 'BOOK A TABLE'
        how = None
        address = None
        locality = None
        city = None
        restaurants *= 0
    elif intent == 'Login':
        text = "You are already login"
    elif intent == 'chatbot_active':
        gintent = None
        how = None
        address = None
        locality = None
        city = None
        restaurants *= 0
        Buttons = ['Order food 🍔', 'Book a table 🍽', 'Let\'s Talk 💬']
    reply = {'text': text,
             'Buttons': Buttons,
             'url': url,
             'card_restaurant': card_restaurant,
             'card_product': card_product}
    response_data = {'reply': reply, 'intent': intent}
    return response_data