def test_shape_dataset(self):
    """Run k-Shape clustering (3 clusters) on 'energy' and check intent + centroids."""
    order = "Do the kshape of 3 clusters for energy"
    data = response(self, order)
    result = data['queryResult']
    self.assertEqual(result['intent']['displayName'], 'DoClustering')
    self.assertGreater(result['intentDetectionConfidence'], 0.8)
    self.assertEqual(result['parameters']['operation'], 'kshape')
    self.assertEqual(result['parameters']['number'], 3)
    self.assertEqual(result['parameters']['Dataset'], 'energy')

    # Five time series of length 7 used as the clustering input.
    tts = pd.DataFrame([[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
                        [0.0, 10.0, 4.0, 5.0, 7.0, -3.0, 0.0],
                        [-1.0, 15.0, -12.0, 8.0, 9.0, 4.0, 5.0],
                        [2.0, 8.0, 7.0, -6.0, -1.0, 2.0, 9.0],
                        [-5.0, -5.0, -6.0, 7.0, 9.0, 9.0, 0.0]])
    self.workspace.save_dataset('energy', tts)
    expected_c = pd.DataFrame(
        [[-0.5234, 0.1560, -0.3627, -1.2764, -0.7781, 0.9135, 1.8711],
         [-0.7825, 1.5990, 0.1701, 0.4082, 0.8845, -1.4969, -0.7825],
         [-0.6278, 1.3812, -2.0090, 0.5022, 0.6278, 0.0000, 0.1256]])

    al.do_clustering(result['parameters'])
    centroid = al.Workspace().get_dataset('centroids0')
    labels = al.Workspace().get_dataset('labels0')

    # NOTE(review): len(expected_c) is the row count (3), so only columns 0-2
    # of the 7-column expected centroids are compared — presumably relies on a
    # transposed centroid layout; TODO confirm the intended coverage.
    for row in range(len(expected_c)):
        for col in range(3):
            self.assertAlmostEqual(centroid[col][row], expected_c[col][row],
                                   delta=self.DELTA)
def test_kmeans_dataset(self):
    """Run k-means clustering (3 clusters) on 'energy' and check intent + centroids."""
    order = "Do the kmeans of 3 clusters for energy"
    data = response(self, order)
    result = data['queryResult']
    self.assertEqual(result['intent']['displayName'], 'DoClustering')
    self.assertGreater(result['intentDetectionConfidence'], 0.8)
    self.assertEqual(result['parameters']['operation'], 'kmeans')
    self.assertEqual(result['parameters']['number'], 3)
    self.assertEqual(result['parameters']['Dataset'], 'energy')

    # Six time series of length 4 used as the clustering input.
    tts = pd.DataFrame([[0.0, 1.0, 2.0, 3.0],
                        [6.0, 7.0, 8.0, 9.0],
                        [2.0, -2.0, 4.0, -4.0],
                        [8.0, 5.0, 3.0, 1.0],
                        [15.0, 10.0, 5.0, 0.0],
                        [7.0, -7.0, 1.0, -1.0]])
    self.workspace.save_dataset('energy', tts)
    expected_c = pd.DataFrame([[0.0, 0.1667, 0.3333, 0.5],
                               [1.5, -1.5, 0.8333, -0.8333],
                               [4.8333, 3.6667, 2.6667, 1.6667]])

    al.do_clustering(result['parameters'])
    centroid = al.Workspace().get_dataset('centroids0')
    labels = al.Workspace().get_dataset('labels0')

    # NOTE(review): len(expected_c) is the row count (3), so only columns 0-2
    # of the 4-column expected centroids are compared — presumably relies on a
    # transposed centroid layout; TODO confirm the intended coverage.
    for row in range(len(expected_c)):
        for col in range(3):
            self.assertAlmostEqual(centroid[col][row], expected_c[col][row],
                                   delta=self.DELTA)
def detect_intent_text(project_id, session_id, text, language_code):
    """
    Detects the intent of the text and execute some instruction

    Using the same `session_id` between requests allows continuation
    of the conversation.

    :param project_id: ID of the project
    :param session_id: ID of the session
    :param text: The text input for analyse
    :param language_code: Code of the language
    """
    session_client = dialogflow.SessionsClient()
    session = session_client.session_path(project_id, session_id)
    print('Session path: {}\n'.format(session))

    text_input = dialogflow.types.TextInput(text=text, language_code=language_code)
    query_input = dialogflow.types.QueryInput(text=text_input)
    response = session_client.detect_intent(session=session, query_input=query_input)

    # Conversion of Protocol Buffer to JSON so parameters are plain dicts.
    response_json = pbjson.MessageToJson(response)
    data = json.loads(response_json)
    parameters = data['queryResult']['parameters']
    print(parameters)
    print('=' * 20)
    print('DEBUG: Query text: {}'.format(response.query_result.query_text))
    print('DEBUG: Detected intent: {} (confidence: {})\n'.format(
        response.query_result.intent.display_name,
        response.query_result.intent_detection_confidence))

    # Hoist the repeatedly-read attribute chain to a local.
    intent = response.query_result.intent.display_name
    fulfillment = response.query_result.fulfillment_text

    # Handlers for intents that require a dataset; all take `parameters`.
    # (Both DoMatrix_* intents map to the same al.do_matrix handler.)
    dataset_handlers = {
        'ChangeName': al.change_name,
        'ShowResult': al.execute_plot,
        'PrintResult': al.execute_print,
        'SubDatasetRow': al.get_subdataset_rows,
        'SubDatasetCols': al.get_subdataset_columns,
        'JoinByCols': al.join_by_cols,
        'JoinByRows': al.join_by_rows,
        'SplitByCols': al.split_by_cols,
        'SplitByRows': al.split_by_rows,
        'DoDimensionality': al.do_dimensionality,
        'DoClustering': al.do_clustering,
        'DoMatrix_Stomp': al.do_matrix,
        'DoMatrix_Best': al.do_matrix,
        'DoNormalization': al.do_normalization,
        'DoFeatures': al.do_features,
    }

    try:
        # Intents that do not operate on a stored dataset.
        if intent == 'RandomDataset':
            al.create_dataset(parameters)
        elif intent == 'LoadDataset':
            al.load_dataset(parameters)
        elif intent == 'ShowWorkspace':
            workspace = al.Workspace()
            print(list(workspace.get_all_dataset()))
        elif intent == 'GetBackend':
            al.get_library_backend(parameters['library'])
        elif intent == 'SetBackend':
            al.set_library_backend(parameters)
        elif intent == 'Exit - yes':
            al.exiting_yes(fulfillment)
        elif intent == 'Exit - no':
            al.exiting_no(fulfillment)
        elif not re.search("^Default|Exit", intent):
            # Every remaining non-Default/Exit intent works on a dataset;
            # fall back to the 'current' dataset when none was given.
            if not parameters.get("Dataset"):
                parameters['Dataset'] = 'current'
            if al.check_dataset(parameters):
                handler = dataset_handlers.get(intent)
                if handler is not None:
                    handler(parameters)
            else:
                if parameters["Dataset"] != 'current':
                    print("The object " + parameters["Dataset"] + " does not exist.")
                    al.voice("The object " + parameters["Dataset"] + " does not exist.")
                else:
                    print("There is no loaded dataset.")
                    al.voice("There is no loaded dataset.")
                print("Please, load a dataset or use a previously stored one before using any function.")
                al.voice("Please, load a dataset or use a previously stored one before using any function.")
                return
        print('DEBUG: Fulfillment text: {}\n'.format(fulfillment))
        if fulfillment:
            al.voice(fulfillment)
    except Exception as e:
        # Top-level boundary: report the failure but keep the session alive.
        print('An error in the execution has been raised.')
        print(e)
        return