def test_subdataset_rows(self):
    """SubDatasetRow intent: rows [from, to) are sliced into 'subrow0random0'."""
    order = "obtain a subset by rows from random0 from 10 to 60"
    data = response(self, order)
    result = data['queryResult']
    params = result['parameters']

    # Intent-detection assertions.
    self.assertEqual(result['intent']['displayName'], 'SubDatasetRow')
    self.assertGreater(result['intentDetectionConfidence'], 0.8)
    self.assertEqual(params['Dataset'], 'random0')
    self.assertEqual(params['from'], 10)
    self.assertEqual(params['to'], 60)

    # Run the action on a fresh random dataset and verify the row slice.
    al.create_dataset({'columns': 10, 'rows': 200, 'values': [0, 1]})
    al.get_subdataset_rows(params)
    dataset = al.Workspace().get_dataset('subrow0random0')
    index = dataset.index
    self.assertEqual(index.min(), params['from'])
    self.assertEqual(index.max(), params['to'] - 1)  # upper bound is exclusive
    self.assertEqual(index.size, 50)
def test_change_name(self):
    """ChangeName intent: the dataset is reachable only under its new name."""
    order = "rename random0 to r_a_n_d_o_m_0"
    data = response(self, order)
    result = data['queryResult']
    params = result['parameters']

    self.assertEqual(result['intent']['displayName'], 'ChangeName')
    self.assertGreater(result['intentDetectionConfidence'], 0.95)
    self.assertEqual(params['Dataset'], 'random0')
    self.assertEqual(params['NameInto'], 'r_a_n_d_o_m_0')

    al.create_dataset({'columns': 10, 'rows': 200, 'values': [0, 1]})
    workspace = al.Workspace()

    # Before the rename only the old name resolves.
    self.assertIsNotNone(workspace.get_dataset('random0'))
    self.assertIsNone(workspace.get_dataset('r_a_n_d_o_m_0'))
    before = workspace.get_dataset('random0')

    al.change_name(params)

    # After the rename only the new name resolves, with identical contents.
    self.assertIsNone(workspace.get_dataset('random0'))
    self.assertIsNotNone(workspace.get_dataset('r_a_n_d_o_m_0'))
    after = workspace.get_dataset('r_a_n_d_o_m_0')
    self.assertTrue(pd.DataFrame.equals(before, after))
def test_split_cols(self):
    """SplitByCols intent: random0 (10 cols) splits into five 2-column parts."""
    order = "split the dataset random0 by 2 columns"
    data = response(self, order)
    result = data['queryResult']
    params = result['parameters']

    self.assertEqual(result['intent']['displayName'], 'SplitByCols')
    self.assertGreater(result['intentDetectionConfidence'], 0.8)
    self.assertEqual(params['Dataset'], 'random0')
    self.assertEqual(params['split'], 2)

    al.create_dataset({'columns': 10, 'rows': 200, 'values': [0, 1]})
    al.split_by_cols(params)

    workspace = al.Workspace()
    # The split pieces are registered as random0c0, random0c1, ...
    names = re.findall('random0c[0-9]*', str(workspace.get_all_dataset()))
    self.assertEqual(len(names), 5)
    for name in names:
        part = workspace.get_dataset(name)
        self.assertEqual(part.columns.size, 2)
        self.assertEqual(part.index.size, 200)
def test_join_rows(self):
    """JoinByRows intent: two 200-row datasets concatenate into 400 rows."""
    order = "join by rows the datasets random0 and random1"
    data = response(self, order)
    result = data['queryResult']
    params = result['parameters']

    self.assertEqual(result['intent']['displayName'], 'JoinByRows')
    self.assertGreater(result['intentDetectionConfidence'], 0.8)
    self.assertEqual(params['Dataset'], 'random0')
    self.assertEqual(params['Dataset2'], 'random1')

    # Two identically shaped datasets (random0, random1) to be joined.
    for _ in range(2):
        al.create_dataset({'columns': 10, 'rows': 200, 'values': [0, 1]})
    al.join_by_rows(params)

    joined = al.Workspace().get_dataset('join0')
    self.assertEqual(joined.columns.size, 10)
    self.assertEqual(joined.index.size, 400)
def test_subdataset_cols(self):
    """SubDatasetCols intent: the named columns are extracted in order."""
    order = "obtain a subset from random0 by columns at col0, col2, and col7"
    data = response(self, order)
    result = data['queryResult']
    params = result['parameters']
    expected = ['col0', 'col2', 'col7']

    self.assertEqual(result['intent']['displayName'], 'SubDatasetCols')
    self.assertGreater(result['intentDetectionConfidence'], 0.8)
    self.assertEqual(params['Dataset'], 'random0')
    self.assertEqual(params['cols'], expected)

    al.create_dataset({'columns': 10, 'rows': 200, 'values': [0, 1]})
    al.get_subdataset_columns(params)
    dataset = al.Workspace().get_dataset('subcol0random0')

    self.assertEqual(dataset.columns.size, 3)
    for actual, wanted in zip(dataset.columns.to_list(), expected):
        self.assertEqual(actual, wanted)
def test_create_random(self):
    """RandomDataset intent: shape and value range match the spoken order."""
    order = "create random dataset for 5 row and 10 columns between -12.1 and 80"
    data = response(self, order)
    result = data['queryResult']
    params = result['parameters']

    self.assertEqual(result['intent']['displayName'], 'RandomDataset')
    self.assertGreater(result['intentDetectionConfidence'], 0.8)

    al.create_dataset(params)
    self.workspace = al.Workspace()
    random = self.workspace.get_dataset('current')

    # Shape follows the requested rows/columns.
    expected_shape = (int(params['rows']), int(params['columns']))
    self.assertEqual(random.shape, expected_shape,
                     '(n_row, n_column) do not match')

    # Every generated value lies within the requested bounds.
    low, high = (float(v) for v in params['values'])
    self.assertGreaterEqual(random.values.min(), low)
    self.assertLessEqual(random.values.max(), high)
def detect_intent_text(project_id, session_id, text, language_code):
    """Detect the intent of the text and execute the matching instruction.

    Using the same `session_id` between requests allows continuation of the
    conversation.

    :param project_id: ID of the project
    :param session_id: ID of the session
    :param text: The text input for analyse
    :param language_code: Code of the language
    """
    session_client = dialogflow.SessionsClient()
    session = session_client.session_path(project_id, session_id)
    print('Session path: {}\n'.format(session))

    text_input = dialogflow.types.TextInput(text=text,
                                            language_code=language_code)
    query_input = dialogflow.types.QueryInput(text=text_input)
    response = session_client.detect_intent(session=session,
                                            query_input=query_input)

    # Conversion of Protocol Buffer to JSON so parameters are a plain dict.
    data = json.loads(pbjson.MessageToJson(response))
    parameters = data['queryResult']['parameters']
    print(parameters)
    print('=' * 20)
    print('DEBUG: Query text: {}'.format(response.query_result.query_text))
    print('DEBUG: Detected intent: {} (confidence: {})\n'.format(
        response.query_result.intent.display_name,
        response.query_result.intent_detection_confidence))

    intent = response.query_result.intent.display_name
    try:
        # Intents that do not require a pre-existing dataset.
        if intent == 'RandomDataset':
            al.create_dataset(parameters)
        elif intent == 'LoadDataset':
            al.load_dataset(parameters)
        elif intent == 'ShowWorkspace':
            print(list(al.Workspace().get_all_dataset()))
        elif intent == 'GetBackend':
            al.get_library_backend(parameters['library'])
        elif intent == 'SetBackend':
            al.set_library_backend(parameters)
        elif intent == 'Exit - yes':
            al.exiting_yes(response.query_result.fulfillment_text)
        elif intent == 'Exit - no':
            al.exiting_no(response.query_result.fulfillment_text)
        elif not re.search("^Default|Exit", intent):
            # Every remaining intent operates on a dataset; fall back to the
            # implicit 'current' dataset when none was named in the order.
            if not parameters.get("Dataset"):
                parameters['Dataset'] = 'current'
            if al.check_dataset(parameters):
                # Dispatch table instead of a long if/elif chain.  Both matrix
                # intents intentionally share al.do_matrix.
                actions = {
                    'ChangeName': al.change_name,
                    'ShowResult': al.execute_plot,
                    'PrintResult': al.execute_print,
                    'SubDatasetRow': al.get_subdataset_rows,
                    'SubDatasetCols': al.get_subdataset_columns,
                    'JoinByCols': al.join_by_cols,
                    'JoinByRows': al.join_by_rows,
                    'SplitByCols': al.split_by_cols,
                    'SplitByRows': al.split_by_rows,
                    'DoDimensionality': al.do_dimensionality,
                    'DoClustering': al.do_clustering,
                    'DoMatrix_Stomp': al.do_matrix,
                    'DoMatrix_Best': al.do_matrix,
                    'DoNormalization': al.do_normalization,
                    'DoFeatures': al.do_features,
                }
                action = actions.get(intent)
                if action is not None:
                    action(parameters)
            else:
                # The requested dataset is missing: report it (spoken and
                # printed) and abort without reading the fulfillment text.
                if parameters["Dataset"] != 'current':
                    message = ("The object " + parameters["Dataset"]
                               + " does not exist.")
                else:
                    message = "There is no loaded dataset."
                print(message)
                al.voice(message)
                advice = ("Please, load a dataset or use a previously stored "
                          "one before using any function.")
                print(advice)
                al.voice(advice)
                return

        print('DEBUG: Fulfillment text: {}\n'.format(
            response.query_result.fulfillment_text))
        if response.query_result.fulfillment_text:
            al.voice(response.query_result.fulfillment_text)
    except Exception as e:
        # Keep the assistant loop alive on any action failure: report the
        # error instead of propagating it to the caller.
        print('An error in the execution has been raised.')
        print(e)
        return