from pathlib import Path
import json
import os

import rasa.shared.utils.io
from rasa.nlu.test import (
    ResponseSelectionEvaluationResult,
    evaluate_response_selections,
)


# Rasa 2.x variant: response selector results are keyed by intent_response_key.
def test_response_evaluation_report(tmp_path: Path):
    path = tmp_path / "evaluation"
    path.mkdir()
    report_folder = str(path / "reports")
    report_filename = os.path.join(report_folder, "response_selection_report.json")

    rasa.shared.utils.io.create_directory(report_folder)

    response_results = [
        ResponseSelectionEvaluationResult(
            "chitchat/ask_weather",
            "chitchat/ask_weather",
            "What's the weather",
            0.65432,
        ),
        ResponseSelectionEvaluationResult(
            "chitchat/ask_name",
            "chitchat/ask_name",
            "What's your name?",
            0.98765,
        ),
    ]

    result = evaluate_response_selections(
        response_results,
        report_folder,
        successes=True,
        errors=True,
        disable_plotting=False,
    )

    report = json.loads(rasa.shared.utils.io.read_file(report_filename))

    name_query_results = {
        "precision": 1.0,
        "recall": 1.0,
        "f1-score": 1.0,
        "support": 1,
        "confused_with": {},
    }

    prediction = {
        "text": "What's your name?",
        "intent_response_key_target": "chitchat/ask_name",
        "intent_response_key_prediction": "chitchat/ask_name",
        "confidence": 0.98765,
    }

    assert len(report.keys()) == 5
    assert report["chitchat/ask_name"] == name_query_results
    assert result["predictions"][1] == prediction
    assert os.path.exists(
        os.path.join(report_folder, "response_selection_confusion_matrix.png")
    )
    assert os.path.exists(
        os.path.join(report_folder, "response_selection_histogram.png")
    )
    assert not os.path.exists(
        os.path.join(report_folder, "response_selection_errors.json")
    )
    assert os.path.exists(
        os.path.join(report_folder, "response_selection_successes.json")
    )
import json
import os

import rasa.utils.io
from rasa.nlu.test import (
    ResponseSelectionEvaluationResult,
    evaluate_response_selections,
)


# Rasa 1.x variant of the same test: results carry the intent and the
# response text as separate fields.
def test_response_evaluation_report(tmpdir_factory):
    path = tmpdir_factory.mktemp("evaluation").strpath
    report_folder = os.path.join(path, "reports")
    report_filename = os.path.join(report_folder, "response_selection_report.json")

    rasa.utils.io.create_directory(report_folder)

    response_results = [
        ResponseSelectionEvaluationResult(
            "chitchat",
            "It's sunny in Berlin",
            "It's sunny in Berlin",
            "What's the weather",
            0.65432,
        ),
        ResponseSelectionEvaluationResult(
            "chitchat",
            "My name is Mr.bot",
            "My name is Mr.bot",
            "What's your name?",
            0.98765,
        ),
    ]

    result = evaluate_response_selections(
        response_results,
        report_folder,
        successes=False,
        errors=False,
        disable_plotting=True,
    )

    report = json.loads(rasa.utils.io.read_file(report_filename))

    name_query_results = {
        "precision": 1.0,
        "recall": 1.0,
        "f1-score": 1.0,
        "support": 1,
        "confused_with": {},
    }

    prediction = {
        "text": "What's your name?",
        "intent_target": "chitchat",
        "response_target": "My name is Mr.bot",
        "response_predicted": "My name is Mr.bot",
        "confidence": 0.98765,
    }

    assert len(report.keys()) == 5
    assert report["My name is Mr.bot"] == name_query_results
    assert result["predictions"][1] == prediction
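# Both test variants assert on the JSON report that
# evaluate_response_selections writes to disk. A minimal sketch of inspecting
# that artifact outside a test, assuming only the filenames asserted above
# ("reports" below is a hypothetical stand-in for report_folder):
import json
import os

report_folder = "reports"  # hypothetical; matches report_folder in the tests

with open(os.path.join(report_folder, "response_selection_report.json")) as f:
    report = json.load(f)

# The report maps each response (or intent_response_key) to sklearn-style
# metrics, alongside aggregate entries such as "macro avg" / "weighted avg".
for key, metrics in report.items():
    if isinstance(metrics, dict) and "f1-score" in metrics:
        print(f"{key}: f1={metrics['f1-score']:.2f}, support={metrics['support']}")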
import os
from typing import Dict, Optional, Text


def run_test_on_nlu(nlu_path: str, model_path: str):
    """
    Run tests on NLU data.

    Args:
        nlu_path: path to the NLU test data (YAML).
        model_path: path to the trained model the tests should run against.

    Returns:
        dictionary with evaluation results
    """
    from rasa.model import get_model
    import rasa.shared.nlu.training_data.loading
    from rasa.nlu.model import Interpreter
    from rasa.nlu.test import (
        remove_pretrained_extractors,
        get_eval_data,
        evaluate_intents,
        evaluate_response_selections,
        get_entity_extractors,
    )
    from kairon import Utility

    # Unpack the model archive and load only its NLU interpreter.
    unpacked_model = get_model(model_path)
    nlu_model = os.path.join(unpacked_model, "nlu")
    interpreter = Interpreter.load(nlu_model)
    interpreter.pipeline = remove_pretrained_extractors(interpreter.pipeline)
    test_data = rasa.shared.nlu.training_data.loading.load_data(
        nlu_path, interpreter.model_metadata.language
    )

    result: Dict[Text, Optional[Dict]] = {
        "intent_evaluation": None,
        "entity_evaluation": None,
        "response_selection_evaluation": None,
    }

    (intent_results, response_selection_results, entity_results) = get_eval_data(
        interpreter, test_data
    )

    if intent_results:
        successes = []
        errors = []
        result["intent_evaluation"] = evaluate_intents(
            intent_results, None, False, False, True
        )
        # Drop the bulky per-message predictions and the sklearn report;
        # only aggregate metrics plus the custom error list are kept.
        if result["intent_evaluation"].get("predictions"):
            del result["intent_evaluation"]["predictions"]
            del result["intent_evaluation"]["report"]
        for r in intent_results:
            if r.intent_target == r.intent_prediction:
                # Successes are deliberately not collected (left disabled in
                # the original source), so success_count stays 0.
                pass
                # successes.append({
                #     "text": r.message,
                #     "intent": r.intent_target,
                #     "intent_prediction": {
                #         "name": r.intent_prediction,
                #         "confidence": r.confidence,
                #     },
                # })
            else:
                errors.append({
                    "text": r.message,
                    "intent": r.intent_target,
                    "intent_prediction": {
                        "name": r.intent_prediction,
                        "confidence": r.confidence,
                    },
                })
        result["intent_evaluation"]["total_count"] = len(successes) + len(errors)
        result["intent_evaluation"]["success_count"] = len(successes)
        result["intent_evaluation"]["failure_count"] = len(errors)
        result["intent_evaluation"]["successes"] = successes
        result["intent_evaluation"]["errors"] = errors

    if response_selection_results:
        successes = []
        errors = []
        result["response_selection_evaluation"] = evaluate_response_selections(
            response_selection_results, None, False, False, True
        )
        if result["response_selection_evaluation"].get("predictions"):
            del result["response_selection_evaluation"]["predictions"]
            del result["response_selection_evaluation"]["report"]
        for r in response_selection_results:
            if r.intent_response_key_prediction == r.intent_response_key_target:
                pass
                # successes.append({
                #     "text": r.message,
                #     "intent_response_key_target": r.intent_response_key_target,
                #     "intent_response_key_prediction": {
                #         "name": r.intent_response_key_prediction,
                #         "confidence": r.confidence,
                #     },
                # })
            else:
                # Messages without a response key target are not counted as
                # selector errors.
                if not Utility.check_empty_string(r.intent_response_key_target):
                    errors.append(
                        {
                            "text": r.message,
                            "intent_response_key_target": r.intent_response_key_target,
                            "intent_response_key_prediction": {
                                "name": r.intent_response_key_prediction,
                                "confidence": r.confidence,
                            },
                        }
                    )
        result["response_selection_evaluation"]["total_count"] = len(successes) + len(errors)
        result["response_selection_evaluation"]["success_count"] = len(successes)
        result["response_selection_evaluation"]["failure_count"] = len(errors)
        result["response_selection_evaluation"]["successes"] = successes
        result["response_selection_evaluation"]["errors"] = errors

    if any(entity_results):
        extractors = get_entity_extractors(interpreter)
        # __evaluate_entities is a private helper of kairon's ModelTester;
        # this function is defined inside that class, so the name-mangled
        # reference resolves there.
        result["entity_evaluation"] = ModelTester.__evaluate_entities(
            entity_results, extractors
        )

    return result
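# A minimal usage sketch for run_test_on_nlu, assuming a trained Rasa model
# archive and YAML NLU test data; both paths below are hypothetical, and in
# kairon this helper is actually invoked via the ModelTester class.
results = run_test_on_nlu(
    nlu_path="tests/test_nlu.yml",
    model_path="models/20210101-120000.tar.gz",
)

# Each evaluation slot is None when the test data contains no matching
# examples, so guard before reading the counters.
if results["intent_evaluation"]:
    print("intent failures:", results["intent_evaluation"]["failure_count"])
if results["response_selection_evaluation"]:
    print("selector failures:", results["response_selection_evaluation"]["failure_count"])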