Example 1
0
def test_intent():
    """Evaluate intent classification on the testing data and print metrics.

    Loads the persisted model, compares its intent predictions against the
    ground-truth labels, and prints precision, F1 score, accuracy and the
    full classification report.
    """
    # Load the persisted model from disk.
    interpreter = Interpreter.load(model_location)

    # Ground-truth intent labels for the testing data.
    targets = evaluate.get_intent_targets(testing_data)

    # Model predictions for the same testing data.
    predictions = evaluate.get_intent_predictions(interpreter, testing_data)

    # Produce a confusion matrix and summary statistics for the predictions.
    evaluate.evaluate_intents(targets, predictions)

    # Classification report plus precision, F1 score and accuracy.
    report, precision, f1, accuracy = evaluate.get_evaluation_metrics(
        targets, predictions)
    print("F1-Score:  {}\n".format(f1),
          "Precision: {}\n".format(precision),
          "Accuracy:  {}\n".format(accuracy),
          "Classification report: \n{}".format(report))
Example 2
0
def test_intent_evaluation_report(tmpdir_factory):
    """Verify that evaluate_intents writes a per-intent JSON report.

    Runs the intent evaluation over two synthetic results (one wrong, one
    correct prediction) and checks both the on-disk report and the in-memory
    prediction payload.
    """
    tmp_path = tmpdir_factory.mktemp("evaluation").strpath
    report_folder = os.path.join(tmp_path, "reports")
    report_filename = os.path.join(report_folder, "intent_report.json")
    utils.create_dir(report_folder)

    # One misclassified example (empty target) and one correct one.
    intent_results = [
        IntentEvaluationResult("", "restaurant_search", "I am hungry",
                               0.12345),
        IntentEvaluationResult("greet", "greet", "hello", 0.98765),
    ]

    # All optional artifacts disabled; only the JSON report is produced.
    result = evaluate_intents(
        intent_results,
        report_folder,
        successes_filename=None,
        errors_filename=None,
        confmat_filename=None,
        intent_hist_filename=None,
    )

    report = json.loads(utils.read_file(report_filename))

    # The "greet" intent was predicted perfectly on its single example.
    expected_greet = {
        "precision": 1.0,
        "recall": 1.0,
        "f1-score": 1.0,
        "support": 1,
    }

    expected_prediction = {
        'text': 'hello',
        'intent': 'greet',
        'predicted': 'greet',
        'confidence': 0.98765,
    }

    assert len(report.keys()) == 4
    assert report["greet"] == expected_greet
    assert result["predictions"][0] == expected_prediction