def run_evaluation(data_path, model,
                   report_folder=None,
                   successes_filename=None,
                   errors_filename='errors.json',
                   confmat_filename=None,
                   intent_hist_filename=None,
                   component_builder=None):  # pragma: no cover
    """Evaluate intent classification and entity extraction."""

    # use the interpreter that was passed in, or load one from the
    # given model path
    if isinstance(model, Interpreter):
        interpreter = model
    else:
        interpreter = Interpreter.load(model, component_builder)
    test_data = training_data.load_data(data_path,
                                        interpreter.model_metadata.language)
    extractors = get_entity_extractors(interpreter)
    entity_predictions, tokens = get_entity_predictions(interpreter,
                                                        test_data)

    if duckling_extractors.intersection(extractors):
        entity_predictions = remove_duckling_entities(entity_predictions)
        extractors = remove_duckling_extractors(extractors)

    result = {
        "intent_evaluation": None,
        "entity_evaluation": None
    }

    if report_folder:
        utils.create_dir(report_folder)

    if is_intent_classifier_present(interpreter):
        intent_targets = get_intent_targets(test_data)
        intent_results = get_intent_predictions(
                intent_targets, interpreter, test_data)

        logger.info("Intent evaluation results:")
        result['intent_evaluation'] = evaluate_intents(intent_results,
                                                       report_folder,
                                                       successes_filename,
                                                       errors_filename,
                                                       confmat_filename,
                                                       intent_hist_filename)

    if extractors:
        entity_targets = get_entity_targets(test_data)

        logger.info("Entity evaluation results:")
        result['entity_evaluation'] = evaluate_entities(entity_targets,
                                                        entity_predictions,
                                                        tokens,
                                                        extractors,
                                                        report_folder)

    return result
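
# A minimal usage sketch for run_evaluation; the paths below are
# hypothetical and assume a persisted NLU model directory plus a test file
# in a format accepted by training_data.load_data:
#
#     results = run_evaluation("data/test_data.json", "models/nlu/current",
#                              report_folder="evaluation_reports")
#     print(results["intent_evaluation"])
#     print(results["entity_evaluation"])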
def test_entity():
    # `model_location` and `testing_data` are assumed to be provided by the
    # surrounding test module (e.g. as pytest fixtures)

    # load the trained model
    interpreter = Interpreter.load(model_location)

    duckling_extractors = {"ner_duckling", "ner_duckling_http"}

    # collect per-extractor metrics in a nested defaultdict
    entity_results = defaultdict(lambda: defaultdict(list))

    # get the entity extractors present in the interpreter's pipeline
    extractors = evaluate.get_entity_extractors(interpreter)

    # get entity predictions and tokens
    entity_predictions, tokens = evaluate.get_entity_predictions(
        interpreter, testing_data)

    # duckling entities are rule-based, so exclude them from the evaluation
    if duckling_extractors.intersection(extractors):
        entity_predictions = evaluate.remove_duckling_entities(
            entity_predictions)
        extractors = evaluate.remove_duckling_extractors(extractors)

    if not extractors:
        return entity_results

    # get entity targets
    entity_targets = evaluate.get_entity_targets(testing_data)

    # get aligned predictions
    aligned_predictions = evaluate.align_all_entity_predictions(
        entity_targets, entity_predictions, tokens, extractors)

    merged_targets = evaluate.merge_labels(aligned_predictions)
    merged_targets = evaluate.substitute_labels(merged_targets, "O",
                                                "no_entity")

    for extractor in extractors:
        merged_predictions = evaluate.merge_labels(aligned_predictions,
                                                   extractor)
        merged_predictions = evaluate.substitute_labels(
            merged_predictions, "O", "no_entity")
        report, precision, f1, accuracy = evaluate.get_evaluation_metrics(
            merged_targets, merged_predictions)
        entity_results[extractor]["Accuracy"].append(accuracy)
        entity_results[extractor]["F1-score"].append(f1)
        entity_results[extractor]["Precision"].append(precision)

    print("entity_results:  {}\n".format(entity_results),
          "Classification report: \n{}".format(report))
def test_remove_duckling_extractors(duckling_interpreter):
    target = set()

    patched = remove_duckling_extractors({"ner_duckling_http"})
    assert patched == target
def test_remove_duckling_extractors(duckling_interpreter):
    target = set()

    patched = remove_duckling_extractors({"ner_duckling"})
    assert patched == target
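
# Both tests above expect the duckling-based extractor names to be removed
# entirely. A minimal sketch of the behaviour under test (not the actual
# library implementation):

def remove_duckling_extractors_sketch(extractors):
    duckling_extractors = {"ner_duckling", "ner_duckling_http"}
    # duckling entities come from rule-based matching, so its extractors
    # are dropped before computing ML evaluation metrics
    return extractors - duckling_extractors

assert remove_duckling_extractors_sketch({"ner_duckling"}) == set()
assert remove_duckling_extractors_sketch({"ner_duckling_http"}) == set()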