Example #1
def test_intent_evaluation_report(tmpdir_factory):
    path = tmpdir_factory.mktemp("evaluation").strpath
    report_folder = os.path.join(path, "reports")
    report_filename = os.path.join(report_folder, "intent_report.json")

    utils.create_dir(report_folder)

    intent_results = [
        IntentEvaluationResult("", "restaurant_search",
                               "I am hungry", 0.12345),
        IntentEvaluationResult("greet", "greet",
                               "hello", 0.98765)]

    result = evaluate_intents(intent_results,
                              report_folder,
                              successes_filename=None,
                              errors_filename=None,
                              confmat_filename=None,
                              intent_hist_filename=None)

    report = json.loads(utils.read_file(report_filename))

    greet_results = {"precision": 1.0,
                     "recall": 1.0,
                     "f1-score": 1.0,
                     "support": 1}

    prediction = {'text': 'hello',
                  'intent': 'greet',
                  'predicted': 'greet',
                  'confidence': 0.98765}

    assert len(report.keys()) == 4
    assert report["greet"] == greet_results
    assert result["predictions"][0] == prediction
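The test constructs its inputs by hand instead of running a model. For reference, a minimal sketch of the result container is shown below; the field names and their order (target intent, predicted intent, message text, confidence) are inferred from the positional arguments above and are an assumption, not the actual rasa_nlu definition.

from collections import namedtuple

# Assumed field order, inferred from the calls in the test above.
IntentEvaluationResult = namedtuple(
    "IntentEvaluationResult",
    ["target", "prediction", "message", "confidence"])

# A correct prediction for the "greet" intent, as used in the test:
IntentEvaluationResult("greet", "greet", "hello", 0.98765)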
Example #2
def test_entity_evaluation_report(tmpdir_factory):
    path = tmpdir_factory.mktemp("evaluation").strpath
    report_folder = os.path.join(path, "reports")

    mock_extractors = ["A", "B"]
    report_filename_a = os.path.join(report_folder, "A_report.json")
    report_filename_b = os.path.join(report_folder, "B_report.json")

    utils.create_dir(report_folder)

    result = evaluate_entities([EN_targets], [EN_predicted], [EN_tokens],
                               mock_extractors, report_folder)

    report_a = json.loads(utils.read_file(report_filename_a))
    report_b = json.loads(utils.read_file(report_filename_b))

    assert len(report_a) == 8
    assert report_a["datetime"]["support"] == 1.0
    assert report_b["macro avg"]["recall"] == 0.2
    assert result["A"]["accuracy"] == 0.75
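EN_targets, EN_predicted and EN_tokens are fixtures defined elsewhere in the test module. Purely as an illustration of the kind of data evaluate_entities consumes (the concrete values, field names and token representation are hypothetical, not the real fixtures), they might look roughly like:

# Hypothetical fixture shapes -- for illustration only, not the actual test data.
EN_tokens = ["show", "me", "chinese", "restaurants", "tomorrow"]   # tokenized message
EN_targets = [                                                     # gold entity annotations
    {"start": 8, "end": 15, "value": "chinese", "entity": "cuisine"},
]
EN_predicted = [                                                   # extractor output
    {"start": 28, "end": 36, "value": "tomorrow", "entity": "datetime",
     "extractor": "A"},
]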
Example #3
def _guess_format(filename: Text) -> Text:
    """Applies heuristics to guess the data format of a file."""
    guess = UNK
    content = utils.read_file(filename)
    try:
        js = json.loads(content)
    except ValueError:
        if any(marker in content for marker in _markdown_section_markers):
            guess = MARKDOWN
    else:
        for fformat, format_heuristic in _json_format_heuristics.items():
            if format_heuristic(js, filename):
                guess = fformat
                break

    return guess
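The marker list and the heuristics dict the function consults are module-level constants that are not part of this snippet. A plausible sketch of their shape, offered only as an assumption about how they could be defined:

# Illustrative only -- the real constants live in the data loading module.
_markdown_section_markers = ["## intent:", "## synonym:", "## regex:", "## lookup:"]

_json_format_heuristics = {
    # each heuristic receives the parsed JSON and the filename and returns a bool
    RASA: lambda js, fn: "rasa_nlu_data" in js,
    WIT: lambda js, fn: "data" in js and isinstance(js.get("data"), list),
}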
Example #4
def _guess_format(filename):
    # type: (Text) -> Text
    """Applies heuristics to guess the data format of a file."""
    guess = UNK
    content = utils.read_file(filename)
    try:
        js = json.loads(content)
    except ValueError:
        if "## intent:" in content:
            guess = MARKDOWN
    else:
        for fformat, format_heuristic in _json_format_heuristics.items():
            if format_heuristic(js, filename):
                guess = fformat

    return guess
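Compared with the variant in the previous example, this older version recognises Markdown only by the single "## intent:" marker and never breaks out of the heuristics loop, so if several heuristics matched, the last one checked would win rather than the first.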
Example #5
    @classmethod
    def load(
            cls,
            model_dir=None,  # type: Optional[Text]
            model_metadata=None,  # type: Optional[Metadata]
            cached_component=None,  # type: Optional[NerdictFeaturizer]
            **kwargs  # type: **Any
    ):
        # type: (...) -> NerdictFeaturizer
        """Loads the featurizer, restoring persisted patterns if available."""

        meta = model_metadata.for_component(cls.name)
        regex_file = meta.get("ner_dicts")

        if regex_file and os.path.exists(regex_file):
            known_patterns = utils.read_file(regex_file)
            return NerdictFeaturizer(meta, known_patterns=known_patterns)
        else:
            return NerdictFeaturizer(meta)
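The "ner_dicts" entry in the component metadata is expected to point at a file written by the matching persist() method (not shown here); when that file is absent, the featurizer is loaded without any known patterns.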
Example #6
    def read(self, filename, **kwargs):
        """Reads TrainingData from a file."""
        return self.reads(utils.read_file(filename), **kwargs)
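Every snippet in this listing goes through utils.read_file to get at file contents. Its implementation is not part of the listing; a minimal sketch of what such a helper typically does, assuming UTF-8 text files, is:

import io

def read_file(filename, encoding="utf-8"):
    """Returns the content of a file as a single string (sketch, not the actual
    rasa_nlu implementation)."""
    with io.open(filename, encoding=encoding) as f:
        return f.read()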