コード例 #1
0
def test_should_raise_when_non_zero_exit(logger, beverage_dataset):
    """A worker dying with a non-zero exit code must abort the whole run."""
    # When/Then: the segfaulting mock engine should surface as SystemExit
    with pytest.raises(SystemExit):
        compute_cross_val_metrics(
            dataset=beverage_dataset, engine_class=MockEngineSegfault,
            nb_folds=4, num_workers=4)
コード例 #2
0
    def test_should_raise_when_non_zero_exit(self):
        """A worker dying with a non-zero exit code must abort the whole run."""
        # Given
        resources_dir = os.path.dirname(os.path.abspath(__file__))
        dataset_path = os.path.join(resources_dir, "resources",
                                    "beverage_dataset.json")
        with io.open(dataset_path, encoding="utf8") as dataset_file:
            beverage_dataset = json.load(dataset_file)

        # When/Then: the segfaulting mock engine should surface as SystemExit
        with self.assertRaises(SystemExit):
            compute_cross_val_metrics(
                dataset=beverage_dataset,
                engine_class=MockEngineSegfault,
                nb_folds=4,
                num_workers=4)
コード例 #3
0
    def test_end_to_end_cross_val_metrics(self):
        """Cross-validation metrics should run end-to-end without raising."""
        # Given
        dataset_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                    "resources", "beverage_dataset.json")
        with io.open(dataset_path, encoding="utf8") as f:
            dataset = json.load(f)

        # When/Then
        try:
            engine_class = build_nlu_engine_class(MockTrainingEngine,
                                                  MockInferenceEngine)
            compute_cross_val_metrics(dataset=dataset,
                                      engine_class=engine_class,
                                      nb_folds=5)
        except Exception as e:
            # str(e) is safe even for exceptions raised with no arguments;
            # the previous e.args[0] would itself raise IndexError then,
            # masking the real failure.
            self.fail(str(e))
コード例 #4
0
    def test_compute_cross_val_metrics_without_slot_metrics(self):
        """Metrics computed with include_slot_metrics=False must omit the
        per-slot section and still report correct intent-level counts."""
        # Given
        dataset_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                    "resources", "beverage_dataset.json")
        with io.open(dataset_path, encoding="utf8") as f:
            dataset = json.load(f)

        # When/Then
        try:
            res = compute_cross_val_metrics(
                dataset=dataset, engine_class=MockEngine, nb_folds=2,
                include_slot_metrics=False)
        except Exception as e:
            # str(e) is safe even for exceptions raised with no arguments;
            # e.args[0] would raise IndexError in that case.
            self.fail(str(e))

        # MockEngine never predicts an intent, so every utterance is a
        # false negative for its intent and a false positive for "null".
        expected_metrics = {
            "null": {
                "intent": {
                    "true_positive": 0,
                    "false_positive": 11,
                    "false_negative": 0,
                    "precision": 0.0,
                    "recall": 0.0,
                    "f1": 0.0
                },
                "intent_utterances": 0,
                "exact_parsings": 0
            },
            "MakeCoffee": {
                "intent": {
                    "true_positive": 0,
                    "false_positive": 0,
                    "false_negative": 7,
                    "precision": 0.0,
                    "recall": 0.0,
                    "f1": 0.0
                },
                "intent_utterances": 7,
                "exact_parsings": 0
            },
            "MakeTea": {
                "intent": {
                    "true_positive": 0,
                    "false_positive": 0,
                    "false_negative": 4,
                    "precision": 0.0,
                    "recall": 0.0,
                    "f1": 0.0
                },
                "intent_utterances": 4,
                "exact_parsings": 0
            }
        }

        self.assertDictEqual(expected_metrics, res["metrics"])
コード例 #5
0
    def test_compute_cross_val_metrics_with_intents_filter(self):
        """Filtered intents ("intent2", "intent3") should be the only ones
        scored; everything else collapses into the "null" bucket."""
        # Given
        resources_dir = os.path.dirname(os.path.abspath(__file__))
        dataset_path = os.path.join(resources_dir, "resources",
                                    "keyword_matching_dataset.json")
        with io.open(dataset_path, encoding="utf8") as dataset_file:
            keyword_dataset = json.load(dataset_file)

        # When/Then

        res = compute_cross_val_metrics(
            dataset=keyword_dataset,
            engine_class=KeyWordMatchingEngine,
            nb_folds=2,
            intents_filter=["intent2", "intent3"],
            include_slot_metrics=False,
            seed=42)

        expected_metrics = {
            "null": {
                "intent": {
                    "true_positive": 0,
                    "false_positive": 2,
                    "false_negative": 0,
                    "precision": 0.0,
                    "recall": 0.0,
                    "f1": 0.0
                },
                "exact_parsings": 0,
                "intent_utterances": 0
            },
            "intent2": {
                "intent": {
                    "true_positive": 3,
                    "false_positive": 0,
                    "false_negative": 1,
                    "precision": 1.0,
                    "recall": 3. / 4.,
                    "f1": 0.8571428571428571
                },
                "exact_parsings": 3,
                "intent_utterances": 4
            },
            "intent3": {
                "intent": {
                    "true_positive": 2,
                    "false_positive": 0,
                    "false_negative": 1,
                    "precision": 1.0,
                    "recall": 2. / 3.,
                    "f1": 0.8
                },
                "exact_parsings": 2,
                "intent_utterances": 3
            },
        }

        self.assertDictEqual(expected_metrics, res["metrics"])
コード例 #6
0
def test_compute_cross_val_metrics_without_slot_metrics(
        logger, beverage_dataset):
    """Metrics computed with include_slot_metrics=False must omit the
    per-slot section and still report correct intent-level counts."""
    # When/Then
    try:
        res = compute_cross_val_metrics(
            dataset=beverage_dataset,
            engine_class=MockEngine,
            nb_folds=2,
            include_slot_metrics=False,
        )
    except Exception as e:
        # str(e) is safe even for exceptions raised with no arguments;
        # e.args[0] would raise IndexError in that case.
        raise AssertionError(str(e))

    # MockEngine never predicts an intent, so every utterance is a false
    # negative for its intent and a false positive for "null".
    expected_metrics = {
        "null": {
            "intent": {
                "true_positive": 0,
                "false_positive": 11,
                "false_negative": 0,
                "precision": 0.0,
                "recall": 0.0,
                "f1": 0.0,
            },
            "intent_utterances": 0,
            "exact_parsings": 0,
        },
        "MakeCoffee": {
            "intent": {
                "true_positive": 0,
                "false_positive": 0,
                "false_negative": 7,
                "precision": 0.0,
                "recall": 0.0,
                "f1": 0.0,
            },
            "intent_utterances": 7,
            "exact_parsings": 0,
        },
        "MakeTea": {
            "intent": {
                "true_positive": 0,
                "false_positive": 0,
                "false_negative": 4,
                "precision": 0.0,
                "recall": 0.0,
                "f1": 0.0,
            },
            "intent_utterances": 4,
            "exact_parsings": 0,
        },
    }

    # BUG FIX: the original "assert expected_metrics, res['metrics']" was an
    # assert-with-message — it only checked that expected_metrics is truthy
    # and never compared the two dicts. Use an equality assertion instead.
    assert expected_metrics == res["metrics"]
コード例 #7
0
def test_compute_cross_val_metrics_with_intents_filter(
        logger, keyword_matching_dataset):
    """Filtered intents ("intent2", "intent3") should be the only ones
    scored; everything else collapses into the "null" bucket."""
    # When/Then
    res = compute_cross_val_metrics(
        dataset=keyword_matching_dataset,
        engine_class=KeyWordMatchingEngine,
        nb_folds=2,
        intents_filter=["intent2", "intent3"],
        include_slot_metrics=False,
        seed=42,
    )

    expected_metrics = {
        "null": {
            "intent": {
                "true_positive": 0,
                "false_positive": 2,
                "false_negative": 0,
                "precision": 0.0,
                "recall": 0.0,
                "f1": 0.0,
            },
            "exact_parsings": 0,
            "intent_utterances": 0,
        },
        "intent2": {
            "intent": {
                "true_positive": 3,
                "false_positive": 0,
                "false_negative": 1,
                "precision": 1.0,
                "recall": 3.0 / 4.0,
                "f1": 0.8571428571428571,
            },
            "exact_parsings": 3,
            "intent_utterances": 4,
        },
        "intent3": {
            "intent": {
                "true_positive": 2,
                "false_positive": 0,
                "false_negative": 1,
                "precision": 1.0,
                "recall": 2.0 / 3.0,
                "f1": 0.8,
            },
            "exact_parsings": 2,
            "intent_utterances": 3,
        },
    }

    # BUG FIX: the original "assert expected_metrics, res['metrics']" was an
    # assert-with-message — it only checked that expected_metrics is truthy
    # and never compared the two dicts. Use an equality assertion instead.
    assert expected_metrics == res["metrics"]
コード例 #8
0
    def test_cross_val_metrics_should_skip_when_not_enough_data(self):
        """With more folds than utterances, cross-validation should be
        skipped and return empty metrics rather than fail."""
        # Given
        resources_dir = os.path.dirname(os.path.abspath(__file__))
        dataset_path = os.path.join(resources_dir, "resources",
                                    "beverage_dataset.json")

        # When
        result = compute_cross_val_metrics(dataset=dataset_path,
                                           engine_class=MockEngine,
                                           nb_folds=11)

        # Then
        expected_result = {METRICS: None, PARSING_ERRORS: []}
        self.assertDictEqual(expected_result, result)
コード例 #9
0
def test_cross_val_metrics_should_skip_when_not_enough_data(
        logger, beverage_dataset_path):
    """With more folds than utterances, cross-validation should be skipped
    and return empty metrics rather than fail."""
    # When
    result = compute_cross_val_metrics(dataset=beverage_dataset_path,
                                       engine_class=MockEngine,
                                       nb_folds=11)

    # Then
    expected_result = {
        AVERAGE_METRICS: None,
        CONFUSION_MATRIX: None,
        METRICS: None,
        PARSING_ERRORS: [],
    }
    # BUG FIX: the original "assert expected_result, result" was an
    # assert-with-message — it only checked that expected_result is truthy
    # and never compared the two dicts. Use an equality assertion instead.
    assert expected_result == result
コード例 #10
0
def test_compute_cross_val_metrics_with_multiple_workers(
        logger, beverage_dataset):
    """Running cross-validation with num_workers > 1 must produce the same
    metrics (including per-slot sections) as a single-worker run."""
    # When/Then
    # MockEngine never predicts an intent or slot, so every utterance is a
    # false negative for its intent and a false positive for "null", and
    # all slot counters stay at zero.
    expected_metrics = {
        "null": {
            "intent": {
                "true_positive": 0,
                "false_positive": 11,
                "false_negative": 0,
                "precision": 0.0,
                "recall": 0.0,
                "f1": 0.0,
            },
            "exact_parsings": 0,
            "slots": {},
            "intent_utterances": 0,
        },
        "MakeCoffee": {
            "intent": {
                "true_positive": 0,
                "false_positive": 0,
                "false_negative": 7,
                "precision": 0.0,
                "recall": 0.0,
                "f1": 0.0,
            },
            "exact_parsings": 0,
            "slots": {
                "number_of_cups": {
                    "true_positive": 0,
                    "false_positive": 0,
                    "false_negative": 0,
                    "precision": 0.0,
                    "recall": 0.0,
                    "f1": 0.0,
                }
            },
            "intent_utterances": 7,
        },
        "MakeTea": {
            "intent": {
                "true_positive": 0,
                "false_positive": 0,
                "false_negative": 4,
                "precision": 0.0,
                "recall": 0.0,
                "f1": 0.0,
            },
            "exact_parsings": 0,
            "slots": {
                "number_of_cups": {
                    "true_positive": 0,
                    "false_positive": 0,
                    "false_negative": 0,
                    "precision": 0.0,
                    "recall": 0.0,
                    "f1": 0.0,
                },
                "beverage_temperature": {
                    "true_positive": 0,
                    "false_positive": 0,
                    "false_negative": 0,
                    "precision": 0.0,
                    "recall": 0.0,
                    "f1": 0.0,
                },
            },
            "intent_utterances": 4,
        },
    }
    try:
        res = compute_cross_val_metrics(dataset=beverage_dataset,
                                        engine_class=MockEngine,
                                        nb_folds=2,
                                        num_workers=4)
    except Exception as e:
        # str(e) is safe even for exceptions raised with no arguments;
        # e.args[0] would raise IndexError in that case.
        raise AssertionError(str(e))
    # BUG FIX: the original "assert expected_metrics, res['metrics']" was an
    # assert-with-message — it only checked that expected_metrics is truthy
    # and never compared the two dicts. Use an equality assertion instead.
    assert expected_metrics == res["metrics"]