Example #1
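Checks TEDPolicy's output normalization: the prediction is not end-to-end, exactly `RANKING_LENGTH` confidences are non-zero, the confidences sum to 1, and `train_utils.normalize` is called during prediction (verified by monkeypatching it with a `Mock`).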
    def test_normalization(
        self,
        trained_policy: TEDPolicy,
        tracker: DialogueStateTracker,
        default_domain: Domain,
        monkeypatch: MonkeyPatch,
    ):
        # first check the output is what we expect
        prediction = trained_policy.predict_action_probabilities(
            tracker, default_domain, RegexInterpreter())
        assert not prediction.is_end_to_end_prediction
        # count number of non-zero confidences
        assert (
            sum(confidence > 0 for confidence in prediction.probabilities)
            == trained_policy.config[RANKING_LENGTH]
        )
        # check that the norm is still 1
        assert sum(prediction.probabilities) == pytest.approx(1)

        # also check our function is called
        mock = Mock()
        monkeypatch.setattr(train_utils, "normalize", mock.normalize)
        trained_policy.predict_action_probabilities(tracker, default_domain,
                                                    RegexInterpreter())

        mock.normalize.assert_called_once()
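
The fixtures `trained_policy`, `tracker`, and `default_domain` come from the suite's conftest and are not shown on this page. A minimal sketch of plausible stand-ins, using the Rasa 2.x API seen in this example (the domain and training story are illustrative, not the real fixtures):

import pytest
from rasa.core.policies.ted_policy import TEDPolicy
from rasa.shared.core.constants import ACTION_LISTEN_NAME
from rasa.shared.core.domain import Domain
from rasa.shared.core.events import ActionExecuted, UserUttered
from rasa.shared.core.trackers import DialogueStateTracker
from rasa.shared.nlu.interpreter import RegexInterpreter


@pytest.fixture
def default_domain() -> Domain:
    # Tiny illustrative domain; the real fixture loads a full test domain.
    return Domain.from_yaml(
        """
        intents:
        - greet
        responses:
          utter_greet:
          - text: "hi"
        """
    )


@pytest.fixture
def tracker() -> DialogueStateTracker:
    return DialogueStateTracker.from_events(
        "sender", evts=[ActionExecuted(ACTION_LISTEN_NAME)]
    )


@pytest.fixture
def trained_policy(default_domain: Domain) -> TEDPolicy:
    policy = TEDPolicy()
    # One short greet story stands in for the suite's training data.
    training_tracker = DialogueStateTracker.from_events(
        "training",
        evts=[
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered(intent={"name": "greet"}),
            ActionExecuted("utter_greet"),
            ActionExecuted(ACTION_LISTEN_NAME),
        ],
    )
    policy.train([training_tracker], default_domain, RegexInterpreter())
    return policy
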
Example #2
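Verifies that TEDPolicy ignores `action_unlikely_intent` events: two trackers that differ only in that event must yield identical prediction probabilities.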
    def test_ignore_action_unlikely_intent(
        self,
        trained_policy: TEDPolicy,
        default_domain: Domain,
        tracker_events_with_action: List[Event],
        tracker_events_without_action: List[Event],
    ):
        precomputations = None
        tracker_with_action = DialogueStateTracker.from_events(
            "test 1", evts=tracker_events_with_action)
        tracker_without_action = DialogueStateTracker.from_events(
            "test 2", evts=tracker_events_without_action)
        prediction_with_action = trained_policy.predict_action_probabilities(
            tracker_with_action,
            default_domain,
            precomputations,
        )
        prediction_without_action = trained_policy.predict_action_probabilities(
            tracker_without_action,
            default_domain,
            precomputations,
        )

        # If the weights didn't change, both trackers
        # should result in the same prediction.
        assert (prediction_with_action.probabilities ==
                prediction_without_action.probabilities)
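
The two event-list fixtures are defined elsewhere in the suite. An illustrative pair, assuming the `ACTION_UNLIKELY_INTENT_NAME` constant from `rasa.shared.core.constants` (the dialogues are identical except for the extra event):

from rasa.shared.core.constants import (
    ACTION_LISTEN_NAME,
    ACTION_UNLIKELY_INTENT_NAME,
)
from rasa.shared.core.events import ActionExecuted, UserUttered

tracker_events_without_action = [
    ActionExecuted(ACTION_LISTEN_NAME),
    UserUttered(intent={"name": "greet"}),
]

tracker_events_with_action = [
    ActionExecuted(ACTION_LISTEN_NAME),
    UserUttered(intent={"name": "greet"}),
    # The only difference: an action_unlikely_intent prediction that
    # TEDPolicy should skip when featurizing the tracker.
    ActionExecuted(ACTION_UNLIKELY_INTENT_NAME),
]
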
Example #3
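Ensures `MessageProcessor._get_next_action_probabilities` passes its interpreter through to each policy; the policy's predict method is replaced with a function that asserts it received the processor's interpreter.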
def test_get_next_action_probabilities_passes_interpreter_to_policies(
    monkeypatch: MonkeyPatch,
):
    policy = TEDPolicy()
    test_interpreter = Mock()

    def predict_action_probabilities(
        tracker: DialogueStateTracker,
        domain: Domain,
        interpreter: NaturalLanguageInterpreter,
        **kwargs,
    ) -> List[float]:
        assert interpreter == test_interpreter
        return [1, 0]

    policy.predict_action_probabilities = predict_action_probabilities
    ensemble = SimplePolicyEnsemble(policies=[policy])

    domain = Domain.empty()

    processor = MessageProcessor(test_interpreter, ensemble, domain,
                                 InMemoryTrackerStore(domain), Mock())

    # This should not raise
    processor._get_next_action_probabilities(
        DialogueStateTracker.from_events("lala",
                                         [ActionExecuted(ACTION_LISTEN_NAME)]))
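
For reference, the imports this snippet needs under the Rasa 2.x module layout (several of these paths moved in later releases, so treat them as an assumption about the version in use):

from typing import List
from unittest.mock import Mock

from _pytest.monkeypatch import MonkeyPatch

from rasa.core.policies.ensemble import SimplePolicyEnsemble
from rasa.core.policies.ted_policy import TEDPolicy
from rasa.core.processor import MessageProcessor
from rasa.core.tracker_store import InMemoryTrackerStore
from rasa.shared.core.constants import ACTION_LISTEN_NAME
from rasa.shared.core.domain import Domain
from rasa.shared.core.events import ActionExecuted
from rasa.shared.core.trackers import DialogueStateTracker
from rasa.shared.nlu.interpreter import NaturalLanguageInterpreter
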
Example #4
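Covers the deprecation path: a policy predict function that does not accept an `interpreter` argument still works, but the processor emits a `DeprecationWarning`.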
def test_get_next_action_probabilities_pass_policy_predictions_without_interpreter_arg(
    predict_function: Callable,
):
    policy = TEDPolicy()

    policy.predict_action_probabilities = predict_function

    ensemble = SimplePolicyEnsemble(policies=[policy])
    interpreter = Mock()
    domain = Domain.empty()

    processor = MessageProcessor(
        interpreter,
        ensemble,
        domain,
        InMemoryTrackerStore(domain),
        InMemoryLockStore(),
        Mock(),
    )

    with pytest.warns(DeprecationWarning):
        processor._get_next_action_probabilities(
            DialogueStateTracker.from_events(
                "lala", [ActionExecuted(ACTION_LISTEN_NAME)]
            )
        )
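
`predict_function` is injected by a parametrized fixture not shown on this page. A plausible instance, sketched under the assumption that the old-style signature (no `interpreter` parameter) is what triggers the warning:

from typing import List

from rasa.shared.core.domain import Domain
from rasa.shared.core.trackers import DialogueStateTracker


def predict_function(
    tracker: DialogueStateTracker, domain: Domain, **kwargs
) -> List[float]:
    # Old-style signature with no `interpreter` parameter; the processor
    # is expected to warn about it. Sketch of one plausible parametrization.
    return [1.0, 0.0]
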
Example #5
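Trains TEDPolicy on a single greeting dialogue using the Rasa 3.x constructor (`ModelStorage`, `Resource`, `ExecutionContext`) and asserts that the prediction carries `diagnostic_data` whose `attention_weights` entry is a NumPy array.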
def test_diagnostics(default_model_storage: ModelStorage,
                     default_execution_context: ExecutionContext):
    domain = Domain.from_yaml(DOMAIN_YAML)
    policy = TEDPolicy(
        TEDPolicy.get_default_config(),
        default_model_storage,
        Resource("TEDPolicy"),
        default_execution_context,
    )
    GREET_RULE = DialogueStateTracker.from_events(
        "greet rule",
        evts=[
            UserUttered(intent={"name": GREET_INTENT_NAME}),
            ActionExecuted(UTTER_GREET_ACTION),
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered(intent={"name": GREET_INTENT_NAME}),
            ActionExecuted(ACTION_LISTEN_NAME),
        ],
    )
    precomputations = None
    policy.train([GREET_RULE], domain, precomputations)
    prediction = policy.predict_action_probabilities(
        GREET_RULE,
        domain,
        precomputations,
    )

    assert prediction.diagnostic_data
    assert "attention_weights" in prediction.diagnostic_data
    assert isinstance(prediction.diagnostic_data.get("attention_weights"),
                      np.ndarray)
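
`DOMAIN_YAML`, `GREET_INTENT_NAME`, and `UTTER_GREET_ACTION` are module-level definitions not shown on this page. A minimal sketch consistent with the events used in the test:

GREET_INTENT_NAME = "greet"
UTTER_GREET_ACTION = "utter_greet"
DOMAIN_YAML = f"""
intents:
- {GREET_INTENT_NAME}
actions:
- {UTTER_GREET_ACTION}
"""
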
Example #6
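Exercises both ranking configurations: with `RANKING_LENGTH` equal to 0, every confidence stays positive and the distribution sums to 1; with a positive value, exactly `RANKING_LENGTH` confidences are non-zero and, without renormalization, they no longer sum to 1.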
    def test_ranking_length_and_renormalization(
        self,
        trained_policy: TEDPolicy,
        tracker: DialogueStateTracker,
        default_domain: Domain,
        monkeypatch: MonkeyPatch,
    ):
        precomputations = None
        prediction = trained_policy.predict_action_probabilities(
            tracker,
            default_domain,
            precomputations,
        )

        # first check the output is what we expect
        assert not prediction.is_end_to_end_prediction

        # check that ranking length is applied - without normalization
        if trained_policy.config[RANKING_LENGTH] == 0:
            assert sum(prediction.probabilities) == pytest.approx(1)
            assert all(
                confidence > 0 for confidence in prediction.probabilities
            )
        else:
            assert (
                sum(confidence > 0 for confidence in prediction.probabilities)
                == trained_policy.config[RANKING_LENGTH]
            )
            assert sum(prediction.probabilities) != pytest.approx(1)
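
Which branch runs depends on how the `trained_policy` fixture was parametrized. Illustrative configs for each branch, assuming `RANKING_LENGTH` from `rasa.utils.tensorflow.constants` and the `get_default_config` helper seen in Example #5:

from rasa.utils.tensorflow.constants import RANKING_LENGTH

# Keep all confidences: no truncation, the distribution sums to 1.
config_keep_all = {**TEDPolicy.get_default_config(), RANKING_LENGTH: 0}

# Keep only the top 10 confidences; without renormalization the
# surviving probabilities no longer sum to 1.
config_top_10 = {**TEDPolicy.get_default_config(), RANKING_LENGTH: 10}
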
Example #7
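The same diagnostics check as Example #5, but through the pre-3.0 API: the policy is built without model storage, and `train` and `predict_action_probabilities` take a `RegexInterpreter` instead of precomputations.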
def test_diagnostics():
    domain = Domain.from_yaml(DOMAIN_YAML)
    policy = TEDPolicy()
    GREET_RULE = DialogueStateTracker.from_events(
        "greet rule",
        evts=[
            UserUttered(intent={"name": GREET_INTENT_NAME}),
            ActionExecuted(UTTER_GREET_ACTION),
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered(intent={"name": GREET_INTENT_NAME}),
            ActionExecuted(ACTION_LISTEN_NAME),
        ],
    )
    policy.train([GREET_RULE], domain, RegexInterpreter())
    prediction = policy.predict_action_probabilities(GREET_RULE, domain,
                                                     RegexInterpreter())

    assert prediction.diagnostic_data
    assert "attention_weights" in prediction.diagnostic_data
    assert isinstance(prediction.diagnostic_data.get("attention_weights"),
                      np.ndarray)