Example #1
    def test_normalization(
        self,
        trained_policy: Policy,
        tracker: DialogueStateTracker,
        default_domain: Domain,
        monkeypatch: MonkeyPatch,
    ):
        precomputations = None
        # first check the output is what we expect
        predicted_probabilities = trained_policy.predict_action_probabilities(
            tracker,
            default_domain,
            precomputations,
        ).probabilities

        output_sums_to_1 = sum(predicted_probabilities) == pytest.approx(1)
        assert output_sums_to_1

        # also check our function is not called
        mock = Mock()
        monkeypatch.setattr(train_utils, "normalize", mock.normalize)
        trained_policy.predict_action_probabilities(
            tracker,
            default_domain,
            precomputations,
        )

        mock.normalize.assert_not_called()
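
Here pytest's monkeypatch fixture pairs with unittest.mock.Mock: the module-level
train_utils.normalize is swapped for a mock attribute, and assert_not_called()
proves the prediction path never reaches it. A minimal, self-contained sketch of
the same interception pattern, using unittest.mock.patch and a placeholder
target (math.exp) instead of Rasa's internals:

    import math
    from unittest.mock import Mock, patch

    def scores_without_normalization():
        # This caller never touches math.exp, mirroring a policy that
        # skips the normalization helper entirely.
        return [0.2, 0.3, 0.5]

    mock = Mock()
    with patch("math.exp", mock.exp):
        probabilities = scores_without_normalization()

    mock.exp.assert_not_called()  # the swapped-in function was never hit
    assert abs(sum(probabilities) - 1.0) < 1e-9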
Example #2
    def test_normalization(
        self,
        trained_policy: Policy,
        tracker: DialogueStateTracker,
        default_domain: Domain,
        monkeypatch: MonkeyPatch,
    ):
        precomputations = None
        # first check the output is what we expect
        predicted_probabilities = trained_policy.predict_action_probabilities(
            tracker,
            default_domain,
            precomputations,
        ).probabilities
        # there should be no normalization
        assert all(confidence > 0 for confidence in predicted_probabilities)

        # also check our function is not called
        mock = Mock()
        monkeypatch.setattr(train_utils, "normalize", mock.normalize)
        trained_policy.predict_action_probabilities(
            tracker,
            default_domain,
            precomputations,
        )

        mock.normalize.assert_not_called()
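
Together, Examples 1 and 2 pin down two complementary invariants on the returned
vector: the mass sums to one, and no confidence has been zeroed out by a
normalization step. A hedged helper bundling both checks (not part of the
source, shown only to make the contract explicit):

    import pytest

    def assert_unnormalized_distribution(probabilities):
        # Every action keeps a strictly positive confidence ...
        assert all(p > 0 for p in probabilities)
        # ... and the vector is still a probability distribution.
        assert sum(probabilities) == pytest.approx(1.0)

    assert_unnormalized_distribution([0.1, 0.2, 0.3, 0.4])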
Example #3
    def test_persist_and_load(
        self,
        trained_policy: PolicyGraphComponent,
        default_domain: Domain,
        should_finetune: bool,
        stories_path: Text,
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
    ):
        loaded = trained_policy.__class__.load(
            self._config(trained_policy.config),
            model_storage,
            resource,
            dataclasses.replace(execution_context,
                                is_finetuning=should_finetune),
        )

        assert loaded.finetune_mode == should_finetune

        trackers = train_trackers(default_domain,
                                  stories_path,
                                  augmentation_factor=20)

        for tracker in trackers:
            predicted_probabilities = loaded.predict_action_probabilities(
                tracker, default_domain)
            actual_probabilities = trained_policy.predict_action_probabilities(
                tracker, default_domain)
            assert predicted_probabilities == actual_probabilities
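
dataclasses.replace builds a copy of the (frozen) execution context with only
is_finetuning overridden, so the loaded policy enters finetune mode without
mutating the shared fixture. A standalone sketch of that call with a stand-in
dataclass (the fields here are assumptions, not Rasa's ExecutionContext):

    import dataclasses

    @dataclasses.dataclass(frozen=True)
    class FakeExecutionContext:
        model_id: str
        is_finetuning: bool = False

    ctx = FakeExecutionContext(model_id="demo")
    finetune_ctx = dataclasses.replace(ctx, is_finetuning=True)

    assert finetune_ctx.is_finetuning
    assert not ctx.is_finetuning  # the original context is untouched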
Example #4
def _get_next_action(
    policy: PolicyGraphComponent, events: List[Event], domain: Domain
) -> Text:
    tracker = get_tracker(events)
    scores = policy.predict_action_probabilities(tracker, domain).probabilities
    index = scores.index(max(scores))
    return domain.action_names_or_texts[index]
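
scores.index(max(scores)) is a plain argmax over a Python list; on ties it
resolves to the lowest index. An equivalent single-pass form, shown here only
for comparison:

    from typing import List

    def argmax(scores: List[float]) -> int:
        # max over indices, keyed by each index's score; ties keep the
        # first (lowest) index, matching scores.index(max(scores)).
        return max(range(len(scores)), key=scores.__getitem__)

    assert argmax([0.1, 0.7, 0.2]) == 1
    assert argmax([0.5, 0.5]) == 0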
Example #5
    def test_normalization(
        self,
        trained_policy: Policy,
        tracker: DialogueStateTracker,
        default_domain: Domain,
        monkeypatch: MonkeyPatch,
    ):
        # Mock actual normalization method
        mock = Mock()
        monkeypatch.setattr(train_utils, "normalize", mock.normalize)
        trained_policy.predict_action_probabilities(
            tracker,
            default_domain,
            precomputations=None,
        )

        # function should not get called for margin loss_type
        mock.normalize.assert_not_called()
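
As the in-code comment says, with a margin loss the policy's confidences are
similarity scores rather than a softmax distribution, so the normalize helper
is expected to be bypassed entirely; the mock turns that expectation into a
hard assertion.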
Example #6
    def test_prediction_on_empty_tracker(
        self, trained_policy: Policy, default_domain: Domain
    ):
        tracker = DialogueStateTracker(DEFAULT_SENDER_ID, default_domain.slots)
        prediction = trained_policy.predict_action_probabilities(
            tracker,
            default_domain,
            precomputations=None,
        )
        assert not prediction.is_end_to_end_prediction
        assert len(prediction.probabilities) == default_domain.num_actions
        assert max(prediction.probabilities) <= 1.0
        assert min(prediction.probabilities) >= 0.0
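
The three numeric assertions amount to a shape contract: even on a brand-new
tracker the policy must return one score per domain action, each bounded like a
probability. A generic checker capturing that contract (illustrative only, not
from the source):

    from typing import List

    def check_prediction_shape(probabilities: List[float], num_actions: int) -> None:
        # One confidence per action in the domain ...
        assert len(probabilities) == num_actions
        # ... each within [0, 1].
        assert all(0.0 <= p <= 1.0 for p in probabilities)

    check_prediction_shape([0.25, 0.75, 0.0], num_actions=3)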