Example #1
    def test_featurizer(self, trained_policy: Policy, tmp_path: Path):
        assert isinstance(trained_policy.featurizer, MaxHistoryTrackerFeaturizer)
        assert trained_policy.featurizer.state_featurizer is None
        trained_policy.persist(str(tmp_path))
        loaded = trained_policy.__class__.load(str(tmp_path))
        # the featurizer configuration should survive a persist/load round trip
        assert isinstance(loaded.featurizer, MaxHistoryTrackerFeaturizer)
        assert loaded.featurizer.state_featurizer is None
Example #2
    @staticmethod
    def _get_prediction(
        policy: Policy,
        tracker: DialogueStateTracker,
        domain: Domain,
        interpreter: NaturalLanguageInterpreter,
    ) -> Prediction:
        number_of_arguments_in_rasa_1_0 = 2
        arguments = rasa.shared.utils.common.arguments_of(
            policy.predict_action_probabilities)
        if (len(arguments) > number_of_arguments_in_rasa_1_0
                and "interpreter" in arguments):
            probabilities = policy.predict_action_probabilities(
                tracker, domain, interpreter)
        else:
            rasa.shared.utils.io.raise_warning(
                "The function `predict_action_probabilities` of "
                "the `Policy` interface was changed to support "
                "additional parameters. Please make sure to "
                "adapt your custom `Policy` implementation.",
                category=DeprecationWarning,
            )
            probabilities = policy.predict_action_probabilities(
                tracker, domain, RegexInterpreter())

        return Prediction(probabilities, policy.priority)
Example #3
    async def test_persist_and_load(
        self,
        trained_policy: Policy,
        default_domain: Domain,
        tmp_path: Path,
        should_finetune: bool,
        stories_path: Text,
    ):
        trained_policy.persist(str(tmp_path))
        loaded = trained_policy.__class__.load(
            str(tmp_path), should_finetune=should_finetune
        )
        assert loaded.finetune_mode == should_finetune

        trackers = await train_trackers(
            default_domain, stories_path, augmentation_factor=20
        )

        for tracker in trackers:
            predicted_probabilities = loaded.predict_action_probabilities(
                tracker, default_domain, RegexInterpreter()
            )
            actual_probabilities = trained_policy.predict_action_probabilities(
                tracker, default_domain, RegexInterpreter()
            )
            assert predicted_probabilities == actual_probabilities
Example #4
    def test_featurizer(self, trained_policy: Policy, tmp_path: Path):
        assert isinstance(trained_policy.featurizer, FullDialogueTrackerFeaturizer)
        assert isinstance(
            trained_policy.featurizer.state_featurizer,
            LabelTokenizerSingleStateFeaturizer,
        )
        trained_policy.persist(str(tmp_path))
        loaded = trained_policy.__class__.load(str(tmp_path))
        assert isinstance(loaded.featurizer, FullDialogueTrackerFeaturizer)
        assert isinstance(
            loaded.featurizer.state_featurizer, LabelTokenizerSingleStateFeaturizer
        )
Example #5
    async def test_persist_and_load(self, trained_policy: Policy,
                                    default_domain: Domain, tmp_path: Path):
        trained_policy.persist(str(tmp_path))
        loaded = trained_policy.__class__.load(str(tmp_path))
        trackers = await train_trackers(default_domain, augmentation_factor=20)

        for tracker in trackers:
            predicted_probabilities = loaded.predict_action_probabilities(
                tracker, default_domain)
            actual_probabilities = trained_policy.predict_action_probabilities(
                tracker, default_domain)
            assert predicted_probabilities == actual_probabilities
Example #6
    def test_featurizer(self, trained_policy: Policy, tmp_path: Path):
        assert isinstance(trained_policy.featurizer, MaxHistoryTrackerFeaturizer)
        assert trained_policy.featurizer.max_history == self.max_history
        assert isinstance(
            trained_policy.featurizer.state_featurizer, BinarySingleStateFeaturizer
        )
        trained_policy.persist(str(tmp_path))
        loaded = trained_policy.__class__.load(str(tmp_path))
        assert isinstance(loaded.featurizer, MaxHistoryTrackerFeaturizer)
        assert loaded.featurizer.max_history == self.max_history
        assert isinstance(
            loaded.featurizer.state_featurizer, BinarySingleStateFeaturizer
        )
Example #7
    def test_normalization(
        self,
        trained_policy: Policy,
        tracker: DialogueStateTracker,
        default_domain: Domain,
        monkeypatch: MonkeyPatch,
    ):
        # Mock actual normalization method
        mock = Mock()
        monkeypatch.setattr(train_utils, "normalize", mock.normalize)
        trained_policy.predict_action_probabilities(tracker, default_domain,
                                                    RegexInterpreter())

        # function should not get called for margin loss_type
        mock.normalize.assert_not_called()
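The monkeypatch-plus-Mock spy used above is plain pytest machinery, not anything Rasa-specific. A minimal self-contained sketch of the same technique, using the standard library's math module as a stand-in for train_utils (the function names here are illustrative only):

from unittest.mock import Mock

import math

import pytest


def test_sqrt_is_not_called(monkeypatch: pytest.MonkeyPatch):
    # Swap the real function for a Mock attribute so every call is recorded.
    mock = Mock()
    monkeypatch.setattr(math, "sqrt", mock.sqrt)

    abs(-3)  # code under test that is expected to leave math.sqrt alone

    # The spy proves the patched function was never invoked.
    mock.sqrt.assert_not_called()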
Example #8
    def test_persist_and_load(
        self,
        trained_policy: Policy,
        default_domain: Domain,
        should_finetune: bool,
        stories_path: Text,
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
    ):
        loaded = trained_policy.__class__.load(
            self._config(trained_policy.config),
            model_storage,
            resource,
            dataclasses.replace(execution_context,
                                is_finetuning=should_finetune),
        )

        assert loaded.finetune_mode == should_finetune

        trackers = train_trackers(default_domain,
                                  stories_path,
                                  augmentation_factor=20)

        for tracker in trackers:
            predicted_probabilities = loaded.predict_action_probabilities(
                tracker, default_domain)
            actual_probabilities = trained_policy.predict_action_probabilities(
                tracker, default_domain)
            assert predicted_probabilities == actual_probabilities
Example #9
def _get_next_action(
    policy: Policy, events: List[Event], domain: Domain
) -> Text:
    tracker = get_tracker(events)
    scores = policy.predict_action_probabilities(tracker, domain).probabilities
    index = scores.index(max(scores))
    return domain.action_names_or_texts[index]
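For reference, the argmax lookup in the helper above reduces to a plain list.index over the scores; a toy illustration with made-up action names and confidences:

# Made-up data purely to illustrate the lookup in the helper above.
action_names = ["action_listen", "utter_greet", "utter_goodbye"]
scores = [0.1, 0.7, 0.2]
assert action_names[scores.index(max(scores))] == "utter_greet"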
Example #10
    def test_prediction_on_empty_tracker(
        self, trained_policy: Policy, default_domain: Domain
    ):
        tracker = DialogueStateTracker(DEFAULT_SENDER_ID, default_domain.slots)
        prediction = trained_policy.predict_action_probabilities(
            tracker, default_domain, RegexInterpreter()
        )
        assert not prediction.is_end_to_end_prediction
        assert len(prediction.probabilities) == default_domain.num_actions
        assert max(prediction.probabilities) <= 1.0
        assert min(prediction.probabilities) >= 0.0
Example #11
    def test_ranking_length_and_renormalization(
        self,
        trained_policy: Policy,
        tracker: DialogueStateTracker,
        default_domain: Domain,
    ):
        policy_prediction = trained_policy.predict_action_probabilities(
            tracker, default_domain, precomputations=None
        )
        assert sum(policy_prediction.probabilities) != pytest.approx(1)
Example #12
    @staticmethod
    def _get_prediction(
        policy: Policy,
        tracker: DialogueStateTracker,
        domain: Domain,
        interpreter: NaturalLanguageInterpreter,
    ) -> PolicyPrediction:
        number_of_arguments_in_rasa_1_0 = 2
        arguments = rasa.shared.utils.common.arguments_of(
            policy.predict_action_probabilities
        )

        if (
            len(arguments) > number_of_arguments_in_rasa_1_0
            and "interpreter" in arguments
        ):
            prediction = policy.predict_action_probabilities(
                tracker, domain, interpreter
            )
        else:
            rasa.shared.utils.io.raise_warning(
                "The function `predict_action_probabilities` of "
                "the `Policy` interface was changed to support "
                "additional parameters. Please make sure to "
                "adapt your custom `Policy` implementation.",
                category=DeprecationWarning,
            )
            prediction = policy.predict_action_probabilities(
                tracker, domain, RegexInterpreter()
            )

        if isinstance(prediction, list):
            rasa.shared.utils.io.raise_deprecation_warning(
                f"The function `predict_action_probabilities` of "
                f"the `{Policy.__name__}` interface was changed to return "
                f"a `{PolicyPrediction.__name__}` object. Please make sure to "
                f"adapt your custom `{Policy.__name__}` implementation. Support for "
                f"returning a list of floats will be removed in Rasa Open Source 3.0.0"
            )
            prediction = PolicyPrediction(
                prediction, policy.__class__.__name__, policy_priority=policy.priority
            )

        return prediction
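Examples #2 and #12 exist because older custom policies implemented predict_action_probabilities with only two parameters. A minimal sketch of a custom policy that already satisfies the newer interface, so the compatibility shim above takes its first branch, might look like the following. The class name UniformPolicy and its behavior are illustrative assumptions, not part of Rasa, and the import paths follow the Rasa 2.x layout seen in these snippets:

from rasa.core.policies.policy import Policy, PolicyPrediction
from rasa.shared.core.domain import Domain
from rasa.shared.core.trackers import DialogueStateTracker
from rasa.shared.nlu.interpreter import NaturalLanguageInterpreter


class UniformPolicy(Policy):
    """Hypothetical toy policy that scores every action equally."""

    def predict_action_probabilities(
        self,
        tracker: DialogueStateTracker,
        domain: Domain,
        interpreter: NaturalLanguageInterpreter,
        **kwargs,
    ) -> PolicyPrediction:
        # One uniform confidence per action in the domain.
        probabilities = [1.0 / domain.num_actions] * domain.num_actions
        # Mirrors the PolicyPrediction construction used in Example #12.
        return PolicyPrediction(
            probabilities, self.__class__.__name__, policy_priority=self.priority
        )

Because the signature names an interpreter parameter, the arguments_of check in _get_prediction passes it through directly and no deprecation warning is raised.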
Example #13
    def test_normalization(
        self,
        trained_policy: Policy,
        tracker: DialogueStateTracker,
        default_domain: Domain,
        monkeypatch: MonkeyPatch,
    ):
        # first check the output is what we expect
        predicted_probabilities = trained_policy.predict_action_probabilities(
            tracker, default_domain, RegexInterpreter()).probabilities
        # there should be no normalization
        assert all([confidence > 0 for confidence in predicted_probabilities])

        # also check our function is not called
        mock = Mock()
        monkeypatch.setattr(train_utils, "normalize", mock.normalize)
        trained_policy.predict_action_probabilities(tracker, default_domain,
                                                    RegexInterpreter())

        mock.normalize.assert_not_called()
Example #14
    def test_normalization(
        self,
        trained_policy: Policy,
        tracker: DialogueStateTracker,
        default_domain: Domain,
        monkeypatch: MonkeyPatch,
    ):
        # first check the output is what we expect
        predicted_probabilities = trained_policy.predict_action_probabilities(
            tracker, default_domain, RegexInterpreter()).probabilities

        output_sums_to_1 = sum(predicted_probabilities) == pytest.approx(1)
        assert output_sums_to_1

        # also check our function is not called
        mock = Mock()
        monkeypatch.setattr(train_utils, "normalize", mock.normalize)
        trained_policy.predict_action_probabilities(tracker, default_domain,
                                                    RegexInterpreter())

        mock.normalize.assert_not_called()
Example #15
    def test_ranking_length_and_renormalization(
        self,
        trained_policy: Policy,
        tracker: DialogueStateTracker,
        default_domain: Domain,
    ):
        precomputations = None
        predicted_probabilities = trained_policy.predict_action_probabilities(
            tracker, default_domain, precomputations
        ).probabilities
        assert all(confidence >= 0 for confidence in predicted_probabilities)
        assert sum(confidence > 0 for confidence in predicted_probabilities) == 4
        assert sum(predicted_probabilities) == pytest.approx(1)
Example #16
    def test_featurizer(self, trained_policy: Policy, tmp_path: Path):
        assert trained_policy.featurizer is None
        trained_policy.persist(str(tmp_path))
        loaded = trained_policy.__class__.load(str(tmp_path))
        assert loaded.featurizer is None