Example #1
    def process(self, message: Message, **kwargs: Any) -> None:

        if self._url() is not None:
            # <mod>
            params = kwargs
            timezone = self._timezone_from_config_or_request(
                self.component_config, params.get("timezone", None))
            reference_time = self._reference_time_from_message_or_request(
                message, params.get("reference_time", None))
            matches = self._duckling_parse(message.text, reference_time,
                                           timezone)
            # </mod>
            all_extracted = convert_duckling_format_to_rasa(matches)
            dimensions = self.component_config["dimensions"]
            extracted = DucklingEntityExtractor.filter_irrelevant_entities(
                all_extracted, dimensions)
        else:
            extracted = []
            raise_warning(
                "Duckling HTTP component in pipeline, but no "
                "`url` configuration in the config "
                "file nor is `RASA_DUCKLING_HTTP_URL` "
                "set as an environment variable. No entities will be extracted!",
                docs=DOCS_URL_COMPONENTS + "#ducklinghttpextractor",
            )

        extracted = self.add_extractor_name(extracted)
        message.set(
            ENTITIES,
            message.get(ENTITIES, []) + extracted,
            add_to_output=True,
        )
Example #2
    def graph_config_for_recipe(
        self,
        config: Dict,
        cli_parameters: Dict[Text, Any],
        training_type: TrainingType = TrainingType.BOTH,
        is_finetuning: bool = False,
    ) -> GraphModelConfiguration:
        """Converts the default config to graphs (see interface for full docstring)."""
        mark_as_experimental_feature("graph recipe")
        if cli_parameters or is_finetuning:
            raise_warning(
                "Unlike the Default Recipe, Graph Recipe does not utilize CLI "
                "parameters or finetuning and these configurations will be ignored. "
                "Add configuration to the recipe itself if you want them to be used.",
                docs=DOCS_URL_GRAPH_RECIPE,
            )

        nlu_target, core_target = self.get_targets(config, training_type)

        return GraphModelConfiguration(
            train_schema=GraphSchema.from_dict(config.get("train_schema")),
            predict_schema=GraphSchema.from_dict(config.get("predict_schema")),
            training_type=training_type,
            language=config.get("language"),
            core_target=core_target,
            nlu_target=nlu_target,
        )
Example #3
def test_raise_deprecation():
    with pytest.warns(DeprecationWarning) as record:
        io_utils.raise_warning("My warning.", DeprecationWarning)

    assert len(record) == 1
    assert record[0].message.args[0] == "My warning."
    assert isinstance(record[0].message, DeprecationWarning)
Example #4
def check_deterministic_ops() -> None:
    """Warn user if they have set TF_DETERMINISTIC_OPS."""
    if os.getenv(TF_DETERMINISTIC_OPS, False):
        shared_io_utils.raise_warning(
            f"You have set '{TF_DETERMINISTIC_OPS}' to 1. If you are "
            f"using one or more GPU(s) and use any of 'SparseFeaturizer', "
            f"'TEDPolicy', 'DIETClassifier', 'UnexpecTEDIntentPolicy', or "
            f"'ResponseSelector' training and testing will fail as there are no "
            f"deterministic GPU implementations of some underlying TF ops.",
            category=UserWarning,
        )
Example #5
    def train(
        self,
        training_trackers: List[TrackerWithCachedStates],
        domain: Domain,
        interpreter: NaturalLanguageInterpreter,
        **kwargs: Any,
    ) -> None:
        """Trains the policy on given training trackers.

        Args:
            training_trackers: List of training trackers to be used
                for training the model.
            domain: Domain of the assistant.
            interpreter: NLU Interpreter to be used for featurizing the states.
            **kwargs: Any other argument.
        """
        if not training_trackers:
            shared_io_utils.raise_warning(
                f"Skipping training of `{self.__class__.__name__}` "
                f"as no data was provided. You can exclude this "
                f"policy in the configuration "
                f"file to avoid this warning.",
                category=UserWarning,
            )
            return

        model_data, label_ids = self._prepare_for_training(
            training_trackers, domain, interpreter, **kwargs)

        if model_data.is_empty():
            shared_io_utils.raise_warning(
                f"Skipping training of `{self.__class__.__name__}` "
                f"as no data was provided. You can exclude this "
                f"policy in the configuration "
                f"file to avoid this warning.",
                category=UserWarning,
            )
            return

        self.run_training(model_data, label_ids)
Example #6
def test_raise_user_warning():
    with pytest.warns(UserWarning) as record:
        io_utils.raise_warning("My warning.")

    assert len(record) == 1
    assert record[0].message.args[0] == "My warning."
Example #7
def test_raise_future_warning():
    with pytest.warns(FutureWarning) as record:
        io_utils.raise_warning("My future warning.", FutureWarning)

    assert len(record) == 1
    assert record[0].message.args[0] == "My future warning."
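
All of the examples above call the same helper with a message, an optional warning category, and (in some cases) a docs link. The following is a minimal sketch of that call pattern; the import path shown (rasa.shared.utils.io) and the docs URL are assumptions and may differ between Rasa versions (older releases expose the helper as rasa.utils.io.raise_warning, aliased as io_utils above).

# A sketch of the common call shapes, not a verbatim excerpt from the library.
from rasa.shared.utils.io import raise_warning  # assumed import path

# With no category given, a UserWarning is emitted (see Example #6).
raise_warning("My warning.")

# The category can be passed positionally (Examples #3 and #7) ...
raise_warning("My future warning.", FutureWarning)

# ... or as a keyword, optionally together with a link to the relevant
# documentation page (Examples #1, #4 and #5). The URL here is a placeholder.
raise_warning(
    "No `url` configured for the Duckling component.",
    category=UserWarning,
    docs="https://rasa.com/docs/rasa/components",
)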