Example #1
def test_model_finetuning_core_with_default_epochs(
    tmp_path: Path,
    monkeypatch: MonkeyPatch,
    trained_moodbot_path: Text,
):
    mocked_core_training = AsyncMock()
    monkeypatch.setattr(rasa.core, rasa.core.train.__name__,
                        mocked_core_training)

    (tmp_path / "models").mkdir()
    output = str(tmp_path / "models")

    # Providing a new config with no epochs means the default number of epochs
    # is used and then scaled by `finetuning_epoch_fraction`.
    old_config = rasa.shared.utils.io.read_yaml_file(
        "examples/moodbot/config.yml")
    del old_config["policies"][0]["epochs"]
    new_config_path = tmp_path / "new_config.yml"
    rasa.shared.utils.io.write_yaml(old_config, new_config_path)

    train_core(
        "examples/moodbot/domain.yml",
        str(new_config_path),
        "examples/moodbot/data/stories.yml",
        output=output,
        model_to_finetune=trained_moodbot_path,
        finetuning_epoch_fraction=2,
    )

    mocked_core_training.assert_called_once()
    _, kwargs = mocked_core_training.call_args
    model_to_finetune = kwargs["model_to_finetune"]

    ted = model_to_finetune.policy_ensemble.policies[0]
    assert ted.config[EPOCHS] == TEDPolicy.defaults[EPOCHS] * 2
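
These six snippets come from the same test module and omit their imports. A plausible reconstruction for the Rasa 2.x layout is sketched below; the exact module paths are educated guesses, and the DEFAULT_* constants plus the trained_* model fixtures live in the suite's conftest rather than here.

import sys
from pathlib import Path
from typing import Text
from unittest.mock import AsyncMock, Mock  # AsyncMock needs Python 3.8+

import pytest
from _pytest.monkeypatch import MonkeyPatch

import rasa.core
import rasa.model
import rasa.nlu
import rasa.shared.utils.io
from rasa.core.agent import Agent
from rasa.core.interpreter import RasaNLUInterpreter
from rasa.core.policies.ted_policy import TEDPolicy
from rasa.nlu.classifiers.diet_classifier import DIETClassifier
from rasa.nlu.model import Interpreter
from rasa.train import train, train_core, train_nlu
from rasa.utils.tensorflow.constants import EPOCHS

# DEFAULT_DOMAIN_PATH_WITH_SLOTS, DEFAULT_CONFIG_PATH, DEFAULT_STORIES_FILE
# and DEFAULT_NLU_DATA are fixture-path constants defined in the test suite;
# trained_moodbot_path and friends are pytest fixtures yielding pre-trained
# models.
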
Example #2
def test_interpreter_of_old_model_passed_to_core_training(
        monkeypatch: MonkeyPatch, tmp_path: Path, trained_rasa_model: Text):
    # NLU isn't retrained
    monkeypatch.setattr(
        rasa.model.FingerprintComparisonResult,
        rasa.model.FingerprintComparisonResult.should_retrain_nlu.__name__,
        lambda _: False,
    )

    # An old model with an interpreter exists
    monkeypatch.setattr(rasa.model, rasa.model.get_latest_model.__name__,
                        lambda _: trained_rasa_model)

    # Mock the actual Core training
    _train_core = AsyncMock()
    monkeypatch.setattr(rasa.core, "train", _train_core)

    train(
        DEFAULT_DOMAIN_PATH_WITH_SLOTS,
        DEFAULT_CONFIG_PATH,
        [DEFAULT_STORIES_FILE, DEFAULT_NLU_DATA],
        str(tmp_path),
    )

    _train_core.assert_called_once()
    _, _, kwargs = _train_core.mock_calls[0]
    assert isinstance(kwargs["interpreter"], RasaNLUInterpreter)
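
Two patching styles appear across these tests: a literal attribute name ("train" above) and `fn.__name__` (Examples #1 and #4). A standalone sketch of why the `__name__` form is handy, using a hypothetical throwaway module rather than Rasa:

import types
from unittest.mock import Mock

def train() -> str:
    return "real"

# Hypothetical stand-in module; the point is the pattern, not Rasa itself.
mod = types.ModuleType("mod")
mod.train = train

# Using `fn.__name__` instead of the string "train" keeps the patched
# attribute name tied to the function object if it is ever renamed:
setattr(mod, mod.train.__name__, Mock(return_value="mocked"))
assert mod.train() == "mocked"
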
Example #3
def test_trained_interpreter_passed_to_core_training(
        monkeypatch: MonkeyPatch, tmp_path: Path,
        unpacked_trained_rasa_model: Text):
    # Skip actual NLU training and return the trained interpreter path from
    # the fixture. Patching is a bit more complicated as we have a module
    # `train` and a function with the same name 😬
    monkeypatch.setattr(
        sys.modules["rasa.train"],
        "_train_nlu_with_validated_data",
        AsyncMock(return_value=unpacked_trained_rasa_model),
    )

    # Mock the actual Core training
    _train_core = AsyncMock()
    monkeypatch.setattr(rasa.core, "train", _train_core)

    train(
        DEFAULT_DOMAIN_PATH_WITH_SLOTS,
        DEFAULT_CONFIG_PATH,
        [DEFAULT_STORIES_FILE, DEFAULT_NLU_DATA],
        str(tmp_path),
    )

    _train_core.assert_called_once()
    _, _, kwargs = _train_core.mock_calls[0]
    assert isinstance(kwargs["interpreter"], RasaNLUInterpreter)
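
Both the Core-training stub and the patched `_train_nlu_with_validated_data` are `AsyncMock`s because the real functions are coroutines. A self-contained refresher on the behavior these tests rely on (standard library only, Python 3.8+):

import asyncio
from unittest.mock import AsyncMock

# Calling an AsyncMock returns an awaitable, so it can stand in for a
# coroutine function that the training pipeline awaits:
mock_train = AsyncMock(return_value="path/to/interpreter")
result = asyncio.run(mock_train("config.yml"))
assert result == "path/to/interpreter"
mock_train.assert_awaited_once_with("config.yml")
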
Example #4
def test_model_finetuning_nlu_with_default_epochs(
    tmp_path: Path,
    monkeypatch: MonkeyPatch,
    trained_nlu_moodbot_path: Text,
):
    mocked_nlu_training = AsyncMock(return_value="")
    monkeypatch.setattr(rasa.nlu, rasa.nlu.train.__name__, mocked_nlu_training)

    (tmp_path / "models").mkdir()
    output = str(tmp_path / "models")

    # Providing a new config with no epochs means the default number of epochs
    # is used and then scaled by `finetuning_epoch_fraction`.
    old_config = rasa.shared.utils.io.read_yaml_file(
        "examples/moodbot/config.yml")
    del old_config["pipeline"][-1][EPOCHS]
    new_config_path = tmp_path / "new_config.yml"
    rasa.shared.utils.io.write_yaml(old_config, new_config_path)

    train_nlu(
        str(new_config_path),
        "examples/moodbot/data/nlu.yml",
        output=output,
        model_to_finetune=trained_nlu_moodbot_path,
        finetuning_epoch_fraction=0.1,
    )

    mocked_nlu_training.assert_called_once()
    _, nlu_train_kwargs = mocked_nlu_training.call_args
    model_to_finetune = nlu_train_kwargs["model_to_finetune"]
    new_diet_metadata = model_to_finetune.model_metadata.metadata["pipeline"][-1]
    assert new_diet_metadata["name"] == "DIETClassifier"
    assert new_diet_metadata[EPOCHS] == DIETClassifier.defaults[EPOCHS] * 0.1
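
For reference, the arithmetic behind the final assertion as a sketch; `effective_epochs` is our name, not Rasa's, and DIETClassifier's default of 300 epochs is an assumption about the version under test:

def effective_epochs(base_epochs: int, fraction: float) -> float:
    # With no `epochs` key in the new config, the component default applies
    # and is scaled by `finetuning_epoch_fraction`.
    return base_epochs * fraction

assert effective_epochs(300, 0.1) == 30.0  # assumes DIET's default of 300
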
Example #5
# Presumably parametrized like this in the original module (required for the
# `use_latest_model` argument):
@pytest.mark.parametrize("use_latest_model", [True, False])
def test_model_finetuning_nlu(
    tmp_path: Path,
    monkeypatch: MonkeyPatch,
    trained_nlu_moodbot_path: Text,
    use_latest_model: bool,
):
    mocked_nlu_training = AsyncMock(return_value="")
    monkeypatch.setattr(rasa.nlu, rasa.nlu.train.__name__, mocked_nlu_training)

    mock_interpreter_create = Mock(wraps=Interpreter.create)
    monkeypatch.setattr(Interpreter, "create", mock_interpreter_create)

    mock_DIET_load = Mock(wraps=DIETClassifier.load)
    monkeypatch.setattr(DIETClassifier, "load", mock_DIET_load)

    (tmp_path / "models").mkdir()
    output = str(tmp_path / "models")

    if use_latest_model:
        trained_nlu_moodbot_path = str(Path(trained_nlu_moodbot_path).parent)

    # Models are typically fine-tuned with fewer epochs than when training
    # from scratch. Fine-tuning uses the number of epochs from the new config.
    old_config = rasa.shared.utils.io.read_yaml_file(
        "examples/moodbot/config.yml")
    old_config["pipeline"][-1][EPOCHS] = 10
    new_config_path = tmp_path / "new_config.yml"
    rasa.shared.utils.io.write_yaml(old_config, new_config_path)

    old_nlu = rasa.shared.utils.io.read_yaml_file(
        "examples/moodbot/data/nlu.yml")
    old_nlu["nlu"][-1]["examples"] = "-something else"
    new_nlu_path = tmp_path / "new_nlu.yml"
    rasa.shared.utils.io.write_yaml(old_nlu, new_nlu_path)

    train_nlu(
        str(new_config_path),
        str(new_nlu_path),
        domain="examples/moodbot/domain.yml",
        output=output,
        model_to_finetune=trained_nlu_moodbot_path,
        finetuning_epoch_fraction=0.2,
    )

    assert mock_interpreter_create.call_args[1]["should_finetune"]

    mocked_nlu_training.assert_called_once()
    _, nlu_train_kwargs = mocked_nlu_training.call_args
    model_to_finetune = nlu_train_kwargs["model_to_finetune"]
    assert isinstance(model_to_finetune, Interpreter)

    _, diet_kwargs = mock_DIET_load.call_args
    assert diet_kwargs["should_finetune"] is True

    new_diet_metadata = model_to_finetune.model_metadata.metadata["pipeline"][-1]
    assert new_diet_metadata["name"] == "DIETClassifier"
    assert new_diet_metadata[EPOCHS] == 2
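
`Mock(wraps=...)` is what lets this test assert on `Interpreter.create` and `DIETClassifier.load` without disabling their real behavior; a standalone illustration of the wraps pattern:

from unittest.mock import Mock

def load(path: str) -> str:
    return f"loaded:{path}"

# wraps=... records the call like any Mock but delegates to the wrapped
# function, so behavior is unchanged while the kwargs stay inspectable:
spy = Mock(wraps=load)
assert spy("model.tar.gz") == "loaded:model.tar.gz"
spy.assert_called_once_with("model.tar.gz")
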
Example #6
# Presumably parametrized like this in the original module (required for the
# `use_latest_model` argument):
@pytest.mark.parametrize("use_latest_model", [True, False])
def test_model_finetuning_core(
    tmp_path: Path,
    monkeypatch: MonkeyPatch,
    trained_moodbot_path: Text,
    use_latest_model: bool,
):
    mocked_core_training = AsyncMock()
    monkeypatch.setattr(rasa.core, rasa.core.train.__name__,
                        mocked_core_training)

    mock_agent_load = Mock(wraps=Agent.load)
    monkeypatch.setattr(Agent, "load", mock_agent_load)

    (tmp_path / "models").mkdir()
    output = str(tmp_path / "models")

    if use_latest_model:
        trained_moodbot_path = str(Path(trained_moodbot_path).parent)

    # Models are typically fine-tuned with fewer epochs than when training
    # from scratch. Fine-tuning uses the number of epochs from the new config.
    old_config = rasa.shared.utils.io.read_yaml_file(
        "examples/moodbot/config.yml")
    old_config["policies"][0]["epochs"] = 10
    new_config_path = tmp_path / "new_config.yml"
    rasa.shared.utils.io.write_yaml(old_config, new_config_path)

    old_stories = rasa.shared.utils.io.read_yaml_file(
        "examples/moodbot/data/stories.yml")
    old_stories["stories"].append({
        "story": "new story",
        "steps": [{
            "intent": "greet"
        }]
    })
    new_stories_path = tmp_path / "new_stories.yml"
    rasa.shared.utils.io.write_yaml(old_stories, new_stories_path)

    train_core(
        "examples/moodbot/domain.yml",
        str(new_config_path),
        str(new_stories_path),
        output=output,
        model_to_finetune=trained_moodbot_path,
        finetuning_epoch_fraction=0.2,
    )

    mocked_core_training.assert_called_once()
    _, kwargs = mocked_core_training.call_args
    model_to_finetune = kwargs["model_to_finetune"]
    assert isinstance(model_to_finetune, Agent)

    ted = model_to_finetune.policy_ensemble.policies[0]
    assert ted.config[EPOCHS] == 2
    assert ted.finetune_mode
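
A closing note on the two unpacking shapes used across these examples: `call_args` is an `(args, kwargs)` pair, while each `mock_calls` entry is a `(name, args, kwargs)` triple. A minimal demonstration:

from unittest.mock import Mock

m = Mock()
m(1, 2, interpreter="nlu")

args, kwargs = m.call_args            # (args, kwargs) pair
assert kwargs["interpreter"] == "nlu"

name, args, kwargs = m.mock_calls[0]  # (name, args, kwargs) triple
assert kwargs["interpreter"] == "nlu"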