Example 1
def test_model_finetuning_core_new_domain_label(
    tmp_path: Path,
    monkeypatch: MonkeyPatch,
    trained_moodbot_path: Text,
):
    mocked_core_training = AsyncMock()
    monkeypatch.setattr(rasa.core, rasa.core.train.__name__,
                        mocked_core_training)

    (tmp_path / "models").mkdir()
    output = str(tmp_path / "models")

    # Simulate addition to training data
    old_domain = rasa.shared.utils.io.read_yaml_file(
        "examples/moodbot/domain.yml")
    old_domain["intents"].append("a_new_one")
    new_domain_path = tmp_path / "new_domain.yml"
    rasa.shared.utils.io.write_yaml(old_domain, new_domain_path)

    with pytest.raises(SystemExit):
        train_core(
            domain=str(new_domain_path),
            config="examples/moodbot/config.yml",
            stories="examples/moodbot/data/stories.yml",
            output=output,
            model_to_finetune=trained_moodbot_path,
        )

    mocked_core_training.assert_not_called()
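
The pattern above (patch an async function with `AsyncMock` via pytest's `monkeypatch`, then assert on the mock) can be reproduced standalone; in this minimal sketch the `trainer` namespace is a hypothetical stand-in for `rasa.core`:

import asyncio
from types import SimpleNamespace
from unittest.mock import AsyncMock

trainer = SimpleNamespace(train=AsyncMock())  # hypothetical stand-in for `rasa.core`


def test_async_train_records_calls(monkeypatch):
    mocked_train = AsyncMock()
    monkeypatch.setattr(trainer, "train", mocked_train)

    # Code under test would await the patched function; simulate that here.
    asyncio.run(trainer.train(output="models"))

    mocked_train.assert_called_once()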
Example 2
def test_model_finetuning_core_with_default_epochs(
    tmp_path: Path,
    monkeypatch: MonkeyPatch,
    trained_moodbot_path: Text,
):
    mocked_core_training = AsyncMock()
    monkeypatch.setattr(rasa.core, rasa.core.train.__name__,
                        mocked_core_training)

    (tmp_path / "models").mkdir()
    output = str(tmp_path / "models")

    # Providing a new config with no epochs means the default number of epochs
    # is used and then scaled by `finetuning_epoch_fraction`.
    old_config = rasa.shared.utils.io.read_yaml_file(
        "examples/moodbot/config.yml")
    del old_config["policies"][0]["epochs"]
    new_config_path = tmp_path / "new_config.yml"
    rasa.shared.utils.io.write_yaml(old_config, new_config_path)

    train_core(
        "examples/moodbot/domain.yml",
        str(new_config_path),
        "examples/moodbot/data/stories.yml",
        output=output,
        model_to_finetune=trained_moodbot_path,
        finetuning_epoch_fraction=2,
    )

    mocked_core_training.assert_called_once()
    _, kwargs = mocked_core_training.call_args
    model_to_finetune = kwargs["model_to_finetune"]

    ted = model_to_finetune.policy_ensemble.policies[0]
    assert ted.config[EPOCHS] == TEDPolicy.defaults[EPOCHS] * 2
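
The `_, kwargs = mocked_core_training.call_args` idiom works because `call_args` unpacks into a (positional args, keyword args) pair; a tiny standalone check of that idiom:

import asyncio
from unittest.mock import AsyncMock

mock = AsyncMock()
asyncio.run(mock("examples/moodbot/domain.yml", epochs=100))

args, kwargs = mock.call_args
assert args == ("examples/moodbot/domain.yml",)
assert kwargs == {"epochs": 100}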
Example 3
def test_interpreter_of_old_model_passed_to_core_training(
        monkeypatch: MonkeyPatch, tmp_path: Path, trained_rasa_model: Text):
    # NLU isn't retrained
    monkeypatch.setattr(
        rasa.model.FingerprintComparisonResult,
        rasa.model.FingerprintComparisonResult.should_retrain_nlu.__name__,
        lambda _: False,
    )

    # An old model with an interpreter exists
    monkeypatch.setattr(rasa.model, rasa.model.get_latest_model.__name__,
                        lambda _: trained_rasa_model)

    # Mock the actual Core training
    _train_core = AsyncMock()
    monkeypatch.setattr(rasa.core, "train", _train_core)

    train(
        DEFAULT_DOMAIN_PATH_WITH_SLOTS,
        DEFAULT_CONFIG_PATH,
        [DEFAULT_STORIES_FILE, DEFAULT_NLU_DATA],
        str(tmp_path),
    )

    _train_core.assert_called_once()
    _, _, kwargs = _train_core.mock_calls[0]
    assert isinstance(kwargs["interpreter"], RasaNLUInterpreter)
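
Entries of `mock_calls`, used above, unpack one step further into (name, args, kwargs); for a direct call the recorded name is the empty string:

import asyncio
from unittest.mock import AsyncMock

mock = AsyncMock()
asyncio.run(mock(interpreter="nlu-interpreter"))

name, args, kwargs = mock.mock_calls[0]
assert name == ""
assert kwargs["interpreter"] == "nlu-interpreter"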
Example 4
def test_model_finetuning_nlu_new_entity(
    tmp_path: Path,
    monkeypatch: MonkeyPatch,
    trained_nlu_moodbot_path: Text,
):
    mocked_nlu_training = AsyncMock(return_value="")
    monkeypatch.setattr(rasa.nlu, rasa.nlu.train.__name__, mocked_nlu_training)

    (tmp_path / "models").mkdir()
    output = str(tmp_path / "models")

    old_nlu = rasa.shared.utils.io.read_yaml_file(
        "examples/moodbot/data/nlu.yml")
    old_nlu["nlu"][-1]["examples"] = "-[blah](something)"
    new_nlu_path = tmp_path / "new_nlu.yml"
    rasa.shared.utils.io.write_yaml(old_nlu, new_nlu_path)

    with pytest.raises(SystemExit):
        train_nlu(
            "examples/moodbot/config.yml",
            str(new_nlu_path),
            domain="examples/moodbot/domain.yml",
            output=output,
            model_to_finetune=trained_nlu_moodbot_path,
        )

    mocked_nlu_training.assert_not_called()
Example 5
def test_model_finetuning_nlu_new_label_to_domain_only(
    tmp_path: Path,
    monkeypatch: MonkeyPatch,
    trained_nlu_moodbot_path: Text,
):
    mocked_nlu_training = AsyncMock(return_value="")
    monkeypatch.setattr(rasa.nlu, rasa.nlu.train.__name__, mocked_nlu_training)

    (tmp_path / "models").mkdir()
    output = str(tmp_path / "models")

    old_domain = rasa.shared.utils.io.read_yaml_file(
        "examples/moodbot/domain.yml")
    old_domain["intents"].append("a_new_one")
    new_domain_path = tmp_path / "new_domain.yml"
    rasa.shared.utils.io.write_yaml(old_domain, new_domain_path)

    train_nlu(
        "examples/moodbot/config.yml",
        "examples/moodbot/data/nlu.yml",
        domain=str(new_domain_path),
        output=output,
        model_to_finetune=trained_nlu_moodbot_path,
    )

    mocked_nlu_training.assert_called()
Example 6
def test_model_finetuning_nlu_new_label_already_in_domain(
    tmp_path: Path,
    monkeypatch: MonkeyPatch,
    trained_rasa_model: Text,
    default_nlu_data: Text,
    default_config_path: Text,
    default_domain_path: Text,
):
    mocked_nlu_training = AsyncMock(return_value="")
    monkeypatch.setattr(rasa.nlu, rasa.nlu.train.__name__, mocked_nlu_training)

    (tmp_path / "models").mkdir()
    output = str(tmp_path / "models")

    old_nlu = rasa.shared.utils.io.read_yaml_file(default_nlu_data)
    # This intent exists in `default_domain_path` but not yet in the NLU data
    old_nlu["nlu"].append({"intent": "why", "examples": "whyy??"})
    new_nlu_path = tmp_path / "new_nlu.yml"
    rasa.shared.utils.io.write_yaml(old_nlu, new_nlu_path)

    with pytest.raises(SystemExit):
        train_nlu(
            default_config_path,
            str(new_nlu_path),
            domain=default_domain_path,
            output=output,
            model_to_finetune=trained_rasa_model,
        )

    mocked_nlu_training.assert_not_called()
Example 7
def test_model_finetuning_nlu_with_default_epochs(
    tmp_path: Path,
    monkeypatch: MonkeyPatch,
    trained_nlu_moodbot_path: Text,
):
    mocked_nlu_training = AsyncMock(return_value="")
    monkeypatch.setattr(rasa.nlu, rasa.nlu.train.__name__, mocked_nlu_training)

    (tmp_path / "models").mkdir()
    output = str(tmp_path / "models")

    # Providing a new config with no epochs means the default number of epochs
    # is used and then scaled by `finetuning_epoch_fraction`.
    old_config = rasa.shared.utils.io.read_yaml_file(
        "examples/moodbot/config.yml")
    del old_config["pipeline"][-1][EPOCHS]
    new_config_path = tmp_path / "new_config.yml"
    rasa.shared.utils.io.write_yaml(old_config, new_config_path)

    train_nlu(
        str(new_config_path),
        "examples/moodbot/data/nlu.yml",
        output=output,
        model_to_finetune=trained_nlu_moodbot_path,
        finetuning_epoch_fraction=0.1,
    )

    mocked_nlu_training.assert_called_once()
    _, nlu_train_kwargs = mocked_nlu_training.call_args
    model_to_finetune = nlu_train_kwargs["model_to_finetune"]
    new_diet_metadata = model_to_finetune.model_metadata.metadata["pipeline"][-1]
    assert new_diet_metadata["name"] == "DIETClassifier"
    assert new_diet_metadata[EPOCHS] == DIETClassifier.defaults[EPOCHS] * 0.1
Example 8
def test_model_finetuning_with_invalid_model_nlu(
    tmp_path: Path,
    monkeypatch: MonkeyPatch,
    default_domain_path: Text,
    default_stack_config: Text,
    default_nlu_data: Text,
    model_to_fine_tune: Text,
    capsys: CaptureFixture,
):
    mocked_nlu_training = AsyncMock(return_value="")
    monkeypatch.setattr(rasa.nlu, rasa.nlu.train.__name__, mocked_nlu_training)

    (tmp_path / "models").mkdir()
    output = str(tmp_path / "models")

    with pytest.raises(SystemExit):
        train_nlu(
            default_stack_config,
            default_nlu_data,
            domain=default_domain_path,
            output=output,
            model_to_finetune=model_to_fine_tune,
            finetuning_epoch_fraction=1,
        )

    mocked_nlu_training.assert_not_called()

    assert "No NLU model for finetuning found" in capsys.readouterr().out
Example 9
async def test_correct_question_for_action_name_was_asked(
    monkeypatch: MonkeyPatch,
    mock_endpoint: EndpointConfig,
    action_name: Text,
    question: Text,
    is_marked_as_correct: bool,
    sent_action_name: Text,
):
    conversation_id = "conversation_id"
    policy = "policy"
    tracker = DialogueStateTracker.from_events("some_sender", [])

    monkeypatch.setattr(interactive, "retrieve_tracker",
                        AsyncMock(return_value=tracker.current_state()))
    monkeypatch.setattr(interactive, "_ask_questions",
                        AsyncMock(return_value=is_marked_as_correct))
    monkeypatch.setattr(
        interactive,
        "_request_action_from_user",
        AsyncMock(return_value=("action_another_one", False)),
    )

    mocked_send_action = AsyncMock()
    monkeypatch.setattr(interactive, "send_action", mocked_send_action)

    mocked_confirm = Mock(return_value=None)
    monkeypatch.setattr(interactive.questionary, "confirm", mocked_confirm)

    # validate the action and make sure that the correct question was asked
    await interactive._validate_action(action_name, policy, 1.0, [],
                                       mock_endpoint, conversation_id)
    mocked_confirm.assert_called_once_with(question)
    args, kwargs = mocked_send_action.call_args_list[-1]
    assert args[2] == sent_action_name
Example 10
async def test_fetch_events_within_time_range():
    conversation_ids = ["some-id", "another-id"]

    # prepare events from different senders and different timestamps
    event_1 = random_user_uttered_event(3)
    event_2 = random_user_uttered_event(2)
    event_3 = random_user_uttered_event(1)
    events = {
        conversation_ids[0]: [event_1, event_2],
        conversation_ids[1]: [event_3]
    }

    def _get_tracker(conversation_id: Text) -> DialogueStateTracker:
        return DialogueStateTracker.from_events(conversation_id,
                                                events[conversation_id])

    # create mock tracker store
    tracker_store = AsyncMock()
    tracker_store.retrieve_full_tracker.side_effect = _get_tracker
    tracker_store.keys = AsyncMock(return_value=conversation_ids)

    exporter = MockExporter(tracker_store)
    exporter.requested_conversation_ids = conversation_ids

    # noinspection PyProtectedMember
    fetched_events = await exporter._fetch_events_within_time_range()

    # events should come back for all requested conversation IDs
    assert all(
        any(_id in event["sender_id"] for event in fetched_events)
        for _id in conversation_ids)

    # events are sorted by timestamp despite the initially different order
    assert fetched_events == list(
        sorted(fetched_events, key=lambda e: e["timestamp"]))
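
Assigning a plain function to `side_effect`, as done for `retrieve_full_tracker` above, makes the awaited mock return that function's result per call; a minimal sketch:

import asyncio
from unittest.mock import AsyncMock

store = AsyncMock()
store.retrieve_full_tracker.side_effect = lambda conversation_id: {
    "sender_id": conversation_id
}

tracker = asyncio.run(store.retrieve_full_tracker("some-id"))
assert tracker == {"sender_id": "some-id"}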
Example 11
def test_trained_interpreter_passed_to_core_training(
        monkeypatch: MonkeyPatch, tmp_path: Path,
        unpacked_trained_rasa_model: Text):
    # Skip actual NLU training and return trained interpreter path from fixture.
    # Patching is a bit more complicated as we have a module `train` and a
    # function with the same name 😬
    monkeypatch.setattr(
        sys.modules["rasa.train"],
        "_train_nlu_with_validated_data",
        AsyncMock(return_value=unpacked_trained_rasa_model),
    )

    # Mock the actual Core training
    _train_core = AsyncMock()
    monkeypatch.setattr(rasa.core, "train", _train_core)

    train(
        DEFAULT_DOMAIN_PATH_WITH_SLOTS,
        DEFAULT_CONFIG_PATH,
        [DEFAULT_STORIES_FILE, DEFAULT_NLU_DATA],
        str(tmp_path),
    )

    _train_core.assert_called_once()
    _, _, kwargs = _train_core.mock_calls[0]
    assert isinstance(kwargs["interpreter"], RasaNLUInterpreter)
Example 12
def test_model_finetuning_nlu(
    tmp_path: Path,
    monkeypatch: MonkeyPatch,
    trained_nlu_moodbot_path: Text,
    use_latest_model: bool,
):
    mocked_nlu_training = AsyncMock(return_value="")
    monkeypatch.setattr(rasa.nlu, rasa.nlu.train.__name__, mocked_nlu_training)

    mock_interpreter_create = Mock(wraps=Interpreter.create)
    monkeypatch.setattr(Interpreter, "create", mock_interpreter_create)

    mock_DIET_load = Mock(wraps=DIETClassifier.load)
    monkeypatch.setattr(DIETClassifier, "load", mock_DIET_load)

    (tmp_path / "models").mkdir()
    output = str(tmp_path / "models")

    if use_latest_model:
        trained_nlu_moodbot_path = str(Path(trained_nlu_moodbot_path).parent)

    # Typically models will be fine-tuned with a smaller number of epochs than training
    # from scratch.
    # Fine-tuning will use the number of epochs in the new config.
    old_config = rasa.shared.utils.io.read_yaml_file(
        "examples/moodbot/config.yml")
    old_config["pipeline"][-1][EPOCHS] = 10
    new_config_path = tmp_path / "new_config.yml"
    rasa.shared.utils.io.write_yaml(old_config, new_config_path)

    old_nlu = rasa.shared.utils.io.read_yaml_file(
        "examples/moodbot/data/nlu.yml")
    old_nlu["nlu"][-1]["examples"] = "-something else"
    new_nlu_path = tmp_path / "new_nlu.yml"
    rasa.shared.utils.io.write_yaml(old_nlu, new_nlu_path)

    train_nlu(
        str(new_config_path),
        str(new_nlu_path),
        domain="examples/moodbot/domain.yml",
        output=output,
        model_to_finetune=trained_nlu_moodbot_path,
        finetuning_epoch_fraction=0.2,
    )

    assert mock_interpreter_create.call_args[1]["should_finetune"]

    mocked_nlu_training.assert_called_once()
    _, nlu_train_kwargs = mocked_nlu_training.call_args
    model_to_finetune = nlu_train_kwargs["model_to_finetune"]
    assert isinstance(model_to_finetune, Interpreter)

    _, diet_kwargs = mock_DIET_load.call_args
    assert diet_kwargs["should_finetune"] is True

    new_diet_metadata = model_to_finetune.model_metadata.metadata["pipeline"][-1]
    assert new_diet_metadata["name"] == "DIETClassifier"
    assert new_diet_metadata[EPOCHS] == 2
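
`Mock(wraps=...)`, used above for `Interpreter.create` and `DIETClassifier.load`, delegates to the real callable while still recording the call, so behavior is unchanged but arguments can be asserted on afterwards. A minimal illustration with a hypothetical helper:

from unittest.mock import Mock

def scale_epochs(epochs: int, fraction: float) -> int:
    return int(epochs * fraction)

spy = Mock(wraps=scale_epochs)
assert spy(10, 0.2) == 2  # the wrapped function computes the real result
spy.assert_called_once_with(10, 0.2)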
Example 13
def test_model_finetuning_core(
    tmp_path: Path,
    monkeypatch: MonkeyPatch,
    trained_moodbot_path: Text,
    use_latest_model: bool,
):
    mocked_core_training = AsyncMock()
    monkeypatch.setattr(rasa.core, rasa.core.train.__name__,
                        mocked_core_training)

    mock_agent_load = Mock(wraps=Agent.load)
    monkeypatch.setattr(Agent, "load", mock_agent_load)

    (tmp_path / "models").mkdir()
    output = str(tmp_path / "models")

    if use_latest_model:
        trained_moodbot_path = str(Path(trained_moodbot_path).parent)

    # Typically models will be fine-tuned with a smaller number of epochs than training
    # from scratch.
    # Fine-tuning will use the number of epochs in the new config.
    old_config = rasa.shared.utils.io.read_yaml_file(
        "examples/moodbot/config.yml")
    old_config["policies"][0]["epochs"] = 10
    new_config_path = tmp_path / "new_config.yml"
    rasa.shared.utils.io.write_yaml(old_config, new_config_path)

    old_stories = rasa.shared.utils.io.read_yaml_file(
        "examples/moodbot/data/stories.yml")
    old_stories["stories"].append({
        "story": "new story",
        "steps": [{
            "intent": "greet"
        }]
    })
    new_stories_path = tmp_path / "new_stories.yml"
    rasa.shared.utils.io.write_yaml(old_stories, new_stories_path)

    train_core(
        "examples/moodbot/domain.yml",
        str(new_config_path),
        str(new_stories_path),
        output=output,
        model_to_finetune=trained_moodbot_path,
        finetuning_epoch_fraction=0.2,
    )

    mocked_core_training.assert_called_once()
    _, kwargs = mocked_core_training.call_args
    model_to_finetune = kwargs["model_to_finetune"]
    assert isinstance(model_to_finetune, Agent)

    ted = model_to_finetune.policy_ensemble.policies[0]
    assert ted.config[EPOCHS] == 2
    assert ted.finetune_mode
Example 14
async def test_fetch_events_within_time_range_tracker_does_not_err():
    # create mock tracker store that returns `None` on `retrieve_full_tracker()`
    tracker_store = Mock()

    tracker_store.keys = AsyncMock(return_value=[uuid.uuid4()])
    tracker_store.retrieve_full_tracker = AsyncMock(return_value=None)

    exporter = MockExporter(tracker_store)

    # no events means `NoEventsInTimeRangeError`
    with pytest.raises(NoEventsInTimeRangeError):
        # noinspection PyProtectedMember
        await exporter._fetch_events_within_time_range()
Example 15
async def test_fetch_events_within_time_range_tracker_contains_no_events():
    # create mock tracker store that returns a tracker containing no events
    tracker_store = Mock()

    tracker_store.keys = AsyncMock(return_value=["a great ID"])
    tracker_store.retrieve_full_tracker = AsyncMock(
        return_value=DialogueStateTracker.from_events("a great ID", []))

    exporter = MockExporter(tracker_store)

    # no events means `NoEventsInTimeRangeError`
    with pytest.raises(NoEventsInTimeRangeError):
        # noinspection PyProtectedMember
        await exporter._fetch_events_within_time_range()
Example 16
def test_train_core_autoconfig(
    tmp_path: Path,
    monkeypatch: MonkeyPatch,
    domain_path: Text,
    stories_path: Text,
    stack_config_path: Text,
):
    monkeypatch.setattr(tempfile, "tempdir", tmp_path)

    # mock function that returns configuration
    mocked_get_configuration = Mock()
    monkeypatch.setattr(autoconfig, "get_configuration",
                        mocked_get_configuration)

    # skip actual core training
    monkeypatch.setattr(rasa.model_training, "_train_core_with_validated_data",
                        AsyncMock())

    # do training
    rasa.model_training.train_core(
        domain_path,
        stack_config_path,
        stories_path,
        output="test_train_core_temp_files_models",
    )

    mocked_get_configuration.assert_called_once()
    _, args, _ = mocked_get_configuration.mock_calls[0]
    assert args[1] == autoconfig.TrainingType.CORE
Example 17
def test_trained_interpreter_passed_to_core_training(
    monkeypatch: MonkeyPatch,
    tmp_path: Path,
    unpacked_trained_rasa_model: Text,
    nlu_data_path: Text,
    stories_path: Text,
    config_path: Text,
    domain_path: Text,
):
    # Skip actual NLU training and return trained interpreter path from fixture.
    # Patching is a bit more complicated as we have a module `train` and a
    # function with the same name 😬
    monkeypatch.setattr(
        rasa.model_training,
        "_train_nlu_with_validated_data",
        AsyncMock(return_value=unpacked_trained_rasa_model),
    )

    # Mock the actual Core training
    _train_core = mock_core_training(monkeypatch)

    rasa.train(
        domain_path,
        config_path,
        [stories_path, nlu_data_path],
        str(tmp_path),
    )

    _train_core.assert_called_once()
    _, _, kwargs = _train_core.mock_calls[0]
    assert isinstance(kwargs["interpreter"], RasaNLUInterpreter)
Example 18
def test_train_nlu_autoconfig(
    tmp_path: Path,
    monkeypatch: MonkeyPatch,
    default_stack_config: Text,
    default_nlu_data: Text,
):
    monkeypatch.setattr(tempfile, "tempdir", tmp_path)

    # mock function that returns configuration
    mocked_get_configuration = Mock()
    monkeypatch.setattr(autoconfig, "get_configuration",
                        mocked_get_configuration)

    monkeypatch.setattr(sys.modules["rasa.train"],
                        "_train_nlu_with_validated_data", AsyncMock())

    # do training
    train_nlu(
        default_stack_config,
        default_nlu_data,
        output="test_train_nlu_temp_files_models",
    )

    mocked_get_configuration.assert_called_once()
    _, args, _ = mocked_get_configuration.mock_calls[0]
    assert args[1] == autoconfig.TrainingType.NLU
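
Patching through `sys.modules["rasa.train"]` sidesteps attribute shadowing (the package exposes a `train` function that hides the `train` submodule). The same idiom demonstrated with a standard-library module, purely for illustration:

import json
import sys
from unittest.mock import AsyncMock


def test_patching_a_module_via_sys_modules(monkeypatch):
    mocked_dumps = AsyncMock(return_value="")
    # sys.modules maps dotted names directly to module objects, so no
    # attribute lookup on a (possibly shadowed) parent package is involved.
    monkeypatch.setattr(sys.modules["json"], "dumps", mocked_dumps)

    assert json.dumps is mocked_dumps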
Example 19
async def test_nlu_comparison(
    tmp_path: Path, monkeypatch: MonkeyPatch, nlu_as_json_path: Text
):
    config = {
        "language": "en",
        "pipeline": [
            {"name": "WhitespaceTokenizer"},
            {"name": "KeywordIntentClassifier"},
            {"name": "RegexEntityExtractor"},
        ],
    }
    # the configs need to be at different paths, otherwise the results are
    # combined under the same dictionary key and cannot be plotted properly
    configs = [write_file_config(config).name, write_file_config(config).name]

    # mock training
    monkeypatch.setattr(Interpreter, "load", Mock(spec=RasaNLUInterpreter))
    monkeypatch.setattr(sys.modules["rasa.nlu"], "train", AsyncMock())

    monkeypatch.setattr(
        sys.modules["rasa.nlu.test"],
        "get_eval_data",
        Mock(return_value=(1, None, (None,),)),
    )
    monkeypatch.setattr(
        sys.modules["rasa.nlu.test"],
        "evaluate_intents",
        Mock(return_value={"f1_score": 1}),
    )

    output = str(tmp_path)
    test_data_importer = TrainingDataImporter.load_from_dict(
        training_data_paths=[nlu_as_json_path]
    )
    test_data = test_data_importer.get_nlu_data()
    await compare_nlu_models(
        configs, test_data, output, runs=2, exclusion_percentages=[50, 80]
    )

    assert set(os.listdir(output)) == {
        "run_1",
        "run_2",
        "results.json",
        "nlu_model_comparison_graph.pdf",
    }

    run_1_path = os.path.join(output, "run_1")
    assert set(os.listdir(run_1_path)) == {"50%_exclusion", "80%_exclusion", "test.yml"}

    exclude_50_path = os.path.join(run_1_path, "50%_exclusion")
    modelnames = [os.path.splitext(os.path.basename(config))[0] for config in configs]

    modeloutputs = set(
        ["train"]
        + [f"{m}_report" for m in modelnames]
        + [f"{m}.tar.gz" for m in modelnames]
    )
    assert set(os.listdir(exclude_50_path)) == modeloutputs
Example 20
async def test_aio_pika_exceptions_caught(
    exception: Exception, monkeypatch: MonkeyPatch
):
    monkeypatch.setattr(PikaEventBroker, "connect", AsyncMock(side_effect=exception))

    with pytest.raises(ConnectionException):
        await EventBroker.create(
            EndpointConfig(username="******", password="******", type="pika")
        )
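
An exception instance assigned to `side_effect` is raised at await time, which is how the failing `connect` is simulated above; standalone:

import asyncio
from unittest.mock import AsyncMock

connect = AsyncMock(side_effect=ConnectionError("broker unreachable"))

try:
    asyncio.run(connect())
except ConnectionError as exc:
    assert str(exc) == "broker unreachable"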
Example 21
async def test_pika_raise_connection_exception(monkeypatch: MonkeyPatch):

    monkeypatch.setattr(PikaEventBroker, "connect",
                        AsyncMock(side_effect=ChannelNotFoundEntity()))

    with pytest.raises(ConnectionException):
        await EventBroker.create(
            EndpointConfig(username="******",
                           password="******",
                           type="pika"))
Example 22
async def test_closing_broker():
    exporter = MockExporter(event_broker=SQLEventBroker())

    # noinspection PyProtectedMember
    exporter._fetch_events_within_time_range = AsyncMock(return_value=[])

    # run the export function
    with pytest.warns(None) as warnings:
        await exporter.publish_events()

    assert len(warnings) == 0
Example 23
@pytest.fixture
def rotator(mocker, tmp_log):
    # Patch `Rotator.rotate` with an AsyncMock that reports "no rotation
    # needed" and hands back the temporary log path.
    rotator = mocker.patch.object(Rotator, "rotate", AsyncMock())
    rotator.return_value = (False, tmp_log)
    yield rotator
Example 24
async def test_fail_safe_tracker_store_if_no_errors():
    mocked_tracker_store = Mock()

    tracker_store = FailSafeTrackerStore(mocked_tracker_store, None)

    # test save
    mocked_tracker_store.save = AsyncMock()
    await tracker_store.save(None)
    mocked_tracker_store.save.assert_called_once()

    # test retrieve
    expected = [1]
    mocked_tracker_store.retrieve = AsyncMock(return_value=expected)
    sender_id = "10"
    assert await tracker_store.retrieve(sender_id) == expected
    mocked_tracker_store.retrieve.assert_called_once_with(sender_id)

    # test keys
    expected = ["sender 1", "sender 2"]
    mocked_tracker_store.keys = AsyncMock(return_value=expected)
    assert await tracker_store.keys() == expected
    mocked_tracker_store.keys.assert_called_once()
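
`AsyncMock` also records awaits separately from calls, so the call-based assertions above have await-specific counterparts; a minimal sketch:

import asyncio
from unittest.mock import AsyncMock

retrieve = AsyncMock(return_value=[1])
assert asyncio.run(retrieve("10")) == [1]

retrieve.assert_called_once_with("10")   # the call was made ...
retrieve.assert_awaited_once_with("10")  # ... and actually awaited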
Example 25
async def test_get_conversation_ids_to_process_error(
        requested_ids: Optional[List[Text]], available_ids: List[Text],
        exception: Exception):
    # create and mock tracker store containing `available_ids` as keys
    tracker_store = Mock()
    tracker_store.keys = AsyncMock(return_value=available_ids)

    exporter = MockExporter(tracker_store)
    exporter.requested_conversation_ids = requested_ids

    with pytest.raises(exception):
        # noinspection PyProtectedMember
        await exporter._get_conversation_ids_to_process()
Example 26
def prepare_namespace_and_mocked_tracker_store_with_events(
        temporary_path: Path, monkeypatch: MonkeyPatch
) -> Tuple[List[UserUttered], argparse.Namespace]:
    endpoints_path = write_endpoint_config_to_yaml(
        temporary_path,
        {"event_broker": {"type": "pika"}, "tracker_store": {"type": "sql"}},
    )

    # export these conversation IDs
    all_conversation_ids = ["id-1", "id-2", "id-3"]

    requested_conversation_ids = ["id-1", "id-2"]

    # create namespace with a set of cmdline arguments
    namespace = argparse.Namespace(
        endpoints=endpoints_path,
        conversation_ids=",".join(requested_conversation_ids),
        minimum_timestamp=1.0,
        maximum_timestamp=10.0,
    )

    # prepare events from different senders and different timestamps
    events = [
        random_user_uttered_event(timestamp)
        for timestamp in [1, 2, 3, 4, 11, 5]
    ]
    events_for_conversation_id = {
        all_conversation_ids[0]: [events[0], events[1]],
        all_conversation_ids[1]: [events[2], events[3], events[4]],
        all_conversation_ids[2]: [events[5]],
    }

    async def _get_tracker(conversation_id: Text) -> DialogueStateTracker:
        return DialogueStateTracker.from_events(
            conversation_id, events_for_conversation_id[conversation_id])

    # mock tracker store
    tracker_store = Mock()
    tracker_store.keys = AsyncMock(return_value=all_conversation_ids)
    tracker_store.retrieve_full_tracker = _get_tracker

    monkeypatch.setattr(export, "_get_tracker_store", lambda _: tracker_store)

    return events, namespace
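
Note that `retrieve_full_tracker` above is replaced by a plain `async def` rather than an `AsyncMock`; that works whenever call recording is not needed:

import asyncio
from unittest.mock import Mock

async def _get_tracker(conversation_id):
    return {"sender_id": conversation_id}

tracker_store = Mock()
tracker_store.retrieve_full_tracker = _get_tracker

assert asyncio.run(tracker_store.retrieve_full_tracker("id-1")) == {"sender_id": "id-1"}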
Example 27
async def test_get_conversation_ids_to_process(
    requested_ids: Optional[List[Text]],
    available_ids: Optional[List[Text]],
    expected: Optional[List[Text]],
):
    # create and mock tracker store containing `available_ids` as keys
    tracker_store = Mock()
    tracker_store.keys = AsyncMock(return_value=available_ids)

    exporter = MockExporter(tracker_store)
    exporter.requested_conversation_ids = requested_ids

    # noinspection PyProtectedMember
    assert await exporter._get_conversation_ids_to_process() == set(expected)
Example 28
async def test_fail_safe_tracker_store_with_save_error():
    mocked_tracker_store = Mock()
    mocked_tracker_store.save = Mock(side_effect=Exception())

    fallback_tracker_store = Mock()
    fallback_tracker_store.save = AsyncMock()

    on_error_callback = Mock()

    tracker_store = FailSafeTrackerStore(
        mocked_tracker_store, on_error_callback, fallback_tracker_store
    )
    await tracker_store.save(None)

    fallback_tracker_store.save.assert_called_once()
    on_error_callback.assert_called_once()
Example 29
async def test_publishing_error():
    # mock event broker so it raises on `publish()`
    event_broker = Mock()
    event_broker.publish.side_effect = ValueError()

    exporter = MockExporter(event_broker=event_broker)

    user_event = random_user_uttered_event(1).as_dict()
    user_event["sender_id"] = uuid.uuid4().hex

    # noinspection PyProtectedMember
    exporter._fetch_events_within_time_range = AsyncMock(
        return_value=[user_event])

    # run the export function
    with pytest.raises(PublishingError):
        await exporter.publish_events()
Example 30
async def test_parser_must_start_on_read_point(
    tmp_log,
    mocker,
):
    with open(tmp_log, 'w') as log:
        for _ in range(10):
            timestamp = time.time()
            line = json.dumps({
                parser_settings.TIMESTAMP_KEY: "| " + str(timestamp)
            })
            log.write(line + "\n")

    parse_line = mocker.patch.object(Parser, "_parse_line", AsyncMock())
    parser = Parser({"log_file": tmp_log})
    parser.read_point = 9
    await parser.parse()
    parse_line.assert_called_once_with(line + "\n")
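
pytest-mock's `mocker.patch.object(Parser, "_parse_line", AsyncMock())` replaces the method on the class itself (undoing it at teardown), and because a mock is not a descriptor, `self` is not passed through; that is why the assertion above checks only the line argument. A standalone equivalent using `unittest.mock.patch.object` and a hypothetical `Parser`:

import asyncio
from unittest.mock import AsyncMock, patch


class Parser:  # hypothetical stand-in for the class under test
    async def _parse_line(self, line: str) -> None:
        raise NotImplementedError

    async def parse(self) -> None:
        await self._parse_line("last line\n")


with patch.object(Parser, "_parse_line", AsyncMock()) as parse_line:
    asyncio.run(Parser().parse())

parse_line.assert_called_once_with("last line\n")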