Example #1
def _collect_action_executed_predictions(
    processor: "MessageProcessor",
    partial_tracker: DialogueStateTracker,
    event: ActionExecuted,
    fail_on_prediction_errors: bool,
    circuit_breaker_tripped: bool,
) -> Tuple[EvaluationStore, PolicyPrediction,
           Optional[EntityEvaluationResult]]:
    from rasa.core.policies.form_policy import FormPolicy

    action_executed_eval_store = EvaluationStore()

    gold_action_name = event.action_name
    gold_action_text = event.action_text
    gold = gold_action_name or gold_action_text

    policy_entity_result = None

    if circuit_breaker_tripped:
        prediction = PolicyPrediction([], policy_name=None)
        predicted = "circuit breaker tripped"
    else:
        action, prediction = processor.predict_next_action(partial_tracker)
        predicted = action.name()

        policy_entity_result = _get_e2e_entity_evaluation_result(
            processor, partial_tracker, prediction)

        if (prediction.policy_name
                and predicted != gold and _form_might_have_been_rejected(
                    processor.domain, partial_tracker, predicted)):
            # The wrong action was predicted, but this can still be fine
            # if the form action might have been rejected.
            emulate_loop_rejection(partial_tracker)
            # try again
            action, prediction = processor.predict_next_action(partial_tracker)

            # Even if the prediction is also wrong, we don't have to undo the emulation
            # of the action rejection as we know that the user explicitly specified
            # that something other than the form was supposed to run.
            predicted = action.name()

    action_executed_eval_store.add_to_store(action_predictions=[predicted],
                                            action_targets=[gold])

    if action_executed_eval_store.has_prediction_target_mismatch():
        partial_tracker.update(
            WronglyPredictedAction(
                gold_action_name,
                gold_action_text,
                predicted,
                prediction.policy_name,
                prediction.max_confidence,
                event.timestamp,
            ))
        if fail_on_prediction_errors:
            story_dump = YAMLStoryWriter().dumps(
                partial_tracker.as_story().story_steps)
            error_msg = (f"Model predicted a wrong action. Failed Story: "
                         f"\n\n{story_dump}")
            if (prediction.policy_name
                    and FormPolicy.__name__ in prediction.policy_name):
                error_msg += ("FormAction is not run during "
                              "evaluation therefore it is impossible to know "
                              "if validation failed or this story is wrong. "
                              "If the story is correct, add it to the "
                              "training stories and retrain.")
            raise WrongPredictionException(error_msg)
    else:
        partial_tracker.update(
            ActionExecuted(
                predicted,
                prediction.policy_name,
                prediction.max_confidence,
                event.timestamp,
            ))

    return action_executed_eval_store, prediction, policy_entity_result
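
The gold/predicted bookkeeping above funnels through `EvaluationStore`. A minimal sketch of that pattern, assuming `EvaluationStore` can be imported from `rasa.core.test` (the module this helper lives in); the action names are made up for illustration:

from rasa.core.test import EvaluationStore

store = EvaluationStore()
# Record one predicted action against the action the story expected.
store.add_to_store(action_predictions=["utter_goodbye"],
                   action_targets=["utter_greet"])

# A disagreement here is what triggers the WronglyPredictedAction branch above.
assert store.has_prediction_target_mismatch()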
Example #2
def _message_clarification(tracker: DialogueStateTracker) -> List[Event]:
    clarification = copy.deepcopy(tracker.latest_message)
    clarification.parse_data["intent"]["confidence"] = 1.0
    clarification.timestamp = time.time()
    return [ActionExecuted(ACTION_LISTEN_NAME), clarification]
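
A minimal usage sketch for the helper above, assuming Rasa 2.x import paths (`rasa.shared.core.events`, `rasa.shared.core.constants`, `rasa.shared.core.trackers`); the sender id and intent values are made up for illustration:

import copy
import time
from typing import List

from rasa.shared.core.constants import ACTION_LISTEN_NAME
from rasa.shared.core.events import ActionExecuted, Event, UserUttered
from rasa.shared.core.trackers import DialogueStateTracker


def _message_clarification(tracker: DialogueStateTracker) -> List[Event]:
    # Same helper as above, repeated so this sketch is self-contained.
    clarification = copy.deepcopy(tracker.latest_message)
    clarification.parse_data["intent"]["confidence"] = 1.0
    clarification.timestamp = time.time()
    return [ActionExecuted(ACTION_LISTEN_NAME), clarification]


tracker = DialogueStateTracker("sender", slots=[])
tracker.update(UserUttered("hi", intent={"name": "greet", "confidence": 0.3}))

# Re-listen, then replay the last user message with full intent confidence.
for event in _message_clarification(tracker):
    tracker.update(event)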
Example #3
    events = await ActionSessionStart().run(
        default_channel, template_nlg, template_sender_tracker, default_domain
    )
    assert events == [SessionStarted(), ActionExecuted(ACTION_LISTEN_NAME)]


@pytest.mark.parametrize(
    "session_config, expected_events",
    [
        (
            SessionConfig(123, True),
            [
                SessionStarted(),
                SlotSet("my_slot", "value"),
                SlotSet("another-slot", "value2"),
                ActionExecuted(action_name=ACTION_LISTEN_NAME),
            ],
        ),
        (
            SessionConfig(123, False),
            [SessionStarted(), ActionExecuted(action_name=ACTION_LISTEN_NAME)],
        ),
    ],
)
async def test_action_session_start_with_slots(
    default_channel: CollectingOutputChannel,
    template_nlg: TemplatedNaturalLanguageGenerator,
    template_sender_tracker: DialogueStateTracker,
    default_domain: Domain,
    session_config: SessionConfig,
    expected_events: List[Event],
Example #4
async def test_import_nlu_training_data_from_e2e_stories(project: Text):
    config_path = os.path.join(project, DEFAULT_CONFIG_PATH)
    domain_path = os.path.join(project, DEFAULT_DOMAIN_PATH)
    default_data_path = os.path.join(project, DEFAULT_DATA_PATH)
    importer = TrainingDataImporter.load_from_dict({}, config_path,
                                                   domain_path,
                                                   [default_data_path])

    # The `E2EImporter` correctly wraps the underlying `CombinedDataImporter`
    assert isinstance(importer, E2EImporter)
    importer_without_e2e = importer.importer

    stories = StoryGraph([
        StoryStep(events=[
            SlotSet("some slot", "doesn't matter"),
            UserUttered("greet_from_stories", {"name": "greet_from_stories"}),
            ActionExecuted("utter_greet_from_stories"),
        ]),
        StoryStep(events=[
            UserUttered("how are you doing?"),
            ActionExecuted("utter_greet_from_stories", action_text="Hi Joey."),
        ]),
    ])

    # Patch to return our test stories
    importer_without_e2e.get_stories = asyncio.coroutine(lambda *args: stories)

    # The wrapping `E2EImporter` simply forwards these method calls
    assert (await importer_without_e2e.get_stories()).as_story_string() == (
        await importer.get_stories()).as_story_string()
    assert (await importer_without_e2e.get_config()) == (await importer.get_config())

    # Check additional NLU training data from stories was added
    nlu_data = await importer.get_nlu_data()

    # The `E2EImporter` adds NLU training data based on our training stories
    assert len(nlu_data.training_examples) > len(
        (await importer_without_e2e.get_nlu_data()).training_examples)

    # Check if the NLU training data was added correctly from the story training data
    expected_additional_messages = [
        Message(data={
            TEXT: "greet_from_stories",
            INTENT_NAME: "greet_from_stories"
        }),
        Message(data={
            ACTION_NAME: "utter_greet_from_stories",
            ACTION_TEXT: ""
        }),
        Message(data={
            TEXT: "how are you doing?",
            INTENT_NAME: None
        }),
        Message(data={
            ACTION_NAME: "utter_greet_from_stories",
            ACTION_TEXT: "Hi Joey."
        }),
    ]

    assert all(m in nlu_data.training_examples
               for m in expected_additional_messages)
Example #5
def _messages_from_action(event: ActionExecuted) -> Message:
    # sub state correctly encodes action_name vs action_text
    return Message(data=event.as_sub_state())
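
For reference, a hedged sketch of what that sub-state looks like, assuming the Rasa 2.x behaviour where `ActionExecuted.as_sub_state()` returns an `action_name` entry for named actions and an `action_text` entry for end-to-end bot utterances:

from rasa.shared.core.events import ActionExecuted
from rasa.shared.nlu.training_data.message import Message


def _messages_from_action(event: ActionExecuted) -> Message:
    # sub state correctly encodes action_name vs action_text
    return Message(data=event.as_sub_state())


# A named action maps to {"action_name": "utter_greet"}; an end-to-end bot
# utterance would map to {"action_text": "..."} instead.
message = _messages_from_action(ActionExecuted("utter_greet"))
print(message.data)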
Example #6
    assert len(message.data) == 1


def test_container_message_lookup_fails_if_text_cannot_be_looked_up():
    table = MessageContainerForCoreFeaturization()
    with pytest.raises(ValueError, match="Expected a message with key"):
        table.lookup_message(user_text="a text not included in the table")


@pytest.mark.parametrize(
    "events,expected_num_entries",
    [
        (
            [
                UserUttered(intent={INTENT_NAME_KEY: "greet"}),
                ActionExecuted(action_name="utter_greet"),
                ActionExecuted(action_name="utter_greet"),
            ],
            2,
        ),
        (
            [
                UserUttered(text="text", intent={INTENT_NAME_KEY: "greet"}),
                ActionExecuted(action_name="utter_greet"),
            ],
            3,
        ),
    ],
)
def test_container_derive_messages_from_events_and_add(
        events: List[Event], expected_num_entries: int):
Example #7
):
    events = await ActionSessionStart().run(default_channel, template_nlg,
                                            template_sender_tracker, domain)
    assert events == [SessionStarted(), ActionExecuted(ACTION_LISTEN_NAME)]


@pytest.mark.parametrize(
    "session_config, expected_events",
    [
        (
            SessionConfig(123, True),
            [
                SessionStarted(),
                SlotSet("my_slot", "value"),
                SlotSet("another-slot", "value2"),
                ActionExecuted(action_name=ACTION_LISTEN_NAME),
            ],
        ),
        (
            SessionConfig(123, False),
            [SessionStarted(),
             ActionExecuted(action_name=ACTION_LISTEN_NAME)],
        ),
    ],
)
async def test_action_session_start_with_slots(
    default_channel: CollectingOutputChannel,
    template_nlg: TemplatedNaturalLanguageGenerator,
    template_sender_tracker: DialogueStateTracker,
    domain: Domain,
    session_config: SessionConfig,
Example #8
async def test_persist_legacy_form_story():
    domain = Domain.load("data/test_domains/form.yml")

    tracker = DialogueStateTracker("", domain.slots)

    story = ("* greet\n"
             "    - utter_greet\n"
             "* start_form\n"
             "    - some_form\n"
             '    - form{"name": "some_form"}\n'
             "* default\n"
             "    - utter_default\n"
             "    - some_form\n"
             "* stop\n"
             "    - utter_ask_continue\n"
             "* affirm\n"
             "    - some_form\n"
             "* stop\n"
             "    - utter_ask_continue\n"
             "* inform\n"
             "    - some_form\n"
             '    - form{"name": null}\n'
             "* goodbye\n"
             "    - utter_goodbye\n")

    # simulate talking to the form
    events = [
        UserUttered(intent={"name": "greet"}),
        ActionExecuted("utter_greet"),
        ActionExecuted("action_listen"),
        # start the form
        UserUttered(intent={"name": "start_form"}),
        ActionExecuted("some_form"),
        ActiveLoop("some_form"),
        ActionExecuted("action_listen"),
        # out of form input
        UserUttered(intent={"name": "default"}),
        ActionExecutionRejected("some_form"),
        ActionExecuted("utter_default"),
        ActionExecuted("some_form"),
        ActionExecuted("action_listen"),
        # out of form input
        UserUttered(intent={"name": "stop"}),
        ActionExecutionRejected("some_form"),
        ActionExecuted("utter_ask_continue"),
        ActionExecuted("action_listen"),
        # out of form input but continue with the form
        UserUttered(intent={"name": "affirm"}),
        LoopInterrupted(True),
        ActionExecuted("some_form"),
        ActionExecuted("action_listen"),
        # out of form input
        UserUttered(intent={"name": "stop"}),
        ActionExecutionRejected("some_form"),
        ActionExecuted("utter_ask_continue"),
        ActionExecuted("action_listen"),
        # form input
        UserUttered(intent={"name": "inform"}),
        LoopInterrupted(False),
        ActionExecuted("some_form"),
        ActionExecuted("action_listen"),
        ActiveLoop(None),
        UserUttered(intent={"name": "goodbye"}),
        ActionExecuted("utter_goodbye"),
        ActionExecuted("action_listen"),
    ]
    for event in events:
        tracker.update(event)

    story = story.replace(f"- {LegacyForm.type_name}",
                          f"- {ActiveLoop.type_name}")

    assert story in tracker.export_stories(MarkdownStoryWriter())
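
The final `replace` call rewrites legacy `form` events into the newer `active_loop` story notation before comparing against the exported story. A small illustration of that substitution, assuming `LegacyForm.type_name == "form"` and `ActiveLoop.type_name == "active_loop"` as in Rasa 2.x:

from rasa.shared.core.events import ActiveLoop, LegacyForm

line = '    - form{"name": "some_form"}'
# Prints:     - active_loop{"name": "some_form"}
print(line.replace(f"- {LegacyForm.type_name}", f"- {ActiveLoop.type_name}"))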