Example #1
def get_tracker(events: List[Event]) -> DialogueStateTracker:
    return DialogueStateTracker.from_events("sender", events, [], 20)
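A minimal usage sketch for this helper (assuming Rasa 2.x; the events are illustrative, and the positional arguments above correspond to slots=[] and max_event_history=20):

from rasa.shared.core.events import ActionExecuted, UserUttered

tracker = get_tracker([
    ActionExecuted("action_listen"),
    UserUttered("hello", intent={"name": "greet"}),
])
assert tracker.sender_id == "sender"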
Example #2
def test_tracker_init_copy(domain: Domain):
    sender_id = "some-id"
    tracker = DialogueStateTracker(sender_id, domain.slots)
    tracker_copy = tracker.init_copy()
    assert tracker.sender_id == tracker_copy.sender_id
Example #3
def test_session_start_is_not_serialised(default_domain: Domain):
    tracker = DialogueStateTracker("default", default_domain.slots)
    # the retrieved tracker should be empty
    assert len(tracker.events) == 0

    # add SlotSet event
    tracker.update(SlotSet("slot", "value"))

    # add the two session start events and a user event
    tracker.update(ActionExecuted(ACTION_SESSION_START_NAME))
    tracker.update(SessionStarted())
    tracker.update(
        UserUttered("say something", intent={INTENT_NAME_KEY: "some_intent"})
    )
    tracker.update(DefinePrevUserUtteredFeaturization(False))

    expected = """version: "2.0"
stories:
- story: some-story01
  steps:
  - slot_was_set:
    - slot: value
  - intent: some_intent
"""

    actual = YAMLStoryWriter().dumps(
        Story.from_events(tracker.events, "some-story01").story_steps
    )
    assert actual == expected
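As a side note, the session-start events above never reach the serialised story; a quick illustrative check on top of the same tracker (reusing the names from the test):

    story_yaml = YAMLStoryWriter().dumps(
        Story.from_events(tracker.events, "some-story01").story_steps)
    assert "session_start" not in story_yaml
    assert "slot_was_set" in story_yaml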
Example #4
def test_restart_event(domain: Domain):
    tracker = DialogueStateTracker("default", domain.slots)
    # the retrieved tracker should be empty
    assert len(tracker.events) == 0

    intent = {"name": "greet", PREDICTED_CONFIDENCE_KEY: 1.0}
    tracker.update(ActionExecuted(ACTION_LISTEN_NAME))
    tracker.update(UserUttered("/greet", intent, []))
    tracker.update(ActionExecuted("my_action"))
    tracker.update(ActionExecuted(ACTION_LISTEN_NAME))

    assert len(tracker.events) == 4
    assert tracker.latest_message.text == "/greet"
    assert len(list(tracker.generate_all_prior_trackers())) == 4

    tracker.update(Restarted())

    assert len(tracker.events) == 5
    assert tracker.followup_action == ACTION_SESSION_START_NAME

    tracker.update(SessionStarted())

    assert tracker.followup_action == ACTION_LISTEN_NAME
    assert tracker.latest_message.text is None
    assert len(list(tracker.generate_all_prior_trackers())) == 1

    dialogue = tracker.as_dialogue()

    recovered = DialogueStateTracker("default", domain.slots)
    recovered.recreate_from_dialogue(dialogue)

    assert recovered.current_state() == tracker.current_state()
    assert len(recovered.events) == 6
    assert recovered.latest_message.text is None
    assert len(list(recovered.generate_all_prior_trackers())) == 1
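For reference, a hedged sketch of the restart semantics this test relies on (reusing the names above; this matches the assertions but is not part of the original test): Restarted wipes the conversation state, so applied_events drops everything recorded before it.

t = DialogueStateTracker("default", [])
t.update(ActionExecuted(ACTION_LISTEN_NAME))
t.update(Restarted())

# events before the restart are excluded from the applied events,
# and a new session start is scheduled as the follow-up action
assert t.applied_events() == []
assert t.followup_action == ACTION_SESSION_START_NAME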
Example #5
def test_revert_user_utterance_event(domain: Domain):
    tracker = DialogueStateTracker("default", domain.slots)
    # the retrieved tracker should be empty
    assert len(tracker.events) == 0

    intent1 = {"name": "greet", PREDICTED_CONFIDENCE_KEY: 1.0}
    tracker.update(ActionExecuted(ACTION_LISTEN_NAME))
    tracker.update(UserUttered("/greet", intent1, []))
    tracker.update(ActionExecuted("my_action_1"))
    tracker.update(ActionExecuted(ACTION_LISTEN_NAME))

    intent2 = {"name": "goodbye", PREDICTED_CONFIDENCE_KEY: 1.0}
    tracker.update(UserUttered("/goodbye", intent2, []))
    tracker.update(ActionExecuted("my_action_2"))
    tracker.update(ActionExecuted(ACTION_LISTEN_NAME))

    # Expecting count of 6:
    #   +5 executed actions
    #   +1 final state
    assert tracker.latest_action.get(ACTION_NAME) == ACTION_LISTEN_NAME
    assert len(list(tracker.generate_all_prior_trackers())) == 6

    tracker.update(UserUtteranceReverted())

    # Expecting count of 3:
    #   +5 executed actions
    #   +1 final state
    #   -2 rewound actions associated with the /goodbye
    #   -1 rewound action from the listen right before /goodbye
    assert tracker.latest_action.get(ACTION_NAME) == "my_action_1"
    assert len(list(tracker.generate_all_prior_trackers())) == 3

    dialogue = tracker.as_dialogue()

    recovered = DialogueStateTracker("default", domain.slots)
    recovered.recreate_from_dialogue(dialogue)

    assert recovered.current_state() == tracker.current_state()
    assert tracker.latest_action.get(ACTION_NAME) == "my_action_1"
    assert len(list(tracker.generate_all_prior_trackers())) == 3
Example #6
def default_tracker(domain: Domain) -> DialogueStateTracker:
    return DialogueStateTracker("my-sender", domain.slots)
Example #7
async def _predict_tracker_actions(
    tracker: DialogueStateTracker,
    agent: "Agent",
    fail_on_prediction_errors: bool = False,
    use_e2e: bool = False,
) -> Tuple[EvaluationStore, DialogueStateTracker, List[Dict[Text, Any]]]:

    processor = agent.create_processor()
    tracker_eval_store = EvaluationStore()

    events = list(tracker.events)

    partial_tracker = DialogueStateTracker.from_events(
        tracker.sender_id,
        events[:1],
        agent.domain.slots,
        sender_source=tracker.sender_source,
    )

    tracker_actions = []
    should_predict_another_action = True
    num_predicted_actions = 0

    for event in events[1:]:
        if isinstance(event, ActionExecuted):
            circuit_breaker_tripped = processor.is_action_limit_reached(
                num_predicted_actions, should_predict_another_action)
            (action_executed_result,
             prediction) = _collect_action_executed_predictions(
                 processor,
                 partial_tracker,
                 event,
                 fail_on_prediction_errors,
                 circuit_breaker_tripped,
             )
            tracker_eval_store.merge_store(action_executed_result)
            tracker_actions.append({
                "action": action_executed_result.action_targets[0],
                "predicted": action_executed_result.action_predictions[0],
                "policy": prediction.policy_name,
                "confidence": prediction.max_confidence,
            })
            should_predict_another_action = processor.should_predict_another_action(
                action_executed_result.action_predictions[0])
            num_predicted_actions += 1

        elif use_e2e and isinstance(event, UserUttered):
            # The user utterance didn't have a text message, only an intent,
            # so we can skip the NLU part and take the parse data directly.
            # Indirectly this means that the test story was in YAML format.
            if not event.text:
                predicted = event.parse_data
            # Otherwise the test story was either in YAML format containing
            # a user message, or in Markdown format. This branch is left
            # as is because Markdown is in legacy mode.
            else:
                predicted = await processor.parse_message(
                    UserMessage(event.text))
            user_uttered_result = _collect_user_uttered_predictions(
                event, predicted, partial_tracker, fail_on_prediction_errors)

            tracker_eval_store.merge_store(user_uttered_result)
        else:
            partial_tracker.update(event)
        if isinstance(event, UserUttered):
            num_predicted_actions = 0

    return tracker_eval_store, partial_tracker, tracker_actions
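The is_action_limit_reached call above is the circuit breaker that stops runaway prediction loops. A hypothetical sketch of the idea (the real check lives on the processor; the limit of 10 is an assumed default):

MAX_NUMBER_OF_PREDICTIONS = 10  # assumed default limit

def is_action_limit_reached_sketch(num_predicted_actions: int,
                                   should_predict_another_action: bool) -> bool:
    # trip the breaker once too many actions have been predicted in a row
    # without the bot handing control back to the user
    return (num_predicted_actions >= MAX_NUMBER_OF_PREDICTIONS
            and should_predict_another_action)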
Example #8
async def test_form_unhappy_path_no_validation_from_rule():
    form_name = "some_form"
    handle_rejection_action_name = "utter_handle_rejection"

    domain = Domain.from_yaml(f"""
        intents:
        - {GREET_INTENT_NAME}
        actions:
        - {UTTER_GREET_ACTION}
        - {handle_rejection_action_name}
        - some-action
        slots:
          {REQUESTED_SLOT}:
            type: unfeaturized
        forms:
        - {form_name}
    """)

    unhappy_rule = TrackerWithCachedStates.from_events(
        "bla",
        domain=domain,
        slots=domain.slots,
        evts=[
            # We are in an active form
            ActiveLoop(form_name),
            SlotSet(REQUESTED_SLOT, "bla"),
            ActionExecuted(RULE_SNIPPET_ACTION_NAME),
            ActionExecuted(ACTION_LISTEN_NAME),
            # When a user says "hi", and the form is unhappy,
            # we want to run a specific action
            UserUttered(intent={"name": GREET_INTENT_NAME}),
            ActionExecuted(handle_rejection_action_name),
            # Next user utterance is an answer to the previous question
            # and shouldn't be validated by the form
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered(intent={"name": GREET_INTENT_NAME}),
            ActionExecuted(form_name),
            ActionExecuted(ACTION_LISTEN_NAME),
        ],
        is_rule_tracker=True,
    )

    policy = RulePolicy()
    # RulePolicy should memorize that unhappy_rule overrides GREET_RULE
    policy.train([GREET_RULE, unhappy_rule], domain, RegexInterpreter())

    # Check that RulePolicy predicts action to handle unhappy path
    conversation_events = [
        ActionExecuted(form_name),
        ActiveLoop(form_name),
        SlotSet(REQUESTED_SLOT, "some value"),
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered("haha", {"name": GREET_INTENT_NAME}),
        ActionExecutionRejected(form_name),
    ]

    action_probabilities = policy.predict_action_probabilities(
        DialogueStateTracker.from_events("casd",
                                         evts=conversation_events,
                                         slots=domain.slots),
        domain,
        RegexInterpreter(),
    )
    assert_predicted_action(action_probabilities, domain,
                            handle_rejection_action_name)

    # Check that RulePolicy predicts action_listen
    conversation_events.append(ActionExecuted(handle_rejection_action_name))
    action_probabilities = policy.predict_action_probabilities(
        DialogueStateTracker.from_events("casd",
                                         evts=conversation_events,
                                         slots=domain.slots),
        domain,
        RegexInterpreter(),
    )
    assert_predicted_action(action_probabilities, domain, ACTION_LISTEN_NAME)

    # Check that RulePolicy triggers form again after handling unhappy path
    conversation_events.append(ActionExecuted(ACTION_LISTEN_NAME))
    tracker = DialogueStateTracker.from_events("casd",
                                               evts=conversation_events,
                                               slots=domain.slots)
    action_probabilities = policy.predict_action_probabilities(
        tracker, domain, RegexInterpreter())
    assert_predicted_action(action_probabilities, domain, form_name)
    # Check that RulePolicy added a FormValidation(False) event based on the training rule
    assert tracker.events[-1] == FormValidation(False)
Example #9
async def test_form_unhappy_path_no_validation_from_story():
    form_name = "some_form"
    handle_rejection_action_name = "utter_handle_rejection"

    domain = Domain.from_yaml(f"""
        intents:
        - {GREET_INTENT_NAME}
        actions:
        - {UTTER_GREET_ACTION}
        - {handle_rejection_action_name}
        - some-action
        slots:
          {REQUESTED_SLOT}:
            type: unfeaturized
        forms:
        - {form_name}
    """)

    unhappy_story = TrackerWithCachedStates.from_events(
        "bla",
        domain=domain,
        slots=domain.slots,
        evts=[
            # We are in an active form
            ActionExecuted(form_name),
            ActiveLoop(form_name),
            # When a user says "hi", and the form is unhappy,
            # we want to run a specific action
            UserUttered(intent={"name": GREET_INTENT_NAME}),
            ActionExecuted(handle_rejection_action_name),
            ActionExecuted(ACTION_LISTEN_NAME),
            # Next user utterance is an answer to the previous question
            # and shouldn't be validated by the form
            UserUttered(intent={"name": GREET_INTENT_NAME}),
            ActionExecuted(form_name),
            ActionExecuted(ACTION_LISTEN_NAME),
        ],
    )

    policy = RulePolicy()
    policy.train([unhappy_story], domain, RegexInterpreter())

    # Check that RulePolicy predicts no validation to handle unhappy path
    conversation_events = [
        ActionExecuted(form_name),
        ActiveLoop(form_name),
        SlotSet(REQUESTED_SLOT, "some value"),
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered("haha", {"name": GREET_INTENT_NAME}),
        ActionExecutionRejected(form_name),
        ActionExecuted(handle_rejection_action_name),
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered("haha", {"name": GREET_INTENT_NAME}),
    ]

    tracker = DialogueStateTracker.from_events("casd",
                                               evts=conversation_events,
                                               slots=domain.slots)
    action_probabilities = policy.predict_action_probabilities(
        tracker, domain, RegexInterpreter())
    # there is no rule for the next action
    assert max(action_probabilities) == policy._core_fallback_threshold
    # Check that RulePolicy added a FormValidation(False) event based on the training story
    assert tracker.events[-1] == FormValidation(False)
Example #10
    FormValidation,
)
from rasa.shared.nlu.interpreter import RegexInterpreter
from rasa.core.nlg import TemplatedNaturalLanguageGenerator
from rasa.core.policies.rule_policy import RulePolicy
from rasa.shared.core.trackers import DialogueStateTracker
from rasa.shared.core.generator import TrackerWithCachedStates

UTTER_GREET_ACTION = "utter_greet"
GREET_INTENT_NAME = "greet"
GREET_RULE = DialogueStateTracker.from_events(
    "bla",
    evts=[
        ActionExecuted(RULE_SNIPPET_ACTION_NAME),
        ActionExecuted(ACTION_LISTEN_NAME),
        # Greet is a FAQ here and gets triggered in any context
        UserUttered(intent={"name": GREET_INTENT_NAME}),
        ActionExecuted(UTTER_GREET_ACTION),
        ActionExecuted(ACTION_LISTEN_NAME),
    ],
)
GREET_RULE.is_rule_tracker = True


def _form_submit_rule(domain: Domain, submit_action_name: Text,
                      form_name: Text) -> DialogueStateTracker:
    return TrackerWithCachedStates.from_events(
        "bla",
        domain=domain,
        slots=domain.slots,
        evts=[
Example #11
async def test_form_unhappy_path_from_story():
    form_name = "some_form"
    handle_rejection_action_name = "utter_handle_rejection"

    domain = Domain.from_yaml(f"""
        intents:
        - {GREET_INTENT_NAME}
        actions:
        - {UTTER_GREET_ACTION}
        - {handle_rejection_action_name}
        - some-action
        slots:
          {REQUESTED_SLOT}:
            type: unfeaturized
        forms:
        - {form_name}
    """)

    unhappy_story = TrackerWithCachedStates.from_events(
        "bla",
        domain=domain,
        slots=domain.slots,
        evts=[
            # We are in an active form
            ActionExecuted(form_name),
            ActiveLoop(form_name),
            UserUttered("haha", {"name": GREET_INTENT_NAME}),
            ActionExecuted(UTTER_GREET_ACTION),
            # After our bot says "hi", we want to run a specific action
            ActionExecuted(handle_rejection_action_name),
            ActionExecuted(form_name),
            ActionExecuted(ACTION_LISTEN_NAME),
        ],
    )

    policy = RulePolicy()
    policy.train([GREET_RULE, unhappy_story], domain, RegexInterpreter())

    # Check that RulePolicy predicts action to handle unhappy path
    conversation_events = [
        ActionExecuted(form_name),
        ActiveLoop(form_name),
        SlotSet(REQUESTED_SLOT, "some value"),
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered("haha", {"name": GREET_INTENT_NAME}),
        ActionExecutionRejected(form_name),
    ]

    action_probabilities = policy.predict_action_probabilities(
        DialogueStateTracker.from_events("casd",
                                         evts=conversation_events,
                                         slots=domain.slots),
        domain,
        RegexInterpreter(),
    )
    assert_predicted_action(action_probabilities, domain, UTTER_GREET_ACTION)

    # Check that RulePolicy doesn't trigger form or action_listen
    # after handling unhappy path
    conversation_events.append(ActionExecuted(handle_rejection_action_name))
    action_probabilities = policy.predict_action_probabilities(
        DialogueStateTracker.from_events("casd",
                                         evts=conversation_events,
                                         slots=domain.slots),
        domain,
        RegexInterpreter(),
    )
    assert max(action_probabilities) == policy._core_fallback_threshold
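predict_action_probabilities returns a plain confidence list indexed by the domain's actions, which is why the test compares max(...) against the fallback threshold. A small illustrative helper (assumed, not part of Rasa's API) to map such a vector back to an action name:

from typing import List, Text

import numpy as np

def predicted_action_name(action_probabilities: List[float],
                          domain: Domain) -> Text:
    # the position of the highest confidence follows the domain's
    # action ordering
    return domain.action_names[int(np.argmax(action_probabilities))]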
Example #12
def template_sender_tracker(domain_path: Text):
    domain = Domain.load(domain_path)
    return DialogueStateTracker("template-sender", domain.slots)
Example #13
    def tracker(self, domain_with_mapping: Domain) -> DialogueStateTracker:
        return DialogueStateTracker(DEFAULT_SENDER_ID,
                                    domain_with_mapping.slots)
Example #14
    def tracker(self, default_domain: Domain) -> DialogueStateTracker:
        return DialogueStateTracker(DEFAULT_SENDER_ID, default_domain.slots)
Example #15
def template_sender_tracker(default_domain):
    return DialogueStateTracker("template-sender", default_domain.slots)
Example #16
async def test_one_stage_fallback_rule():
    domain = Domain.from_yaml(f"""
        intents:
        - {GREET_INTENT_NAME}
        - {DEFAULT_NLU_FALLBACK_INTENT_NAME}
        actions:
        - {UTTER_GREET_ACTION}
    """)

    fallback_recover_rule = TrackerWithCachedStates.from_events(
        "bla",
        domain=domain,
        slots=domain.slots,
        evts=[
            ActionExecuted(RULE_SNIPPET_ACTION_NAME),
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered(intent={"name": DEFAULT_NLU_FALLBACK_INTENT_NAME}),
            ActionExecuted(ACTION_DEFAULT_FALLBACK_NAME),
            ActionExecuted(ACTION_LISTEN_NAME),
        ],
        is_rule_tracker=True,
    )

    greet_rule_which_only_applies_at_start = TrackerWithCachedStates.from_events(
        "bla",
        domain=domain,
        evts=[
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered(intent={"name": GREET_INTENT_NAME}),
            ActionExecuted(UTTER_GREET_ACTION),
            ActionExecuted(ACTION_LISTEN_NAME),
        ],
        is_rule_tracker=True,
    )
    policy = RulePolicy()
    policy.train(
        [greet_rule_which_only_applies_at_start, fallback_recover_rule],
        domain,
        RegexInterpreter(),
    )

    # RulePolicy predicts fallback action
    conversation_events = [
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered("dasdakl;fkasd",
                    {"name": DEFAULT_NLU_FALLBACK_INTENT_NAME}),
    ]
    tracker = DialogueStateTracker.from_events("casd",
                                               evts=conversation_events,
                                               slots=domain.slots)
    action_probabilities = policy.predict_action_probabilities(
        tracker, domain, RegexInterpreter())
    assert_predicted_action(action_probabilities, domain,
                            ACTION_DEFAULT_FALLBACK_NAME)

    # Fallback action reverts fallback events, next action is `ACTION_LISTEN`
    conversation_events += await ActionDefaultFallback().run(
        CollectingOutputChannel(),
        TemplatedNaturalLanguageGenerator(domain.templates),
        tracker,
        domain,
    )

    # Rasa is back on track once the user rephrases the intent
    conversation_events += [
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered("haha", {"name": GREET_INTENT_NAME}),
    ]
    tracker = DialogueStateTracker.from_events("casd",
                                               evts=conversation_events,
                                               slots=domain.slots)

    action_probabilities = policy.predict_action_probabilities(
        tracker, domain, RegexInterpreter())
    assert_predicted_action(action_probabilities, domain, UTTER_GREET_ACTION)
Example #17
    async def _get_tracker(conversation_id: Text) -> DialogueStateTracker:
        return DialogueStateTracker.from_events(
            conversation_id, events_for_conversation_id[conversation_id])
Example #18
    def predict_action_probabilities(
        self,
        tracker: DialogueStateTracker,
        domain: Domain,
        interpreter: NaturalLanguageInterpreter,
        **kwargs: Any,
    ) -> PolicyPrediction:
        """Predicts the next action the bot should take after seeing the tracker.

        Args:
            tracker: Tracker containing past conversation events.
            domain: Domain of the assistant.
            interpreter: Interpreter which may be used by the policies to create
                additional features.

        Returns:
             The policy's prediction (e.g. the probabilities for the actions).
        """
        if self.model is None:
            return self._prediction(self._default_predictions(domain))

        # Prediction through the policy is skipped if:
        # 1. The tracker does not contain any event of type `UserUttered`
        #    so far.
        # 2. There is at least one event of type `ActionExecuted`
        #    after the last `UserUttered` event.
        if self._should_skip_prediction(tracker):
            logger.debug(
                f"Skipping predictions for {self.__class__.__name__} "
                f"as either there is no event of type `UserUttered` or "
                f"there is an event of type `ActionExecuted` after "
                f"the last `UserUttered`.")
            return self._prediction(self._default_predictions(domain))

        # create model data from tracker
        tracker_state_features = self._featurize_for_prediction(
            tracker, domain, interpreter)

        model_data = self._create_model_data(tracker_state_features)
        output = self.model.run_inference(model_data)

        # take the last prediction in the sequence
        all_similarities: np.ndarray = output["similarities"]
        sequence_similarities = all_similarities[:, -1, :]

        # Check for unlikely intent
        query_intent = tracker.get_last_event_for(UserUttered).intent_name
        is_unlikely_intent = self._check_unlikely_intent(
            domain, sequence_similarities, query_intent)

        confidences = list(np.zeros(domain.num_actions))

        if is_unlikely_intent:
            confidences[domain.index_for_action(
                ACTION_UNLIKELY_INTENT_NAME)] = 1.0

        return self._prediction(
            confidences,
            action_metadata=self._collect_action_metadata(
                domain, sequence_similarities, query_intent),
        )
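_should_skip_prediction itself is not shown here; a sketch consistent with the two conditions spelled out in the comment above (an assumption, not the verbatim implementation):

def should_skip_prediction_sketch(tracker: DialogueStateTracker) -> bool:
    # walking the events backwards: an ActionExecuted after the most recent
    # UserUttered (or no UserUttered at all) means the prediction is skipped
    for event in reversed(tracker.applied_events()):
        if isinstance(event, ActionExecuted):
            return True
        if isinstance(event, UserUttered):
            return False
    return True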
Example #19
    "path, expected_format",
    [
        ("bla.json", RASA),
        ("other.md", MARKDOWN),
        ("other.yml", RASA_YAML),
        ("unknown", UNK),
    ],
)
def test_get_nlu_target_format(path: Text, expected_format: Text):
    assert interactive._get_nlu_target_format(path) == expected_format


@pytest.mark.parametrize(
    "trackers, expected_trackers",
    [
        (
            [DialogueStateTracker.from_events("one", [])],
            [deque([]), DEFAULT_SENDER_ID],
        ),
        (
            [
                str(i)
                for i in range(
                    interactive.MAX_NUMBER_OF_TRAINING_STORIES_FOR_VISUALIZATION
                    + 1
                )
            ],
            [DEFAULT_SENDER_ID],
        ),
    ],
)
async def test_initial_plotting_call(
    mock_endpoint: EndpointConfig,
    monkeypatch: MonkeyPatch,
    trackers: List[Text],
Example #20
    async def generate(
        self,
        template_name: Text,
        tracker: DialogueStateTracker,
        output_channel: Text,
        **kwargs: Any,
    ) -> Dict[Text, Any]:

        fallback_language_slot = tracker.slots.get("fallback_language")
        fallback_language = (fallback_language_slot.initial_value
                             if fallback_language_slot else None)
        language = tracker.latest_message.metadata.get(
            "language") or fallback_language

        body = nlg_request_format(
            template_name,
            tracker,
            output_channel,
            **kwargs,
            language=language,
            projectId=os.environ.get("BF_PROJECT_ID"),
        )

        logger.debug("Requesting NLG for {} from {}."
                     "".format(template_name, self.nlg_endpoint.url))

        try:
            if "graphql" in self.nlg_endpoint.url:
                from sgqlc.endpoint.http import HTTPEndpoint

                logging.getLogger("sgqlc.endpoint.http").setLevel(
                    logging.WARNING)

                api_key = os.environ.get("API_KEY")
                headers = [{"Authorization": api_key}] if api_key else []
                response = HTTPEndpoint(self.nlg_endpoint.url,
                                        *headers)(NLG_QUERY, body)
                if response.get("errors"):
                    raise urllib.error.URLError(", ".join(
                        [e.get("message") for e in response.get("errors")]))
                response = response.get("data", {}).get("getResponse", {})
                rewrite_url(response, self.url_substitution_patterns)
                if "customText" in response:
                    response["text"] = response.pop("customText")
                if "customImage" in response:
                    response["image"] = response.pop("customImage")
                if "customQuickReplies" in response:
                    response["quick_replies"] = response.pop(
                        "customQuickReplies")
                if "customButtons" in response:
                    response["buttons"] = response.pop("customButtons")
                if "customElements" in response:
                    response["elements"] = response.pop("customElements")
                if "customAttachment" in response:
                    response["attachment"] = response.pop("customAttachment")
                metadata = response.pop("metadata", {}) or {}
                for key in metadata:
                    response[key] = metadata[key]

                keys_to_interpolate = [
                    "text",
                    "image",
                    "custom",
                    "buttons",
                    "attachment",
                    "quick_replies",
                ]
                for key in keys_to_interpolate:
                    if key in response:
                        response[key] = interpolate(
                            response[key], tracker.current_slot_values())
            else:
                response = await self.nlg_endpoint.request(
                    method="post", json=body, timeout=DEFAULT_REQUEST_TIMEOUT)
                # legacy route: use the first message in the sequence
                response = response[0]
        except urllib.error.URLError as e:
            message = e.reason
            logger.error(
                f"NLG web endpoint at {self.nlg_endpoint.url} returned errors: {message}"
            )
            return {"text": template_name}

        if self.validate_response(response):
            return response
        else:
            logger.error(
                f"NLG web endpoint at {self.nlg_endpoint.url} returned an invalid response."
            )
            return {"text": template_name}
Example #21
def test_autofill_slots_for_policy_entities():
    policy_entity, policy_entity_value = "policy_entity", "end-to-end"
    nlu_entity, nlu_entity_value = "nlu_entity", "nlu rocks"
    domain = Domain.from_yaml(
        textwrap.dedent(f"""
    entities:
    - {nlu_entity}
    - {policy_entity}

    slots:
        {nlu_entity}:
            type: text
        {policy_entity}:
            type: text
    """))

    tracker = DialogueStateTracker.from_events(
        "some sender",
        evts=[
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered(
                "hi",
                intent={"name": "greet"},
                entities=[{
                    "entity": nlu_entity,
                    "value": nlu_entity_value
                }],
            ),
            DefinePrevUserUtteredFeaturization(True),
            EntitiesAdded(entities=[
                {
                    "entity": policy_entity,
                    "value": policy_entity_value
                },
                {
                    "entity": nlu_entity,
                    "value": nlu_entity_value
                },
            ]),
        ],
        domain=domain,
        slots=domain.slots,
    )

    # Slots are correctly set
    assert tracker.slots[nlu_entity].value == nlu_entity_value
    assert tracker.slots[policy_entity].value == policy_entity_value

    expected_events = [
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered(
            "hi",
            intent={"name": "greet"},
            entities=[
                {
                    "entity": nlu_entity,
                    "value": nlu_entity_value
                },
                # Added by the policies via the `EntitiesAdded` event
                {
                    "entity": policy_entity,
                    "value": policy_entity_value
                },
            ],
        ),
        # SlotSet event added for entity predicted by NLU
        SlotSet(nlu_entity, nlu_entity_value),
        DefinePrevUserUtteredFeaturization(True),
        EntitiesAdded(entities=[
            {
                "entity": policy_entity,
                "value": policy_entity_value
            },
            {
                "entity": nlu_entity,
                "value": nlu_entity_value
            },
        ]),
        # SlotSet events added for the entities predicted by the policies.
        # The first one duplicates the NLU-based SlotSet above; we don't
        # deduplicate, as this is a true reflection of the given events
        # and it doesn't change the actual state.
        SlotSet(nlu_entity, nlu_entity_value),
        SlotSet(policy_entity, policy_entity_value),
    ]

    for actual, expected in zip(tracker.events, expected_events):
        assert actual == expected
Example #22
async def test_request_correct_slots_after_unhappy_path_with_custom_required_slots(
):
    form_name = "some_form"
    slot_name_1 = "slot_1"
    slot_name_2 = "slot_2"

    domain = f"""
        slots:
          {slot_name_1}:
            type: any
          {slot_name_2}:
            type: any
        forms:
          {form_name}:
            {slot_name_1}:
            - type: from_intent
              intent: some_intent
              value: some_value
            {slot_name_2}:
            - type: from_intent
              intent: some_intent
              value: some_value
        actions:
        - validate_{form_name}
        """
    domain = Domain.from_yaml(domain)

    tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActiveLoop(form_name),
            SlotSet(REQUESTED_SLOT, "slot_2"),
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered(
                "hello",
                intent={
                    "name": "greet",
                    "confidence": 1.0
                },
            ),
            ActionExecutionRejected(form_name),
            ActionExecuted("utter_greet"),
        ],
    )

    action_server_url = "http://my-action-server:5055/webhook"

    # Custom form validation action changes the order of the requested slots
    validate_return_events = [
        {
            "event": "slot",
            "name": REQUESTED_SLOT,
            "value": slot_name_2
        },
    ]

    # The form should ask the same slot again when coming back after unhappy path
    expected_events = [SlotSet(REQUESTED_SLOT, slot_name_2)]

    with aioresponses() as mocked:
        mocked.post(action_server_url,
                    payload={"events": validate_return_events})

        action_server = EndpointConfig(action_server_url)
        action = FormAction(form_name, action_server)

        events = await action.run(
            CollectingOutputChannel(),
            TemplatedNaturalLanguageGenerator(domain.templates),
            tracker,
            domain,
        )
        assert events == expected_events
Example #23
def test_revert_action_event(domain: Domain):
    tracker = DialogueStateTracker("default", domain.slots)
    # the retrieved tracker should be empty
    assert len(tracker.events) == 0

    intent = {"name": "greet", PREDICTED_CONFIDENCE_KEY: 1.0}
    tracker.update(ActionExecuted(ACTION_LISTEN_NAME))
    tracker.update(UserUttered("/greet", intent, []))
    tracker.update(ActionExecuted("my_action"))
    tracker.update(ActionExecuted(ACTION_LISTEN_NAME))

    # Expecting count of 4:
    #   +3 executed actions
    #   +1 final state
    assert tracker.latest_action.get(ACTION_NAME) == ACTION_LISTEN_NAME
    assert len(list(tracker.generate_all_prior_trackers())) == 4

    tracker.update(ActionReverted())

    # Expecting count of 3:
    #   +3 executed actions
    #   +1 final state
    #   -1 reverted action
    assert tracker.latest_action.get(ACTION_NAME) == "my_action"
    assert len(list(tracker.generate_all_prior_trackers())) == 3

    dialogue = tracker.as_dialogue()

    recovered = DialogueStateTracker("default", domain.slots)
    recovered.recreate_from_dialogue(dialogue)

    assert recovered.current_state() == tracker.current_state()
    assert tracker.latest_action.get(ACTION_NAME) == "my_action"
    assert len(list(tracker.generate_all_prior_trackers())) == 3
Example #24
async def test_switch_forms_with_same_slot(default_agent: Agent):
    """Tests switching of forms, where the first slot is the same in both forms.

    Tests the fix for issue 7710."""

    # Define two forms in the domain, with same first slot
    slot_a = "my_slot_a"

    form_1 = "my_form_1"
    utter_ask_form_1 = f"Please provide the value for {slot_a} of form 1"

    form_2 = "my_form_2"
    utter_ask_form_2 = f"Please provide the value for {slot_a} of form 2"

    domain = f"""
version: "2.0"
nlu:
- intent: order_status
  examples: |
    - check status of my order
    - when are my shoes coming in
- intent: return
  examples: |
    - start a return
    - I don't want my shoes anymore
forms:
  {form_1}:
    {slot_a}:
    - type: from_entity
      entity: number
  {form_2}:
    {slot_a}:
    - type: from_entity
      entity: number
responses:
    utter_ask_{form_1}_{slot_a}:
    - text: {utter_ask_form_1}
    utter_ask_{form_2}_{slot_a}:
    - text: {utter_ask_form_2}
"""

    domain = Domain.from_yaml(domain)

    # Driving it like rasa/core/processor
    processor = MessageProcessor(
        default_agent.interpreter,
        default_agent.policy_ensemble,
        domain,
        InMemoryTrackerStore(domain),
        TemplatedNaturalLanguageGenerator(domain.templates),
    )

    # activate the first form
    tracker = DialogueStateTracker.from_events(
        "some-sender",
        evts=[
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered("order status", {
                "name": "form_1",
                "confidence": 1.0
            }),
            DefinePrevUserUtteredFeaturization(False),
        ],
    )
    # rasa/core/processor.predict_next_action
    prediction = PolicyPrediction([], "some_policy")
    action_1 = FormAction(form_1, None)

    await processor._run_action(
        action_1,
        tracker,
        CollectingOutputChannel(),
        TemplatedNaturalLanguageGenerator(domain.templates),
        prediction,
    )

    events_expected = [
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered("order status", {
            "name": "form_1",
            "confidence": 1.0
        }),
        DefinePrevUserUtteredFeaturization(False),
        ActionExecuted(form_1),
        ActiveLoop(form_1),
        SlotSet(REQUESTED_SLOT, slot_a),
        BotUttered(
            text=utter_ask_form_1,
            metadata={"template_name": f"utter_ask_{form_1}_{slot_a}"},
        ),
    ]
    assert tracker.applied_events() == events_expected

    next_events = [
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered("return my shoes", {
            "name": "form_2",
            "confidence": 1.0
        }),
        DefinePrevUserUtteredFeaturization(False),
    ]
    tracker.update_with_events(
        next_events,
        domain,
    )
    events_expected.extend(next_events)

    # form_1 is still active, and the bot will first check whether the user
    # utterance provides valid data for the requested slot; it doesn't, so
    # the form's execution is rejected
    await processor._run_action(
        action_1,
        tracker,
        CollectingOutputChannel(),
        TemplatedNaturalLanguageGenerator(domain.templates),
        prediction,
    )
    events_expected.extend([ActionExecutionRejected(action_name=form_1)])
    assert tracker.applied_events() == events_expected

    # Next, bot predicts form_2
    action_2 = FormAction(form_2, None)
    await processor._run_action(
        action_2,
        tracker,
        CollectingOutputChannel(),
        TemplatedNaturalLanguageGenerator(domain.templates),
        prediction,
    )
    events_expected.extend([
        ActionExecuted(form_2),
        ActiveLoop(form_2),
        SlotSet(REQUESTED_SLOT, slot_a),
        BotUttered(
            text=utter_ask_form_2,
            metadata={"template_name": f"utter_ask_{form_2}_{slot_a}"},
        ),
    ])
    assert tracker.applied_events() == events_expected
Example #25
def test_traveling_back_in_time(domain: Domain):
    tracker = DialogueStateTracker("default", domain.slots)
    # the retrieved tracker should be empty
    assert len(tracker.events) == 0

    intent = {"name": "greet", PREDICTED_CONFIDENCE_KEY: 1.0}
    tracker.update(ActionExecuted(ACTION_LISTEN_NAME))
    tracker.update(UserUttered("/greet", intent, []))

    time.sleep(1)
    time_for_timemachine = time.time()
    time.sleep(1)

    tracker.update(ActionExecuted("my_action"))
    tracker.update(ActionExecuted(ACTION_LISTEN_NAME))

    # Expecting count of 4:
    #   +3 executed actions
    #   +1 final state
    assert tracker.latest_action.get(ACTION_NAME) == ACTION_LISTEN_NAME
    assert len(tracker.events) == 4
    assert len(list(tracker.generate_all_prior_trackers())) == 4

    tracker = tracker.travel_back_in_time(time_for_timemachine)

    # Expecting count of 2:
    #   +1 executed action
    #   +1 final state
    assert tracker.latest_action.get(ACTION_NAME) == ACTION_LISTEN_NAME
    assert len(tracker.events) == 2
    assert len(list(tracker.generate_all_prior_trackers())) == 2
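A hedged sketch of what travel_back_in_time does, consistent with the assertions above (an assumption about the implementation): rebuild the tracker from only the events recorded before the target timestamp.

def travel_back_in_time_sketch(tracker: DialogueStateTracker,
                               target_time: float) -> DialogueStateTracker:
    kept = [event for event in tracker.events if event.timestamp < target_time]
    return DialogueStateTracker.from_events(tracker.sender_id, kept)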
Example #26
    def _should_handle_message(tracker: DialogueStateTracker):
        return (
            not tracker.is_paused()
            or tracker.latest_message.intent.get(INTENT_NAME_KEY) == USER_INTENT_RESTART
        )
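A usage sketch for this check (assuming Rasa 2.x events and constants; the tracker setup is illustrative): a paused conversation is ignored unless the user sends the restart intent.

from rasa.shared.core.events import ConversationPaused, UserUttered

tracker = DialogueStateTracker("sender", [])
tracker.update(ConversationPaused())
assert not _should_handle_message(tracker)

# the restart intent is handled even while the conversation is paused
tracker.update(UserUttered("/restart", intent={"name": USER_INTENT_RESTART}))
assert _should_handle_message(tracker)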
Example #27
def test_applied_events_with_loop_happy_path(
        events: List[Event], expected_applied_events: List[Event]):
    tracker = DialogueStateTracker.from_events("👋", events)
    applied = tracker.applied_events()

    assert applied == expected_applied_events
Example #28
    def _find_action_from_rules(self, tracker: DialogueStateTracker,
                                domain: Domain) -> Optional[Text]:
        tracker_as_states = self.featurizer.prediction_states([tracker],
                                                              domain)
        states = tracker_as_states[0]

        logger.debug(f"Current tracker state: {states}")

        rule_keys = self._get_possible_keys(self.lookup[RULES], states)
        predicted_action_name = None
        best_rule_key = ""
        if rule_keys:
            # if there are several rules,
            # it should mean that some rule is a subset of another rule
            # therefore we pick a rule of maximum length
            best_rule_key = max(rule_keys, key=len)
            predicted_action_name = self.lookup[RULES].get(best_rule_key)

        active_loop_name = tracker.active_loop_name
        if active_loop_name:
            # find rules for unhappy path of the loop
            loop_unhappy_keys = self._get_possible_keys(
                self.lookup[RULES_FOR_LOOP_UNHAPPY_PATH], states)
            # there could be several unhappy path conditions
            unhappy_path_conditions = [
                self.lookup[RULES_FOR_LOOP_UNHAPPY_PATH].get(key)
                for key in loop_unhappy_keys
            ]

            # Check if a rule that predicted action_listen
            # was applied inside the loop.
            # Rules might not explicitly switch back to the loop.
            # Hence, we have to take care of that.
            predicted_listen_from_general_rule = (
                predicted_action_name == ACTION_LISTEN_NAME
                and not get_active_loop_name(
                    self._rule_key_to_state(best_rule_key)[-1]))
            if predicted_listen_from_general_rule:
                if DO_NOT_PREDICT_LOOP_ACTION not in unhappy_path_conditions:
                    # negative rules don't contain a key that corresponds to
                    # the fact that active_loop shouldn't be predicted
                    logger.debug(
                        f"Predicted loop '{active_loop_name}' by overwriting "
                        f"'{ACTION_LISTEN_NAME}' predicted by general rule.")
                    return active_loop_name

                # do not predict anything
                predicted_action_name = None

            if DO_NOT_VALIDATE_LOOP in unhappy_path_conditions:
                logger.debug("Added `FormValidation(False)` event.")
                tracker.update(FormValidation(False))

        if predicted_action_name is not None:
            logger.debug(
                f"There is a rule for the next action '{predicted_action_name}'."
            )
        else:
            logger.debug("There is no applicable rule.")

        return predicted_action_name
Example #29
def _collect_action_executed_predictions(
    processor: "MessageProcessor",
    partial_tracker: DialogueStateTracker,
    event: ActionExecuted,
    fail_on_prediction_errors: bool,
    circuit_breaker_tripped: bool,
) -> Tuple[EvaluationStore, PolicyPrediction]:
    from rasa.core.policies.form_policy import FormPolicy

    action_executed_eval_store = EvaluationStore()

    gold_action_name = event.action_name
    gold_action_text = event.action_text
    gold = gold_action_name or gold_action_text

    if circuit_breaker_tripped:
        prediction = PolicyPrediction([], policy_name=None)
        predicted = "circuit breaker tripped"
    else:
        action, prediction = processor.predict_next_action(partial_tracker)
        predicted = action.name()

        if (prediction.policy_name
                and predicted != gold and _form_might_have_been_rejected(
                    processor.domain, partial_tracker, predicted)):
            # Wrong action was predicted,
            # but it might be Ok if form action is rejected.
            emulate_loop_rejection(partial_tracker)
            # try again
            action, prediction = processor.predict_next_action(partial_tracker)

            # Even if the prediction is also wrong, we don't have to undo the emulation
            # of the action rejection as we know that the user explicitly specified
            # that something else than the form was supposed to run.
            predicted = action.name()

    action_executed_eval_store.add_to_store(action_predictions=[predicted],
                                            action_targets=[gold])

    if action_executed_eval_store.has_prediction_target_mismatch():
        partial_tracker.update(
            WronglyPredictedAction(
                gold_action_name,
                gold_action_text,
                predicted,
                prediction.policy_name,
                prediction.max_confidence,
                event.timestamp,
            ))
        if fail_on_prediction_errors:
            story_dump = YAMLStoryWriter().dumps(
                partial_tracker.as_story().story_steps)
            error_msg = (f"Model predicted a wrong action. Failed Story: "
                         f"\n\n{story_dump}")
            if FormPolicy.__name__ in prediction.policy_name:
                error_msg += ("FormAction is not run during "
                              "evaluation therefore it is impossible to know "
                              "if validation failed or this story is wrong. "
                              "If the story is correct, add it to the "
                              "training stories and retrain.")
            raise WrongPredictionException(error_msg)
    else:
        partial_tracker.update(
            ActionExecuted(
                predicted,
                prediction.policy_name,
                prediction.max_confidence,
                event.timestamp,
            ))

    return action_executed_eval_store, prediction
Example #30
def tracker_from_dialogue(dialogue: "Dialogue",
                          domain: Domain) -> DialogueStateTracker:
    tracker = DialogueStateTracker(dialogue.name, domain.slots)
    tracker.recreate_from_dialogue(dialogue)
    return tracker
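Round-trip usage sketch (assuming an existing source_tracker and domain): serialising a tracker to a Dialogue and restoring it through this helper preserves the conversation state, as the tests above assert.

dialogue = source_tracker.as_dialogue()
restored = tracker_from_dialogue(dialogue, domain)
assert restored.current_state() == source_tracker.current_state()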