Example #1
async def test_predict_form_action_if_multiple_turns():
    form_name = "some_form"
    other_intent = "bye"
    domain = Domain.from_yaml(
        f"""
    intents:
    - {GREET_INTENT_NAME}
    - {other_intent}
    actions:
    - {UTTER_GREET_ACTION}
    - some-action
    slots:
      {REQUESTED_SLOT}:
        type: unfeaturized
    forms:
    - {form_name}
"""
    )

    policy = RulePolicy()
    policy.train([GREET_RULE], domain, RegexInterpreter())

    form_conversation = DialogueStateTracker.from_events(
        "in a form",
        evts=[
            # We are in an active form
            ActionExecuted(form_name),
            Form(form_name),
            SlotSet(REQUESTED_SLOT, "some value"),
            # User responds to slot request
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered("haha", {"name": GREET_INTENT_NAME}),
            # Form validates input and requests another slot
            ActionExecuted(form_name),
            SlotSet(REQUESTED_SLOT, "some other"),
            # User responds to 2nd slot request
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered("haha", {"name": other_intent}),
        ],
        slots=domain.slots,
    )

    # RulePolicy triggers form again
    action_probabilities = policy.predict_action_probabilities(
        form_conversation, domain
    )
    assert_predicted_action(action_probabilities, domain, form_name)
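
Most of the RulePolicy examples on this page rely on module-level fixtures (GREET_INTENT_NAME, UTTER_GREET_ACTION, GREET_RULE, assert_predicted_action) whose definitions are not shown. The following is only a plausible sketch of what they could look like, assuming the rule-tracker convention from Rasa's own test suite and the names already imported by the surrounding module; the real fixtures may differ.

# Hypothetical sketch of the shared fixtures; not the exact originals.
GREET_INTENT_NAME = "greet"
UTTER_GREET_ACTION = "utter_greet"

# A rule is written as a tracker whose events spell out the expected turn sequence.
GREET_RULE = DialogueStateTracker.from_events(
    "greet rule",
    evts=[
        # "..." snippet action: the rule applies regardless of preceding context
        ActionExecuted(RULE_SNIPPET_ACTION_NAME),
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered("haha", {"name": GREET_INTENT_NAME}),
        ActionExecuted(UTTER_GREET_ACTION),
        ActionExecuted(ACTION_LISTEN_NAME),
    ],
)
GREET_RULE.is_rule_tracker = True  # so RulePolicy treats it as a rule, not a story


def assert_predicted_action(action_probabilities, domain, expected_action_name):
    # The expected action should win with full confidence.
    assert max(action_probabilities) == 1
    predicted_index = action_probabilities.index(max(action_probabilities))
    assert domain.action_names[predicted_index] == expected_action_name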
Example #2
def test_extract_requested_slot_default():
    """Test default extraction of a slot value from entity with the same name."""
    form = FormAction("some form", None)

    tracker = DialogueStateTracker.from_events(
        "default",
        [
            SlotSet(REQUESTED_SLOT, "some_slot"),
            UserUttered(
                "bla", entities=[{"entity": "some_slot", "value": "some_value"}]
            ),
            ActionExecuted(ACTION_LISTEN_NAME),
        ],
    )

    slot_values = form.extract_requested_slot(tracker, Domain.empty())
    assert slot_values == {"some_slot": "some_value"}
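
When a form defines no mapping for the requested slot, extract_requested_slot falls back to a from_entity mapping with the same name as the slot, which is why the test above can pass Domain.empty(). Below is a hypothetical equivalent that spells the default mapping out explicitly, reusing the form and tracker from the test above.

# Hypothetical: the implicit default mapping written out by hand.
explicit_mapping = form.from_entity(entity="some_slot")
domain = Domain.from_dict({"forms": [{"some form": {"some_slot": [explicit_mapping]}}]})
assert form.extract_requested_slot(tracker, domain) == {"some_slot": "some_value"}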
Example #3
def test_yaml_writer_avoids_dumping_not_existing_user_messages():
    events = [
        UserUttered("greet", {"name": "greet"}),
        ActionExecuted("utter_greet"),
    ]
    tracker = DialogueStateTracker.from_events("default", events)
    dump = YAMLStoryWriter().dumps(tracker.as_story().story_steps)

    assert (dump.strip() == textwrap.dedent("""
        version: "2.0"
        stories:
        - story: default
          steps:
          - intent: greet
          - action: utter_greet

    """).strip())
Example #4
async def test_ask_affirmation():
    tracker = DialogueStateTracker.from_events(
        "some-sender", evts=_message_requiring_fallback()
    )
    domain = Domain.empty()
    action = TwoStageFallbackAction()

    events = await action.run(
        CollectingOutputChannel(),
        TemplatedNaturalLanguageGenerator(domain.templates),
        tracker,
        domain,
    )

    assert len(events) == 2
    assert events[0] == ActiveLoop(ACTION_TWO_STAGE_FALLBACK_NAME)
    assert isinstance(events[1], BotUttered)
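
The TwoStageFallbackAction examples build their trackers from helpers that are not listed on this page. A hypothetical sketch of _message_requiring_fallback, assuming it simply yields a listen action followed by a low-confidence user message:

def _message_requiring_fallback() -> List[Event]:
    # Hypothetical helper: a user turn whose NLU confidence was low enough
    # to have triggered the two-stage fallback.
    return [
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered("hi", {"name": "greet", "confidence": 0.01}),
    ]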
Example #5
def test_form_submit_rule():
    form_name = "some_form"
    submit_action_name = "utter_submit"
    domain = Domain.from_yaml(f"""
        intents:
        - {GREET_INTENT_NAME}
        actions:
        - {UTTER_GREET_ACTION}
        - some-action
        - {submit_action_name}
        slots:
          {REQUESTED_SLOT}:
            type: unfeaturized
        forms:
        - {form_name}
    """)

    form_submit_rule = _form_submit_rule(domain, submit_action_name, form_name)

    policy = RulePolicy()
    policy.train([GREET_RULE, form_submit_rule], domain, RegexInterpreter())

    form_conversation = DialogueStateTracker.from_events(
        "in a form",
        evts=[
            # Form was activated
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered("haha", {"name": GREET_INTENT_NAME}),
            ActionExecuted(form_name),
            ActiveLoop(form_name),
            SlotSet(REQUESTED_SLOT, "some value"),
            ActionExecuted(ACTION_LISTEN_NAME),
            # User responds and fills requested slot
            UserUttered("haha", {"name": GREET_INTENT_NAME}),
            ActionExecuted(form_name),
            # Form gets deactivated
            ActiveLoop(None),
            SlotSet(REQUESTED_SLOT, None),
        ],
        slots=domain.slots,
    )

    # RulePolicy predicts action which handles submit
    action_probabilities = policy.predict_action_probabilities(
        form_conversation, domain, RegexInterpreter())
    assert_predicted_action(action_probabilities, domain, submit_action_name)
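
_form_submit_rule is another helper that is not shown here. A hypothetical sketch, assuming the rule is encoded as one more rule tracker: once the form deactivates and clears the requested slot, the submit action should run.

def _form_submit_rule(
    domain: Domain, submit_action_name: Text, form_name: Text
) -> DialogueStateTracker:
    # Hypothetical helper; the real rule may contain additional events.
    rule = DialogueStateTracker.from_events(
        "form submit rule",
        evts=[
            ActiveLoop(form_name),
            ActionExecuted(RULE_SNIPPET_ACTION_NAME),
            ActionExecuted(form_name),
            ActiveLoop(None),
            SlotSet(REQUESTED_SLOT, None),
            ActionExecuted(submit_action_name),
            ActionExecuted(ACTION_LISTEN_NAME),
        ],
        slots=domain.slots,
    )
    rule.is_rule_tracker = True  # marked as a rule, like GREET_RULE above
    return rule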
Example #6
def test_get_next_action_probabilities_pass_policy_predictions_without_interpreter_arg(
    predict_function: Callable,
):
    policy = TEDPolicy()

    policy.predict_action_probabilities = predict_function

    ensemble = SimplePolicyEnsemble(policies=[policy])
    interpreter = Mock()
    domain = Domain.empty()

    processor = MessageProcessor(interpreter, ensemble, domain,
                                 InMemoryTrackerStore(domain), Mock())

    with pytest.warns(DeprecationWarning):
        processor._get_next_action_probabilities(
            DialogueStateTracker.from_events(
                "lala", [ActionExecuted(ACTION_LISTEN_NAME)]))
Example #7
async def test_validate_slots(
    validate_return_events: List[Dict], expected_events: List[Event]
):
    form_name = "my form"
    slot_name = "num_people"
    slot_value = "hi"
    events = [
        ActiveLoop(form_name),
        SlotSet(REQUESTED_SLOT, slot_name),
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered(slot_value, entities=[{"entity": "num_tables", "value": 5}]),
    ]
    tracker = DialogueStateTracker.from_events(sender_id="bla", evts=events)

    domain = f"""
    slots:
      {slot_name}:
        type: unfeaturized
      num_tables:
        type: unfeaturized
    forms:
    - {form_name}:
        {slot_name}:
        - type: from_text
        num_tables:
        - type: from_entity
          entity: num_tables
    actions:
    - validate_{form_name}
    """
    domain = Domain.from_yaml(domain)
    action_server_url = "http://my-action-server:5055/webhook"

    with aioresponses() as mocked:
        mocked.post(action_server_url, payload={"events": validate_return_events})

        action_server = EndpointConfig(action_server_url)
        action = FormAction(form_name, action_server)

        events = await action.run(
            CollectingOutputChannel(),
            TemplatedNaturalLanguageGenerator(domain.templates),
            tracker,
            domain,
        )
        assert events == expected_events
Example #8
def test_faq_rule():
    domain = Domain.from_yaml(f"""
intents:
- {GREET_INTENT_NAME}
actions:
- {UTTER_GREET_ACTION}
    """)

    policy = RulePolicy()
    policy.train([GREET_RULE], domain, RegexInterpreter())
    new_conversation = DialogueStateTracker.from_events(
        "bla2",
        GREET_RULE.applied_events()[1:-1])
    action_probabilities = policy.predict_action_probabilities(
        new_conversation, domain)

    assert_predicted_action(action_probabilities, domain, UTTER_GREET_ACTION)
Example #9
async def test_dont_predict_form_if_already_finished():
    form_name = "some_form"

    domain = Domain.from_yaml(f"""
    intents:
    - {GREET_INTENT_NAME}
    actions:
    - {UTTER_GREET_ACTION}
    - some-action
    slots:
      {REQUESTED_SLOT}:
        type: unfeaturized
    forms:
    - {form_name}
""")

    policy = RulePolicy()
    policy.train([GREET_RULE], domain, RegexInterpreter())

    form_conversation = DialogueStateTracker.from_events(
        "in a form",
        evts=[
            # We are in an active form
            ActionExecuted(form_name),
            ActiveLoop(form_name),
            SlotSet(REQUESTED_SLOT, "some value"),
            ActionExecuted(ACTION_LISTEN_NAME),
            # User sends message as response to a requested slot
            UserUttered("haha", {"name": GREET_INTENT_NAME}),
            # Form is happy and deactivates itself
            ActionExecuted(form_name),
            ActiveLoop(None),
            SlotSet(REQUESTED_SLOT, None),
            # User sends another message. Form is already done. Shouldn't get triggered
            # again
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered("haha", {"name": GREET_INTENT_NAME}),
        ],
        slots=domain.slots,
    )

    # RulePolicy shouldn't trigger the finished form again; the greet rule applies instead
    action_probabilities = policy.predict_action_probabilities(
        form_conversation, domain, RegexInterpreter())
    assert_predicted_action(action_probabilities, domain, UTTER_GREET_ACTION)
Example #10
def _predict_tracker_actions(tracker,
                             agent: 'Agent',
                             fail_on_prediction_errors: bool = False,
                             use_e2e: bool = False):
    from rasa.core.trackers import DialogueStateTracker

    processor = agent.create_processor()
    tracker_eval_store = EvaluationStore()

    events = list(tracker.events)

    partial_tracker = DialogueStateTracker.from_events(tracker.sender_id,
                                                       events[:1],
                                                       agent.domain.slots)

    tracker_actions = []

    for event in events[1:]:
        if isinstance(event, ActionExecuted):
            action_executed_result, policy, confidence = \
                _collect_action_executed_predictions(
                    processor, partial_tracker, event,
                    fail_on_prediction_errors
                )
            tracker_eval_store.merge_store(action_executed_result)
            tracker_actions.append({
                "action": action_executed_result.action_targets[0],
                "predicted": action_executed_result.action_predictions[0],
                "policy": policy,
                "confidence": confidence,
            })
        elif use_e2e and isinstance(event, UserUttered):
            user_uttered_result = \
                _collect_user_uttered_predictions(
                    event, partial_tracker, fail_on_prediction_errors)

            tracker_eval_store.merge_store(user_uttered_result)
        else:
            partial_tracker.update(event)

    return tracker_eval_store, partial_tracker, tracker_actions
Example #11
def test_invalid_slot_mapping():
    form_name = "my_form"
    form = FormAction(form_name, None)
    slot_name = "test"
    tracker = DialogueStateTracker.from_events(
        "sender", [SlotSet(REQUESTED_SLOT, slot_name)])

    domain = Domain.from_dict(
        {"forms": [{
            form_name: {
                slot_name: [{
                    "type": "invalid"
                }]
            }
        }]})

    with pytest.raises(ValueError):
        form.extract_requested_slot(tracker, domain)
Example #12
async def test_ask_for_slot(domain: Dict, expected_action: Text,
                            monkeypatch: MonkeyPatch):
    slot_name = "sun"

    action_from_name = Mock(return_value=action.ActionListen())
    monkeypatch.setattr(action, action.action_from_name.__name__,
                        action_from_name)

    form = FormAction("my_form", None)
    await form._ask_for_slot(
        Domain.from_dict(domain),
        None,
        None,
        slot_name,
        DialogueStateTracker.from_events("dasd", []),
    )

    action_from_name.assert_called_once_with(expected_action, None, ANY)
Example #13
def create_tracker_with_partially_saved_events(
    tracker_store: TrackerStore,
) -> Tuple[List[Event], DialogueStateTracker]:
    # creates a tracker with two events and saves it to the tracker store
    # following that, it adds three more events that are not saved to the tracker store
    sender_id = uuid.uuid4().hex

    # create tracker with two events and save it
    events = [UserUttered("hello"), BotUttered("what")]
    tracker = DialogueStateTracker.from_events(sender_id, events)
    tracker_store.save(tracker)

    # add more events to the tracker, do not yet save it
    events = [ActionExecuted(ACTION_LISTEN_NAME), UserUttered("123"), BotUttered("yes")]
    for event in events:
        tracker.update(event)

    return events, tracker
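
A hypothetical usage of the fixture above, assuming an InMemoryTrackerStore (whose constructor and retrieve method appear in other examples on this page):

# Hypothetical usage: only the first two events have been persisted.
tracker_store = InMemoryTrackerStore(Domain.empty())
unsaved_events, tracker = create_tracker_with_partially_saved_events(tracker_store)

stored = tracker_store.retrieve(tracker.sender_id)
assert len(list(stored.events)) == len(list(tracker.events)) - len(unsaved_events)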
Example #14
def test_fallback_wins_over_mapping():
    domain = Domain.load("data/test_domains/default.yml")
    events = [
        ActionExecuted(ACTION_LISTEN_NAME),
        # Low confidence should trigger fallback
        utilities.user_uttered(USER_INTENT_RESTART, 0.0001),
    ]
    tracker = DialogueStateTracker.from_events("test", events, [])

    ensemble = SimplePolicyEnsemble([FallbackPolicy(), MappingPolicy()])

    result, best_policy = ensemble.probabilities_using_best_policy(
        tracker, domain)
    max_confidence_index = result.index(max(result))
    index_of_fallback_policy = 0
    next_action = domain.action_for_index(max_confidence_index, None)

    assert best_policy == f"policy_{index_of_fallback_policy}_{FallbackPolicy.__name__}"
    assert next_action.name() == ACTION_DEFAULT_FALLBACK_NAME
Example #15
async def test_form_unhappy_path():
    form_name = "some_form"

    domain = Domain.from_yaml(
        f"""
        intents:
        - {GREET_INTENT_NAME}
        actions:
        - {UTTER_GREET_ACTION}
        - some-action
        slots:
          {REQUESTED_SLOT}:
            type: unfeaturized
        forms:
        - {form_name}
    """
    )

    policy = RulePolicy()
    policy.train([GREET_RULE], domain, RegexInterpreter())

    unhappy_form_conversation = DialogueStateTracker.from_events(
        "in a form",
        evts=[
            # We are in an active form
            ActionExecuted(form_name),
            Form(form_name),
            SlotSet(REQUESTED_SLOT, "some value"),
            # User responds to slot request
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered("haha", {"name": GREET_INTENT_NAME}),
            # Form isn't happy with the answer and rejects execution
            ActionExecutionRejected(form_name),
        ],
        slots=domain.slots,
    )

    # RulePolicy doesn't trigger the form but predicts the FAQ rule instead
    action_probabilities = policy.predict_action_probabilities(
        unhappy_form_conversation, domain
    )

    assert_predicted_action(action_probabilities, domain, UTTER_GREET_ACTION)
Example #16
def test_extract_requested_slot_from_entity(
    mapping_not_intent: Optional[Text],
    mapping_intent: Optional[Text],
    mapping_role: Optional[Text],
    mapping_group: Optional[Text],
    entities: List[Dict[Text, Any]],
    intent: Text,
    expected_slot_values: Dict[Text, Text],
):
    """Test extraction of a slot value from entity with the different restrictions."""

    form_name = "some form"
    form = FormAction(form_name, None)

    mapping = form.from_entity(
        entity="some_entity",
        role=mapping_role,
        group=mapping_group,
        intent=mapping_intent,
        not_intent=mapping_not_intent,
    )
    domain = Domain.from_dict(
        {"forms": [{
            form_name: {
                "some_slot": [mapping]
            }
        }]})

    tracker = DialogueStateTracker.from_events(
        "default",
        [
            SlotSet(REQUESTED_SLOT, "some_slot"),
            UserUttered("bla",
                        intent={
                            "name": intent,
                            "confidence": 1.0
                        },
                        entities=entities),
        ],
    )

    slot_values = form.extract_requested_slot(tracker, domain)
    assert slot_values == expected_slot_values
Example #17
async def test_activate_and_immediate_deactivate():
    slot_name = "num_people"
    slot_value = 5

    tracker = DialogueStateTracker.from_events(
        sender_id="bla",
        evts=[
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered(
                "haha",
                {"name": "greet"},
                entities=[{
                    "entity": slot_name,
                    "value": slot_value
                }],
            ),
        ],
    )
    form_name = "my form"
    action = FormAction(form_name, None)
    domain = f"""
    forms:
    - {form_name}:
        {slot_name}:
        - type: from_entity
          entity: {slot_name}
    slots:
      {slot_name}:
        type: unfeaturized
    """
    domain = Domain.from_yaml(domain)
    events = await action.run(
        CollectingOutputChannel(),
        TemplatedNaturalLanguageGenerator(domain.templates),
        tracker,
        domain,
    )
    assert events == [
        Form(form_name),
        SlotSet(slot_name, slot_value),
        SlotSet(REQUESTED_SLOT, None),
        Form(None),
    ]
Example #18
async def test_trigger_slot_mapping_applies(
    trigger_slot_mapping: Dict, expected_value: Text
):
    form_name = "some_form"
    entity_name = "some_slot"
    slot_filled_by_trigger_mapping = "other_slot"
    form = FormAction(form_name, None)

    domain = Domain.from_dict(
        {
            "forms": [
                {
                    form_name: {
                        entity_name: [
                            {
                                "type": "from_entity",
                                "entity": entity_name,
                                "intent": "some_intent",
                            }
                        ],
                        slot_filled_by_trigger_mapping: [trigger_slot_mapping],
                    }
                }
            ]
        }
    )

    tracker = DialogueStateTracker.from_events(
        "default",
        [
            SlotSet(REQUESTED_SLOT, "some_slot"),
            UserUttered(
                "bla",
                intent={"name": "greet", "confidence": 1.0},
                entities=[{"entity": entity_name, "value": "some_value"}],
            ),
            ActionExecuted(ACTION_LISTEN_NAME),
        ],
    )

    slot_values = form.extract_other_slots(tracker, domain)
    assert slot_values == {slot_filled_by_trigger_mapping: expected_value}
Example #19
async def test_2nd_affirm_successful():
    tracker = DialogueStateTracker.from_events(
        "some-sender",
        evts=[
            # User sends message with low NLU confidence
            *_message_requiring_fallback(),
            ActiveLoop(ACTION_TWO_STAGE_FALLBACK_NAME),
            # Action asks user to affirm
            *_two_stage_clarification_request(),
            ActionExecuted(ACTION_LISTEN_NAME),
            # User denies suggested intents
            UserUttered("hi", {"name": USER_INTENT_OUT_OF_SCOPE}),
            # Action asks user to rephrase
            *_two_stage_clarification_request(),
            # User rephrased with low confidence
            *_message_requiring_fallback(),
            *_two_stage_clarification_request(),
            # Action asks user to affirm for the last time
            ActionExecuted(ACTION_LISTEN_NAME),
            # User affirms successfully
            UserUttered("hi", {"name": "greet"}),
        ],
    )
    domain = Domain.empty()
    action = TwoStageFallbackAction()

    events = await action.run(
        CollectingOutputChannel(),
        TemplatedNaturalLanguageGenerator(domain.templates),
        tracker,
        domain,
    )

    for event in events:
        tracker.update(event)

    applied_events = tracker.applied_events()

    assert applied_events == [
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered("hi", {"name": "greet"}),
    ]
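
_two_stage_clarification_request is the remaining unlisted helper used by the fallback examples. A hypothetical sketch, assuming it records the fallback action run plus the bot's clarification question:

def _two_stage_clarification_request() -> List[Event]:
    # Hypothetical helper: the fallback loop runs and the bot asks the user
    # to affirm (or rephrase) the suspected intent.
    return [
        ActionExecuted(ACTION_TWO_STAGE_FALLBACK_NAME),
        BotUttered("Did you mean 'greet'?"),
    ]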
Example #20
async def test_predict_action_listen_after_form():
    form_name = "some_form"

    domain = Domain.from_yaml(
        f"""
        intents:
        - {GREET_INTENT_NAME}
        actions:
        - {UTTER_GREET_ACTION}
        - some-action
        slots:
          {REQUESTED_SLOT}:
            type: unfeaturized
        forms:
        - {form_name}
    """
    )

    policy = RulePolicy()
    policy.train([GREET_RULE], domain, RegexInterpreter())

    form_conversation = DialogueStateTracker.from_events(
        "in a form",
        evts=[
            # We are in an active form
            ActionExecuted(form_name),
            Form(form_name),
            SlotSet(REQUESTED_SLOT, "some value"),
            ActionExecuted(ACTION_LISTEN_NAME),
            # User sends message as response to a requested slot
            UserUttered("haha", {"name": GREET_INTENT_NAME}),
            # Form is running again
            ActionExecuted(form_name),
        ],
        slots=domain.slots,
    )

    # RulePolicy predicts action listen
    action_probabilities = policy.predict_action_probabilities(
        form_conversation, domain
    )
    assert_predicted_action(action_probabilities, domain, ACTION_LISTEN_NAME)
Example #21
def test_current_state_without_events(default_domain: Domain):
    tracker_store = MockedMongoTrackerStore(default_domain)

    # insert some events
    events = [
        UserUttered("Hola", {"name": "greet"}),
        BotUttered("Hi"),
        UserUttered("Ciao", {"name": "greet"}),
        BotUttered("Hi2"),
    ]

    sender_id = "test_mongo_tracker_store_current_state_without_events"
    tracker = DialogueStateTracker.from_events(sender_id, events)

    # get current state without events
    # noinspection PyProtectedMember
    state = tracker_store._current_tracker_state_without_events(tracker)

    # `events` key should not be in there
    assert state and "events" not in state
Example #22
    async def _revert_fallback_events(
        self,
        output_channel: OutputChannel,
        nlg: NaturalLanguageGenerator,
        tracker: DialogueStateTracker,
        domain: Domain,
        events_so_far: List[Event],
    ) -> List[Event]:
        revert_events = [UserUtteranceReverted(), UserUtteranceReverted()]

        temp_tracker = DialogueStateTracker.from_events(
            tracker.sender_id,
            tracker.applied_events() + events_so_far + revert_events)

        while temp_tracker.latest_message and not await self.is_done(
                output_channel, nlg, temp_tracker, domain, []):
            temp_tracker.update(revert_events[-1])
            revert_events.append(UserUtteranceReverted())

        return revert_events
Example #23
async def test_loop_without_deactivate():
    expected_activation_events = [
        ActionExecutionRejected("tada"),
        ActionExecuted("test"),
    ]

    expected_do_events = [ActionExecuted("do")]
    form_name = "my form"

    class MyLoop(LoopAction):
        def name(self) -> Text:
            return form_name

        async def activate(self, *args: Any) -> List[Event]:
            return expected_activation_events

        async def do(self, *args: Any) -> List[Event]:
            return expected_do_events

        async def deactivate(self, *args) -> List[Event]:
            raise ValueError("this shouldn't be called")

        async def is_done(self, *args) -> bool:
            return False

    tracker = DialogueStateTracker.from_events("some sender", [])
    domain = Domain.empty()

    action = MyLoop()
    actual = await action.run(
        CollectingOutputChannel(),
        TemplatedNaturalLanguageGenerator(domain.templates),
        tracker,
        domain,
    )

    assert actual == [
        ActiveLoop(form_name),
        *expected_activation_events,
        *expected_do_events,
    ]
Example #24
async def test_form_unhappy_path_without_rule():
    form_name = "some_form"
    other_intent = "bye"
    domain = Domain.from_yaml(
        f"""
        intents:
        - {GREET_INTENT_NAME}
        - {other_intent}
        actions:
        - {UTTER_GREET_ACTION}
        - some-action
        slots:
          {REQUESTED_SLOT}:
            type: unfeaturized
        forms:
        - {form_name}
    """
    )

    policy = RulePolicy()
    policy.train([GREET_RULE], domain, RegexInterpreter())

    conversation_events = [
        ActionExecuted(form_name),
        Form(form_name),
        SlotSet(REQUESTED_SLOT, "some value"),
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered("haha", {"name": other_intent}),
        Form(form_name),
        ActionExecutionRejected(form_name),
    ]

    # Unhappy path is not handled. No rule matches. Let's hope ML fixes our problems 🤞
    action_probabilities = policy.predict_action_probabilities(
        DialogueStateTracker.from_events(
            "casd", evts=conversation_events, slots=domain.slots
        ),
        domain,
    )

    assert max(action_probabilities) == policy._core_fallback_threshold
Example #25
def test_tracker_store_retrieve_with_events_from_previous_sessions(
        tracker_store_type: Type[TrackerStore], tracker_store_kwargs: Dict):
    tracker_store = tracker_store_type(Domain.empty(), **tracker_store_kwargs)
    tracker_store.load_events_from_previous_conversation_sessions = True

    conversation_id = uuid.uuid4().hex
    tracker = DialogueStateTracker.from_events(
        conversation_id,
        [
            ActionExecuted(ACTION_SESSION_START_NAME),
            SessionStarted(),
            UserUttered("hi"),
            ActionExecuted(ACTION_SESSION_START_NAME),
            SessionStarted(),
        ],
    )
    tracker_store.save(tracker)

    actual = tracker_store.retrieve(conversation_id)

    assert len(actual.events) == len(tracker.events)
Example #26
def test_extract_other_slots_with_entity(
    some_other_slot_mapping: List[Dict[Text, Any]],
    some_slot_mapping: List[Dict[Text, Any]],
    entities: List[Dict[Text, Any]],
    intent: Text,
    expected_slot_values: Dict[Text, Text],
):
    """Test extraction of other not requested slots values from entities."""

    form_name = "some_form"
    form = FormAction(form_name, None)

    domain = Domain.from_dict(
        {
            "forms": [
                {
                    form_name: {
                        "some_other_slot": some_other_slot_mapping,
                        "some_slot": some_slot_mapping,
                    }
                }
            ]
        }
    )

    tracker = DialogueStateTracker.from_events(
        "default",
        [
            SlotSet(REQUESTED_SLOT, "some_slot"),
            UserUttered(
                "bla", intent={"name": intent, "confidence": 1.0}, entities=entities
            ),
            ActionExecuted(ACTION_LISTEN_NAME),
        ],
    )

    slot_values = form.extract_other_slots(tracker, domain)
    # check that the value was extracted for the non-requested slot
    assert slot_values == expected_slot_values
Example #27
def test_faq_rule():
    domain = Domain.from_yaml(f"""
intents:
- {GREET_INTENT_NAME}
actions:
- {UTTER_GREET_ACTION}
    """)

    policy = RulePolicy()
    policy.train([GREET_RULE], domain, RegexInterpreter())
    # remove first ... action and utter_greet and last action_listen from greet rule
    new_conversation = DialogueStateTracker.from_events(
        "simple greet",
        evts=[
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered("haha", {"name": GREET_INTENT_NAME}),
        ],
    )
    action_probabilities = policy.predict_action_probabilities(
        new_conversation, domain, RegexInterpreter())

    assert_predicted_action(action_probabilities, domain, UTTER_GREET_ACTION)
Example #28
def test_fetch_events_within_time_range_with_session_events():
    conversation_id = "test_fetch_events_within_time_range_with_sessions"

    tracker_store = SQLTrackerStore(
        dialect="sqlite", db=f"{uuid.uuid4().hex}.db", domain=Domain.empty()
    )

    events = [
        random_user_uttered_event(1),
        SessionStarted(2),
        ActionExecuted(timestamp=3, action_name=ACTION_SESSION_START_NAME),
        random_user_uttered_event(4),
    ]
    tracker = DialogueStateTracker.from_events(conversation_id, evts=events)
    tracker_store.save(tracker)

    exporter = MockExporter(tracker_store=tracker_store)

    # noinspection PyProtectedMember
    fetched_events = exporter._fetch_events_within_time_range()

    assert len(fetched_events) == len(events)
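
random_user_uttered_event is not defined on this page either. A hypothetical sketch, assuming the export tests only need a user message with a unique text and a controllable timestamp:

def random_user_uttered_event(timestamp: float) -> UserUttered:
    # Hypothetical helper: unique message text, fixed timestamp so events
    # can be filtered by time range.
    return UserUttered(uuid.uuid4().hex, timestamp=timestamp)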
Example #29
def test_predict_nothing_if_fallback_disabled():
    other_intent = "other"
    domain = Domain.from_yaml(f"""
    intents:
    - {GREET_INTENT_NAME}
    - {other_intent}
    actions:
    - {UTTER_GREET_ACTION}
        """)
    policy = RulePolicy(enable_fallback_prediction=False)
    policy.train([GREET_RULE], domain, RegexInterpreter())
    new_conversation = DialogueStateTracker.from_events(
        "bla2",
        evts=[
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered("haha", {"name": other_intent}),
        ],
    )
    action_probabilities = policy.predict_action_probabilities(
        new_conversation, domain, RegexInterpreter())

    assert max(action_probabilities) == 0
Example #30
def test_default_actions(intent_name: Text, expected_action_name: Text):
    domain = Domain.from_yaml(f"""
intents:
- {GREET_INTENT_NAME}
actions:
- {UTTER_GREET_ACTION}
    """)
    policy = RulePolicy()
    policy.train([GREET_RULE], domain, RegexInterpreter())
    new_conversation = DialogueStateTracker.from_events(
        "bla2",
        evts=[
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered("haha", {"name": GREET_INTENT_NAME}),
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered("haha", {"name": intent_name}),
        ],
    )
    action_probabilities = policy.predict_action_probabilities(
        new_conversation, domain, RegexInterpreter())

    assert_predicted_action(action_probabilities, domain, expected_action_name)