def test_temporary_tracker():
    """`FormAction._temporary_tracker` carries over old events and slots and
    appends the implicit form events plus the new events."""
    extra_slot = "some_slot"
    sender_id = "test"
    domain = Domain.from_yaml(
        f"""
        slots:
          {extra_slot}:
            type: unfeaturized
        """
    )

    previous_events = [ActionExecuted(ACTION_LISTEN_NAME)]
    old_tracker = DialogueStateTracker.from_events(
        sender_id, previous_events, slots=domain.slots
    )
    new_events = [Restarted()]
    form_action = FormAction("some name", None)
    temp_tracker = form_action._temporary_tracker(old_tracker, new_events, domain)

    # Slots known to the domain must survive into the temporary tracker.
    assert extra_slot in temp_tracker.slots.keys()
    # Expected order: old events, then the implicit form bookkeeping
    # (requested-slot reset + form execution), then the new events.
    assert list(temp_tracker.events) == [
        *previous_events,
        SlotSet(REQUESTED_SLOT),
        ActionExecuted(form_action.name()),
        *new_events,
    ]
def test_extract_requested_slot_from_entity(
    mapping_not_intent: Optional[Text],
    mapping_intent: Optional[Text],
    mapping_role: Optional[Text],
    mapping_group: Optional[Text],
    entities: List[Dict[Text, Any]],
    intent: Text,
    expected_slot_values: Dict[Text, Text],
):
    """Test extraction of a slot value from entity with the different restrictions."""
    form_name = "some form"
    form = FormAction(form_name, None)

    # Build a `from_entity` mapping restricted by role/group/intent as given
    # by the parametrization.
    mapping = form.from_entity(
        entity="some_entity",
        role=mapping_role,
        group=mapping_group,
        intent=mapping_intent,
        not_intent=mapping_not_intent,
    )
    domain = Domain.from_dict({"forms": {form_name: {"some_slot": [mapping]}}})

    tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActiveLoop(form_name),
            SlotSet(REQUESTED_SLOT, "some_slot"),
            UserUttered(
                "bla", intent={"name": intent, "confidence": 1.0}, entities=entities
            ),
        ],
    )

    slot_values = form.extract_requested_slot(tracker, domain, "some_slot")
    assert slot_values == expected_slot_values
async def test_set_slot_and_deactivate():
    """Filling the last required slot resets REQUESTED_SLOT and deactivates the form."""
    form_name = "my form"
    slot_name = "num_people"
    slot_value = "dasdasdfasdf"
    events = [
        ActiveLoop(form_name),
        SlotSet(REQUESTED_SLOT, slot_name),
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered(slot_value),
    ]
    tracker = DialogueStateTracker.from_events(sender_id="bla", evts=events)

    domain = f"""
    forms:
      {form_name}:
        {slot_name}:
        - type: from_text
    slots:
      {slot_name}:
        type: text
        influence_conversation: false
    """
    domain = Domain.from_yaml(domain)

    action = FormAction(form_name, None)
    events = await action.run(
        CollectingOutputChannel(),
        TemplatedNaturalLanguageGenerator(domain.templates),
        tracker,
        domain,
    )
    # The slot is filled from the raw user text; no further slot is requested
    # and the loop ends.
    assert events == [
        SlotSet(slot_name, slot_value),
        SlotSet(REQUESTED_SLOT, None),
        ActiveLoop(None),
    ]
def test_mapping_wins_over_form(events: List[Event]):
    """MappingPolicy's prediction wins over form and other policies for mapped intents."""
    domain = """
    forms:
    - test-form
    """
    domain = Domain.from_yaml(domain)
    tracker = DialogueStateTracker.from_events("test", events, [])
    ensemble = SimplePolicyEnsemble([
        MappingPolicy(),
        ConstantPolicy(priority=1, predict_index=0),
        FormPolicy(),
        FallbackPolicy(),
    ])

    prediction = ensemble.probabilities_using_best_policy(
        tracker, domain, RegexInterpreter())
    next_action = rasa.core.actions.action.action_for_index(
        prediction.max_confidence_index, domain, None)

    index_of_mapping_policy = 0  # MappingPolicy's position in the ensemble above
    assert (prediction.policy_name ==
            f"policy_{index_of_mapping_policy}_{MappingPolicy.__name__}")
    assert next_action.name() == ACTION_RESTART_NAME
def test_form_wins_over_everything_else(ensemble: SimplePolicyEnsemble):
    """While a form is active, FormPolicy's prediction overrides all other policies."""
    form_name = "test-form"
    domain = f"""
    forms:
    - {form_name}
    """
    domain = Domain.from_yaml(domain)

    events = [
        ActiveLoop("test-form"),
        ActionExecuted(ACTION_LISTEN_NAME),
        utilities.user_uttered("test", 1),
    ]
    tracker = DialogueStateTracker.from_events("test", events, [])
    prediction = ensemble.probabilities_using_best_policy(
        tracker, domain, RegexInterpreter())

    next_action = rasa.core.actions.action.action_for_index(
        prediction.max_confidence_index, domain, None)

    # assumes FormPolicy is first in the fixture ensemble — TODO confirm fixture
    index_of_form_policy = 0
    assert (prediction.policy_name ==
            f"policy_{index_of_form_policy}_{FormPolicy.__name__}")
    assert next_action.name() == form_name
def test_fallback_wins_over_mapping():
    """A low-confidence user message triggers fallback even when a mapping exists."""
    domain = Domain.load("data/test_domains/default.yml")
    history = [
        ActionExecuted(ACTION_LISTEN_NAME),
        # Confidence far below any threshold, so the fallback should kick in.
        utilities.user_uttered(USER_INTENT_RESTART, 0.0001),
    ]
    tracker = DialogueStateTracker.from_events("test", history, [])

    policies = SimplePolicyEnsemble([FallbackPolicy(), MappingPolicy()])
    prediction = policies.probabilities_using_best_policy(
        tracker, domain, RegexInterpreter()
    )

    chosen_action = rasa.core.actions.action.action_for_index(
        prediction.max_confidence_index, domain, None
    )

    fallback_position = 0  # FallbackPolicy is first in the ensemble
    expected_policy_name = f"policy_{fallback_position}_{FallbackPolicy.__name__}"
    assert prediction.policy_name == expected_policy_name
    assert chosen_action.name() == ACTION_DEFAULT_FALLBACK_NAME
def test_fallback_mapping_restart():
    """The restart intent maps to the restart action even right after a fallback."""
    domain = Domain.load("data/test_domains/default.yml")
    events = [
        ActionExecuted(ACTION_DEFAULT_FALLBACK_NAME, timestamp=1),
        utilities.user_uttered(USER_INTENT_RESTART, 1, timestamp=2),
    ]
    tracker = DialogueStateTracker.from_events("test", events, [])

    two_stage_fallback_policy = TwoStageFallbackPolicy(
        priority=2, deny_suggestion_intent_name="deny")
    mapping_policy = MappingPolicy(priority=1)
    mapping_fallback_ensemble = SimplePolicyEnsemble(
        [two_stage_fallback_policy, mapping_policy])

    prediction = mapping_fallback_ensemble.probabilities_using_best_policy(
        tracker, domain, RegexInterpreter())

    index_of_mapping_policy = 1  # MappingPolicy is second in the ensemble
    next_action = rasa.core.actions.action.action_for_index(
        prediction.max_confidence_index, domain, None)

    assert (prediction.policy_name ==
            f"policy_{index_of_mapping_policy}_{MappingPolicy.__name__}")
    assert next_action.name() == ACTION_RESTART_NAME
def test_converter_for_inference(
        input_converter: CoreFeaturizationInputConverter):
    """Tracker events are converted into inference messages, one key attribute each."""
    # create tracker
    events = [
        UserUttered(
            text="some text with entities!",
            intent={INTENT_NAME_KEY: "greet"},
            entities=[_create_entity(value="Bot", type="entity_name")],
        ),
        ActionExecuted(action_name="utter_greet", action_text="Hi how are you?"),
        ActionExecuted(action_name="action_listen"),
        UserUttered(text="some text with intent!", intent={INTENT_NAME_KEY: "inform"}),
    ]
    tracker = DialogueStateTracker.from_events(sender_id="arbitrary", evts=events)

    # convert!
    messages = input_converter.convert_for_inference(tracker)

    # check that messages were created from tracker events as expected
    _check_messages_created_from_events_as_expected(
        events=tracker.events, messages=messages)

    # check that each message contains only one attribute, which must be a key attribute
    _check_messages_contain_attribute_which_is_key_attribute(messages=messages)
async def test_give_it_up_after_low_confidence_after_affirm_request():
    """Two consecutive low-confidence messages make the two-stage fallback give up."""
    tracker = DialogueStateTracker.from_events(
        "some-sender",
        evts=[
            # User sends message with low NLU confidence
            *_message_requiring_fallback(),
            ActiveLoop(ACTION_TWO_STAGE_FALLBACK_NAME),
            # Action asks user to affirm
            *_two_stage_clarification_request(),
            # User's affirms with low NLU confidence again
            *_message_requiring_fallback(),
        ],
    )
    domain = Domain.empty()
    action = TwoStageFallbackAction()

    events = await action.run(
        CollectingOutputChannel(),
        TemplatedNaturalLanguageGenerator(domain.responses),
        tracker,
        domain,
    )

    # The fallback loop is abandoned and the unconfirmed utterance is reverted.
    assert events == [ActiveLoop(None), UserUtteranceReverted()]
def test_fetch_events_within_time_range_with_session_events(tmp_path: Path):
    """Exporting without time bounds returns every event, including session events."""
    conversation_id = "test_fetch_events_within_time_range_with_sessions"

    # Use a throwaway on-disk SQLite store so the exporter reads real rows.
    tracker_store = SQLTrackerStore(
        dialect="sqlite",
        db=str(tmp_path / f"{uuid.uuid4().hex}.db"),
        domain=Domain.empty(),
    )

    events = [
        random_user_uttered_event(1),
        SessionStarted(2),
        ActionExecuted(timestamp=3, action_name=ACTION_SESSION_START_NAME),
        random_user_uttered_event(4),
    ]
    tracker = DialogueStateTracker.from_events(conversation_id, evts=events)
    tracker_store.save(tracker)

    exporter = MockExporter(tracker_store=tracker_store)
    # noinspection PyProtectedMember
    fetched_events = exporter._fetch_events_within_time_range()

    assert len(fetched_events) == len(events)
def test_extract_requested_slot_when_mapping_applies(slot_mapping: Dict,
                                                     expected_value: Text):
    """The requested slot is filled when the parametrized slot mapping matches."""
    form_name = "some_form"
    entity_name = "some_slot"
    form = FormAction(form_name, None)

    domain = Domain.from_dict(
        {"forms": {
            form_name: {
                entity_name: [slot_mapping]
            }
        }})

    tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActiveLoop(form_name),
            SlotSet(REQUESTED_SLOT, "some_slot"),
            UserUttered(
                "bla",
                intent={
                    "name": "greet",
                    "confidence": 1.0
                },
                entities=[{
                    "entity": entity_name,
                    "value": "some_value"
                }],
            ),
            ActionExecuted(ACTION_LISTEN_NAME),
        ],
    )

    slot_values = form.extract_requested_slot(tracker, domain)
    # check that the value was extracted for correct intent
    assert slot_values == {"some_slot": expected_value}
def test_policy_priority():
    """The higher-priority policy wins regardless of its position in the ensemble."""
    domain = Domain.load("data/test_domains/default.yml")
    tracker = DialogueStateTracker.from_events("test", [UserUttered("hi")], [])

    priority_1 = ConstantPolicy(priority=1, predict_index=0)
    priority_2 = ConstantPolicy(priority=2, predict_index=1)

    # Same two policies in both orders — the winner must not change.
    policy_ensemble_0 = SimplePolicyEnsemble([priority_1, priority_2])
    policy_ensemble_1 = SimplePolicyEnsemble([priority_2, priority_1])

    priority_2_result = priority_2.predict_action_probabilities(
        tracker, domain, RegexInterpreter())

    i = 1  # index of priority_2 in ensemble_0
    result, best_policy = policy_ensemble_0.probabilities_using_best_policy(
        tracker, domain, RegexInterpreter())
    assert best_policy == "policy_{}_{}".format(i, type(priority_2).__name__)
    assert result == priority_2_result

    i = 0  # index of priority_2 in ensemble_1
    result, best_policy = policy_ensemble_1.probabilities_using_best_policy(
        tracker, domain, RegexInterpreter())
    assert best_policy == "policy_{}_{}".format(i, type(priority_2).__name__)
    assert result == priority_2_result
async def test_applied_events_after_action_session_start(
    default_channel: CollectingOutputChannel,
    template_nlg: TemplatedNaturalLanguageGenerator,
):
    """A user-triggered session start carries slots over but drops earlier turns."""
    slot_set = SlotSet("my_slot", "value")
    events = [
        slot_set,
        ActionExecuted(ACTION_LISTEN_NAME),
        # User triggers a restart manually by triggering the intent
        UserUttered(
            text=f"/{USER_INTENT_SESSION_START}",
            intent={"name": USER_INTENT_SESSION_START},
        ),
    ]
    tracker = DialogueStateTracker.from_events("🕵️♀️", events)

    # Mapping Policy kicks in and runs the session restart action
    events = await ActionSessionStart().run(
        default_channel, template_nlg, tracker, Domain.empty()
    )
    for event in events:
        tracker.update(event)

    # Only the carried-over slot and the fresh listen action remain applied.
    assert tracker.applied_events() == [slot_set, ActionExecuted(ACTION_LISTEN_NAME)]
def test_restrict_multiple_user_inputs_in_rules():
    """A rule containing more user turns than allowed fails RulePolicy training."""
    domain = Domain.from_yaml(
        f"""
        intents:
        - {GREET_INTENT_NAME}
        actions:
        - {UTTER_GREET_ACTION}
        """
    )
    policy = RulePolicy()
    greet_events = [
        UserUttered(intent={"name": GREET_INTENT_NAME}),
        ActionExecuted(UTTER_GREET_ACTION),
        ActionExecuted(ACTION_LISTEN_NAME),
    ]
    # One user turn more than the policy allows within a single rule.
    forbidden_rule = DialogueStateTracker.from_events(
        "bla",
        evts=[
            ActionExecuted(RULE_SNIPPET_ACTION_NAME),
            ActionExecuted(ACTION_LISTEN_NAME),
        ] + greet_events * (policy.ALLOWED_NUMBER_OF_USER_INPUTS + 1),
    )
    forbidden_rule.is_rule_tracker = True
    with pytest.raises(InvalidRule):
        policy.train([forbidden_rule], domain, RegexInterpreter())
async def test_whole_loop():
    """LoopAction.run chains activate -> do -> deactivate with the right events at each step."""
    expected_activation_events = [
        ActionExecutionRejected("tada"),
        ActionExecuted("test"),
    ]
    expected_do_events = [ActionExecuted("do")]
    expected_deactivation_events = [SlotSet("deactivated")]
    form_name = "my form"

    class MyLoop(LoopAction):
        def name(self) -> Text:
            return form_name

        async def activate(self, *args: Any) -> List[Event]:
            return expected_activation_events

        async def do(self, *args: Any) -> List[Event]:
            # `do` sees the loop activation plus the activation events.
            events_so_far = args[-1]
            assert events_so_far == [
                ActiveLoop(form_name), *expected_activation_events
            ]
            return expected_do_events

        async def deactivate(self, *args) -> List[Event]:
            # `deactivate` additionally sees the `do` events and the loop end.
            events_so_far = args[-1]
            assert events_so_far == [
                ActiveLoop(form_name),
                *expected_activation_events,
                *expected_do_events,
                ActiveLoop(None),
            ]
            return expected_deactivation_events

        async def is_done(self, *args) -> bool:
            # The loop is done once activation and `do` events are in place.
            events_so_far = args[-1]
            return events_so_far == [
                ActiveLoop(form_name),
                *expected_activation_events,
                *expected_do_events,
            ]

    tracker = DialogueStateTracker.from_events("some sender", [])
    domain = Domain.empty()

    action = MyLoop()
    actual = await action.run(
        CollectingOutputChannel(),
        TemplatedNaturalLanguageGenerator(domain.responses),
        tracker,
        domain,
    )
    assert actual == [
        ActiveLoop(form_name),
        *expected_activation_events,
        *expected_do_events,
        ActiveLoop(None),
        *expected_deactivation_events,
    ]
    "path, expected_format",
    [
        ("bla.json", RASA),
        ("other.md", MARKDOWN),
        ("other.yml", RASA_YAML),
        ("unknown", UNK),
    ],
)
def test_get_nlu_target_format(path: Text, expected_format: Text):
    """The NLU target format is inferred from the file extension."""
    assert interactive._get_nlu_target_format(path) == expected_format


@pytest.mark.parametrize(
    "trackers, expected_trackers",
    [
        ([DialogueStateTracker.from_events("one", [])], [deque([]), DEFAULT_SENDER_ID]),
        (
            [
                str(i)
                for i in range(
                    interactive.MAX_NUMBER_OF_TRAINING_STORIES_FOR_VISUALIZATION + 1
                )
            ],
            [DEFAULT_SENDER_ID],
        ),
    ],
)
# NOTE(review): this signature continues beyond the visible chunk.
async def test_initial_plotting_call(
    mock_endpoint: EndpointConfig,
    monkeypatch: MonkeyPatch,
    trackers: List[Text],
async def test_switch_forms_with_same_slot(default_agent: Agent):
    """Tests switching of forms, where the first slot is the same in both forms.

    Tests the fix for issue 7710"""
    # Define two forms in the domain, with same first slot
    slot_a = "my_slot_a"

    form_1 = "my_form_1"
    utter_ask_form_1 = f"Please provide the value for {slot_a} of form 1"

    form_2 = "my_form_2"
    utter_ask_form_2 = f"Please provide the value for {slot_a} of form 2"

    domain = f"""
    version: "2.0"
    nlu:
    - intent: order_status
      examples: |
        - check status of my order
        - when are my shoes coming in
    - intent: return
      examples: |
        - start a return
        - I don't want my shoes anymore
    forms:
      {form_1}:
        {slot_a}:
        - type: from_entity
          entity: number
      {form_2}:
        {slot_a}:
        - type: from_entity
          entity: number
    responses:
      utter_ask_{form_1}_{slot_a}:
      - text: {utter_ask_form_1}
      utter_ask_{form_2}_{slot_a}:
      - text: {utter_ask_form_2}
    """
    domain = Domain.from_yaml(domain)

    # Driving it like rasa/core/processor
    processor = MessageProcessor(
        default_agent.interpreter,
        default_agent.policy_ensemble,
        domain,
        InMemoryTrackerStore(domain),
        TemplatedNaturalLanguageGenerator(domain.templates),
    )

    # activate the first form
    tracker = DialogueStateTracker.from_events(
        "some-sender",
        evts=[
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered("order status", {
                "name": "form_1",
                "confidence": 1.0
            }),
            DefinePrevUserUtteredFeaturization(False),
        ],
    )
    # rasa/core/processor.predict_next_action
    prediction = PolicyPrediction([], "some_policy")
    action_1 = FormAction(form_1, None)
    await processor._run_action(
        action_1,
        tracker,
        CollectingOutputChannel(),
        TemplatedNaturalLanguageGenerator(domain.templates),
        prediction,
    )

    events_expected = [
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered("order status", {
            "name": "form_1",
            "confidence": 1.0
        }),
        DefinePrevUserUtteredFeaturization(False),
        ActionExecuted(form_1),
        ActiveLoop(form_1),
        SlotSet(REQUESTED_SLOT, slot_a),
        BotUttered(
            text=utter_ask_form_1,
            metadata={"template_name": f"utter_ask_{form_1}_{slot_a}"},
        ),
    ]
    assert tracker.applied_events() == events_expected

    next_events = [
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered("return my shoes", {
            "name": "form_2",
            "confidence": 1.0
        }),
        DefinePrevUserUtteredFeaturization(False),
    ]
    tracker.update_with_events(
        next_events,
        domain,
    )
    events_expected.extend(next_events)

    # form_1 is still active, and bot will first validate if the user utterance
    # provides valid data for the requested slot, which is rejected
    await processor._run_action(
        action_1,
        tracker,
        CollectingOutputChannel(),
        TemplatedNaturalLanguageGenerator(domain.templates),
        prediction,
    )
    events_expected.extend([ActionExecutionRejected(action_name=form_1)])
    assert tracker.applied_events() == events_expected

    # Next, bot predicts form_2
    action_2 = FormAction(form_2, None)
    await processor._run_action(
        action_2,
        tracker,
        CollectingOutputChannel(),
        TemplatedNaturalLanguageGenerator(domain.templates),
        prediction,
    )
    events_expected.extend([
        ActionExecuted(form_2),
        ActiveLoop(form_2),
        SlotSet(REQUESTED_SLOT, slot_a),
        BotUttered(
            text=utter_ask_form_2,
            metadata={"template_name": f"utter_ask_{form_2}_{slot_a}"},
        ),
    ])
    assert tracker.applied_events() == events_expected
async def test_request_correct_slots_after_unhappy_path_with_custom_required_slots():
    """After an unhappy path, the form re-asks the slot chosen by the custom validation action."""
    form_name = "some_form"
    slot_name_1 = "slot_1"
    slot_name_2 = "slot_2"
    domain = f"""
    slots:
      {slot_name_1}:
        type: any
      {slot_name_2}:
        type: any
    forms:
      {form_name}:
        {slot_name_1}:
        - type: from_intent
          intent: some_intent
          value: some_value
        {slot_name_2}:
        - type: from_intent
          intent: some_intent
          value: some_value
    actions:
    - validate_{form_name}
    """
    domain = Domain.from_yaml(domain)

    tracker = DialogueStateTracker.from_events(
        "default",
        [
            ActiveLoop(form_name),
            SlotSet(REQUESTED_SLOT, "slot_2"),
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered(
                "hello",
                intent={
                    "name": "greet",
                    "confidence": 1.0
                },
            ),
            ActionExecutionRejected(form_name),
            ActionExecuted("utter_greet"),
        ],
    )

    action_server_url = "http://my-action-server:5055/webhook"

    # Custom form validation action changes the order of the requested slots
    validate_return_events = [
        {
            "event": "slot",
            "name": REQUESTED_SLOT,
            "value": slot_name_2
        },
    ]

    # The form should ask the same slot again when coming back after unhappy path
    expected_events = [SlotSet(REQUESTED_SLOT, slot_name_2)]

    with aioresponses() as mocked:
        # Stub the action server so the custom validation returns our events.
        mocked.post(action_server_url, payload={"events": validate_return_events})
        action_server = EndpointConfig(action_server_url)
        action = FormAction(form_name, action_server)

        events = await action.run(
            CollectingOutputChannel(),
            TemplatedNaturalLanguageGenerator(domain.templates),
            tracker,
            domain,
        )
        assert events == expected_events
async def _predict_tracker_actions(
    tracker: DialogueStateTracker,
    agent: "Agent",
    fail_on_prediction_errors: bool = False,
    use_e2e: bool = False,
) -> Tuple[EvaluationStore, DialogueStateTracker, List[Dict[Text, Any]],
           List[EntityEvaluationResult], ]:
    """Replay a test-story tracker, predicting each action (and user turn if e2e).

    Returns the evaluation store, the partially replayed tracker, the list of
    per-action prediction records, and the collected entity prediction results.
    """
    processor = agent.processor
    tracker_eval_store = EvaluationStore()

    events = list(tracker.events)

    # Seed the partial tracker with only the first event; the remaining events
    # are replayed one by one so each prediction can be compared to the story.
    partial_tracker = DialogueStateTracker.from_events(
        tracker.sender_id,
        events[:1],
        agent.domain.slots,
        sender_source=tracker.sender_source,
    )
    tracker_actions = []
    policy_entity_results = []

    for event in events[1:]:
        if isinstance(event, ActionExecuted):
            (
                action_executed_result,
                prediction,
                entity_result,
            ) = await _collect_action_executed_predictions(
                processor, partial_tracker, event, fail_on_prediction_errors)
            if entity_result:
                policy_entity_results.append(entity_result)
            if action_executed_result.action_targets:
                tracker_eval_store.merge_store(action_executed_result)
                tracker_actions.append({
                    "action": action_executed_result.action_targets[0],
                    "predicted": action_executed_result.action_predictions[0],
                    "policy": prediction.policy_name,
                    "confidence": prediction.max_confidence,
                })
        elif use_e2e and isinstance(event, UserUttered):
            # This means that user utterance didn't have a user message, only intent,
            # so we can skip the NLU part and take the parse data directly.
            # Indirectly that means that the test story was in YAML format.
            if not event.text:
                predicted = event.parse_data
            # Indirectly that means that the test story was either:
            # in YAML format containing a user message, or in Markdown format.
            # Leaving that as it is because Markdown is in legacy mode.
            else:
                predicted = await processor.parse_message(
                    UserMessage(event.text))
            user_uttered_result = _collect_user_uttered_predictions(
                event, predicted, partial_tracker, fail_on_prediction_errors)

            tracker_eval_store.merge_store(user_uttered_result)
        else:
            partial_tracker.update(event)

    return tracker_eval_store, partial_tracker, tracker_actions, policy_entity_results
def test_predict_next_action_with_hidden_rules():
    """Rule-driven turns are flagged as hidden while story-driven turns are not."""
    rule_intent = "rule_intent"
    rule_action = "rule_action"
    story_intent = "story_intent"
    story_action = "story_action"
    rule_slot = "rule_slot"
    story_slot = "story_slot"
    domain = Domain.from_yaml(
        f"""
        version: "2.0"
        intents:
        - {rule_intent}
        - {story_intent}
        actions:
        - {rule_action}
        - {story_action}
        slots:
          {rule_slot}:
            type: text
          {story_slot}:
            type: text
        """
    )

    rule = TrackerWithCachedStates.from_events(
        "rule",
        domain=domain,
        slots=domain.slots,
        evts=[
            ActionExecuted(RULE_SNIPPET_ACTION_NAME),
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered(intent={"name": rule_intent}),
            ActionExecuted(rule_action),
            SlotSet(rule_slot, rule_slot),
            ActionExecuted(ACTION_LISTEN_NAME),
        ],
        is_rule_tracker=True,
    )
    story = TrackerWithCachedStates.from_events(
        "story",
        domain=domain,
        slots=domain.slots,
        evts=[
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered(intent={"name": story_intent}),
            ActionExecuted(story_action),
            SlotSet(story_slot, story_slot),
            ActionExecuted(ACTION_LISTEN_NAME),
        ],
    )
    interpreter = RegexInterpreter()
    ensemble = SimplePolicyEnsemble(
        policies=[RulePolicy(), MemoizationPolicy()])
    ensemble.train([rule, story], domain, interpreter)

    tracker_store = InMemoryTrackerStore(domain)
    lock_store = InMemoryLockStore()
    processor = MessageProcessor(
        interpreter,
        ensemble,
        domain,
        tracker_store,
        lock_store,
        TemplatedNaturalLanguageGenerator(domain.responses),
    )

    tracker = DialogueStateTracker.from_events(
        "casd",
        evts=[
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered(intent={"name": rule_intent}),
        ],
        slots=domain.slots,
    )

    # Rule turn: both the action and the following listen must be hidden.
    action, prediction = processor.predict_next_action(tracker)
    assert action._name == rule_action
    assert prediction.hide_rule_turn

    processor._log_action_on_tracker(
        tracker, action, [SlotSet(rule_slot, rule_slot)], prediction)

    action, prediction = processor.predict_next_action(tracker)
    assert isinstance(action, ActionListen)
    assert prediction.hide_rule_turn

    processor._log_action_on_tracker(tracker, action, None, prediction)

    tracker.events.append(UserUttered(intent={"name": story_intent}))

    # rules are hidden correctly if memo policy predicts next actions correctly
    action, prediction = processor.predict_next_action(tracker)
    assert action._name == story_action
    assert not prediction.hide_rule_turn

    processor._log_action_on_tracker(
        tracker, action, [SlotSet(story_slot, story_slot)], prediction)

    action, prediction = processor.predict_next_action(tracker)
    assert isinstance(action, ActionListen)
    assert not prediction.hide_rule_turn
async def test_form_unhappy_path_no_validation_from_story():
    """A training story teaches RulePolicy to skip form validation on the unhappy path."""
    form_name = "some_form"
    handle_rejection_action_name = "utter_handle_rejection"

    domain = Domain.from_yaml(
        f"""
        intents:
        - {GREET_INTENT_NAME}
        actions:
        - {UTTER_GREET_ACTION}
        - {handle_rejection_action_name}
        - some-action
        slots:
          {REQUESTED_SLOT}:
            type: unfeaturized
        forms:
        - {form_name}
        """
    )

    unhappy_story = TrackerWithCachedStates.from_events(
        "bla",
        domain=domain,
        slots=domain.slots,
        evts=[
            # We are in an active form
            ActionExecuted(form_name),
            ActiveLoop(form_name),
            ActionExecuted(ACTION_LISTEN_NAME),
            # When a user says "hi", and the form is unhappy,
            # we want to run a specific action
            UserUttered(intent={"name": GREET_INTENT_NAME}),
            ActionExecuted(handle_rejection_action_name),
            ActionExecuted(ACTION_LISTEN_NAME),
            # Next user utterance is an answer to the previous question
            # and shouldn't be validated by the form
            UserUttered(intent={"name": GREET_INTENT_NAME}),
            ActionExecuted(form_name),
            ActionExecuted(ACTION_LISTEN_NAME),
        ],
    )

    policy = RulePolicy()
    policy.train([unhappy_story], domain, RegexInterpreter())

    # Check that RulePolicy predicts no validation to handle unhappy path
    conversation_events = [
        ActionExecuted(form_name),
        ActiveLoop(form_name),
        SlotSet(REQUESTED_SLOT, "some value"),
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered("haha", {"name": GREET_INTENT_NAME}),
        ActionExecutionRejected(form_name),
        ActionExecuted(handle_rejection_action_name),
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered("haha", {"name": GREET_INTENT_NAME}),
    ]

    tracker = DialogueStateTracker.from_events(
        "casd", evts=conversation_events, slots=domain.slots)
    action_probabilities = policy.predict_action_probabilities(
        tracker, domain, RegexInterpreter())
    # there is no rule for next action
    assert max(action_probabilities) == policy._core_fallback_threshold
    # check that RulePolicy entered unhappy path based on the training story
    assert tracker.events[-1] == LoopInterrupted(True)
async def test_form_unhappy_path_no_validation_from_rule():
    """A multi-turn rule teaches RulePolicy to skip form validation on the unhappy path."""
    form_name = "some_form"
    handle_rejection_action_name = "utter_handle_rejection"

    domain = Domain.from_yaml(
        f"""
        intents:
        - {GREET_INTENT_NAME}
        actions:
        - {UTTER_GREET_ACTION}
        - {handle_rejection_action_name}
        - some-action
        slots:
          {REQUESTED_SLOT}:
            type: unfeaturized
        forms:
        - {form_name}
        """
    )

    unhappy_rule = TrackerWithCachedStates.from_events(
        "bla",
        domain=domain,
        slots=domain.slots,
        evts=[
            # We are in an active form
            ActiveLoop(form_name),
            SlotSet(REQUESTED_SLOT, "bla"),
            ActionExecuted(RULE_SNIPPET_ACTION_NAME),
            ActionExecuted(ACTION_LISTEN_NAME),
            # When a user says "hi", and the form is unhappy,
            # we want to run a specific action
            UserUttered(intent={"name": GREET_INTENT_NAME}),
            ActionExecuted(handle_rejection_action_name),
            # Next user utterance is an answer to the previous question
            # and shouldn't be validated by the form
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered(intent={"name": GREET_INTENT_NAME}),
            ActionExecuted(form_name),
            ActionExecuted(ACTION_LISTEN_NAME),
        ],
        is_rule_tracker=True,
    )

    # unhappy rule is multi user turn rule, therefore remove restriction for policy
    policy = RulePolicy(restrict_rules=False)
    # RulePolicy should memorize that unhappy_rule overrides GREET_RULE
    policy.train([GREET_RULE, unhappy_rule], domain, RegexInterpreter())

    # Check that RulePolicy predicts action to handle unhappy path
    conversation_events = [
        ActionExecuted(form_name),
        ActiveLoop(form_name),
        SlotSet(REQUESTED_SLOT, "some value"),
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered("haha", {"name": GREET_INTENT_NAME}),
        ActionExecutionRejected(form_name),
    ]
    action_probabilities = policy.predict_action_probabilities(
        DialogueStateTracker.from_events(
            "casd", evts=conversation_events, slots=domain.slots),
        domain,
        RegexInterpreter(),
    )
    assert_predicted_action(action_probabilities, domain,
                            handle_rejection_action_name)

    # Check that RulePolicy predicts action_listen
    conversation_events.append(ActionExecuted(handle_rejection_action_name))
    action_probabilities = policy.predict_action_probabilities(
        DialogueStateTracker.from_events(
            "casd", evts=conversation_events, slots=domain.slots),
        domain,
        RegexInterpreter(),
    )
    assert_predicted_action(action_probabilities, domain, ACTION_LISTEN_NAME)

    # Check that RulePolicy triggers form again after handling unhappy path
    conversation_events.append(ActionExecuted(ACTION_LISTEN_NAME))
    tracker = DialogueStateTracker.from_events(
        "casd", evts=conversation_events, slots=domain.slots)
    action_probabilities = policy.predict_action_probabilities(
        tracker, domain, RegexInterpreter())
    assert_predicted_action(action_probabilities, domain, form_name)
    # check that RulePolicy entered unhappy path based on the training story
    assert tracker.events[-1] == LoopInterrupted(True)
async def test_form_unhappy_path_from_story():
    """A story-based unhappy path predicts the greet response, then no rule applies."""
    form_name = "some_form"
    handle_rejection_action_name = "utter_handle_rejection"

    domain = Domain.from_yaml(
        f"""
        intents:
        - {GREET_INTENT_NAME}
        actions:
        - {UTTER_GREET_ACTION}
        - {handle_rejection_action_name}
        - some-action
        slots:
          {REQUESTED_SLOT}:
            type: unfeaturized
        forms:
        - {form_name}
        """
    )

    unhappy_story = TrackerWithCachedStates.from_events(
        "bla",
        domain=domain,
        slots=domain.slots,
        evts=[
            # We are in an active form
            ActionExecuted(form_name),
            ActiveLoop(form_name),
            ActionExecuted(ACTION_LISTEN_NAME),
            # in training stories there is either intent or text, never both
            UserUttered(intent={"name": GREET_INTENT_NAME}),
            ActionExecuted(UTTER_GREET_ACTION),
            # After our bot says "hi", we want to run a specific action
            ActionExecuted(handle_rejection_action_name),
            ActionExecuted(form_name),
            ActionExecuted(ACTION_LISTEN_NAME),
        ],
    )

    policy = RulePolicy()
    policy.train([GREET_RULE, unhappy_story], domain, RegexInterpreter())

    # Check that RulePolicy predicts action to handle unhappy path
    conversation_events = [
        ActionExecuted(form_name),
        ActiveLoop(form_name),
        SlotSet(REQUESTED_SLOT, "some value"),
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered("haha", {"name": GREET_INTENT_NAME}),
        ActionExecutionRejected(form_name),
    ]
    action_probabilities = policy.predict_action_probabilities(
        DialogueStateTracker.from_events(
            "casd", evts=conversation_events, slots=domain.slots),
        domain,
        RegexInterpreter(),
    )
    assert_predicted_action(action_probabilities, domain, UTTER_GREET_ACTION)

    # Check that RulePolicy doesn't trigger form or action_listen
    # after handling unhappy path
    conversation_events.append(ActionExecuted(handle_rejection_action_name))
    action_probabilities = policy.predict_action_probabilities(
        DialogueStateTracker.from_events(
            "casd", evts=conversation_events, slots=domain.slots),
        domain,
        RegexInterpreter(),
    )
    assert max(action_probabilities) == policy._core_fallback_threshold
async def test_form_unhappy_path_from_in_form_rule():
    """An in-form rule handles the rejection, then the form is triggered again."""
    form_name = "some_form"
    handle_rejection_action_name = "utter_handle_rejection"

    domain = Domain.from_yaml(
        f"""
        intents:
        - {GREET_INTENT_NAME}
        actions:
        - {UTTER_GREET_ACTION}
        - {handle_rejection_action_name}
        - some-action
        slots:
          {REQUESTED_SLOT}:
            type: unfeaturized
        forms:
        - {form_name}
        """
    )

    unhappy_rule = TrackerWithCachedStates.from_events(
        "bla",
        domain=domain,
        slots=domain.slots,
        evts=[
            # We are in an active form
            ActiveLoop(form_name),
            SlotSet(REQUESTED_SLOT, "bla"),
            ActionExecuted(RULE_SNIPPET_ACTION_NAME),
            ActionExecuted(ACTION_LISTEN_NAME),
            # When a user says "hi", and the form is unhappy,
            # we want to run a specific action
            UserUttered(intent={"name": GREET_INTENT_NAME}),
            ActionExecuted(handle_rejection_action_name),
            ActionExecuted(form_name),
            ActionExecuted(ACTION_LISTEN_NAME),
        ],
        is_rule_tracker=True,
    )

    policy = RulePolicy()
    # RulePolicy should memorize that unhappy_rule overrides GREET_RULE
    policy.train([GREET_RULE, unhappy_rule], domain, RegexInterpreter())

    # Check that RulePolicy predicts action to handle unhappy path
    conversation_events = [
        ActionExecuted(form_name),
        ActiveLoop(form_name),
        SlotSet(REQUESTED_SLOT, "some value"),
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered("haha", {"name": GREET_INTENT_NAME}),
        ActionExecutionRejected(form_name),
    ]
    action_probabilities = policy.predict_action_probabilities(
        DialogueStateTracker.from_events(
            "casd", evts=conversation_events, slots=domain.slots),
        domain,
        RegexInterpreter(),
    )
    assert_predicted_action(action_probabilities, domain,
                            handle_rejection_action_name)

    # Check that RulePolicy triggers form again after handling unhappy path
    conversation_events.append(ActionExecuted(handle_rejection_action_name))
    action_probabilities = policy.predict_action_probabilities(
        DialogueStateTracker.from_events(
            "casd", evts=conversation_events, slots=domain.slots),
        domain,
        RegexInterpreter(),
    )
    assert_predicted_action(action_probabilities, domain, form_name)
    # NOTE(review): this assert is the tail of a test defined in a previous
    # chunk; `test_msgs` and `msgs` are defined there.
    assert test_msgs == interactive._filter_messages(msgs)


@pytest.mark.parametrize(
    "path, expected_format",
    [("bla.json", RASA), ("other.yml", RASA_YAML), ("unknown", UNK)],
)
def test_get_nlu_target_format(path: Text, expected_format: Text):
    """The NLU target format is inferred from the file extension."""
    assert interactive._get_nlu_target_format(path) == expected_format


@pytest.mark.parametrize(
    "trackers, expected_trackers",
    [
        ([DialogueStateTracker.from_events("one", [])], [deque([]), DEFAULT_SENDER_ID]),
        (
            [
                str(i)
                for i in range(
                    interactive.MAX_NUMBER_OF_TRAINING_STORIES_FOR_VISUALIZATION + 1
                )
            ],
            [DEFAULT_SENDER_ID],
        ),
    ],
)
# NOTE(review): this signature continues beyond the visible chunk.
async def test_initial_plotting_call(
    mock_endpoint: EndpointConfig,
    monkeypatch: MonkeyPatch,
    trackers: List[Text],
def _get_tracker(conversation_id: Text) -> DialogueStateTracker:
    """Build a tracker for `conversation_id` from the surrounding `events` mapping."""
    conversation_events = events[conversation_id]
    return DialogueStateTracker.from_events(conversation_id, conversation_events)
def test_applied_events_with_loop_happy_path(
        events: List[Event], expected_applied_events: List[Event]):
    """Applied events for a happy-path loop conversation match the expectation."""
    tracker = DialogueStateTracker.from_events("👋", events)
    assert tracker.applied_events() == expected_applied_events
    LoopInterrupted,
)
from rasa.shared.nlu.interpreter import RegexInterpreter
from rasa.core.nlg import TemplatedNaturalLanguageGenerator
from rasa.core.policies.rule_policy import RulePolicy, InvalidRule
from rasa.shared.core.trackers import DialogueStateTracker
from rasa.shared.core.generator import TrackerWithCachedStates

# Shared fixtures for the rule-policy tests in this module.
UTTER_GREET_ACTION = "utter_greet"
GREET_INTENT_NAME = "greet"
# A minimal FAQ-style rule: whenever the user greets, reply with the greet
# utterance, regardless of conversation context.
GREET_RULE = DialogueStateTracker.from_events(
    "greet rule",
    evts=[
        ActionExecuted(RULE_SNIPPET_ACTION_NAME),
        ActionExecuted(ACTION_LISTEN_NAME),
        # Greet is a FAQ here and gets triggered in any context
        UserUttered(intent={"name": GREET_INTENT_NAME}),
        ActionExecuted(UTTER_GREET_ACTION),
        ActionExecuted(ACTION_LISTEN_NAME),
    ],
)
GREET_RULE.is_rule_tracker = True


def _form_submit_rule(domain: Domain, submit_action_name: Text,
                      form_name: Text) -> TrackerWithCachedStates:
    """Build a rule tracker that submits `form_name` via `submit_action_name`."""
    # NOTE(review): the event list continues beyond the visible chunk.
    return TrackerWithCachedStates.from_events(
        "form submit rule",
        domain=domain,
        slots=domain.slots,
        evts=[
async def _predict_tracker_actions(
    tracker: DialogueStateTracker,
    agent: "Agent",
    fail_on_prediction_errors: bool = False,
    use_e2e: bool = False,
) -> Tuple[EvaluationStore, DialogueStateTracker, List[Dict[Text, Any]]]:
    """Replay a test-story tracker and compare predicted vs. actual steps.

    Walks the tracker's events, asking the processor to predict each
    `ActionExecuted` step (and, in e2e mode, to re-parse each user message),
    and records expected-vs-predicted pairs in an `EvaluationStore`.

    Args:
        tracker: the fully specified test-story tracker to evaluate against.
        agent: agent whose processor and domain are used for predictions.
        fail_on_prediction_errors: forwarded to the collection helpers;
            presumably raises on mismatch instead of just recording it —
            confirm in `_collect_action_executed_predictions`.
        use_e2e: if True, user utterances with text are re-parsed through NLU.

    Returns:
        Tuple of (evaluation store with all collected predictions,
        the partially replayed tracker, and a per-action summary list).
    """
    processor = agent.create_processor()
    tracker_eval_store = EvaluationStore()

    events = list(tracker.events)

    # Seed the replay tracker with only the first event; the remaining events
    # are fed in one by one below so predictions see the story "so far".
    partial_tracker = DialogueStateTracker.from_events(
        tracker.sender_id,
        events[:1],
        agent.domain.slots,
        sender_source=tracker.sender_source,
    )
    tracker_actions: List[Dict[Text, Any]] = []
    should_predict_another_action = True
    # Counts consecutive action predictions since the last user message,
    # used for the action-loop circuit breaker.
    num_predicted_actions = 0

    for event in events[1:]:
        if isinstance(event, ActionExecuted):
            # Trip the circuit breaker if too many actions were predicted
            # without an intervening user turn.
            circuit_breaker_tripped = processor.is_action_limit_reached(
                num_predicted_actions, should_predict_another_action)
            (action_executed_result, prediction) = _collect_action_executed_predictions(
                processor,
                partial_tracker,
                event,
                fail_on_prediction_errors,
                circuit_breaker_tripped,
            )
            tracker_eval_store.merge_store(action_executed_result)
            # One summary row per action step: gold target, prediction, and
            # which policy produced it with what confidence.
            tracker_actions.append({
                "action": action_executed_result.action_targets[0],
                "predicted": action_executed_result.action_predictions[0],
                "policy": prediction.policy_name,
                "confidence": prediction.max_confidence,
            })
            should_predict_another_action = processor.should_predict_another_action(
                action_executed_result.action_predictions[0])
            num_predicted_actions += 1
        elif use_e2e and isinstance(event, UserUttered):
            # This means that user utterance didn't have a user message, only intent,
            # so we can skip the NLU part and take the parse data directly.
            # Indirectly that means that the test story was in YAML format.
            if not event.text:
                predicted = event.parse_data
            # Indirectly that means that the test story was in Markdown format.
            # Leaving that as it is because Markdown is in legacy mode.
            else:
                predicted = await processor.parse_message(
                    UserMessage(event.text))
            user_uttered_result = _collect_user_uttered_predictions(
                event, predicted, partial_tracker, fail_on_prediction_errors)

            tracker_eval_store.merge_store(user_uttered_result)
        else:
            # Non-action, non-e2e-user event: apply it to the replay tracker
            # as-is. A user turn resets the circuit-breaker counter.
            partial_tracker.update(event)
            if isinstance(event, UserUttered):
                num_predicted_actions = 0

    return tracker_eval_store, partial_tracker, tracker_actions
async def test_one_stage_fallback_rule():
    """Fallback rule fires on nlu fallback; after reverting, the greet rule applies."""
    domain = Domain.from_yaml(
        f"""
intents:
- {GREET_INTENT_NAME}
- {DEFAULT_NLU_FALLBACK_INTENT_NAME}
actions:
- {UTTER_GREET_ACTION}
"""
    )

    # Rule: whenever NLU falls back, run the default fallback action.
    recover_rule = TrackerWithCachedStates.from_events(
        "bla",
        domain=domain,
        slots=domain.slots,
        evts=[
            ActionExecuted(RULE_SNIPPET_ACTION_NAME),
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered(intent={"name": DEFAULT_NLU_FALLBACK_INTENT_NAME}),
            ActionExecuted(ACTION_DEFAULT_FALLBACK_NAME),
            ActionExecuted(ACTION_LISTEN_NAME),
        ],
        is_rule_tracker=True,
    )

    # Rule: greet is answered with the greet utterance, but only at the
    # conversation start (no rule-snippet prefix).
    greet_rule_at_start = TrackerWithCachedStates.from_events(
        "bla",
        domain=domain,
        evts=[
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered(intent={"name": GREET_INTENT_NAME}),
            ActionExecuted(UTTER_GREET_ACTION),
            ActionExecuted(ACTION_LISTEN_NAME),
        ],
        is_rule_tracker=True,
    )

    policy = RulePolicy()
    policy.train(
        [greet_rule_at_start, recover_rule],
        domain,
        RegexInterpreter(),
    )

    def _tracker_for(history):
        # Rebuild the conversation tracker from the events accumulated so far.
        return DialogueStateTracker.from_events(
            "casd", evts=history, slots=domain.slots
        )

    # RulePolicy predicts fallback action
    history = [
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered("dasdakl;fkasd", {"name": DEFAULT_NLU_FALLBACK_INTENT_NAME}),
    ]
    tracker = _tracker_for(history)
    prediction = policy.predict_action_probabilities(
        tracker, domain, RegexInterpreter()
    )
    assert_predicted_action(prediction, domain, ACTION_DEFAULT_FALLBACK_NAME)

    # Fallback action reverts fallback events, next action is `ACTION_LISTEN`
    history += await ActionDefaultFallback().run(
        CollectingOutputChannel(),
        TemplatedNaturalLanguageGenerator(domain.templates),
        tracker,
        domain,
    )

    # Rasa is back on track when user rephrased intent
    history += [
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered("haha", {"name": GREET_INTENT_NAME}),
    ]
    tracker = _tracker_for(history)
    prediction = policy.predict_action_probabilities(
        tracker, domain, RegexInterpreter()
    )
    assert_predicted_action(prediction, domain, UTTER_GREET_ACTION)