def test_contradicting_rules_and_stories():
    """Training must fail when a story contradicts an existing rule."""
    utter_anti_greet_action = "utter_anti_greet"
    domain = Domain.from_yaml(
        f"""
        intents:
        - {GREET_INTENT_NAME}
        actions:
        - {UTTER_GREET_ACTION}
        - {utter_anti_greet_action}
        """
    )
    policy = RulePolicy()

    # A story that answers the greet intent with a different action than
    # `GREET_RULE` prescribes.
    anti_greet_story = TrackerWithCachedStates.from_events(
        "anti greet story",
        domain=domain,
        slots=domain.slots,
        evts=[
            ActionExecuted(RULE_SNIPPET_ACTION_NAME),
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered(intent={"name": GREET_INTENT_NAME}),
            ActionExecuted(utter_anti_greet_action),
            ActionExecuted(ACTION_LISTEN_NAME),
        ],
    )

    with pytest.raises(InvalidRule) as excinfo:
        policy.train([GREET_RULE, anti_greet_story], domain, RegexInterpreter())

    # The error message should name both the contradicting action and tracker.
    assert all(
        name in excinfo.value.message
        for name in {utter_anti_greet_action, anti_greet_story.sender_id}
    )
def test_provide(
    default_model_storage: ModelStorage,
    default_execution_context: ExecutionContext,
):
    """The provider exposes exactly the rule-only data a trained policy learned."""
    resource = Resource("some resource")
    domain = Domain.load("examples/rules/domain.yml")
    trackers = rasa.core.training.load_data(
        "examples/rules/data/rules.yml", domain
    )

    policy = RulePolicy.create(
        RulePolicy.get_default_config(),
        default_model_storage,
        resource,
        default_execution_context,
    )
    policy.train(trackers, domain)

    provider = RuleOnlyDataProvider.load(
        {}, default_model_storage, resource, default_execution_context
    )
    rule_only_data = provider.provide()

    assert rule_only_data
    # Both lookup sections must round-trip through the provider unchanged.
    for key in [RULE_ONLY_SLOTS, RULE_ONLY_LOOPS]:
        assert rule_only_data[key] == policy.lookup[key]
def test_predict_core_fallback(
    rule_policy: RulePolicy,
    expected_confidence: float,
    expected_prediction: Text,
):
    """An intent not covered by any rule triggers the configured fallback."""
    unhandled_intent = "other"
    domain = Domain.from_yaml(
        f"""
        intents:
        - {GREET_INTENT_NAME}
        - {unhandled_intent}
        actions:
        - {UTTER_GREET_ACTION}
        - my_core_fallback
        """
    )
    rule_policy.train([GREET_RULE], domain, RegexInterpreter())

    conversation = DialogueStateTracker.from_events(
        "bla2",
        evts=[
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered("haha", {"name": unhandled_intent}),
        ],
    )

    action_probabilities = rule_policy.predict_action_probabilities(
        conversation, domain, RegexInterpreter()
    )
    assert_predicted_action(
        action_probabilities, domain, expected_prediction, expected_confidence
    )
async def test_failing_form_activation_due_to_no_rule():
    """Without a form-activation rule, the policy can only offer the fallback."""
    form_name = "some_form"
    bye_intent = "bye"
    domain = Domain.from_yaml(
        f"""
        intents:
        - {GREET_INTENT_NAME}
        - {bye_intent}
        actions:
        - {UTTER_GREET_ACTION}
        - some-action
        slots:
          {REQUESTED_SLOT}:
            type: unfeaturized
        forms:
        - {form_name}
        """
    )
    policy = RulePolicy()
    policy.train([GREET_RULE], domain, RegexInterpreter())

    conversation_events = [
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered("haha", {"name": bye_intent}),
    ]

    # RulePolicy has no matching rule since no rule for form activation is given
    action_probabilities = policy.predict_action_probabilities(
        DialogueStateTracker.from_events(
            "casd", evts=conversation_events, slots=domain.slots
        ),
        domain,
        RegexInterpreter(),
    )

    assert max(action_probabilities) == policy._core_fallback_threshold
def test_restrict_multiple_user_inputs_in_rules():
    """A rule with too many user turns is rejected at training time."""
    domain = Domain.from_yaml(
        f"""
        intents:
        - {GREET_INTENT_NAME}
        actions:
        - {UTTER_GREET_ACTION}
        """
    )
    policy = RulePolicy()

    # One user turn plus the bot's response.
    single_user_turn = [
        UserUttered(intent={"name": GREET_INTENT_NAME}),
        ActionExecuted(UTTER_GREET_ACTION),
        ActionExecuted(ACTION_LISTEN_NAME),
    ]
    # Exceed the allowed number of user inputs by exactly one.
    events = [
        ActionExecuted(RULE_SNIPPET_ACTION_NAME),
        ActionExecuted(ACTION_LISTEN_NAME),
    ] + single_user_turn * (policy.ALLOWED_NUMBER_OF_USER_INPUTS + 1)

    forbidden_rule = DialogueStateTracker.from_events("bla", evts=events)
    forbidden_rule.is_rule_tracker = True

    with pytest.raises(InvalidRule):
        policy.train([forbidden_rule], domain, RegexInterpreter())
def test_faq_rule():
    """A simple FAQ-style rule predicts the greet response after a greet."""
    domain = Domain.from_yaml(
        f"""
        intents:
        - {GREET_INTENT_NAME}
        actions:
        - {UTTER_GREET_ACTION}
        """
    )
    policy = RulePolicy()
    policy.train([GREET_RULE], domain, RegexInterpreter())

    # remove first ... action and utter_greet and last action_listen from greet rule
    conversation = DialogueStateTracker.from_events(
        "simple greet",
        evts=[
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered("haha", {"name": GREET_INTENT_NAME}),
        ],
    )
    prediction = policy.predict_action_probabilities(
        conversation, domain, RegexInterpreter()
    )

    assert_predicted_action(prediction, domain, UTTER_GREET_ACTION)
def test_predict_nothing_if_fallback_disabled():
    """With fallback prediction off, an unhandled intent yields zero confidence."""
    unhandled_intent = "other"
    domain = Domain.from_yaml(
        f"""
        intents:
        - {GREET_INTENT_NAME}
        - {unhandled_intent}
        actions:
        - {UTTER_GREET_ACTION}
        """
    )
    policy = RulePolicy(enable_fallback_prediction=False)
    policy.train([GREET_RULE], domain, RegexInterpreter())

    conversation = DialogueStateTracker.from_events(
        "bla2",
        evts=[
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered("haha", {"name": unhandled_intent}),
        ],
    )
    prediction = policy.predict_action_probabilities(
        conversation, domain, RegexInterpreter()
    )

    assert prediction.max_confidence == 0
def test_default_actions(intent_name: Text, expected_action_name: Text):
    """Default intents map to their default actions even without explicit rules."""
    domain = Domain.from_yaml(
        f"""
        intents:
        - {GREET_INTENT_NAME}
        actions:
        - {UTTER_GREET_ACTION}
        """
    )
    policy = RulePolicy()
    policy.train([GREET_RULE], domain, RegexInterpreter())

    # Greet first, then send the parametrized (default) intent.
    conversation = DialogueStateTracker.from_events(
        "bla2",
        evts=[
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered("haha", {"name": GREET_INTENT_NAME}),
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered("haha", {"name": intent_name}),
        ],
    )
    prediction = policy.predict_action_probabilities(
        conversation, domain, RegexInterpreter()
    )

    assert_predicted_action(prediction, domain, expected_action_name)
async def test_restart_triggers_session_start(
    default_channel: CollectingOutputChannel,
    default_processor: MessageProcessor,
    monkeypatch: MonkeyPatch,
):
    """`/restart` should restart the conversation and start a fresh session.

    Fixes two weaknesses in the final check: the loop variable shadowed the
    `expected` list, and a bare `zip` silently truncated, so the test would
    pass even if the tracker recorded fewer events than expected.
    """
    # The rule policy is trained and used so as to allow the default action
    # ActionRestart to be predicted.
    rule_policy = RulePolicy()
    rule_policy.train([], default_processor.domain, RegexInterpreter())
    monkeypatch.setattr(
        default_processor.policy_ensemble,
        "policies",
        [rule_policy, *default_processor.policy_ensemble.policies],
    )

    sender_id = uuid.uuid4().hex

    entity = "name"
    slot_1 = {entity: "name1"}
    await default_processor.handle_message(
        UserMessage(f"/greet{json.dumps(slot_1)}", default_channel, sender_id)
    )

    assert default_channel.latest_output() == {
        "recipient_id": sender_id,
        "text": "hey there name1!",
    }

    # This restarts the chat
    await default_processor.handle_message(
        UserMessage("/restart", default_channel, sender_id)
    )

    tracker = default_processor.tracker_store.get_or_create_tracker(sender_id)

    expected_events = [
        ActionExecuted(ACTION_SESSION_START_NAME),
        SessionStarted(),
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered(
            f"/greet{json.dumps(slot_1)}",
            {INTENT_NAME_KEY: "greet", "confidence": 1.0},
            [{"entity": entity, "start": 6, "end": 23, "value": "name1"}],
        ),
        SlotSet(entity, slot_1[entity]),
        DefinePrevUserUtteredFeaturization(use_text_for_featurization=False),
        ActionExecuted("utter_greet"),
        BotUttered("hey there name1!", metadata={"template_name": "utter_greet"}),
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered("/restart", {INTENT_NAME_KEY: "restart", "confidence": 1.0}),
        DefinePrevUserUtteredFeaturization(use_text_for_featurization=False),
        ActionExecuted(ACTION_RESTART_NAME),
        Restarted(),
        ActionExecuted(ACTION_SESSION_START_NAME),
        SessionStarted(),
        # No previous slot is set due to restart.
        ActionExecuted(ACTION_LISTEN_NAME),
    ]

    actual_events = list(tracker.events)
    # `zip` would hide missing trailing events — require full coverage first.
    assert len(actual_events) >= len(expected_events)
    for actual, expected in zip(actual_events, expected_events):
        assert actual == expected
def test_immediate_submit():
    """A form that deactivates right after activation triggers the submit rule."""
    form_name = "some_form"
    submit_action_name = "utter_submit"
    entity = "some_entity"
    slot = "some_slot"
    domain = Domain.from_yaml(
        f"""
        intents:
        - {GREET_INTENT_NAME}
        actions:
        - {UTTER_GREET_ACTION}
        - some-action
        - {submit_action_name}
        slots:
          {REQUESTED_SLOT}:
            type: unfeaturized
          {slot}:
            type: unfeaturized
        forms:
        - {form_name}
        entities:
        - {entity}
        """
    )

    form_activation_rule = _form_activation_rule(
        domain, form_name, GREET_INTENT_NAME
    )
    form_submit_rule = _form_submit_rule(domain, submit_action_name, form_name)

    policy = RulePolicy()
    policy.train(
        [GREET_RULE, form_activation_rule, form_submit_rule],
        domain,
        RegexInterpreter(),
    )

    form_conversation = DialogueStateTracker.from_events(
        "in a form",
        evts=[
            # Form was activated
            ActionExecuted(ACTION_LISTEN_NAME),
            # The same intent which activates the form also deactivates it
            UserUttered(
                "haha",
                {"name": GREET_INTENT_NAME},
                entities=[{"entity": entity, "value": "Bruce Wayne"}],
            ),
            SlotSet(slot, "Bruce"),
            ActionExecuted(form_name),
            SlotSet("bla", "bla"),
            ActiveLoop(None),
            SlotSet(REQUESTED_SLOT, None),
        ],
        slots=domain.slots,
    )

    # RulePolicy predicts action which handles submit
    action_probabilities = policy.predict_action_probabilities(
        form_conversation, domain, RegexInterpreter()
    )
    assert_predicted_action(action_probabilities, domain, submit_action_name)
async def trained_rule_policy(trained_rule_policy_domain: Domain) -> RulePolicy:
    """Fixture: a `RulePolicy` trained on the example rules project."""
    trackers = await training.load_data(
        "examples/rules/data/rules.yml", trained_rule_policy_domain
    )
    policy = RulePolicy()
    policy.train(trackers, trained_rule_policy_domain, RegexInterpreter())
    return policy
def _raise_if_a_rule_policy_is_incompatible_with_domain(
    self, domain: Domain
) -> None:
    """Validates the rule policies against the domain.

    Raises:
        `InvalidDomain` if domain and rule policies do not match
    """
    for node in self._graph_schema.nodes.values():
        if node.uses != RulePolicy:
            continue
        RulePolicy.raise_if_incompatible_with_domain(
            config=node.config, domain=domain
        )
def test_no_incomplete_rules_due_to_slots_after_listen():
    """A rule omitting a slot set right after listen must not be flagged incomplete."""
    some_action = "some_action"
    some_slot = "some_slot"
    domain = Domain.from_yaml(
        f"""
        intents:
        - {GREET_INTENT_NAME}
        actions:
        - {some_action}
        entities:
        - {some_slot}
        slots:
          {some_slot}:
            type: text
        """
    )
    policy = RulePolicy()

    # Rule where the entity fills the slot immediately after the user turn.
    rule_with_slot = TrackerWithCachedStates.from_events(
        "complete_rule",
        domain=domain,
        slots=domain.slots,
        evts=[
            ActionExecuted(RULE_SNIPPET_ACTION_NAME),
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered(
                intent={"name": GREET_INTENT_NAME},
                entities=[{"entity": some_slot, "value": "bla"}],
            ),
            SlotSet(some_slot, "bla"),
            ActionExecuted(some_action),
            ActionExecuted(ACTION_LISTEN_NAME),
        ],
        is_rule_tracker=True,
    )
    # Same rule but without the slot event — still a valid rule.
    rule_without_slot = TrackerWithCachedStates.from_events(
        "potentially_incomplete_rule",
        domain=domain,
        slots=domain.slots,
        evts=[
            ActionExecuted(RULE_SNIPPET_ACTION_NAME),
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered(intent={"name": GREET_INTENT_NAME}),
            ActionExecuted(some_action),
            ActionExecuted(ACTION_LISTEN_NAME),
        ],
        is_rule_tracker=True,
    )

    # Must not raise.
    policy.train([rule_with_slot, rule_without_slot], domain, RegexInterpreter())
def test_form_submit_rule():
    """Deactivating a form triggers the action from the form-submit rule."""
    form_name = "some_form"
    submit_action_name = "utter_submit"
    domain = Domain.from_yaml(
        f"""
        intents:
        - {GREET_INTENT_NAME}
        actions:
        - {UTTER_GREET_ACTION}
        - some-action
        - {submit_action_name}
        slots:
          {REQUESTED_SLOT}:
            type: unfeaturized
        forms:
        - {form_name}
        """
    )

    form_submit_rule = _form_submit_rule(domain, submit_action_name, form_name)

    policy = RulePolicy()
    policy.train([GREET_RULE, form_submit_rule], domain, RegexInterpreter())

    form_conversation = DialogueStateTracker.from_events(
        "in a form",
        evts=[
            # Form was activated
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered("haha", {"name": GREET_INTENT_NAME}),
            ActionExecuted(form_name),
            Form(form_name),
            SlotSet(REQUESTED_SLOT, "some value"),
            ActionExecuted(ACTION_LISTEN_NAME),
            # User responds and fills requested slot
            UserUttered("haha", {"name": GREET_INTENT_NAME}),
            ActionExecuted(form_name),
            # Form get's deactivated
            Form(None),
            SlotSet(REQUESTED_SLOT, None),
        ],
        slots=domain.slots,
    )

    # RulePolicy predicts action which handles submit
    action_probabilities = policy.predict_action_probabilities(
        form_conversation, domain
    )
    assert_predicted_action(action_probabilities, domain, submit_action_name)
async def test_dont_predict_form_if_already_finished():
    """A finished form must not be re-triggered; the FAQ rule wins instead."""
    form_name = "some_form"
    domain = Domain.from_yaml(
        f"""
        intents:
        - {GREET_INTENT_NAME}
        actions:
        - {UTTER_GREET_ACTION}
        - some-action
        slots:
          {REQUESTED_SLOT}:
            type: unfeaturized
        forms:
        - {form_name}
        """
    )
    policy = RulePolicy()
    policy.train([GREET_RULE], domain, RegexInterpreter())

    form_conversation = DialogueStateTracker.from_events(
        "in a form",
        evts=[
            # We are in an activate form
            ActionExecuted(form_name),
            Form(form_name),
            SlotSet(REQUESTED_SLOT, "some value"),
            ActionExecuted(ACTION_LISTEN_NAME),
            # User sends message as response to a requested slot
            UserUttered("haha", {"name": GREET_INTENT_NAME}),
            # Form is happy and deactivates itself
            ActionExecuted(form_name),
            Form(None),
            SlotSet(REQUESTED_SLOT, None),
            # User sends another message. Form is already done. Shouldn't get
            # triggered again
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered("haha", {"name": GREET_INTENT_NAME}),
        ],
        slots=domain.slots,
    )

    action_probabilities = policy.predict_action_probabilities(
        form_conversation, domain
    )
    assert_predicted_action(action_probabilities, domain, UTTER_GREET_ACTION)
async def test_rule_policy_slot_filling_from_text(
    trained_rule_policy: RulePolicy, trained_rule_policy_domain: Domain
):
    """Slots filled from free text inside a loop lead to the stop response."""
    form_conversation = DialogueStateTracker.from_events(
        "in a form",
        evts=[
            ActionExecuted(ACTION_LISTEN_NAME),
            # User responds and fills requested slot
            UserUttered("/activate_q_form", {"name": "activate_q_form"}),
            ActionExecuted("loop_q_form"),
            ActiveLoop("loop_q_form"),
            SlotSet(REQUESTED_SLOT, "some_slot"),
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered("/bla", {"name": GREET_INTENT_NAME}),
            ActionExecuted("loop_q_form"),
            SlotSet("some_slot", "/bla"),
            ActiveLoop(None),
            SlotSet(REQUESTED_SLOT, None),
        ],
        slots=trained_rule_policy_domain.slots,
    )

    # RulePolicy predicts action which handles submit
    prediction = trained_rule_policy.predict_action_probabilities(
        form_conversation, trained_rule_policy_domain, RegexInterpreter()
    )
    assert_predicted_action(prediction, trained_rule_policy_domain, "utter_stop")
def _train_rule_based_agent(
    moodbot_domain: Domain,
    train_file_name: Path,
    monkeypatch: MonkeyPatch,
    ignore_action_unlikely_intent: bool,
) -> Agent:
    """Train an agent whose only policy is a deterministic `RulePolicy`.

    We need `RulePolicy` to predict the correct actions in a particular
    conversation context as seen during training. Since it can get affected
    by `action_unlikely_intent` being triggered in some cases, we monkey-patch
    the method which creates prediction states to ignore
    `action_unlikely_intent`s if needed.
    """
    monkeypatch.setattr(
        RulePolicy,
        "_prediction_states",
        _custom_prediction_states_for_rules(ignore_action_unlikely_intent),
    )

    rule_policy = RulePolicy(restrict_rules=False)
    agent = Agent(moodbot_domain, SimplePolicyEnsemble([rule_policy]))
    training_data = agent.load_data(str(train_file_name))

    # Make the trackers compatible with rules so that they are picked up by
    # the policy.
    for tracker in training_data:
        tracker.is_rule_tracker = True

    agent.train(training_data)
    return agent
def test_predict_next_action_raises_limit_reached_exception(domain: Domain):
    """Exceeding `max_number_of_predictions` raises `ActionLimitReached`."""
    interpreter = RegexInterpreter()
    ensemble = SimplePolicyEnsemble(policies=[RulePolicy(), MemoizationPolicy()])

    processor = MessageProcessor(
        interpreter,
        ensemble,
        domain,
        InMemoryTrackerStore(domain),
        InMemoryLockStore(),
        TemplatedNaturalLanguageGenerator(domain.responses),
        # A single allowed prediction guarantees the limit is hit immediately.
        max_number_of_predictions=1,
    )

    tracker = DialogueStateTracker.from_events(
        "test",
        evts=[
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered("Hi!"),
            ActionExecuted("test_action"),
        ],
    )
    tracker.set_latest_action({"action_name": "test_action"})

    with pytest.raises(ActionLimitReached):
        processor.predict_next_action(tracker)
async def test_predict_form_action_if_multiple_turns():
    """The form keeps being predicted while it requests slots over many turns."""
    form_name = "some_form"
    bye_intent = "bye"
    domain = Domain.from_yaml(
        f"""
        intents:
        - {GREET_INTENT_NAME}
        - {bye_intent}
        actions:
        - {UTTER_GREET_ACTION}
        - some-action
        slots:
          {REQUESTED_SLOT}:
            type: unfeaturized
        forms:
        - {form_name}
        """
    )
    policy = RulePolicy()
    policy.train([GREET_RULE], domain, RegexInterpreter())

    form_conversation = DialogueStateTracker.from_events(
        "in a form",
        evts=[
            # We are in an active form
            ActionExecuted(form_name),
            Form(form_name),
            SlotSet(REQUESTED_SLOT, "some value"),
            # User responds to slot request
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered("haha", {"name": GREET_INTENT_NAME}),
            # Form validates input and requests another slot
            ActionExecuted(form_name),
            SlotSet(REQUESTED_SLOT, "some other"),
            # User responds to 2nd slot request
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered("haha", {"name": bye_intent}),
        ],
        slots=domain.slots,
    )

    # RulePolicy triggers form again
    action_probabilities = policy.predict_action_probabilities(
        form_conversation, domain
    )
    assert_predicted_action(action_probabilities, domain, form_name)
def test_faq_rule():
    """The greet rule's own events (minus rule markers) reproduce its action.

    NOTE(review): another `test_faq_rule` definition appears elsewhere in this
    file; if both end up in one module, only the later definition runs —
    consider renaming one of them.
    """
    domain = Domain.from_yaml(
        f"""
        intents:
        - {GREET_INTENT_NAME}
        actions:
        - {UTTER_GREET_ACTION}
        """
    )
    policy = RulePolicy()
    policy.train([GREET_RULE], domain, RegexInterpreter())

    # Drop the leading rule-snippet action and trailing listen from the rule.
    conversation = DialogueStateTracker.from_events(
        "bla2", GREET_RULE.applied_events()[1:-1]
    )
    action_probabilities = policy.predict_action_probabilities(
        conversation, domain
    )
    assert_predicted_action(action_probabilities, domain, UTTER_GREET_ACTION)
async def test_form_unhappy_path():
    """When a form rejects execution, the FAQ rule handles the user turn."""
    form_name = "some_form"
    domain = Domain.from_yaml(
        f"""
        intents:
        - {GREET_INTENT_NAME}
        actions:
        - {UTTER_GREET_ACTION}
        - some-action
        slots:
          {REQUESTED_SLOT}:
            type: unfeaturized
        forms:
        - {form_name}
        """
    )
    policy = RulePolicy()
    policy.train([GREET_RULE], domain, RegexInterpreter())

    unhappy_form_conversation = DialogueStateTracker.from_events(
        "in a form",
        evts=[
            # We are in an active form
            ActionExecuted(form_name),
            Form(form_name),
            SlotSet(REQUESTED_SLOT, "some value"),
            # User responds to slot request
            ActionExecuted(ACTION_LISTEN_NAME),
            UserUttered("haha", {"name": GREET_INTENT_NAME}),
            # Form isn't happy with the answer and rejects execution
            ActionExecutionRejected(form_name),
        ],
        slots=domain.slots,
    )

    # RulePolicy doesn't trigger form but FAQ
    action_probabilities = policy.predict_action_probabilities(
        unhappy_form_conversation, domain
    )
    assert_predicted_action(action_probabilities, domain, UTTER_GREET_ACTION)
async def test_form_unhappy_path_without_rule():
    """An unhandled form rejection leaves only the fallback confidence."""
    form_name = "some_form"
    bye_intent = "bye"
    domain = Domain.from_yaml(
        f"""
        intents:
        - {GREET_INTENT_NAME}
        - {bye_intent}
        actions:
        - {UTTER_GREET_ACTION}
        - some-action
        slots:
          {REQUESTED_SLOT}:
            type: unfeaturized
        forms:
        - {form_name}
        """
    )
    policy = RulePolicy()
    policy.train([GREET_RULE], domain, RegexInterpreter())

    conversation_events = [
        ActionExecuted(form_name),
        ActiveLoop(form_name),
        SlotSet(REQUESTED_SLOT, "some value"),
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered("haha", {"name": bye_intent}),
        ActiveLoop(form_name),
        ActionExecutionRejected(form_name),
    ]

    # Unhappy path is not handled. No rule matches. Let's hope ML fixes our problems 🤞
    prediction = policy.predict_action_probabilities(
        DialogueStateTracker.from_events(
            "casd", evts=conversation_events, slots=domain.slots
        ),
        domain,
        RegexInterpreter(),
    )

    assert prediction.max_confidence == policy._core_fallback_threshold
async def test_predict_action_listen_after_form():
    """After the form re-runs, the policy predicts `action_listen`."""
    form_name = "some_form"
    domain = Domain.from_yaml(
        f"""
        intents:
        - {GREET_INTENT_NAME}
        actions:
        - {UTTER_GREET_ACTION}
        - some-action
        slots:
          {REQUESTED_SLOT}:
            type: unfeaturized
        forms:
        - {form_name}
        """
    )
    policy = RulePolicy()
    policy.train([GREET_RULE], domain, RegexInterpreter())

    form_conversation = DialogueStateTracker.from_events(
        "in a form",
        evts=[
            # We are in an activate form
            ActionExecuted(form_name),
            Form(form_name),
            SlotSet(REQUESTED_SLOT, "some value"),
            ActionExecuted(ACTION_LISTEN_NAME),
            # User sends message as response to a requested slot
            UserUttered("haha", {"name": GREET_INTENT_NAME}),
            # Form is running again
            ActionExecuted(form_name),
        ],
        slots=domain.slots,
    )

    # RulePolicy predicts action listen
    action_probabilities = policy.predict_action_probabilities(
        form_conversation, domain
    )
    assert_predicted_action(action_probabilities, domain, ACTION_LISTEN_NAME)
def test_rule_based_data_warnings_no_rule_trackers():
    """Training a rule policy without rule trackers emits a user warning."""
    trackers = [DialogueStateTracker("some-id", slots=[], is_rule_tracker=False)]
    ensemble = SimplePolicyEnsemble([RulePolicy()])

    with pytest.warns(UserWarning) as record:
        ensemble.train(trackers, Domain.empty(), RegexInterpreter())

    warning_text = (
        "Found a rule-based policy in your pipeline but no rule-based training data."
    )
    assert warning_text in record[0].message.args[0]
async def _trained_default_agent(tmpdir_factory: TempdirFactory) -> Agent:
    """Train and persist the default test agent with memoization + rule policies."""
    model_path = tmpdir_factory.mktemp("model").strpath

    agent = Agent(
        "data/test_domains/default_with_slots.yml",
        policies=[AugmentedMemoizationPolicy(max_history=3), RulePolicy()],
    )
    agent.train(await agent.load_data(DEFAULT_STORIES_FILE))
    agent.persist(model_path)
    return agent
def test_all_policy_attributes_are_persisted(tmpdir: Path):
    """Every constructor attribute survives a persist/load round trip."""
    priority = 5
    lookup = {"a": "b"}
    core_fallback_threshold = 0.451231
    core_fallback_action_name = "action_some_fallback"
    enable_fallback_prediction = False

    original = RulePolicy(
        priority=priority,
        lookup=lookup,
        core_fallback_threshold=core_fallback_threshold,
        core_fallback_action_name=core_fallback_action_name,
        enable_fallback_prediction=enable_fallback_prediction,
    )
    original.persist(tmpdir)
    restored = RulePolicy.load(tmpdir)

    assert restored.priority == priority
    assert restored.lookup == lookup
    assert restored._core_fallback_threshold == core_fallback_threshold
    assert restored._fallback_action_name == core_fallback_action_name
    assert restored._enable_fallback_prediction == enable_fallback_prediction
async def test_form_activation_rule():
    """An intent covered by a form-activation rule predicts the form action."""
    form_name = "some_form"
    bye_intent = "bye"
    domain = Domain.from_yaml(
        f"""
        intents:
        - {GREET_INTENT_NAME}
        - {bye_intent}
        actions:
        - {UTTER_GREET_ACTION}
        - some-action
        slots:
          {REQUESTED_SLOT}:
            type: unfeaturized
        forms:
        - {form_name}
        """
    )

    form_activation_rule = _form_activation_rule(domain, form_name, bye_intent)
    policy = RulePolicy()
    policy.train([GREET_RULE, form_activation_rule], domain, RegexInterpreter())

    conversation_events = [
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered("haha", {"name": bye_intent}),
    ]

    # RulePolicy correctly predicts the form action
    action_probabilities = policy.predict_action_probabilities(
        DialogueStateTracker.from_events(
            "casd", evts=conversation_events, slots=domain.slots
        ),
        domain,
        RegexInterpreter(),
    )
    assert_predicted_action(action_probabilities, domain, form_name)
def test_validate_after_adding_adding_default_parameter(
    get_validation_method: Callable[..., ValidationMethodType],
    nlu: bool,
    core: bool,
):
    """Explicitly spelling out default configs must not trip finetune validation.

    NOTE(review): the test name contains a doubled word ("adding_adding");
    kept as-is to avoid breaking any test selection by name.
    """
    # create a schema and rely on rasa to fill in defaults later
    schema1 = _get_example_schema()
    schema1.nodes["nlu-node"] = SchemaNode(
        needs={}, uses=WhitespaceTokenizer, constructor_name="", fn="", config={}
    )
    schema1.nodes["core-node"] = SchemaNode(
        needs={}, uses=RulePolicy, constructor_name="", fn="", config={}
    )

    # training
    validate = get_validation_method(
        finetuning=False, load=False, nlu=nlu, core=core, graph_schema=schema1
    )
    validate(importer=EmptyDataImporter())

    # same schema -- we just explicitly pass default values
    schema2 = copy.deepcopy(schema1)
    schema2.nodes["nlu-node"] = SchemaNode(
        needs={},
        uses=WhitespaceTokenizer,
        constructor_name="",
        fn="",
        config=WhitespaceTokenizer.get_default_config(),
    )
    schema2.nodes["core-node"] = SchemaNode(
        needs={},
        uses=RulePolicy,
        constructor_name="",
        fn="",
        config=RulePolicy.get_default_config(),
    )

    # finetuning *does not raise*
    loaded_validate = get_validation_method(
        finetuning=True, load=True, nlu=nlu, core=core, graph_schema=schema2
    )
    loaded_validate(importer=EmptyDataImporter())
def _trained_default_agent(tmpdir_factory: TempdirFactory, stories_path: Text) -> Agent:
    """Train and persist a default agent from the given stories file."""
    model_path = tmpdir_factory.mktemp("model").strpath

    agent = Agent(
        "data/test_domains/default_with_slots.yml",
        policies=[AugmentedMemoizationPolicy(max_history=3), RulePolicy()],
        model_directory=model_path,
    )
    agent.train(agent.load_data(stories_path))
    agent.persist(model_path)
    return agent
async def test_form_unhappy_path_from_general_rule():
    """A general rule handles a form rejection, then the form takes over again."""
    form_name = "some_form"
    domain = Domain.from_yaml(
        f"""
        intents:
        - {GREET_INTENT_NAME}
        actions:
        - {UTTER_GREET_ACTION}
        - some-action
        slots:
          {REQUESTED_SLOT}:
            type: unfeaturized
        forms:
        - {form_name}
        """
    )
    policy = RulePolicy()
    # RulePolicy should memorize that unhappy_rule overrides GREET_RULE
    policy.train([GREET_RULE], domain, RegexInterpreter())

    # Check that RulePolicy predicts action to handle unhappy path
    conversation_events = [
        ActionExecuted(form_name),
        ActiveLoop(form_name),
        SlotSet(REQUESTED_SLOT, "some value"),
        ActionExecuted(ACTION_LISTEN_NAME),
        UserUttered("haha", {"name": GREET_INTENT_NAME}),
        ActionExecutionRejected(form_name),
    ]
    prediction = policy.predict_action_probabilities(
        DialogueStateTracker.from_events(
            "casd", evts=conversation_events, slots=domain.slots
        ),
        domain,
        RegexInterpreter(),
    )
    # check that general rule action is predicted
    assert_predicted_action(prediction, domain, UTTER_GREET_ACTION)

    # Check that RulePolicy triggers form again after handling unhappy path
    conversation_events.append(ActionExecuted(UTTER_GREET_ACTION))
    prediction = policy.predict_action_probabilities(
        DialogueStateTracker.from_events(
            "casd", evts=conversation_events, slots=domain.slots
        ),
        domain,
        RegexInterpreter(),
    )
    # check that action_listen from general rule is overwritten by form action
    assert_predicted_action(prediction, domain, form_name)