def _log_action_on_tracker(
    self,
    tracker: DialogueStateTracker,
    action: Action,
    events: Optional[List[Event]],
    prediction: PolicyPrediction,
) -> None:
    """Record an executed action and the events it produced on the tracker.

    Args:
        tracker: Conversation tracker to update in place.
        action: The action that was just executed.
        events: Events returned by the action's run (may be `None`).
        prediction: The policy prediction that selected the action.
    """
    # Tolerate actions whose `run` returned `None` instead of `[]`.
    if events is None:
        events = []

    rejected_manually = any(
        isinstance(e, ActionExecutionRejected) for e in events
    )
    if not rejected_manually:
        logger.debug(f"Policy prediction ended with events '{prediction.events}'.")
        tracker.update_with_events(prediction.events, self.domain)

        # log the action and its produced events
        tracker.update(action.event_for_successful_execution(prediction))

    logger.debug(f"Action '{action.name()}' ended with events '{events}'.")
    tracker.update_with_events(events, self.domain)
def _log_action_on_tracker(
    self,
    tracker: DialogueStateTracker,
    action_name: Text,
    events: Optional[List[Event]],
    prediction: PolicyPrediction,
) -> None:
    """Record an executed action (by name) and its events on the tracker.

    Args:
        tracker: Conversation tracker to update in place.
        action_name: Name of the action that was executed.
        events: Events returned by the action's run (may be `None`).
        prediction: The policy prediction that selected the action.
    """
    # Tolerate actions whose `run` returned `None` instead of `[]`.
    if events is None:
        events = []

    self._warn_about_new_slots(tracker, action_name, events)

    rejected_manually = any(
        isinstance(e, ActionExecutionRejected) for e in events
    )
    if action_name is not None and not rejected_manually:
        logger.debug(f"Policy prediction ended with events '{prediction.events}'.")
        tracker.update_with_events(prediction.events, self.domain)

        # log the action and its produced events
        tracker.update(
            ActionExecuted(
                action_name, prediction.policy_name, prediction.max_confidence
            )
        )

    logger.debug(f"Action '{action_name}' ended with events '{events}'.")
    tracker.update_with_events(events, self.domain)
async def run_action_extract_slots(
    self,
    output_channel: OutputChannel,
    tracker: DialogueStateTracker,
) -> DialogueStateTracker:
    """Run action to extract slots and update the tracker accordingly.

    Args:
        output_channel: Output channel associated with the incoming user message.
        tracker: A tracker representing a conversation state.

    Returns:
        the given (updated) tracker
    """
    extract_slots_action = rasa.core.actions.action.action_for_name_or_text(
        ACTION_EXTRACT_SLOTS, self.domain, self.action_endpoint
    )

    slot_events = await extract_slots_action.run(
        output_channel, self.nlg, tracker, self.domain
    )
    tracker.update_with_events(slot_events, self.domain)

    events_as_str = "\n".join(str(e) for e in slot_events)
    logger.debug(
        f"Default action '{ACTION_EXTRACT_SLOTS}' was executed, "
        f"resulting in {len(slot_events)} events: {events_as_str}"
    )
    return tracker
def test_skip_predictions_if_new_intent(
    self,
    trained_policy: UnexpecTEDIntentPolicy,
    model_storage: ModelStorage,
    resource: Resource,
    execution_context: ExecutionContext,
    default_domain: Domain,
    caplog: LogCaptureFixture,
    tracker_events: List[Event],
):
    """Skips predictions if there's a new intent created."""
    policy = self.persist_and_load_policy(
        trained_policy, model_storage, resource, execution_context
    )

    tracker = DialogueStateTracker(sender_id="init", slots=default_domain.slots)
    tracker.update_with_events(tracker_events, default_domain)

    with caplog.at_level(logging.DEBUG):
        prediction = policy.predict_action_probabilities(
            tracker, default_domain, precomputations=None
        )

    assert "Skipping predictions for UnexpecTEDIntentPolicy" in caplog.text
    assert prediction.probabilities == policy._default_predictions(default_domain)
def test_skip_predictions_to_prevent_loop(
    self,
    trained_policy: UnexpecTEDIntentPolicy,
    model_storage: ModelStorage,
    resource: Resource,
    execution_context: ExecutionContext,
    default_domain: Domain,
    caplog: LogCaptureFixture,
    tracker_events: List[Event],
    should_skip: bool,
    tmp_path: Path,
):
    """Skips predictions to prevent loop."""
    policy = self.persist_and_load_policy(
        trained_policy, model_storage, resource, execution_context
    )

    tracker = DialogueStateTracker(sender_id="init", slots=default_domain.slots)
    tracker.update_with_events(tracker_events, default_domain)

    with caplog.at_level(logging.DEBUG):
        prediction = policy.predict_action_probabilities(
            tracker, default_domain, None
        )

    skipped = "Skipping predictions for UnexpecTEDIntentPolicy" in caplog.text
    assert skipped == should_skip

    if should_skip:
        assert prediction.probabilities == policy._default_predictions(
            default_domain
        )
def test_action_unlikely_intent_prediction(
    self,
    trained_policy: UnexpecTEDIntentPolicy,
    model_storage: ModelStorage,
    resource: Resource,
    execution_context: ExecutionContext,
    default_domain: Domain,
    predicted_similarity: float,
    threshold_value: float,
    is_unlikely: bool,
    monkeypatch: MonkeyPatch,
    tmp_path: Path,
):
    """Checks the `action_unlikely_intent` prediction against the intent threshold."""
    policy = self.persist_and_load_policy(
        trained_policy, model_storage, resource, execution_context
    )

    # Similarity scores for all intents are zero except the one we query with.
    dummy_intent_index = 4
    similarities = np.zeros((1, 1, len(default_domain.intents)))
    similarities[0, 0, dummy_intent_index] = predicted_similarity
    query_intent = default_domain.intents[dummy_intent_index]

    policy.label_thresholds[dummy_intent_index] = threshold_value

    tracker = DialogueStateTracker(sender_id="init", slots=default_domain.slots)
    tracker.update_with_events(
        [UserUttered(text="hello", intent={"name": query_intent})],
        default_domain,
    )

    # Preset the model predictions to the similarity values
    # so that we don't need to hardcode for particular model predictions.
    monkeypatch.setattr(
        policy.model,
        "run_inference",
        lambda data: {"similarities": similarities},
    )

    prediction = policy.predict_action_probabilities(tracker, default_domain, None)

    if is_unlikely:
        unlikely_index = default_domain.index_for_action(ACTION_UNLIKELY_INTENT_NAME)
        assert prediction.probabilities[unlikely_index] == 1.0
    else:
        assert prediction.probabilities == [0.0] * default_domain.num_actions

    # Make sure metadata is set. The exact structure
    # of the metadata is tested separately and
    # not as part of this test.
    assert prediction.action_metadata is not None
    # Assert metadata is serializable
    assert json.dumps(prediction.action_metadata)
def test_slot_mappings_ignored_intents_during_active_loop():
    """Checks a `from_text` mapping is not desired for a form's ignored intent.

    While `restaurant_form` is active, an utterance with the ignored
    `chitchat` intent must not trigger the `cuisine` slot mapping.
    """
    # BUG FIX: the YAML template referenced {LATEST_TRAINING_DATA_FORMAT_VERSION}
    # but the string was not an f-string, so the literal placeholder text was
    # passed to `Domain.from_yaml` instead of the actual version number.
    domain = Domain.from_yaml(f"""
version: "{LATEST_TRAINING_DATA_FORMAT_VERSION}"
intents:
- greet
- chitchat
slots:
  cuisine:
    type: text
    mappings:
    - type: from_text
      conditions:
      - active_loop: restaurant_form
forms:
  restaurant_form:
    ignored_intents:
    - chitchat
    required_slots:
    - cuisine
""")
    tracker = DialogueStateTracker("sender_id", slots=domain.slots)
    # Put the form's loop on the tracker, then an utterance with the
    # ignored intent.
    event1 = ActiveLoop("restaurant_form")
    event2 = UserUttered(
        text="The weather is sunny today",
        intent={"name": "chitchat", "confidence": 0.9604260921478271},
        entities=[],
    )
    tracker.update_with_events([event1, event2], domain)

    mappings_for_cuisine = (
        domain.as_dict().get("slots").get("cuisine").get("mappings")
    )
    assert (
        SlotMapping.intent_is_desired(mappings_for_cuisine[0], tracker, domain)
        is False
    )
def marker_trackerstore() -> TrackerStore:
    """Sets up a TrackerStore with 5 trackers in it."""
    domain = Domain.empty()
    store = InMemoryTrackerStore(domain)

    for sender_id in map(str, range(5)):
        # Each tracker gets ten user utterances named "0".."9".
        tracker = DialogueStateTracker(sender_id, None)
        tracker.update_with_events(
            [UserUttered(str(j)) for j in range(10)], domain
        )
        store.save(tracker)

    return store
def marker_sqlite_tracker(tmp_path: Path) -> Tuple[SQLTrackerStore, Text]:
    """Sets up a SQLite-backed tracker store with 5 populated trackers.

    Returns:
        The tracker store and the path of its SQLite database file.
    """
    domain = Domain.empty()
    db_path = str(tmp_path / "rasa.db")
    store = SQLTrackerStore(dialect="sqlite", db=db_path)

    for sender_id in map(str, range(5)):
        tracker = DialogueStateTracker(sender_id, None)
        # Slots 0-4, then a session start + greeting, then slots 5-9.
        tracker.update_with_events(
            [SlotSet(str(j), "slot") for j in range(5)], domain
        )
        tracker.update(ActionExecuted(ACTION_SESSION_START_NAME))
        tracker.update(UserUttered("hello"))
        tracker.update_with_events(
            [SlotSet(str(j + 5), "slot") for j in range(5)], domain
        )
        store.save(tracker)

    return store, db_path
def test_no_action_unlikely_intent_prediction(
    self,
    trained_policy: UnexpecTEDIntentPolicy,
    model_storage: ModelStorage,
    resource: Resource,
    execution_context: ExecutionContext,
    default_domain: Domain,
    tmp_path: Path,
):
    """Checks the policy yields all-zero probabilities in degenerate cases."""
    policy = self.persist_and_load_policy(
        trained_policy, model_storage, resource, execution_context
    )
    expected_probabilities = [0] * default_domain.num_actions

    # Case 1: empty tracker.
    tracker = DialogueStateTracker(sender_id="init", slots=default_domain.slots)
    prediction = policy.predict_action_probabilities(tracker, default_domain, None)
    assert prediction.probabilities == expected_probabilities

    # Case 2: tracker with a user turn followed by a bot action.
    tracker.update_with_events(
        [
            UserUttered(text="hello", intent={"name": "greet"}),
            ActionExecuted(action_name="utter_greet"),
        ],
        default_domain,
    )
    prediction = policy.predict_action_probabilities(tracker, default_domain, None)
    assert prediction.probabilities == expected_probabilities

    # Case 3: no model present at all.
    policy.model = None
    prediction = policy.predict_action_probabilities(tracker, default_domain, None)
    assert prediction.probabilities == expected_probabilities
async def test_markers_cli_results_save_correctly(tmp_path: Path):
    """Checks marker evaluation results are written to CSV as expected."""
    domain = Domain.empty()
    store = InMemoryTrackerStore(domain)

    # Five trackers, each with slots 0-4, a new session, then slots 5-9.
    for sender_id in map(str, range(5)):
        tracker = DialogueStateTracker(sender_id, None)
        tracker.update_with_events(
            [SlotSet(str(j), "slot") for j in range(5)], domain
        )
        tracker.update(ActionExecuted(ACTION_SESSION_START_NAME))
        tracker.update(UserUttered("hello"))
        tracker.update_with_events(
            [SlotSet(str(j + 5), "slot") for j in range(5)], domain
        )
        await store.save(tracker)

    tracker_loader = MarkerTrackerLoader(store, "all")
    results_path = tmp_path / "results.csv"
    markers = OrMarker(
        markers=[
            SlotSetMarker("2", name="marker1"),
            SlotSetMarker("7", name="marker2"),
        ]
    )
    await markers.evaluate_trackers(tracker_loader.load(), results_path)

    senders = set()
    with open(results_path, "r") as results:
        for row in csv.DictReader(results):
            senders.add(row["sender_id"])
            if row["marker"] == "marker1":
                # Slot "2" is set in the first session.
                assert row["session_idx"] == "0"
                assert int(row["event_idx"]) >= 2
                assert row["num_preceding_user_turns"] == "0"
            if row["marker"] == "marker2":
                # Slot "7" is set in the second session, after "hello".
                assert row["session_idx"] == "1"
                assert int(row["event_idx"]) >= 3
                assert row["num_preceding_user_turns"] == "1"

    assert len(senders) == 5
def test_load_sessions(tmp_path):
    """Tests loading a tracker with multiple sessions."""
    domain = Domain.empty()
    store = SQLTrackerStore(domain, db=os.path.join(tmp_path, "temp.db"))

    tracker = DialogueStateTracker("test123", None)
    # Two utterances, a session boundary, two more utterances.
    tracker.update_with_events(
        [
            UserUttered("0"),
            UserUttered("1"),
            SessionStarted(),
            UserUttered("2"),
            UserUttered("3"),
        ],
        domain,
    )
    store.save(tracker)

    loader = MarkerTrackerLoader(store, STRATEGY_ALL)
    loaded = list(loader.load())

    assert len(loaded) == 1  # contains only one tracker
    assert len(loaded[0].events) == len(tracker.events)