def test_fallback_mapping_restart():
    """A restart intent following a fallback action is resolved by MappingPolicy."""
    domain = Domain.load("data/test_domains/default.yml")
    events = [
        ActionExecuted(ACTION_DEFAULT_FALLBACK_NAME),
        utilities.user_uttered(USER_INTENT_RESTART, 1),
    ]
    tracker = DialogueStateTracker.from_events("test", events, [])

    # Two-stage fallback outranks mapping by priority, but should still yield
    # to the mapped restart intent.
    ensemble = SimplePolicyEnsemble(
        [
            TwoStageFallbackPolicy(priority=2, deny_suggestion_intent_name="deny"),
            MappingPolicy(priority=1),
        ]
    )

    probabilities, winning_policy = ensemble.probabilities_using_best_policy(
        tracker, domain, RegexInterpreter()
    )

    # MappingPolicy sits at index 1 in the ensemble constructed above.
    assert winning_policy == f"policy_1_{MappingPolicy.__name__}"

    best_index = probabilities.index(max(probabilities))
    predicted_action = domain.action_for_index(best_index, None)
    assert predicted_action.name() == ACTION_RESTART_NAME
def test_mapping_wins_over_form(events: List[Event]):
    """MappingPolicy's prediction should beat form, constant, and fallback policies."""
    domain = Domain.from_yaml(
        """
    forms:
    - test-form
    """
    )
    tracker = DialogueStateTracker.from_events("test", events, [])

    ensemble = SimplePolicyEnsemble(
        [
            MappingPolicy(),
            ConstantPolicy(priority=1, predict_index=0),
            FormPolicy(),
            FallbackPolicy(),
        ]
    )
    probabilities, winning_policy = ensemble.probabilities_using_best_policy(
        tracker, domain, RegexInterpreter()
    )

    # MappingPolicy is the first entry (index 0) of the ensemble.
    assert winning_policy == f"policy_0_{MappingPolicy.__name__}"

    best_index = probabilities.index(max(probabilities))
    predicted_action = domain.action_for_index(best_index, None)
    assert predicted_action.name() == ACTION_RESTART_NAME
def train_core(
    domain_file: Text = "domain.yml",
    model_directory: Text = "models",
    model_name: Text = "current",
    training_data_file: Text = "data/stories.md",
):
    """Train a Core model and persist it under ``<model_directory>/<model_name>/core``.

    Args:
        domain_file: Path to the domain definition.
        model_directory: Root directory for stored models.
        model_name: Sub-directory name for this model.
        training_data_file: Path to the training stories.

    Returns:
        The path the trained model was persisted to.
    """
    agent = Agent(
        domain_file,
        policies=[
            MemoizationPolicy(max_history=3),
            MappingPolicy(),
            RestaurantPolicy(batch_size=100, epochs=100, validation_split=0.2),
        ],
    )

    # augmentation_factor controls how many synthetic stories Core generates.
    # NOTE(review): load_data is not awaited here, unlike the async variants
    # elsewhere — presumably an older synchronous API; confirm before reuse.
    training_data = agent.load_data(training_data_file, augmentation_factor=10)
    agent.train(training_data)

    # agent.persist stores the model and all metadata into a folder;
    # the folder itself is not zipped.
    model_path = os.path.join(model_directory, model_name, "core")
    agent.persist(model_path)

    logger.info("Model trained. Stored in '{}'.".format(model_path))
    return model_path
async def train_core(
    domain_file="domain.yml",
    model_directory="models",
    model_name="current",
    training_data_file="data/stories.md",
):
    """Asynchronously train a Core model and persist it.

    Args:
        domain_file: Path to the domain definition.
        model_directory: Root directory for stored models.
        model_name: Sub-directory name for this model.
        training_data_file: Path to the training stories.

    Returns:
        The path the trained model was persisted to.
    """
    agent = Agent(
        domain_file,
        policies=[
            MemoizationPolicy(max_history=3),
            MappingPolicy(),
            RestaurantPolicy(batch_size=100, epochs=100, validation_split=0.2),
        ],
    )

    # Fixed: the original clobbered the training_data_file parameter with a
    # hardcoded "data/tiny_stories.md" (leftover debug override), making the
    # argument dead; debug prints over the training data are removed as well.
    # augmentation_factor controls how many synthetic stories Core generates.
    training_data = await agent.load_data(training_data_file, augmentation_factor=10)
    agent.train(training_data)

    # agent.persist stores the model and all metadata into a folder;
    # the folder itself is not zipped.
    model_path = os.path.join(model_directory, model_name, "core")
    agent.persist(model_path)

    logger.info(f"Model trained. Stored in '{model_path}'.")
    return model_path
async def test_maping_policy():
    """Inspect which action MappingPolicy predicts for the first mapped intent."""
    default_domain = Domain.load("{}/data/default_with_mapping.yml".format(prj_dir))
    policy = MappingPolicy()

    events = [
        ActionExecuted(ACTION_LISTEN_NAME),
        user_uttered(intent_mapping[0][0], 1),
    ]
    tracker = DialogueStateTracker.from_events("sender", events, [], 20)
    viz_tracker(tracker)

    scores = policy.predict_action_probabilities(tracker, default_domain)
    print(default_domain.action_names)

    # Map each action name to its index — mirrors the layout of the score vector.
    class_to_idx = {name: i for i, name in enumerate(default_domain.action_names)}
    print(class_to_idx)

    best_index = scores.index(max(scores))
    print("action names: {}".format(default_domain.action_names))
    print(default_domain.action_names[best_index])
async def train_core(domain_file, training_data_file, model_directory):
    """Train a Core model (Memoization + Mapping + Keras) and persist it.

    Returns the path the trained model was persisted to.
    """
    policies = [
        MemoizationPolicy(max_history=3),
        MappingPolicy(),
        KerasPolicy(epochs=500),
    ]
    agent = Agent(domain_file, policies=policies)

    # augmentation_factor controls how many synthetic stories Core generates.
    training_data = await agent.load_data(training_data_file, augmentation_factor=10)
    agent.train(training_data)

    # agent.persist stores the model and all metadata into a folder;
    # the folder itself is not zipped.
    model_path = os.path.join(model_directory, "core")
    agent.persist(model_path)
    logger.info(f"Model trained. Stored in '{model_path}'.")

    return model_path
async def train_dialogue(
    domain_file="domain.yml",
    model_path="models/dialogue",
    training_data_file="data/stories.md",
):
    """Train a dialogue agent and persist it to ``model_path``.

    Returns the trained Agent instance.
    """
    policies = [
        MemoizationPolicy(max_history=3),
        MappingPolicy(),
        RestaurantPolicy(batch_size=100, epochs=400, validation_split=0.2),
    ]
    agent = Agent(domain_file, policies=policies)

    training_data = await agent.load_data(training_data_file)
    agent.train(training_data)
    agent.persist(model_path)

    return agent
def test_fallback_wins_over_mapping():
    """A mapped intent with very low confidence is handled by FallbackPolicy."""
    domain = Domain.load("data/test_domains/default.yml")
    events = [
        ActionExecuted(ACTION_LISTEN_NAME),
        # Confidence far below the fallback threshold should trigger fallback.
        utilities.user_uttered(USER_INTENT_RESTART, 0.0001),
    ]
    tracker = DialogueStateTracker.from_events("test", events, [])

    ensemble = SimplePolicyEnsemble([FallbackPolicy(), MappingPolicy()])
    probabilities, winning_policy = ensemble.probabilities_using_best_policy(
        tracker, domain
    )

    # FallbackPolicy is the first entry (index 0) of the ensemble.
    assert winning_policy == f"policy_0_{FallbackPolicy.__name__}"

    best_index = probabilities.index(max(probabilities))
    predicted_action = domain.action_for_index(best_index, None)
    assert predicted_action.name() == ACTION_DEFAULT_FALLBACK_NAME
async def train_core(
    domain_file: Text = "domain.yml",
    model_path: Text = "models/core",
    training_data_file: Text = "data/stories.md",
):
    """Train a Core model and persist it directly to ``model_path``.

    Returns the path the trained model was persisted to.
    """
    policies = [
        MemoizationPolicy(max_history=3),
        MappingPolicy(),
        RestaurantPolicy(batch_size=100, epochs=400, validation_split=0.2),
    ]
    agent = Agent(domain_file, policies=policies)

    training_data = await agent.load_data(training_data_file)
    agent.train(training_data)

    # agent.persist stores the model and all metadata into a folder;
    # the folder itself is not zipped.
    agent.persist(model_path)
    logger.info("Model trained. Stored in '{}'.".format(model_path))

    return model_path
index_of_mapping_policy = 0 assert best_policy == f"policy_{index_of_mapping_policy}_{MappingPolicy.__name__}" assert next_action.name() == ACTION_RESTART_NAME @pytest.mark.parametrize( "ensemble", [ SimplePolicyEnsemble( [ FormPolicy(), ConstantPolicy(FORM_POLICY_PRIORITY - 1, 0), FallbackPolicy(), ] ), SimplePolicyEnsemble([FormPolicy(), MappingPolicy()]), ], ) def test_form_wins_over_everything_else(ensemble: SimplePolicyEnsemble): form_name = "test-form" domain = f""" forms: - {form_name} """ domain = Domain.from_yaml(domain) events = [ Form("test-form"), ActionExecuted(ACTION_LISTEN_NAME), utilities.user_uttered("test", 1), ]
def create_policy(self, featurizer, priority):
    """Build the MappingPolicy instance under test.

    ``featurizer`` and ``priority`` belong to the shared factory interface
    but are not used by MappingPolicy here.
    """
    return MappingPolicy()
def create_policy(
    self, featurizer: Optional[TrackerFeaturizer], priority: int
) -> Policy:
    """Build the MappingPolicy instance under test.

    ``featurizer`` and ``priority`` belong to the shared factory interface
    but are not used by MappingPolicy here.
    """
    policy = MappingPolicy()
    return policy