async def train_core(domain_file="domain.yml", model_directory="models", model_name="current", training_data_file="data/stories.md"): agent = Agent( domain_file, policies=[ MemoizationPolicy(max_history=3), MappingPolicy(), RestaurantPolicy(batch_size=100, epochs=100, validation_split=0.2), ], ) training_data_file = "data/tiny_stories.md" training_data = await agent.load_data(training_data_file, augmentation_factor=10) # show_training_data(training_data) # print(type(training_data)) # print(training_data) for data in training_data: print(type(data)) # viz_domain(data.domain) # viz_TrackerWithCachedStates(data, view_domain=False) # exit() agent.train(training_data) # Attention: agent.persist stores the model and all meta data into a folder. # The folder itself is not zipped. model_path = os.path.join(model_directory, model_name, "core") agent.persist(model_path) logger.info(f"Model trained. Stored in '{model_path}'.") return model_path
async def train_core(
    domain_file: Text = "domain.yml",
    model_directory: Text = "models",
    model_name: Text = "current",
    training_data_file: Text = "data/stories.md",
):
    agent = Agent(
        domain_file,
        policies=[
            MemoizationPolicy(max_history=3),
            MappingPolicy(),
            RestaurantPolicy(batch_size=100, epochs=100, validation_split=0.2),
        ],
    )

    training_data = await agent.load_data(training_data_file, augmentation_factor=10)
    agent.train(training_data)

    # Attention: agent.persist stores the model and all meta data into a folder.
    # The folder itself is not zipped.
    model_path = os.path.join(model_directory, model_name, "core")
    agent.persist(model_path)

    logger.info(f"Model trained. Stored in '{model_path}'.")

    return model_path

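# The excerpts in this listing assume a module preamble that is not shown:
# Agent, the policies, os, and a configured logger. Below is a minimal sketch
# of that preamble plus a driver for the two async train_core variants above.
# The import paths assume Rasa 1.x (rasa.core.*); with the older rasa_core
# package the same classes live under rasa_core.* and agent.load_data is
# synchronous. RestaurantPolicy is a project-specific custom policy (see the
# sketch at the end of this listing).
import asyncio
import logging
import os
from typing import Text

from rasa.core.agent import Agent
from rasa.core.policies.fallback import FallbackPolicy
from rasa.core.policies.keras_policy import KerasPolicy
from rasa.core.policies.mapping_policy import MappingPolicy
from rasa.core.policies.memoization import (
    AugmentedMemoizationPolicy,
    MemoizationPolicy,
)
from rasa.core.policies.sklearn_policy import SklearnPolicy

logger = logging.getLogger(__name__)

if __name__ == "__main__":
    # Coroutines such as train_core above must be run on an event loop.
    trained_path = asyncio.get_event_loop().run_until_complete(train_core())
    print(f"Core model persisted to {trained_path}")
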
def train_dialogue(domain_file="domain.yml",model_path="./Model/dialogue",training_data_file="Stories.md"): agent = Agent(domain_file,policies=[MemoizationPolicy(max_history=5),RestaurantPolicy()]) training_data = agent.load_data(training_data_file) agent.train(training_data,epochs=100, batch_size=5, validation_split=0.2 ) agent.persist(model_path) return agent
def train_core(domain_file="restaurant_domain.yml", model_path="models/dialogue", training_data_file="data/train_babi_stories.md"): policies_array = [ [MemoizationPolicy(max_history=3)], #gut [MemoizationPolicy(max_history=5)], #gut [AugmentedMemoizationPolicy()], #gut [RestaurantPolicy()], #gut [KerasPolicy()], #gut [FallbackPolicy()], #gut [FallbackPolicy(core_threshold=0.5)], #gut [SklearnPolicy(scoring=['accuracy','f1'])], #gut [MemoizationPolicy(max_history=3), RestaurantPolicy()], #gut [AugmentedMemoizationPolicy(), RestaurantPolicy()], #gut [MemoizationPolicy(max_history=3), KerasPolicy()], #gut [AugmentedMemoizationPolicy(), KerasPolicy()], #gut [MemoizationPolicy(max_history=3), SklearnPolicy(scoring=['accuracy','f1'])], #gut [AugmentedMemoizationPolicy(), SklearnPolicy(scoring=['accuracy','f1'])], #gut [MemoizationPolicy(max_history=3), KerasPolicy(), SklearnPolicy()], #gut [MemoizationPolicy(max_history=3), RestaurantPolicy(), SklearnPolicy()], #gut [AugmentedMemoizationPolicy(), RestaurantPolicy(), SklearnPolicy(scoring=['accuracy','f1'])] #gut ] index_policies = -4 print("Training policies: [%s]" % ",".join([x.__class__.__name__ for x in policies_array[index_policies]])) agent = Agent(domain_file,policies_array[index_policies]) training_data = agent.load_data(training_data_file) agent.train( training_data, epochs=400, batch_size=100, validation_split=0.2 ) agent.persist(model_path) return agent
def train_dialogue(domain_file="mydomain.yml", model_path="models/dialogue", training_data_file="data/mystories.md"): agent = Agent(domain_file, policies=[MemoizationPolicy(), RestaurantPolicy()]) agent.train(training_data_file, max_history=3, epochs=400, batch_size=100, validation_split=0.2) agent.persist(model_path) # agent.visualize(training_data_file,output_file="graph.png", max_history=2) return agent
def train_dialogue(domain_file="restaurant_domain.yml", model_path="models/dialogue", training_data_file="data/babi_stories.md"): agent = Agent(domain_file, policies=[MemoizationPolicy(max_history=3), RestaurantPolicy(batch_size=100, epochs=400, validation_split=0.2)]) training_data = agent.load_data(training_data_file) agent.train( training_data ) agent.persist(model_path) return agent
def train_dialogue(domain_file="restaurant_domain.yml", model_path="models/dialogue", training_data_file="data/restaurant_story.md"): agent = Agent(domain_file, policies=[MemoizationPolicy(), RestaurantPolicy()]) agent.train(training_data_file, max_history=2, epochs=200, batch_size=16, augmentation_factor=50, validation_split=0.2) agent.persist(model_path) return agent
def train_dialogue(domain_file="../config/fund_domain.yml", model_path="../models/dialogue/sl", training_data_file="../data/fund_stories.md"): agent = Agent(domain_file, policies=[MemoizationPolicy(max_history=3), RestaurantPolicy()]) training_data = agent.load_data(training_data_file) agent.train( training_data, epochs=400, batch_size=100, validation_split=0.2 ) agent.persist(model_path) return agent
def train_dialogue(domain_file="../rasa/jarvis_dom.yml", model_path="nlu_dialogue/models/jarvis_nlu", training_data_file="../rasa/jarvis_story.md"): fallback = FallbackPolicy(fallback_action_name="utter_default", core_threshold=0.46, nlu_threshold=0.46) agent = Agent(domain_file, policies=[ MemoizationPolicy(max_history=3), RestaurantPolicy(), fallback ]) training_data = agent.load_data(training_data_file) agent.train(training_data, epochs=400, batch_size=100, validation_split=0.2) agent.persist(model_path) return agent