def run_ivrbot_online(input_channel=None,
                      interpreter=None,
                      domain_file="mobile_domain.yml",
                      training_data_file="data/mobile_story.md"):
    """Train the IVR bot via online (interactive) learning on the console.

    Args:
        input_channel: channel for the interactive session; defaults to a
            fresh ``ConsoleInputChannel``.
        interpreter: NLU interpreter; defaults to the demo ivr_nlu model.
        domain_file: path to the Core domain definition.
        training_data_file: path to the story training data.

    Returns:
        The trained ``Agent``.
    """
    # Bug fix: the original signature evaluated ConsoleInputChannel() and
    # RasaNLUInterpreter(...) at import time, sharing one stateful instance
    # across every call (mutable-default-argument anti-pattern).
    if input_channel is None:
        input_channel = ConsoleInputChannel()
    if interpreter is None:
        interpreter = RasaNLUInterpreter("projects/ivr_nlu/demo")
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(), KerasPolicy()],
                  interpreter=interpreter)
    training_data = agent.load_data(training_data_file)
    agent.train_online(training_data,
                       input_channel=input_channel,
                       batch_size=16,
                       epochs=200,
                       max_training_samples=300)
    return agent
def run_weather_online(interpreter,
                       domain_file="domain.yml",
                       training_data_file='data/stories.md'):
    """Train a weather dialogue agent, then start interactive learning.

    Returns the trained ``Agent`` once the interactive session ends.
    """
    endpoint = EndpointConfig(url="http://localhost:5055/webhook")
    weather_policies = [
        MemoizationPolicy(max_history=2),
        KerasPolicy(max_history=3, epochs=3, batch_size=50),
    ]
    agent = Agent(domain_file,
                  policies=weather_policies,
                  interpreter=interpreter,
                  action_endpoint=endpoint)
    stories = agent.load_data(training_data_file)
    agent.train(stories)
    interactive.run_interactive_learning(agent, training_data_file)
    return agent
def train_dialog(domain_file, training_data_file, model_dir, interpreter):
    """Train a dialogue model from stories and persist it to *model_dir*."""
    featurizer = MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(),
                                             max_history=6)
    dialog_agent = Agent(
        domain_file,
        policies=[
            MemoizationPolicy(max_history=6),
            KerasPolicy(featurizer,
                        augmentation_factor=50,
                        epochs=300,
                        batch_size=50,
                        validation_split=0.2),
        ],
        interpreter=interpreter)
    stories = dialog_agent.load_data(training_data_file)
    dialog_agent.train(stories)
    dialog_agent.persist(model_dir)
    return dialog_agent
def train_dialogue(domain_file='restaurant_domain.yml',
                   model_path='./models/dialogue',
                   training_data_file='./data/core_stories.md'):
    """Train the restaurant dialogue policies and persist the model."""
    state_featurizer = MaxHistoryTrackerFeaturizer(
        BinarySingleStateFeaturizer(), max_history=5)
    bot = Agent(domain_file,
                policies=[MemoizationPolicy(max_history=5),
                          KerasPolicy(state_featurizer)])
    bot.train(training_data_file,
              epochs=300,
              batch_size=50,
              validation_split=0.2,
              augmentation_factor=50)
    bot.persist(model_path)
    return bot
def run_weather_online(input_channel, interpreter,
                       domain_file="weather_domain.yml",
                       training_data_file='data/stories.md'):
    """Run online learning for the weather bot and return the agent."""
    bot = Agent(domain_file,
                policies=[MemoizationPolicy(max_history=2), KerasPolicy()],
                interpreter=interpreter)
    bot.train_online(training_data_file,
                     input_channel=input_channel,
                     batch_size=50,
                     epochs=200,
                     max_training_samples=300)
    return bot
def train_dialogue(domain_file='domain.yml',
                   model_path='./models/dialogue',
                   training_data_file='./data/stories.md'):
    """Train memoization + Keras policies on the stories and persist them."""
    bot = Agent(domain_file, policies=[MemoizationPolicy(), KerasPolicy()])
    bot.train(training_data_file,
              max_history=3,
              epochs=300,
              batch_size=50,
              validation_split=0.2,
              augmentation_factor=50)
    bot.persist(model_path)
    return bot
def run_fake_user(input_channel, max_training_samples=10, serve_forever=True):
    """Train a policy online with a regex interpreter, then serve messages.

    NOTE(review): ``back`` below is not defined inside this function —
    presumably a module-level message object; confirm before refactoring.
    While ``serve_forever`` is truthy this loops indefinitely.
    """
    logger.info("Starting to train policy")
    bot = Agent(RASA_CORE_DOMAIN_PATH,
                policies=[MemoizationPolicy(), KerasPolicy()],
                interpreter=RegexInterpreter())
    bot.train_online(RASA_CORE_TRAINING_DATA_PATH,
                     input_channel=input_channel,
                     epochs=RASA_CORE_EPOCHS,
                     max_training_samples=max_training_samples)
    while serve_forever:
        bot.handle_message(UserMessage(back, ConsoleOutputChannel()))
    return bot
def train_dialogue(domain_file="mobile_domain.yml",
                   model_path="projects/dialogue",
                   training_data_file="data/mobile_story.md"):
    """Train the mobile bot dialogue model (uses module-level ``fallback``)."""
    bot = Agent(domain_file,
                policies=[MemoizationPolicy(), KerasPolicy(), fallback])
    stories = bot.load_data(training_data_file)
    bot.train(stories,
              epochs=200,
              batch_size=16,
              augmentation_factor=50,
              validation_split=0.2)
    bot.persist(model_path)
    return bot
def run_online_trainer(input_channel,
                       interpreter,
                       domain_def_file='chat_domain.yml',
                       training_data_file='./data/stories.md'):
    """Start an online-training session for the chat domain."""
    trainer_agent = Agent(domain_def_file,
                          policies=[KerasPolicy(), MemoizationPolicy()],
                          interpreter=interpreter)
    trainer_agent.train_online(training_data_file,
                               input_channel=input_channel,
                               max_history=2,
                               batch_size=500,
                               epochs=200,
                               max_training_samples=300)
    return trainer_agent
def train_agent(interpreter, domain_file="domain.yml",
                training_file='data/stories.md'):
    """Train an agent against a local action server, then learn interactively."""
    webhook = EndpointConfig('http://localhost:5055/webhook')
    bot = Agent(domain_file,
                policies=[MemoizationPolicy(max_history=3),
                          KerasPolicy(max_history=3, epochs=10, batch_size=10)],
                interpreter=interpreter,
                action_endpoint=webhook)
    story_data = bot.load_data(training_file)
    bot.train(story_data)
    interactive.run_interactive_learning(bot, training_file)
    return bot
def test_agent_and_persist():
    """Train a minimal agent, persist it, reload it, and verify the round-trip."""
    policies = config.load("policies.yml")
    policies[0] = KerasPolicy(epochs=2)  # Keep training times low
    agent = Agent("domain.yml", policies=policies)
    training_data = agent.load_data("data/stories.md")
    agent.train(training_data, validation_split=0.0)
    agent.persist("./tests/models/dialogue")
    loaded = Agent.load("./tests/models/dialogue")
    # Bug fix: the round-trip check must exercise the *loaded* agent — the
    # original asserted on the in-memory `agent`, which says nothing about
    # whether persistence/loading worked.
    assert loaded.handle_text("/greet") is not None
    # The reloaded domain must match the one that was trained.
    assert loaded.domain.action_names == agent.domain.action_names
    assert loaded.domain.intents == agent.domain.intents
    assert loaded.domain.entities == agent.domain.entities
    assert loaded.domain.templates == agent.domain.templates
def train_dialogue(domain_file="restaurant_domain.yml",
                   model_path="models/dialogue",
                   training_data_file="data/babi_stories.md"):
    """Train the restaurant dialogue model on the bAbI stories and persist it."""
    restaurant_agent = Agent(
        domain_file,
        [MemoizationPolicy(max_history=3), RestaurantPolicy()])
    stories = restaurant_agent.load_data(training_data_file)
    restaurant_agent.train(stories,
                           epochs=400,
                           batch_size=100,
                           validation_split=0.2)
    restaurant_agent.persist(model_path)
    return restaurant_agent
def train_dialogue(domain_file="restaurant_domain.yml",
                   model_path="models/dialogue",
                   training_data_file="data/babi_stories.md"):
    """Train memoization + restaurant policies directly from the story file."""
    bot = Agent(domain_file,
                policies=[MemoizationPolicy(), RestaurantPolicy()])
    bot.train(training_data_file,
              max_history=7,
              epochs=100,
              batch_size=50,
              augmentation_factor=20,
              validation_split=0.1)
    bot.persist(model_path)
    return bot
def train_dialogue(domain_file='domain.yml',
                   model_path='./models/dialogue',
                   training_data_file='./data/stories.md'):
    """Train memoization, Keras and form policies, then persist the model."""
    policy_set = [
        MemoizationPolicy(),
        KerasPolicy(max_history=3, epochs=100, batch_size=50),
        FormPolicy(),
    ]
    bot = Agent(domain_file, policies=policy_set)
    stories = bot.load_data(training_data_file)
    bot.train(stories)
    bot.persist(model_path)
    return bot
def train_dialog(dialog_training_data_file, domain_file,
                 path_to_model='models/dialogue'):
    """Train a dialogue model with a fallback policy and persist it.

    Note: unlike the sibling trainers this does not return the agent.
    """
    logging.basicConfig(level='INFO')
    unclear_fallback = FallbackPolicy(fallback_action_name="utter_unclear",
                                      core_threshold=0.3,
                                      nlu_threshold=0.3)
    bot = Agent(domain_file,
                policies=[MemoizationPolicy(max_history=1),
                          KerasPolicy(epochs=200, batch_size=20),
                          unclear_fallback])
    stories = bot.load_data(dialog_training_data_file)
    bot.train(stories, augmentation_factor=50, validation_split=0.2)
    bot.persist(path_to_model)
def train_dialogue_model(domain_file, stories_file, output_path,
                         nlu_model_path=None, endpoints=None,
                         max_history=None, dump_flattened_stories=False,
                         kwargs=None):
    # Train a dialogue model with fallback/memoization/Keras policies,
    # routing caller-supplied kwargs to the right consumer (fallback policy,
    # data loading, or training) and persisting the result to output_path.
    if not kwargs:
        kwargs = {}
    # Optional remote action server configured from the endpoints file.
    action_endpoint = utils.read_endpoint_config(endpoints, "action_endpoint")
    # Peel off the fallback-policy thresholds first; extract_args returns the
    # matched args plus the *remaining* kwargs, so the order of these two
    # extractions is load-bearing.
    fallback_args, kwargs = utils.extract_args(kwargs,
                                               {"nlu_threshold",
                                                "core_threshold",
                                                "fallback_action_name"})
    policies = [
        FallbackPolicy(
            fallback_args.get("nlu_threshold",
                              DEFAULT_NLU_FALLBACK_THRESHOLD),
            fallback_args.get("core_threshold",
                              DEFAULT_CORE_FALLBACK_THRESHOLD),
            fallback_args.get("fallback_action_name",
                              DEFAULT_FALLBACK_ACTION)),
        MemoizationPolicy(
            max_history=max_history),
        KerasPolicy(
            MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(),
                                        max_history=max_history))]
    # nlu_model_path is passed as `interpreter`; presumably Agent resolves a
    # path into an interpreter instance — confirm against the Agent API.
    agent = Agent(domain_file,
                  action_endpoint=action_endpoint,
                  interpreter=nlu_model_path,
                  policies=policies)
    # Second split: story-loading options go to load_data; whatever is left
    # in kwargs after this is forwarded verbatim to agent.train.
    data_load_args, kwargs = utils.extract_args(kwargs,
                                                {"use_story_concatenation",
                                                 "unique_last_num_states",
                                                 "augmentation_factor",
                                                 "remove_duplicates",
                                                 "debug_plots"})
    training_data = agent.load_data(stories_file, **data_load_args)
    agent.train(training_data, **kwargs)
    agent.persist(output_path, dump_flattened_stories)
    return agent
def run_online_training(self, ensemble, domain, interpreter=None,
                        input_channel=None):
    """Run an online-training conversation with memoization disabled.

    Falls back to a ``RegexInterpreter`` and a console channel when none
    are supplied.
    """
    from rasa_core.agent import Agent
    if interpreter is None:
        interpreter = RegexInterpreter()
    training_bot = Agent(domain,
                         ensemble,
                         featurizer=self.featurizer,
                         interpreter=interpreter)
    # Memoization would short-circuit prediction during the session.
    training_bot.toggle_memoization(False)
    channel = input_channel if input_channel else ConsoleInputChannel()
    training_bot.handle_channel(channel)
def train_online(project='Lambton'):
    """Run online training for *project* and persist the resulting model.

    Args:
        project: project name used to build the domain/model/story paths.

    Returns:
        The trained ``Agent``.
    """
    domain_file = './Core/models/' + project + '/dialogue/domain.yml'
    # Bug fix: the original line ended with a stray comma, which turned
    # model_path into a 1-tuple and broke agent.persist(model_path).
    model_path = './NLU/models/default/' + project
    training_data_file = './Core/models/' + project + '/stories/stories.md'
    agent = Agent(domain_file, policies=[MemoizationPolicy(), KerasPolicy()])
    agent.train_online(training_data_file,
                       input_channel=ConsoleInputChannel(),
                       max_history=2,
                       batch_size=10,
                       epochs=250,
                       max_training_samples=300,
                       validation_split=0.2)
    agent.persist(model_path)
    return agent
def run_online(input_channel, interpreter, domain_file="domain.yml",
               training_data_file='stories.md'):
    """Train online with a fallback policy and return the agent.

    NOTE(review): the hard-coded 'domain.yml' below ignores the
    ``domain_file`` parameter — preserved as-is; confirm intent.
    """
    unclear_fallback = FallbackPolicy(fallback_action_name="utter_unclear",
                                      core_threshold=0.2,
                                      nlu_threshold=0.4)
    bot = Agent('domain.yml',
                policies=[MemoizationPolicy(), KerasPolicy(),
                          unclear_fallback],
                interpreter=interpreter)
    stories = bot.load_data(training_data_file)
    bot.train_online(stories, input_channel=input_channel, epochs=200)
    return bot
def train_rst_dm():
    """Train the restaurant dialogue manager on bAbI task-5 data."""
    stories_path = "../restaurantData/babi_task5_trn_rasa_with_slots.md"
    domain_path = '../restaurant_domain.yml'
    output_path = '../rst_models/rst_policy/current'
    rst_agent = Agent(domain=domain_path,
                      policies=[MemoizationPolicy(), RestaurantPolicy()])
    rst_agent.train(stories_path,
                    max_history=3,
                    epochs=100,
                    batch_size=50,
                    augmentation_factor=50,
                    validation_split=0.2)
    rst_agent.persist(output_path)
def train_dm():
    """Train the music-player dialogue manager and persist it."""
    stories_path = 'stories.md'
    output_path = 'models/policy/current'
    dm_agent = Agent("domain.yml",
                     policies=[MemoizationPolicy(), MusicPlayerPolicy()])
    dm_agent.train(stories_path,
                   max_history=3,
                   epochs=100,
                   batch_size=50,
                   augmentation_factor=50,
                   validation_split=0.2)
    dm_agent.persist(output_path)
def train_dialogue(domain_id="default"):
    """Train the dialogue model for one domain id and persist it.

    Paths are derived from the module-level ``data_folder``/``model_folder``.
    """
    domain_file = "{}/{}/domain.yml".format(data_folder, domain_id)
    model_path = "{}/{}/dialogue".format(model_folder, domain_id)
    stories_file = "{}/{}/stories.md".format(data_folder, domain_id)
    bot = Agent(domain_file,
                policies=[MemoizationPolicy(max_history=3), IntelleiPolicy()])
    stories = bot.load_data(stories_file)
    bot.train(stories, epochs=400, batch_size=100, validation_split=0.2)
    bot.persist(model_path)
    return bot
def train_tickets_order_dm():
    """Train the ticket-ordering dialogue manager and persist it."""
    stories_path = "../tickets/data/stories.md"
    domain_path = '../tickets/domain.yml'
    output_path = '../models/policy/current'
    tickets_agent = Agent(domain=domain_path,
                          policies=[MemoizationPolicy(), TicketsPolicy()])
    tickets_agent.train(stories_path,
                        max_history=3,
                        epochs=100,
                        batch_size=50,
                        augmentation_factor=50,
                        validation_split=0.2)
    tickets_agent.persist(output_path)
def train_dialogue(domain_file='weather_domain.yml',
                   model_path='./models/dialogue',
                   training_data_file='./data/stories.md'):
    """Train the weather dialogue policy and persist it.

    Bug fix: the original left every training keyword argument empty
    (``max_history = ,`` ...) behind a TODO, which is a SyntaxError.
    Filled in with the parameter values used by the sibling training
    functions in this file; tune as needed.

    Returns:
        The trained ``Agent``.
    """
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(), WeatherPolicy()])
    agent.train(training_data_file,
                max_history=3,
                epochs=100,
                batch_size=50,
                augmentation_factor=50,
                validation_split=0.2)
    agent.persist(model_path)
    return agent
def run_restaurantbot_online(input_channel=None,
                             interpreter=None,
                             domain_file="hainan_domain.yml",
                             training_data_file="data/story.md"):
    """Train the restaurant bot via online learning on the console.

    Args:
        input_channel: channel for the interactive session; defaults to a
            fresh ``ConsoleInputChannel``.
        interpreter: NLU interpreter; defaults to the persisted model at
            models/default/model_20190702-213207.
        domain_file: path to the Core domain definition.
        training_data_file: path to the story training data.

    Returns:
        The trained ``Agent``.
    """
    # Bug fix: the original signature constructed ConsoleInputChannel() and
    # RasaNLUInterpreter(...) at import time, sharing one stateful instance
    # across every call (mutable-default-argument anti-pattern).
    if input_channel is None:
        input_channel = ConsoleInputChannel()
    if interpreter is None:
        interpreter = RasaNLUInterpreter(
            "models/default/model_20190702-213207")
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(), KerasPolicy()],
                  interpreter=interpreter)
    training_data = agent.load_data(training_data_file)
    agent.train_online(training_data,
                       input_channel=input_channel,
                       max_history=2,
                       batch_size=16,
                       epochs=200,
                       max_training_samples=300)
    return agent
def train_dialogue(domain_file='domain.yml',
                   model_path='./models/dialogue',
                   training_data_file='stories.md'):
    """Train dialogue policies with an utter_unclear fallback and persist."""
    unclear_fallback = FallbackPolicy(fallback_action_name='utter_unclear',
                                      core_threshold=0.2,
                                      nlu_threshold=0.6)
    bot = Agent(domain_file,
                policies=[MemoizationPolicy(), KerasPolicy(),
                          unclear_fallback])
    bot.train(training_data_file,
              epochs=300,
              batch_size=50,
              validation_split=0.2)
    bot.persist(model_path)
    return bot
def train_dialogue(domain_file='Sell4BidsBot_domain.yml',
                   model_path='./models/dialogue',
                   training_data_file='./data/stories.md'):
    """Train the Sell4Bids bot with a default-fallback policy and persist."""
    default_fallback = FallbackPolicy(
        fallback_action_name="action_default_fallback",
        core_threshold=0.3,
        nlu_threshold=0.2)
    bot = Agent(domain_file,
                policies=[MemoizationPolicy(),
                          KerasPolicy(max_history=3, epochs=200,
                                      batch_size=50),
                          default_fallback])
    stories = bot.load_data(training_data_file)
    bot.train(stories)
    bot.persist(model_path)
    return bot
def run_restaurant_online(input_channel, interpreter,
                          domain_file="restaurant_domain.yml",
                          training_data_file='./data/core_stories.md'):
    """Run online learning for the restaurant bot and return the agent."""
    bot = Agent(domain_file,
                policies=[MemoizationPolicy(), KerasPolicy()],
                interpreter=interpreter)
    bot.train_online(training_data_file,
                     input_channel=input_channel,
                     max_history=5,
                     batch_size=50,
                     epochs=200,
                     max_training_samples=300)
    return bot
def train_dialogue(domain_file='./config/domain.yml',
                   model_path='./models/dialogue',
                   training_data_file='./data/stories.md'):
    """Train dialogue policies with an utter_fallback policy and persist."""
    fallback_policy = FallbackPolicy(fallback_action_name="utter_fallback",
                                     core_threshold=0.3,
                                     nlu_threshold=0.3)
    bot = Agent(domain_file,
                policies=[MemoizationPolicy(), KerasPolicy(),
                          fallback_policy])
    stories = bot.load_data(training_data_file)
    bot.train(stories,
              epochs=300,
              batch_size=50,
              validation_split=0.2)
    bot.persist(model_path)
    return bot
def food_order_online(input_channel, interpreter, domain_file="domain.yml",
                      training_data_file='data/stories.md'):
    """Run online learning for the food-ordering bot and return the agent."""
    bot = Agent(domain_file,
                policies=[MemoizationPolicy(), KerasPolicy()],
                interpreter=interpreter)
    bot.train_online(training_data_file,
                     input_channel=input_channel,
                     max_history=3,
                     batch_size=100,
                     epochs=400,
                     max_training_samples=300)
    return bot