def train_comparison_models(story_filename,
                            domain,
                            output_path=None,
                            exclusion_percentages=None,
                            policy_configs=None,
                            runs=None,
                            dump_stories=False,
                            kwargs=None):
    """Train multiple models for comparison of policies."""

    for r in range(runs):
        logging.info("Starting run {}/{}".format(r + 1, runs))

        for i in exclusion_percentages:
            current_round = exclusion_percentages.index(i) + 1

            for policy_config in policy_configs:
                policies = config.load(policy_config)

                if len(policies) > 1:
                    raise ValueError("You can only specify one policy per "
                                     "model for comparison")

                policy_name = type(policies[0]).__name__
                output = os.path.join(output_path, 'run_' + str(r + 1),
                                      policy_name + str(current_round))

                logging.info("Starting to train {} round {}/{}"
                             " with {}% exclusion"
                             "".format(policy_name, current_round,
                                       len(exclusion_percentages), i))

                train_dialogue_model(domain, story_filename, output,
                                     policy_config=policy_config,
                                     exclusion_percentage=i,
                                     kwargs=kwargs,
                                     dump_stories=dump_stories)
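# Usage sketch for the comparison helper above. The file names, exclusion
# percentages and run count are illustrative assumptions, not fixed values.
train_comparison_models("data/stories.md",
                        "domain.yml",
                        output_path="models/comparison",
                        exclusion_percentages=[5, 25, 50],
                        policy_configs=["keras_policy.yml", "memo_policy.yml"],
                        runs=3)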
def visualize(config_path: Text, domain_path: Text, stories_path: Text,
              nlu_data_path: Text, output_path: Text, max_history: int):
    from rasa_core.agent import Agent
    from rasa_core import config

    policies = config.load(config_path)

    agent = Agent(domain_path, policies=policies)

    # this is optional, only needed if the `/greet` type of
    # messages in the stories should be replaced with actual
    # messages (e.g. `hello`)
    if nlu_data_path is not None:
        from rasa_nlu.training_data import load_data

        nlu_training_data = load_data(nlu_data_path)
    else:
        nlu_training_data = None

    logger.info("Starting to visualize stories...")
    agent.visualize(stories_path, output_path,
                    max_history,
                    nlu_training_data=nlu_training_data)

    full_output_path = "file://{}".format(os.path.abspath(output_path))
    logger.info("Finished graph creation. Saved into {}".format(
        full_output_path))

    import webbrowser
    webbrowser.open(full_output_path)
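# Hypothetical call to the visualization helper above; every path here is a
# placeholder. Passing NLU data is optional and only swaps `/intent`-style
# messages for real user utterances in the rendered graph.
visualize(config_path="config.yml",
          domain_path="domain.yml",
          stories_path="data/stories.md",
          nlu_data_path="data/nlu.md",
          output_path="graph.html",
          max_history=2)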
def train_dialogue_model(domain_file, stories_file, output_path,
                         interpreter=None,
                         endpoints=AvailableEndpoints(),
                         dump_stories=False,
                         policy_config=None,
                         exclusion_percentage=None,
                         kwargs=None):
    if not kwargs:
        kwargs = {}

    policies = config.load(policy_config)

    agent = Agent(domain_file,
                  generator=endpoints.nlg,
                  action_endpoint=endpoints.action,
                  interpreter=interpreter,
                  policies=policies)

    data_load_args, kwargs = utils.extract_args(
        kwargs,
        {"use_story_concatenation", "unique_last_num_states",
         "augmentation_factor", "remove_duplicates", "debug_plots"})

    training_data = agent.load_data(stories_file,
                                    exclusion_percentage=exclusion_percentage,
                                    **data_load_args)
    agent.train(training_data, **kwargs)
    agent.persist(output_path, dump_stories)

    return agent
def train_dialogue_model(domain_file, stories_file, output_path,
                         interpreter=None,
                         endpoints=AvailableEndpoints(),
                         max_history=None,
                         dump_flattened_stories=False,
                         policy_config=None,
                         kwargs=None):
    if not kwargs:
        kwargs = {}

    fallback_args, kwargs = utils.extract_args(
        kwargs,
        {"nlu_threshold", "core_threshold", "fallback_action_name"})

    policies = config.load(policy_config, fallback_args, max_history)

    agent = Agent(domain_file,
                  generator=endpoints.nlg,
                  action_endpoint=endpoints.action,
                  interpreter=interpreter,
                  policies=policies)

    data_load_args, kwargs = utils.extract_args(
        kwargs,
        {"use_story_concatenation", "unique_last_num_states",
         "augmentation_factor", "remove_duplicates", "debug_plots"})

    training_data = agent.load_data(stories_file, **data_load_args)
    agent.train(training_data, **kwargs)
    agent.persist(output_path, dump_flattened_stories)

    return agent
def train_core(domain_file, model_path, training_data_file, policy_config,
               logfile='train_core.log'):
    # log destination; the default file name is an assumption, adjust as needed
    logging.basicConfig(filename=logfile, level=logging.DEBUG)

    agent = Agent(domain_file, policies=config.load(policy_config))
    training_data = agent.load_data(training_data_file)
    agent.train(training_data)
    agent.persist(model_path)
    return agent
def train(domain_file: Text,
          stories_file: Text,
          output_path: Text,
          interpreter: Optional[NaturalLanguageInterpreter] = None,
          endpoints: AvailableEndpoints = AvailableEndpoints(),
          dump_stories: bool = False,
          policy_config: Text = None,
          exclusion_percentage: int = None,
          kwargs: Optional[Dict] = None):
    from rasa_core.agent import Agent

    if not kwargs:
        kwargs = {}

    policies = config.load(policy_config)

    agent = Agent(domain_file,
                  generator=endpoints.nlg,
                  action_endpoint=endpoints.action,
                  interpreter=interpreter,
                  policies=policies)

    data_load_args, kwargs = utils.extract_args(
        kwargs,
        {"use_story_concatenation", "unique_last_num_states",
         "augmentation_factor", "remove_duplicates", "debug_plots"})

    training_data = agent.load_data(stories_file,
                                    exclusion_percentage=exclusion_percentage,
                                    **data_load_args)
    agent.train(training_data, **kwargs)
    agent.persist(output_path, dump_stories)

    return agent
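# Minimal way the typed `train` helper above might be invoked; the paths and
# the augmentation setting are assumptions, and the default AvailableEndpoints()
# means no external NLG or action server is contacted.
agent = train("domain.yml",
              "data/stories.md",
              "models/dialogue",
              policy_config="policy_config.yml",
              kwargs={"augmentation_factor": 20})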
def run_weather_online(input_channel, interpreter,
                       domain_file="weather_domain.yml",
                       training_data_file='data/stories.md'):
    policies2 = policy_config.load("config.yml")
    agent = Agent(domain_file, policies=policies2, interpreter=interpreter)
    data = asyncio.run(agent.load_data(training_data_file))
    agent.train(data)
    return agent
def run_weather_online(interpreter,
                       domain_file="weather_domain.yml",
                       training_data_file='data/stories.md'):
    from rasa_core.utils import read_endpoint_config

    policies2 = policy_config.load("config.yml")
    # the Agent expects an EndpointConfig for its action endpoint, so read it
    # out of the endpoints file rather than passing the raw YAML path
    action_endpoint = read_endpoint_config("endpoint.yml", "action_endpoint")

    agent = Agent(domain_file,
                  policies=policies2,
                  interpreter=interpreter,
                  action_endpoint=action_endpoint)

    data = asyncio.run(agent.load_data(training_data_file))
    agent.train(data)
    interactive.run_interactive_learning(agent, training_data_file)
    return agent
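# One plausible way to drive the interactive-learning helper above: load a
# previously trained NLU model as the interpreter. The model path is an
# assumption for illustration.
from rasa_core.interpreter import RasaNLUInterpreter

nlu_interpreter = RasaNLUInterpreter('./models/nlu/default/weathernlu')
run_weather_online(nlu_interpreter)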
def train_nlu():
    from rasa_nlu.training_data import load_data
    from rasa_nlu import config
    from rasa_nlu.model import Trainer

    training_data = load_data('data/rasa_dataset_training.json')
    trainer = Trainer(config.load("configs/nlu_embedding_config.yml"))
    trainer.train(training_data)
    model_directory = trainer.persist('models/nlu/',
                                      fixed_model_name="current")

    return model_directory
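# The returned model directory can then back a dialogue agent as its
# interpreter, e.g. for the training helpers above (a sketch, not part
# of train_nlu itself):
from rasa_core.interpreter import RasaNLUInterpreter

model_directory = train_nlu()
interpreter = RasaNLUInterpreter(model_directory)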
def train_dialogue_transformer(domain_file="mobile_domain.yml",
                               model_path="models/dialogue_transformer",
                               training_data_file="data/mobile_edit_story.md"):
    # configure the policies by loading a yml config file
    policies = config.load('./policy/attention_policy.yml')
    agent = Agent(domain_file, policies=policies)
    training_data = agent.load_data(training_data_file)

    agent.train(training_data, validation_split=0.2)

    agent.persist(model_path)
    return agent
def test_agent_and_persist():
    policies = config.load("policies.yml")
    policies[0] = KerasPolicy(epochs=2)  # keep training times low

    agent = Agent("domain.yml", policies=policies)
    training_data = agent.load_data("data/stories.md")
    agent.train(training_data, validation_split=0.0)
    agent.persist("./tests/models/dialogue")

    loaded = Agent.load("./tests/models/dialogue")

    assert agent.handle_text("/greet") is not None
    assert loaded.domain.action_names == agent.domain.action_names
    assert loaded.domain.intents == agent.domain.intents
    assert loaded.domain.entities == agent.domain.entities
    assert loaded.domain.templates == agent.domain.templates
    utils.add_logging_option_arguments(parser)
    cli.arguments.add_config_arg(parser, nargs=1)
    cli.arguments.add_domain_arg(parser)
    cli.arguments.add_model_and_story_group(parser,
                                            allow_pretrained_model=False)
    return parser


if __name__ == '__main__':
    arg_parser = create_argument_parser()
    cmdline_arguments = arg_parser.parse_args()

    utils.configure_colored_logging(cmdline_arguments.loglevel)

    policies = config.load(cmdline_arguments.config[0])

    agent = Agent(cmdline_arguments.domain, policies=policies)

    # this is optional, only needed if the `/greet` type of
    # messages in the stories should be replaced with actual
    # messages (e.g. `hello`)
    if cmdline_arguments.nlu_data is not None:
        from rasa_nlu.training_data import load_data

        nlu_data = load_data(cmdline_arguments.nlu_data)
    else:
        nlu_data = None

    stories = cli.stories_from_cli_args(cmdline_arguments)
import logging

from rasa_core import training
from rasa_core import config as policy_config
from rasa_core.actions import Action
from rasa_core.agent import Agent
from rasa_core.domain import Domain
from rasa_core.featurizers import (MaxHistoryTrackerFeaturizer,
                                    BinarySingleStateFeaturizer)
from rasa_core.interpreter import RegexInterpreter, RasaNLUInterpreter
from rasa_core.policies import (FallbackPolicy, KerasPolicy,
                                MemoizationPolicy, FormPolicy)

fallback = FallbackPolicy(fallback_action_name="utter_default",
                          core_threshold=0.2,
                          nlu_threshold=0.1)

policies = policy_config.load("policy.yml")
# Contents of policy.yml:
# - name: KerasPolicy
#   epochs: 100
#   max_history: 3
# - name: MemoizationPolicy
#   max_history: 3
# - name: FallbackPolicy
#   nlu_threshold: 0.1
#   core_threshold: 0.2
#   fallback_action_name: 'utter_default'
# - name: FormPolicy

# Function
# ------------
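# A rough hand-built equivalent of the commented policy.yml above, useful when
# you want to tweak policies in code instead of via config.load. The featurizer
# wiring for KerasPolicy is an assumption based on the imports in this snippet.
keras_policy = KerasPolicy(
    MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(), max_history=3),
    epochs=100)
policies_by_hand = [keras_policy,
                    MemoizationPolicy(max_history=3),
                    fallback,
                    FormPolicy()]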
def test_load_config(filename):
    loaded = load(filename, None, None)

    assert len(loaded) == 2
    assert isinstance(loaded[0], MemoizationPolicy)
    assert isinstance(loaded[1], ExamplePolicy)
import logging

from rasa_core.agent import Agent
from rasa_core import config as policy_config
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy

if __name__ == '__main__':
    logging.basicConfig(level='INFO')

    model_path = './models/dialogue'
    policies = policy_config.load("./policies.yml")

    agent = Agent('chat_domain.yml', policies=policies)
    training_data = agent.load_data('./stories.md')

    agent.train(training_data)
    agent.persist(model_path)
def train_core(domain_file, model_path, training_data_file, policy_config):
    agent = Agent(domain_file, policies=config.load(policy_config))
    training_data = agent.load_data(training_data_file)
    agent.train(training_data)
    agent.persist(model_path)
    return agent
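# Example invocation of the helper above with placeholder file names.
agent = train_core("domain.yml", "./models/dialogue",
                   "data/stories.md", "config.yml")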