Example 1
def train_dialogue_model(domain_file,
                         stories_file,
                         output_path,
                         interpreter=None,
                         endpoints=AvailableEndpoints(),
                         max_history=None,
                         dump_flattened_stories=False,
                         policy_config=None,
                         kwargs=None):
    if not kwargs:
        kwargs = {}

    fallback_args, kwargs = utils.extract_args(
        kwargs, {"nlu_threshold", "core_threshold", "fallback_action_name"})

    policies = config.load(policy_config, fallback_args, max_history)

    agent = Agent(domain_file,
                  generator=endpoints.nlg,
                  action_endpoint=endpoints.action,
                  interpreter=interpreter,
                  policies=policies)

    data_load_args, kwargs = utils.extract_args(
        kwargs, {
            "use_story_concatenation", "unique_last_num_states",
            "augmentation_factor", "remove_duplicates", "debug_plots"
        })

    training_data = agent.load_data(stories_file, **data_load_args)
    agent.train(training_data, **kwargs)
    agent.persist(output_path, dump_flattened_stories)

    return agent
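A minimal call sketch for the signature above; the file names and output directory are placeholders, and the single kwargs entry is only meant to show how utils.extract_args routes extra arguments to agent.load_data.

# Hypothetical invocation with placeholder paths; "augmentation_factor" is
# extracted from kwargs and forwarded to agent.load_data.
agent = train_dialogue_model("domain.yml",
                             "data/stories.md",
                             "models/dialogue",
                             max_history=3,
                             kwargs={"augmentation_factor": 20})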
Example 2
def train_dialogue_model(domain_file,
                         stories_file,
                         output_path,
                         interpreter=None,
                         endpoints=AvailableEndpoints(),
                         dump_stories=False,
                         policy_config=None,
                         exclusion_percentage=None,
                         kwargs=None):
    if not kwargs:
        kwargs = {}

    policies = config.load(policy_config)

    agent = Agent(domain_file,
                  generator=endpoints.nlg,
                  action_endpoint=endpoints.action,
                  interpreter=interpreter,
                  policies=policies)

    data_load_args, kwargs = utils.extract_args(
        kwargs, {
            "use_story_concatenation", "unique_last_num_states",
            "augmentation_factor", "remove_duplicates", "debug_plots"
        })

    training_data = agent.load_data(stories_file,
                                    exclusion_percentage=exclusion_percentage,
                                    **data_load_args)
    agent.train(training_data, **kwargs)
    agent.persist(output_path, dump_stories)

    return agent
Example 3
def train(domain_file: Text,
          stories_file: Text,
          output_path: Text,
          interpreter: Optional[NaturalLanguageInterpreter] = None,
          endpoints: AvailableEndpoints = AvailableEndpoints(),
          dump_stories: bool = False,
          policy_config: Optional[Text] = None,
          exclusion_percentage: Optional[int] = None,
          kwargs: Optional[Dict] = None):
    from rasa_core.agent import Agent

    if not kwargs:
        kwargs = {}

    policies = config.load(policy_config)

    agent = Agent(domain_file,
                  generator=endpoints.nlg,
                  action_endpoint=endpoints.action,
                  interpreter=interpreter,
                  policies=policies)

    data_load_args, kwargs = utils.extract_args(
        kwargs, {
            "use_story_concatenation", "unique_last_num_states",
            "augmentation_factor", "remove_duplicates", "debug_plots"
        })

    training_data = agent.load_data(stories_file,
                                    exclusion_percentage=exclusion_percentage,
                                    **data_load_args)
    agent.train(training_data, **kwargs)
    agent.persist(output_path, dump_stories)

    return agent
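A sketch of the imports the typed variant above assumes (Agent is already imported inside the function body). The exact module paths have moved between rasa_core releases, so treat these as representative rather than definitive.

# Assumed imports for the typed signature; paths follow the pre-1.0
# rasa_core layout and may differ in other releases.
from typing import Dict, Optional, Text

from rasa_core import config, utils
from rasa_core.interpreter import NaturalLanguageInterpreter
from rasa_core.utils import AvailableEndpoints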
Example 4
def train_dialogue_model(domain_file,
                         stories_file,
                         output_path,
                         use_online_learning=False,
                         nlu_model_path=None,
                         max_history=None,
                         kwargs=None):
    if not kwargs:
        kwargs = {}

    agent = Agent(
        domain_file,
        policies=[MemoizationPolicy(max_history=max_history),
                  KerasPolicy()])

    data_load_args, kwargs = utils.extract_args(
        kwargs, {
            "use_story_concatenation", "unique_last_num_states",
            "augmentation_factor", "remove_duplicates", "debug_plots"
        })
    training_data = agent.load_data(stories_file, **data_load_args)

    if use_online_learning:
        if nlu_model_path:
            agent.interpreter = RasaNLUInterpreter(nlu_model_path)
        else:
            agent.interpreter = RegexInterpreter()
        agent.train_online(training_data,
                           input_channel=ConsoleInputChannel(),
                           model_path=output_path,
                           **kwargs)
    else:
        agent.train(training_data, **kwargs)

    agent.persist(output_path)
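A hypothetical invocation of the online-learning branch above; the model paths are placeholders, and omitting nlu_model_path would make the function fall back to RegexInterpreter.

# Interactive (online) training session driven through the console channel.
train_dialogue_model("domain.yml",
                     "data/stories.md",
                     "models/dialogue",
                     use_online_learning=True,
                     nlu_model_path="models/nlu/default/current")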
Example 5
def train_dialogue_model(domain_file, stories_file, output_path,
                         nlu_model_path=None,
                         endpoints=None,
                         max_history=None,
                         dump_flattened_stories=False,
                         kwargs=None):
    if not kwargs:
        kwargs = {}

    action_endpoint = utils.read_endpoint_config(endpoints, "action_endpoint")

    fallback_args, kwargs = utils.extract_args(kwargs,
                                               {"nlu_threshold",
                                                "core_threshold",
                                                "fallback_action_name"})

    policies = [
        FallbackPolicy(
                fallback_args.get("nlu_threshold",
                                  DEFAULT_NLU_FALLBACK_THRESHOLD),
                fallback_args.get("core_threshold",
                                  DEFAULT_CORE_FALLBACK_THRESHOLD),
                fallback_args.get("fallback_action_name",
                                  DEFAULT_FALLBACK_ACTION)),
        MemoizationPolicy(
                max_history=max_history),
        KerasPolicy(
                MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(),
                                            max_history=max_history))]

    agent = Agent(domain_file,
                  action_endpoint=action_endpoint,
                  interpreter=nlu_model_path,
                  policies=policies)

    data_load_args, kwargs = utils.extract_args(kwargs,
                                                {"use_story_concatenation",
                                                 "unique_last_num_states",
                                                 "augmentation_factor",
                                                 "remove_duplicates",
                                                 "debug_plots"})

    training_data = agent.load_data(stories_file, **data_load_args)
    agent.train(training_data, **kwargs)
    agent.persist(output_path, dump_flattened_stories)

    return agent
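A sketch of the imports Example 5 assumes. The module paths follow the pre-1.0 rasa_core layout, and the DEFAULT_* fallback constants are assumed to be defined elsewhere in the surrounding script.

# Assumed imports for the example above; module paths may differ between
# rasa_core releases, and DEFAULT_NLU_FALLBACK_THRESHOLD,
# DEFAULT_CORE_FALLBACK_THRESHOLD and DEFAULT_FALLBACK_ACTION are expected
# to come from the surrounding script or its own imports.
from rasa_core import utils
from rasa_core.agent import Agent
from rasa_core.featurizers import (BinarySingleStateFeaturizer,
                                   MaxHistoryTrackerFeaturizer)
from rasa_core.policies.fallback import FallbackPolicy
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy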