Example 1
def train_dialogue_model(domain_file,
                         stories_file,
                         output_path,
                         interpreter=None,
                         endpoints=AvailableEndpoints(),
                         max_history=None,
                         dump_flattened_stories=False,
                         policy_config=None,
                         kwargs=None):
    if not kwargs:
        kwargs = {}

    fallback_args, kwargs = utils.extract_args(
        kwargs, {"nlu_threshold", "core_threshold", "fallback_action_name"})

    policies = config.load(policy_config, fallback_args, max_history)

    agent = Agent(domain_file,
                  generator=endpoints.nlg,
                  action_endpoint=endpoints.action,
                  interpreter=interpreter,
                  policies=policies)

    data_load_args, kwargs = utils.extract_args(
        kwargs, {
            "use_story_concatenation", "unique_last_num_states",
            "augmentation_factor", "remove_duplicates", "debug_plots"
        })

    training_data = agent.load_data(stories_file, **data_load_args)
    agent.train(training_data, **kwargs)
    agent.persist(output_path, dump_flattened_stories)

    return agent
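A minimal call to this helper might look like the sketch below. The file paths, the policy_config file, and the extra kwargs are all placeholders, and the enclosing module is assumed to provide the usual rasa_core names (config, utils, Agent, AvailableEndpoints):

# Hypothetical usage sketch; every path and value here is a placeholder.
agent = train_dialogue_model(
    domain_file="domain.yml",           # assumed domain definition
    stories_file="data/stories.md",     # assumed training stories
    output_path="models/dialogue",      # directory the model is persisted to
    policy_config="policy_config.yml",  # assumed policy configuration file
    kwargs={"augmentation_factor": 20,  # extracted and passed to agent.load_data
            "epochs": 100})             # left over, forwarded to agent.train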
Example 2
def train_dialogue_model(domain_file,
                         stories_file,
                         output_path,
                         interpreter=None,
                         endpoints=AvailableEndpoints(),
                         dump_stories=False,
                         policy_config=None,
                         exclusion_percentage=None,
                         kwargs=None):
    if not kwargs:
        kwargs = {}

    policies = config.load(policy_config)

    agent = Agent(domain_file,
                  generator=endpoints.nlg,
                  action_endpoint=endpoints.action,
                  interpreter=interpreter,
                  policies=policies)

    data_load_args, kwargs = utils.extract_args(
        kwargs, {
            "use_story_concatenation", "unique_last_num_states",
            "augmentation_factor", "remove_duplicates", "debug_plots"
        })

    training_data = agent.load_data(stories_file,
                                    exclusion_percentage=exclusion_percentage,
                                    **data_load_args)
    agent.train(training_data, **kwargs)
    agent.persist(output_path, dump_stories)

    return agent
Example 3
def train(domain_file: Text,
          stories_file: Text,
          output_path: Text,
          interpreter: Optional[NaturalLanguageInterpreter] = None,
          endpoints: AvailableEndpoints = AvailableEndpoints(),
          dump_stories: bool = False,
          policy_config: Optional[Text] = None,
          exclusion_percentage: Optional[int] = None,
          kwargs: Optional[Dict] = None):
    from rasa_core.agent import Agent

    if not kwargs:
        kwargs = {}

    policies = config.load(policy_config)

    agent = Agent(domain_file,
                  generator=endpoints.nlg,
                  action_endpoint=endpoints.action,
                  interpreter=interpreter,
                  policies=policies)

    data_load_args, kwargs = utils.extract_args(
        kwargs, {
            "use_story_concatenation", "unique_last_num_states",
            "augmentation_factor", "remove_duplicates", "debug_plots"
        })

    training_data = agent.load_data(stories_file,
                                    exclusion_percentage=exclusion_percentage,
                                    **data_load_args)
    agent.train(training_data, **kwargs)
    agent.persist(output_path, dump_stories)

    return agent
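As written, this typed variant depends on several names from its enclosing module. A sketch of the imports it needs, assuming the rasa_core package layout of that era (the exact home of AvailableEndpoints moved between releases, so that line is a best guess):

from typing import Dict, Optional, Text

from rasa_core import config, utils
from rasa_core.interpreter import NaturalLanguageInterpreter
from rasa_core.utils import AvailableEndpoints  # rasa_core.run in older releases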
Example 4
def train_dialogue_model(domain_file, stories_file, output_path,
                         interpreter=None,
                         endpoints=AvailableEndpoints(),
                         max_history=None,
                         dump_flattened_stories=False,
                         kwargs=None):
    if not kwargs:
        kwargs = {}

    fallback_args, kwargs = utils.extract_args(kwargs,
                                               {"nlu_threshold",
                                                "core_threshold",
                                                "fallback_action_name"})

    policies = [
        FallbackPolicy(
                fallback_args.get("nlu_threshold",
                                  DEFAULT_NLU_FALLBACK_THRESHOLD),
                fallback_args.get("core_threshold",
                                  DEFAULT_CORE_FALLBACK_THRESHOLD),
                fallback_args.get("fallback_action_name",
                                  DEFAULT_FALLBACK_ACTION)),
        MemoizationPolicy(
                max_history=max_history),
        KerasPolicy(
                MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(),
                                            max_history=max_history))]

    agent = Agent(domain_file,
                  generator=endpoints.nlg,
                  action_endpoint=endpoints.action,
                  interpreter=interpreter,
                  policies=policies)

    data_load_args, kwargs = utils.extract_args(kwargs,
                                                {"use_story_concatenation",
                                                 "unique_last_num_states",
                                                 "augmentation_factor",
                                                 "remove_duplicates",
                                                 "debug_plots"})

    training_data = agent.load_data(stories_file, **data_load_args)
    agent.train(training_data, **kwargs)
    agent.persist(output_path, dump_flattened_stories)

    return agent
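Unlike Examples 1-3, this variant builds its policy list in code instead of loading it from a policy_config file, so it also needs the policy and featurizer classes. The import paths below are a sketch based on the rasa_core module layout; where the DEFAULT_* fallback constants are defined is version-dependent, so they are assumed to come from the enclosing module:

from rasa_core.featurizers import (BinarySingleStateFeaturizer,
                                   MaxHistoryTrackerFeaturizer)
from rasa_core.policies.fallback import FallbackPolicy
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy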
Example 5
def do_interactive_learning(cmdline_args, stories, additional_arguments):
    _endpoints = AvailableEndpoints.read_endpoints(cmdline_args.endpoints)
    _interpreter = NaturalLanguageInterpreter.create(cmdline_args.nlu,
                                                     _endpoints.nlu)

    if cmdline_args.core:
        if cmdline_args.finetune:
            raise ValueError("--core can only be used without "
                             "--finetune flag.")

        logger.info("Loading a pre-trained model. This means that "
                    "all training-related parameters will be ignored.")

        _broker = PikaProducer.from_endpoint_config(_endpoints.event_broker)
        _tracker_store = TrackerStore.find_tracker_store(
            None, _endpoints.tracker_store, _broker)

        _agent = Agent.load(cmdline_args.core,
                            interpreter=_interpreter,
                            generator=_endpoints.nlg,
                            tracker_store=_tracker_store,
                            action_endpoint=_endpoints.action)
    else:
        if cmdline_args.out:
            model_directory = cmdline_args.out
        else:
            model_directory = tempfile.mkdtemp(suffix="_core_model")

        _agent = train_dialogue_model(cmdline_args.domain, stories,
                                      model_directory, _interpreter,
                                      _endpoints, cmdline_args.dump_stories,
                                      cmdline_args.config[0], None,
                                      additional_arguments)

    interactive.run_interactive_learning(
        _agent,
        stories,
        finetune=cmdline_args.finetune,
        skip_visualization=cmdline_args.skip_visualization)
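Every attribute the function reads from cmdline_args is visible in its body, so a throwaway driver can be sketched with an argparse.Namespace; all of the values below are placeholders:

import argparse

# Placeholder arguments mirroring the attributes the function accesses.
cmdline_args = argparse.Namespace(
    endpoints="endpoints.yml",     # endpoint configuration file
    nlu=None,                      # optional pre-trained NLU model
    core=None,                     # optional pre-trained Core model to load
    finetune=False,
    out="models/dialogue",         # output directory (else a tempdir is used)
    domain="domain.yml",
    dump_stories=False,
    config=["policy_config.yml"],  # note: the function reads config[0]
    skip_visualization=False)

do_interactive_learning(cmdline_args, "data/stories.md", {})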
Example 6
def do_interactive_learning(cmdline_args, stories, additional_arguments):
    _endpoints = AvailableEndpoints.read_endpoints(cmdline_args.endpoints)
    _interpreter = NaturalLanguageInterpreter.create(cmdline_args.nlu,
                                                     _endpoints.nlu)

    if (isinstance(cmdline_args.config, list)
            and len(cmdline_args.config) > 1):
        raise ValueError("You can only pass one config file at a time")
    if cmdline_args.core and cmdline_args.finetune:
        raise ValueError("--core can only be used without --finetune flag.")
    elif cmdline_args.core:
        logger.info("loading a pre-trained model. "
                    "all training-related parameters will be ignored")

        _broker = PikaProducer.from_endpoint_config(_endpoints.event_broker)
        _tracker_store = TrackerStore.find_tracker_store(
            None, _endpoints.tracker_store, _broker)

        _agent = Agent.load(cmdline_args.core,
                            interpreter=_interpreter,
                            generator=_endpoints.nlg,
                            tracker_store=_tracker_store,
                            action_endpoint=_endpoints.action)
    else:
        if not cmdline_args.out:
            raise ValueError("you must provide a path where the model "
                             "will be saved using -o / --out")

        _agent = train_dialogue_model(cmdline_args.domain, stories,
                                      cmdline_args.out, _interpreter,
                                      _endpoints, cmdline_args.dump_stories,
                                      cmdline_args.config[0], None,
                                      additional_arguments)
    interactive.run_interactive_learning(
        _agent,
        stories,
        finetune=cmdline_args.finetune,
        skip_visualization=cmdline_args.skip_visualization)
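Compared with Example 5, this variant is stricter at the edges: it rejects more than one config file up front and requires an explicit output path via -o / --out instead of silently falling back to a temporary directory.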
Example 7
        "epochs": cmdline_args.epochs,
        "batch_size": cmdline_args.batch_size,
        "validation_split": cmdline_args.validation_split,
        "augmentation_factor": cmdline_args.augmentation,
        "debug_plots": cmdline_args.debug_plots,
        "nlu_threshold": cmdline_args.nlu_threshold,
        "core_threshold": cmdline_args.core_threshold,
        "fallback_action_name": cmdline_args.fallback_action_name
    }

    if cmdline_args.url:
        stories = utils.download_file_from_url(cmdline_args.url)
    else:
        stories = cmdline_args.stories

    _endpoints = AvailableEndpoints.read_endpoints(cmdline_args.endpoints)
    _interpreter = NaturalLanguageInterpreter.create(cmdline_args.nlu,
                                                     _endpoints.nlu)

    if cmdline_args.core:
        if not cmdline_args.interactive:
            raise ValueError("--core can only be used together with the"
                             "--interactive flag.")
        elif cmdline_args.finetune:
            raise ValueError("--core can only be used together with the"
                             "--interactive flag and without --finetune flag.")
        else:
            logger.info("Loading a pre-trained model. All training-related "
                        "parameters will be ignored.")
        # call completed to mirror the Agent.load invocations in Examples 5 and 6
        _agent = Agent.load(cmdline_args.core,
                            interpreter=_interpreter,
                            generator=_endpoints.nlg,
                            action_endpoint=_endpoints.action)