Example #1
# Imports assumed by this snippet (rasa_core 0.x API):
from rasa_core.agent import Agent
from rasa_core.policies.fallback import FallbackPolicy
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy
from rasa_core.training import interactive
from rasa_core.utils import EndpointConfig


def run_weather_online(interpreter,
                       domain_file='weather_domain.yml',
                       training_data_file='data/stories.md'):
    action_endpoint = EndpointConfig(url="http://localhost:5000/webhook")
    fallback = FallbackPolicy(fallback_action_name="action_default_fallback",
                              core_threshold=0.8,
                              nlu_threshold=0.8)
    agent = Agent(domain_file,  # use the parameter instead of a hard-coded path
                  policies=[
                      MemoizationPolicy(max_history=2),
                      KerasPolicy(epochs=500,
                                  batch_size=50,
                                  validation_split=0.2),
                      fallback
                  ],
                  interpreter=interpreter,
                  action_endpoint=action_endpoint)
    data_ = agent.load_data(training_data_file, augmentation_factor=50)

    agent.train(data_)
    interactive.run_interactive_learning(agent,
                                         training_data_file,
                                         skip_visualization=True)

    # agent.handle_channels(input_channel)
    return agent
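
Example #1 expects an already-trained NLU interpreter to be passed in. A hypothetical driver in the style of Examples #17-#19 below might call it like this; the NLU model path is an assumption, not taken from this project:

# Hypothetical driver for run_weather_online above (the NLU model path is assumed).
from rasa_core import utils
from rasa_core.interpreter import RasaNLUInterpreter

if __name__ == '__main__':
    utils.configure_colored_logging(loglevel="INFO")
    nlu_interpreter = RasaNLUInterpreter('./models/nlu/default/weathernlu')
    run_weather_online(nlu_interpreter)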
Example #2
def run_online_dialogue(serve_forever=True):
    interpreter = RasaNLUInterpreter('./models/nlu/default/chat')
    action_endpoint = EndpointConfig(url="http://localhost:5004/webhook")
    agent = Agent.load('./models/dialogue',
                       interpreter=interpreter,
                       action_endpoint=action_endpoint)
    interactive.run_interactive_learning(agent)  #, channel='cmdline')
    return agent
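
Example #2 loads its interpreter from './models/nlu/default/chat'. A minimal sketch of how such a model directory is typically produced with the old rasa_nlu 0.x API; the training-data and config file names here are assumptions:

# Minimal rasa_nlu training sketch; 'data/nlu.md' and 'nlu_config.yml' are assumed names.
from rasa_nlu import config
from rasa_nlu.model import Trainer
from rasa_nlu.training_data import load_data

training_data = load_data('data/nlu.md')
trainer = Trainer(config.load('nlu_config.yml'))
trainer.train(training_data)
# With the default project name "default", this persists to ./models/nlu/default/chat
trainer.persist('./models/nlu', fixed_model_name='chat')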
Example #3
def run_mental_bot(serve_forever=True):
    interpreter = RasaNLUInterpreter('./models/nlu/default/mentalnlu')
    agent = Agent.load('./models/dialogue', interpreter=interpreter)
    training_data_file = 'data/stories.md'
    if serve_forever:
        interactive.run_interactive_learning(agent, training_data_file)

    return agent
Example #4
File: 2.py  Project: apt-x4869/WeatherBot
def run_weather_online(interpreter,
                       domain_file="weather_domain.yml",
                       training_data_file='data/stories.md'):
    policies2 = policy_config.load("config.yml")
    # NOTE: Agent expects an EndpointConfig object here, not a bare file name;
    # the URL below is an assumption based on the other examples on this page.
    action_endpoint = EndpointConfig(url="http://localhost:5055/webhook")
    agent = Agent(domain_file,
                  policies=policies2,
                  interpreter=interpreter,
                  action_endpoint=action_endpoint)

    # agent.load_data is a coroutine in this project's rasa_core version,
    # hence the asyncio.run wrapper.
    data = asyncio.run(agent.load_data(training_data_file))
    agent.train(data)
    interactive.run_interactive_learning(agent, training_data_file)
    return agent
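
Example #4 builds its policy list from config.yml via policy_config.load instead of instantiating policies in code. A rough hand-written equivalent is shown below for comparison; the specific policies and parameters are assumptions, not the contents of that project's config.yml:

# Rough hand-written equivalent of policy_config.load("config.yml");
# the policy choices and parameters are assumptions.
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy

policies2 = [MemoizationPolicy(max_history=2),
             KerasPolicy(epochs=200, batch_size=50)]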
Example #5
def do_interactive_learning(cmdline_args, stories, additional_arguments=None):
    from rasa_core.training import interactive

    if cmdline_args.core and cmdline_args.finetune:
        raise ValueError("--core can only be used without --finetune flag.")

    interactive.run_interactive_learning(
        stories,
        finetune=cmdline_args.finetune,
        skip_visualization=cmdline_args.skip_visualization,
        server_args=cmdline_args.__dict__,
        additional_arguments=additional_arguments)
Example #6
def train_agent(input_channel, nlu_interpreter,
                domain_file="domain.yml",
                training_data_file='./data/dialogue/stories.md'):

    #endpoints = "endpoints.yml"
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=2), KerasPolicy()],
                  interpreter=nlu_interpreter)
    data = agent.load_data(training_data_file)
    agent.train(data,
                input_channel=input_channel,
                batch_size=50,
                epochs=200,
                max_training_samples=300)
    # NOTE: the freshly trained agent is not persisted here, so Agent.load below
    # picks up whatever model was previously saved at this path.
    agent = Agent.load('models/dialogue/default/dialogue_model',
                       interpreter=nlu_interpreter,
                       action_endpoint=EndpointConfig(url="http://localhost:5055/webhook"))
    interactive.run_interactive_learning(agent, training_data_file)

    return agent
Example #7
def run_mental_online(interpreter,
                      domain_file="mental_domain.yml",
                      training_data_file='data/stories.md'):
    action_endpoint = EndpointConfig(url="http://localhost:5055/webhook")
    agent = Agent(domain_file,
                  policies=[
                      MemoizationPolicy(max_history=2),
                      KerasPolicy(max_history=4, epochs=1000, batch_size=50)
                  ],
                  interpreter=interpreter,
                  action_endpoint=action_endpoint)
    data = agent.load_data(training_data_file)
    agent.train(data)
    interactive.run_interactive_learning(agent, training_data_file)
    return agent
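
The action_endpoint URLs in these snippets (for instance http://localhost:5055/webhook in Example #7) point at a separately running custom action server. A minimal sketch with the old rasa_core_sdk follows; the action name, slot, and reply text are hypothetical:

# Hypothetical actions.py for the action server behind http://localhost:5055/webhook,
# started e.g. with:  python -m rasa_core_sdk.endpoint --actions actions
from rasa_core_sdk import Action


class ActionCheckMood(Action):  # hypothetical custom action

    def name(self):
        return "action_check_mood"

    def run(self, dispatcher, tracker, domain):
        mood = tracker.get_slot("mood")  # assumes a 'mood' slot in the domain
        dispatcher.utter_message("Thanks, I noted that you feel {}.".format(mood))
        return []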
Example #8
def train_agent(interpreter,
                domain_file="domain.yml",
                training_file='data/stories.md'):

    action_endpoint = EndpointConfig('http://localhost:5055/webhook')
    policies = [MemoizationPolicy(max_history=3),
                KerasPolicy(max_history=3, epochs=10, batch_size=10)]
    agent = Agent(domain_file, policies=policies,
                  interpreter=interpreter,
                  action_endpoint=action_endpoint)

    stories = agent.load_data(training_file)
    agent.train(stories)
    interactive.run_interactive_learning(agent, training_file)

    return agent
Example #9
def run_restaurant_online(interpreter,
                          domain_file='restaurant_domain.yml',
                          training_data_file='data/stories.md'):

    action_endpoint = EndpointConfig(url="http://localhost:5055/webhook")
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(),
                            KerasPolicy()],
                  interpreter=interpreter,
                  action_endpoint=action_endpoint)
    training_data = agent.load_data(training_data_file)
    agent.train(training_data,
                batch_size=50,
                epochs=200,
                max_training_samples=300)
    interactive.run_interactive_learning(agent)
    return agent
Example #10
def run_weather_online(
    interpreter,
    domain_file="/home/saradindu/dev/Work-II/Happsales/assistant_domain.yml",
    training_data_file='/home/saradindu/dev/Work-II/Happsales/data/stories.md'
):
    action_endpoint = EndpointConfig(url="http://localhost:5055/webhook")
    agent = Agent(domain_file,
                  policies=[
                      MemoizationPolicy(max_history=2),
                      KerasPolicy(max_history=3, epochs=3, batch_size=50)
                  ],
                  interpreter=interpreter,
                  action_endpoint=action_endpoint)

    data = agent.load_data(training_data_file)
    agent.train(data)
    interactive.run_interactive_learning(agent, training_data_file)
    return agent
Example #11
def run_weather_online(input_channel,
                       interpreter,
                       domain_file="weather_domain.yml",
                       training_data_file='data/stories.md'):

    #policies2 = policy_config.load("config.yml")
    action_endpoints = EndpointConfig(url="http://localhost:5055/webhook")
    agent = Agent(
        "weather_domain.yml",
        interpreter=interpreter,
        policies=[MemoizationPolicy(),
                  KerasPolicy(epochs=200, batch_size=50)],
        action_endpoint=action_endpoints)
    #data = asyncio.run(agent.load_data(training_data_file))
    data = agent.load_data(training_data_file)
    agent.train(data)
    interactive.run_interactive_learning(agent, training_data_file)
    return agent
Example #12
def do_interactive_learning(cmdline_args, stories, additional_arguments):
    _endpoints = AvailableEndpoints.read_endpoints(cmdline_args.endpoints)
    _interpreter = NaturalLanguageInterpreter.create(cmdline_args.nlu,
                                                     _endpoints.nlu)

    if cmdline_args.core:
        if cmdline_args.finetune:
            raise ValueError("--core can only be used without "
                             "--finetune flag.")

        logger.info("Loading a pre-trained model. This means that "
                    "all training-related parameters will be ignored.")

        _broker = PikaProducer.from_endpoint_config(_endpoints.event_broker)
        _tracker_store = TrackerStore.find_tracker_store(
            None, _endpoints.tracker_store, _broker)

        _agent = Agent.load(cmdline_args.core,
                            interpreter=_interpreter,
                            generator=_endpoints.nlg,
                            tracker_store=_tracker_store,
                            action_endpoint=_endpoints.action)
    else:
        if cmdline_args.out:
            model_directory = cmdline_args.out
        else:
            model_directory = tempfile.mkdtemp(suffix="_core_model")

        _agent = train_dialogue_model(cmdline_args.domain, stories,
                                      model_directory, _interpreter,
                                      _endpoints, cmdline_args.dump_stories,
                                      cmdline_args.config[0], None,
                                      additional_arguments)

    interactive.run_interactive_learning(
        _agent,
        stories,
        finetune=cmdline_args.finetune,
        skip_visualization=cmdline_args.skip_visualization)
Example #13
File: train.py  Project: xtutran/rasa_core
def do_interactive_learning(cmdline_args, stories, additional_arguments):
    _endpoints = AvailableEndpoints.read_endpoints(cmdline_args.endpoints)
    _interpreter = NaturalLanguageInterpreter.create(cmdline_args.nlu,
                                                     _endpoints.nlu)

    if (isinstance(cmdline_args.config, list)
            and len(cmdline_args.config) > 1):
        raise ValueError("You can only pass one config file at a time")
    if cmdline_args.core and cmdline_args.finetune:
        raise ValueError("--core can only be used without --finetune flag.")
    elif cmdline_args.core:
        logger.info("loading a pre-trained model. "
                    "all training-related parameters will be ignored")

        _broker = PikaProducer.from_endpoint_config(_endpoints.event_broker)
        _tracker_store = TrackerStore.find_tracker_store(
            None, _endpoints.tracker_store, _broker)

        _agent = Agent.load(cmdline_args.core,
                            interpreter=_interpreter,
                            generator=_endpoints.nlg,
                            tracker_store=_tracker_store,
                            action_endpoint=_endpoints.action)
    else:
        if not cmdline_args.out:
            raise ValueError("you must provide a path where the model "
                             "will be saved using -o / --out")

        _agent = train_dialogue_model(cmdline_args.domain, stories,
                                      cmdline_args.out, _interpreter,
                                      _endpoints, cmdline_args.dump_stories,
                                      cmdline_args.config[0], None,
                                      additional_arguments)
    interactive.run_interactive_learning(
        _agent,
        stories,
        finetune=cmdline_args.finetune,
        skip_visualization=cmdline_args.skip_visualization)
Example #14
            raise ValueError("--core can only be used together with the"
                             "--interactive flag.")
        elif cmdline_args.finetune:
            raise ValueError("--core can only be used together with the"
                             "--interactive flag and without --finetune flag.")
        else:
            logger.info("loading a pre-trained model. ",
                        "all training-related parameters will be ignored")
        _agent = Agent.load(cmdline_args.core,
                            interpreter=_interpreter,
                            generator=_endpoints.nlg,
                            tracker_store=_tracker_store,
                            action_endpoint=_endpoints.action)
    else:
        if not cmdline_args.out:
            raise ValueError("you must provide a path where the model "
                             "will be saved using -o / --out")
        _agent = train_dialogue_model(cmdline_args.domain, stories,
                                      cmdline_args.out, _interpreter,
                                      _endpoints, cmdline_args.history,
                                      cmdline_args.dump_stories,
                                      cmdline_args.config,
                                      additional_arguments)

    if cmdline_args.interactive:
        interactive.run_interactive_learning(
            _agent,
            stories,
            finetune=cmdline_args.finetune,
            skip_visualization=cmdline_args.skip_visualization)
Example #15
import asyncio
import logging

import rasa.utils
from rasa_core import utils
from rasa_core.training import interactive

logger = logging.getLogger(__name__)

if __name__ == '__main__':
    rasa.utils.configure_colored_logging(loglevel="INFO")
    loop = asyncio.get_event_loop()
    logger.info("This example does not include NLU data."
                "Please specify the desired intent with a preceding '/', e.g."
                "'/greet' .")
    loop.run_until_complete(
        interactive.run_interactive_learning("data/stories.md",
                                             server_args={
                                                 "domain": "domain.yml",
                                                 "out": "models/dialogue",
                                                 "stories": "data/stories.md",
                                                 "config": ['config.yml']
                                             }))
Example #16
    def train_interactive(self):
        self.train_nlu()
        self.agent = self.train_dialogue()

        return interactive.run_interactive_learning(self.agent)
Example #17
import logging

from rasa_core import utils, train
from rasa_core.training import interactive

logger = logging.getLogger(__name__)

def train_agent():
    return train.train_dialogue_model(domain_file='weather_domain.yml',
                                      stories_file='./data/stories.md',
                                      output_path='./models/dialogue',
                                      policy_config='policy_config.yml')


if __name__ == '__main__':
    utils.configure_colored_logging(loglevel="INFO")
    agent = train_agent()
    interactive.run_interactive_learning(agent=agent, stories='./data/stories.md')
Example #18
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import logging

from rasa_core import utils, train, run
from rasa_core.training import interactive

logger = logging.getLogger(__name__)


def train_agent():
    return train.train_dialogue_model(domain_file="./domain.yml",
                                      stories_file="./data/dialogue/stories.md",
                                      output_path="./models/dialogue/",
                                      policy_config="./policies.yml"
                                      )


if __name__ == '__main__':
    utils.configure_colored_logging(loglevel="INFO")
    agent = train_agent()
    interactive.run_interactive_learning(agent, "./data/dialogue/stories.md")
Example #19
import logging

from rasa_core import utils, train
from rasa_core.training import interactive

logger = logging.getLogger(__name__)


def train_agent():
    return train.train_dialogue_model(domain_file="domain.yml",
                                      stories_file="data/stories.md",
                                      output_path="models/dialogue",
                                      kwargs={"batch_size": 50,
                                              "epochs": 200,
                                              "max_training_samples": 300
                                              })


if __name__ == '__main__':
    utils.configure_colored_logging(loglevel="INFO")
    agent = train_agent()
    interactive.run_interactive_learning(agent)
Example #20
    if cmdline_args.core:
        if not cmdline_args.interactive:
            raise ValueError("--core can only be used together with the"
                             "--interactive flag.")
        elif cmdline_args.finetune:
            raise ValueError("--core can only be used together with the"
                             "--interactive flag and without --finetune flag.")
        else:
            logger.info("loading a pre-trained model. ",
                        "all training-related parameters will be ignored")
        _agent = Agent.load(cmdline_args.core,
                            interpreter=_interpreter,
                            generator=_endpoints.nlg,
                            action_endpoint=_endpoints.action)
    else:
        if not cmdline_args.out:
            raise ValueError("you must provide a path where the model "
                             "will be saved using -o / --out")
        _agent = train_dialogue_model(cmdline_args.domain,
                                      stories,
                                      cmdline_args.out,
                                      _interpreter,
                                      _endpoints,
                                      cmdline_args.history,
                                      cmdline_args.dump_stories,
                                      additional_arguments)

    if cmdline_args.interactive:
        interactive.run_interactive_learning(_agent, finetune=cmdline_args.finetune)