def run_bot_online(interpreter,
                   domain_file="domain.yml",
                   training_data_file='data/stories.md'):
    """Train the bot on its stories, then start an interactive learning session.

    :param interpreter: NLU interpreter used to parse user messages
    :param domain_file: path to the domain definition file
    :param training_data_file: path to the story (training data) file
    :return: the trained Agent
    """
    # Custom actions are served by a separate action server at this webhook.
    endpoint = EndpointConfig(url="http://localhost:5055/webhook")

    dialogue_policies = [
        MemoizationPolicy(max_history=3),
        KerasPolicy(max_history=5, epochs=300, batch_size=50),
    ]
    agent = Agent(domain_file,
                  policies=dialogue_policies,
                  interpreter=interpreter,
                  action_endpoint=endpoint)

    stories = agent.load_data(training_data_file)
    agent.train(stories)
    interactive.run_interactive_learning(agent, training_data_file)
    return agent
# Example #2 (score: 0) — snippet separator
def run_weather_online(interpreter, domain_file="weather_domain.yml", training_data_file='./data/stories.md'):
    """Train the weather bot and launch interactive learning.

    :param interpreter: NLU interpreter
    :param domain_file: domain definition file
    :param training_data_file: story training data
    :return: the trained Agent
    """
    # NOTE(review): this webhook uses port 5005 while the other helpers in
    # this file use 5055 — confirm which action server is intended.
    endpoint = EndpointConfig(url="http://localhost:5005/webhook")

    policies = [
        MemoizationPolicy(max_history=2),
        KerasPolicy(max_history=3, epochs=3, batch_size=50),
    ]
    agent = Agent(domain_file,
                  policies=policies,
                  interpreter=interpreter,
                  action_endpoint=endpoint)

    stories = agent.load_data(training_data_file)
    agent.train(stories)
    interactive.run_interactive_learning(agent, training_data_file)
    return agent
# Example #3 (score: 0) — snippet separator
def train_interactive():
    """Train the agent from the bundled data and run interactive learning.

    Paths are resolved relative to this module's directory; the action
    server URL comes from the module-level ``actionIP`` constant.

    :return: the trained Agent
    """
    here = os.path.dirname(os.path.realpath(__file__))
    nlu_interpreter = RasaNLUInterpreter(here + '/agent-data/models/nlu/default/current')
    stories_file = here + '/agent-data/data/stories.md'
    endpoint = EndpointConfig(url=actionIP)
    agent = Agent('domain.yml',
                  policies=[MemoizationPolicy(max_history=5), KerasPolicy()],
                  interpreter=nlu_interpreter,
                  action_endpoint=endpoint)
    training_data = agent.load_data(stories_file)
    agent.train(training_data)
    interactive.run_interactive_learning(agent, stories_file)
    return agent
def run_laliga_online(interpreter,
                      domain_file="./LaLiga_bot/Laliga_domain.yml",
                      training_data_file='./LaLiga_bot/data/stories.md'):
    """Load the pre-trained LaLiga dialogue model and run interactive learning.

    Fix: the disabled "train from scratch" code was kept inside a bare
    mid-function string literal — a no-op expression that reads like a
    docstring and misleads the reader; it is now a real comment block.

    :param interpreter: NLU interpreter
    :param domain_file: domain definition (only needed when training from scratch)
    :param training_data_file: story file passed to interactive learning
    :return: the loaded Agent
    """
    action_endpoint = EndpointConfig(url="http://localhost:5055/webhook")
    # To train from scratch instead of loading the saved model, use:
    #   agent = Agent(domain_file,
    #                 policies=[MemoizationPolicy(max_history=2),
    #                           KerasPolicy(max_history=3, epochs=3, batch_size=50)],
    #                 interpreter=interpreter,
    #                 action_endpoint=action_endpoint)
    #   data = agent.load_data(training_data_file)
    #   agent.train(data)
    # NOTE(review): the model path below uses Windows-style separators —
    # confirm it resolves correctly on POSIX systems.
    agent = Agent.load(r'.\LaLiga_bot\models\dialogue', interpreter=interpreter, action_endpoint=action_endpoint)
    interactive.run_interactive_learning(agent, training_data_file)
    return agent
def run_chatbot_online(interpreter,
                       domain_file="chatbot_domain.yml",
                       training_data_file='data/stories.md'):
    """Train the chatbot on its stories and start an interactive learning session.

    Policy settings: max_history is the number of dialogue states the bot
    remembers, epochs is the number of forward/backward training passes,
    and batch_size is the number of training examples used per update.

    :param interpreter: NLU interpreter
    :param domain_file: domain definition file
    :param training_data_file: story training data
    :return: the trained Agent
    """
    endpoint = EndpointConfig(url="http://localhost:5055/webhook")
    policies = [
        MemoizationPolicy(max_history=2),
        KerasPolicy(max_history=3, epochs=3, batch_size=90),
    ]
    agent = Agent(domain_file,
                  policies=policies,
                  interpreter=interpreter,
                  action_endpoint=endpoint)

    stories = agent.load_data(training_data_file)
    agent.train(stories)
    interactive.run_interactive_learning(agent,
                                         training_data_file,
                                         skip_visualization=True)
    return agent
def run_weather_online(interpreter,
                       domain_file="domain.yml",
                       training_data_file='data/stories.md'):
    """Train the weather bot (with form support) and run interactive learning.

    NOTE(review): a function with this same name is defined earlier in the
    file; at import time this later definition shadows it.

    :param interpreter: NLU interpreter
    :param domain_file: domain definition file
    :param training_data_file: story training data
    :return: the trained Agent
    """
    endpoint = EndpointConfig(url="http://localhost:5055/webhook")
    policies = [
        MemoizationPolicy(max_history=5),
        KerasPolicy(max_history=5, epochs=4, batch_size=70),
        FormPolicy(),
    ]
    agent = Agent(domain_file,
                  policies=policies,
                  interpreter=interpreter,
                  action_endpoint=endpoint)

    training_data = agent.load_data(training_data_file)
    agent.train(training_data)
    interactive.run_interactive_learning(agent,
                                         training_data_file,
                                         skip_visualization=True)
    return agent
# Example #7 (score: 0) — snippet separator
def run_online(interpreter,
               domain_file="domain.yml",
               training_data_file='data/stories.md'):
    """Train the dialogue model, persist it to disk, then run interactive learning.

    :param interpreter: NLU interpreter
    :param domain_file: domain definition file
    :param training_data_file: story training data
    :return: the trained Agent
    """
    endpoint = EndpointConfig(url="http://localhost:5055/webhook")
    save_path = './models/dialogue'  # where the trained model is persisted
    agent = Agent(domain_file,
                  policies=[
                      MemoizationPolicy(),
                      KerasPolicy(max_history=3, epochs=100, batch_size=50),
                  ],
                  interpreter=interpreter,
                  action_endpoint=endpoint)
    # augmentation_factor controls how many augmented stories are generated.
    training_data = agent.load_data(training_data_file, augmentation_factor=50)
    agent.train(training_data)
    agent.persist(save_path)  # save the trained model
    interactive.run_interactive_learning(agent,
                                         training_data_file,
                                         skip_visualization=True)
    return agent
def run_cbeg(interpreter,
             domain_file="cbeg_domain.yml",
             training_data_file='data/stories.md'):
    """Train the CBEG bot and start an interactive learning session.

    Fix: the FallbackPolicy was constructed but never passed to the Agent,
    so it had no effect; it is now included in the policy list (consistent
    with run_trade_online and run_bot_online in this file).

    :param interpreter: NLU interpreter
    :param domain_file: domain definition file
    :param training_data_file: story training data
    :return: the trained Agent
    """
    action_endpoint = EndpointConfig(url="http://localhost:5055/webhook")
    # Trigger action_default_fallback when prediction confidence drops below 0.4.
    fallback = FallbackPolicy(fallback_action_name="action_default_fallback",
                              core_threshold=0.4,
                              nlu_threshold=0.4)
    agent = Agent(domain_file,
                  policies=[
                      MemoizationPolicy(max_history=2),
                      KerasPolicy(max_history=3, epochs=3, batch_size=50),
                      fallback
                  ],
                  interpreter=interpreter,
                  action_endpoint=action_endpoint)

    data = agent.load_data(training_data_file)
    agent.train(data)
    interactive.run_interactive_learning(agent, training_data_file)
    return agent
# Example #9 (score: 0) — snippet separator
def run_bot_online(interpreter, domain_file, training_data_file):
    """Train the bot with memoization, Keras, fallback, and form policies,
    then launch interactive learning.

    :param interpreter: NLU interpreter
    :param domain_file: domain definition file (required, no default)
    :param training_data_file: story training data (required, no default)
    :return: the trained Agent
    """
    endpoint = EndpointConfig(url="http://localhost:5055/webhook")

    # Hand control to action_default_fallback when confidence drops below 0.3.
    fallback = FallbackPolicy(fallback_action_name="action_default_fallback",
                              core_threshold=0.3,
                              nlu_threshold=0.3)

    policy_stack = [
        MemoizationPolicy(max_history=6),
        KerasPolicy(max_history=6, epochs=200),
        fallback,
        FormPolicy(),
    ]
    agent = Agent(domain=domain_file,
                  policies=policy_stack,
                  interpreter=interpreter,
                  action_endpoint=endpoint)

    stories = agent.load_data(training_data_file)
    agent.train(stories)
    interactive.run_interactive_learning(agent, training_data_file)
    return agent
def run_trade_online(interpreter,
                     domain_file=Path.TRADE_DOMAIN.value,
                     training_data_file=Path.STORIES.value):
    """Train the trade bot with a fallback policy and run interactive learning.

    :param interpreter: NLU interpreter
    :param domain_file: domain definition (default from the project Path enum)
    :param training_data_file: story data (default from the project Path enum)
    :return: the trained Agent
    """
    endpoint = EndpointConfig(url="http://localhost:5055/webhook")
    # Fall back to action_default_fallback when confidence is below 0.5.
    fallback = FallbackPolicy(fallback_action_name="action_default_fallback",
                              core_threshold=0.5,
                              nlu_threshold=0.5)
    agent = Agent(domain_file,
                  policies=[
                      MemoizationPolicy(max_history=2),
                      KerasPolicy(max_history=3, epochs=3, batch_size=50),
                      fallback,
                  ],
                  interpreter=interpreter,
                  action_endpoint=endpoint)

    stories = agent.load_data(training_data_file)
    agent.train(stories)
    interactive.run_interactive_learning(agent, training_data_file)
    return agent
# Example #11 (score: 0) — snippet separator
def run_recipe_online(interpreter,
                      domain_file="recipes_domain.yml",
                      training_data_file="data/stories.md"):
    """Train the recipe bot on its stories and launch interactive learning.

    The agent processes stories and uses the given Rasa interpreter for
    input analysis; custom actions run on a separate server reached via
    the webhook endpoint below.

    :param interpreter: NLU interpreter
    :param domain_file: domain definition file
    :param training_data_file: story training data
    :return: the trained Agent
    """
    endpoint = EndpointConfig(url="http://localhost:5055/webhook")

    policies = [
        MemoizationPolicy(max_history=5),
        KerasPolicy(max_history=5, epochs=6, batch_size=150),
    ]
    agent = Agent(domain_file,
                  policies=policies,
                  interpreter=interpreter,
                  action_endpoint=endpoint)

    stories = agent.load_data(training_data_file)
    agent.train(stories)
    interactive.run_interactive_learning(agent, training_data_file)

    return agent