def run_eventbot_online(input_channel,
                        interpreter,
                        domain_file="./data/domain.yml",
                        training_data_file='./data/stories'):
    try:
        KnowledgeGraph()
    except ServiceUnavailable:
        print('Neo4j connection failed. Program stopped.')
        return

    fallback = FallbackPolicy(fallback_action_name="utter_not_understood",
                              core_threshold=0.3,
                              nlu_threshold=0.6)
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(),
                            KerasPolicy(), fallback],
                  interpreter=interpreter)

    data = agent.load_data(training_data_file)
    agent.train_online(data,
                       input_channel=input_channel,
                       max_history=2,
                       batch_size=50,
                       epochs=200,
                       max_training_samples=300)

    return agent
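
A minimal invocation sketch for the online-training helper above, assuming the legacy rasa_core console channel and an already trained NLU model; the model path below is hypothetical.

# Sketch only: assumes legacy rasa_core (<1.0) with ConsoleInputChannel and a
# previously trained NLU model at a hypothetical path.
from rasa_core.channels.console import ConsoleInputChannel
from rasa_core.interpreter import RasaNLUInterpreter

if __name__ == '__main__':
    nlu_interpreter = RasaNLUInterpreter('./models/nlu/default/current')  # hypothetical path
    run_eventbot_online(ConsoleInputChannel(), nlu_interpreter)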
Example #2
    def train_dialogue(self, domain_file, model_path, training_data_file):
        fallback = FallbackPolicy(fallback_action_name="utter_default",
                                  core_threshold=0.2, nlu_threshold=0.5)
        featurizer = MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(), max_history=10)
        self.agent = Agent(domain_file,
                           policies=[MemoizationPolicy(max_history=10),
                                     KerasPolicy(featurizer, epochs=90, batch_size=20,
                                                 validation_split=0.1),
                                     fallback])
        data = self.agent.load_data(training_data_file)
        self.agent.train(data)
        self.agent.persist(model_path)
Example #3
def train_dialog(dialog_training_data_file,
                 domain_file,
                 path_to_model='models/dialogue'):
    logging.basicConfig(level='INFO')
    logging.info(dialog_training_data_file)

    # Action to be called if the confidence of intent / action is below the threshold
    fallback = FallbackPolicy(
        fallback_action_name="action_default_fallback",
        core_threshold=0.3,  # confidence threshold for action predictions
        nlu_threshold=0.3)   # confidence threshold for intent predictions

    # Configure the action server webhook endpoint
    core_endpoint_config = EndpointConfig(url='http://localhost:5055/webhook')

    # Configuring the agent
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=2), fallback],
                  interpreter=RasaNLUInterpreter('models/nlu/default/chat'),
                  action_endpoint=core_endpoint_config)

    # Load the stories for training the dialog
    training_data = agent.load_data(dialog_training_data_file)

    # Start training the dialog
    agent.train(training_data)

    # Persist the trained dialogue model
    agent.persist(path_to_model)

    # Run interactive learning
    # interactive.run_interactive_learning(agent, dialog_training_data_file, skip_visualization=True)
    return agent
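
The agent above expects a custom action server listening on http://localhost:5055/webhook. A minimal sketch of one such action, assuming rasa_core_sdk; the action and template names are hypothetical.

# actions.py -- sketch of a custom action served to the agent's action_endpoint;
# assumes rasa_core_sdk is installed, and the action/template names are hypothetical.
from rasa_core_sdk import Action

class ActionGreetUser(Action):
    def name(self):
        # the name listed under "actions:" in the domain file
        return "action_greet_user"

    def run(self, dispatcher, tracker, domain):
        # reply with a response template assumed to be defined in the domain
        dispatcher.utter_template("utter_greet", tracker)
        return []

# The action server can then be started with:
#   python -m rasa_core_sdk.endpoint --actions actions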
Example #4
def initChatBot():
    print("STEP 1:Training the NLU Model")
    #Training the NLU MODEL:
    # loading the nlu training samples
    training_data = load_data("NLU_Train.json")
    # trainer to create the pipeline
    trainer = Trainer(config.load("NLU_model_Config.yml"))
    # training the model
    interpreter = trainer.train(training_data)
    # store the trained model for later use
    model_directory = trainer.persist("./models/nlu",
                                      fixed_model_name="current")
    print("Done")

    print("STEP 2: Training the CORE model")
    fallback = FallbackPolicy(fallback_action_name="utter_default",
                              core_threshold=0.2,
                              nlu_threshold=0.1)

    agent = Agent(domain='restaurant_domain.yml',
                  policies=[
                      MemoizationPolicy(),
                      KerasPolicy(validation_split=0.0, epochs=200), fallback
                  ])
    training_data = agent.load_data('Core_Stories.md')
    agent.train(training_data)
    agent.persist('models/dialogue')
    print("Done")
    return model_directory
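
A sketch of loading the two models persisted above and handling a single message, assuming the legacy Agent.load / handle_text API; the NLU model path is inferred from the persist call above.

# Sketch only: reload the persisted NLU and dialogue models and answer one message.
from rasa_core.agent import Agent
from rasa_core.interpreter import RasaNLUInterpreter

def chat_once(text):
    interpreter = RasaNLUInterpreter('./models/nlu/default/current')
    agent = Agent.load('models/dialogue', interpreter=interpreter)
    return agent.handle_text(text)  # list of bot responses

# print(chat_once("hello"))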
Example #5
def train_core(domain_file="robot/config/domain.yml",
               model_path="robot/models/dialogue",
               training_data_file="robot/config/stories.md"):
    from rasa_core.featurizers import (MaxHistoryTrackerFeaturizer,
                                       BinarySingleStateFeaturizer)
    # fallback = FallbackPolicy(fallback_action_name="action_default_fallback",
    #                           core_threshold=0.9,
    #                           nlu_threshold=0.9)
    fallback = FallbackPolicy(fallback_action_name="action_default_custom",
                              core_threshold=0.8,
                              nlu_threshold=0.8)

    agent = Agent(
        domain_file,
        policies=[
            MemoizationPolicy(max_history=5),
            KerasPolicy(
                MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(),
                                            max_history=5)), fallback
        ])
    training_data = agent.load_data(training_data_file)
    # Train the agent's dialogue policies
    agent.train(training_data, epochs=500)
    agent.persist(model_path)
    return agent
Example #6
def train_dialogue(domain_file='sample_configs/leave_domain.yml',
                   model_path='./models/dialogue',
                   training_data_file='./data/stories.md'):
    agent = Agent(domain_file,
                  policies=[FallbackPolicy(), MemoizationPolicy(),
                            KerasPolicy(max_history=3, epochs=200, batch_size=50)])
    data = agent.load_data(training_data_file)

    agent.train(data)

    agent.persist(model_path)
    return agent
Example #7
def TrainCore():
    fallback = FallbackPolicy(fallback_action_name="utter_unclear",
                              core_threshold=0.2,
                              nlu_threshold=0.1)

    agent = Agent('domain.yml',
                  policies=[MemoizationPolicy(),
                            KerasPolicy(), fallback])
    training_data = agent.load_data('stories.md')

    agent.train(training_data, validation_split=0.0, epochs=500)
    agent.persist('models/dialogue')
Example #8
def train_dialog(dialog_training_data_file, domain_file="domain.yml",
                 path_to_model='models/dialogue'):
    logging.basicConfig(level='INFO')
    fallback = FallbackPolicy(fallback_action_name="action_handle_unclear",
                              core_threshold=0.2, nlu_threshold=0.1)

    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=1),
                            KerasPolicy(epochs=300, batch_size=20),
                            fallback])
    training_data = agent.load_data(dialog_training_data_file)
    agent.train(
        training_data,
        augmentation_factor=50,
        validation_split=0.2)
    agent.persist(path_to_model)
Example #9
def train_dialogue(domain_file='./domain/domain.yml',
                   model_path='./models/dialogue',
                   training_data_file='./data/stories.md'):
    fallback = FallbackPolicy(fallback_action_name="utter_unclear",
                              core_threshold=0.2,
                              nlu_threshold=0.65)
    agent = Agent(
        domain_file,
        policies=[MemoizationPolicy(max_history=10),
                  KerasPolicy(), fallback])
    data = agent.load_data(training_data_file)
    agent.train(data)
    agent.persist(model_path)
    return agent
Example #10
def train_dialogue(domain_file='restaurant_domain.yml',
                   model_path='./models/dialogue',
                   training_data_file='./data/stories.md'):
    fallback = FallbackPolicy(
        # fallback_action_name must be the name of an action the bot can run,
        # not a literal message; the built-in default fallback is used here
        fallback_action_name="action_default_fallback",
        core_threshold=0.2,
        nlu_threshold=0.1)
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(),
                            KerasPolicy(), fallback])
    training_data = agent.load_data(training_data_file)
    agent.train(training_data)
    agent.persist(model_path)
    return agent
Example #11
def train_dialogue(domain_file='./backend/domain.yml',
                   model_path='./backend/models/dialogue',
                   training_data_file='./backend/stories.md'):
    fallback = FallbackPolicy(fallback_action_name="utter_unclear",
                              core_threshold=0.1, nlu_threshold=0.1)
    agent = Agent(domain_file, policies=[fallback, MemoizationPolicy(), KerasPolicy()])
    data = agent.load_data(training_data_file)
    agent.train(data,
                epochs=200,
                batch_size=50,
                validation_split=0.2)

    agent.persist(model_path)
    return agent
Example #12
def train_dialogue(domain_file='restaurant_domain.yml',
                   model_path='./models/dialogue',
                   training_data_file='./data/stories.md'):
    fallback = FallbackPolicy(
        fallback_action_name="action_default_fallback",
        core_threshold=0.3,
        nlu_threshold=0.3,
    )
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(),
                            KerasPolicy(), fallback])
    data = agent.load_data(training_data_file)
    agent.train(data, epochs=300, batch_size=50, validation_split=0.2)
    agent.persist(model_path)
    return agent
Example #13
def train_dialogue_model(domain_file, stories_file, output_path,
                         nlu_model_path=None,
                         endpoints=None,
                         max_history=None,
                         dump_flattened_stories=False,
                         kwargs=None):
    if not kwargs:
        kwargs = {}

    action_endpoint = utils.read_endpoint_config(endpoints, "action_endpoint")

    fallback_args, kwargs = utils.extract_args(kwargs,
                                               {"nlu_threshold",
                                                "core_threshold",
                                                "fallback_action_name"})

    policies = [
        FallbackPolicy(
                fallback_args.get("nlu_threshold",
                                  DEFAULT_NLU_FALLBACK_THRESHOLD),
                fallback_args.get("core_threshold",
                                  DEFAULT_CORE_FALLBACK_THRESHOLD),
                fallback_args.get("fallback_action_name",
                                  DEFAULT_FALLBACK_ACTION)),
        MemoizationPolicy(
                max_history=max_history),
        KerasPolicy(
                MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(),
                                            max_history=max_history))]

    agent = Agent(domain_file,
                  action_endpoint=action_endpoint,
                  interpreter=nlu_model_path,
                  policies=policies)

    data_load_args, kwargs = utils.extract_args(kwargs,
                                                {"use_story_concatenation",
                                                 "unique_last_num_states",
                                                 "augmentation_factor",
                                                 "remove_duplicates",
                                                 "debug_plots"})

    training_data = agent.load_data(stories_file, **data_load_args)
    agent.train(training_data, **kwargs)
    agent.persist(output_path, dump_flattened_stories)

    return agent
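
An example call showing how the kwargs dict is split by utils.extract_args into fallback, data-loading, and training arguments; paths and values here are hypothetical.

# Sketch of invoking the helper above with hypothetical paths and parameters.
agent = train_dialogue_model(
    domain_file="domain.yml",
    stories_file="data/stories.md",
    output_path="models/dialogue",
    nlu_model_path="models/nlu/default/current",
    endpoints="endpoints.yml",
    max_history=3,
    kwargs={
        "nlu_threshold": 0.3,        # consumed by FallbackPolicy
        "augmentation_factor": 20,   # consumed by agent.load_data
        "epochs": 200,               # passed through to agent.train
    })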
Example #14
def train_dialogue(domain_file='domain.yml',
                   model_path='./models/dialogue',
                   training_data_file='stories.md'):
    fallback = FallbackPolicy(fallback_action_name='utter_unclear',
                              core_threshold=0.2,
                              nlu_threshold=0.6)
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(),
                            KerasPolicy(), fallback])

    agent.train(training_data_file,
                epochs=300,
                batch_size=50,
                validation_split=0.2)

    agent.persist(model_path)
    return agent
Example #15
def run_online(input_channel,
               interpreter,
               domain_file="domain.yml",
               training_data_file='stories.md'):
    fallback = FallbackPolicy(fallback_action_name="utter_unclear",
                              core_threshold=0.2,
                              nlu_threshold=0.4)

    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(),
                            KerasPolicy(), fallback],
                  interpreter=interpreter)

    training_data = agent.load_data(training_data_file)
    agent.train_online(training_data, input_channel=input_channel, epochs=200)

    return agent
Example #16
def run_online_training(input_channel,
                        interpreter,
                        domain_file="domain.yml",
                        training_data_file='stories.md'):
    fallback = FallbackPolicy(fallback_action_name='utter_unclear',
                              core_threshold=0.2,
                              nlu_threshold=0.6)
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=2),
                            KerasPolicy(), fallback],
                  interpreter=interpreter)

    agent.train_online(training_data_file,
                       input_channel=input_channel,
                       batch_size=50,
                       epochs=200,
                       max_training_samples=300)

    return agent
Example #17
def train_dialog(dialog_training_data_file, domain_file, path_to_model='models/'):
    logging.basicConfig(level='INFO')
    fallback = FallbackPolicy(
        fallback_action_name="utter_default", core_threshold=0.3, nlu_threshold=0.3)

    # agent = Agent(domain_file,
    #           policies=[MemoizationPolicy(max_history=1),KerasPolicy(epochs=200,
    #     batch_size=20), fallback, FormPolicy])
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=1),
                            KerasPolicy(epochs=200, batch_size=20),
                            FormPolicy(), fallback])

    loop = asyncio.get_event_loop()
    data = loop.run_until_complete(agent.load_data(dialog_training_data_file))
    # training_data = agent.load_data(dialog_training_data_file)
    agent.train(
        data,
        augmentation_factor=50,
        validation_split=0.2)
    agent.persist(path_to_model)
Example #18
def train_dialogue(domain_file='./config/domain/domain.yml',
                   training_data_file='./config/stories/stories.md',
                   model_path='./models/dialogue'):
    fallback = FallbackPolicy(fallback_action_name="utter_default",
                              core_threshold=0.2,
                              nlu_threshold=0.1)
    agent = Agent(domain_file,
                  policies=[
                      MemoizationPolicy(),
                      KerasPolicy(), fallback,
                      FormPolicy(),
                      EmbeddingPolicy(epochs=100)
                  ])
    agent.visualize(training_data_file,
                    output_file="graph.html",
                    max_history=4)
    training_data = agent.load_data(
        training_data_file)  # augmentation_factor=0
    agent.train(training_data)
    agent.persist(model_path)
    return agent
Example #19
File: bot.py  Project: karthikcs/USTA-Bot
def train_dialogue(domain_file="bot_domain.yml",
                   model_path="models/dialogue",
                   training_data_file="data/bot_stories.md"):

    fallback = FallbackPolicy(fallback_action_name="utter_default",
                          core_threshold=0.2,
                          nlu_threshold=0.1)
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=3),
                            KerasPolicy(), fallback])

    training_data = agent.load_data(training_data_file)
    agent.train(
            training_data,
            epochs=400,
            batch_size=100,
            validation_split=0.2
    )

    agent.persist(model_path)
    return agent
Example #20
def run_interactive_training(input_channel,
                             interpreter,
                             domain_file="./data/domain.yml",
                             training_data_file='./data/stories'):
    try:
        NetworkGraph()
    except ServiceUnavailable:
        print('Neo4j connection failed. Program stopped.')
        return

    if interpreter == 'luis':
        interpreter = LuisInterpreter()
    elif interpreter == 'dialogflow':
        interpreter = DialogflowInterpreter()
    elif interpreter == 'witai':
        interpreter = WitInterpreter()
    elif interpreter == 'rasa':
        interpreter = 'rasa-nlu/models/rasa-nlu/default/socialcompanionnlu'
    else:
        return "Please provide one of these interpreters: luis, dialogflow, witai or rasa"

    fallback = FallbackPolicy(fallback_action_name="action_not_understood",
                              core_threshold=0.5,
                              nlu_threshold=0.35)
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(),
                            KerasPolicy(), fallback],
                  interpreter=interpreter)

    data = agent.load_data(training_data_file)
    agent.train_online(data,
                       input_channel=input_channel,
                       max_history=2,
                       batch_size=50,
                       epochs=200,
                       max_training_samples=300)

    agent.persist('./models/dialogue')
    return agent
Example #21
import logging
import warnings

import ruamel.yaml

warnings.simplefilter('ignore', ruamel.yaml.error.UnsafeLoaderWarning)

from rasa_core.agent import Agent
from rasa_core.policies import FallbackPolicy, KerasPolicy, MemoizationPolicy

logger = logging.getLogger(__name__)

# this will catch predictions the model isn't very certain about
# there is a threshold for the NLU predictions as well as the action predictions
fallback = FallbackPolicy(fallback_action_name="utter_unclear",
                          core_threshold=0.15,
                          nlu_threshold=0.1)


def train_dialogue():
    agent = Agent('domain.yml',
                  policies=[MemoizationPolicy(), KerasPolicy(), fallback])
    training_data = agent.load_data('stories.md')

    agent.train(training_data)

    agent.persist('models/dialogue')
    return agent


# def run_weather_bot():
Example #22
from rasa_core.policies import FallbackPolicy, KerasPolicy, MemoizationPolicy
from rasa_core.agent import Agent

# this will catch predictions the model isn't very certain about
# there is a threshold for the NLU predictions as well as the action predictions
fallback = FallbackPolicy(fallback_action_name="action_default_fallback",core_threshold=0.2,nlu_threshold=0.2)
agent = Agent('domain.yml', policies=[MemoizationPolicy(), KerasPolicy(), fallback])
training_data = agent.load_data('stories.md')
agent.train(
    training_data,
    validation_split=0.0
)
agent.persist('models/dialogue')

    
Example #23
from rasa_core.agent import Agent
from rasa_core.policies import FallbackPolicy, KerasPolicy, MemoizationPolicy

fallback = FallbackPolicy(core_threshold=0.2,
                          nlu_threshold=0.1,
                          fallback_action_name='utter_default')


def visualize(domain_file, training_data_file):
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(),
                            KerasPolicy(), fallback])
    agent.visualize(training_data_file,
                    output_file='graphs/graph.html',
                    max_history=2)


if __name__ == '__main__':
    visualize('domain.yml', 'data/stories.md')
Example #24
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

import logging

from rasa_core.agent import Agent
from rasa_core.policies import KerasPolicy, MemoizationPolicy, FallbackPolicy
#from rasa_core.policies.memoization import MemoizationPolicy

if __name__ == '__main__':
    logging.basicConfig(level='INFO')
    training_data_file = 'stories.md'
    model_path = './models/dialogue'
    fallback = FallbackPolicy(fallback_action_name='utter_unclear', core_threshold=0.2, nlu_threshold=0.6)
    agent = Agent('domain.yml', policies=[MemoizationPolicy(max_history=2), KerasPolicy(), fallback])

    agent.train(
        training_data_file,
        epochs=400,
        batch_size=20,
        validation_split=0.1)

    agent.persist(model_path)
Example #25
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from rasa_core.featurizers import (MaxHistoryTrackerFeaturizer,
                                   BinarySingleStateFeaturizer)

featurizer = MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(),
                                         max_history=5)

from rasa_core.policies import FallbackPolicy, KerasPolicy, MemoizationPolicy
from rasa_core.agent import Agent
fallback = FallbackPolicy(fallback_action_name="utter_error_scope_message",
                          core_threshold=0.5,
                          nlu_threshold=0.8)

from rasa_core import config as policy_config
policies = policy_config.load("policies.yml")

agent = Agent("subset_flightdomain.yml", policies=policies)
training_data = agent.load_data('stories.md')
agent.train(training_data)

agent.persist('models/dialogue_subset')
Example #26
import logging
from rasa_core import training
from rasa_core.actions import Action
from rasa_core.agent import Agent
from rasa_core.domain import Domain
from rasa_core.featurizers import MaxHistoryTrackerFeaturizer, BinarySingleStateFeaturizer
from rasa_core.interpreter import RegexInterpreter, RasaNLUInterpreter
from rasa_core.policies import FallbackPolicy, KerasPolicy, MemoizationPolicy, FormPolicy
fallback = FallbackPolicy(fallback_action_name="utter_default",
                          core_threshold=0.2,
                          nlu_threshold=0.1)
from rasa_core import config as policy_config
policies = policy_config.load("policy.yml")

# Contents of policy.yml:
#   - name: KerasPolicy
#     epochs: 100
#     max_history: 3
#   - name: MemoizationPolicy
#     max_history: 3
#   - name: FallbackPolicy
#     nlu_threshold: 0.1
#     core_threshold: 0.2
#     fallback_action_name: 'utter_default'
#   - name: FormPolicy


# Function
#------------