Example #1
0
def train_dialogue(domain_file, stories_file, model_dir):
    """Train a dialogue model from stories and write it to disk.

    Args:
        domain_file: path to the domain definition.
        stories_file: path to the training stories.
        model_dir: directory the trained model is persisted into.
    """
    # Assemble the agent with its policy ensemble.
    policy_ensemble = [KerasPolicy(), MemoizationPolicy(), FormPolicy()]
    agent = Agent(domain_file, policies=policy_ensemble)

    # Load the stories, fit the policies, then store the model.
    stories = agent.load_data(stories_file)
    agent.train(stories)
    agent.persist(model_dir)
def train_dialogue(domain_file='domain.yml',
                   model_path='./models/dialogue',
                   training_data_file='./data/stories.md'):
    """Train a dialogue model and persist it under model_path.

    Returns:
        The trained Agent.
    """
    policy_stack = [
        MemoizationPolicy(),
        KerasPolicy(max_history=5, epochs=100, batch_size=25),
        FormPolicy(),
    ]
    agent = Agent(domain_file, policies=policy_stack)

    stories = agent.load_data(training_data_file)
    agent.train(stories)
    agent.persist(model_path)
    return agent
Example #3
0
def train_dialogue(domain_file="domain.yml",
                   model_path="models/dialogue",
                   training_data_file="data/stories.md"):
    """Train a dialogue model (memoization + forms + jobPolicy) and persist it.

    NOTE(review): jobPolicy is presumably a project-specific policy class
    (it is not part of rasa_core) — confirm it is importable here.

    Returns:
        The trained Agent.
    """
    learned_policy = jobPolicy(batch_size=100,
                               epochs=400,
                               validation_split=0.2)
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=3),
                            FormPolicy(),
                            learned_policy])

    stories = agent.load_data(training_data_file)
    agent.train(stories)
    agent.persist(model_path)
    return agent
def run_interactive_online(interpreter,
                           domain_file="domain.yml",
                           training_data_file='stories.md'):
    """Train a dialogue model and start an interactive learning session.

    Args:
        interpreter: NLU interpreter used to parse user messages.
        domain_file: path to the domain definition.
        training_data_file: stories used for training and as the target
            file for stories recorded during interactive learning.

    Returns:
        The trained Agent.
    """
    # Custom actions are served from this local endpoint.
    action_endpoint = EndpointConfig(url="http://localhost:5005/webhook")

    policy_stack = [
        MemoizationPolicy(max_history=2),
        KerasPolicy(max_history=3, epochs=3, batch_size=50),
        FallbackPolicy(),
        FormPolicy(),
    ]
    agent = Agent(domain_file,
                  policies=policy_stack,
                  interpreter=interpreter,
                  action_endpoint=action_endpoint)

    stories = agent.load_data(training_data_file)
    agent.train(stories)

    # Hand control over to the interactive-learning loop.
    interactive.run_interactive_learning(agent, training_data_file)
    return agent
Example #5
0
    def default_policies(cls, fallback_args, max_history):
        # type: (Dict[Text, Any], int) -> List[Policy]
        """Load the default policy setup consisting of
        FallbackPolicy, MemoizationPolicy and KerasPolicy."""
        # Read fallback settings from the args, using the library
        # defaults for any key that is absent.
        nlu_threshold = fallback_args.get("nlu_threshold",
                                          DEFAULT_NLU_FALLBACK_THRESHOLD)
        core_threshold = fallback_args.get("core_threshold",
                                           DEFAULT_CORE_FALLBACK_THRESHOLD)
        fallback_action = fallback_args.get("fallback_action_name",
                                            DEFAULT_FALLBACK_ACTION)

        featurizer = MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(),
                                                 max_history=max_history)
        return [
            FallbackPolicy(nlu_threshold, core_threshold, fallback_action),
            MemoizationPolicy(max_history=max_history),
            KerasPolicy(featurizer),
            FormPolicy(),
        ]
Example #6
0
def run_bot_online(interpreter, domain_file, training_data_file):
    """Train the bot and launch an interactive online-learning session.

    Args:
        interpreter: NLU interpreter used to parse user messages.
        domain_file: path to the domain definition.
        training_data_file: stories used for training and for recording
            new stories during interactive learning.

    Returns:
        The trained Agent.
    """
    # Custom actions are served from this local endpoint.
    action_endpoint = EndpointConfig(url="http://localhost:5055/webhook")

    fallback = FallbackPolicy(fallback_action_name="action_default_fallback",
                              core_threshold=0.3,
                              nlu_threshold=0.3)

    agent = Agent(domain=domain_file,
                  policies=[MemoizationPolicy(max_history=6),
                            KerasPolicy(max_history=6, epochs=200),
                            fallback,
                            FormPolicy()],
                  interpreter=interpreter,
                  action_endpoint=action_endpoint)

    stories = agent.load_data(training_data_file)
    agent.train(stories)

    interactive.run_interactive_learning(agent, training_data_file)
    return agent
def train_dialogue(
    domain_file="domain.yml",
    model_path="./models/dialogue",
    training_data_file="./data/stories.md"):
    """Train a dialogue model with a 0.4-threshold fallback and persist it.

    Returns:
        The trained Agent.
    """
    fallback = FallbackPolicy(fallback_action_name="action_default_fallback",
                              core_threshold=0.4,
                              nlu_threshold=0.4)

    policy_stack = [
        FormPolicy(),
        MemoizationPolicy(),
        KerasPolicy(max_history=3, epochs=500, batch_size=50),
        fallback,
    ]
    agent = Agent(domain_file, policies=policy_stack)

    stories = agent.load_data(training_data_file)
    agent.train(stories)
    agent.persist(model_path)
    return agent
Example #8
0
def train_dialogue(domain_file="adobe_domain.yml",
                   model_path="models/dialogue",
                   training_data_file="data/stories.md"):
    """Train the Adobe-bot dialogue model and persist it.

    NOTE(review): AdobePolicy is presumably a project-specific policy class
    (not part of rasa_core) — confirm it is in scope here.

    Returns:
        The trained Agent.
    """
    fallback = FallbackPolicy(fallback_action_name="action_default_fallback",
                              core_threshold=0.3,
                              nlu_threshold=0.3)

    learned_policy = AdobePolicy(epochs=200, batch_size=32,
                                 validation_split=0.2,
                                 max_history=4)
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=5),
                            learned_policy,
                            fallback,
                            FormPolicy()])

    stories = agent.load_data(training_data_file)
    agent.train(stories)
    agent.persist(model_path)
    return agent
Example #9
0
def train_dialogue(
    domain_file='/home/saradindu/dev/Work-II/Happsales/assistant_domain.yml',
    model_path='/home/saradindu/dev/Work-II/Happsales/models/dialogue',
    training_data_file='/home/saradindu/dev/Work-II/Happsales/data/stories.md'
):
    """Train the Happsales assistant dialogue model and persist it.

    Returns:
        The trained Agent.
    """
    policy_stack = [
        MemoizationPolicy(),
        FormPolicy(),
        MappingPolicy(),
        FallbackPolicy(
            nlu_threshold=0.4,
            core_threshold=0.3,
            fallback_action_name="action_default_fallback"),
        KerasPolicy(max_history=3, epochs=200, batch_size=50),
    ]
    agent = Agent(domain_file, policies=policy_stack)

    stories = agent.load_data(training_data_file)
    agent.train(stories)
    agent.persist(model_path)
    return agent
Example #10
0
def train_dialogue_keras(domain_file="mobile_domain.yml",
                   model_path="models/dialogue_keras",
                   training_data_file="data/mobile_edit_story.md"):
    """Train the mobile-bot dialogue model (Keras variant) and persist it.

    NOTE(review): MobilePolicy is presumably a project-specific policy class
    (not part of rasa_core) — confirm it is in scope here.

    Returns:
        The trained Agent.
    """
    fallback = FallbackPolicy(
        fallback_action_name="action_unknown_intent",
        nlu_threshold=0.7,
        core_threshold=0.3
    )

    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=8),
                            MobilePolicy(epochs=100, batch_size=16, max_history=8),
                            FormPolicy(),
                            fallback])

    stories = agent.load_data(training_data_file)
    # Hold out 20% of the stories for validation during training.
    agent.train(stories, validation_split=0.2)

    agent.persist(model_path)
    return agent
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

import logging

from rasa_core.agent import Agent
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.form_policy import FormPolicy
from rasa_core.policies.memoization import MemoizationPolicy

if __name__ == '__main__':
    logging.basicConfig(level='INFO')

    training_data_file = './data/stories.md'
    model_path = './models/dialogue'

    # Memoization recalls exact training stories; the Keras policy
    # generalizes beyond them.
    agent = Agent('restaurant_domain.yml',
                  policies=[MemoizationPolicy(max_history=4),
                            KerasPolicy(max_history=5, epochs=500),
                            FormPolicy()])

    # Augment the stories, train, and persist the resulting model.
    training_data = agent.load_data(training_data_file, augmentation_factor=50)
    agent.train(training_data)
    agent.persist(model_path)
Example #12
0
 def create_policy(self, featurizer, priority):
     """Create a FormPolicy with the given priority (featurizer is unused here)."""
     return FormPolicy(priority=priority)
# Module-level training script: trains an NLU model and then (twice) a
# Core dialogue model for "agent 1".
warnings.simplefilter('ignore', yaml.error.UnsafeLoaderWarning)
logging.basicConfig(level='INFO')

'''
training the nlu
'''
# NLU pipeline configuration: the "tensorflow_embedding" pipeline preset.
args1 = {"pipeline": "tensorflow_embedding"}
conf1 = RasaNLUModelConfig(args1)
trainer1 = Trainer(conf1)

# NLU for agent 1: train on the markdown examples and persist the model.
training_data1 = load_data("./data1/nlu.md")
Interpreter1 = trainer1.train(training_data1)
model_directory1 = trainer1.persist('./models', fixed_model_name="ner_a1")

# Core for agent 1: train a dialogue model from stories and persist it.
domain_file = "domain1.yml"
training_data_file = './data1/stories.md'
model_path = './models/dialogue_agent_1'
agent = Agent(domain_file, policies=[MemoizationPolicy(max_history=3), KerasPolicy(max_history=3, epochs=500, batch_size=10), FormPolicy()])
data = agent.load_data(training_data_file)
agent.train(data)
agent.persist(model_path)
# NOTE(review): this second run rebuilds the agent with a different policy
# set and persists to the SAME model_path, overwriting the model saved
# just above — presumably only one configuration is intended; confirm.
agent = Agent(domain_file, policies=[MemoizationPolicy(), KerasPolicy(max_history=3, epochs=500, batch_size=50)])
data = agent.load_data(training_data_file)

agent.train(data)

agent.persist(model_path)

Example #14
0
 def create_policy(self, featurizer):
     """Create a plain FormPolicy; the featurizer argument is unused here."""
     return FormPolicy()
Example #15
0
def train_dialog(dialog_training_data_file, domain_file, path_to_model='models/'):
    """Train a dialogue model from story data and persist it.

    Args:
        dialog_training_data_file: path to the training stories.
        domain_file: path to the domain definition.
        path_to_model: directory the trained model is written to.

    Returns:
        The trained Agent (also persisted to path_to_model).
    """
    logging.basicConfig(level='INFO')

    fallback = FallbackPolicy(
        fallback_action_name="utter_default", core_threshold=0.3, nlu_threshold=0.3)

    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=1),
                            KerasPolicy(epochs=200, batch_size=20),
                            FormPolicy(),
                            fallback])

    # agent.load_data is a coroutine in this rasa_core version; drive it to
    # completion on the current event loop before training.
    loop = asyncio.get_event_loop()
    data = loop.run_until_complete(agent.load_data(dialog_training_data_file))

    agent.train(
        data,
        augmentation_factor=50,
        validation_split=0.2)
    agent.persist(path_to_model)
    # Return the trained agent for consistency with the other train_* helpers.
    return agent