Example #1
def train_bot():
    logging.basicConfig(level='INFO')

    training_data_file = './data/stories'
    model_path = './models/dialogue'

    fallback = FallbackPolicy(fallback_action_name="utter_not_understood",
                              core_threshold=0.3,
                              nlu_threshold=0.6)
    featurizer = MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(),
                                             max_history=5)
    agent = Agent('./data/domain.yml',
                  policies=[
                      MemoizationPolicy(max_history=5),
                      KerasPolicy(featurizer), fallback
                  ])

    training_data = agent.load_data(training_data_file)
    agent.train(training_data,
                augmentation_factor=50,
                epochs=500,
                batch_size=10,
                validation_split=0.2)

    agent.persist(model_path)
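
Once train_bot() has persisted the dialogue model, a quick smoke test could look like the sketch below, assuming the rasa_core 0.x API; the NLU model path is hypothetical, since train_bot() only trains the Core model.

from rasa_core.agent import Agent
from rasa_core.interpreter import RasaNLUInterpreter

# Hypothetical NLU model path; train_bot() above persists only './models/dialogue'.
interpreter = RasaNLUInterpreter('./models/nlu/default/current')
agent = Agent.load('./models/dialogue', interpreter=interpreter)
print(agent.handle_text("hello"))  # returns a list of bot responses in rasa_core 0.x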
Example #2
def run_eventbot_online(input_channel,
                        interpreter,
                        domain_file="./data/domain.yml",
                        training_data_file='./data/stories'):
    try:
        KnowledgeGraph()
    except ServiceUnavailable:
        print('Neo4j connection failed. Program stopped.')
        return

    fallback = FallbackPolicy(fallback_action_name="utter_not_understood",
                              core_threshold=0.3,
                              nlu_threshold=0.6)
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(),
                            KerasPolicy(), fallback],
                  interpreter=interpreter)

    data = agent.load_data(training_data_file)
    agent.train_online(data,
                       input_channel=input_channel,
                       max_history=2,
                       batch_size=50,
                       epochs=200,
                       max_training_samples=300)

    return agent
Example #3
def train_dialogue(project='Lambton'):

    domain_file = '../Chatbots/projects/' + project + '/domain.yml'
    training_data_file = "../Chatbots/projects/" + project + "/stories/stories.md"
    model_path = "../Chatbots/projects/" + project + "/models/dialogue"
    fallback = FallbackPolicy(fallback_action_name="utter_fallback",
                              core_threshold=0.5,
                              nlu_threshold=0.5)  #0.3

    #agent = Agent(domain_file, policies=[MemoizationPolicy(max_history=3), KerasPolicy(), fallback])
    #agent = Agent(domain_file, policies=[MemoizationPolicy(max_history=5), KerasPolicy(), fallback])
    agent = Agent(domain_file,
                  policies=[
                      AugmentedMemoizationPolicy(max_history=7),
                      SklearnPolicy(), fallback
                  ])
    training_data = agent.load_data(training_data_file)
    # ***  FASTER  ***
    # agent.train(
    #         training_data,
    #         #max_training_samples=500,
    #         epochs=300,
    #         batch_size=33,
    #         validation_split=0.2
    # )

    # *** Precise  ***
    agent.train(training_data,
                augmentation_factor=50,
                epochs=500,
                batch_size=10,
                validation_split=0.2)

    agent.persist(model_path)
    return agent
Example #4
def train_dialogue_model(domain_file,
                         stories_file,
                         output_path,
                         interpreter=None,
                         endpoints=AvailableEndpoints(),
                         dump_stories=False,
                         policy_config=None,
                         exclusion_percentage=None,
                         kwargs=None):
    if not kwargs:
        kwargs = {}

    policies = config.load(policy_config)

    agent = Agent(domain_file,
                  generator=endpoints.nlg,
                  action_endpoint=endpoints.action,
                  interpreter=interpreter,
                  policies=policies)

    data_load_args, kwargs = utils.extract_args(
        kwargs, {
            "use_story_concatenation", "unique_last_num_states",
            "augmentation_factor", "remove_duplicates", "debug_plots"
        })

    training_data = agent.load_data(stories_file,
                                    exclusion_percentage=exclusion_percentage,
                                    **data_load_args)
    agent.train(training_data, **kwargs)
    agent.persist(output_path, dump_stories)

    return agent
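
The utils.extract_args calls above split one kwargs dict into the arguments meant for load_data and the remainder passed on to train. Below is a minimal standalone sketch of that pattern; split_kwargs is a hypothetical stand-in, not the rasa_core helper itself.

def split_kwargs(kwargs, keys):
    # Split kwargs into (matching, remaining) by key name.
    matching = {k: v for k, v in kwargs.items() if k in keys}
    remaining = {k: v for k, v in kwargs.items() if k not in keys}
    return matching, remaining

data_load_args, train_args = split_kwargs(
    {"augmentation_factor": 20, "epochs": 300},
    {"use_story_concatenation", "unique_last_num_states",
     "augmentation_factor", "remove_duplicates", "debug_plots"})
# data_load_args == {'augmentation_factor': 20}; train_args == {'epochs': 300}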
Example #5
def train_dialogue(domain_file='chat_domain.yml',
                   model_path='./models/dialogue',
                   training_data_file='./data/stories.md'):
    print(training_data_file)

    # fallback policy initialized
    fallback = FallbackPolicy(fallback_action_name="action_default_fallback",
                              core_threshold=0.3,
                              nlu_threshold=0.3)

    agent = Agent(domain_file,
                  policies=[
                      MemoizationPolicy(),
                      KerasPolicy(max_history=3, epochs=600, batch_size=50),
                      fallback
                  ])
    # import training data
    data = agent.load_data(training_data_file)
    # generating diagram of story
    agent.visualize("data/stories.md", output_file="graph.html", max_history=3)
    # start training
    agent.train(data)

    agent.persist(model_path)
    return agent
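
A hypothetical driver for train_dialogue() above: agent.visualize() writes graph.html next to the script, so the story diagram can be opened right after training.

import webbrowser

agent = train_dialogue()
webbrowser.open("graph.html")  # inspect the story graph produced during training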
Example #6
def run_bot_online(interpreter,
                   domain_file="domain.yml",
                   training_data_file='data/stories.md'):
    '''
    This function trains the bot in an interactive manner
    :param interpreter: NLU Interpreter
    :param domain_file: Domain file
    :param training_data_file: Chat story board file
    :return: Agent
    '''

    action_endpoint = EndpointConfig(url="http://localhost:5055/webhook")

    agent = Agent(domain_file,
                  policies=[
                      MemoizationPolicy(max_history=3),
                      KerasPolicy(max_history=5, epochs=300, batch_size=50)
                  ],
                  interpreter=interpreter,
                  action_endpoint=action_endpoint)

    data = agent.load_data(training_data_file)
    agent.train(data)
    interactive.run_interactive_learning(agent, training_data_file)
    return agent
Example #7
def train_dialogue_model(domain_file,
                         stories_file,
                         output_path,
                         use_online_learning=False,
                         nlu_model_path=None,
                         max_history=None,
                         kwargs=None):
    if not kwargs:
        kwargs = {}

    agent = Agent(
        domain_file,
        policies=[MemoizationPolicy(max_history=max_history),
                  KerasPolicy()])

    data_load_args, kwargs = utils.extract_args(
        kwargs, {
            "use_story_concatenation", "unique_last_num_states",
            "augmentation_factor", "remove_duplicates", "debug_plots"
        })
    training_data = agent.load_data(stories_file, **data_load_args)

    if use_online_learning:
        if nlu_model_path:
            agent.interpreter = RasaNLUInterpreter(nlu_model_path)
        else:
            agent.interpreter = RegexInterpreter()
        agent.train_online(training_data,
                           input_channel=ConsoleInputChannel(),
                           model_path=output_path,
                           **kwargs)
    else:
        agent.train(training_data, **kwargs)

    agent.persist(output_path)
Example #8
def train_dialog(dialog_training_data_file,
                 domain_file,
                 path_to_model='models/dialogue'):
    logging.basicConfig(level='INFO')
    logging.info(dialog_training_data_file)

    # Action to be called if the confidence of intent / action is below the threshold
    fallback = FallbackPolicy(
        fallback_action_name="action_default_fallback",
        core_threshold=0.3,  # Define the threshold that you need to capture
        nlu_threshold=0.3)

    # Configuring the endpoint webhook
    core_endpoint_config = EndpointConfig(url='http://localhost:5055/webhook')

    # Configuring the agent
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=2), fallback],
                  interpreter=RasaNLUInterpreter('models/nlu/default/chat'),
                  action_endpoint=core_endpoint_config)

    # Load the stories for training the dialog
    training_data = agent.load_data(dialog_training_data_file)

    # Start training the dialog
    agent.train(training_data)

    # Save the trained model
    agent.persist(path_to_model)

    # Run interactive learning
    # interactive.run_interactive_learning(agent, dialog_training_data_file, skip_visualization=True)
    return agent
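
A hypothetical invocation of train_dialog(); paths are illustrative. The EndpointConfig above assumes a custom action server is already listening on localhost:5055, and 'models/nlu/default/chat' must point at a previously trained NLU model.

agent = train_dialog('data/stories.md', 'domain.yml')
print(agent.handle_text("hi"))  # rasa_core 0.x returns a list of response dicts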
Example #9
def initChatBot():
    print("STEP 1:Training the NLU Model")
    #Training the NLU MODEL:
    # loading the nlu training samples
    training_data = load_data("NLU_Train.json")
    # trainer to create the pipeline
    trainer = Trainer(config.load("NLU_model_Config.yml"))
    # training the model
    interpreter = trainer.train(training_data)
    # storing it for future use
    model_directory = trainer.persist("./models/nlu",
                                      fixed_model_name="current")
    print("Done")

    print("STEP 2: Training the CORE model")
    fallback = FallbackPolicy(fallback_action_name="utter_default",
                              core_threshold=0.2,
                              nlu_threshold=0.1)

    agent = Agent(domain='restaurant_domain.yml',
                  policies=[
                      MemoizationPolicy(),
                      KerasPolicy(validation_split=0.0, epochs=200), fallback
                  ])
    training_data = agent.load_data('Core_Stories.md')
    agent.train(training_data)
    agent.persist('models/dialogue')
    print("Done")
    return model_directory
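
initChatBot() leaves two artifacts: the NLU model under ./models/nlu and the dialogue model under models/dialogue. Here is a sketch of wiring them into a single conversational agent, assuming the rasa_core 0.x API:

from rasa_core.agent import Agent
from rasa_core.interpreter import RasaNLUInterpreter

nlu_model_directory = initChatBot()
agent = Agent.load('models/dialogue',
                   interpreter=RasaNLUInterpreter(nlu_model_directory))
print(agent.handle_text("I want to book a table"))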
Example #10
def train_dialog(dialog_training_data_file, domain_file, path_to_model='./models/dialogue'):
    """Train a rasa-core model and save it in a local path.

    Parameters:
    ----------
    path_to_model:              str
                                Path where the model should be stored
    dialog_training_data_file:  str
                                Path to the story files on which it will train
    domain_file:                str
                                Path to the file which describes the domain
    """

    # fallback policy used by the agent below (thresholds are illustrative)
    fallback = FallbackPolicy(fallback_action_name="action_default_fallback",
                              core_threshold=0.3,
                              nlu_threshold=0.3)

    # initialize the agent
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=2),
                            KerasPolicy(), fallback])

    # load data from stories
    training_data = agent.load_data(dialog_training_data_file)

    # train a new model
    agent.train(
        training_data,
        augmentation_factor=5,
        epochs=2,
        batch_size=10,
        validation_split=0.2)

    # save the model to a local path
    agent.persist(path_to_model)
Example #11
def train_dlg():
    print("=> Importing tensorflow...")
    import tensorflow as tf
    tf.logging.set_verbosity(tf.logging.ERROR)

    print("=> Importing rasa...")
    from rasa_core.agent import Agent
    from rasa_core.policies.memoization import MemoizationPolicy
    from rasa_core.policies.keras_policy import KerasPolicy

    # disable feature string compression so memoized states stay readable
    MemoizationPolicy.ENABLE_FEATURE_STRING_COMPRESSION = False
    """
    fallback = FallbackPolicy(fallback_action_name="utter_default",core_threshold=0.1,nlu_threshold=0.1)
    """

    agent = Agent(botpath.DOMAIN_FILE,
                  policies=[MemoizationPolicy(max_history=3),
                            KerasPolicy()])

    print("Training dialogue %s" % botpath.STORY_FILE)

    training_data = agent.load_data(botpath.STORY_FILE)

    agent.train(training_data,
                epochs=200,
                batch_size=50,
                max_training_samples=300)
    print("=> Saving Result to %s..." % botpath.DIALOGUE_PATH)

    shutil.rmtree(botpath.DIALOGUE_PATH, ignore_errors=True)
    agent.persist(botpath.DIALOGUE_PATH)
Example #12
def train_bot():
    training_data_file = './data/stories'
    model_path = './models/dialogue'
    domain_file = './data/domain.yml'

    # core_threshold: min confidence needed to accept an action predicted by Rasa Core
    # nlu_threshold: min confidence needed to accept an intent predicted by the interpreter (NLU)
    fallback = FallbackPolicy(fallback_action_name="action_not_understood",
                              core_threshold=0.5,
                              nlu_threshold=0.35)

    featurizer = MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(),
                                             max_history=3)
    agent = Agent(domain=domain_file,
                  policies=[
                      MemoizationPolicy(max_history=2),
                      KerasPolicy(featurizer), fallback
                  ])

    training_data = agent.load_data(training_data_file)
    agent.train(training_data,
                augmentation_factor=50,
                epochs=400,
                batch_size=50,
                validation_split=0.2)

    agent.persist(model_path)
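
The fallback_action_name passed above must be listed in the domain. If action_not_understood is a custom action rather than an utterance template, a sketch of it under the old rasa_core Action API (method names assumed from that era) might look like:

from rasa_core.actions.action import Action

class ActionNotUnderstood(Action):
    def name(self):
        return "action_not_understood"

    def run(self, dispatcher, tracker, domain):
        # Ask the user to rephrase when neither Core nor NLU is confident enough.
        dispatcher.utter_message("Sorry, I didn't understand that. Could you rephrase?")
        return []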
Example #13
def run_bot_cli(input_channel, interpreter,
                domain_file="./data/student_info_domain.yml",
                training_data_file='./data/stories.md'):

    # featurizer: encodes the last five dialogue turns as binary state vectors
    featurizer = MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(),
                                             max_history=5)

    # build the agent from the domain, the two dialogue policies and the NLU interpreter
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=5),
                            KerasPolicy(featurizer)],
                  interpreter=interpreter)

    # load the training stories
    training_data = agent.load_data(training_data_file)

    # training_data - the story data object created in the line above
    # input_channel - how the trainer receives its input
    # batch_size - number of samples per model update
    # epochs - number of training passes
    # validation_split - fraction of the training data used as validation data
    # augmentation_factor - how many dialogue stories are randomly glued together;
    #     the more stories you have, the higher you want the augmentation factor
    agent.train_online(training_data,
                       input_channel=input_channel,
                       batch_size=35,
                       epochs=400,
                       max_training_samples=200,
                       validation_split=0.2,
                       augmentation_factor=20)

    return agent
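
A hypothetical way to call run_bot_cli() with the console channel and a trained NLU model; both paths are illustrative:

from rasa_core.channels.console import ConsoleInputChannel
from rasa_core.interpreter import RasaNLUInterpreter

agent = run_bot_cli(ConsoleInputChannel(),
                    RasaNLUInterpreter('./models/nlu/default/current'))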
Example #14
class CoreServer:
    logger = logging.getLogger(__name__)

    def __init__(self):
        self.agent = None
        self.config = Settings.Settings().getConfig()

    def train_dialogue(self, domain_file, model_path, training_data_file):
        fallback = FallbackPolicy(fallback_action_name="utter_default",
                                  core_threshold=0.2,
                                  nlu_threshold=0.5)
        featurizer = MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(),
                                                 max_history=10)
        self.agent = Agent(domain_file,
                           policies=[MemoizationPolicy(max_history=10),
                                     KerasPolicy(epochs=90,
                                                 batch_size=20,
                                                 validation_split=0.1),
                                     fallback])
        data = self.agent.load_data(training_data_file)
        self.agent.train(data)
        self.agent.persist(model_path)

    def run_dialogue(self, core_model_path, nlu_model_path, serve_forever=True):
        interpreter = RasaNLUInterpreter(nlu_model_path)
        action_endpoint = EndpointConfig(url="http://localhost:5055/webhook")
        self.agent = Agent.load(core_model_path,
                                interpreter=interpreter,
                                action_endpoint=action_endpoint)
        # rasa_core.run.serve_application(self.agent, channel='cmdline')

    def build_core(self):
        if self.agent is None:
            self.train_dialogue(self.config['domain_file_path'],
                                self.config['core_model_path'],
                                self.config['training_core_data_path'])
            self.run_dialogue(self.config['core_model_path'],
                              self.config['default_nlu_model_path'])
        return self.agent
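
Usage of the class above: build_core() trains on the first call and returns the cached agent afterwards.

server = CoreServer()
agent = server.build_core()  # trains and loads on the first call
agent = server.build_core()  # later calls reuse self.agent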
Example #15
def train_core(domain_file="robot/config/domain.yml",
               model_path="robot/models/dialogue",
               training_data_file="robot/config/stories.md"):
    from rasa_core.featurizers import (MaxHistoryTrackerFeaturizer,
                                       BinarySingleStateFeaturizer)
    # fallback = FallbackPolicy(fallback_action_name="action_default_fallback",
    #                           core_threshold=0.9,
    #                           nlu_threshold=0.9)
    fallback = FallbackPolicy(fallback_action_name="action_default_custom",
                              core_threshold=0.8,
                              nlu_threshold=0.8)

    agent = Agent(
        domain_file,
        policies=[
            MemoizationPolicy(max_history=5),
            KerasPolicy(
                MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(),
                                            max_history=5)), fallback
        ])
    training_data = agent.load_data(training_data_file)
    # train the agent's policies
    agent.train(training_data, epochs=500)
    agent.persist(model_path)
    return agent
Example #16
def train(domain_file: Text,
          stories_file: Text,
          output_path: Text,
          interpreter: Optional[NaturalLanguageInterpreter] = None,
          endpoints: AvailableEndpoints = AvailableEndpoints(),
          dump_stories: bool = False,
          policy_config: Optional[Text] = None,
          exclusion_percentage: Optional[int] = None,
          kwargs: Optional[Dict] = None):
    from rasa_core.agent import Agent

    if not kwargs:
        kwargs = {}

    policies = config.load(policy_config)

    agent = Agent(domain_file,
                  generator=endpoints.nlg,
                  action_endpoint=endpoints.action,
                  interpreter=interpreter,
                  policies=policies)

    data_load_args, kwargs = utils.extract_args(
        kwargs, {
            "use_story_concatenation", "unique_last_num_states",
            "augmentation_factor", "remove_duplicates", "debug_plots"
        })

    training_data = agent.load_data(stories_file,
                                    exclusion_percentage=exclusion_percentage,
                                    **data_load_args)
    agent.train(training_data, **kwargs)
    agent.persist(output_path, dump_stories)

    return agent
Example #17
def train_core(domain_file, model_path, training_data_file, policy_config):
    logging.basicConfig(filename=logfile, level=logging.DEBUG)
    agent = Agent(domain_file, policies=config.load(policy_config))
    training_data = agent.load_data(training_data_file)
    agent.train(training_data)
    agent.persist(model_path)
    return agent
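
train_core() delegates all policy choices to the policy_config file (the logfile global is assumed to be defined elsewhere in the module). Below is a minimal sketch of what that YAML might contain, written from Python so the example is self-contained; the policy names follow the rasa_core 0.x config format and the values are illustrative.

POLICY_CONFIG = """
policies:
  - name: MemoizationPolicy
    max_history: 5
  - name: KerasPolicy
  - name: FallbackPolicy
    nlu_threshold: 0.4
    core_threshold: 0.3
"""

with open("policy_config.yml", "w") as f:
    f.write(POLICY_CONFIG)

agent = train_core("domain.yml", "models/dialogue",
                   "data/stories.md", "policy_config.yml")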
Example #18
def train_dialogue_model(domain_file,
                         stories_file,
                         output_path,
                         interpreter=None,
                         endpoints=AvailableEndpoints(),
                         max_history=None,
                         dump_flattened_stories=False,
                         policy_config=None,
                         kwargs=None):
    if not kwargs:
        kwargs = {}

    fallback_args, kwargs = utils.extract_args(
        kwargs, {"nlu_threshold", "core_threshold", "fallback_action_name"})

    policies = config.load(policy_config, fallback_args, max_history)

    agent = Agent(domain_file,
                  generator=endpoints.nlg,
                  action_endpoint=endpoints.action,
                  interpreter=interpreter,
                  policies=policies)

    data_load_args, kwargs = utils.extract_args(
        kwargs, {
            "use_story_concatenation", "unique_last_num_states",
            "augmentation_factor", "remove_duplicates", "debug_plots"
        })

    training_data = agent.load_data(stories_file, **data_load_args)
    agent.train(training_data, **kwargs)
    agent.persist(output_path, dump_flattened_stories)

    return agent
Example #19
def run_weather_online(input_channel, interpreter,
                       domain_file="weather_domain.yml",
                       training_data_file='data/stories.md'):

    policies2 = policy_config.load("config.yml")
    agent = Agent(domain_file, policies=policies2)
    data = asyncio.run(agent.load_data(training_data_file))
    agent.train(data)
    return agent
Example #20
def train(max_training_samples=3, serve_forever=True):
    story = 'stories.md'

    from rasa_core.interpreter import RasaNLUInterpreter
    interpreter = RasaNLUInterpreter(nlu_model_path)

    # domain configuration file

    default_domain = TemplateDomain.load("./domain.yml")

    if os.path.exists(agent_model_path):
        agent = Agent.load(agent_model_path,
                           interpreter=interpreter,
                           tracker_store=InMemoryTrackerStore(default_domain))
    else:
        agent = Agent(domain_conf_path,
                      policies=[MemoizationPolicy(),
                                KerasPolicy()],
                      interpreter=interpreter,
                      tracker_store=InMemoryTrackerStore(default_domain))
    #for debug: print(interpreter.parse(u"你好"))

    logger.info("Starting training...")

    training_data = agent.load_data(story)

    agent.train(training_data, epochs=50)
    return agent
Example #21
def test_agent_train(tmpdir, default_domain):
    training_data_file = 'examples/moodbot/data/stories.md'
    agent = Agent("examples/moodbot/domain.yml",
                  policies=[AugmentedMemoizationPolicy()])

    training_data = agent.load_data(training_data_file)
    agent.train(training_data)
    agent.persist(tmpdir.strpath)

    loaded = Agent.load(tmpdir.strpath)

    # test domain
    assert [a.name() for a in loaded.domain.actions] == \
           [a.name() for a in agent.domain.actions]
    assert loaded.domain.intents == agent.domain.intents
    assert loaded.domain.entities == agent.domain.entities
    assert loaded.domain.templates == agent.domain.templates
    assert [s.name for s in loaded.domain.slots] == \
           [s.name for s in agent.domain.slots]

    # test policies
    assert type(loaded.policy_ensemble) is type(
        agent.policy_ensemble)  # nopep8
    assert [type(p) for p in loaded.policy_ensemble.policies] == \
           [type(p) for p in agent.policy_ensemble.policies]
Example #22
File: train.py Project: O2br/tais
def train_dialogue(domain_file, model_path, training_folder):
    MemoizationPolicy.USE_NLU_CONFIDENCE_AS_SCORE = True
    #keras_1 = KerasPolicy(
    #             MaxHistoryTrackerFeaturizer(
    #                 BinarySingleStateFeaturizer(),
    #                 max_history=MAX_HISTORY
    #                 )
    #             )
    keras_2 = KerasPolicy(
        FullDialogueTrackerFeaturizer(LabelTokenizerSingleStateFeaturizer()))
    #agent = Agent(domain_file, policies=[
    #        keras_2,
    #        MemoizationPolicy(max_history=MAX_HISTORY),
    #                                                CustomFallbackPolicy(
    #                    fallback_action_name=FALLBACK_ACTION_NAME,
    #                    nlu_threshold=NLU_THRESHOLD,
    #                    core_threshold=CORE_THRESHOLD)])

    agent = Agent(domain_file,
                  policies=[
                      keras_2,
                      MemoizationPolicy(max_history=MAX_HISTORY),
                      FallbackPolicy(nlu_threshold=NLU_THRESHOLD,
                                     core_threshold=CORE_THRESHOLD)
                  ])

    training_data = agent.load_data(training_folder, augmentation_factor=20)

    agent.train(training_data,
                epochs=TRAINING_EPOCHS,
                batch_size=BATCH_SIZE,
                validation_split=VALIDATION_SPLIT)
    agent.persist(model_path)
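
The upper-case constants referenced above (MAX_HISTORY, NLU_THRESHOLD, CORE_THRESHOLD, TRAINING_EPOCHS, BATCH_SIZE, VALIDATION_SPLIT) are defined elsewhere in the project's train.py. Hypothetical values for a self-contained run:

# Illustrative values only; the real ones live elsewhere in the tais project.
MAX_HISTORY = 5
NLU_THRESHOLD = 0.5
CORE_THRESHOLD = 0.3
TRAINING_EPOCHS = 300
BATCH_SIZE = 50
VALIDATION_SPLIT = 0.2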
Example #23
def train_dialogue_model(domain_file,
                         stories_file,
                         output_path,
                         use_online_learning=False,
                         nlu_model_path=None,
                         kwargs=None):
    if not kwargs:
        kwargs = {}

    agent = Agent(domain_file, policies=[MemoizationPolicy(), KerasPolicy()])
    training_data = agent.load_data(stories_file)

    if use_online_learning:
        if nlu_model_path:
            agent.interpreter = RasaNLUInterpreter(nlu_model_path)
        else:
            agent.interpreter = RegexInterpreter()
        agent.train_online(training_data,
                           input_channel=ConsoleInputChannel(),
                           model_path=output_path,
                           **kwargs)
    else:
        agent.train(training_data, **kwargs)

    agent.persist(output_path)
Example #24
def train_dialogue(domain_file='restaurant_domain.yml',
                   model_path='./models/dialogue',
                   training_data_file='./data/stories.md'):

    featurizer = MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(),
                                             max_history=5)
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=5),
                            KerasPolicy(featurizer)])

    data = agent.load_data(training_data_file,
                           augmentation_factor=50)

    agent.train(data,
                epochs=500,
                batch_size=30,
                validation_split=0.2)

    # agent.train(
    #     training_data_file,
    #     # max_history=3,
    #     epochs=300,
    #     batch_size=50,
    #     validation_split=0.2,
    #     augmentation_factor=50)

    agent.persist(model_path)
    return agent
Example #25
def run_weather_online(interpreter,
                       domain_file='weather_domain.yml',
                       training_data_file='data/stories.md'):
    action_endpoint = EndpointConfig(url="http://localhost:5000/webhook")
    fallback = FallbackPolicy(fallback_action_name="action_default_fallback",
                              core_threshold=0.8,
                              nlu_threshold=0.8)
    agent = Agent(domain_file,
                  policies=[
                      MemoizationPolicy(max_history=2),
                      KerasPolicy(epochs=500,
                                  batch_size=50,
                                  validation_split=0.2), fallback
                  ],
                  interpreter=interpreter,
                  action_endpoint=action_endpoint)
    data_ = agent.load_data(training_data_file, augmentation_factor=50)

    agent.train(data_)
    interactive.run_interactive_learning(agent,
                                         training_data_file,
                                         skip_visualization=True)

    # agent.handle_channels(input_channel)
    return agent
Example #26
def run_bot(interpreter,
            domain_file="domain.yml",
            training_data_file='./data/stories.md'):
    action_endpoint = EndpointConfig(url="http://localhost:5055/webhook")
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=2),
                            KerasPolicy()],
                  interpreter=interpreter,
                  action_endpoint=action_endpoint)

    data = agent.load_data(training_data_file)
    """
    agent.train(data,
                batch_size=50,
                epochs=200,
                max_training_samples=300)
    online.serve_agent(agent)
    online.ser




"""

    agent.persist('./models/dialogue')
    return agent
Example #27
def train_dialogue(domain_file="hainan_domain.yml",
                   model_path="models/dialogue",
                   training_data_file="data/story.md"):
    #agent = Agent(domain_file,
    #policies=[MemoizationPolicy(), MobilePolicy()])
    '''
    featurizer = MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(), max_history=2)
    agent = Agent(domain_file,
              policies=[MemoizationPolicy(max_history=2),
                        KerasPolicy(featurizer)])
    '''

    agent = Agent(domain_file, policies=[MemoizationPolicy(), KerasPolicy()])

    training_data = agent.load_data(training_data_file)

    agent.train(
        training_data,
        epochs=200,
        batch_size=16,
        augmentation_factor=50,
        #validation_split=0.2
    )

    agent.persist(model_path)
    return agent
Example #28
def default_agent(default_domain):
    agent = Agent(default_domain,
                  policies=[MemoizationPolicy()],
                  interpreter=RegexInterpreter(),
                  tracker_store=InMemoryTrackerStore(default_domain))
    training_data = agent.load_data(DEFAULT_STORIES_FILE)
    agent.train(training_data)
    return agent
Example #30
def train_dialogue(domain_file='weather_domain.yml',
                   model_path='./models/dialogue',
                   training_data_file='./data/stories.md'):
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(),
                            KerasPolicy(max_history=3, epochs=200, batch_size=50)])
    data = agent.load_data(training_data_file)
    agent.train(data)
    agent.persist(model_path)
    return agent
Example #31
def train_dialogue():
    agent = Agent('domain.yml', policies=[MemoizationPolicy(), KerasPolicy()])
    training_data = agent.load_data('stories.md')

    agent.train(training_data)

    agent.persist('models/dialogue')
    return agent
Example #32
def train_dialogue(domain_file, model_path, training_folder):

    agent = Agent(domain_file, policies=[
            MemoizationPolicy(max_history=6),
            KerasPolicy(MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(),
                                                    max_history=6)),
            FallbackPolicy(nlu_threshold=0.8, core_threshold=0.3)])

    training_data = agent.load_data(training_folder)

    agent.train(training_data, epochs=100)
    agent.persist(model_path)
Example #33
def default_processor(default_domain, default_nlg):
    agent = Agent(default_domain,
                  SimplePolicyEnsemble([AugmentedMemoizationPolicy()]),
                  interpreter=RegexInterpreter())

    training_data = agent.load_data(DEFAULT_STORIES_FILE)
    agent.train(training_data)
    tracker_store = InMemoryTrackerStore(default_domain)
    return MessageProcessor(agent.interpreter,
                            agent.policy_ensemble,
                            default_domain,
                            tracker_store,
                            default_nlg)
Example #34
def run_concertbot_online(input_channel, interpreter,
                          domain_file="concert_domain.yml",
                          training_data_file='data/stories.md'):
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=2), KerasPolicy()],
                  interpreter=interpreter)

    training_data = agent.load_data(training_data_file)
    agent.train_online(training_data,
                       input_channel=input_channel,
                       batch_size=50,
                       epochs=200,
                       max_training_samples=300)

    return agent
Example #35
def run_ivrbot_online(input_channel=ConsoleInputChannel(),
                      interpreter=RasaNLUInterpreter("projects/ivr_nlu/demo"),
                      domain_file="mobile_domain.yml",
                      training_data_file="data/mobile_story.md"):
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(), KerasPolicy()],
                  interpreter=interpreter)

    training_data = agent.load_data(training_data_file)
    agent.train_online(training_data,
                       input_channel=input_channel,
                       batch_size=16,
                       epochs=200,
                       max_training_samples=300)

    return agent
Example #36
def train_dialogue_model(domain_file, stories_file, output_path,
                         nlu_model_path=None,
                         endpoints=None,
                         max_history=None,
                         dump_flattened_stories=False,
                         kwargs=None):
    if not kwargs:
        kwargs = {}

    action_endpoint = utils.read_endpoint_config(endpoints, "action_endpoint")

    fallback_args, kwargs = utils.extract_args(kwargs,
                                               {"nlu_threshold",
                                                "core_threshold",
                                                "fallback_action_name"})

    policies = [
        FallbackPolicy(
                fallback_args.get("nlu_threshold",
                                  DEFAULT_NLU_FALLBACK_THRESHOLD),
                fallback_args.get("core_threshold",
                                  DEFAULT_CORE_FALLBACK_THRESHOLD),
                fallback_args.get("fallback_action_name",
                                  DEFAULT_FALLBACK_ACTION)),
        MemoizationPolicy(
                max_history=max_history),
        KerasPolicy(
                MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(),
                                            max_history=max_history))]

    agent = Agent(domain_file,
                  action_endpoint=action_endpoint,
                  interpreter=nlu_model_path,
                  policies=policies)

    data_load_args, kwargs = utils.extract_args(kwargs,
                                                {"use_story_concatenation",
                                                 "unique_last_num_states",
                                                 "augmentation_factor",
                                                 "remove_duplicates",
                                                 "debug_plots"})

    training_data = agent.load_data(stories_file, **data_load_args)
    agent.train(training_data, **kwargs)
    agent.persist(output_path, dump_flattened_stories)

    return agent
Example #37
def train_dialogue(domain_file="mobile_domain.yml",
                   model_path="projects/dialogue",
                   training_data_file="data/mobile_story.md"):
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(), KerasPolicy()])

    training_data = agent.load_data(training_data_file)
    agent.train(
        training_data,
        epochs=200,
        batch_size=16,
        augmentation_factor=50,
        validation_split=0.2
    )

    agent.persist(model_path)
    return agent
Example #38
def train_dialogue(domain_file="domain.yml",
                   model_path="models/dialogue",
                   training_data_file="data/stories.md"):
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=3),
                            CustomPolicy()])

    training_data = agent.load_data(training_data_file)
    agent.train(
        training_data,
        epochs=400,
        batch_size=100,
        validation_split=0.2
    )

    agent.persist(model_path)
    return agent
Example #39
def train_dialogue(domain_file="mobile_domain.yml",
                   model_path="models/dialogue",
                   training_data_file="data/mobile_edit_story.md"):

    fallback = FallbackPolicy(
        fallback_action_name="action_default_fallback",
        nlu_threshold=0.5,
        core_threshold=0.3
    )
    
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=5),
                            MobilePolicy(), fallback])

    training_data = agent.load_data(training_data_file)
    agent.train(
            training_data,
            epochs=500,
            batch_size=16,
            validation_split=0.2
    )

    agent.persist(model_path)
    return agent
Example #40
def test_agent_train(tmpdir, default_domain):
    training_data_file = 'examples/moodbot/data/stories.md'
    agent = Agent("examples/moodbot/domain.yml",
                  policies=[AugmentedMemoizationPolicy()])

    training_data = agent.load_data(training_data_file)
    agent.train(training_data)
    agent.persist(tmpdir.strpath)

    loaded = Agent.load(tmpdir.strpath)

    # test domain
    assert loaded.domain.action_names == agent.domain.action_names
    assert loaded.domain.intents == agent.domain.intents
    assert loaded.domain.entities == agent.domain.entities
    assert loaded.domain.templates == agent.domain.templates
    assert [s.name for s in loaded.domain.slots] == \
           [s.name for s in agent.domain.slots]

    # test policies
    assert type(loaded.policy_ensemble) is type(
        agent.policy_ensemble)  # nopep8
    assert [type(p) for p in loaded.policy_ensemble.policies] == \
           [type(p) for p in agent.policy_ensemble.policies]
Example #41
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from rasa_core import utils
from rasa_core.agent import Agent
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy

if __name__ == '__main__':
    utils.configure_colored_logging(loglevel="INFO")

    training_data_file = 'data/stories.md'
    model_path = 'models/dialogue'

    agent = Agent("concert_domain.yml",
                  policies=[MemoizationPolicy(), KerasPolicy()])

    training_data = agent.load_data(training_data_file)

    agent.train(
            training_data,
            augmentation_factor=50,
            max_history=2,
            epochs=500,
            batch_size=10,
            validation_split=0.2
    )

    agent.persist(model_path)
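
After this script finishes, the persisted model can be loaded for a quick command-line check. A sketch assuming the rasa_core 0.x API; with RegexInterpreter, messages must be typed as /intent strings because no NLU model is attached.

from rasa_core.agent import Agent
from rasa_core.interpreter import RegexInterpreter

agent = Agent.load('models/dialogue', interpreter=RegexInterpreter())
print(agent.handle_text('/greet'))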