Пример #1
0
def train_dialogue(project='Lambton'):
    """Train and persist the dialogue model for a chatbot project.

    Args:
        project: Name of the project folder under ../Chatbots/projects.

    Returns:
        The trained Agent.
    """
    # Build all project paths from a single base to avoid repeating the prefix.
    project_dir = '../Chatbots/projects/' + project
    domain_file = project_dir + '/domain.yml'
    training_data_file = project_dir + '/stories/stories.md'
    model_path = project_dir + '/models/dialogue'

    fallback = FallbackPolicy(fallback_action_name="utter_fallback",
                              core_threshold=0.5,
                              nlu_threshold=0.5)

    agent = Agent(domain_file,
                  policies=[
                      AugmentedMemoizationPolicy(max_history=7),
                      SklearnPolicy(), fallback
                  ])
    training_data = agent.load_data(training_data_file)

    # "Precise" settings: slower than the earlier 300-epoch/batch-33 variant
    # but yields a better model.
    agent.train(training_data,
                augmentation_factor=50,
                epochs=500,
                batch_size=10,
                validation_split=0.2)

    agent.persist(model_path)
    return agent
Пример #2
0
File: train.py Project: O2br/tais
def train_dialogue(domain_file, model_path, training_folder):
    """Train a Core dialogue model over training_folder and persist it."""
    # Let memoization scoring reuse the NLU confidence value.
    MemoizationPolicy.USE_NLU_CONFIDENCE_AS_SCORE = True

    full_dialogue_policy = KerasPolicy(
        FullDialogueTrackerFeaturizer(LabelTokenizerSingleStateFeaturizer()))

    policies = [
        full_dialogue_policy,
        MemoizationPolicy(max_history=MAX_HISTORY),
        FallbackPolicy(nlu_threshold=NLU_THRESHOLD,
                       core_threshold=CORE_THRESHOLD),
    ]
    agent = Agent(domain_file, policies=policies)

    stories = agent.load_data(training_folder, augmentation_factor=20)
    agent.train(stories,
                epochs=TRAINING_EPOCHS,
                batch_size=BATCH_SIZE,
                validation_split=VALIDATION_SPLIT)
    agent.persist(model_path)
Пример #3
0
def train_dialogue(domain_file="invest_domain.yml",
                   model_path="models/dialogue",
                   training_data_file="data/invest_story.md"):
    """Train the investment-bot dialogue model and persist it.

    Returns:
        The trained Agent.
    """
    # Hand off to the Tuling action when NLU confidence drops below 0.85.
    fallback_policy = FallbackPolicy(
        fallback_action_name="action_tuling",
        nlu_threshold=0.85)

    dialogue_agent = Agent(domain_file,
                           policies=[MemoizationPolicy(),
                                     KerasPolicy(),
                                     fallback_policy])
    dialogue_agent.train(training_data_file,
                         epochs=300,
                         batch_size=32,
                         augmentation_factor=50,
                         validation_split=0.1)
    dialogue_agent.persist(model_path)
    return dialogue_agent
Пример #4
0
def run_weather_online(interpreter,
                       domain_file='weather_domain.yml',
                       training_data_file='data/stories.md'):
    """Train the weather agent and start an interactive-learning session.

    Args:
        interpreter: NLU interpreter used to parse user messages.
        domain_file: Path to the Core domain definition.
        training_data_file: Path to the training stories.

    Returns:
        The trained Agent.
    """
    action_endpoint = EndpointConfig(url="http://localhost:5000/webhook")
    fallback = FallbackPolicy(fallback_action_name="action_default_fallback",
                              core_threshold=0.8,
                              nlu_threshold=0.8)
    # BUG FIX: the domain path was hard-coded to './weather_domain.yml',
    # silently ignoring the domain_file argument.
    agent = Agent(domain_file,
                  policies=[
                      MemoizationPolicy(max_history=2),
                      KerasPolicy(epochs=500,
                                  batch_size=50,
                                  validation_split=0.2), fallback
                  ],
                  interpreter=interpreter,
                  action_endpoint=action_endpoint)
    data_ = agent.load_data(training_data_file, augmentation_factor=50)

    agent.train(data_)
    interactive.run_interactive_learning(agent,
                                         training_data_file,
                                         skip_visualization=True)
    return agent
Пример #5
0
def train_dialogue(domain_file='chat_domain.yml',
                   model_path='./models/dialogue',
                   training_data_file='./data/stories.md'):
    """Train the chat dialogue model, render a story graph, and persist it.

    Args:
        domain_file: Path to the Core domain definition.
        model_path: Directory the trained model is written to.
        training_data_file: Path to the training stories.

    Returns:
        The trained Agent.
    """
    print(training_data_file)

    # Fallback policy: reject predictions below 0.3 confidence.
    fallback = FallbackPolicy(fallback_action_name="action_default_fallback",
                              core_threshold=0.3,
                              nlu_threshold=0.3)

    agent = Agent(domain_file,
                  policies=[
                      MemoizationPolicy(),
                      KerasPolicy(max_history=3, epochs=600, batch_size=50),
                      fallback
                  ])
    # Import training data.
    data = agent.load_data(training_data_file)
    # BUG FIX: the visualization previously read the hard-coded path
    # "data/stories.md" instead of the file actually being trained on.
    agent.visualize(training_data_file, output_file="graph.html", max_history=3)
    # Start training.
    agent.train(data)

    agent.persist(model_path)
    return agent
Пример #6
0
def train_dialogue(domain_file='restaurant_domain.yml',
                   model_path='./models/dialogue',
                   training_data_file='./data/stories.md'):
    """Train the restaurant dialogue model and persist it."""
    history_featurizer = MaxHistoryTrackerFeaturizer(
        BinarySingleStateFeaturizer(), max_history=5)
    policies = [
        MemoizationPolicy(max_history=5),
        KerasPolicy(history_featurizer),
        FallbackPolicy(fallback_action_name="action_default_fallback",
                       core_threshold=0.3,
                       nlu_threshold=0.3),
    ]
    agent = Agent(domain_file, policies=policies)

    agent.train(training_data_file,
                epochs=300,
                batch_size=50,
                validation_split=0.2,
                augmentation_factor=50)

    agent.persist(model_path)
    return agent
Пример #7
0
def train_bot():
    """Train the dialogue model from ./data/stories and persist it."""
    logging.basicConfig(level='INFO')

    stories_path = './data/stories'
    output_path = './models/dialogue'

    policies = [
        MemoizationPolicy(max_history=5),
        KerasPolicy(MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(),
                                                max_history=5)),
        FallbackPolicy(fallback_action_name="utter_not_understood",
                       core_threshold=0.3,
                       nlu_threshold=0.6),
    ]
    agent = Agent('./data/domain.yml', policies=policies)

    stories = agent.load_data(stories_path)
    agent.train(stories,
                augmentation_factor=50,
                epochs=500,
                batch_size=10,
                validation_split=0.2)

    agent.persist(output_path)
Пример #8
0
def train_bot():
    """Train the dialogue model and write it to ./models/dialogue."""
    stories_path = './data/stories'
    output_path = './models/dialogue'
    domain_path = './data/domain.yml'

    # Reject low-confidence predictions:
    #   core_threshold - minimum confidence for a Core action prediction
    #   nlu_threshold  - minimum confidence for an NLU intent prediction
    fallback_policy = FallbackPolicy(
        fallback_action_name="action_not_understood",
        core_threshold=0.5,
        nlu_threshold=0.35)

    state_featurizer = MaxHistoryTrackerFeaturizer(
        BinarySingleStateFeaturizer(), max_history=3)
    agent = Agent(domain=domain_path,
                  policies=[MemoizationPolicy(max_history=2),
                            KerasPolicy(state_featurizer),
                            fallback_policy])

    stories = agent.load_data(stories_path)
    agent.train(stories,
                augmentation_factor=50,
                epochs=400,
                batch_size=50,
                validation_split=0.2)

    agent.persist(output_path)
Пример #9
0
def train_core(domain, story, dialogue):
    """Train a Core model from domain and story files, persisting to dialogue."""
    policies = [
        MemoizationPolicy(max_history=3),
        # core_threshold=1 means any non-memoized prediction falls back.
        FallbackPolicy(fallback_action_name="action_default_fallback",
                       core_threshold=1,
                       nlu_threshold=0.7),
        KerasPolicy(),
    ]
    agent = Agent(domain, policies=policies)
    agent.train(agent.load_data(story), epochs=100, validation_split=0.2)
    agent.persist(dialogue)
Пример #10
0
def train_core(domain_file="restaurant_domain.yml",
               model_path="models/dialogue",
               training_data_file="data/train_babi_stories.md"):
    """Train a Core model using one of several pre-baked policy combinations.

    Returns:
        The trained Agent.
    """
    # Each entry is a policy combination that trained successfully ("gut").
    policy_combinations = [
        [MemoizationPolicy(max_history=3)],
        [MemoizationPolicy(max_history=5)],
        [AugmentedMemoizationPolicy()],
        [RestaurantPolicy()],
        [KerasPolicy()],
        [FallbackPolicy()],
        [FallbackPolicy(core_threshold=0.5)],
        [SklearnPolicy(scoring=['accuracy', 'f1'])],
        [MemoizationPolicy(max_history=3), RestaurantPolicy()],
        [AugmentedMemoizationPolicy(), RestaurantPolicy()],
        [MemoizationPolicy(max_history=3), KerasPolicy()],
        [AugmentedMemoizationPolicy(), KerasPolicy()],
        [MemoizationPolicy(max_history=3), SklearnPolicy(scoring=['accuracy', 'f1'])],
        [AugmentedMemoizationPolicy(), SklearnPolicy(scoring=['accuracy', 'f1'])],
        [MemoizationPolicy(max_history=3), KerasPolicy(), SklearnPolicy()],
        [MemoizationPolicy(max_history=3), RestaurantPolicy(), SklearnPolicy()],
        [AugmentedMemoizationPolicy(), RestaurantPolicy(),
         SklearnPolicy(scoring=['accuracy', 'f1'])],
    ]

    # Select the combination fourth from the end of the list.
    chosen = policy_combinations[-4]
    names = ",".join(policy.__class__.__name__ for policy in chosen)
    print("Training policies: [%s]" % names)

    agent = Agent(domain_file, chosen)
    agent.train(agent.load_data(training_data_file),
                epochs=400,
                batch_size=100,
                validation_split=0.2)

    agent.persist(model_path)
    return agent
Пример #11
0
def train_core():
    """Train the Core model from CORE_DOMAIN/CORE_DATA and save to CORE_MODEL."""
    policies = [
        MemoizationPolicy(max_history=5),
        KerasPolicy(max_history=5),
        FallbackPolicy(nlu_threshold=0.28),
    ]

    LOGGER.info("[CORE] Creating agent from YAML at %s", CORE_DOMAIN)
    agent = Agent(CORE_DOMAIN, policies=policies)

    LOGGER.info("[CORE] Loading training data from %s", CORE_DATA)
    stories = agent.load_data(CORE_DATA)

    LOGGER.info("[CORE] Training Core...")
    agent.train(stories)
    agent.persist(CORE_MODEL)
Пример #12
0
def train_dialogue(domain_file, model_path, training_folder):
    """Train a dialogue model over training_folder and persist to model_path."""
    keras_policy = KerasPolicy(
        MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(),
                                    max_history=6))
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=6),
                            keras_policy,
                            FallbackPolicy(nlu_threshold=0.8,
                                           core_threshold=0.3)])

    stories = agent.load_data(training_folder)
    agent.train(stories, epochs=100)
    agent.persist(model_path)
Пример #13
0
def train_dialogue(domain_file="domain.yml",
                   policy_file="policy.yml",
                   model_path="models/dialogue",
                   training_data_file="data/stories.md"):
    """Train and persist a dialogue model.

    NOTE(review): policy_file is accepted but never read; it is kept only
    for interface compatibility with existing callers.
    """
    fallback_policy = FallbackPolicy(
        fallback_action_name="action_default_fallback",
        core_threshold=0.2,
        nlu_threshold=0.2)
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(),
                            KerasPolicy(),
                            fallback_policy])
    agent.train(agent.load_data(training_data_file))
    agent.persist(model_path)
    return agent
Пример #14
0
def train_dialogue(domain_file='malu_domain.yml',
                   model_path='./models/dialogue',
                   training_data_file='./data/stories.md'):
    """Train the Malu dialogue model and persist it."""
    # Near-zero thresholds: the default fallback almost never triggers.
    fallback_policy = FallbackPolicy(
        fallback_action_name="action_default_fallback",
        core_threshold=0.01,
        nlu_threshold=0.01)
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=2),
                            KerasPolicy(),
                            fallback_policy])
    stories = agent.load_data(training_data_file)
    agent.train(stories, epochs=300, batch_size=50, validation_split=0.2)

    agent.persist(model_path)
    return agent
Пример #15
0
def train_dialogue(domain_file="restaurant_domain.yml",
                   model_path="models/dialogue",
                   training_data_file="data/babi_stories.md"):
    """Train the restaurant dialogue model and persist it.

    Returns:
        The trained Agent.
    """
    fallback = FallbackPolicy(fallback_action_name="action_default_fallback",
                              core_threshold=0.3,
                              nlu_threshold=0.3)
    # BUG FIX: the fallback policy was constructed but never added to the
    # agent's policy list, so action_default_fallback could never fire.
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(), KerasPolicy(), fallback])

    agent.train(training_data_file,
                epochs=400,
                batch_size=100,
                validation_split=0.2)

    agent.persist(model_path)
    return agent
Пример #16
0
def train_dialogue(model_path="models/dialogue",
                   training_data_file="data/stories.md"):
    """Train a dialogue model from domain.yml and persist it."""
    domain_file = "domain.yml"

    policies = [
        MemoizationPolicy(max_history=3),
        KerasPolicy(epochs=100, batch_size=50),
        FallbackPolicy(fallback_action_name="utter_unclear",
                       core_threshold=0.55,
                       nlu_threshold=0.55),
    ]
    agent = Agent(domain_file, policies=policies)
    stories = agent.load_data(training_data_file)
    # Train on every story; no hold-out validation split.
    agent.train(stories, validation_split=0.0)
    agent.persist(model_path)
    return agent
Пример #17
0
def train_dialouge(domain_file='domain.yml',
                   output_model_path='./models/dialogue',
                   training_data_file='./data/stories.md'):
    """Train a dialogue model and persist it to output_model_path.

    (The "dialouge" spelling is kept: renaming would break callers.)
    """
    policies = [
        MemoizationPolicy(),
        KerasPolicy(max_history=3, epochs=50, batch_size=2),
        FallbackPolicy(fallback_action_name='action_default_fallback',
                       core_threshold=0.3,
                       nlu_threshold=0.1),
    ]
    agent = Agent(domain_file, policies=policies)
    agent.train(agent.load_data(training_data_file))
    agent.persist(output_model_path)

    return agent
Пример #18
0
def train_dialogue(domain_file='domain.yml',
                   model_path='models/dialogue',
                   train_data_file='data/core/stories.md'):
    """Train the dialogue model on train_data_file and persist it."""
    policies = [
        MemoizationPolicy(),
        KerasPolicy(max_history=3, epochs=200, batch_size=50),
        FallbackPolicy(fallback_action_name="action_default_fallback",
                       core_threshold=0.3,
                       nlu_threshold=0.3),
    ]
    agent = Agent(domain_file, policies=policies)
    stories = agent.load_data(train_data_file)

    agent.train(stories)
    agent.persist(model_path)
    return agent
Пример #19
0
def run_interactive_online(interpreter,
                           domain_file="domain.yml",
                           training_data_file='stories.md'):
    """Train an agent and launch an interactive-learning session against it."""
    webhook = EndpointConfig(url="http://localhost:5005/webhook")
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=2),
                            KerasPolicy(max_history=3, epochs=3, batch_size=50),
                            FallbackPolicy(),
                            FormPolicy()],
                  interpreter=interpreter,
                  action_endpoint=webhook)

    stories = agent.load_data(training_data_file)
    agent.train(stories)
    interactive.run_interactive_learning(agent, training_data_file)
    return agent
Пример #20
0
    def default_policies(cls, fallback_args, max_history):
        # type: (Dict[Text, Any], int) -> List[Policy]
        """Load the default policy setup consisting of
        FallbackPolicy, MemoizationPolicy and KerasPolicy."""

        return [
            # Thresholds and fallback action come from fallback_args, with
            # module-level defaults when a key is missing. The values are
            # passed positionally, so their order must match FallbackPolicy's
            # constructor signature.
            FallbackPolicy(
                fallback_args.get("nlu_threshold",
                                  DEFAULT_NLU_FALLBACK_THRESHOLD),
                fallback_args.get("core_threshold",
                                  DEFAULT_CORE_FALLBACK_THRESHOLD),
                fallback_args.get("fallback_action_name",
                                  DEFAULT_FALLBACK_ACTION)),
            MemoizationPolicy(max_history=max_history),
            # KerasPolicy featurizes binary state vectors over the same
            # max_history window as the memoization policy.
            KerasPolicy(
                MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(),
                                            max_history=max_history))
        ]
Пример #21
0
def train_dialogue_keras(domain_file="mobile_domain.yml",
                         model_path="models/dialogue_keras",
                         training_data_file="data/mobile_edit_story.md"):
    """Train the mobile dialogue model with the custom MobilePolicy."""
    policies = [
        MemoizationPolicy(max_history=5),
        MobilePolicy(),
        FallbackPolicy(fallback_action_name="action_default_fallback",
                       nlu_threshold=0.5,
                       core_threshold=0.3),
    ]
    agent = Agent(domain_file, policies=policies)

    stories = agent.load_data(training_data_file)
    agent.train(stories, epochs=100, batch_size=16, validation_split=0.2)

    agent.persist(model_path)
    return agent
Пример #22
0
def train_dialogue(domain_file="bank_domain.yml",
                   model_path="models/dialogue/",
                   training_data_file="data/stories.md"):
    """Train the banking dialogue model and persist it."""
    # Imported locally, matching the original code's scoping.
    from rasa_core.policies.fallback import FallbackPolicy

    fallback_policy = FallbackPolicy(fallback_action_name="action_fallback",
                                     core_threshold=0.2,
                                     nlu_threshold=0.2)
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=15),
                            KerasPolicy(),
                            fallback_policy])
    stories = agent.load_data(training_data_file)
    agent.train(stories,
                batch_size=200,
                epochs=150,
                validation_split=0.2)
    agent.persist(model_path)
    return agent
Пример #23
0
def run_weather_online(interpreter,
                       domain_file="malu_domain.yml",
                       training_data_file='data/stories.md'):
    """Train the Malu agent and start an online-learning session."""
    webhook = EndpointConfig(url="http://localhost:5055/webhook")
    # Near-zero thresholds: the default fallback effectively never triggers.
    fallback_policy = FallbackPolicy(
        fallback_action_name="action_default_fallback",
        core_threshold=0.01,
        nlu_threshold=0.01)
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=2),
                            KerasPolicy(),
                            fallback_policy],
                  interpreter=interpreter,
                  action_endpoint=webhook)

    stories = agent.load_data(training_data_file)
    agent.train(stories, batch_size=50, epochs=200, max_training_samples=300)
    online.run_online_learning(agent)
    return agent
Пример #24
0
def train_dialogue_online(input_channel=ConsoleInputChannel(),
                          interpreter_path="./models/nlu/default/current",
                          domain_file="data/domain.yml",
                          training_data_file='data/stories.md'):
    """Train a dialogue model interactively over the given input channel.

    NOTE(review): the ConsoleInputChannel() default is evaluated once at
    definition time, so all default calls share one channel instance —
    confirm that is intended.
    """
    interpreter = RasaNLUInterpreter(interpreter_path)
    fallback_policy = FallbackPolicy(fallback_action_name="input_unknown",
                                     core_threshold=0.1,
                                     nlu_threshold=0.3)
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(),
                            KerasPolicy(),
                            fallback_policy],
                  interpreter=interpreter)
    agent.train_online(training_data_file,
                       input_channel=input_channel,
                       batch_size=50,
                       epochs=50)
    return agent
def run_cbeg(interpreter,
             domain_file="cbeg_domain.yml",
             training_data_file='data/stories.md'):
    """Train the CBEG agent and launch an interactive-learning session.

    Args:
        interpreter: NLU interpreter used to parse user messages.
        domain_file: Path to the Core domain definition.
        training_data_file: Path to the training stories.

    Returns:
        The trained Agent.
    """
    action_endpoint = EndpointConfig(url="http://localhost:5055/webhook")
    fallback = FallbackPolicy(fallback_action_name="action_default_fallback",
                              core_threshold=0.4,
                              nlu_threshold=0.4)
    # BUG FIX: the fallback policy was constructed but never added to the
    # policy list, so action_default_fallback could never fire.
    agent = Agent(domain_file,
                  policies=[
                      MemoizationPolicy(max_history=2),
                      KerasPolicy(max_history=3, epochs=3, batch_size=50),
                      fallback
                  ],
                  interpreter=interpreter,
                  action_endpoint=action_endpoint)

    data = agent.load_data(training_data_file)
    agent.train(data)
    interactive.run_interactive_learning(agent, training_data_file)
    return agent
def train_dialogue(domain_file='domain.yml',
                   model_path='./models/dialogue',
                   training_data_file='./data/stories.md'):
    """Train the dialogue model with a strict 0.60 fallback threshold."""
    policies = [
        MemoizationPolicy(),
        KerasPolicy(max_history=4, epochs=1000, batch_size=100),
        FallbackPolicy(fallback_action_name="utter_action_fallback",
                       core_threshold=0.60,
                       nlu_threshold=0.60),
    ]
    agent = Agent(domain_file, policies=policies)
    stories = agent.load_data(training_data_file, augmentation_factor=20)
    agent.train(stories)

    agent.persist(model_path)
    return agent
Пример #27
0
def train_dialogue(domain_file="domain.yml",
                   model_path="models/dialogue",
                   training_data_file="data/stories.md"):
    """Train a dialogue model with a long (10-turn) memoization window."""
    policies = [
        MemoizationPolicy(max_history=10),
        KerasPolicy(),
        FallbackPolicy(fallback_action_name="bot.utter.default",
                       core_threshold=0.3,
                       nlu_threshold=0.2),
    ]
    agent = Agent(domain_file, policies=policies)

    stories = agent.load_data(training_data_file)
    agent.train(stories,
                epochs=500,
                batch_size=100,
                validation_split=0.1)

    agent.persist(model_path)
    return agent
Пример #28
0
def train_dialogue_init(domain_file="./data/domain.yml",
                        model_path="models/dialogue",
                        training_data_file="./data/stories.md"):
    """Train an initial dialogue model directly from the story file."""
    policies = [
        MemoizationPolicy(),
        KerasPolicy(),
        FallbackPolicy(fallback_action_name="input_unknown",
                       core_threshold=0.1,
                       nlu_threshold=0.3),
    ]
    agent = Agent(domain_file, policies=policies)

    agent.train(training_data_file,
                augmentation_factor=50,
                epochs=200,
                batch_size=10,
                validation_split=0.2)

    agent.persist(model_path)
    return agent
Пример #29
0
def trainingBot(to_bot_queue, to_human_queue, base_model, output_model,
                nlu_model, training_data):
    """Run online training for the bot over queue-backed channels.

    to_bot_queue / to_human_queue: queues the TrainingInputChannel uses to
        exchange messages with the human trainer.
    base_model: path of an existing Core model to continue training from.
    output_model: path the (re)trained model is persisted to.
    nlu_model: path of the NLU model used to build the interpreter.
    training_data: path to the training stories (rebound to the loaded
        story data below).
    """

    utils.configure_colored_logging(loglevel="INFO")

    max_history = None
    interactive_learning_on = True  # NOTE(review): assigned but never read

    channel = TrainingInputChannel(to_bot_queue, to_human_queue)
    # Hard-coded switch: only the preloaded-model branch runs; the else
    # branch below is effectively dead code while this stays True.
    preloaded_model = True

    if preloaded_model:
        # Continue training an existing model with its NLU interpreter.
        agent = CustomAgent.load(base_model,
                                 NaturalLanguageInterpreter.create(nlu_model))
        training_data = agent.load_data(training_data)

        agent.train_online_preloaded_model(training_data,
                                           input_channel=channel,
                                           model_path=output_model)
    else:
        # Train a fresh agent from scratch (unreachable while
        # preloaded_model is True).
        agent = CustomAgent(
            "domain.yml",
            policies=[
                MemoizationPolicy(max_history=max_history),
                KerasPolicy(
                    MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(),
                                                max_history=max_history)),
                FallbackPolicy(fallback_action_name="utter_fallback",
                               nlu_threshold=0.3)
            ])

        training_data = agent.load_data(training_data)
        agent.interpreter = NaturalLanguageInterpreter.create(nlu_model)
        agent.train_online(training_data,
                           input_channel=channel,
                           model_path=output_model,
                           augmentation_factor=50,
                           epochs=250,
                           batch_size=10,
                           validation_split=0.2)

    agent.persist(output_model)
Пример #30
0
def run_bot_online(interpreter, domain_file, training_data_file):
    """Train an agent (with form support) and start interactive learning."""
    webhook = EndpointConfig(url="http://localhost:5055/webhook")

    policies = [
        MemoizationPolicy(max_history=6),
        KerasPolicy(max_history=6, epochs=200),
        FallbackPolicy(fallback_action_name="action_default_fallback",
                       core_threshold=0.3,
                       nlu_threshold=0.3),
        FormPolicy(),
    ]
    agent = Agent(domain=domain_file,
                  policies=policies,
                  interpreter=interpreter,
                  action_endpoint=webhook)

    stories = agent.load_data(training_data_file)
    agent.train(stories)
    interactive.run_interactive_learning(agent, training_data_file)
    return agent