Example #1
def run_concertbot_online(interpreter,
                          domain_file="domain.yml",
                          training_data_file='data/stories.md'):

    YOUR_FB_VERIFY = "rasa-bot"
    YOUR_FB_SECRET = "a9f5370c907e14a983051bd4d266c47b"
    YOUR_FB_PAGE_ID = "158943344706542"
    YOUR_FB_PAGE_TOKEN = "EAACZAVkjEPR8BANiwfuKaSVz8yxtLsytuOPvaUzUTlCMAmvuX9TdqGR5P4F1EepBfZCQoKhSR49zM5C9pYX9hmmv3qqiUnRCMDE0eJ1lWRjeqNYTLLA5nbXelSMw0p7neZBSyyIcNHS3e1lbbf2raWPY8IUosJZBMlDLLA7ZBJgTxZAZCvhbO84"

    input_channel = FacebookInput(
        fb_verify=YOUR_FB_VERIFY,  # you need to tell Facebook this token to confirm your URL
        fb_secret=YOUR_FB_SECRET,  # your app secret
        fb_tokens={YOUR_FB_PAGE_ID: YOUR_FB_PAGE_TOKEN},  # page ids + tokens you subscribed to
        debug_mode=True  # enable debug mode for the underlying fb library
    )
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(),
                            KerasPolicy()],
                  interpreter=interpreter)
    #agent.handle_channel()
    agent.train_online(training_data_file,
                       input_channel=HttpInputChannel(8080, "", input_channel),
                       max_history=2,
                       batch_size=50,
                       epochs=200,
                       max_training_samples=300)

    return agent
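
A minimal way to drive the function above from a script; the NLU model path and the import location are assumptions for illustration, not part of the original snippet:

from rasa_core.interpreter import RasaNLUInterpreter

if __name__ == '__main__':
    # point the interpreter at a previously trained Rasa NLU model (path is illustrative)
    nlu_interpreter = RasaNLUInterpreter("models/nlu/default/current")
    run_concertbot_online(nlu_interpreter)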
Example #2
def run_bot_cli(input_channel, interpreter,
                          domain_file="./data/student_info_domain.yml",
                          training_data_file='./data/stories.md'):

  # Featurizer generation
  featurizer = MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(), max_history=5)
  
  # Build the agent from the domain file, the dialogue policies and the NLU interpreter
  agent = Agent(domain_file,
                policies=[MemoizationPolicy(max_history=5),
                          KerasPolicy(featurizer)],
                interpreter=interpreter)

  # This is where our training data file is loaded in for training
  training_data = agent.load_data(training_data_file)
  
  # training_data - the training data object created in the line above
  # input_channel - how the trainer receives its input
  # batch_size - number of training samples the model is updated on at a time
  # epochs - number of training passes over the data
  # validation_split - fraction of the training data to be used as validation data
  # augmentation_factor - how many of the dialogue stories are randomly glued together;
  #     the more stories you have, the higher the augmentation factor you want
  agent.train_online(training_data,
                     input_channel=input_channel,
                     batch_size=35,
                     epochs=400,
                     max_training_samples=200,
                     validation_split=0.2,
                     augmentation_factor=20)

  return agent
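
For reference, a hedged sketch of how run_bot_cli might be invoked; the channel class, the interpreter path and the import locations are assumptions:

from rasa_core.channels.console import ConsoleInputChannel
from rasa_core.interpreter import RasaNLUInterpreter

agent = run_bot_cli(ConsoleInputChannel(),
                    RasaNLUInterpreter("./models/nlu/default/current"))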
Example #3
def run_eventbot_online(input_channel,
                        interpreter,
                        domain_file="./data/domain.yml",
                        training_data_file='./data/stories'):
    try:
        KnowledgeGraph()
    except ServiceUnavailable:
        print('Neo4j connection failed. Program stopped.')
        return

    fallback = FallbackPolicy(fallback_action_name="utter_not_understood",
                              core_threshold=0.3,
                              nlu_threshold=0.6)
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(),
                            KerasPolicy(), fallback],
                  interpreter=interpreter)

    data = agent.load_data(training_data_file)
    agent.train_online(data,
                       input_channel=input_channel,
                       max_history=2,
                       batch_size=50,
                       epochs=200,
                       max_training_samples=300)

    return agent
Example #4
    def runRasaTrainOnline(self):
        try:
            input_channel = ConsoleInputChannel()
            interpreter = RasaNLUInterpreter(
                os.path.realpath(self.config.get('nluModel',
                                                 'model_location')))
            domain_file = os.path.realpath(
                self.config.get('inputData', 'coreyml'))
            training_data_file = os.path.realpath(
                self.config.get('inputData', 'stories'))
            logger.info(
                "nluModel = %s, domain_file = %s, train_data_file = %s" %
                (str(
                    os.path.realpath(
                        self.config.get('nluModel', 'model_location'))),
                 str(domain_file), str(training_data_file)))
            agent = Agent(domain_file,
                          policies=[MemoizationPolicy(),
                                    KerasPolicy()],
                          interpreter=interpreter)

            agent.train_online(training_data_file,
                               input_channel=input_channel,
                               max_history=2,
                               batch_size=50,
                               epochs=200,
                               max_training_samples=300)

            return agent, "Rasa Train Online completed successfully"
        except Exception as e:
            logger.error("Unable to run Rasa Train Online, exception : %s" %
                         (str(e)))
            raise
Example #5
def train_dialogue_model(domain_file, stories_file, output_path,
                         use_online_learning=False,
                         nlu_model_path=None,
                         max_history=None,
                         kwargs=None):
    if not kwargs:
        kwargs = {}

    agent = Agent(domain_file, policies=[
        MemoizationPolicy(max_history=max_history),
        KerasPolicy()])
    training_data = agent.load_data(stories_file)

    if use_online_learning:
        if nlu_model_path:
            agent.interpreter = RasaNLUInterpreter(nlu_model_path)
        else:
            agent.interpreter = RegexInterpreter()
        agent.train_online(
                training_data,
                input_channel=ConsoleInputChannel(),
                model_path=output_path,
                **kwargs)
    else:
        agent.train(training_data, **kwargs)

    agent.persist(output_path)
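
A usage sketch for both branches of train_dialogue_model; the file paths are illustrative and not taken from the snippet:

# plain offline training
train_dialogue_model("domain.yml", "data/stories.md", "models/dialogue")

# interactive online learning with a previously trained NLU model
train_dialogue_model("domain.yml", "data/stories.md", "models/dialogue",
                     use_online_learning=True,
                     nlu_model_path="models/nlu/default/current")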
Example #6
def run_hello_world(max_training_samples=10, serve_forever=True):
    training_data = '../mom/data/stories.md'

    default_domain = TemplateDomain.load("../mom/domain.yml")
    agent = Agent(
        default_domain,
        # policies=[SimplePolicy()],
        policies=[MemoizationPolicy(), KerasPolicy()],
        interpreter=HelloInterpreter(),
        tracker_store=InMemoryTrackerStore(default_domain))
    logger.info("Starting to train policy")
    # agent = Agent(default_domain,
    #               policies=[SimplePolicy()],
    #               interpreter=HelloInterpreter(),
    #               tracker_store=InMemoryTrackerStore(default_domain))

    # if serve_forever:
    #     # Attach the commandline input to the controller to handle all
    #     # incoming messages from that channel
    #     agent.handle_channel(ConsoleInputChannel())

    agent.train_online(training_data,
                       input_channel=ConsoleInputChannel(),
                       epochs=1,
                       max_training_samples=max_training_samples)

    return agent
Example #7
def run_weather_online(input_channel, interpreter,
                       domain_file='./weather_domain.yml',
                       training_data_file='./data/stories.md'):

    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=2), KerasPolicy()],
                  interpreter=interpreter)

    agent.train_online(training_data_file, input_channel=input_channel,
                       max_history=2, batch_size=50, epochs=200,
                       max_training_samples=300)

    return agent
Example #8
def run_fake_user(input_channel, max_training_samples=10, serve_forever=True):
    customer = Customer()
    training_data = 'examples/babi/data/babi_task5_fu_rasa_fewer_actions.md'

    logger.info("Starting to train policy")

    agent = Agent("examples/restaurant_domain.yml",
                  policies=[MemoizationPolicy(),
                            KerasPolicy()],
                  interpreter=RegexInterpreter())

    agent.train_online(training_data,
                       input_channel=input_channel,
                       epochs=1,
                       max_training_samples=max_training_samples)

    while serve_forever:
        tracker = agent.tracker_store.retrieve('default')
        back = customer.respond_to_action(tracker)
        if back == 'reset':
            agent.handle_message("_greet",
                                 output_channel=ConsoleOutputChannel())
        else:
            agent.handle_message(back, output_channel=ConsoleOutputChannel())

    return agent
Example #9
def train_dialogue_model(domain_file,
                         stories_file,
                         output_path,
                         use_online_learning=False,
                         nlu_model_path=None,
                         max_history=None,
                         kwargs=None):
    if not kwargs:
        kwargs = {}

    agent = Agent(
        domain_file,
        policies=[MemoizationPolicy(max_history=max_history),
                  KerasPolicy()])

    data_load_args, kwargs = utils.extract_args(
        kwargs, {
            "use_story_concatenation", "unique_last_num_states",
            "augmentation_factor", "remove_duplicates", "debug_plots"
        })
    training_data = agent.load_data(stories_file, **data_load_args)

    if use_online_learning:
        if nlu_model_path:
            agent.interpreter = RasaNLUInterpreter(nlu_model_path)
        else:
            agent.interpreter = RegexInterpreter()
        agent.train_online(training_data,
                           input_channel=ConsoleInputChannel(),
                           model_path=output_path,
                           **kwargs)
    else:
        agent.train(training_data, **kwargs)

    agent.persist(output_path)
Example #10
def train_dialogue():
    interpreter = RasaNLUInterpreter(CHAT)
    agent = Agent(CONFIG, policies=[MemoizationPolicy(max_history=3), KerasPolicy()], interpreter=interpreter)

    training_data = STORIES ###TODO
    agent.train(
            training_data,
            epochs=500,
            batch_size=100,
            validation_split=0.3
    )
    agent.persist(MODEL_DIALOGUE)

    # input_channel = TelegramInput(
    #     access_token="577522303:AAG6_5NcdBVRq-ndzThybnOh7SHL9I2ylKo", # you get this when setting up a bot
    #     verify="chatmoviedomainbot", # this is your bots username
    #     webhook_url="https://07958fff.ngrok.io" # the url your bot should listen for messages
    # )

    input_channel = ConsoleInputChannel()
    agent.train_online(
                training_data,
                input_channel=input_channel,
                epochs=400,
                batch_size=100
        )
    
    return agent
Example #11
def train_dialogue_model(domain_file, stories_file, output_path,
                         use_online_learning=False, nlu_model_path=None,
                         kwargs=None):
    if not kwargs:
        kwargs = {}

    agent = Agent(domain_file, policies=[MemoizationPolicy(), KerasPolicy()])

    if use_online_learning:
        if nlu_model_path:
            agent.interpreter = RasaNLUInterpreter(nlu_model_path)
        else:
            agent.interpreter = RegexInterpreter()
        agent.train_online(
                stories_file,
                input_channel=ConsoleInputChannel(),
                epochs=10,
                model_path=output_path)
    else:
        agent.train(
                stories_file,
                validation_split=0.1,
                **kwargs
        )

    agent.persist(output_path)
Example #12
def run_online_trainer(input_channel,
                        interpreter,
                        domain_def_file='domain.yml',
                        training_data_file='./stories.md'):
    agent = Agent(domain_def_file,
                  policies=[MemoizationPolicy(), KerasPolicy()],
                  # the interpreter argument was accepted but never used; pass it through
                  interpreter=interpreter)
    agent.train_online(training_data_file,
                        input_channel=input_channel)

    return agent
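
This is the most compact variant on the page: it relies on train_online's defaults for epochs, batch size and the number of training samples. A hedged invocation sketch (the import paths and the NLU model location are assumptions):

from rasa_core.channels.console import ConsoleInputChannel
from rasa_core.interpreter import RasaNLUInterpreter

agent = run_online_trainer(ConsoleInputChannel(),
                           RasaNLUInterpreter("models/nlu/default/current"))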
Example #13
def train_dialog_online(intent_classificator, input_channel):
    conf = Config()
    agent = Agent(conf.get_value('domain-file'), policies=[MemoizationPolicy(), KerasPolicy()],
                  interpreter=intent_classificator)

    agent.train_online(conf.get_value('stories-file'),
                       input_channel=input_channel,
                       max_history=conf.get_value('dialog-model-max-history'),
                       batch_size=conf.get_value('dialog-model-batch-size'),
                       epochs=conf.get_value('dialog-model-epochs'),
                       max_training_samples=conf.get_value('dialog-model-max-training-samples'))
    return agent
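
The Config object above is only assumed to expose the keys referenced in the snippet; an illustrative mapping with placeholder values, chosen to match the other examples on this page:

# hypothetical values backing conf.get_value()
config_values = {
    'domain-file': 'domain.yml',
    'stories-file': 'data/stories.md',
    'dialog-model-max-history': 2,
    'dialog-model-batch-size': 50,
    'dialog-model-epochs': 200,
    'dialog-model-max-training-samples': 300,
}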
Example #14
def run_weather_online(input_channel, interpreter,
                          domain_file="weather_domain.yml",
                          training_data_file='data/stories.md'):
    # `fallback` is not defined in this snippet; it is assumed to be a
    # FallbackPolicy instance, e.g. (action name and thresholds are illustrative):
    fallback = FallbackPolicy(fallback_action_name="utter_default",
                              core_threshold=0.3,
                              nlu_threshold=0.3)
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=2), KerasPolicy(), fallback],
                  interpreter=interpreter)

    agent.train_online(training_data_file,
                       input_channel=input_channel,
                       batch_size=15,
                       epochs=400,
                       max_training_samples=400)

    return agent
Example #15
def run_weather_online(input_channel, interpreter, domain_file,
                       training_data_file):
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=3),
                            KerasPolicy()],
                  interpreter=interpreter)

    training_data = agent.load_data(training_data_file)

    agent.train_online(training_data,
                       input_channel=input_channel,
                       epochs=400,
                       batch_size=100,
                       validation_split=0.2)
Example #16
 def learnonline(self, msg, args):
     """Command to trigger learn_online on rasa agent"""
     token = config.BOT_IDENTITY['token']
     if token is None:
         raise Exception('No slack token')
     train_agent = Agent(self.domain_file,
                         policies=[MemoizationPolicy(max_history=2), KerasPolicy()],
                         interpreter=RegexInterpreter())
     training_data = train_agent.load_data(self.training_data_file)
     train_agent.train_online(training_data,
                              input_channel=self.backend_adapter,
                              batch_size=50,
                              epochs=200,
                              max_training_samples=300)
Example #17
def run_coco_online(input_channel, interpreter,
                          domain_file="coco_domain.yml",
                          training_data_file='data/coco_stories.md'):
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=2), KerasPolicy()],
                  interpreter=interpreter)

    agent.train_online(training_data_file,
                       input_channel=input_channel,
                       batch_size=50,
                       epochs=200,
                       max_training_samples=300)

    return agent
Example #18
def resto_test(interpreter,
               domain_file="resto_domain.yml",
               training_data_file='D:/RasaBot/data/stories.md'):

    #action_endpoint = EndpointConfig(url="http://localhost:5004/webhook")
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(), KerasPolicy()],
                  interpreter=interpreter)

    data = agent.load_data(training_data_file)
    agent.train_online(data)
    #interactive.run_interactive_learning(agent, training_data_file)

    return agent
Example #19
def run_fake_user(input_channel, max_training_samples=10, serve_forever=True):
    logger.info("Starting to train policy")
    agent = Agent(RASA_CORE_DOMAIN_PATH,
                  policies=[MemoizationPolicy(), KerasPolicy()],
                  interpreter=RegexInterpreter())

    agent.train_online(RASA_CORE_TRAINING_DATA_PATH,
                       input_channel=input_channel,
                       epochs=RASA_CORE_EPOCHS,
                       max_training_samples=max_training_samples)

    while serve_forever:
        # NOTE: `back` is not defined in this snippet; it is expected to be the
        # next simulated user message (compare Example #8 above).
        agent.handle_message(UserMessage(back, ConsoleOutputChannel()))

    return agent
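
The RASA_CORE_* constants are defined outside this snippet; plausible module-level values, shown only for orientation (assumptions, not part of the original code):

RASA_CORE_DOMAIN_PATH = "domain.yml"
RASA_CORE_TRAINING_DATA_PATH = "data/stories.md"
RASA_CORE_EPOCHS = 200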
Example #20
def run_weather_online(input_channel, interpreter,
                          domain_file="hospital_domain.yml",
                          training_data_file='stories.md'):
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(), KerasPolicy()],
                  interpreter=interpreter)

    agent.train_online(training_data_file,
                       input_channel=input_channel,
                       max_history=3,
                       batch_size=50,
                       epochs=300,
                       max_training_samples=50000)

    return agent
Example #21
def run_babi_online(max_messages=10):
    training_data = 'stories.md'
    logger.info("Starting to train policy")
    agent = Agent("domain.yml",
                  policies=[MemoizationPolicy(),
                            MusicPlayerPolicy()],
                  interpreter=RegexInterpreter())

    input_c = FileInputChannel(training_data,
                               message_line_pattern=r'^\s*\*\s(.*)$',
                               max_messages=max_messages)
    agent.train_online(training_data, input_channel=input_c, epochs=10)

    # `nlu_model_path` is not defined in this snippet; it must point to a trained Rasa NLU model
    agent.interpreter = RasaNLUInterpreter(nlu_model_path)
    return agent
Example #22
def run_concertbot_online(input_channel, interpreter,
                          domain_file="../config/dialogue/demo/fund_domain.yml",
                          training_data_file='../data/dialogue/demo/fund_stories.md'):
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=2), KerasPolicy()],
                  interpreter=interpreter)

    training_data = agent.load_data(training_data_file)
    agent.train_online(training_data,
                       input_channel=input_channel,
                       batch_size=50,
                       epochs=200,
                       max_training_samples=300)

    return agent
Example #23
def run_concertbot_online(input_channel, interpreter):
    training_data_file = 'data/stories.md'

    agent = Agent("concert_domain.yml",
                  policies=[MemoizationPolicy(), ConcertPolicy()],
                  interpreter=interpreter)

    agent.train_online(training_data_file,
                       input_channel=input_channel,
                       max_history=2,
                       batch_size=50,
                       epochs=200,
                       max_training_samples=300)

    return agent
Example #24
def run_concertbot_online(input_channel, interpreter,
                          domain_file="concert_domain.yml",
                          training_data_file='data/stories.md'):
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=2), KerasPolicy()],
                  interpreter=interpreter)

    training_data = agent.load_data(training_data_file)
    agent.train_online(training_data,
                       input_channel=input_channel,
                       batch_size=50,
                       epochs=200,
                       max_training_samples=300)

    return agent
Example #25
def run_babi_online(max_messages=10):
    training_data = 'examples/babi/data/babi_task5_dev_rasa_even_smaller.md'
    logger.info("Starting to train policy")
    agent = Agent("examples/restaurant_domain.yml",
                  policies=[MemoizationPolicy(),
                            RestaurantPolicy()],
                  interpreter=RegexInterpreter())

    input_c = FileInputChannel(training_data,
                               message_line_pattern=r'^\s*\*\s(.*)$',
                               max_messages=max_messages)
    agent.train_online(training_data, input_channel=input_c, epochs=10)

    # `nlu_model_path` is not defined in this snippet; it must point to a trained Rasa NLU model
    agent.interpreter = RasaNLUInterpreter(nlu_model_path)
    return agent
Example #26
def run_weather_online(input_channel, interpreter,
                          domain_file="customer_domain.yml",
                          training_data_file='C:/Murali/Testing/hackathon2018/customer_bot-master/data/stories.md'):
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(), KerasPolicy()],
                  interpreter=interpreter)

    agent.train_online(training_data_file,
                       input_channel=input_channel,
                       max_history=2,
                       batch_size=50,
                       epochs=200,
                       max_training_samples=300)

    return agent
Example #27
def run_concertbot_online(input_channel, interpreter,
                          domain_file="models/dialogue/domain.yml",
                          training_data_file='data/examples/rasa/stories.md'):
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(), KerasPolicy()],
                  interpreter=interpreter)

    agent.train_online(training_data_file,
                       input_channel=input_channel,
                       max_history=2,
                       batch_size=50,
                       epochs=200,
                       max_training_samples=300)

    return agent
Example #28
def train_diag_model_online(input_channel, interpreter,
                          domain_file="dynamo_domain.yml",
                          training_data_file='data/dialouge_stories/stories.md'):
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=2), KerasPolicy()],
                  interpreter=interpreter)

    training_data = agent.load_data(training_data_file)
    agent.train_online(training_data,
                       input_channel=input_channel,
                       batch_size=50,
                       epochs=200,
                       max_training_samples=300)

    return agent
Example #29
def train_model_online():
    agent = Agent(RASA_CORE_DOMAIN_PATH,
                  policies=[MemoizationPolicy(),
                            StatusPolicy()],
                  interpreter=RegexInterpreter())

    agent.train_online(RASA_CORE_TRAINING_DATA_PATH,
                       input_channel=FileInputChannel(
                           RASA_CORE_TRAINING_DATA_PATH,
                           message_line_pattern=r'^\s*\*\s(.*)$',
                           max_messages=10),
                       epochs=RASA_CORE_EPOCHS)

    agent.interpreter = RasaNLUInterpreter(RASA_NLU_MODEL_PATH)
    return agent
Example #30
def run_concertbot_online(input_channel,
                          domain_file="domain.yml",
                          training_data_file='data/stories.md'):
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=2), KerasPolicy()],
                  interpreter=RasaNLUInterpreter("models/nlu/default/current"))

    training_data = agent.load_data(training_data_file)
    agent.train_online(training_data,
                       input_channel=input_channel,
                       batch_size=BATCH_SIZE,
                       epochs=EPOCHS,
                       max_training_samples=MAX_TRAINING_SAMPLES)

    return agent
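
BATCH_SIZE, EPOCHS and MAX_TRAINING_SAMPLES are module-level constants not shown in this snippet; illustrative values in line with the other examples on this page (assumptions only):

BATCH_SIZE = 50
EPOCHS = 200
MAX_TRAINING_SAMPLES = 300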
Example #31
def run_ivrbot_online(input_channel=ConsoleInputChannel(),
                      interpreter=RasaNLUInterpreter("projects/ivr_nlu/demo"),
                      domain_file="mobile_domain.yml",
                      training_data_file="data/mobile_story.md"):
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(), KerasPolicy()],
                  interpreter=interpreter)

    training_data = agent.load_data(training_data_file)
    agent.train_online(training_data,
                       input_channel=input_channel,
                       batch_size=16,
                       epochs=200,
                       max_training_samples=300)

    return agent
Example #32
def run_train_bot_online(input_channel, interpreter, domain_id="default"):
    # `data_folder` is assumed to be a module-level path to the bot's data directory
    domain_file = "{}/{}/domain.yml".format(data_folder, domain_id)
    training_data_file = '{}/{}/stories.md'.format(data_folder, domain_id)
    agent = Agent(domain_file,
                  policies=[MemoizationPolicy(max_history=2),
                            KerasPolicy()],
                  interpreter=interpreter)

    training_data = agent.load_data(training_data_file)
    agent.train_online(training_data,
                       input_channel=input_channel,
                       batch_size=50,
                       epochs=200,
                       max_training_samples=300)

    return agent