Example #1
def decode(bot, dataset, teacher_mode=True):
    """Runs a chat session between the given chatbot and user."""

    # We decode one sentence at a time.
    bot.batch_size = 1
    # Decode from standard input.
    print("Type \"exit\" to exit.")
    print(
        "Write stuff after the \">\" below and I, your robot friend, will respond."
    )
    sentence = io_utils.get_sentence()
    while sentence and sentence != 'exit':
        # Convert input sentence to token-ids.
        token_ids = io_utils.sentence_to_token_ids(tf.compat.as_bytes(sentence),
                                                   dataset.word_to_idx)
        # Get output sentence from the chatbot.
        outputs = decode_inputs(token_ids, dataset.idx_to_word, bot)
        # Print the chatbot's response.
        print(outputs)
        if teacher_mode:
            print("What should I have said?")
            feedback = io_utils.get_sentence()
            feedback_ids = io_utils.sentence_to_token_ids(
                tf.compat.as_bytes(feedback), dataset.word_to_idx)
            outputs = train_on_feedback(bot, token_ids, feedback_ids,
                                        dataset.idx_to_word)
            print("Okay. Let me try again:\n", outputs)
        # Wait for the next input; the loop ends on 'exit' or empty input.
        sentence = io_utils.get_sentence()
    print("Fine, bye :(")
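Below is a minimal sketch of how a chat session might be launched with this function. The `DynamicBot` and `Cornell` names, their import paths, and the constructor arguments are assumptions for illustration, not confirmed by the snippet above.

    # Hypothetical usage -- class names and paths are assumed.
    from chatbot import DynamicBot
    from data import Cornell

    dataset = Cornell({'data_dir': 'data/cornell', 'vocab_size': 40000})
    bot = DynamicBot(dataset, decode=True)
    # Chat until the user types 'exit'; teacher_mode=False skips the
    # "What should I have said?" feedback step.
    decode(bot, dataset, teacher_mode=False)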
Example #2
    def __call__(self, sentence):
        """This is how we talk to the bot interactively.
        
        While decode(self) above sets up/manages the chat session, 
        users can also use this directly to get responses from the bot, 
        given an input sentence. 
        
        For example, one could do:
            sentence = 'Hi, bot!'
            response = bot(sentence)
        for a single input-to-response with the bot.

        Args:
            sentence: (str) Input sentence from user.

        Returns:
            response string from bot.
        """
        # Convert input sentence to token-ids.
        encoder_inputs = io_utils.sentence_to_token_ids(
            tf.compat.as_bytes(sentence), self.dataset.word_to_idx)

        # Reverse the input sequence (a common seq2seq trick) and add a
        # batch dimension of 1.
        encoder_inputs = np.array([encoder_inputs[::-1]])
        self.pipeline.feed_user_input(encoder_inputs)
        # Get output sentence from the chatbot.
        _, _, response = self.step(forward_only=True)
        # response has shape [1, response_length].
        # Its last element is the EOS_ID, which we don't show user.
        response = self.dataset.as_words(response[0][:-1])
        if 'UNK' in response:
            response = "I don't know."
        return response
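One detail worth calling out: the token list is reversed before being fed to the encoder. Reversing the source sequence is a standard seq2seq trick, popularized by Sutskever et al. (2014), that shortens the distance between the start of the input and the start of the output the decoder must produce. A toy illustration with plain numpy:

    import numpy as np

    token_ids = [12, 7, 93, 4]                  # toy ids, e.g. for "hi , bot !"
    encoder_inputs = np.array([token_ids[::-1]])
    print(encoder_inputs.shape)                 # (1, 4) -- a batch of one sentence
    print(encoder_inputs)                       # [[ 4 93  7 12]]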
Example #3
    def __call__(self, sentence):
        encoder_inputs = io_utils.sentence_to_token_ids(
            tf.compat.as_bytes(sentence), self.dataset.word_to_idx)
        encoder_inputs = np.array([encoder_inputs[::-1]])
        self.pipeline.feed_user_input(encoder_inputs)
        # Get output sentence from the chatbot.
        response = self.step(forward_only=True)
        return self.dataset.as_words(response[0])
Example #4
    def __call__(self, sentence):
        """Outputs response sentence (string) given input (string)."""
        # Convert input sentence to token-ids.
        sentence_tokens = io_utils.sentence_to_token_ids(
            tf.compat.as_bytes(sentence), self.word_to_idx)
        sentence_tokens = np.array([sentence_tokens[::-1]])

        # Get output sentence from the chatbot.
        fetches = self.tensor_dict['outputs']
        feed_dict = {self.tensor_dict['inputs']: sentence_tokens}
        response = self.sess.run(fetches=fetches, feed_dict=feed_dict)
        return self.as_words(response[0][:-1])
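This variant assumes the model has already been frozen: `self.tensor_dict` holds the input and output tensors of an imported GraphDef, and `self.sess` runs them directly. A minimal sketch of how such a dict could be built with stock TensorFlow 1.x calls follows; the file path and tensor names are assumptions, since they depend on how the graph was exported:

    import tensorflow as tf

    def load_frozen_tensors(frozen_path='out/frozen_model.pb',
                            input_name='user_input:0',
                            output_name='outputs:0'):
        """Import a frozen GraphDef and return its chat I/O tensors."""
        graph_def = tf.GraphDef()
        with tf.gfile.GFile(frozen_path, 'rb') as f:
            graph_def.ParseFromString(f.read())
        graph = tf.Graph()
        with graph.as_default():
            # name='' keeps the original tensor names intact.
            tf.import_graph_def(graph_def, name='')
        tensor_dict = {'inputs': graph.get_tensor_by_name(input_name),
                       'outputs': graph.get_tensor_by_name(output_name)}
        return tensor_dict, graph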
Example #5
    def test_manual_freeze(self):
        """Make sure we can freeze the bot, unfreeze, and still chat."""

        # ================================================
        # 1. Create & train bot.
        # ================================================
        flags = TEST_FLAGS
        flags = flags._replace(
            model_params=dict(ckpt_dir=os.path.join(TEST_DIR, 'out'),
                              reset_model=True,
                              steps_per_ckpt=20,
                              max_steps=40))
        bot = create_bot(flags)
        self.assertEqual(bot.reset_model, True)
        # Simulate a small training session on the bot.
        bot.train()

        # ================================================
        # 2. Recreate a chattable bot.
        # ================================================
        # Recreate bot from scratch with decode set to true.
        logging.info("Resetting default graph . . . ")
        tf.reset_default_graph()
        flags = flags._replace(
            model_params={
                **flags.model_params, 'reset_model': False,
                'decode': True,
                'max_steps': 100,
                'steps_per_ckpt': 50
            })
        self.assertTrue(flags.model_params.get('decode'))
        bot = create_bot(flags)
        self.assertTrue(bot.is_chatting)
        self.assertTrue(bot.decode)

        print("Testing quick chat sesh . . . ")
        config = io_utils.parse_config(flags=flags)
        dataset_class = pydoc.locate(config['dataset']) \
                        or getattr(data, config['dataset'])
        dataset = dataset_class(config['dataset_params'])
        test_input = "How's it going?"
        encoder_inputs = io_utils.sentence_to_token_ids(
            tf.compat.as_bytes(test_input), dataset.word_to_idx)
        encoder_inputs = np.array([encoder_inputs[::-1]])
        bot.pipeline._feed_dict = {bot.pipeline.user_input: encoder_inputs}

        # Get output sentence from the chatbot.
        _, _, response = bot.step(forward_only=True)
        print("Robot:", dataset.as_words(response[0][:-1]))

        # ================================================
        # 3. Freeze the chattable bot.
        # ================================================
        logging.info("Calling bot.freeze() . . . ")
        bot.freeze()

        # ================================================
        # 4. Try to unfreeze and use it.
        # ================================================
        logging.info("Resetting default graph . . . ")
        tf.reset_default_graph()
        logging.info("Importing frozen graph into default . . . ")
        frozen_graph = bot_freezer.load_graph(bot.ckpt_dir)

        logging.info("Extracting input/output tensors.")
        tensors, frozen_graph = bot_freezer.unfreeze_bot(bot.ckpt_dir)
        self.assertIsNotNone(tensors['inputs'])
        self.assertIsNotNone(tensors['outputs'])

        with tf.Session(graph=frozen_graph) as sess:
            raw_input = "How's it going?"
            encoder_inputs = io_utils.sentence_to_token_ids(
                tf.compat.as_bytes(raw_input), dataset.word_to_idx)
            encoder_inputs = np.array([encoder_inputs[::-1]])
            feed_dict = {tensors['inputs'].name: encoder_inputs}
            response = sess.run(tensors['outputs'], feed_dict=feed_dict)
            logging.info('Response: %s', response)
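The internals of `bot_freezer` are not shown here, but freezing in TensorFlow 1.x typically means folding the trained variables into constants and serializing the resulting GraphDef, which is what this test exercises end to end. A rough sketch under that assumption; the output node name and file name are hypothetical:

    import os
    import tensorflow as tf

    def freeze(sess, ckpt_dir, output_node_names=('outputs',)):
        """Fold variables into constants and write a standalone GraphDef."""
        frozen = tf.graph_util.convert_variables_to_constants(
            sess, sess.graph.as_graph_def(), list(output_node_names))
        out_path = os.path.join(ckpt_dir, 'frozen_model.pb')
        with tf.gfile.GFile(out_path, 'wb') as f:
            f.write(frozen.SerializeToString())
        return out_path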