Example #1
def get_response(message):
    # Keep the input as a str; encoding it to bytes here would break the
    # '\n' and '' comparisons below under Python 3.
    line = message
    if len(line) > 0 and line[-1] == '\n':
        line = line[:-1]
    if line == '':
        response = 'What did you say?'
        output_file.write('Human: ' + message + '\n' + 'Bot: ' +
                          str(response) + '\n')
        return message, response

    token_ids = data_utils.sentence2id(enc_vocab, line)
    if len(token_ids) > max_length:
        response = 'The maximum length I can handle is %d' % max_length
        output_file.write('Human: ' + message + '\n' + 'Bot: ' +
                          str(response) + '\n')
        return message, response

    bucket_id = chatbot.find_right_bucket(len(token_ids))
    encoder_inputs, decoder_inputs, decoder_masks = data_utils.get_batch(
        [(token_ids, [])], bucket_id, batch_size=1)
    _, _, output_logits = chatbot.run_step(sess, model, encoder_inputs,
                                           decoder_inputs, decoder_masks,
                                           bucket_id, True)
    response = chatbot.construct_response(output_logits, inv_dec_vocab)
    output_file.write('Human: ' + message + '\n' + 'Bot: ' + str(response) +
                      '\n')

    return message, response
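
`get_response` leans on module-level state (`enc_vocab`, `inv_dec_vocab`, `sess`, `model`, `output_file`, `max_length`) that the snippet does not show. A minimal one-time setup sketch, assuming the same `chatbot`, `data_utils`, and `config` modules used above; the log file name is an assumption:

import os
import tensorflow as tf

# Project modules assumed by the snippet above.
import chatbot
import config
import data_utils

# Assumed one-time setup for get_response(); every path and constant here
# mirrors the other examples and is a guess, not part of the snippet above.
_, enc_vocab = data_utils.load_vocab(os.path.join(config.DATA_PATH, 'vocab.enc'))
inv_dec_vocab, _ = data_utils.load_vocab(os.path.join(config.DATA_PATH, 'vocab.dec'))
model = chatbot.ChatBotModel(True, batch_size=1)  # forward-only (test) mode
model.build_graph()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
max_length = config.BUCKETS[-1][0]  # longest encoder bucket
output_file = open(os.path.join(config.DATA_PATH, 'output_convo.txt'), 'a+')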
Example #2
def chat(question):
    """
    In test mode, we don't need to create the backward path.
    """
    _, enc_vocab = data_utils.load_vocab(
        os.path.join(config.DATA_PATH, "vocab.enc"))
    # `inv_dec_vocab` is a list mapping token id to word.
    inv_dec_vocab, _ = data_utils.load_vocab(
        os.path.join(config.DATA_PATH, "vocab.dec"))

    model = ChatBotModel(True, batch_size=1)
    model.build_graph()

    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        check_restore_parameters(sess, saver)
        output_file = open(os.path.join(config.DATA_PATH,
                                        config.TERMINAL_OUTPUT),
                           "a+",
                           encoding="utf-8")
        # Decode from standard input.
        max_length = config.BUCKETS[-1][0]
        print(
            "Welcome to TensorBro. Say something. Enter to exit. Max length is",
            max_length)

        line = question
        if hasattr(line, "decode"):
            # If using Python 2
            # FIXME: UnicodeError when deleting Chinese in terminal.
            line = line.decode("utf-8")
        if len(line) > 0 and line[-1] == "\n":
            line = line[:-1]
        if not line:
            # Nothing to answer; close the log and bail out instead of
            # feeding an empty sentence to the model.
            output_file.close()
            return
        output_file.write("HUMAN ++++ " + line + "\n")
        # Get token-ids for the input sentence.
        token_ids = data_utils.sentence2id(enc_vocab, line)
        if len(token_ids) > max_length:
            print("Max length I can handle is:", max_length)
            # Truncate instead of falling through with an oversized input,
            # which would make find_right_bucket fail.
            token_ids = token_ids[:max_length]
        # Which bucket does it belong to?
        bucket_id = find_right_bucket(len(token_ids))
        # Get a 1-element batch to feed the sentence to the model.
        encoder_inputs, decoder_inputs, decoder_masks = data_utils.get_batch(
            [(token_ids, [])], bucket_id, batch_size=1)
        # Get output logits for the sentence.
        _, _, output_logits = run_step(sess, model, encoder_inputs,
                                       decoder_inputs, decoder_masks,
                                       bucket_id, True)
        response = construct_response(output_logits, inv_dec_vocab)
        print(response)
        output_file.write("BOT ++++ " + response + "\n")

        output_file.write("=============================================\n")
        output_file.close()
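
`check_restore_parameters` appears in every example but is never defined. A plausible sketch, assuming trained checkpoints live under an assumed `config.CPT_PATH` directory:

def check_restore_parameters(sess, saver):
    """ Restore trained weights into `sess` if a checkpoint exists. """
    # config.CPT_PATH is an assumption; point it at your checkpoint folder.
    ckpt = tf.train.get_checkpoint_state(config.CPT_PATH)
    if ckpt and ckpt.model_checkpoint_path:
        print('Loading parameters for the chatbot')
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        print('Initializing fresh parameters for the chatbot')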
Example #3
def chat():
    """ in test mode, we don't to create the backward path """
    _, enc_vocab = data_utils.load_vocab(
        os.path.join(config.PROCESSED_PATH, 'vocab.enc'))
    inv_dec_vocab, _ = data_utils.load_vocab(
        os.path.join(config.PROCESSED_PATH, 'vocab.dec'))

    model = ChatBotModel(True, batch_size=1)
    model.build_graph()

    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        check_restore_parameters(sess, saver)
        output_file = open(
            '/Users/EleanorLeung/Documents/CITS4404/chatbot/output_convo.txt',
            'a+')
        # Decode from standard input.
        max_length = config.BUCKETS[-1][0]
        print('Talk to me! Enter to exit. Max length is', max_length)
        while True:
            line = get_user_input()
            # Keep the input as a str; encoding it to bytes here would
            # break the '\n' and '' comparisons below under Python 3.
            if len(line) > 0 and line[-1] == '\n':
                line = line[:-1]
            if line == '':
                break
            output_file.write('HUMAN: ' + line + '\n')
            token_ids = data_utils.sentence2id(enc_vocab, line)
            if len(token_ids) > max_length:
                print('Max length I can handle is:', max_length)
                line = get_user_input()
                continue
            bucket_id = find_right_bucket(len(token_ids))
            # Get a 1-element batch to feed the sentence to the model.
            encoder_inputs, decoder_inputs, decoder_masks = data_utils.get_batch(
                [(token_ids, [])], bucket_id, batch_size=1)
            # Get output logits for the sentence.
            _, _, output_logits = run_step(sess, model, encoder_inputs,
                                           decoder_inputs, decoder_masks,
                                           bucket_id, True)
            response = construct_response(output_logits, inv_dec_vocab)
            print(response)
            output_file.write('BOT: ' + response + '\n')
        output_file.write('=============================================\n')
        output_file.close()
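
Example #3 also relies on two helpers that are not shown: `get_user_input` for the terminal loop and `find_right_bucket` for bucketing. Minimal sketches, assuming `config.BUCKETS` is a size-sorted list of `(encoder_length, decoder_length)` pairs:

import sys

def get_user_input():
    """ Read one line from standard input for the chat loop. """
    print('> ', end='')
    sys.stdout.flush()
    return sys.stdin.readline()

def find_right_bucket(length):
    """ Return the index of the smallest bucket that fits the input. """
    return min(b for b in range(len(config.BUCKETS))
               if config.BUCKETS[b][0] >= length)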
Example #4
def wechat_text(msg):
    '''
    Get the response to a WeChat text message.
    :param msg: an incoming message; its type is 'Text'
    :return: the response generated by the seq2seq model
    '''
    text = msg["Text"]
    token_ids = data_utils.sentence2id(enc_vocab, text)
    if len(token_ids) > max_length:
        print("Max length I can handle is:", max_length)
        # Truncate so that find_right_bucket below can still find a bucket.
        token_ids = token_ids[:max_length]
    # Which bucket does it belong to?
    bucket_id = find_right_bucket(len(token_ids))
    # Get a 1-element batch to feed the sentence to the model.
    encoder_inputs, decoder_inputs, decoder_masks = data_utils.get_batch(
        [(token_ids, [])], bucket_id, batch_size=1)
    # Get output logits for the sentence.
    _, _, output_logits = run_step(sess, model, encoder_inputs, decoder_inputs,
                                   decoder_masks, bucket_id, True)
    response = construct_response(output_logits, inv_dec_vocab)

    return response
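
`wechat_text` reads `msg["Text"]`, which matches the message dict of the `itchat` WeChat library; it also assumes the same module-level `sess`, `model`, and vocabularies as Example #1. A hedged wiring sketch (the registration below is an assumption, not part of the original snippet):

import itchat

@itchat.msg_register(itchat.content.TEXT)
def text_reply(msg):
    # Returning a string from the handler sends it back as the reply.
    return wechat_text(msg)

itchat.auto_login(hotReload=True)  # scan the QR code once; session is cached
itchat.run()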
Example #5
def seq_pred(question):
    _, enc_vocab = data_utils.load_vocab(os.path.join(config.DATA_PATH, "vocab.enc"))
    inv_dec_vocab, _ = data_utils.load_vocab(os.path.join(config.DATA_PATH, "vocab.dec"))
    model = ChatBotModel(True, batch_size=1)
    model.build_graph()
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        check_restore_parameters(sess, saver)
        max_length = config.BUCKETS[-1][0]
        line = question
        if hasattr(line, "decode"):
            # If using Python 2
            # FIXME: UnicodeError when deleting Chinese in terminal.
            line = line.decode("utf-8")
        if len(line) > 0 and line[-1] == "\n":
            line = line[:-1]
        if not line:
            # An empty question has nothing to predict on.
            return ""

        token_ids = data_utils.sentence2id(enc_vocab, line)
        if len(token_ids) > max_length:
            # Truncate overlong questions so the bucketing below still works.
            token_ids = token_ids[:max_length]

        bucket_id = find_right_bucket(len(token_ids))
        # Get a 1-element batch to feed the sentence to the model.
        encoder_inputs, decoder_inputs, decoder_masks = data_utils.get_batch(
            [(token_ids, [])], bucket_id, batch_size=1)
        # Get output logits for the sentence.
        _, _, output_logits = run_step(sess, model, encoder_inputs,
                                       decoder_inputs, decoder_masks,
                                       bucket_id, True)
        response = construct_response(output_logits, inv_dec_vocab)
        return response
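
Because `seq_pred` calls `build_graph()` on every invocation, a second call in the same process would collide with the variables created by the first. A hedged workaround is to clear the default graph between calls:

import tensorflow as tf

tf.reset_default_graph()            # drop variables from any previous call
print(seq_pred('How are you?'))     # hypothetical question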