Code Example #1
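All four examples below come from the nlp_xiaojiang chatbot code and target the TensorFlow 1.x API. The snippets omit their imports; the sketch here is a minimal reconstruction, where the module paths for SequenceToSequence, batch_flow, WordSequence, and ThreadedGenerator are assumptions about the project layout, and path constants such as chatbot_data_cg_xy_anti are expected to come from the project's config module:

# Assumed imports for the examples below (TensorFlow 1.x).
# The local module paths are guesses; adjust to the actual project layout.
import pickle
import random

import jieba
import numpy as np
import tensorflow as tf
from sklearn.utils import shuffle
from tqdm import tqdm

from sequence_to_sequence import SequenceToSequence
from data_utils import batch_flow
from word_sequence import WordSequence
from threadedgenerator import ThreadedGenerator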
def predict_anti(params):
    """测试不同参数在生成的假数据上的运行结果"""

    x_data, _ = pickle.load(open(chatbot_data_cg_xy_anti, 'rb'))
    ws = pickle.load(open(chatbot_data_cg_ws_anti, 'rb'))

    for x in x_data[:5]:
        print(' '.join(x))

    config = tf.ConfigProto(
        # device_count={'CPU': 1, 'GPU': 0},
        allow_soft_placement=True,
        log_device_placement=False)

    save_path = model_ckpt_cg_anti

    # Inference section
    tf.reset_default_graph()
    model_pred = SequenceToSequence(input_vocab_size=len(ws),
                                    target_vocab_size=len(ws),
                                    batch_size=1,
                                    mode='decode',
                                    beam_width=0,
                                    **params)
    init = tf.global_variables_initializer()

    with tf.Session(config=config) as sess:
        sess.run(init)
        model_pred.load(sess, save_path)

        while True:
            user_text = input('Input Chat Sentence:')
            if user_text in ('exit', 'quit'):
                exit(0)
            x_test = [list(user_text.lower())]
            # x_test = [word_tokenize(user_text)]
            bar = batch_flow([x_test], ws, 1)
            x, xl = next(bar)
            # Reverse the input sequence, the classic seq2seq trick of
            # feeding the source to the encoder in reverse order.
            x = np.flip(x, axis=1)
            print(x, xl)
            pred = model_pred.predict(sess, np.array(x), np.array(xl))
            print(pred)
            print(ws.inverse_transform(x[0]))
            for p in pred:
                ans = ws.inverse_transform(p)
                print(ans)
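
A call sketch for this function. The params dict is hypothetical (its keys mirror the keyword arguments spelled out in the other examples), and the values must match whatever configuration produced the checkpoint at model_ckpt_cg_anti:

params = {
    'bidirectional': True,        # all values below are assumptions
    'cell_type': 'lstm',
    'depth': 2,
    'attention_type': 'Bahdanau',
    'use_residual': False,
    'use_dropout': False,
    'time_major': False,
    'hidden_units': 512,
}
predict_anti(params)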
Code Example #2
def train_word_anti(bidirectional, cell_type, depth, attention_type,
                    use_residual, use_dropout, time_major, hidden_units):
    """测试不同参数在生成的假数据上的运行结果"""

    emb = pickle.load(open(train_data_web_emb_anti, 'rb'))

    x_data, y_data, ws = pickle.load(
        open(train_data_web_xyw_anti, 'rb'))

    # Training section
    n_epoch = 10
    batch_size = 128
    # x_data, y_data = shuffle(x_data, y_data, random_state=0)
    # x_data = x_data[:100000]
    # y_data = y_data[:100000]
    steps = int(len(x_data) / batch_size) + 1

    config = tf.ConfigProto(
        # device_count={'CPU': 1, 'GPU': 0},
        allow_soft_placement=True,
        log_device_placement=False
    )

    save_path = model_ckpt_web_anti_word

    tf.reset_default_graph()
    with tf.Graph().as_default():
        random.seed(0)
        np.random.seed(0)
        tf.set_random_seed(0)

        with tf.Session(config=config) as sess:

            model = SequenceToSequence(
                input_vocab_size=len(ws),
                target_vocab_size=len(ws),
                batch_size=batch_size,
                bidirectional=bidirectional,
                cell_type=cell_type,
                depth=depth,
                attention_type=attention_type,
                use_residual=use_residual,
                use_dropout=use_dropout,
                hidden_units=hidden_units,
                time_major=time_major,
                learning_rate=0.001,
                optimizer='adam',
                share_embedding=True,
                dropout=0.2,
                pretrained_embedding=True
            )
            init = tf.global_variables_initializer()
            sess.run(init)

            # Load the pretrained embedding into the encoder
            model.feed_embedding(sess, encoder=emb)

            flow = ThreadedGenerator(
                batch_flow([x_data, y_data], ws, batch_size),
                queue_maxsize=30)

            dummy_encoder_inputs = np.array([
                np.array([WordSequence.PAD]) for _ in range(batch_size)])
            dummy_encoder_inputs_lengths = np.array([1] * batch_size)

            for epoch in range(1, n_epoch + 1):
                costs = []
                bar = tqdm(range(steps), total=steps,
                           desc='epoch {}, loss=0.000000'.format(epoch))
                for _ in bar:
                    x, xl, y, yl = next(flow)
                    x = np.flip(x, axis=1)

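                    # Anti-LM term: with a dummy all-PAD encoder input, this
                    # loss approximates -log p(y), the unconditioned language-
                    # model score of the reply. Scaled by -0.5 below, the net
                    # objective becomes loss(y|x) - 0.5 * loss(y), an MMI-style
                    # penalty against generic, input-independent answers.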
                    add_loss = model.train(sess,
                                           dummy_encoder_inputs,
                                           dummy_encoder_inputs_lengths,
                                           y, yl, loss_only=True)

                    add_loss *= -0.5
                    cost, lr = model.train(sess, x, xl, y, yl,
                                           return_lr=True, add_loss=add_loss)
                    costs.append(cost)
                    bar.set_description('epoch {} loss={:.6f} lr={:.6f}'.format(
                        epoch,
                        np.mean(costs),
                        lr
                    ))

                model.save(sess, save_path)

            flow.close()

    # Inference section: beam-search decode (beam_width=12)
    tf.reset_default_graph()
    model_pred = SequenceToSequence(
        input_vocab_size=len(ws),
        target_vocab_size=len(ws),
        batch_size=1,
        mode='decode',
        beam_width=12,
        bidirectional=bidirectional,
        cell_type=cell_type,
        depth=depth,
        attention_type=attention_type,
        use_residual=use_residual,
        use_dropout=use_dropout,
        hidden_units=hidden_units,
        time_major=time_major,
        parallel_iterations=1,
        learning_rate=0.001,
        optimizer='adam',
        share_embedding=True,
        pretrained_embedding=True
    )
    init = tf.global_variables_initializer()

    with tf.Session(config=config) as sess:
        sess.run(init)
        model_pred.load(sess, save_path)

        bar = batch_flow([x_data, y_data], ws, 1)
        t = 0
        for x, xl, y, yl in bar:
            x = np.flip(x, axis=1)
            pred = model_pred.predict(
                sess,
                np.array(x),
                np.array(xl)
            )
            print(ws.inverse_transform(x[0]))
            print(ws.inverse_transform(y[0]))
            print(ws.inverse_transform(pred[0]))
            t += 1
            if t >= 3:
                break

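    # Second decode pass with beam_width=1 (greedy) to compare against the
    # beam-search results above.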
    tf.reset_default_graph()
    model_pred = SequenceToSequence(
        input_vocab_size=len(ws),
        target_vocab_size=len(ws),
        batch_size=1,
        mode='decode',
        beam_width=1,
        bidirectional=bidirectional,
        cell_type=cell_type,
        depth=depth,
        attention_type=attention_type,
        use_residual=use_residual,
        use_dropout=use_dropout,
        hidden_units=hidden_units,
        time_major=time_major,
        parallel_iterations=1,
        learning_rate=0.001,
        optimizer='adam',
        share_embedding=True,
        pretrained_embedding=True
    )
    init = tf.global_variables_initializer()

    with tf.Session(config=config) as sess:
        sess.run(init)
        model_pred.load(sess, save_path)

        bar = batch_flow([x_data, y_data], ws, 1)
        t = 0
        for x, xl, y, yl in bar:
            # The model was trained on reversed inputs, so flip here as well.
            x = np.flip(x, axis=1)
            pred = model_pred.predict(
                sess,
                np.array(x),
                np.array(xl)
            )
            print(ws.inverse_transform(x[0]))
            print(ws.inverse_transform(y[0]))
            print(ws.inverse_transform(pred[0]))
            t += 1
            if t >= 3:
                break
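
A minimal invocation sketch for train_word_anti; every value below is an assumption, not a setting taken from the project:

train_word_anti(
    bidirectional=True,
    cell_type='lstm',
    depth=2,
    attention_type='Bahdanau',
    use_residual=False,
    use_dropout=True,
    time_major=False,
    hidden_units=512,
)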
Code Example #3
File: train_char_cg.py Project: zzisme/nlp_xiaojiang
def train_and_dev(params):
    """测试不同参数在生成的假数据上的运行结果"""

    x_data, y_data = pickle.load(open(chatbot_data_cg_xy_anti, 'rb'))
    ws = pickle.load(open(chatbot_data_cg_ws_anti, 'rb'))

    # Training section
    n_epoch = 100
    batch_size = 128
    x_data, y_data = shuffle(x_data, y_data, random_state=20190412)

    steps = int(len(x_data) / batch_size) + 1

    config = tf.ConfigProto(
        # device_count={'CPU': 1, 'GPU': 0},
        allow_soft_placement=True,
        log_device_placement=False)

    save_path = model_ckpt_cg_anti

    tf.reset_default_graph()
    with tf.Graph().as_default():
        random.seed(0)
        np.random.seed(0)
        tf.set_random_seed(0)

        with tf.Session(config=config) as sess:

            model = SequenceToSequence(input_vocab_size=len(ws),
                                       target_vocab_size=len(ws),
                                       batch_size=batch_size,
                                       **params)
            init = tf.global_variables_initializer()
            sess.run(init)

            flow = ThreadedGenerator(batch_flow([x_data, y_data],
                                                ws,
                                                batch_size,
                                                add_end=[False, True]),
                                     queue_maxsize=30)

            dummy_encoder_inputs = np.array(
                [np.array([WordSequence.PAD]) for _ in range(batch_size)])
            dummy_encoder_inputs_lengths = np.array([1] * batch_size)

            for epoch in range(1, n_epoch + 1):
                costs = []
                bar = tqdm(range(steps),
                           total=steps,
                           desc='epoch {}, loss=0.000000'.format(epoch))
                for _ in bar:
                    x, xl, y, yl = next(flow)
                    x = np.flip(x, axis=1)

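                    # Same anti-LM trick as in Code Example #2: the dummy-input
                    # loss approximates -log p(y) and is scaled by -0.5 below
                    # to penalize generic replies.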
                    add_loss = model.train(sess,
                                           dummy_encoder_inputs,
                                           dummy_encoder_inputs_lengths,
                                           y,
                                           yl,
                                           loss_only=True)

                    add_loss *= -0.5

                    cost, lr = model.train(sess,
                                           x,
                                           xl,
                                           y,
                                           yl,
                                           return_lr=True,
                                           add_loss=add_loss)
                    costs.append(cost)
                    bar.set_description(
                        'epoch {} loss={:.6f} lr={:.6f}'.format(
                            epoch, np.mean(costs), lr))

                model.save(sess, save_path)

            flow.close()

    # Inference section: beam-search decode (beam_width=12)
    tf.reset_default_graph()
    model_pred = SequenceToSequence(input_vocab_size=len(ws),
                                    target_vocab_size=len(ws),
                                    batch_size=1,
                                    mode='decode',
                                    beam_width=12,
                                    **params)
    init = tf.global_variables_initializer()

    with tf.Session(config=config) as sess:
        sess.run(init)
        model_pred.load(sess, save_path)

        bar = batch_flow([x_data, y_data], ws, 1, add_end=False)
        t = 0
        for x, xl, y, yl in bar:
            x = np.flip(x, axis=1)
            pred = model_pred.predict(sess, np.array(x), np.array(xl))
            print(ws.inverse_transform(x[0]))
            print(ws.inverse_transform(y[0]))
            print(ws.inverse_transform(pred[0]))
            t += 1
            if t >= 3:
                break

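    # Second decode pass with beam_width=1 (greedy) for comparison.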
    tf.reset_default_graph()
    model_pred = SequenceToSequence(input_vocab_size=len(ws),
                                    target_vocab_size=len(ws),
                                    batch_size=1,
                                    mode='decode',
                                    beam_width=1,
                                    **params)
    init = tf.global_variables_initializer()

    with tf.Session(config=config) as sess:
        sess.run(init)
        model_pred.load(sess, save_path)

        bar = batch_flow([x_data, y_data], ws, 1, add_end=False)
        t = 0
        for x, xl, y, yl in bar:
            # The model was trained on reversed inputs, so flip here as well.
            x = np.flip(x, axis=1)
            pred = model_pred.predict(sess, np.array(x), np.array(xl))
            print(ws.inverse_transform(x[0]))
            print(ws.inverse_transform(y[0]))
            print(ws.inverse_transform(pred[0]))
            t += 1
            if t >= 3:
                break
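
train_and_dev accepts the same style of params dict as predict_anti in Code Example #1, and because **params reaches the SequenceToSequence constructor unchanged, training-only keys can ride along. A hypothetical call, with every value assumed:

train_and_dev({
    'bidirectional': True,
    'cell_type': 'lstm',
    'depth': 2,
    'attention_type': 'Bahdanau',
    'use_residual': False,
    'use_dropout': True,
    'time_major': False,
    'hidden_units': 512,
    'learning_rate': 0.001,
    'optimizer': 'adam',
})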
Code Example #4
File: pred_word_cg.py Project: zzisme/nlp_xiaojiang
def test(bidirectional, cell_type, depth, attention_type, use_residual,
         use_dropout, time_major, hidden_units):
    """测试不同参数在生成的假数据上的运行结果"""

    x_data, _, ws = pickle.load(open(chatbot_data_cg_xyw_anti_word, 'rb'))

    for x in x_data[:5]:
        print(' '.join(x))

    config = tf.ConfigProto(
        device_count={'CPU': 1, 'GPU': 0},
        allow_soft_placement=True,
        log_device_placement=False)

    # save_path = '/tmp/s2ss_chatbot.ckpt'
    save_path = model_ckpt_cg_anti_word

    # Inference section
    tf.reset_default_graph()
    model_pred = SequenceToSequence(input_vocab_size=len(ws),
                                    target_vocab_size=len(ws),
                                    batch_size=1,
                                    mode='decode',
                                    beam_width=0,
                                    bidirectional=bidirectional,
                                    cell_type=cell_type,
                                    depth=depth,
                                    attention_type=attention_type,
                                    use_residual=use_residual,
                                    use_dropout=use_dropout,
                                    parallel_iterations=1,
                                    time_major=time_major,
                                    hidden_units=hidden_units,
                                    share_embedding=True,
                                    pretrained_embedding=True)
    init = tf.global_variables_initializer()

    with tf.Session(config=config) as sess:
        sess.run(init)
        model_pred.load(sess, save_path)

        while True:
            user_text = input('Input Chat Sentence:')
            if user_text in ('exit', 'quit'):
                exit(0)
            x_test = [jieba.lcut(user_text.lower())]
            # x_test = [word_tokenize(user_text)]
            bar = batch_flow([x_test], ws, 1)
            x, xl = next(bar)
            # Reverse the input, matching how the model was trained.
            x = np.flip(x, axis=1)
            print(x, xl)
            pred = model_pred.predict(sess, np.array(x), np.array(xl))
            print(pred)
            print(ws.inverse_transform(x[0]))
            for p in pred:
                ans = ws.inverse_transform(p)
                print(ans)
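
As with train_word_anti, a hypothetical invocation; the values are assumptions and must match the training configuration of the checkpoint at model_ckpt_cg_anti_word:

test(
    bidirectional=True,
    cell_type='lstm',
    depth=2,
    attention_type='Bahdanau',
    use_residual=False,
    use_dropout=True,
    time_major=False,
    hidden_units=512,
)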