Example #1
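Inference-only usage: rebuild the vocabulary and label maps, convert two whitespace-tokenized questions into id sequences, restore the latest checkpoint, and print the predicted label for each question.
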
def test():
    _, _, _, sentence_size, vocab_size = build_corpus()
    v2i, _ = build_vocab()
    _, i2l = build_label()
    origin_questions = ['今天 天气 不错', '介绍 贵金属 产品']  # "the weather is nice today", "introduce precious-metal products"
    questions = [q.split() for q in origin_questions]
    questions = [[v2i[vocab] for vocab in ques if vocab in v2i]
                 for ques in questions]

    config = tf.ConfigProto()
    with tf.Session(config=config) as sess:
        model = Model(sentence_size, vocab_size, FLAGS.embed_size,
                      FLAGS.class_num, FLAGS.learning_rate, FLAGS.decay_step,
                      FLAGS.decay_rate, FLAGS.layer_size,
                      FLAGS.multi_channel_size)

        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint(FLAGS.check_point))

        # pad (or truncate) every id sequence to the model's fixed sentence length
        questions = pad_sequences(questions, maxlen=sentence_size, value=0)
        feed_dict = {
            model.encoder_input: questions,
            model.batch_size: FLAGS.batch_size
        }

        # run the prediction op and convert the result to a plain Python list
        p = sess.run(model.predict, feed_dict=feed_dict).tolist()
    for index in range(len(questions)):
        print(f'{origin_questions[index]} is_business: {i2l[p[index]]}')
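
For reference, pad_sequences here matches the signature of the Keras utility of the same name. A minimal sketch of its behavior, assuming the Keras implementation (which pads at the front by default):

from tensorflow.keras.preprocessing.sequence import pad_sequences

ids = [[5, 9, 12], [7, 3]]
print(pad_sequences(ids, maxlen=5, value=0))
# [[ 0  0  5  9 12]
#  [ 0  0  0  7  3]]
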
Example #2
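A complete training loop for the DCNN classifier (a04_dcnn): resume from a checkpoint when one exists, train in fixed-size batches with periodic progress logging, and every FLAGS.epoch_val epochs evaluate on the test split and save the model.
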
def main(_):
    train, test, _, sentence_size, vocab_size = build_corpus()
    train_x, train_y = train
    test_x, test_y = test

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        model = Model(sentence_size, FLAGS.class_num, vocab_size,
                      FLAGS.embed_size, FLAGS.filters, FLAGS.filter_num,
                      FLAGS.channel_size, FLAGS.keep_prob, FLAGS.learning_rate,
                      FLAGS.decay_step, FLAGS.decay_rate, FLAGS.k1,
                      FLAGS.k_top)

        saver = tf.train.Saver()
        # resume from the latest checkpoint if one exists, otherwise start fresh
        if os.path.exists(FLAGS.check_point + 'checkpoint'):
            saver.restore(sess, tf.train.latest_checkpoint(FLAGS.check_point))
        else:
            sess.run(tf.global_variables_initializer())

        cur_epoch = sess.run(model.epoch_step)
        corpus_size = len(train_x)
        for epoch in range(cur_epoch, FLAGS.epoch_num):
            loss, acc, count = 0, 0, 1
            # walk the corpus in full batches; a trailing partial batch is dropped
            for start, end in zip(
                    range(0, corpus_size, FLAGS.batch_size),
                    range(FLAGS.batch_size, corpus_size, FLAGS.batch_size)):
                _loss, _, _, _acc = sess.run(
                    [model.loss, model.optimize, model.predict, model.accuracy],
                    feed_dict={
                        model.x: train_x[start:end],
                        model.y: train_y[start:end]
                    })
                loss, acc, count = loss + _loss, acc + _acc, count + 1
                # log running averages every FLAGS.batch_size batches
                if count % FLAGS.batch_size == 0:
                    print(
                        f'Train -- epoch: {epoch}, count: {count}, loss: {loss/count}, acc: {acc/count}'
                    )
            sess.run(model.epoch_increment)
            if epoch % FLAGS.epoch_val == 0:
                cur_loss, cur_predict, cur_acc = sess.run(
                    [model.loss, model.predict, model.accuracy],
                    feed_dict={
                        model.x: test_x,
                        model.y: test_y
                    })
                print(
                    f'Eval -- epoch: {epoch}, loss: {cur_loss}, acc: {cur_acc}'
                )

                save_path = FLAGS.check_point + "a04_dcnn.model"
                saver.save(sess, save_path, global_step=model.epoch_step)
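
The zipped ranges above are worth calling out: they yield consecutive [start, end) batch slices and silently drop a trailing partial batch. A standalone illustration with toy numbers (not from the source):

corpus_size, batch_size = 10, 4
for start, end in zip(range(0, corpus_size, batch_size),
                      range(batch_size, corpus_size, batch_size)):
    print(start, end)
# 0 4
# 4 8  -- examples 8 and 9 never form a batch
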
Example #3
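The same training skeleton as Example #2, adapted to a seq2seq-style attention model (a05_attention): each label is expanded into a three-token decoder input sequence before being fed to the graph.
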
def main(_):
    train, test, _, sentence_size, vocab_size = build_corpus()
    train_x, train_y = train
    test_x, test_y = test

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        model = Model(sentence_size, vocab_size, FLAGS.embed_size,
                      FLAGS.hidden_size, FLAGS.class_num, FLAGS.learning_rate,
                      FLAGS.decay_step, FLAGS.decay_rate, FLAGS.mode)

        saver = tf.train.Saver()
        if os.path.exists(FLAGS.check_point + 'checkpoint'):
            saver.restore(sess, tf.train.latest_checkpoint(FLAGS.check_point))
        else:
            sess.run(tf.global_variables_initializer())

        cur_epoch = sess.run(model.epoch_step)
        corpus_size = len(train_x)
        for epoch in range(cur_epoch, FLAGS.epoch_num):
            loss, acc, count = 0, 0, 1
            for start, end in zip(
                    range(0, corpus_size, FLAGS.batch_size),
                    range(FLAGS.batch_size, corpus_size, FLAGS.batch_size)):
                # decoder input per example: [2, y, 1 - y]; id 2 is presumably a
                # start/GO token, followed by the label and the opposite class
                train_yy = np.array([[2, i] + [i + 1 if i == 0 else i - 1]
                                     for i in train_y[start:end]],
                                    dtype=np.int32)
                _loss, _, _, _acc = sess.run(
                    [
                        model.loss, model.optimize, model.predict,
                        model.accuracy
                    ],
                    feed_dict={
                        model.encoder_input: train_x[start:end],
                        model.decoder_input: train_yy,
                        model.decoder_output: train_y[start:end],
                        model.batch_size: FLAGS.batch_size
                    })
                loss, acc, count = loss + _loss, acc + _acc, count + 1
                if count % FLAGS.batch_size == 0:
                    print(
                        f'Train -- epoch: {epoch}, count: {count}, loss: {loss/count}, acc: {acc/count}'
                    )
            sess.run(model.epoch_increment)
            if epoch % FLAGS.epoch_val == 0:
                # same decoder-input construction for the test split
                test_yy = np.array([[2, i] + [i + 1 if i == 0 else i - 1]
                                    for i in test_y],
                                   dtype=np.int32)
                cur_loss, cur_predict, cur_acc = sess.run(
                    [model.loss, model.predict, model.accuracy],
                    feed_dict={
                        model.encoder_input: test_x,
                        model.decoder_input: test_yy,
                        model.decoder_output: test_y,
                        model.batch_size: len(test_x)
                    })
                print(
                    f'Eval -- epoch: {epoch}, loss: {cur_loss}, acc: {cur_acc}'
                )

                save_path = FLAGS.check_point + "a05_attention.model"
                saver.save(sess, save_path, global_step=model.epoch_step)
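
To make the decoder-input comprehension concrete, here is what it produces for the two binary labels (assuming labels are 0 and 1, as the i + 1 if i == 0 else i - 1 branch implies):

import numpy as np

labels = [0, 1]
print(np.array([[2, i] + [i + 1 if i == 0 else i - 1]
                for i in labels], dtype=np.int32))
# [[2 0 1]
#  [2 1 0]]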