Example #1
def inference():
    dicPath = "C:\\wuwei\\work\\github\\data\\imdb.dict.pkl"
    inferenceFile = "C:\\wuwei\\work\\github\\dl\\sentiment\\inferencedata\\pos1.txt"
    dic = loadDic(dicPath)
    inference_data = preProcessData.preProcess_inference_data(
        inferenceFile, dic)
    print(inference_data)
    inference_data = dataload.load_inference_data(inference_data)
    print(inference_data)
    graph = tf.Graph()
    with graph.as_default():

        encoder_inputs = tf.placeholder(shape=(batchSize, None),
                                        dtype=tf.int32,
                                        name='encoder_inputs')

        embeddings_inputs = tf.Variable(
            tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))

        logits = model.inference2(encoder_inputs,
                                  embeddings_inputs,
                                  classNum,
                                  isTrainModel=False)

        # The inference batch holds a single sequence: only row 0 is
        # filled, the remaining batchSize - 1 rows stay zero.
        inputs_batch_major = np.zeros(
            shape=[batchSize, len(inference_data[0])], dtype=np.int32)
        for i in range(len(inference_data[0])):
            inputs_batch_major[0, i] = inference_data[0][i]

        saver = tf.train.Saver()

    with tf.Session(graph=graph) as sess:
        # Restore all variables from the checkpoint, so no global
        # initializer is needed. Note: Saver.restore expects the checkpoint
        # file prefix (e.g. '.../model.ckpt-1000'), not a bare directory.
        saver.restore(sess, ckpt_dir)

        feed = {encoder_inputs: inputs_batch_major}

        logits_val = sess.run([logits], feed_dict=feed)
        results = np.squeeze(logits_val)
        # argsort()[-1:][::-1] picks the index of the largest logit,
        # i.e. the top-1 predicted class.
        top_1 = results.argsort()[-1:][::-1]

        print("The logits are: ", results)
        print("The predicted class is: {}".format(top_1[0]))
Example #2
def validate2():
    val_tfrecords = TFRECORDS_PATH + 'test5.tfrecords'

    val_image_batch, val_label_batch = input_data.get_batch(val_tfrecords,
                                                            BATCH_SIZE,
                                                            num_epochs=1)

    x = tf.placeholder(tf.float32, [BATCH_SIZE, WIDTH, HEIGHT, CHANNEL],
                       name='x-input')
    y_ = tf.placeholder(tf.float32, [BATCH_SIZE, NUM_CLASSES], name='y-input')

    y = model.inference2(x, NUM_CLASSES, evaluate=False)
    predict_y = tf.argmax(y, 1)
    val_loss = tools.loss(logits=y, labels=y_)
    val_acc = tools.accuracy(logits=y, labels=y_)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run([
            tf.global_variables_initializer(),
            tf.local_variables_initializer()
        ])

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        ckpt = tf.train.get_checkpoint_state(MODEL2_SAVE_PATH)

        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            # Recover the global step from the checkpoint filename,
            # e.g. 'model.ckpt-1500' -> '1500'.
            global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                '-')[-1]

            # Running sum of per-batch accuracies; averaged once the
            # one-epoch input queue is exhausted (OutOfRangeError below).
            global_score = 0.
            num_step = 0
            try:
                for i in range(TRAINING_STEPS):
                    if coord.should_stop():
                        break

                    val_xs, val_ys = sess.run(
                        [val_image_batch, val_label_batch])
                    yy, loss_value, acc_value = sess.run(
                        [predict_y, val_loss, val_acc],
                        feed_dict={
                            x: val_xs,
                            y_: val_ys
                        })
                    global_score += acc_value
                    num_step += 1
                    if i % 5 == 0:
                        print(
                            "in the %dth batch: After %s training step(s), validation accuracy = %g"
                            % (i, global_step, acc_value))


                        # plot_images(val_xs, val_ys)

            except tf.errors.OutOfRangeError:
                if num_step > 0:
                    print("global accuracy = %g" % (global_score / num_step))
                print('Done testing -- epoch limit reached')

            finally:
                coord.request_stop()

            coord.join(threads)
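
Both this snippet and the training snippet below recover the global step by string-splitting the checkpoint path. That parsing could live in a small helper; a minimal sketch (step_from_ckpt_path is a hypothetical name, not from the original code):

def step_from_ckpt_path(path):
    """Extracts the trailing global-step integer from a checkpoint path
    of the form '<dir>/<model_name>-<step>', e.g. 'model/model.ckpt-1500'."""
    return int(path.split('/')[-1].split('-')[-1])

assert step_from_ckpt_path('model/model.ckpt-1500') == 1500

Saving with tf.train.Saver.save(..., global_step=...) produces exactly this '<prefix>-<step>' naming, which is why splitting on '-' works.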
Example #3
def train2():
    ckpt = tf.train.get_checkpoint_state(MODEL2_SAVE_PATH)

    train_tfrecords = TFRECORDS_PATH + 'train2.tfrecords'
    val_tfrecords = TFRECORDS_PATH + 'val.tfrecords'

    train_image_batch, train_label_batch = input_data.get_batch(
        train_tfrecords, BATCH_SIZE)
    val_image_batch, val_label_batch = input_data.get_batch(
        val_tfrecords, BATCH_SIZE)

    x = tf.placeholder(tf.float32, [BATCH_SIZE, WIDTH, HEIGHT, CHANNEL],
                       name='x-input')
    y_ = tf.placeholder(tf.float32, [BATCH_SIZE, NUM_CLASSES], name='y-input')

    if ckpt and ckpt.model_checkpoint_path:
        # Resume the global step from the checkpoint filename
        # (e.g. 'model.ckpt-1500' -> 1500) so step counting continues
        # across restarts.
        num_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
        print(num_step)
        global_step = tf.Variable(num_step, trainable=False)
    else:
        global_step = tf.Variable(0, trainable=False)

    regularizer = tf.contrib.layers.l2_regularizer(REGULARAZTION_RATE)
    y = model.inference2(x, NUM_CLASSES, regularizer=regularizer)
    train_loss = tools.loss(logits=y, labels=y_)
    train_acc = tools.accuracy(logits=y, labels=y_)
    train_op = tools.optimizer(train_loss,
                               LEARNING_RATE,
                               global_step=global_step)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            tf.global_variables_initializer().run()

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        summary_op = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(LOG2_TRAIN_PATH, sess.graph)
        val_writer = tf.summary.FileWriter(LOG2_VAL_PATH, sess.graph)

        try:
            for i in range(TRAINING_STEPS):
                if coord.should_stop():
                    break

                xs, ys = sess.run([train_image_batch, train_label_batch])
                _, loss_value, acc_value, step = sess.run(
                    [train_op, train_loss, train_acc, global_step],
                    feed_dict={
                        x: xs,
                        y_: ys
                    })

                if i % 1 == 0:  # i % 1 is always 0: log and summarize every step
                    print(
                        "After %d training step(s), loss on training batch is %g, accuracy is %g"
                        % (step, loss_value, acc_value))
                    summary_str = sess.run(summary_op,
                                           feed_dict={
                                               x: xs,
                                               y_: ys
                                           })
                    train_writer.add_summary(summary_str, step)

                if i % 5 == 0:
                    val_xs, val_ys = sess.run(
                        [val_image_batch, val_label_batch])
                    val_loss_value, val_acc_value = sess.run(
                        [train_loss, train_acc],
                        feed_dict={
                            x: val_xs,
                            y_: val_ys
                        })
                    print(
                        "After %d training step(s), validation loss is %g, accuracy is %g"
                        % (step, val_loss_value, val_acc_value))
                    summary_str = sess.run(summary_op,
                                           feed_dict={
                                               x: val_xs,
                                               y_: val_ys
                                           })
                    val_writer.add_summary(summary_str, step)

                if i % 10 == 0 or step + 1 == TRAINING_STEPS:
                    saver.save(sess,
                               os.path.join(MODEL2_SAVE_PATH, MODEL_NAME),
                               global_step=global_step)

        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')

        finally:
            coord.request_stop()

        coord.join(threads)
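
The restore-or-initialize branching at the top of the session is a common TF 1.x resume pattern. Folded into one helper it might look like this; a sketch under the same TF 1.x API as the snippet, with restore_or_init being a hypothetical name:

import tensorflow as tf

def restore_or_init(sess, saver, ckpt_dir):
    """Restores the latest checkpoint under ckpt_dir if one exists,
    otherwise initializes all variables. Returns the step the run
    resumes from (0 for a fresh run)."""
    ckpt = tf.train.get_checkpoint_state(ckpt_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        return int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
    sess.run(tf.global_variables_initializer())
    return 0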