Example #1
0
def check_accuracy(mnist):
    """Periodically evaluate the latest checkpoint on the MNIST test set.

    Args:
        mnist: dataset object exposing ``test.images`` and ``test.labels``
            (e.g. from ``input_data.read_data_sets``) — assumed; confirm
            against the caller.

    Runs forever: every ``TEST_INTERVAL_SECS`` seconds it restores the
    newest checkpoint (using the EMA shadow variables) and prints the
    test accuracy.
    """
    with tf.Graph().as_default():
        x = tf.placeholder(tf.float32, [None, mnsit_forward.INPUT_NODE])
        y_ = tf.placeholder(tf.float32, [None, mnsit_forward.OUTPUT_NODE])
        y = mnsit_forward.forward(x, None)

        # Restore the exponential-moving-average shadow values instead of
        # the raw weights.
        ema = tf.train.ExponentialMovingAverage(
            mnsit_backward.MOVING_AVG_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)

        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        while True:  # poll forever; new checkpoints appear as training runs
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(
                    mnsit_backward.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Checkpoint paths end in "...-<global_step>".
                    global_step = ckpt.model_checkpoint_path.split(
                        '/')[-1].split('-')[-1]
                    accuracy_rate = sess.run(accuracy,
                                             feed_dict={
                                                 x: mnist.test.images,
                                                 y_: mnist.test.labels
                                             })
                    fmt = 'After {:s} steps, test accuracy rate is {:.09f}'
                    print(fmt.format(global_step, accuracy_rate))
                else:
                    # Don't spin silently when training hasn't saved yet.
                    print('No checkpoint file found')
            time.sleep(TEST_INTERVAL_SECS)
Example #2
0
def check_accuracy():
    """Periodically evaluate the latest checkpoint on a TFRecord test batch.

    Unlike the in-memory variant, the test data is read through
    ``mnist_generate_recode.get_tf_record`` queue ops, so queue runners
    are started/stopped around each evaluation.

    Runs forever: every ``TEST_INTERVAL_SECS`` seconds it restores the
    newest checkpoint (using the EMA shadow variables) and prints the
    test accuracy.
    """
    with tf.Graph().as_default():
        x = tf.placeholder(tf.float32, [None, mnsit_forward.INPUT_NODE])
        y_ = tf.placeholder(tf.float32, [None, mnsit_forward.OUTPUT_NODE])
        y = mnsit_forward.forward(x, None)

        # Restore the exponential-moving-average shadow values instead of
        # the raw weights.
        ema = tf.train.ExponentialMovingAverage(
            mnsit_backward.MOVING_AVG_DECAY)
        ema_restore = ema.variables_to_restore()
        saver = tf.train.Saver(ema_restore)

        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Queue-based input pipeline; tensors are materialized via sess.run
        # once the queue runners are started.
        img_batch, label_batch = mnist_generate_recode.get_tf_record(
            TEST_NUM, is_train=False)

        while True:  # poll forever; new checkpoints appear as training runs
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(
                    mnsit_backward.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Checkpoint paths end in "...-<global_step>".
                    global_step = ckpt.model_checkpoint_path.split(
                        '/')[-1].split('-')[-1]

                    coord = tf.train.Coordinator()
                    threads = tf.train.start_queue_runners(sess=sess,
                                                           coord=coord)
                    xs, ys = sess.run([img_batch, label_batch])

                    accuracy_rate = sess.run(accuracy,
                                             feed_dict={
                                                 x: xs,
                                                 y_: ys
                                             })
                    fmt = 'After {:s} steps, test accuracy rate is {:.09f}'
                    print(fmt.format(global_step, accuracy_rate))

                    # Shut the reader threads down before the session closes.
                    coord.request_stop()
                    coord.join(threads)
                else:
                    # Don't spin silently when training hasn't saved yet.
                    print('No checkpoint file found')
            time.sleep(TEST_INTERVAL_SECS)
Example #3
0
def restore_model(pic_array):
    """Predict the digit(s) in *pic_array* with the latest checkpoint.

    Args:
        pic_array: array of shape [batch, INPUT_NODE] of preprocessed
            pixel values (assumed — confirm against the caller).

    Returns:
        Predicted class indices (argmax of the logits) as produced by
        ``sess.run``, or ``None`` when no checkpoint is available.
    """
    # Rebuild the inference graph from scratch.
    with tf.Graph().as_default():
        # Only the input needs a placeholder; no labels at inference time.
        x = tf.placeholder(tf.float32, [None, mnsit_forward.INPUT_NODE])
        y = mnsit_forward.forward(x, None)

        # The index of the largest logit is the predicted digit.
        pre_value = tf.argmax(y, 1)

        # Restore the EMA shadow weights rather than the raw variables.
        variable_avg = tf.train.ExponentialMovingAverage(
            mnsit_backward.MOVING_AVG_DECAY)
        variable_to_restore = variable_avg.variables_to_restore()
        saver = tf.train.Saver(variable_to_restore)

        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(
                mnsit_backward.MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                return sess.run(pre_value, feed_dict={x: pic_array})
            # Previously fell through to an implicit None — make the
            # failure mode visible to the caller.
            print('No checkpoint file found')
            return None
Example #4
0
def backward(mnsit):
    """Train the forward network on MNIST with SGD + exponential LR decay.

    Args:
        mnsit: dataset object exposing ``train.next_batch``,
            ``train.num_examples``, ``test.images`` and ``test.labels``.

    Side effects:
        Resumes from the latest checkpoint in ``FLAGS.model_save_path``
        if one exists, saves a checkpoint every 100 steps, and prints
        loss / learning-rate / test-accuracy progress.
    """
    x = tf.placeholder(tf.float32, [None, mnsit_forward.INPUT_NODE])
    y_ = tf.placeholder(tf.float32, [None, mnsit_forward.OUTPUT_NODE])
    y = mnsit_forward.forward(x, FLAGS.regularizer)
    # Non-trainable step counter; advanced by the optimizer's minimize().
    global_step = tf.Variable(0, trainable=False)

    # Labels arrive one-hot in y_; sparse cross-entropy wants class ids.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))

    cross_entropy_mean = tf.reduce_mean(cross_entropy)

    # Total loss = data loss + regularization terms collected by forward().
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

    # Staircase decay: LR drops once per epoch-worth of steps.
    learning_rate = tf.train.exponential_decay(FLAGS.learning_rate_base,
                                               global_step,
                                               mnsit.train.num_examples /
                                               FLAGS.batch_size,
                                               FLAGS.learning_rate_decay,
                                               staircase=True)

    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    # Maintain EMA shadows of all trainable variables for evaluation.
    ema = tf.train.ExponentialMovingAverage(FLAGS.moving_avg_decay,
                                            global_step)
    ema_op = ema.apply(tf.trainable_variables())
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Group the SGD update and the EMA update into one training op.
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)

        # Resume from the latest checkpoint when available (restores
        # global_step too, so step numbering continues).
        ckpt = tf.train.get_checkpoint_state(FLAGS.model_save_path)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        for i in range(FLAGS.steps):
            xs, ys = mnsit.train.next_batch(FLAGS.batch_size)
            feed_dict = {x: xs, y_: ys}
            _, loss_value, learning_rate_val, step = sess.run(
                [train_op, loss, learning_rate, global_step],
                feed_dict=feed_dict)
            if i % 100 == 0:
                fmt = 'After {:05d} steps, loss is {:.09f}, learning rate is {:.09f}'
                print(fmt.format(step, loss_value, learning_rate_val))

                saver.save(sess,
                           os.path.join(FLAGS.model_save_path,
                                        FLAGS.model_name),
                           global_step=global_step)
                accuracy_rate = sess.run(accuracy,
                                         feed_dict={
                                             x: mnsit.test.images,
                                             y_: mnsit.test.labels
                                         })
                fmt = 'After {:d} steps, test accuracy rate is {:.09f}'
                print(fmt.format(step, accuracy_rate))