Example #1
import time

import tensorflow as tf

import mnist_inference
import mnist_train


def evaluate(mnist):
    with tf.Graph().as_default() as g:
        # Define the input and output formats
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE],
                           name="x-input")
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE],
                            name="y-input")
        validate_feed = {
            x: mnist.validation.images,
            y_: mnist.validation.labels
        }

        # Compute the forward pass by calling the pre-packaged function. Since the regularization
        # loss is irrelevant at test time, the argument used to compute it is set to None.
        y = mnist_inference.inference(x, None)

        # Use the forward-pass result to compute the accuracy. To classify unseen
        # examples, tf.argmax(y, 1) yields the predicted class of each input.
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Load the model via variable renaming, so the forward pass does not need to call the
        # moving-average function to obtain averaged values. This lets us fully reuse the
        # forward-pass code defined in mnist_inference.py.
        variable_averages = tf.train.ExponentialMovingAverage(
            mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Every EVAL_INTERVAL_SECS seconds, recompute the accuracy to track how it changes during training.
        while True:
            with tf.Session() as sess:
                # tf.train.get_checkpoint_state automatically finds the newest model file in the directory via the checkpoint file.
                ckpt = tf.train.get_checkpoint_state(
                    mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    # Load the model
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Recover from the file name the number of training steps at which the model was saved.
                    global_step = ckpt.model_checkpoint_path.split(
                        '/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy,
                                              feed_dict=validate_feed)
                    print(
                        f"After {global_step} training step(s), validation accuracy = {accuracy_score}"
                    )
                else:
                    print('No checkpoint file found')
                    return
                time.sleep(EVAL_INTERVAL_SECS)
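
A minimal driver sketch for calling evaluate, assuming the standard TF 1.x MNIST loader; the data directory and the EVAL_INTERVAL_SECS value are illustrative, not taken from the original script:

from tensorflow.examples.tutorials.mnist import input_data

EVAL_INTERVAL_SECS = 10  # assumed polling interval in seconds


def main(argv=None):
    # one_hot=True matches the tf.argmax(y_, 1) label handling above
    mnist = input_data.read_data_sets("/tmp/mnist_data", one_hot=True)
    evaluate(mnist)


if __name__ == "__main__":
    tf.app.run()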
Example #2
import time

import tensorflow as tf

import mnist_inference
import mnist_train


def evaluate(mnist):
    with tf.Graph().as_default() as g:
        # Define the input and output placeholders
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE],
                           name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE],
                            name='y-input')

        validate_feed = {
            x: mnist.validation.images,
            y_: mnist.validation.labels
        }
        # Reuse the forward-pass code defined in mnist_inference.py directly
        y = mnist_inference.inference(x, None)

        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Load the model via variable renaming
        variable_averages = tf.train.ExponentialMovingAverage(
            mnist_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # Every EVAL_INTERVAL_SECS seconds, recompute the accuracy to track how it changes during training
        while True:
            with tf.Session() as sess:
                # tf.train.get_checkpoint_state automatically finds the newest model file in the directory via the checkpoint file
                ckpt = tf.train.get_checkpoint_state(
                    mnist_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    # Load the model
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Recover from the file name the number of training steps at which the model was saved
                    global_step = ckpt.model_checkpoint_path.split(
                        '/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy,
                                              feed_dict=validate_feed)
                    print("After %s training step(s), validation accuracy=%g" %
                          (global_step, accuracy_score))
                else:
                    print("No checkpoint file found")
                    return

            time.sleep(EVAL_INTERVAL_SECS)
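
The "variable renaming" trick in these evaluate examples works because variables_to_restore() returns a dictionary mapping each variable's shadow (moving-average) name to the variable itself, so the Saver loads the averaged values from the checkpoint directly into the plain variables. A small sketch of what it returns, using a hypothetical variable v:

v = tf.Variable(0.0, name="v")
ema = tf.train.ExponentialMovingAverage(0.99)
print(ema.variables_to_restore())
# roughly: {'v/ExponentialMovingAverage': <tf.Variable 'v:0' ...>}
# Restoring with this map reads the checkpoint tensor named
# "v/ExponentialMovingAverage" into the variable v.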
Example #3
import time

import tensorflow as tf

import mnist_inference
import mnist_train


def evaluate(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.input_node],
                       name="x_input")
    y_true = tf.placeholder(tf.float32, [None, mnist_inference.output_node],
                            name="y_true")
    validate_feed = {
        x: mnist.validation.images,
        y_true: mnist.validation.labels
    }
    # Compute the forward pass; since this is evaluation, the regularization loss is irrelevant
    y = mnist_inference.inference(x, None)
    # Compute the accuracy
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_true, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Load the model via variable renaming, so the forward pass does not need to call the moving-average function to obtain averaged values
    variable_averages = tf.train.ExponentialMovingAverage(
        mnist_train.moving_average_rate)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    while True:
        # Initialize the session and start the evaluation loop
        session_conf = tf.ConfigProto(allow_soft_placement=True,
                                      log_device_placement=False)
        session_conf.gpu_options.allow_growth = True
        with tf.Session(config=session_conf) as sess:
            # Find the newest model file in the directory
            ckpt = tf.train.get_checkpoint_state(mnist_train.model_save_path)
            if ckpt and ckpt.model_checkpoint_path:
                # Load the model
                saver.restore(sess, ckpt.model_checkpoint_path)
                # Recover from the file name the number of training steps at which the model was saved
                global_step = ckpt.model_checkpoint_path.split("/")[-1].split(
                    "-")[-1]
                accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                print(
                    "After {:s} training step(s), validation accuracy = {:g}".
                    format(global_step, accuracy_score))
            else:
                print("No checkpoint file found.")
                return
        time.sleep(eval_interval_secs)
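
All three evaluate variants recover the step count by splitting the checkpoint path on "/" and "-". A quick illustration with a hypothetical path:

# Suppose the newest checkpoint was saved with global_step=3000:
path = "model/mnist/model.ckpt-3000"
step = path.split("/")[-1].split("-")[-1]
print(step)  # "3000" (a string; wrap in int() if arithmetic is needed)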
Example #4
import os

import tensorflow as tf

import mnist_inference


def train(mnist):
    # Define the placeholders.
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name="x-input")
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name="y-input")

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    # Reuse the forward-pass code defined in mnist_inference.py directly.
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)
    # Define the loss function, learning rate, moving-average op, and training process, similar to the example in Section 5.2.1.
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection("losses"))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name="train")

    # Initialize the TensorFlow persistence class (Saver).
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        # The model's performance on validation data is no longer checked during training; validation and testing are handled by a separate program.
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            # Save the model every 1000 steps
            if i % 1000 == 0:
                # Print the current training status. Only the loss on the current training batch is
                # printed, which gives a rough picture of how training is going; accuracy on the
                # validation set is reported by a separate program.
                print(f"After {step} training step(s), loss on training batch is {loss_value}")
                # Save the current model. Passing global_step appends the number of training steps to
                # each saved file name, e.g. "model.ckpt-1000" is the model obtained after 1000 steps.
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
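
The train examples reference module-level constants defined elsewhere in the original script. A hedged sketch of plausible definitions (the values shown are illustrative, not the originals):

# Illustrative hyperparameters; the original script defines its own values
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH = "model/"   # directory for checkpoint files
MODEL_NAME = "model.ckpt"    # checkpoint file-name prefix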
Example #5
import os

import tensorflow as tf

import mnist_inference


def train(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.input_node],
                       name="x_input")
    y_true = tf.placeholder(tf.float32, [None, mnist_inference.output_node],
                            name="y_true")
    regularizer = tf.contrib.layers.l2_regularizer(regularization_rate)

    # Compute the forward pass
    y = mnist_inference.inference(x, regularizer)

    # Define a variable to hold the number of training steps. It needs no moving average and is marked non-trainable (trainable=False)
    global_step = tf.Variable(0, trainable=False)

    # Initialize the moving-average class with the decay rate and the training-step variable
    variable_averages = tf.train.ExponentialMovingAverage(
        moving_average_rate, global_step)

    # Apply the moving average to all variables representing network parameters.
    # tf.trainable_variables() returns the collection of all trainable (trainable=True) variables
    variable_averages_op = variable_averages.apply(tf.trainable_variables())

    # Compute the cross-entropy loss
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_true, 1))
    # cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_true)
    # Average the cross-entropy over all examples in the current batch
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    # Total loss
    loss = cross_entropy_mean + tf.add_n(tf.get_collection("losses"))

    # Set up an exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(
        learning_rate_base,  # base learning rate
        global_step,  # current training step
        mnist.train.num_examples / batch_size,  # iterations needed for one pass over the training data
        learning_rate_decay,  # learning-rate decay rate
    )

    # Minimize the loss
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    # Update the parameters and their moving averages in a single step
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name="train")

    # Initialize the TensorFlow persistence class (Saver)
    saver = tf.train.Saver()

    # Initialize the session and start the training process
    session_conf = tf.ConfigProto(allow_soft_placement=True,
                                  log_device_placement=False)
    session_conf.gpu_options.allow_growth = True
    with tf.Session(config=session_conf) as sess:
        tf.global_variables_initializer().run()

        # Train the network iteratively
        for i in range(training_steps):
            # Fetch a batch of training data
            xs, ys = mnist.train.next_batch(batch_size)
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={
                                               x: xs,
                                               y_true: ys
                                           })
            if i % 1000 == 0:
                print(
                    "After {:d} training step(s), loss on training batch is {:g}"
                    .format(step, loss_value))
                # Save the current model
                saver.save(sess,
                           os.path.join(model_save_path, model_name),
                           global_step=global_step)
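
Examples #4 and #5 convert one-hot labels to integer class indices with tf.argmax before calling the sparse cross-entropy op; the commented-out line in Example #5 shows the dense alternative. For one-hot labels the two are interchangeable (TF 1.x signatures):

# Given logits y and one-hot labels y_true, both return one loss per example:
sparse = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=y, labels=tf.argmax(y_true, 1))  # integer class indices
dense = tf.nn.softmax_cross_entropy_with_logits(
    logits=y, labels=y_true)                # one-hot probability vectors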
Example #6
import os

import tensorflow as tf

import mnist_inference


def train(mnist):
    # Define the input and output placeholders
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE],
                       name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE],
                        name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    # Reuse the forward-pass code defined in mnist_inference.py directly
    y = mnist_inference.inference(x, regularizer)

    # Define a variable to hold the number of training steps, marked as non-trainable
    global_step = tf.Variable(0, trainable=False)
    # Define the moving-average class
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)

    # Apply the moving average to all network-parameter variables
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    # Define the loss function
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))

    # Average the cross-entropy over all examples in the current batch
    cross_entropy_mean = tf.reduce_mean(cross_entropy)

    # The total loss is the sum of the cross-entropy loss and the regularization loss
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

    # Set up an exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step, mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY)

    # Define the optimizer
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    train_op = tf.group(train_step, variables_averages_op)

    # Initialize the TensorFlow persistence class (Saver)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={
                                               x: xs,
                                               y_: ys
                                           })

            # Save the model every 1000 steps
            if i % 1000 == 0:
                print(
                    'After %d training step(s), loss on training batch is %g' %
                    (step, loss_value))

                # Save the current model
                saver.save(sess,
                           os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)
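
Example #6 bundles the weight update and the moving-average update with tf.group instead of the control_dependencies/no_op pattern used in Examples #4 and #5; the two idioms are equivalent here, since running train_op triggers both ops either way:

# Idiom used in Example #6:
train_op = tf.group(train_step, variables_averages_op)

# Equivalent idiom used in Examples #4 and #5:
with tf.control_dependencies([train_step, variables_averages_op]):
    train_op = tf.no_op(name="train")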